Dataset schema (⌀ marks nullable columns); each record below follows this schema, with the content field holding the full source file:

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
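A minimal, hypothetical sketch of how records with this schema could be filtered once exported; the JSON Lines file name and the export format below are assumptions for illustration, not something the schema itself specifies.

```python
# Hypothetical sketch: load exported rows and filter by a few of the columns
# listed above. "rows.jsonl" is a made-up export file name.
import json

with open("rows.jsonl", "r", encoding="utf-8") as f:
    rows = [json.loads(line) for line in f]

# Keep small Python files from repositories with at least one star.
selected = [
    r for r in rows
    if r["ext"] == "py"
    and r["size"] < 10_000
    and (r["max_stars_count"] or 0) >= 1
]

for r in selected[:5]:
    print(r["max_stars_repo_name"], r["max_stars_repo_path"], r["size"])
```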
c3a6ae868af3da0da200ed7803a12bdbf4c3d2e6
| 408
|
py
|
Python
|
searchApp/management/commands/reindex.py
|
kunalgrover05/google-appengine-django-search
|
6260caa67772c91f2c18e876ffb3738f9a503219
|
[
"BSD-2-Clause"
] | 1
|
2017-04-28T22:18:38.000Z
|
2017-04-28T22:18:38.000Z
|
searchApp/management/commands/reindex.py
|
kunalgrover05/google-appengine-django-search
|
6260caa67772c91f2c18e876ffb3738f9a503219
|
[
"BSD-2-Clause"
] | null | null | null |
searchApp/management/commands/reindex.py
|
kunalgrover05/google-appengine-django-search
|
6260caa67772c91f2c18e876ffb3738f9a503219
|
[
"BSD-2-Clause"
] | null | null | null |
from django.core.management.base import BaseCommand
from searchApp.models import IndexModel
class Command(BaseCommand):
help = 'Sets all indexes as not processed to allow reindex'
def handle(self, *args, **options):
IndexModel.objects.filter(deleted=False).filter(processed=True).update(processed=False)
self.stdout.write(self.style.SUCCESS('Successfully set all for reindexing'))
| 37.090909
| 95
| 0.757353
|
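As a usage note for the Django management command in the record above, here is a hedged sketch of invoking it programmatically; it assumes a configured Django project with the searchApp application installed, which the record itself does not guarantee.

```python
# Hypothetical usage sketch: run the reindex command from code instead of
# "python manage.py reindex". Requires DJANGO_SETTINGS_MODULE to be set.
import django
from django.core.management import call_command

django.setup()
call_command("reindex")  # marks every non-deleted, processed index for reindexing
```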
c4d9b8b2a19282185374caea6f8e643a4f460902
| 1,749
|
py
|
Python
|
mainGrabing.py
|
orangeYao/twiOpinion
|
4bc8e58b609c7d45ed2d5726ede594f40f55d313
|
[
"MIT"
] | null | null | null |
mainGrabing.py
|
orangeYao/twiOpinion
|
4bc8e58b609c7d45ed2d5726ede594f40f55d313
|
[
"MIT"
] | null | null | null |
mainGrabing.py
|
orangeYao/twiOpinion
|
4bc8e58b609c7d45ed2d5726ede594f40f55d313
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import time
import os
import subprocess
import functions
def grabSetting():
word_to_grab = raw_input("The tag or keyword you wish to grab from Twitter: ") or "China"
#path = raw_input("The folder you wish to store information in (default as ./output): ") or "./output"
path = "./output"
if (path == "./output" and not os.path.exists("./output")):
os.makedirs("./output")
file_name = path + "/stream_" + word_to_grab + ".json"
print "Started grabing, grabed information will be stored in " + file_name
pw = subprocess.Popen(["python", "twitter_stream_download.py", "-q", word_to_grab, "-d", path])
print ""
return (file_name, pw)
if __name__ == "__main__":
functions.startingInfo()
file_name, pw = grabSetting()
while (True):
checkProgress = raw_input("Type in \"c\" to check number of tweets have been crawled\n"+
"Type in \"f\" to fetch meaningful contents of tweets for training in next step\n"+
"Type in \"s\" to fetch meaningful contents, stop current crawling process and quit: ")
if (checkProgress == "c"):
proc = subprocess.Popen(["wc", "-l", file_name], stdout=subprocess.PIPE)
lines = proc.stdout.read()
print " " + lines.split()[0] + " tweets have been crawled"
if (checkProgress == "f" or checkProgress == "s"):
proc = subprocess.Popen(["python", "readJson.py", "-i", file_name], stdout=subprocess.PIPE)
lines = proc.stdout.read()
print " " + lines + " lines of contents are fetched"
print ""
if (checkProgress == "s"):
pw.kill()
break
| 40.674419
| 121
| 0.58948
|
318e82b74a21866c3216afba99755b2a77cd89be
| 4,650
|
py
|
Python
|
tutorial-contents/405_DQN_Reinforcement_learning.py
|
Feng-XiaoYue/Pytorch-RL
|
86a22a0ee3c8bfe94efb989c98ede25a283d564a
|
[
"MIT"
] | null | null | null |
tutorial-contents/405_DQN_Reinforcement_learning.py
|
Feng-XiaoYue/Pytorch-RL
|
86a22a0ee3c8bfe94efb989c98ede25a283d564a
|
[
"MIT"
] | null | null | null |
tutorial-contents/405_DQN_Reinforcement_learning.py
|
Feng-XiaoYue/Pytorch-RL
|
86a22a0ee3c8bfe94efb989c98ede25a283d564a
|
[
"MIT"
] | null | null | null |
"""
View more, visit my tutorial page: https://mofanpy.com/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
More about Reinforcement learning: https://mofanpy.com/tutorials/machine-learning/reinforcement-learning/
Dependencies:
torch: 0.4
gym: 0.8.1
numpy
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
# Hyper Parameters
BATCH_SIZE = 32
LR = 0.01 # learning rate
EPSILON = 0.9 # greedy policy
GAMMA = 0.9 # reward discount
TARGET_REPLACE_ITER = 100 # target update frequency
MEMORY_CAPACITY = 2000
env = gym.make('CartPole-v0')
env = env.unwrapped
N_ACTIONS = env.action_space.n
N_STATES = env.observation_space.shape[0]
# print(env.observation_space)
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape # to confirm the shape
class Net(nn.Module):
def __init__(self, ):
super(Net, self).__init__()
self.fc1 = nn.Linear(N_STATES, 50)
self.fc1.weight.data.normal_(0, 0.1) # initialization
self.out = nn.Linear(50, N_ACTIONS)
self.out.weight.data.normal_(0, 0.1) # initialization
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
actions_value = self.out(x)
return actions_value
class DQN(object):
def __init__(self):
self.eval_net, self.target_net = Net(), Net()
self.learn_step_counter = 0 # for target updating
self.memory_counter = 0 # for storing memory
self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2)) # initialize memory
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
self.loss_func = nn.MSELoss()
def choose_action(self, x):
# print(x)
x = torch.unsqueeze(torch.FloatTensor(x), 0)
# input only one sample
if np.random.uniform() < EPSILON: # greedy
actions_value = self.eval_net.forward(x)
action = torch.max(actions_value, 1)[1].data.numpy()
action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE) # return the argmax index
else: # random
action = np.random.randint(0, N_ACTIONS)
action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
return action
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % MEMORY_CAPACITY
self.memory[index, :] = transition
self.memory_counter += 1
def learn(self):
# target parameter update
if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample batch transitions
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
b_memory = self.memory[sample_index, :]
b_s = torch.FloatTensor(b_memory[:, :N_STATES])
b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])
b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])
# q_eval w.r.t the action in experience
q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)
q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate
q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) # shape (batch, 1)
loss = self.loss_func(q_eval, q_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
dqn = DQN()
print('\nCollecting experience...')
for i_episode in range(400):
s = env.reset()
ep_r = 0
sum = 0
while True:
env.render()
a = dqn.choose_action(s)
# take action
s_, r, done, info = env.step(a)
# modify the reward
x, x_dot, theta, theta_dot = s_
r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
r = r1 + r2
dqn.store_transition(s, a, r, s_)
sum += 1
ep_r += r
if dqn.memory_counter > MEMORY_CAPACITY:
dqn.learn()
if done:
print('Ep: ', i_episode,
'| Ep_r: ', round(ep_r, 2))
print("本轮循环的次数:", sum)
if done:
break
s = s_
| 34.701493
| 125
| 0.606237
|
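A hedged follow-up to the DQN training script in the record above (not part of the original file): persisting the learned evaluation network with standard PyTorch calls, assuming the script has just been run in the same session so that dqn and Net are in scope.

```python
# Hypothetical follow-up: save and reload the trained evaluation network.
import torch

torch.save(dqn.eval_net.state_dict(), "dqn_cartpole.pt")  # dqn from the script above

restored = Net()                                           # Net from the script above
restored.load_state_dict(torch.load("dqn_cartpole.pt"))
restored.eval()                                            # greedy inference mode
```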
cc3a0216e863231d48ca7ab19d170226f9fb7644
| 7,175
|
py
|
Python
|
var/spack/repos/builtin/packages/ffmpeg/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2019-12-10T12:41:38.000Z
|
2019-12-10T12:41:38.000Z
|
var/spack/repos/builtin/packages/ffmpeg/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
var/spack/repos/builtin/packages/ffmpeg/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2019-10-29T09:08:17.000Z
|
2019-10-29T09:08:17.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ffmpeg(AutotoolsPackage):
"""FFmpeg is a complete, cross-platform solution to record,
convert and stream audio and video."""
homepage = "https://ffmpeg.org"
url = "http://ffmpeg.org/releases/ffmpeg-4.1.1.tar.bz2"
maintainers = ['xjrc']
version('4.2.2', sha256='b620d187c26f76ca19e74210a0336c3b8380b97730df5cdf45f3e69e89000e5c')
version('4.1.1', sha256='0cb40e3b8acaccd0ecb38aa863f66f0c6e02406246556c2992f67bf650fab058')
version('4.1', sha256='b684fb43244a5c4caae652af9022ed5d85ce15210835bce054a33fb26033a1a5')
version('3.2.4', sha256='c0fa3593a2e9e96ace3c1757900094437ad96d1d6ca19f057c378b5f394496a4')
version('2.8.15', sha256='35647f6c1f6d4a1719bc20b76bf4c26e4ccd665f46b5676c0e91c5a04622ee21')
version('1.0.10', sha256='1dbde434c3b5c573d3b2ffc1babe3814f781c10c4bc66193a4132a44c9715176')
# Licensing
variant('gpl', default=True,
description='allow use of GPL code, the resulting libs '
'and binaries will be under GPL')
variant('version3', default=True,
description='upgrade (L)GPL to version 3')
variant('nonfree', default=False,
description='allow use of nonfree code, the resulting libs '
'and binaries will be unredistributable')
# NOTE: The libopencv option creates a circular dependency.
# NOTE: There are more possible variants that would require additional
# spack packages.
# meta variants: These will toggle several settings
variant('X', default=False, description='X11 support')
variant('drawtext', default=False, description='drawtext filter')
# options
variant('bzlib', default=True, description='bzip2 support')
variant('libaom', default=False, description='AV1 video encoding/decoding')
variant('libmp3lame', default=False, description='MP3 encoding')
variant('libopenjpeg', default=False, description='JPEG 2000 de/encoding')
variant('libopus', default=False, description='Opus de/encoding')
variant('libsnappy', default=False,
description='Snappy compression, needed for hap encoding')
variant('libspeex', default=False, description='Speex de/encoding')
variant('libssh', default=False, description='SFTP protocol')
variant('libvorbis', default=False, description='Vorbis en/decoding')
variant('libwebp', default=False, description='WebP encoding via libwebp')
# TODO: There is an issue with the spack headers property in the libxml2
# package recipe. Comment out the libxml2 variant until that is resolved.
# variant('libxml2', default=False,
# description='XML parsing, needed for dash demuxing support')
variant('libzmq', default=False, description='message passing via libzmq')
variant('lzma', default=False, description='lzma support')
    variant('avresample', default=False, description='AV resampling component')
variant('openssl', default=False, description='needed for https support')
variant('sdl2', default=False, description='sdl2 support')
variant('shared', default=True, description='build shared libraries')
depends_on('alsa-lib')
depends_on('libiconv')
depends_on('yasm@1.2.0:')
depends_on('zlib')
depends_on('aom', when='+libaom')
depends_on('bzip2', when='+bzlib')
depends_on('fontconfig', when='+drawtext')
depends_on('freetype', when='+drawtext')
depends_on('fribidi', when='+drawtext')
depends_on('lame', when='+libmp3lame')
depends_on('libssh', when='+libssh')
depends_on('libvorbis', when='+libvorbis')
depends_on('libwebp', when='+libwebp')
# TODO: enable libxml2 when libxml2 header issue is resolved
# depends_on('libxml2', when='+libxml2')
depends_on('libxv', when='+X')
depends_on('libzmq', when='+libzmq')
depends_on('openjpeg', when='+libopenjpeg')
depends_on('openssl', when='+openssl')
depends_on('opus', when='+libopus')
depends_on('sdl2', when='+sdl2')
depends_on('snappy', when='+libsnappy')
depends_on('speex', when='+libspeex')
depends_on('xz', when='+lzma')
# TODO: enable when libxml2 header issue is resolved
# conflicts('+libxml2', when='@:3.999')
# See: https://www.ffmpeg.org/index.html#news (search AV1)
conflicts('+libaom', when='@:3.999')
# All of the following constraints were sourced from the official 'ffmpeg'
# change log, which can be found here:
# https://raw.githubusercontent.com/FFmpeg/FFmpeg/release/4.0/Changelog
conflicts('+sdl2', when='@:3.1.999')
conflicts('+libsnappy', when='@:2.7.999')
conflicts('+X', when='@:2.4.999')
conflicts('+lzma', when='@2.3.999:')
conflicts('+libwebp', when='@2.1.999:')
conflicts('+libssh', when='@2.0.999:')
conflicts('+libzmq', when='@:1.999.999')
def enable_or_disable_meta(self, variant, options):
switch = 'enable' if '+{0}'.format(variant) in self.spec else 'disable'
return ['--{0}-{1}'.format(switch, option) for option in options]
def configure_args(self):
spec = self.spec
config_args = [
'--enable-pic',
'--cc={0}'.format(spack_cc),
'--cxx={0}'.format(spack_cxx)
]
# '+X' meta variant #
xlib_opts = []
if spec.satisfies('@2.5:'):
xlib_opts.extend([
'libxcb',
'libxcb-shape',
'libxcb-shm',
'libxcb-xfixes',
'xlib',
])
config_args += self.enable_or_disable_meta('X', xlib_opts)
# '+drawtext' meta variant #
drawtext_opts = [
'{0}fontconfig'.format('lib' if spec.satisfies('@3:') else ''),
'libfreetype',
]
if spec.satisfies('@2.3:'):
drawtext_opts.append('libfribidi')
config_args += self.enable_or_disable_meta('drawtext', drawtext_opts)
# other variants #
variant_opts = [
'bzlib',
'libmp3lame',
'libopenjpeg',
'libopus',
'libspeex',
'libvorbis',
'avresample',
'openssl',
'shared',
]
if spec.satisfies('@2.0:'):
variant_opts.append('libzmq')
if spec.satisfies('@2.1:'):
variant_opts.append('libssh')
if spec.satisfies('@2.2:'):
variant_opts.append('libwebp')
if spec.satisfies('@2.4:'):
variant_opts.append('lzma')
if spec.satisfies('@2.8:'):
variant_opts.append('libsnappy')
if spec.satisfies('@3.2:'):
variant_opts.append('sdl2')
if spec.satisfies('@4:'):
variant_opts.append('libaom')
# TODO: enable when libxml2 header issue is resolved
# variant_opts.append('libxml2')
for variant_opt in variant_opts:
config_args += self.enable_or_disable(variant_opt)
return config_args
| 39.640884
| 96
| 0.639164
|
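The enable_or_disable_meta helper in the Spack recipe above turns a variant plus a list of option names into configure flags. The standalone sketch below reproduces that pattern with a plain string in place of a real Spack Spec object; it is an illustration, not the package's actual API.

```python
# Standalone sketch of the configure-flag pattern; "spec" is just a string
# here, whereas the real recipe checks membership on a Spack Spec object.
def enable_or_disable_meta(spec, variant, options):
    switch = 'enable' if '+{0}'.format(variant) in spec else 'disable'
    return ['--{0}-{1}'.format(switch, option) for option in options]

print(enable_or_disable_meta('ffmpeg+X', 'X', ['libxcb', 'xlib']))
# ['--enable-libxcb', '--enable-xlib']
print(enable_or_disable_meta('ffmpeg~X', 'X', ['libxcb', 'xlib']))
# ['--disable-libxcb', '--disable-xlib']
```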
30cb1b8721370af46484f1b3124bfcbb36a4af20
| 546
|
py
|
Python
|
tests/test_status.py
|
carltongibson/rich
|
bc8737ba238c86f32c94d6c203681592487ed0ca
|
[
"MIT"
] | 1
|
2020-12-24T08:24:11.000Z
|
2020-12-24T08:24:11.000Z
|
tests/test_status.py
|
carltongibson/rich
|
bc8737ba238c86f32c94d6c203681592487ed0ca
|
[
"MIT"
] | 6
|
2021-12-31T12:25:06.000Z
|
2022-02-14T12:29:11.000Z
|
tests/test_status.py
|
carltongibson/rich
|
bc8737ba238c86f32c94d6c203681592487ed0ca
|
[
"MIT"
] | null | null | null |
from time import sleep
from rich.console import Console
from rich.status import Status
from rich.table import Table
def test_status():
console = Console(
color_system=None, width=80, legacy_windows=False, get_time=lambda: 0.0
)
status = Status("foo", console=console)
assert status.console == console
status.update(status="bar", spinner="dots2", spinner_style="red", speed=2.0)
assert isinstance(status.renderable, Table)
# TODO: Testing output is tricky with threads
with status:
sleep(0.2)
| 24.818182
| 80
| 0.703297
|
29ee8f5ac85848a24451a122c239f21be9c14d47
| 1,769
|
py
|
Python
|
tests/core/actions/test_get_not_completed_tasks.py
|
Himon-SYNCRAFT/taskplus
|
9e6293840941d0cb4fd7bac0f8ff66f8e72cc62b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/actions/test_get_not_completed_tasks.py
|
Himon-SYNCRAFT/taskplus
|
9e6293840941d0cb4fd7bac0f8ff66f8e72cc62b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/actions/test_get_not_completed_tasks.py
|
Himon-SYNCRAFT/taskplus
|
9e6293840941d0cb4fd7bac0f8ff66f8e72cc62b
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import mock
import pytest
from taskplus.core.actions import GetNotCompletedTasksAction,\
GetNotCompletedTasksRequest
from taskplus.core.domain import Task
from taskplus.core.shared.response import ResponseFailure
@pytest.fixture
def tasks():
return [
Task(name='name1', content=[], status=mock.Mock(), creator=mock.Mock()),
Task(name='name2', content=[], status=mock.Mock(), creator=mock.Mock()),
]
def test_get_not_completed_tasks(tasks):
repo = mock.Mock()
repo.list.return_value = tasks
request = GetNotCompletedTasksRequest()
action = GetNotCompletedTasksAction(task_repo=repo)
response = action.execute(request)
assert bool(response) is True
assert response.value == repo.list.return_value
def test_get_not_completed_tasks_with_hooks(tasks):
repo = mock.Mock()
repo.list.return_value = tasks
request = GetNotCompletedTasksRequest()
action = GetNotCompletedTasksAction(task_repo=repo)
before = mock.MagicMock()
after = mock.MagicMock()
action.add_before_execution_hook(before)
action.add_after_execution_hook(after)
response = action.execute(request)
assert before.called
assert after.called
assert bool(response) is True
assert response.value == repo.list.return_value
def test_list_handles_exception():
repo = mock.Mock()
error_message = 'Error!!!'
repo.list.side_effect = Exception(error_message)
request = GetNotCompletedTasksRequest()
action = GetNotCompletedTasksAction(task_repo=repo)
response = action.execute(request)
assert bool(response) is False
assert response.value == {
'type': ResponseFailure.SYSTEM_ERROR,
'message': 'Exception: {}'.format(error_message)
}
| 28.079365
| 80
| 0.725269
|
f2389efec65c9932077756f7bb8c32336b4c9572
| 2,265
|
py
|
Python
|
tests/test.py
|
zephenryus/botw-mate
|
52c078a2cc93ff45e7e4e1946c253b6181bbd42e
|
[
"MIT"
] | 1
|
2020-10-11T07:07:26.000Z
|
2020-10-11T07:07:26.000Z
|
tests/test.py
|
zephenryus/botw-mate
|
52c078a2cc93ff45e7e4e1946c253b6181bbd42e
|
[
"MIT"
] | null | null | null |
tests/test.py
|
zephenryus/botw-mate
|
52c078a2cc93ff45e7e4e1946c253b6181bbd42e
|
[
"MIT"
] | 1
|
2020-10-11T07:07:29.000Z
|
2020-10-11T07:07:29.000Z
|
import filecmp
import hashlib
import json
import mate
def mate_to_json():
"""
Tests reading of mate file and exports data as a json file
"""
data = mate.read_mate("assets/5000000000.mate")
print("Saving file output/5000000000.mate.json...")
with open("output/5000000000.mate.json", "w+") as outfile:
out_obj = []
for entry in data:
out_obj.append(entry.__dict__)
outfile.write(json.dumps(out_obj, indent=4, separators=(',', ': ')))
def mate_to_binary_string():
"""
Tests that data is recompiled correctly and matches the original file
"""
data = mate.read_mate("assets/5000000000.mate")
binary_data = mate.compile_mate(data)
hash_md5 = hashlib.md5()
with open("assets/5000000000.mate", "rb") as infile:
for chunk in iter(lambda: infile.read(4096), b""):
hash_md5.update(chunk)
file_hash = hash_md5.hexdigest()
hash_md5 = hashlib.md5()
pos = 0
for chunk in iter(lambda: binary_data[pos:pos + 4096], b""):
pos += 4096
hash_md5.update(chunk)
string_hash = hash_md5.hexdigest()
print("The file and binary string are the same: {0}".format(file_hash == string_hash))
def mate_to_binary_file():
"""
Tests reading data from mate file then writes the same data back as a binary
"""
data = mate.read_mate("assets/5000000000.mate")
mate.write_mate(data, "output/5000000000.mate")
print("The files are the same: {0}".format(filecmp.cmp("assets/5000000000.mate", "output/5000000000.mate")))
def mate_to_image():
"""
Tests reading data from mate file then generating material map images
"""
data = mate.read_mate("assets/5000000000.mate")
mate.generate_map(data, 'output/5000000000.mate.tiff')
mate.generate_material_0_map(data, 'output/5000000000.mate00.tiff')
mate.generate_material_0_map(data, 'output/5000000000.mate01.tiff', color_as_value=True)
mate.generate_material_1_map(data, 'output/5000000000.mate10.tiff')
mate.generate_material_1_map(data, 'output/5000000000.mate11.tiff', color_as_value=True)
def main():
mate_to_json()
mate_to_binary_string()
mate_to_binary_file()
mate_to_image()
if __name__ == "__main__":
main()
| 29.038462
| 112
| 0.679912
|
192e356c7f8478e719c54a3697298779f0ddbd6e
| 267
|
py
|
Python
|
python/setup.py
|
badinmath/approxmc
|
aed834a1a253e4dfeb296cf907da5e4b68b4759c
|
[
"MIT"
] | 25
|
2018-11-25T22:16:20.000Z
|
2020-06-23T11:07:36.000Z
|
python/setup.py
|
badinmath/approxmc
|
aed834a1a253e4dfeb296cf907da5e4b68b4759c
|
[
"MIT"
] | 12
|
2018-12-20T07:17:42.000Z
|
2020-05-23T08:34:49.000Z
|
python/setup.py
|
badinmath/approxmc
|
aed834a1a253e4dfeb296cf907da5e4b68b4759c
|
[
"MIT"
] | 14
|
2018-11-30T06:51:47.000Z
|
2020-05-24T12:07:25.000Z
|
from distutils.core import setup, Extension
pyapproxmc_module = Extension(
'pyapproxmc',
sources=['src/pyapproxmc.cpp'],
libraries=['approxmc', 'cryptominisat5'],
language='C++', )
setup(
name='pyapproxmc',
ext_modules=[pyapproxmc_module]
)
| 20.538462
| 45
| 0.685393
|
be885ff2fbb2444659d5831c11884e1bf6f16054
| 1,803
|
py
|
Python
|
linearEquation.py
|
thebillington/linearEquationML
|
4891fcad24543b13be7098cd409393e4bd7e3982
|
[
"MIT"
] | null | null | null |
linearEquation.py
|
thebillington/linearEquationML
|
4891fcad24543b13be7098cd409393e4bd7e3982
|
[
"MIT"
] | null | null | null |
linearEquation.py
|
thebillington/linearEquationML
|
4891fcad24543b13be7098cd409393e4bd7e3982
|
[
"MIT"
] | null | null | null |
# Disable compatability warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Import the tensorflow library
import tensorflow as tf
# Create a new tf session
sess = tf.Session()
# Store our mx + c equation variables
m = tf.Variable([.3], tf.float32)
c = tf.Variable([-.3], tf.float32)
x = tf.placeholder(tf.float32)
# Store the placeholder for our y value
y = tf.placeholder(tf.float32)
# Create a model to represent a linear equation
linearModel = m * x + c
# Store the change in y
squared_deltas = tf.square(linearModel - y)
# Store the difference between given y and the recorded y
loss = tf.reduce_sum(squared_deltas)
# Create an optimizer to optimize the values of m and c
optimizer = tf.train.GradientDescentOptimizer(0.01)
# Train the optimizer to minimize the value of loss
train = optimizer.minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
# Check the loss
#print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# Get values of x and y from user
xValues = []
yValues = []
r = "y"
while r == "y":
xValues.append(float(input("Enter x value of coordinate: ")))
yValues.append(float(input("Enter y value of coordinate: ")))
r = input("Enter another coordinate? (y/n):")
# Run the linear model to minimize loss with the specified x values and their corresponding, correct y values. Train for 1,000 iterations
for i in range(1000):
sess.run(train, {x: xValues, y: yValues})
# Print the values of m and c
print("For input values of x = {0} and y = {1}, optimized linear equation is: y = {2} x + {3}".format(xValues, yValues, sess.run(m)[0], sess.run(c)[0]))
print("For input values of x = {0} and y = {1}, rounded optimized linear equation is: y = {2:.0f} x + {3:.0f}".format(xValues, yValues, sess.run(m)[0], sess.run(c)[0]))
| 31.631579
| 168
| 0.703272
|
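As a cross-check on the gradient-descent fit in the record above, a short sketch using NumPy's closed-form least squares on the sample points from the commented-out loss check; NumPy is an extra dependency the original script does not use.

```python
# Closed-form check (not in the script): least-squares fit of y = m*x + c on
# the sample points x = [1, 2, 3, 4], y = [0, -1, -2, -3].
import numpy as np

xs = np.array([1.0, 2.0, 3.0, 4.0])
ys = np.array([0.0, -1.0, -2.0, -3.0])
A = np.vstack([xs, np.ones_like(xs)]).T
m, c = np.linalg.lstsq(A, ys, rcond=None)[0]
print(m, c)  # approximately -1.0 and 1.0
```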
d66d09b5d8f7b3655b8f04494f1fc54c03d2da33
| 4,770
|
py
|
Python
|
news_scraping/main.py
|
autumnlewjb/NewsScraping
|
0eec56495966cc1b5052a541638d0629cd29dff5
|
[
"MIT"
] | 1
|
2020-10-24T17:13:22.000Z
|
2020-10-24T17:13:22.000Z
|
news_scraping/main.py
|
autumnlewjb/NewsScraping
|
0eec56495966cc1b5052a541638d0629cd29dff5
|
[
"MIT"
] | 1
|
2021-03-31T20:04:06.000Z
|
2021-03-31T20:04:06.000Z
|
news_scraping/main.py
|
autumnlewjb/NewsScraping
|
0eec56495966cc1b5052a541638d0629cd29dff5
|
[
"MIT"
] | null | null | null |
import re
from pathlib import Path
from selenium import webdriver
from bs4 import BeautifulSoup as bs
from time import sleep
import os
from datetime import datetime
from news_scraping.month import month_in_words
from news_scraping.url_list import news_link
from news_scraping.notification import Notification
def create_file(directory):
directory = str(directory)
exist = os.path.exists(directory)
if not exist:
os.mkdir(directory)
def validate_title(title):
invalid_symbols = re.compile(r'[\\/:*?"<>|]')
return invalid_symbols.sub('', title)
def write_file(text_only, directory):
title = text_only['title']
news_content = text_only['content']
file = open(directory, 'w+')
file.write(title)
file.write('\n' * 2)
for text in news_content:
file.write(text)
print(text)
file.write('\n')
if not file.closed:
file.close()
class ScrapeNews:
def __init__(self):
self.browser = webdriver.Chrome()
self.url = None
self.date = datetime.now()
self.main_directory = Path().home() / 'Documents' / 'News'
self.notify = Notification()
@property
def date(self):
return self._date
@date.setter
def date(self, date):
year = int(date.year)
month = int(date.month)
day = int(date.day)
self._date = '%02d.%02d.%04d' % (day, month, year)
# self.date = '20.05.2020'
def get_page_link(self):
main_page = "{}?page={}"
page = 0
true_href = []
while True:
self.browser.get(main_page.format(self.url, page))
sleep(2)
source = self.browser.page_source
soup = bs(source, 'lxml')
div = soup.find('div', class_='article-listing')
a_list = div.find_all('a')
post_time = div.find_all('span', class_='created-ago')
href = []
i = 0
while i < len(a_list):
posting_time = post_time[i].text.split(' ')
                if posting_time[-1] == 'ago' and (posting_time[-2] == 'hour' or posting_time[-2] == 'hours'):
href.append('https://www.nst.com.my/' + str(a_list[i].get('href')))
i += 1
if len(href) == 0:
break
else:
true_href.extend(href)
print(href)
page += 1
return true_href
def get_text(self, href):
self.browser.get(str(href))
source = self.browser.page_source
soup = bs(source, 'lxml')
article_date = soup.find('div', class_='article-meta mb-2 mb-lg-0')
# process article_date below
article_date = article_date.text.strip().split(" ")
# ['April', '30,', '2020', '@', 5:05pm]
day = int(article_date[-4].replace(',', ''))
year = int(article_date[-3])
month = month_in_words[article_date[-5]]
news_date = "%02d.%02d.%04d" % (day, month, year)
if news_date == self.date:
title = soup.find('h1', class_='page-title mb-2')
content_div = soup.find('div', class_='field field-body')
page_content = content_div.find_all('p')
page_content = [paragraph.text.strip() for paragraph in page_content]
text_only = dict(title=title.text, content=page_content)
return text_only
def generate_dir(self, title):
if title:
title = validate_title(title)
create_file(self.main_directory)
directory = self.main_directory / str(self.date)
create_file(directory)
self.notify.set_directory(directory)
category = str(self.url.split('/')[-1])
directory = directory / category
create_file(directory)
directory = directory / (str(title) + '.txt')
return str(directory)
def each_category(self):
number_of_news = 0
href = self.get_page_link()
for link in href:
try:
text_only = self.get_text(link)
print(text_only['title'])
print(text_only['content'])
directory = self.generate_dir(text_only['title'])
write_file(text_only, directory)
number_of_news += 1
except (UnicodeEncodeError, TypeError) as ue:
print(ue)
except:
continue
self.notify.get_total_news(number_of_news)
def main(self):
for link in news_link:
self.url = link
self.each_category()
self.browser.quit()
self.notify.send_note()
if __name__ == '__main__':
obj = ScrapeNews()
obj.main()
| 29.263804
| 107
| 0.564151
|
67f83aab92b2cc5b8c1c333ebc60a5a527e4a81c
| 238
|
py
|
Python
|
day019/main.py
|
rainleander/100daysofcode
|
0391170af80b251e7fb3a78a60b55c3145e4551a
|
[
"Apache-2.0"
] | 8
|
2021-01-25T09:14:41.000Z
|
2021-11-24T12:29:26.000Z
|
day019/main.py
|
rainleander/100daysofcode
|
0391170af80b251e7fb3a78a60b55c3145e4551a
|
[
"Apache-2.0"
] | null | null | null |
day019/main.py
|
rainleander/100daysofcode
|
0391170af80b251e7fb3a78a60b55c3145e4551a
|
[
"Apache-2.0"
] | null | null | null |
# Higher Order Functions and Event Listening
from turtle import Turtle, Screen
tim = Turtle()
screen = Screen()
def move_forward():
tim.forward(10)
screen.listen()
screen.onkey(key="space", fun=move_forward)
screen.exitonclick()
| 15.866667
| 44
| 0.739496
|
181b4e47c5db765492f948a5f9a356c6caf1b473
| 3,126
|
py
|
Python
|
pureples/hyperneat/hyperneat.py
|
kevinrpb/pureples
|
c591fefd5b20085f1d0537553631e29733374b16
|
[
"MIT"
] | null | null | null |
pureples/hyperneat/hyperneat.py
|
kevinrpb/pureples
|
c591fefd5b20085f1d0537553631e29733374b16
|
[
"MIT"
] | null | null | null |
pureples/hyperneat/hyperneat.py
|
kevinrpb/pureples
|
c591fefd5b20085f1d0537553631e29733374b16
|
[
"MIT"
] | null | null | null |
import neat
# Creates a recurrent network using a cppn and a substrate.
def create_phenotype_network(cppn, substrate, activation_function="sigmoid"):
input_coordinates = substrate.input_coordinates
output_coordinates = substrate.output_coordinates
hidden_coordinates = substrate.hidden_coordinates # List of layers, first index = top layer.
input_nodes = list(range(len(input_coordinates)))
output_nodes = list(range(len(input_nodes), len(input_nodes)+len(output_coordinates)))
counter = 0
for layer in hidden_coordinates:
counter += len(layer)
hidden_nodes = range(len(input_nodes)+len(output_nodes), len(input_nodes)+len(output_nodes)+counter)
node_evals = []
# Get activation function.
activation_functions = neat.activations.ActivationFunctionSet()
activation = activation_functions.get(activation_function)
# Connect hidden to output.
counter = 0
for oc in output_coordinates:
idx = 0
for layer in hidden_coordinates:
im = find_neurons(cppn, oc, layer, hidden_nodes[idx], False)
idx += len(layer)
if im:
node_evals.append((output_nodes[counter], activation, sum, 0.0, 1.0, im))
counter += 1
# Connect hidden to hidden - starting from the top layer.
current_layer = 1
idx = 0
for layer in hidden_coordinates:
idx += len(layer)
counter = idx - len(layer)
for i in range(current_layer, len(hidden_coordinates)):
for hc in layer:
im = find_neurons(cppn, hc, hidden_coordinates[i], hidden_nodes[idx], False)
if im:
node_evals.append((hidden_nodes[counter], activation, sum, 0.0, 1.0, im))
counter += 1
counter -= idx
current_layer += 1
# Connect input to hidden.
counter = 0
for layer in hidden_coordinates:
for hc in layer:
im = find_neurons(cppn, hc, input_coordinates, input_nodes[0], False)
if im:
node_evals.append((hidden_nodes[counter], activation, sum, 0.0, 1.0, im))
counter += 1
return neat.nn.RecurrentNetwork(input_nodes, output_nodes, node_evals)
# Find the neurons to which the given coord is connected.
def find_neurons(cppn, coord, nodes, start_idx, outgoing, max_weight=5.0):
im = []
idx = start_idx
for node in nodes:
w = query_cppn(coord, node, outgoing, cppn, max_weight)
if w != 0.0: # Only include connection if the weight isn't 0.0.
im.append((idx, w))
idx += 1
return im
# Get the weight from one point to another using the CPPN - takes into consideration which point is source/target.
def query_cppn(coord1, coord2, outgoing, cppn, max_weight=5.0):
if outgoing:
i = [coord1[0], coord1[1], coord2[0], coord2[1], 1.0]
else:
i = [coord2[0], coord2[1], coord1[0], coord1[1], 1.0]
w = cppn.activate(i)[0]
if abs(w) > 0.2: # If abs(weight) is below threshold, treat weight as 0.0.
return w * max_weight
else:
return 0.0
| 34.733333
| 114
| 0.639795
|
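An illustrative check of the 0.2 weight threshold in query_cppn from the record above, using a stub CPPN. The import path mirrors the record's file path and assumes the package is installed; the StubCPPN class is made up for the example.

```python
# Stub-based sketch: query_cppn returns 0.0 below the 0.2 threshold, otherwise
# the raw CPPN output scaled by max_weight (default 5.0).
from pureples.hyperneat.hyperneat import query_cppn

class StubCPPN:
    def activate(self, inputs):
        return [sum(inputs) * 0.05]

print(query_cppn((0.0, 0.0), (1.0, 1.0), True, StubCPPN()))  # 0.15 -> treated as 0.0
print(query_cppn((1.0, 1.0), (1.0, 1.0), True, StubCPPN()))  # 0.25 -> 0.25 * 5.0 = 1.25
```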
bcba2e505c86082a992d168eed91075aa4d9e9ac
| 24,630
|
py
|
Python
|
python/triton/ops/blocksparse/matmul.py
|
shauheen/triton
|
12b6158c5cbc10c56f935985e6f466c9867d9238
|
[
"MIT"
] | 3,352
|
2021-07-28T16:03:44.000Z
|
2022-03-31T15:36:36.000Z
|
python/triton/ops/blocksparse/matmul.py
|
shauheen/triton
|
12b6158c5cbc10c56f935985e6f466c9867d9238
|
[
"MIT"
] | 169
|
2021-07-28T09:10:38.000Z
|
2022-03-31T17:22:19.000Z
|
python/triton/ops/blocksparse/matmul.py
|
shauheen/triton
|
12b6158c5cbc10c56f935985e6f466c9867d9238
|
[
"MIT"
] | 204
|
2021-07-27T20:58:22.000Z
|
2022-03-31T16:45:45.000Z
|
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
import torch
# ********************************************************
# --------------------------------------------------------
# Sparse = Dense x Dense (SDD)
# This operation uses super-blocking to make sure that
# it's done efficiently when small blocks can be grouped
# together
# --------------------------------------------------------
# ********************************************************
@triton.heuristics({
'EVEN_K': lambda *args, **meta: args[15] % meta['TILE_K'] == 0,
})
@triton.jit
def _sdd_kernel(
A, B, C,
stride_za, stride_ha, stride_ma, stride_ak,
stride_zb, stride_hb, stride_bk, stride_nb,
stride_zc, stride_hc, stride_mc, stride_nc,
K, grid_offset, lut, **meta
):
TILE_M = meta['TILE_M']
TILE_N = meta['TILE_N']
TILE_K = meta['TILE_K']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid1 = tl.program_id(1) + grid_offset
blockidm = tl.arange(0, TILE_M) // BLOCK
blockidn = tl.arange(0, TILE_N) // BLOCK
offlutm = blockidm * (TILE_N // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TILE_M // BLOCK) * (TILE_N // BLOCK) * 4
# batch offset
off_z = tl.program_id(2)
# head offset
off_h = tl.load(header + 0)
# initialize pointers to A
start_am = tl.load(header + 1 + offlutm)
offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK)
offs_ak = tl.arange(0, TILE_K)
a_ptrs = A + off_z * stride_za \
+ off_h * stride_ha \
+ offs_am[:, None] * stride_ma \
+ offs_ak[None, :] * stride_ak
# initialize pointers to B
start_bn = tl.load(header + 2 + offlutn)
offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK)
offs_bk = tl.arange(0, TILE_K)
b_ptrs = B + off_z * stride_zb \
+ off_h * stride_hb \
+ offs_bn[None, :] * stride_nb \
+ offs_bk[:, None] * stride_bk
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
for k in range(K, 0, -TILE_K):
if meta['EVEN_K']:
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
else:
a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.)
b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.)
acc += tl.dot(a, b)
a_ptrs += TILE_K * stride_ak
b_ptrs += TILE_K * stride_bk
c = acc.to(C.dtype.element_ty)
## ---------------- ##
## Epilogue ##
## ---------------- ##
blockidm = tl.arange(0, TILE_M) // BLOCK
blockidn = tl.arange(0, TILE_N) // BLOCK
offlutm = blockidm * (TILE_N // BLOCK) * 4
offlutn = blockidn * 4
off_block_id = 3 + offlutm[:, None] + offlutn[None, :]
block_id = tl.load(header + off_block_id)
# initialize pointers to C
offs_cm = tl.arange(0, TILE_M) % BLOCK
offs_cn = tl.arange(0, TILE_N) % BLOCK
pc = C + off_z * stride_zc \
+ block_id * stride_hc \
+ offs_cm[:, None] * stride_mc \
+ offs_cn[None, :] * stride_nc
tl.store(pc, c, mask=True)
def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs):
# (A * B)^T = B^T * A^T
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
# shape constraints
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
Ka, Kb = a.shape[a_dim], b.shape[b_dim]
if Ka != Kb:
raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})")
if Ka % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
# allocate output
n_blocks = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.zeros((a.shape[0], n_blocks, block, block), dtype=a.dtype, device=a.device)
# each iteration of the loop below
# computes the value for one group of super-blocks
# (e.g., all 4x4 super-blocks)
for lut, width, pack in zip(luts, widths, packs):
# maximum grid size in Triton/CUDA is 64k but we may have more
# super-blocks than that.
max_grid = 65535
for off_grid in range(0, width, max_grid):
grid = [1, min(max_grid, width - off_grid), c.shape[0]]
# fmt: off
pgm = _sdd_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(2), c.stride(3),
Ka, off_grid, lut,
TILE_M = block*pack, TILE_N = block*pack, TILE_K = 32, BLOCK = block, num_stages=3,
num_warps=4,
)
# print(pgm.asm['ptx'])
# exit()
return c
def sdd_lut(layout, block, device):
start_width = 128 // block
layout = layout.type(torch.int32)
superblocks = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2], start_width)
luts, widths, packs = [], [], []
for size, nnz in superblocks:
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
return luts, None, widths, packs
# -----------------------------
# Dense = Sparse x Dense (DSD)
# This operation uses a look-up table that contains pre-computed pointer increments
# in order to minimize computations in the inner loop of the matmul kernel.
# -----------------------------
@triton.jit
def _dsd_kernel(
A, B, C,
stride_az, stride_ha, stride_am, stride_ak,
stride_zb, stride_hb, stride_bk, stride_bn,
stride_zc, stride_hc, stride_cm, stride_cn,
DS0, DS1, lut, **meta
):
TILE_M = meta['TILE_M']
TILE_N = meta['TILE_N']
TILE_K = meta['TILE_K']
GROUP_SIZE_M = meta['GROUP_SIZE_M']
#------------#
#- Prologue -#
#------------#
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
num_pid_m = tl.num_programs(0)
num_pid_n = tl.num_programs(1)
pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M)
pidz = tl.program_id(2)
header = lut + pid_m * 4
offset = tl.load(header + 0)
K = tl.load(header + 1)
column = tl.load(header + 2)
off_h = tl.load(header + 3)
pinc = lut + offset
# initialize pointers to A (sparse)
block_id = tl.load(pinc + 1)
block_id = tl.multiple_of(block_id, 8) # compiler hint
offs_am = tl.arange(0, TILE_M)
offs_ak = tl.arange(0, TILE_K)
pa = A + pidz * stride_az \
+ block_id * stride_ha \
+ offs_am[:, None] * stride_am \
+ offs_ak[None, :] * stride_ak
# initialize pointers to B (dense)
offs_bn = pid_n*TILE_N + tl.arange(0, TILE_N)
start_bk = tl.load(pinc)
start_bk = tl.multiple_of(start_bk, 8) # compiler hint
offs_bk = start_bk + tl.arange(0, TILE_K)
pb = B + pidz * stride_zb \
+ off_h * stride_hb \
+ offs_bn[None, :] * stride_bn \
+ offs_bk[:, None] * stride_bk
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
for k in range(K, 0, -TILE_K):
a = tl.load(pa, mask=True)
b = tl.load(pb, mask=offs_bn[None, :] < DS0)
acc += tl.dot(a, b)
pinc += 2
inc_a = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.load(pinc)
inc_b = tl.multiple_of(inc_b, 8)
pa += inc_a
pb += inc_b*stride_bk
c = acc.to(C.dtype.element_ty)
# initialize pointers to C
offs_cm = column*TILE_M + tl.arange(0, TILE_M)
offs_cn = pid_n*TILE_N + tl.arange(0, TILE_N)
pc = C + off_h * stride_hc \
+ pidz * stride_zc \
+ offs_cm[:, None] * stride_cm \
+ offs_cn[None, :] * stride_cn
tl.store(pc, c, mask = offs_cn[None, :] < DS0)
def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):
# shapes / dtypes
AS1 = block * spdims[2 if trans_a else 1]
BS0 = b.size(0)
BS1 = b.size(1)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# allocate output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
# meta-parameter heuristics
TILE_N = {16: 256, 32: 256, 64: 128, 128: 128}[block]
# compute output
grid = lambda meta: [width, triton.cdiv(BS3, meta['TILE_N']), BS0]
# fmt: off
_dsd_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3),
BS3, AS1, lut,
TILE_M = block, TILE_N=TILE_N, TILE_K = min(block, 32), BLOCK = block, num_stages=3,
num_warps=4, GROUP_SIZE_M=8,
)
# exit()
return c
def dsd_lut(layout, block, step, trans, device):
sizes = torch.sum(layout, 2 if trans else 1)
head_id, col_id = sizes.nonzero(as_tuple=True)
sizes = sizes.flatten()
segments = sizes*step
# pointer increments
if trans:
nnz = layout.nonzero(as_tuple=False)
else:
nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
num_blocks = nnz.size(0)
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
# -------------------------------
# dense input pointer increments
# -------------------------------
# given a list of the indices for the first element of each non-zero block.
# For example, for the indices
# [32, 80, 128, 256, 288]
# we would generate the increments
# [32, 48, 48, 128, 32]
# ^
# index of first element
# Note that the inner loop matmul kernel may have a fixed step size (e.g., TILE_K)
# that is smaller than the block size, so we need to do a bit of extra work
# to handle this case
B_idx = nnz[:, 2] * block
B_incs = B_idx.clone()
B_incs[1:] -= B_idx[:-1]
div = block // step
B_incs = B_incs.view(-1, 1).repeat(1, div)
B_incs[:, 1:] = step
B_incs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]]
B_incs = B_incs.view(-1)
# -------------------------------
# sparse input pointer increments
# -------------------------------
# same as above, except that the increments are in the sparse memory layout
if trans:
A_idx = torch.arange(num_blocks)
else:
A_idx = torch.tensor([], dtype=torch.int64, device=layout.device)
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
A_incs = A_idx * block * block
A_incs[1:] -= A_idx[:-1] * block * block
A_incs = A_incs.view(-1, 1).repeat(1, div)
if trans:
A_incs[:, 1:] = step
A_incs[:, 0] -= (div - 1) * step
else:
A_incs[:, 1:] = step * block
A_incs[:, 0] -= (div - 1) * step * block
A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]]
A_incs = A_incs.view(-1)
# create header
width = col_id.size(0)
offsets = offsets*2*div + 4*width
segments = segments*div
header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous()
# create increments
incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
return lut, None, width, None
# -----------------------------
# Dense = Dense x Sparse (DDS)
# -----------------------------
@triton.jit
def _dds_kernel(
A, B, C,
stride_za, stride_ha, stride_ma, stride_ka,
stride_zb, stride_hb, stride_bk, stride_bn,
stride_zc, stride_hc, stride_mc, stride_nc,
DS0, DS1, lut, **meta
):
TILE_M = meta['TILE_M']
TILE_N = meta['TILE_N']
TILE_K = meta['TILE_K']
GROUP_SIZE_M = meta['GROUP_SIZE_M']
#------------#
#- Prologue -#
#------------#
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
num_pid_m = tl.num_programs(0)
num_pid_n = tl.num_programs(1)
pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M)
pid_z = tl.program_id(2)
header = lut + pid_m * 4
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
off_h = tl.load(header + 3)
pinc = lut + offset
# initialize pointers to A (dense)
offs_am = pid_n*TILE_M + tl.arange(0, TILE_M)
start_ak = tl.load(pinc)
start_ak = tl.multiple_of(start_ak, 8)
offs_ak = start_ak + tl.arange(0, TILE_K)
ptrs_a = A + pid_z * stride_za \
+ off_h * stride_ha \
+ offs_am[:, None] * stride_ma \
+ offs_ak[None, :] * stride_ka
# initialize pointers to B (sparse)
block_id = tl.load(pinc + 1)
block_id = tl.multiple_of(block_id, 8)
offs_bn = tl.arange(0, TILE_N)
offs_bk = tl.arange(0, TILE_K)
ptrs_b = B + pid_z * stride_zb \
+ block_id * stride_hb \
+ offs_bn[None, :] * stride_bn \
+ offs_bk[:, None] * stride_bk
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
for k in range(AS1, 0, -TILE_K):
a = tl.load(ptrs_a, mask = offs_am[:, None] < DS0)
b = tl.load(ptrs_b, mask = True)
acc += tl.dot(a, b)
pinc += 2
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
ptrs_a += inc_a
ptrs_b += inc_b
## ---------------- ##
## Epilogue ##
## ---------------- ##
c = acc.to(C.dtype.element_ty)
# initialize pointers to C (dense)
offs_cm = pid_n * TILE_M + tl.arange(0, TILE_M)
offs_cn = column * TILE_N + tl.arange(0, TILE_N)
ptrs_c = C + off_h * stride_hc \
+ pid_z * stride_zc \
+ offs_cm[:, None] * stride_mc \
+ offs_cn[None, :] * stride_nc
# write back
tl.store(ptrs_c, c, mask = offs_cm[:, None] < DS0)
def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
TILE_M = {16: 256, 32: 256, 64: 128, 128: 128}[block]
grid = lambda meta: [width, triton.cdiv(AS2, meta['TILE_M']), AS0]
# fmt: off
_dds_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3),
AS2, BS2, lut,
TILE_M = TILE_M, TILE_N = block, TILE_K = min(block, 32), BLOCK = block, num_stages=3,
num_warps=4, GROUP_SIZE_M=8,
)
return c
##############
# MAIN API #
##############
class _matmul(torch.autograd.Function):
fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul}
@staticmethod
def forward(
ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs, da_lut, da_num_locks,
da_width, da_packs, db_lut, db_num_locks, db_width, db_packs
):
c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width, c_packs)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_packs = db_packs
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
da, db = None, None
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _matmul.fn[mode_da](
dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_num_locks, ctx.da_width,
ctx.da_packs
)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _matmul.fn[mode_db](
a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_num_locks, ctx.db_width,
ctx.db_packs
)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
class matmul:
def make_lut(self, dtype, device):
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = min(block, 32)
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = sdd_lut(layout, block, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = dsd_lut(layout, block, step, not self.trans_a, device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = dsd_lut(layout, block, step, self.trans_b, device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = dsd_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = sdd_lut(layout, block, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = dsd_lut(layout, block, step, not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = dsd_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = dsd_lut(layout, block, step, self.trans_a, device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = sdd_lut(layout, block, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,
da_lut, da_num_locks, da_width, da_packs,
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False):
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.block = block
self.mode = mode
self.trans_a = trans_a
self.trans_b = trans_b
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b, -2)
self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long() # Above code assumes the layout tensor is an integral type
self.layout = layout
self.spdims = layout.shape
def __call__(self, a, b):
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# If we don't check for invalid shapes, devices, & dtypes here, they will lead to undefined behavior
# and potential illegal memory accesses
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# execute
c = _matmul.apply(
a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut, c_num_locks, c_width,
c_packs, da_lut, da_num_locks, da_width, da_packs, db_lut, db_num_locks, db_width, db_packs
)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not a.is_cuda:
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError("Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
| 39.661836
| 126
| 0.566829
|
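The dense-pointer increment scheme documented inside dsd_lut in the record above comes with its own worked example in the comments (indices [32, 80, 128, 256, 288] yield increments [32, 48, 48, 128, 32]). The standalone sketch below reproduces just that arithmetic with plain torch, outside the kernel code.

```python
# Standalone reproduction of the increment arithmetic described in dsd_lut's
# comments: first-element indices of non-zero blocks -> pointer increments.
import torch

B_idx = torch.tensor([32, 80, 128, 256, 288])
B_incs = B_idx.clone()
B_incs[1:] -= B_idx[:-1]
print(B_incs.tolist())  # [32, 48, 48, 128, 32], matching the in-code comment
```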
a99fcb5b144a094a974b7bab5d0cd24188372d79
| 599
|
py
|
Python
|
codigo/Live160/exemplo_02.py
|
BrunoPontesLira/live-de-python
|
da6e463a89ed90d9efaa1c34088ab6460e949de1
|
[
"MIT"
] | 572
|
2018-04-03T03:17:08.000Z
|
2022-03-31T19:05:32.000Z
|
codigo/Live160/exemplo_02.py
|
BrunoPontesLira/live-de-python
|
da6e463a89ed90d9efaa1c34088ab6460e949de1
|
[
"MIT"
] | 176
|
2018-05-18T15:56:16.000Z
|
2022-03-28T20:39:07.000Z
|
codigo/Live160/exemplo_02.py
|
BrunoPontesLira/live-de-python
|
da6e463a89ed90d9efaa1c34088ab6460e949de1
|
[
"MIT"
] | 140
|
2018-04-18T13:59:11.000Z
|
2022-03-29T00:43:49.000Z
|
from exemplo_01 import Pessoa, Grupo, Nota
eduardo = Pessoa(
nome='eduardo',
idade=18,
senha='1234567',
email='eu@dunossauro.live'
)
try:
eduardo.save()
Pessoa.create(
nome='Fausto',
idade=3,
email='fausto@live',
senha='7654321'
)
except:
...
pessoas = [
{'nome': 'Irmão do jorel', 'email': 'irmao@live', 'idade': 1, 'senha': '123'},
{'nome': 'Gesonel', 'email': 'gesinho@live', 'idade': 1, 'senha': '123'},
{'nome': 'Lara', 'email': 'lara@live', 'idade': 1, 'senha': '123'},
]
Pessoa.insert_many(pessoas).execute()
| 19.322581
| 82
| 0.554257
|
8a9b0487329888d6ef011e4ce9bbea81af398d7b
| 4,592
|
py
|
Python
|
git/refs/reference.py
|
sampalest/GitPython
|
8a1f87d69b128e9e25c6718269688c2c35414a4f
|
[
"BSD-3-Clause"
] | null | null | null |
git/refs/reference.py
|
sampalest/GitPython
|
8a1f87d69b128e9e25c6718269688c2c35414a4f
|
[
"BSD-3-Clause"
] | null | null | null |
git/refs/reference.py
|
sampalest/GitPython
|
8a1f87d69b128e9e25c6718269688c2c35414a4f
|
[
"BSD-3-Clause"
] | null | null | null |
from git.util import (
LazyMixin,
IterableObj,
)
from .symbolic import SymbolicReference
__all__ = ["Reference"]
#{ Utilities
def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper
#}END utilities
class Reference(SymbolicReference, LazyMixin, IterableObj):
"""Represents a named reference to any object. Subclasses may apply restrictions though,
i.e. Heads can only point to commits."""
__slots__ = ()
_points_to_commits_only = False
_resolve_ref_on_create = True
_common_path_default = "refs"
def __init__(self, repo, path, check_path=True):
"""Initialize this instance
:param repo: Our parent repository
:param path:
Path relative to the .git/ directory pointing to the ref in question, i.e.
refs/heads/master
:param check_path: if False, you can provide any path. Otherwise the path must start with the
default path prefix of this type."""
if check_path and not path.startswith(self._common_path_default + '/'):
raise ValueError("Cannot instantiate %r from path %s" % (self.__class__.__name__, path))
super(Reference, self).__init__(repo, path)
def __str__(self):
return self.name
#{ Interface
def set_object(self, object, logmsg=None): # @ReservedAssignment
"""Special version which checks if the head-log needs an update as well
:return: self"""
oldbinsha = None
if logmsg is not None:
head = self.repo.head
if not head.is_detached and head.ref == self:
oldbinsha = self.commit.binsha
# END handle commit retrieval
# END handle message is set
super(Reference, self).set_object(object, logmsg)
if oldbinsha is not None:
# /* from refs.c in git-source
# * Special hack: If a branch is updated directly and HEAD
# * points to it (may happen on the remote side of a push
# * for example) then logically the HEAD reflog should be
# * updated too.
# * A generic solution implies reverse symref information,
# * but finding all symrefs pointing to the given branch
# * would be rather costly for this rare event (the direct
# * update of a branch) to be worth it. So let's cheat and
# * check with HEAD only which should cover 99% of all usage
# * scenarios (even 100% of the default ones).
# */
self.repo.head.log_append(oldbinsha, logmsg)
# END check if the head
return self
    # NOTE: Don't have to overwrite properties, as they will only work without the log
@property
def name(self):
""":return: (shortest) Name of this reference - it may contain path components"""
        # first two path tokens can be removed as they are
# refs/heads or refs/tags or refs/remotes
tokens = self.path.split('/')
if len(tokens) < 3:
return self.path # could be refs/HEAD
return '/'.join(tokens[2:])
@classmethod
def iter_items(cls, repo, common_path=None):
"""Equivalent to SymbolicReference.iter_items, but will return non-detached
references as well."""
return cls._iter_items(repo, common_path)
#}END interface
#{ Remote Interface
@property # type: ignore ## mypy cannot deal with properties with an extra decorator (2021-04-21)
@require_remote_ref_path
def remote_name(self):
"""
:return:
Name of the remote we are a reference of, such as 'origin' for a reference
named 'origin/master'"""
tokens = self.path.split('/')
# /refs/remotes/<remote name>/<branch_name>
return tokens[2]
@property # type: ignore ## mypy cannot deal with properties with an extra decorator (2021-04-21)
@require_remote_ref_path
def remote_head(self):
""":return: Name of the remote head itself, i.e. master.
:note: The returned name is usually not qualified enough to uniquely identify
a branch"""
tokens = self.path.split('/')
return '/'.join(tokens[3:])
#} END remote interface
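
A short usage sketch, not taken from GitPython itself: listing the references of a repository through the class above. The repository path is a placeholder assumption.

# Illustrative only: iterating Reference objects of an existing checkout.
from git import Repo

repo = Repo("/path/to/some/checkout")   # hypothetical local repository
for ref in Reference.iter_items(repo):
    label = ref.name
    if ref.is_remote():
        label += " (remote=%s, head=%s)" % (ref.remote_name, ref.remote_head)
    print(label)
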
| 36.15748
| 102
| 0.62892
|
004f5f421090b78b9f9da83dd710c6f6eb112286
| 2,886
|
py
|
Python
|
const.py
|
Ardarmy-best/ardarmy-best
|
cf97d895663f91ddf07342f43db6bb24846ee43b
|
[
"MIT"
] | null | null | null |
const.py
|
Ardarmy-best/ardarmy-best
|
cf97d895663f91ddf07342f43db6bb24846ee43b
|
[
"MIT"
] | null | null | null |
const.py
|
Ardarmy-best/ardarmy-best
|
cf97d895663f91ddf07342f43db6bb24846ee43b
|
[
"MIT"
] | null | null | null |
from datetime import datetime
XML = """<?xml version="1.0"?>
<cross-domain-policy>
<allow-access-from domain="*" to-ports="*" />
</cross-domain-policy>
""".encode()
MAX_NAME_LEN = 70
ROOM_LIMIT = 40
EVENT_ROOM_LIMIT = 40
FREE_GOLD = True
room_items = [{"tpid": "wall15", "d": 3, "oid": 1, "x": 0.0, "y": 0.0,
"z": 0.0},
{"tpid": "wall15", "d": 5, "oid": 2, "x": 13.0, "y": 0.0,
"z": 0.0},
{"tpid": "floor4", "d": 5, "oid": 3, "x": 0.0, "y": 0.0,
"z": 0.0},
{"tpid": "door4", "d": 3, "oid": 4, "x": 3.0, "y": 0.0,
"z": 0.0, "rid": "outside"}]
campaigns = []
clans = True
mobile = True
fortune2 = True
professions = True
reputation = True
if clans:
campaigns.append({"st": 1, "v": 1,
"cil": [{"sc": 0, "gl": 0, "si": 0, "id": 8151,
"tid": "clans", "cid": 643},
{"sc": 0, "gl": 0, "si": 0, "id": 8152,
"tid": "clanRating_off", "cid": 643}],
"id": 643, "iu": "", "tp": 9,
"ed": datetime(2047, 5, 31, 11, 46)})
if mobile:
campaigns.append({"st": 1, "v": 1,
"cil": [{"sc": 0, "gl": 0, "si": 0, "id": 2514,
"tid": "mobile", "cid": 316}],
"id": 316, "iu": "", "tp": 9,
"ed": datetime(2022, 7, 31, 2, 0)})
if fortune2:
campaigns.append({"st": 2, "v": 1,
"cil": [{"sc": 0, "gl": 0, "si": 0, "id": 2434,
"tid": "fortune2", "cid": 299}],
"id": 299, "iu": "", "tp": 9,
"ed": datetime(2030, 10, 31, 2, 0)})
if professions:
campaigns.append({"st": 1, "v": 1,
"cil": [{"sc": 0, "gl": 0, "si": 0, "id": 1110,
"tid": "professions", "cid": 114},
{"sc": 0, "gl": 0, "si": 0, "id": 1111,
"tid": "grdnr", "cid": 114},
{"sc": 0, "gl": 0, "si": 0, "id": 1112,
"tid": "jntr", "cid": 114},
{"sc": 0, "gl": 0, "si": 0, "id": 1577,
"tid": "vsgst", "cid": 114},
{"sc": 0, "gl": 0, "si": 0, "id": 1578,
"tid": "phtghr", "cid": 114}],
"id": 114, "iu": "", "tp": 9,
"ed": datetime(2015, 8, 27, 2, 0)})
if reputation:
campaigns.append({"st": 1, "v": 1,
"cil": [{"sc": 0, "gl": 0, "si": 0, "id": 1109,
"tid": "reputation", "cid": 113}],
"id": 113, "iu": "", "tp": 9,
"ed": datetime(2015, 8, 18, 2, 0)})
| 41.228571
| 71
| 0.340263
|
39ee92825813c6a6f3d49c932b8a7d43e8163d47
| 2,328
|
py
|
Python
|
openomics_web/layouts/clinical_view.py
|
JonnyTran/open-omics
|
ef5db2dc2fdf486ee5e9fa4e0cf5be61b4531232
|
[
"MIT"
] | 12
|
2021-01-14T19:33:48.000Z
|
2022-01-06T16:13:03.000Z
|
openomics_web/layouts/clinical_view.py
|
JonnyTran/open-omics
|
ef5db2dc2fdf486ee5e9fa4e0cf5be61b4531232
|
[
"MIT"
] | 13
|
2020-12-31T20:38:11.000Z
|
2021-11-24T06:21:12.000Z
|
openomics_web/layouts/clinical_view.py
|
JonnyTran/open-omics
|
ef5db2dc2fdf486ee5e9fa4e0cf5be61b4531232
|
[
"MIT"
] | 7
|
2021-02-08T13:42:01.000Z
|
2021-10-21T21:37:14.000Z
|
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import pandas as pd
def ClinicalDataColumnSelect(columns):
"""
Args:
        columns: column names available in the uploaded clinical data file.
"""
return html.Div([
html.Div(['Select the gene id/name column to index by:']),
dcc.Dropdown(
id='clinical-patient-col-name',
options=[{'label': col, 'value': col} for col in columns],
style={
'width': '100%',
},
value=columns[0],
),
html.Div(['Select the column prefixes to import:']),
dcc.Dropdown(
id='clinical-data-columns-select',
options=[{'label': col, 'value': col} for col in columns],
style={
'width': '100%',
},
value=columns,
multi=True,
)
])
def ClinicalDataTable(df: pd.DataFrame):
"""
Args:
        df (pd.DataFrame): the clinical data table to render.
"""
df.index.rename("id", inplace=True)
print("df.reset_index()", df.reset_index().columns)
return dt.DataTable(
id='clinical-datatable',
columns=[{'name': i, 'id': i, 'deletable': True} for i in df.columns],
data=df.reset_index().to_dict('records'),
style_as_list_view=True,
# fixed_columns={'headers': True, 'data': 1},
style_cell={
'textAlign': 'left',
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
},
style_data={
'whiteSpace': 'normal',
'height': 'auto'
},
style_data_conditional=[
{'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
],
style_table={"maxHeight": '1200px',
'width': '1000px',
'marginTop': '5px',
'marginBottom': '10px',
'overflowX': 'scroll'
},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
virtualization=True,
filter_action="native",
sort_action="native",
sort_mode="multi",
row_selectable="multi",
selected_rows=[],
page_action="native",
page_current=0,
page_size=10,
)
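
An illustrative wiring sketch, not part of this module: mounting the two component factories above in a bare Dash app. The sample DataFrame and the app bootstrap are assumptions for demonstration.

# Illustrative only: a minimal Dash app using the factories defined above.
import dash
import pandas as pd

sample = pd.DataFrame(
    {"age": [63, 71], "gender": ["M", "F"]},
    index=pd.Index(["TCGA-01", "TCGA-02"], name="patient"),
)

app = dash.Dash(__name__)
app.layout = html.Div([
    ClinicalDataColumnSelect(list(sample.columns)),
    ClinicalDataTable(sample),
])

if __name__ == "__main__":
    app.run_server(debug=True)
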
| 28.390244
| 78
| 0.491838
|
d5e42cd6ce36e1056af18d3c55a395bb762c3863
| 15,591
|
py
|
Python
|
train.py
|
jingfeidu/fairseq
|
b41c74dc5be15918d5fd21f199b66b78a601192c
|
[
"BSD-3-Clause"
] | null | null | null |
train.py
|
jingfeidu/fairseq
|
b41c74dc5be15918d5fd21f199b66b78a601192c
|
[
"BSD-3-Clause"
] | null | null | null |
train.py
|
jingfeidu/fairseq
|
b41c74dc5be15918d5fd21f199b66b78a601192c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import itertools
import os
import math
import random
import torch
from fairseq import distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
from fairseq.utils import import_user_module
def main(args):
import_user_module(args)
if args.max_tokens is None:
args.max_tokens = 6000
print(args)
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
torch.manual_seed(args.seed)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load dataset splits
load_dataset_splits(task, ['train', 'valid'])
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
print(model)
print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
print('| num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
    # Make a dummy batch to (i) warm the caching allocator and (ii) serve as a
    # placeholder for DistributedDataParallel when there's an uneven number of
    # batches per worker.
max_positions = utils.resolve_max_positions(
task.max_positions(),
model.max_positions(),
)
dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)
oom_batch = task.dataset('train').get_dummy_batch(1, max_positions)
# Build trainer
trainer = Trainer(args, task, model, criterion, dummy_batch, oom_batch)
print('| training on {} GPUs'.format(args.distributed_world_size))
print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
# Initialize dataloader
epoch_itr = task.get_batch_iterator(
dataset=task.dataset(args.train_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=True,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
)
# Load the latest checkpoint if one is available
if not load_checkpoint(args, trainer, epoch_itr):
trainer.dummy_train_step([dummy_batch])
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_losses = [None]
valid_subsets = args.valid_subset.split(',')
while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
# train for one epoch
train(args, trainer, task, epoch_itr)
if epoch_itr.epoch % args.validate_interval == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args.save_interval == 0:
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Update parameters every N batches
if epoch_itr.epoch <= len(args.update_freq):
update_freq = args.update_freq[epoch_itr.epoch - 1]
else:
update_freq = args.update_freq[-1]
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch, no_progress_bar='simple',
)
extra_meters = collections.defaultdict(lambda: AverageMeter())
first_valid = args.valid_subset.split(',')[0]
max_update = args.max_update or math.inf
for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
log_output = trainer.train_step(samples)
if log_output is None:
continue
# log mid-epoch stats
stats = get_training_stats(trainer)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
continue # these are already logged above
if 'loss' in k:
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats)
# ignore the first mini-batch in words-per-second calculation
if i == 0:
trainer.get_meter('wps').reset()
num_updates = trainer.get_num_updates()
if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:
valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
# reset training meters
for k in [
'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
]:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
def get_training_stats(trainer):
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch,
prefix='valid on \'{}\' subset'.format(subset),
no_progress_bar='simple'
)
# reset validation loss meters
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda: AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
continue
extra_meters[k].update(v)
# log validation stats
stats = get_valid_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
valid_losses.append(stats['valid_loss'])
return valid_losses
def get_valid_stats(trainer):
stats = collections.OrderedDict()
stats['valid_loss'] = trainer.get_meter('valid_loss').avg
if trainer.get_meter('valid_nll_loss').count > 0:
nll_loss = trainer.get_meter('valid_nll_loss').avg
stats['valid_nll_loss'] = nll_loss
else:
nll_loss = trainer.get_meter('valid_loss').avg
stats['valid_ppl'] = get_perplexity(nll_loss)
stats['num_updates'] = trainer.get_num_updates()
if hasattr(save_checkpoint, 'best'):
stats['best'] = min(save_checkpoint.best, stats['valid_loss'])
return stats
def get_perplexity(loss):
try:
return '{:.2f}'.format(math.pow(2, loss))
except OverflowError:
return float('inf')
def save_checkpoint(args, trainer, epoch_itr, val_loss):
if args.no_save or not distributed_utils.is_master(args):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
end_of_epoch and not args.no_epoch_checkpoints and
epoch % args.save_interval == 0
)
checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
not end_of_epoch and args.save_interval_updates > 0 and
updates % args.save_interval_updates == 0
)
checkpoint_conds['checkpoint_best.pt'] = (
val_loss is not None and
(not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
)
checkpoint_conds['checkpoint_last.pt'] = True # keep this last so that it's a symlink
prev_best = getattr(save_checkpoint, 'best', val_loss)
if val_loss is not None:
save_checkpoint.best = min(val_loss, prev_best)
extra_state = {
'train_iterator': epoch_itr.state_dict(),
'val_loss': val_loss,
}
if hasattr(save_checkpoint, 'best'):
extra_state.update({'best': save_checkpoint.best})
checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
if len(checkpoints) > 0:
for cp in checkpoints:
trainer.save_checkpoint(cp, extra_state)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt')
for old_chk in checkpoints[args.keep_interval_updates:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint\d+\.pt')
for old_chk in checkpoints[args.keep_last_epochs:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
def load_checkpoint(args, trainer, epoch_itr):
"""Load a checkpoint and replay dataloader to match."""
os.makedirs(args.save_dir, exist_ok=True)
if os.path.isabs(args.restore_file):
checkpoint_path = args.restore_file
else:
checkpoint_path = os.path.join(args.save_dir, args.restore_file)
if os.path.isfile(checkpoint_path):
extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,
eval(args.optimizer_overrides))
if extra_state is not None:
# replay train iterator to match checkpoint
epoch_itr.load_state_dict(extra_state['train_iterator'])
print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
trainer.lr_step(epoch_itr.epoch)
trainer.lr_step_update(trainer.get_num_updates())
if 'best' in extra_state:
save_checkpoint.best = extra_state['best']
return True
else:
print('| no existing checkpoint found {}'.format(checkpoint_path))
return False
def load_dataset_splits(task, splits):
for split in splits:
if split == 'train':
task.load_dataset(split, combine=True)
else:
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
try:
task.load_dataset(split_k, combine=False)
except FileNotFoundError as e:
if k > 0:
break
raise e
def distributed_main(i, args):
import socket
args.device_id = i
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = i
args.distributed_rank = distributed_utils.distributed_init(args)
print('| initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank))
main(args)
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
if args.distributed_init_method is None:
distributed_utils.infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
distributed_main(args.device_id, args)
elif args.distributed_world_size > 1:
# fallback for single node with multiple GPUs
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_rank = None # set based on device id
print(
'''| NOTE: you may get better performance with:
python -m torch.distributed.launch --nproc_per_node {ngpu} train.py {no_c10d}(...)
'''.format(
ngpu=args.distributed_world_size,
no_c10d=(
'--ddp-backend=no_c10d ' if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d'
else ''
),
)
)
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, ),
nprocs=args.distributed_world_size,
)
else:
# single GPU training
main(args)
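
For orientation only: the entry point above is normally launched from the command line; the sketch below drives it programmatically instead. The data directory, the chosen flags, and the input_args keyword of parse_args_and_arch are assumptions to check against your fairseq version.

# Illustrative only: invoking the training entry point without the shell.
from fairseq import options

parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, input_args=[
    "data-bin/my_dataset",           # hypothetical preprocessed data directory
    "--arch", "transformer",
    "--max-tokens", "4000",
    "--save-dir", "checkpoints/demo",
])
main(args)                           # main() as defined in this file
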
| 37.568675
| 112
| 0.647168
|
228a8f6efed3fe6d36dc0b35ea795ed0adcd0d62
| 1,579
|
py
|
Python
|
mask_rcnn/modeling/generate_proposal_labels.py
|
rochesterxugroup/csc_249_final_proj_a2d_det
|
0f742981699352181e94a001f3d6f4a5b1824a54
|
[
"MIT"
] | 2
|
2019-04-17T21:03:54.000Z
|
2021-07-14T19:03:19.000Z
|
mask_rcnn/modeling/generate_proposal_labels.py
|
rochesterxugroup/csc_249_final_proj_a2d_det
|
0f742981699352181e94a001f3d6f4a5b1824a54
|
[
"MIT"
] | null | null | null |
mask_rcnn/modeling/generate_proposal_labels.py
|
rochesterxugroup/csc_249_final_proj_a2d_det
|
0f742981699352181e94a001f3d6f4a5b1824a54
|
[
"MIT"
] | 1
|
2019-04-22T23:16:35.000Z
|
2019-04-22T23:16:35.000Z
|
from torch import nn
from mask_rcnn.core.config import cfg
from mask_rcnn.datasets import json_dataset
import mask_rcnn.roi_data.fast_rcnn
from mask_rcnn import roi_data  # so the roi_data.fast_rcnn calls below resolve
class GenerateProposalLabelsOp(nn.Module):
def __init__(self):
super(GenerateProposalLabelsOp, self).__init__()
def forward(self, rpn_rois, roidb, im_info):
"""Op for generating training labels for RPN proposals. This is used
when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
Faster R-CNN training).
blobs_in:
- 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
- 'roidb': roidb entries that will be labeled
- 'im_info': See GenerateProposals doc.
blobs_out:
- (variable set of blobs): returns whatever blobs are required for
training the model. It does this by querying the data loader for
the list of blobs that are needed.
"""
im_scales = im_info.data.numpy()[:, 2]
output_blob_names = roi_data.fast_rcnn.get_fast_rcnn_blob_names()
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
# Note: crowd_thresh=0 will ignore _filter_crowd_proposals
json_dataset.add_proposals(roidb, rpn_rois, im_scales, crowd_thresh=0)
blobs = {k: [] for k in output_blob_names}
roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)
return blobs
| 39.475
| 78
| 0.684611
|
991306105fbcad6ff0549c9641076238d82edc41
| 3,145
|
py
|
Python
|
test/model-caching/onnx/multi-model-classifier/predictor.py
|
ourobouros/cortex
|
1b3aaf909816b93f6a6e3edd0da8c10891e05be9
|
[
"Apache-2.0"
] | 1
|
2022-02-23T08:45:19.000Z
|
2022-02-23T08:45:19.000Z
|
test/model-caching/onnx/multi-model-classifier/predictor.py
|
ourobouros/cortex
|
1b3aaf909816b93f6a6e3edd0da8c10891e05be9
|
[
"Apache-2.0"
] | null | null | null |
test/model-caching/onnx/multi-model-classifier/predictor.py
|
ourobouros/cortex
|
1b3aaf909816b93f6a6e3edd0da8c10891e05be9
|
[
"Apache-2.0"
] | null | null | null |
# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.24.*, run `git checkout -b 0.24` or switch to the `0.24` branch on GitHub)
import numpy as np
import cv2, requests
from scipy.special import softmax
def get_url_image(url_image):
"""
Get numpy image from URL image.
"""
resp = requests.get(url_image, stream=True).raw
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
"""
Resize a numpy image.
"""
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
# calculate the ratio of the height and construct the dimensions
r = height / float(h)
dim = (int(w * r), height)
else:
# calculate the ratio of the width and construct the dimensions
r = width / float(w)
dim = (width, int(h * r))
resized = cv2.resize(image, dim, interpolation=inter)
return resized
def preprocess(img_data):
"""
Normalize input for inference.
"""
# move pixel color dimension to position 0
img = np.moveaxis(img_data, 2, 0)
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
norm_img_data = np.zeros(img.shape).astype("float32")
for i in range(img.shape[0]):
# for each pixel in each channel, divide the value by 255 to get value between [0, 1] and then normalize
norm_img_data[i, :, :] = (img[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i]
# extend to batch size of 1
norm_img_data = norm_img_data[np.newaxis, ...]
return norm_img_data
def postprocess(results):
"""
Eliminates all dimensions of size 1, softmaxes the input and then returns the index of the element with the highest value.
"""
squeezed = np.squeeze(results)
maxed = softmax(squeezed)
result = np.argmax(maxed)
return result
class ONNXPredictor:
def __init__(self, onnx_client, config):
# onnx client
self.client = onnx_client
# for image classifiers
classes = requests.get(config["image-classifier-classes"]).json()
self.image_classes = [classes[str(k)][1] for k in range(len(classes))]
self.resize_value = config["image-resize"]
def predict(self, payload, query_params):
# get request params
model_name = query_params["model"]
model_version = query_params.get("version", "latest")
img_url = payload["url"]
# process the input
img = get_url_image(img_url)
img = image_resize(img, height=self.resize_value)
img = preprocess(img)
# predict
results = self.client.predict(img, model_name, model_version)[0]
# interpret result
result = postprocess(results)
predicted_label = self.image_classes[result]
return {"label": predicted_label, "model": {"name": model_name, "version": model_version}}
| 31.45
| 213
| 0.644515
|
18f49ee2dce853b825f52f4eb056a0ef2ae8ab4f
| 321
|
py
|
Python
|
demo_app/demo/urls.py
|
areski/django-xadmin
|
fbe333894fdb2aaaba57d461e2968ef1312e2a0f
|
[
"BSD-3-Clause"
] | 1
|
2019-11-03T10:55:56.000Z
|
2019-11-03T10:55:56.000Z
|
demo_app/demo/urls.py
|
areski/django-xadmin
|
fbe333894fdb2aaaba57d461e2968ef1312e2a0f
|
[
"BSD-3-Clause"
] | null | null | null |
demo_app/demo/urls.py
|
areski/django-xadmin
|
fbe333894fdb2aaaba57d461e2968ef1312e2a0f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
import xadmin
xadmin.autodiscover()
# from xadmin.plugins import xversion
# xversion.registe_models()
urlpatterns = patterns('',
url(r'', include(xadmin.site.urls)),
)
| 24.692308
| 59
| 0.65109
|
15fbc65216d5873551a0b331b7a9168247923286
| 4,316
|
py
|
Python
|
src/reversi_zero/play_game/game_model.py
|
qiqiguaitm/reversi-alpha-zero
|
665c930707ac16e49bdc9771331631007529c688
|
[
"MIT"
] | 2
|
2018-02-27T07:34:01.000Z
|
2021-09-18T13:39:30.000Z
|
src/reversi_zero/play_game/game_model.py
|
qiqiguaitm/reversi-alpha-zero
|
665c930707ac16e49bdc9771331631007529c688
|
[
"MIT"
] | null | null | null |
src/reversi_zero/play_game/game_model.py
|
qiqiguaitm/reversi-alpha-zero
|
665c930707ac16e49bdc9771331631007529c688
|
[
"MIT"
] | null | null | null |
import enum
from logging import getLogger
from reversi_zero.agent.player import HistoryItem
from reversi_zero.agent.player import ReversiPlayer
from reversi_zero.config import Config
from reversi_zero.env.reversi_env import Player, ReversiEnv
from reversi_zero.lib.bitboard import find_correct_moves
from reversi_zero.lib.model_helpler import load_best_model_weight, reload_newest_next_generation_model_if_changed
from reversi_zero.play_game.common import load_model
from reversi_zero.play_game.EdaxPlayer import *
logger = getLogger(__name__)
GameEvent = enum.Enum("GameEvent", "update ai_move over pass")
class PlayWithHuman:
def __init__(self, config: Config):
self.config = config
self.human_color = None
self.observers = []
self.env = ReversiEnv().reset()
self.model = self._load_model()
self.ai = None # type: ReversiPlayer
self.last_evaluation = None
self.last_history = None # type: HistoryItem
def add_observer(self, observer_func):
self.observers.append(observer_func)
def notify_all(self, event):
for ob_func in self.observers:
ob_func(event)
def start_game(self, human_is_black):
self.human_color = Player.black if human_is_black else Player.white
self.env = ReversiEnv().reset()
self.ai = ReversiPlayer(self.config, self.model)
self.edax_player = EdaxPlayer()
self.act = None
def play_next_turn(self):
self.notify_all(GameEvent.update)
if self.over:
self.notify_all(GameEvent.over)
return
if self.next_player != self.human_color:
self.notify_all(GameEvent.ai_move)
@property
def over(self):
return self.env.done
@property
def next_player(self):
return self.env.next_player
def stone(self, px, py):
"""left top=(0, 0), right bottom=(7,7)"""
pos = int(py * 8 + px)
assert 0 <= pos < 64
bit = 1 << pos
if self.env.board.black & bit:
return Player.black
elif self.env.board.white & bit:
return Player.white
return None
@property
def number_of_black_and_white(self):
return self.env.observation.number_of_black_and_white
def available(self, px, py):
pos = int(py * 8 + px)
if pos < 0 or 64 <= pos:
return False
own, enemy = self.env.board.black, self.env.board.white
if self.human_color == Player.white:
own, enemy = enemy, own
legal_moves = find_correct_moves(own, enemy)
return legal_moves & (1 << pos)
def move(self, px, py, use_edax=False):
pos = int(py * 8 + px)
assert 0 <= pos < 64
if self.next_player != self.human_color:
return False
if use_edax:
logger.debug(f"edax thinking,current act {self.act}...")
'''
if self.act == None:
pos = self.edax_player.action_pos(pos=None, start=True)
elif self.act == 'pass':
pos = self.edax_player.action_pos(pos=None, start=False)
else:
pos = self.edax_player.action_pos(self.act)
'''
own, enemy = self.get_state_of_next_player()
pos = action = self.edax_player.action(own, enemy)
logger.debug("edax steped...")
self.act = 'pass'
self.env.step(pos)
else:
self.env.step(pos)
def _load_model(self):
return load_model(self.config)
def move_by_ai(self):
if self.next_player == self.human_color:
return False
own, enemy = self.get_state_of_next_player()
action = self.ai.action(own, enemy)
self.env.step(action)
self.act = action
self.last_history = self.ai.ask_thought_about(own, enemy)
self.last_evaluation = self.last_history.values[self.last_history.action]
logger.debug(f"evaluation by ai={self.last_evaluation}")
def get_state_of_next_player(self):
if self.next_player == Player.black:
own, enemy = self.env.board.black, self.env.board.white
else:
own, enemy = self.env.board.white, self.env.board.black
return own, enemy
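
An illustrative driver sketch, not from the project: one way a UI layer could react to the observer events exposed above. Constructing Config() with no arguments and having trained weights available are assumptions.

# Illustrative only: a console driver for PlayWithHuman.
def handle(event):
    if event == GameEvent.ai_move:
        game.move_by_ai()
        game.play_next_turn()
    elif event == GameEvent.over:
        print("final score (black, white):", game.number_of_black_and_white)

game = PlayWithHuman(Config())        # assumes a default-constructible Config
game.add_observer(handle)
game.start_game(human_is_black=True)
game.play_next_turn()                 # a human move would follow via game.move(px, py)
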
| 31.97037
| 113
| 0.624652
|
3a50c6ed7d6df230dff02205914dfacf0d2a9315
| 12,106
|
py
|
Python
|
airflow/providers/google/cloud/transfers/sql_to_gcs.py
|
bluecolor/airflow
|
d79e7221de76f01b5cd36c15224b59e8bb451c90
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/google/cloud/transfers/sql_to_gcs.py
|
bluecolor/airflow
|
d79e7221de76f01b5cd36c15224b59e8bb451c90
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/google/cloud/transfers/sql_to_gcs.py
|
bluecolor/airflow
|
d79e7221de76f01b5cd36c15224b59e8bb451c90
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base operator for SQL to GCS operators.
"""
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator):
"""
:param sql: The SQL to execute.
:type sql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A ``{}`` should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param export_format: Desired format of files to be exported.
:type export_format: str
:param field_delimiter: The delimiter to be used for CSV files.
:type field_delimiter: str
:param gzip: Option to compress file for upload (does not apply to schemas).
:type gzip: bool
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using Jinja template, otherwise, pass a list of
dict. Examples could be seen: https://cloud.google.com/bigquery/docs
/schemas#specifying_a_json_schema_file
:type schema: str or list
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
export_format='json',
field_delimiter=',',
gzip=False,
schema=None,
parameters=None,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
**kwargs):
super().__init__(**kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.field_delimiter = field_delimiter
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context):
self.log.info("Executing query")
cursor = self.query()
self.log.info("Writing local data files")
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
self.log.info("Writing local schema file")
files_to_upload.append(self._write_local_schema_file(cursor))
# Flush all files before uploading
for tmp_file in files_to_upload:
tmp_file['file_handle'].flush()
self.log.info("Uploading %d files to GCS.", len(files_to_upload))
self._upload_to_gcs(files_to_upload)
self.log.info("Removing local files")
# Close all temp file handles.
for tmp_file in files_to_upload:
tmp_file['file_handle'].close()
def convert_types(self, schema, col_type_dict, row):
"""Convert values from DBAPI to output-friendly formats."""
return [
self.convert_type(value, col_type_dict.get(name))
for name, value in zip(schema, row)
]
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
self.log.info("Current file count: %d", len(files_to_upload))
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self.convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
tmp_file_handle.write(
json.dumps(row_dict, sort_keys=True, ensure_ascii=False).encode("utf-8")
)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
self.log.info("Current file count: %d", len(files_to_upload))
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
@abc.abstractmethod
def query(self):
"""Execute DBAPI query."""
@abc.abstractmethod
def field_to_bigquery(self, field):
"""Convert a DBAPI field to BigQuery schema format."""
@abc.abstractmethod
def convert_type(self, value, schema_type):
"""Convert a value from DBAPI to output-friendly formats."""
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
            self.log.warning('Using default schema due to unexpected type. '
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system. Schema for database will be read from cursor if
not specified.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
if self.schema:
self.log.info("Using user schema")
schema = self.schema
else:
self.log.info("Starts generating schema")
schema = [self.field_to_bigquery(field) for field in cursor.description]
if isinstance(schema, list):
schema = json.dumps(schema, sort_keys=True)
self.log.info('Using schema for %s', self.schema_filename)
self.log.debug("Current schema: %s", schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(schema.encode('utf-8'))
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': 'application/json',
}
return schema_file_to_upload
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
"""
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'),
gzip=self.gzip if tmp_file.get('file_name') != self.schema_filename else False)
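
An illustrative subclass sketch, not shipped with Airflow: the three abstract hooks a concrete operator must provide. The sqlite3 source and the BigQuery field mapping are assumptions for demonstration only.

# Illustrative only: a minimal concrete SQL-to-GCS operator.
import sqlite3

class SqliteToGCSOperator(BaseSQLToGCSOperator):
    def query(self):
        conn = sqlite3.connect("/tmp/demo.db")       # hypothetical database file
        cursor = conn.cursor()
        cursor.execute(self.sql)
        return cursor

    def field_to_bigquery(self, field):
        # DBAPI cursor.description entries are 7-tuples; only the name is reliable here.
        return {"name": field[0], "type": "STRING", "mode": "NULLABLE"}

    def convert_type(self, value, schema_type):
        return value                                 # pass values through unchanged

# A task would then be declared like any other operator, e.g.
# SqliteToGCSOperator(task_id="export", sql="SELECT * FROM users",
#                     bucket="my-bucket", filename="users_{}.json")
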
| 41.176871
| 104
| 0.639765
|
6720f12e45c7d69ab855e04f2db589a9c24b4c7c
| 20,683
|
py
|
Python
|
sdk/python/pulumi_aws/get_ami.py
|
Otanikotani/pulumi-aws
|
00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/get_ami.py
|
Otanikotani/pulumi-aws
|
00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/get_ami.py
|
Otanikotani/pulumi-aws
|
00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = [
'GetAmiResult',
'AwaitableGetAmiResult',
'get_ami',
]
@pulumi.output_type
class GetAmiResult:
"""
A collection of values returned by getAmi.
"""
def __init__(__self__, architecture=None, arn=None, block_device_mappings=None, creation_date=None, description=None, executable_users=None, filters=None, hypervisor=None, id=None, image_id=None, image_location=None, image_owner_alias=None, image_type=None, kernel_id=None, most_recent=None, name=None, name_regex=None, owner_id=None, owners=None, platform=None, product_codes=None, public=None, ramdisk_id=None, root_device_name=None, root_device_type=None, root_snapshot_id=None, sriov_net_support=None, state=None, state_reason=None, tags=None, virtualization_type=None):
if architecture and not isinstance(architecture, str):
raise TypeError("Expected argument 'architecture' to be a str")
pulumi.set(__self__, "architecture", architecture)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if block_device_mappings and not isinstance(block_device_mappings, list):
raise TypeError("Expected argument 'block_device_mappings' to be a list")
pulumi.set(__self__, "block_device_mappings", block_device_mappings)
if creation_date and not isinstance(creation_date, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", creation_date)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if executable_users and not isinstance(executable_users, list):
raise TypeError("Expected argument 'executable_users' to be a list")
pulumi.set(__self__, "executable_users", executable_users)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if hypervisor and not isinstance(hypervisor, str):
raise TypeError("Expected argument 'hypervisor' to be a str")
pulumi.set(__self__, "hypervisor", hypervisor)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if image_id and not isinstance(image_id, str):
raise TypeError("Expected argument 'image_id' to be a str")
pulumi.set(__self__, "image_id", image_id)
if image_location and not isinstance(image_location, str):
raise TypeError("Expected argument 'image_location' to be a str")
pulumi.set(__self__, "image_location", image_location)
if image_owner_alias and not isinstance(image_owner_alias, str):
raise TypeError("Expected argument 'image_owner_alias' to be a str")
pulumi.set(__self__, "image_owner_alias", image_owner_alias)
if image_type and not isinstance(image_type, str):
raise TypeError("Expected argument 'image_type' to be a str")
pulumi.set(__self__, "image_type", image_type)
if kernel_id and not isinstance(kernel_id, str):
raise TypeError("Expected argument 'kernel_id' to be a str")
pulumi.set(__self__, "kernel_id", kernel_id)
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
pulumi.set(__self__, "most_recent", most_recent)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if name_regex and not isinstance(name_regex, str):
raise TypeError("Expected argument 'name_regex' to be a str")
pulumi.set(__self__, "name_regex", name_regex)
if owner_id and not isinstance(owner_id, str):
raise TypeError("Expected argument 'owner_id' to be a str")
pulumi.set(__self__, "owner_id", owner_id)
if owners and not isinstance(owners, list):
raise TypeError("Expected argument 'owners' to be a list")
pulumi.set(__self__, "owners", owners)
if platform and not isinstance(platform, str):
raise TypeError("Expected argument 'platform' to be a str")
pulumi.set(__self__, "platform", platform)
if product_codes and not isinstance(product_codes, list):
raise TypeError("Expected argument 'product_codes' to be a list")
pulumi.set(__self__, "product_codes", product_codes)
if public and not isinstance(public, bool):
raise TypeError("Expected argument 'public' to be a bool")
pulumi.set(__self__, "public", public)
if ramdisk_id and not isinstance(ramdisk_id, str):
raise TypeError("Expected argument 'ramdisk_id' to be a str")
pulumi.set(__self__, "ramdisk_id", ramdisk_id)
if root_device_name and not isinstance(root_device_name, str):
raise TypeError("Expected argument 'root_device_name' to be a str")
pulumi.set(__self__, "root_device_name", root_device_name)
if root_device_type and not isinstance(root_device_type, str):
raise TypeError("Expected argument 'root_device_type' to be a str")
pulumi.set(__self__, "root_device_type", root_device_type)
if root_snapshot_id and not isinstance(root_snapshot_id, str):
raise TypeError("Expected argument 'root_snapshot_id' to be a str")
pulumi.set(__self__, "root_snapshot_id", root_snapshot_id)
if sriov_net_support and not isinstance(sriov_net_support, str):
raise TypeError("Expected argument 'sriov_net_support' to be a str")
pulumi.set(__self__, "sriov_net_support", sriov_net_support)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if state_reason and not isinstance(state_reason, dict):
raise TypeError("Expected argument 'state_reason' to be a dict")
pulumi.set(__self__, "state_reason", state_reason)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if virtualization_type and not isinstance(virtualization_type, str):
raise TypeError("Expected argument 'virtualization_type' to be a str")
pulumi.set(__self__, "virtualization_type", virtualization_type)
@property
@pulumi.getter
def architecture(self) -> str:
"""
The OS architecture of the AMI (ie: `i386` or `x86_64`).
"""
return pulumi.get(self, "architecture")
@property
@pulumi.getter
def arn(self) -> str:
"""
The ARN of the AMI.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="blockDeviceMappings")
def block_device_mappings(self) -> Sequence['outputs.GetAmiBlockDeviceMappingResult']:
"""
The block device mappings of the AMI.
* `block_device_mappings.#.device_name` - The physical name of the device.
* `block_device_mappings.#.ebs.delete_on_termination` - `true` if the EBS volume
will be deleted on termination.
* `block_device_mappings.#.ebs.encrypted` - `true` if the EBS volume
is encrypted.
* `block_device_mappings.#.ebs.iops` - `0` if the EBS volume is
not a provisioned IOPS image, otherwise the supported IOPS count.
* `block_device_mappings.#.ebs.snapshot_id` - The ID of the snapshot.
* `block_device_mappings.#.ebs.volume_size` - The size of the volume, in GiB.
* `block_device_mappings.#.ebs.volume_type` - The volume type.
* `block_device_mappings.#.no_device` - Suppresses the specified device
included in the block device mapping of the AMI.
* `block_device_mappings.#.virtual_name` - The virtual device name (for
instance stores).
"""
return pulumi.get(self, "block_device_mappings")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> str:
"""
The date and time the image was created.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the AMI that was provided during image
creation.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="executableUsers")
def executable_users(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "executable_users")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetAmiFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def hypervisor(self) -> str:
"""
The hypervisor type of the image.
"""
return pulumi.get(self, "hypervisor")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="imageId")
def image_id(self) -> str:
"""
The ID of the AMI. Should be the same as the resource `id`.
"""
return pulumi.get(self, "image_id")
@property
@pulumi.getter(name="imageLocation")
def image_location(self) -> str:
"""
The location of the AMI.
"""
return pulumi.get(self, "image_location")
@property
@pulumi.getter(name="imageOwnerAlias")
def image_owner_alias(self) -> str:
"""
The AWS account alias (for example, `amazon`, `self`) or
the AWS account ID of the AMI owner.
"""
return pulumi.get(self, "image_owner_alias")
@property
@pulumi.getter(name="imageType")
def image_type(self) -> str:
"""
The type of image.
"""
return pulumi.get(self, "image_type")
@property
@pulumi.getter(name="kernelId")
def kernel_id(self) -> str:
"""
The kernel associated with the image, if any. Only applicable
for machine images.
"""
return pulumi.get(self, "kernel_id")
@property
@pulumi.getter(name="mostRecent")
def most_recent(self) -> Optional[bool]:
return pulumi.get(self, "most_recent")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the AMI that was provided during image creation.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nameRegex")
def name_regex(self) -> Optional[str]:
return pulumi.get(self, "name_regex")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> str:
"""
The AWS account ID of the image owner.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter
def owners(self) -> Sequence[str]:
return pulumi.get(self, "owners")
@property
@pulumi.getter
def platform(self) -> str:
"""
The value is Windows for `Windows` AMIs; otherwise blank.
"""
return pulumi.get(self, "platform")
@property
@pulumi.getter(name="productCodes")
def product_codes(self) -> Sequence['outputs.GetAmiProductCodeResult']:
"""
Any product codes associated with the AMI.
* `product_codes.#.product_code_id` - The product code.
* `product_codes.#.product_code_type` - The type of product code.
"""
return pulumi.get(self, "product_codes")
@property
@pulumi.getter
def public(self) -> bool:
"""
`true` if the image has public launch permissions.
"""
return pulumi.get(self, "public")
@property
@pulumi.getter(name="ramdiskId")
def ramdisk_id(self) -> str:
"""
The RAM disk associated with the image, if any. Only applicable
for machine images.
"""
return pulumi.get(self, "ramdisk_id")
@property
@pulumi.getter(name="rootDeviceName")
def root_device_name(self) -> str:
"""
The device name of the root device.
"""
return pulumi.get(self, "root_device_name")
@property
@pulumi.getter(name="rootDeviceType")
def root_device_type(self) -> str:
"""
The type of root device (ie: `ebs` or `instance-store`).
"""
return pulumi.get(self, "root_device_type")
@property
@pulumi.getter(name="rootSnapshotId")
def root_snapshot_id(self) -> str:
"""
The snapshot id associated with the root device, if any
(only applies to `ebs` root devices).
"""
return pulumi.get(self, "root_snapshot_id")
@property
@pulumi.getter(name="sriovNetSupport")
def sriov_net_support(self) -> str:
"""
Specifies whether enhanced networking is enabled.
"""
return pulumi.get(self, "sriov_net_support")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the AMI. If the state is `available`, the image
is successfully registered and can be used to launch an instance.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateReason")
def state_reason(self) -> Mapping[str, str]:
"""
Describes a state change. Fields are `UNSET` if not available.
* `state_reason.code` - The reason code for the state change.
* `state_reason.message` - The message for the state change.
"""
return pulumi.get(self, "state_reason")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Any tags assigned to the image.
* `tags.#.key` - The key name of the tag.
* `tags.#.value` - The value of the tag.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="virtualizationType")
def virtualization_type(self) -> str:
"""
The type of virtualization of the AMI (ie: `hvm` or
`paravirtual`).
"""
return pulumi.get(self, "virtualization_type")
class AwaitableGetAmiResult(GetAmiResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAmiResult(
architecture=self.architecture,
arn=self.arn,
block_device_mappings=self.block_device_mappings,
creation_date=self.creation_date,
description=self.description,
executable_users=self.executable_users,
filters=self.filters,
hypervisor=self.hypervisor,
id=self.id,
image_id=self.image_id,
image_location=self.image_location,
image_owner_alias=self.image_owner_alias,
image_type=self.image_type,
kernel_id=self.kernel_id,
most_recent=self.most_recent,
name=self.name,
name_regex=self.name_regex,
owner_id=self.owner_id,
owners=self.owners,
platform=self.platform,
product_codes=self.product_codes,
public=self.public,
ramdisk_id=self.ramdisk_id,
root_device_name=self.root_device_name,
root_device_type=self.root_device_type,
root_snapshot_id=self.root_snapshot_id,
sriov_net_support=self.sriov_net_support,
state=self.state,
state_reason=self.state_reason,
tags=self.tags,
virtualization_type=self.virtualization_type)
def get_ami(executable_users: Optional[Sequence[str]] = None,
filters: Optional[Sequence[pulumi.InputType['GetAmiFilterArgs']]] = None,
most_recent: Optional[bool] = None,
name_regex: Optional[str] = None,
owners: Optional[Sequence[str]] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAmiResult:
"""
Use this data source to get the ID of a registered AMI for use in other
resources.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.get_ami(executable_users=["self"],
filters=[
aws.GetAmiFilterArgs(
name="name",
values=["myami-*"],
),
aws.GetAmiFilterArgs(
name="root-device-type",
values=["ebs"],
),
aws.GetAmiFilterArgs(
name="virtualization-type",
values=["hvm"],
),
],
most_recent=True,
name_regex="^myami-\\d{3}",
owners=["self"])
```
:param Sequence[str] executable_users: Limit search to users with *explicit* launch permission on
the image. Valid items are the numeric account ID or `self`.
:param Sequence[pulumi.InputType['GetAmiFilterArgs']] filters: One or more name/value pairs to filter off of. There are
several valid keys, for a full reference, check out
[describe-images in the AWS CLI reference][1].
:param bool most_recent: If more than one result is returned, use the most
recent AMI.
:param str name_regex: A regex string to apply to the AMI list returned
by AWS. This allows more advanced filtering not supported from the AWS API. This
filtering is done locally on what AWS returns, and could have a performance
impact if the result is large. It is recommended to combine this with other
options to narrow down the list AWS returns.
:param Sequence[str] owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).
:param Mapping[str, str] tags: Any tags assigned to the image.
* `tags.#.key` - The key name of the tag.
* `tags.#.value` - The value of the tag.
"""
__args__ = dict()
__args__['executableUsers'] = executable_users
__args__['filters'] = filters
__args__['mostRecent'] = most_recent
__args__['nameRegex'] = name_regex
__args__['owners'] = owners
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAmi:getAmi', __args__, opts=opts, typ=GetAmiResult).value
return AwaitableGetAmiResult(
architecture=__ret__.architecture,
arn=__ret__.arn,
block_device_mappings=__ret__.block_device_mappings,
creation_date=__ret__.creation_date,
description=__ret__.description,
executable_users=__ret__.executable_users,
filters=__ret__.filters,
hypervisor=__ret__.hypervisor,
id=__ret__.id,
image_id=__ret__.image_id,
image_location=__ret__.image_location,
image_owner_alias=__ret__.image_owner_alias,
image_type=__ret__.image_type,
kernel_id=__ret__.kernel_id,
most_recent=__ret__.most_recent,
name=__ret__.name,
name_regex=__ret__.name_regex,
owner_id=__ret__.owner_id,
owners=__ret__.owners,
platform=__ret__.platform,
product_codes=__ret__.product_codes,
public=__ret__.public,
ramdisk_id=__ret__.ramdisk_id,
root_device_name=__ret__.root_device_name,
root_device_type=__ret__.root_device_type,
root_snapshot_id=__ret__.root_snapshot_id,
sriov_net_support=__ret__.sriov_net_support,
state=__ret__.state,
state_reason=__ret__.state_reason,
tags=__ret__.tags,
virtualization_type=__ret__.virtualization_type)
| 39.851638
| 578
| 0.641541
|
1fb85092dea195057b2e56bc9c312881a11dfe14
| 658
|
py
|
Python
|
labels/python/get_batch_points.py
|
HYPJUDY/human-pose-estimation-by-deep-learning
|
f873d368165b1564534979012b42d6967ba06efc
|
[
"MIT"
] | 46
|
2017-05-28T08:10:30.000Z
|
2021-11-19T03:11:16.000Z
|
labels/python/get_batch_points.py
|
Sunshine352/human-pose-estimation-by-deep-learning
|
f873d368165b1564534979012b42d6967ba06efc
|
[
"MIT"
] | 9
|
2017-08-15T17:17:51.000Z
|
2019-11-11T11:31:47.000Z
|
labels/python/get_batch_points.py
|
Sunshine352/human-pose-estimation-by-deep-learning
|
f873d368165b1564534979012b42d6967ba06efc
|
[
"MIT"
] | 17
|
2017-07-27T22:18:15.000Z
|
2018-11-22T10:41:06.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import re
import sys
def _get_point(from_list, idx):
return [from_list[idx*2], from_list[idx*2 + 1]]
def get_batch_points(src_file, dst_file):
    # keep every other annotation line, i.e. half of the data
count = 0
with open(src_file, 'rb') as fr, open(dst_file, 'wb') as fw:
for line in fr:
if count == 0:
fw.write(line)
count = 1
else:
count = 0
def main():
src_file = "../txt/input/train_annos_mix_cleaned.txt"
dst_file = "../txt/input/train_annos_mix_half_cleaned.txt"
get_batch_points(src_file, dst_file)
if __name__ == "__main__":
main()
| 23.5
| 64
| 0.595745
|
133077068bb7e25f28995ed8908592ee808ab104
| 2,673
|
py
|
Python
|
tpdatasrc/co8infra/scr/Spell311 - Minor Globe of Invulnerability.py
|
edoipi/TemplePlus
|
f0e552289822fea908f16daa379fa568b1bd286d
|
[
"MIT"
] | 69
|
2015-05-05T14:09:25.000Z
|
2022-02-15T06:13:04.000Z
|
tpdatasrc/co8infra/scr/Spell311 - Minor Globe of Invulnerability.py
|
edoipi/TemplePlus
|
f0e552289822fea908f16daa379fa568b1bd286d
|
[
"MIT"
] | 457
|
2015-05-01T22:07:45.000Z
|
2022-03-31T02:19:10.000Z
|
tpdatasrc/co8infra/scr/Spell311 - Minor Globe of Invulnerability.py
|
edoipi/TemplePlus
|
f0e552289822fea908f16daa379fa568b1bd286d
|
[
"MIT"
] | 25
|
2016-02-04T21:19:53.000Z
|
2021-11-15T23:14:51.000Z
|
from toee import *
import _include
from co8Util.PersistentData import *
from co8Util.ObjHandling import *
MINOR_GLOBE_OF_INVULNERABILITY_KEY = "Sp311_MINOR_GLOBE_OF_INVULNERABILITY_Activelist"
def OnBeginSpellCast( spell ):
print "Minor Globe of Invulnerability OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-abjuration-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Minor Globe of Invulnerability OnSpellEffect"
spell.duration = 1 * spell.caster_level
target_item = spell.target_list[0]
npc = spell.caster ## added so NPC's can use wand/potion/scroll
if npc.name == 20123: # Falrinth
spell.duration = 10
spell.caster_level = 10
#spell.spell_end( spell.id )
#activeList = Co8PersistentData.getData(MINOR_GLOBE_OF_INVULNERABILITY_KEY)
#if isNone(activeList): activeList = []
#activeList.append([spell.id, derefHandle(spell)]) # doesn't work, spell objects can't be deref'ed
#Co8PersistentData.setData(MINOR_GLOBE_OF_INVULNERABILITY_KEY, activeList)
if npc.type != obj_t_pc and npc.leader_get() == OBJ_HANDLE_NULL and spell.duration <= 0:
spell.duration = 10
spell.caster_level = 10
# put sp-Minor Globe condition on target
spell_obj_partsys_id = game.particles( 'sp-Minor Globe of Invulnerability', target_item.obj )
# target_item.partsys_id = game.particles( 'sp-Minor Globe of Invulnerability', target_item.obj )
target_item.obj.condition_add_with_args( 'sp-Minor Globe of Invulnerability', spell.id, spell.duration, 0, spell_obj_partsys_id )
#target_item.obj.condition_add_arg_x( 3, spell_obj_partsys_id )
#objectevent_id = target_item.obj.condition_get_arg_x( 2 )
def OnBeginRound( spell ):
print "Minor Globe of Invulnerability OnBeginRound"
def OnEndSpellCast( spell ):
print "Minor Globe of Invulnerability OnEndSpellCast"
if spell.caster.name == 20123: # Falrinth
activeList = Co8PersistentData.getData(MINOR_GLOBE_OF_INVULNERABILITY_KEY)
if isNone(activeList):
print "ERROR! Active Globe spell without activeList!"
return
for entry in activeList:
spellID, target = entry
targetObj = refHandle(target)
if spellID == spell.id:
activeList.remove(entry)
#no more active spells
if len(activeList) == 0:
Co8PersistentData.removeData(MINOR_GLOBE_OF_INVULNERABILITY_KEY)
break
Co8PersistentData.setData(MINOR_GLOBE_OF_INVULNERABILITY_KEY, activeList)
break
def OnAreaOfEffectHit( spell ):
print "Minor Globe of Invulnerability OnAreaOfEffectHit"
def OnSpellStruck( spell ):
print "Minor Globe of Invulnerability OnSpellStruck"
| 36.616438
| 130
| 0.772166
|
143a057d69ff69d03d29b1f3ed410288ecb2b7b5
| 25,007
|
py
|
Python
|
tacoma/tools.py
|
benmaier/tacoma
|
c34ea0778236df9e3ac6e2961c1a21801f12a46f
|
[
"MIT"
] | 89
|
2018-09-19T19:08:22.000Z
|
2021-10-08T06:25:29.000Z
|
tacoma/tools.py
|
benmaier/tacoma
|
c34ea0778236df9e3ac6e2961c1a21801f12a46f
|
[
"MIT"
] | 12
|
2018-09-20T09:27:02.000Z
|
2021-03-19T20:20:13.000Z
|
tacoma/tools.py
|
benmaier/tacoma
|
c34ea0778236df9e3ac6e2961c1a21801f12a46f
|
[
"MIT"
] | 7
|
2018-12-09T17:42:17.000Z
|
2020-08-18T14:09:41.000Z
|
from math import log10
import json
import numpy as np
import tacoma as tc
from _tacoma import edge_changes as ec
from _tacoma import edge_lists as el
from _tacoma import edge_lists_with_histograms as el_h
from _tacoma import edge_changes_with_histograms as ec_h
from _tacoma import edge_trajectories  # needed for the type checks further below
from scipy.optimize import curve_fit
from scipy.stats import lognorm
from scipy.special import gamma as Gamma
from scipy.stats import weibull_min
from scipy.integrate import quad
from lmfit import minimize, Parameters
def complete_graph(N,tmax=1.0):
"""Get a single frame which consists of a complete network.
Parameters
----------
N : int
Number of nodes.
tmax : float, default : 1.0
Maximum time of the static network.
Returns
-------
:class:`_tacoma.edge_lists`
An instance of `tacoma.edge_lists` with t = [0.0], tmax = ``tmax``.
"""
edge_list = []
for i in range(N-1):
for j in range(i+1,N):
edge_list.append((i,j))
this = tc.edge_lists()
this.t = [0.]
this.tmax = tmax
this.edges = [edge_list]
this.N = N
return this
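# Illustrative sketch (added for clarity, not part of the original API): a
# four-node complete graph has a single frame containing all six possible edges.
def _example_complete_graph():
    net = complete_graph(4, tmax=2.0)
    assert net.N == 4
    assert len(net.edges[0]) == 4 * 3 // 2
    return net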
def convert_static_network(N,edge_list,tmax=1.0):
"""Get a single frame which consists of the static network.
Parameters
----------
N : int
Number of nodes.
edge_list : :obj:`list` of :obj:`tuple` of int
The edges of the static graph
tmax : double, default : 1.0
The maximum time until the network is looped.
Returns
-------
:class:`_tacoma.edge_lists`
An instance of `tacoma.edge_lists` with t = [0.0], tmax = ``tmax``.
"""
new_edge_list = []
for e in edge_list:
if e[0] > e[1]:
new_edge_list.append((e[1],e[0]))
elif e[1] > e[0]:
new_edge_list.append((e[0],e[1]))
this = tc.edge_lists()
this.t = [0.]
this.tmax = tmax
this.edges = [ new_edge_list ]
this.N = N
return this
def get_logarithmic_histogram(data,
bins, #number of bins
return_bin_means = True,
density = True):
"""Get a logarithmic histogram.
Parameters
----------
data : array-like
The data to bin.
bins : int
The number of bins.
return_bin_means : bool, default : True
return the geometric means of binning intervals,
otherwise return bin edges
density : bool, default : True
return probability density, if `False`, return counts
Returns
-------
x : numpy.ndarray
        Either the geometric means of the bin intervals or the bin edges
y : numpy.ndarray
Either probability density or counts.
"""
data = np.array(data)
data = data[data>0]
MIN = min(data)
MAX = max(data)
# check if bins is an array, if not, make an array
try:
bins[0]
except:
# if this fails, we assume `bins` is an integer
bins = np.logspace(log10(MIN), log10(MAX), bins+1,base=10.)
y, x = np.histogram(data,
bins = bins,
density = density,
)
new_x = np.sqrt(x[1:]*x[:-1])
if return_bin_means:
return new_x, y
else:
return x, y
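# Illustrative sketch (not part of the original module): logarithmic binning of
# heavy-tailed samples; the Pareto draw below is only a stand-in for real data.
def _example_logarithmic_histogram():
    samples = np.random.pareto(2.0, size=10000) + 1.0
    x, density = get_logarithmic_histogram(samples, bins=20)
    # x contains the geometric means of the bin edges, density the pdf estimate
    return x, density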
def group_size_histogram(group_sizes_and_durations):
"""Returns the mean number of groups of size `g`
(mean over both ensemble and time).
Parameters
==========
group_sizes_and_durations : :mod:`group_sizes_and_durations`
Result from the function :mod:`measure_group_sizes_and_durations`
Returns
=======
g : :obj:`numpy.ndarray` of float
Group sizes
N : :obj:`numpy.ndarray` of float
Mean number of groups of the corresponding size in `g`.
"""
group_size_histogram = np.array([
(size, val)\
for size, val in enumerate(group_sizes_and_durations.aggregated_size_histogram)\
if val > 0.
],dtype=float)
x_group, y_group = group_size_histogram[:,0], group_size_histogram[:,1]
return x_group, y_group
def mean_coordination_number(group_sizes_and_durations):
"""Returns the mean coordination number (mean over both ensemble and time).
Following the definition by Zhao, Stehle, Bianconi, Barrat,
the coordination number of node `i` is equal to the size of the group
it is part of.
Parameters
==========
group_sizes_and_durations : :mod:`group_sizes_and_durations`
Result from the function :mod:`measure_group_sizes_and_durations`
Returns
=======
mean_coordination_number : float
Temporal and ensemble mean of a node's group size.
"""
m, N_m = group_size_histogram(group_sizes_and_durations)
N = len( group_sizes_and_durations.aggregated_size_histogram) - 1
P_k = m * N_m / N # degree distribution for completely connected groups
return m.dot(P_k) - 1.0
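# Worked example (illustrative): for N = 6 nodes split into two groups of size
# m = 3 at all times, N_m = 2 for m = 3, hence P_3 = 3 * 2 / 6 = 1 and the mean
# coordination number is 3 * 1 - 1 = 2, i.e. each node is grouped with two others.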
def mean_number_of_groups(group_sizes_and_durations):
"""Returns the mean number of groups (mean over both ensemble and time).
Parameters
==========
group_sizes_and_durations : :mod:`group_sizes_and_durations`
Result from the function :mod:`measure_group_sizes_and_durations`
Returns
=======
mean_number_of_groups : float
Temporal and ensemble mean of the total number of groups
a network consists of at a certain time.
"""
m, N_m = group_size_histogram(group_sizes_and_durations)
return N_m.sum()
def mean_group_size(group_sizes_and_durations):
"""Returns the mean group size (mean over both ensemble and time).
Parameters
==========
group_sizes_and_durations : :mod:`group_sizes_and_durations`
Result from the function :mod:`measure_group_sizes_and_durations`
Returns
=======
mean_group_size : float
Temporal and ensemble mean of the group size of all groups
a network consists of at a certain time.
"""
N = len( group_sizes_and_durations.aggregated_size_histogram) - 1
c = mean_number_of_groups(group_sizes_and_durations)
return float(N) / c
def slow_mean_degree(temporal_network):
"""Returns the mean degree (mean over ensemble) but it takes
ages to compute. You should instead use :mod:`mean_degree`.
Parameters
==========
temporal_network : :mod:`edge_lists` or :mod:`edge_changes`
Returns
=======
    t : numpy.ndarray
        Times at which the mean degree changes.
    k : numpy.ndarray
        Mean degree of the network at the corresponding times.
"""
temporal_network = tc._get_raw_temporal_network(temporal_network)
if type(temporal_network) == tc.ec:
t = np.array([temporal_network.t0])
t = np.append(t,temporal_network.t)
m = [ len(temporal_network.edges_initial) ]
for i in range(len(temporal_network.edges_in)):
m_in = len(temporal_network.edges_in[i])
m_out = len(temporal_network.edges_out[i])
dm = m_in - m_out
m.append( m[-1] + dm )
m.append(m[-1])
t = np.append(t,temporal_network.tmax)
m = np.array(m, dtype=float)
k = 2.0*m / float(temporal_network.N)
elif type(temporal_network) == tc.el:
t = np.array(temporal_network.t)
m = [ ]
for i in range(len(temporal_network.edges)):
_m = len(temporal_network.edges[i])
m.append( _m )
m.append(m[-1])
t = np.append(t,temporal_network.tmax)
m = np.array(m, dtype=float)
k = 2.0*m / float(temporal_network.N)
else:
raise ValueError('Unknown temporal network format: ' + str(type(temporal_network)))
return t, k
def time_average(t,x,tmax=None):
r"""
Return a temporal average of an observable `x(t)`,
.. math::
\overline x = \frac{1}{t_\mathrm{max}-t_0}
\int\limits_{t_0}^{t_\mathrm{max}} dt\, x(t).
Where it's assumed that `x` changes as a step function, such that
.. math::
\overline x = \frac{1}{t_\mathrm{max}-t_0}
\left( (t_\mathrm{max} - t_{N_t-1}) x(t_{N_t-1}) +
\sum_{i=1}^{N_t-1} (t_{i}-t_{i-1})x(t_{i-1}) \right).
Parameters
----------
t : numpy.ndarray
Times at which `x` changes
x : numpy.ndarray
Value of the observable at the corresponding times.
tmax : float, default : None
If this is None, the integral is computed until
time t[-1], otherwise it's evaluated until tmax.
Returns
-------
average_x : float
The temporal average of `x`.
"""
if len(t) != len(x):
raise ValueError("t and x must have the same shape")
if tmax is not None:
t = np.append(t,tmax)
x = np.append(x,x[-1])
dt = t[1:] - t[:-1]
sum_dt = dt.sum()
return dt.dot(x[:-1]) / sum_dt
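# Illustrative sketch (not part of the original module): a step function that
# is 1.0 for the first half of the observation window and 0.0 afterwards
# averages to 0.5.
def _example_time_average():
    t = np.array([0.0, 5.0])
    x = np.array([1.0, 0.0])
    return time_average(t, x, tmax=10.0)  # -> 0.5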
def time_RMSE(t,x1,x2,tmax=None):
r"""
Get the root mean squared error (RMSE) of two observables
over the same time. Combine with :func:`tacoma.tools.sample_a_function`.
.. math::
\mathrm{RMSE} = \left(
\frac{1}{t_\mathrm{max}-t_0}
\int\limits_{t_0}^{t_\mathrm{max}} dt\,
\left( x_1(t) - x_2(t) \right)^2.
\right)^{1/2}
Parameters
----------
t : numpy.ndarray
times at which ``x1`` and ``x2`` change
x1 : numpy.ndarray
The first observable to compute the RMSE.
x2 : numpy.ndarray
The second observable to compute the RMSE.
tmax : float, default : None
If this is None, the integral is computed until
time t[-1], otherwise it's evaluated until tmax.
Returns
-------
RMSE : float
The root mean squared error.
"""
if len(t) != len(x1):
raise ValueError("t and x1 must have the same shape")
if len(t) != len(x2):
raise ValueError("t and x2 must have the same shape")
if tmax is not None:
t = np.append(t,tmax)
x1 = np.append(x1,x1[-1])
x2 = np.append(x2,x2[-1])
return np.sqrt(time_average(t,(x1-x2)**2))
def bin_a_function(x,y,bins,mode='mean'):
r"""
Bin a step function :math:`y(t)` over time bins. Binning can be done using
either ``numpy.sum`` or ``numpy.mean``.
Parameters
----------
x : numpy.ndarray
domain values at which `y` changes
y : numpy.ndarray
        values of the observable at the corresponding domain values
bins : numpy.ndarray
Bin to those bin edges
mode : string, default : mean
Build either a ``mean`` over the bins or a ``sum``.
Returns
-------
new_y : numpy.ndarray
binned observable
"""
if mode=='mean':
cumfunc = np.mean
elif mode=='sum':
cumfunc = np.sum
indices = np.searchsorted(x,bins)
new_y = [0]
for i in range(1,len(indices)):
if indices[i-1] == indices[i]:
_y = 0
else:
_y = cumfunc(y[indices[i-1]:indices[i]])
new_y.append( _y )
return new_y
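# Illustrative sketch (not part of the original module): summing the values of
# a step function into two coarse bins.
def _example_bin_a_function():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([1, 2, 3, 4])
    return bin_a_function(x, y, bins=[0.0, 2.0, 4.0], mode='sum')  # -> [0, 3, 7]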
def sample_a_function(t,y,time_points,sample_width=0):
r"""
Sample an observable :math:`y` which is a step function
and changes at corresponding values :math:`t`.
Parameters
----------
t : numpy.ndarray
domain values at which `y` changes
y : numpy.ndarray
observable to sample
time_points : numpy.ndarray
time points for which to sample.
sample_width : float, default : 0.0
sample as a temporal average over [t-sample_width/2, t+sample_width/2].
Returns
-------
new_y : numpy.ndarray
values of `y` sampled at `time_points`.
"""
x = t
new_y = []
for bin in time_points:
if sample_width > 0:
indices = np.searchsorted(x,[bin-sample_width/2., bin+sample_width/2.])
dt = x[(indices[0]+1):indices[1]] - x[indices[0]:(indices[1]-1)]
y_ = y[(indices[0]+1):indices[1]]
val = np.dot(dt,y_)
norm = np.sum(dt)
#x_ = x[(indices[0]+1):indices[1]]
#y_ = y[indices[0]:indices[1]]
#val = np.trapz(y=y_,x=x_)
#norm = np.trapz(y=np.ones_like(x_),x=x_)
new_y.append( val/norm)
else:
indices = np.searchsorted(x,[bin])
if indices[0] == 0:
this_index = 0
elif indices[0] == len(x):
this_index = -1
else:
this_index = indices[0]-1
new_y.append(y[this_index])
return np.array(new_y)
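# Illustrative sketch (not part of the original module): sampling a step
# function at arbitrary time points without temporal averaging.
def _example_sample_a_function():
    t = np.array([0.0, 1.0, 2.0])
    y = np.array([10.0, 20.0, 30.0])
    return sample_a_function(t, y, time_points=[0.5, 1.5, 2.5])  # -> [10., 20., 30.]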
def rescale_time(temporal_network, new_t0, new_tmax):
"""Rescale the time in this temporal network (inplace).
Parameters
==========
temporal_network : :mod:`edge_lists` or :mod:`edge_changes`
new_t0 : float
The new value of t0.
new_tmax : float
The new value of tmax.
Returns
=======
temporal_network : :mod:`edge_lists` or :mod:`edge_changes`
Same instance as input.
"""
if hasattr(temporal_network,'t0'):
this_t0 = temporal_network.t0
temporal_network.t0 = new_t0
else:
this_t0 = temporal_network.t[0]
this_T = temporal_network.tmax - this_t0
new_T = new_tmax - new_t0
temporal_network.t = [ (t - this_t0) / this_T * new_T + new_t0 for t in temporal_network.t ]
temporal_network.tmax = new_tmax
return temporal_network
def contact_coverage(temporal_network):
"""Get the total number of discovered unique edges C(t), i.e. the contact coverage.
Parameters
==========
temporal_network : :class:`_tacoma.edge_trajectories`, :class:`_tacoma.edge_lists`, :class:`_tacoma.edge_changes` or :obj:`list` of :class:`_tacoma.edge_trajectory_entry`
Returns
=======
t : numpy.ndarray
Time points at which new edges have been discovered
C : numpy.ndarray
total number of edges discovered up to time t.
"""
if type(temporal_network) in [ ec, el, el_h, ec_h ]:
traj = tc.get_edge_trajectories(temporal_network)
elif type(temporal_network) == list and type(temporal_network[0]) == tc.edge_trajectory_entry:
traj = temporal_network
elif type(temporal_network) == edge_trajectories:
traj = temporal_network.trajectories
else:
raise ValueError("Unknown type for temporal network:", type(temporal_network))
t = []
count = []
for iedge, entry in enumerate(traj):
t0 = entry.time_pairs[0][0]
if len(t)>0:
if t[-1] == t0:
count[-1] = iedge+1
else:
count.append(iedge+1)
t.append(t0)
else:
count.append(iedge+1)
t.append(t0)
return np.array(t), np.array(count,dtype=float)
def get_edge_probability_and_rate(temporal_network):
r"""
For each edge compute the probability that it is active and the rate
with which it is activated.
Parameters
==========
temporal_network : :class:`_tacoma.edge_lists`, :class:`_tacoma.edge_changes` or :class:`_tacoma.edge_trajectories`
Returns
=======
p : numpy.ndarray
The probability to be switched on for each observed edge of the network
(the remaining un-observed edges have probability p = 0).
omega : numpy.ndarray
The rate with which the observed edges are switched on
:math:`\omega = \left(\frac{1}{\tau^+} + \frac{1}{\tau^{-}}\right)^{-1}`
(the remaining un-observed edges have rate :math:`\omega = 0`).
"""
if type(temporal_network) in [ ec, el, el_h, ec_h ]:
traj = tc.get_edge_trajectories(temporal_network)
tmax = temporal_network.tmax
if type(temporal_network) in [ ec, ec_h ]:
t0 = temporal_network.t0
else:
t0 = temporal_network.t[0]
elif type(temporal_network) == edge_trajectories:
traj = temporal_network.trajectories
tmax = temporal_network.tmax
t0 = temporal_network.t0
else:
raise ValueError("Unknown type for temporal network:", type(temporal_network))
T = tmax - t0
connection_probability = np.empty(len(traj))
activity_rate = np.empty(len(traj))
for iedge, entry in enumerate(traj):
N_events = len(entry.time_pairs)
if entry.time_pairs[0][0] == t0:
N_events -= 1
activity_rate[iedge] = N_events / T
t_on = 0.0
for interval in entry.time_pairs:
t_on += interval[1] - interval[0]
connection_probability[iedge] = t_on / T
return connection_probability, activity_rate
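# Illustrative sketch (not part of the original module): a two-node network in
# which the single possible edge is active during the first half of the
# observation window has a connection probability of about 0.5 for that edge.
def _example_edge_probability_and_rate():
    net = tc.edge_lists()
    net.N = 2
    net.t = [0.0, 1.0]
    net.tmax = 2.0
    net.edges = [[(0, 1)], []]
    return get_edge_probability_and_rate(net)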
def get_edge_life_times(temporal_network):
r"""
    Compute the life times (durations of individual edge activations) of all
    edges in the temporal network.
Parameters
==========
temporal_network : :class:`_tacoma.edge_lists`, :class:`_tacoma.edge_changes` or :class:`_tacoma.edge_trajectories`
Returns
=======
    life_times : numpy.ndarray
        Durations of all edge activations, excluding activations that are
        already present at the beginning of the observation window.
"""
if type(temporal_network) in [ ec, el, el_h, ec_h ]:
traj = tc.get_edge_trajectories(temporal_network)
tmax = temporal_network.tmax
if type(temporal_network) in [ ec, ec_h ]:
t0 = temporal_network.t0
else:
t0 = temporal_network.t[0]
elif type(temporal_network) == edge_trajectories:
traj = temporal_network.trajectories
tmax = temporal_network.tmax
t0 = temporal_network.t0
else:
raise ValueError("Unknown type for temporal network:", type(temporal_network))
T = tmax - t0
connection_probability = np.empty(len(traj))
activity_rate = np.empty(len(traj))
life_times = []
for iedge, entry in enumerate(traj):
for pair in entry.time_pairs:
if pair[0] != t0 and pair[0] != tmax:
life_times.append(pair[1]-pair[0])
return np.array(life_times)
def get_reduced_time(t, intervals_to_discard):
"""
Reduce the provided time in a way that intervals
are cut out.
Parameters
----------
t : numpy.ndarray
time array to reduce
intervals_to_discard : list of tuple of float
The time intervals which have to be discarded.
Returns
-------
new_t : numpy.ndarray
        Reduced time, with the same shape as ``t``. Times that fall inside
        a discarded interval are remapped to the beginning of that interval.
"""
x_ = t.copy()
offset = 0.0
for interval in intervals_to_discard:
t0, t1 = interval
x_[np.logical_and(t>=t0, t<t1)] = t0 - offset
x_[t>=t1] -= t1 - t0
offset += t1 - t0
return x_
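# Illustrative sketch (not part of the original module): discarding the
# interval [2, 4) clamps times inside it to 2 and shifts later times back.
def _example_get_reduced_time():
    t = np.array([0.0, 1.0, 3.0, 5.0])
    return get_reduced_time(t, [(2.0, 4.0)])  # -> array([0., 1., 2., 3.])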
def fit_contact_coverage(N, time, contact_coverage, intervals_to_discard_for_fit=[], kind='gamma'):
"""
Fit the contact coverage :math:`C(t)` to an edge activity rate distribution of kind ``kind``.
Parameters
----------
N : int
Number of nodes.
time : numpy.ndarray
        Time points at which the contact coverage changed.
contact_coverage : numpy.ndarray
Contact coverage at the corresponding times in ``time``.
intervals_to_discard_for_fit : list of tuple of float
a list of time intervals which have to be discarded for the fit
kind : string, default : 'gamma'
Rate distribution model for fit. Can be ``gamma``, ``exponential``,
``uniform``, ``normal``, ``delta``, ``power-law``.
Returns
-------
fit_function : function
The fit function
popt : tuple of float
The found optimal parameters to pass to ``fit_function``
pstd : tuple of float
standard uncertainty of the parameters
"""
fac = N*(N-1)/2.
edge_count = contact_coverage
if kind == 'gamma':
fit = lambda x, alpha, scale: fac * (1 - (scale/(scale + get_reduced_time(x, intervals_to_discard_for_fit)) )**alpha)
popt, pcov = curve_fit(fit, time, edge_count,[0.5,10.0],maxfev=10000)
elif kind == 'exponential':
alpha = 1.0
fit = lambda x, scale: fac * (1 - (scale/(scale + get_reduced_time(x, intervals_to_discard_for_fit)) )**alpha)
popt, pcov = curve_fit(fit, time, edge_count,[10.0],maxfev=10000)
elif kind == 'uniform':
def fit(x, alpha):
t = get_reduced_time(x, intervals_to_discard_for_fit)
result = fac * (1 - (1-np.exp(-alpha*t)) / (alpha * t))
result[t==0] = 0
return result
popt, pcov = curve_fit(fit, time, edge_count,[0.1],maxfev=10000)
elif kind == 'normal':
fit = lambda x, mean, sigma: fac * (1 - np.exp(-mean* get_reduced_time(x, intervals_to_discard_for_fit)+sigma**2/2.0*get_reduced_time(x, intervals_to_discard_for_fit)**2))
popt, pcov = curve_fit(fit, time, edge_count,[0.5,0.01],maxfev=10000)
elif kind == 'delta':
fit = lambda x, scale: fac * (1 - np.exp(-scale * get_reduced_time(x, intervals_to_discard_for_fit)))
popt, pcov = curve_fit(fit, time, edge_count,[0.5],maxfev=10000)
elif kind == 'power-law':
def fit(x,alpha, xmin,xmax,time_is_reduced = False):
#print(alpha, xmin, xmax)
A = (1-alpha) / (xmax**(1-alpha) - xmin**(1-alpha))
if time_is_reduced:
t = x
else:
t = get_reduced_time(x, intervals_to_discard_for_fit)
x_ = np.logspace(np.math.log10(xmin),np.math.log10(xmax),100)
#observable = x_[None,:]**(-alpha)*np.exp(-t[:,None]*x_[None,:])
#print(observable.shape)
#M = np.trapz(observable, x = x_[None,:], axis=0,)
#print(M.shape)
M = np.array([ np.trapz(x_**(-alpha)*np.exp(-t_*x_), x = x_) for t_ in t])
#M = np.array([ quad(lambda x: A * x**(-alpha)*np.exp(-t_*x), xmin,xmax)[0] for t_ in t ])
return fac * (1-M)
def residual(params, x, time_is_reduced=True):
alpha, xmin, xmax = params['alpha'].value, params['xmin'].value, params['xmax'].value
#print(alpha, xmin, xmax)
return fit(x, alpha, xmin, xmax,time_is_reduced) - edge_count
reduced_time = get_reduced_time(time, intervals_to_discard_for_fit)
params = Parameters()
params.add('alpha',value = 1.0, min=1e-16)
params.add('xmin',value = 1e-5, min=1e-16)
params.add('xmax',value = 10, min=1e-16)
out = minimize(residual, params, args = (reduced_time,))
popt = (out.params['alpha'].value, out.params['xmin'].value, out.params['xmax'].value)
#print(help(out))
#print(out.pretty_print())
# TODO: once lmfit is fixed, this has to change to out.covar
pcov = np.zeros(len(popt))
#popt, pcov = curve_fit(fit, time, edge_count,[1,1e-10,10],maxfev=10000)
# elif kind == 'weibull':
# #def fit(x, k, omega_c):
# # nmax = 18
# # moments = np.array([ omega_c**n * Gamma(1+n/k) for n in range(nmax+1) ] )
# # def M(t_,nmax):
# # return np.sum(np.array([ t_**n * moments[n] / np.math.factorial(n) for n in range(nmax+1)]), axis=0)
# #
# # return fac * (1-M(-t,nmax))
# def fit(t_, k, omega_c):
# eps = 1e-16
# integrand = lambda x, t : weibull_min(k,scale=omega_c).pdf(x) * np.exp(-t*x)
# t = get_reduced_time(t_, intervals_to_discard_for_fit)
# return np.array([quad(integrand,eps,np.inf,args=(t_,)) for this_t in t])
#
# popt, pcov = curve_fit(fit, time, edge_count,[0.5, 0.01],maxfev=10000)
# elif kind == 'lognormal':
#
#
# def fit(x, mu, sigma):
# nmax = 15
# moments = np.array([ np.exp(n*mu + n**2*sigma**2 / 2.0 for n in range(nmax+1) ))
# def M(t_,nmax):
# pass
#
#
# t = get_reduced_time(x, intervals_to_discard_for_fit)
#
# pdf = lognorm(sigma,scale=np.exp(mu)).pdf
# #weights = lognorm.rvs(sigma,scale=np.exp(mu),size=N*(N-1)//2)
# #print(weights)
# return fac * ( 1.0 - np.array([np.mean(np.exp(-weights*x_)) for x_ in x]))
#
# popt, pcov = curve_fit(fit, time, edge_count,[0.5,0.5],maxfev=10000)
else:
raise ValueError('Unknown fit function:', kind)
#popt, pcov = curve_fit(fit, fit_x, fit_y,[1./fac,fac,10.0],maxfev=10000)
#popt, pcov = curve_fit(fit, fit_x, fit_y,[2,fac,10.0],maxfev=10000)
return fit, popt, np.sqrt(np.diag(pcov))
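# Illustrative usage sketch (not part of the original module); `tn` is assumed
# to be a tacoma temporal network (edge_lists or edge_changes) with N nodes.
def _example_fit_contact_coverage(tn):
    t, C = contact_coverage(tn)
    fit, popt, pstd = fit_contact_coverage(tn.N, t, C, kind='gamma')
    return fit(t, *popt), popt, pstd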
| 31.026055
| 179
| 0.593394
|
94e8db2be96fd1e6d69039d9e9c13143cba55184
| 6,984
|
py
|
Python
|
vnpy/trader/optimize.py
|
lanchengjiu/vnpy
|
db3b8d81f5c5da16eb6ca89c2b3b08f2aa55db66
|
[
"MIT"
] | null | null | null |
vnpy/trader/optimize.py
|
lanchengjiu/vnpy
|
db3b8d81f5c5da16eb6ca89c2b3b08f2aa55db66
|
[
"MIT"
] | null | null | null |
vnpy/trader/optimize.py
|
lanchengjiu/vnpy
|
db3b8d81f5c5da16eb6ca89c2b3b08f2aa55db66
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Callable, Tuple
from itertools import product
from concurrent.futures import ProcessPoolExecutor
from random import random, choice
from time import perf_counter
from multiprocessing import Manager, Pool, get_context
from decimal import Decimal
from deap import creator, base, tools, algorithms
OUTPUT_FUNC = Callable[[str], None]
EVALUATE_FUNC = Callable[[dict], dict]
KEY_FUNC = Callable[[list], float]
# Create individual class used in genetic algorithm optimization
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
class OptimizationSetting:
"""
    Setting for running optimization.
"""
def __init__(self) -> None:
""""""
self.params: Dict[str, List] = {}
self.target_name: str = ""
def add_parameter(
self,
name: str,
start: float,
end: float = None,
step: float = None
) -> Tuple[bool, str]:
""""""
if isinstance(start, list):
self.params[name] = start
return True, "列表参数添加成功"
if end is None and step is None or start == end:
self.params[name] = [start]
return True, "固定参数添加成功"
if start >= end:
return False, "参数优化起始点必须小于终止点"
if step <= 0:
return False, "参数优化步进必须大于0"
decimal_value = Decimal(str(start))
decimal_end = Decimal(str(end))
decimal_step = Decimal(str(step))
value_type = int if isinstance(start, int) and isinstance(step, int) else float
value_list = []
while decimal_value <= decimal_end:
value_list.append(value_type(decimal_value))
decimal_value += decimal_step
self.params[name] = value_list
return True, f"范围参数添加成功,数量{len(value_list)}"
def set_target(self, target_name: str) -> None:
""""""
self.target_name = target_name
def generate_settings(self) -> List[dict]:
""""""
keys = self.params.keys()
values = self.params.values()
products = list(product(*values))
settings = []
for p in products:
setting = dict(zip(keys, p))
settings.append(setting)
return settings
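# Illustrative sketch (not part of the original module); the parameter and
# target names below are placeholders for real strategy settings.
def _example_optimization_setting() -> List[dict]:
    setting = OptimizationSetting()
    setting.set_target("sharpe_ratio")
    setting.add_parameter("fast_window", 5, 15, 5)      # -> [5, 10, 15]
    setting.add_parameter("slow_window", 20, 40, 10)    # -> [20, 30, 40]
    return setting.generate_settings()                  # 3 * 3 = 9 combinations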
def check_optimization_setting(
optimization_setting: OptimizationSetting,
output: OUTPUT_FUNC = print
) -> bool:
""""""
if not optimization_setting.generate_settings():
output("优化参数组合为空,请检查")
return False
if not optimization_setting.target_name:
output("优化目标未设置,请检查")
return False
return True
def run_bf_optimization(
evaluate_func: EVALUATE_FUNC,
optimization_setting: OptimizationSetting,
key_func: KEY_FUNC,
max_workers: int = None,
output: OUTPUT_FUNC = print
) -> List[Tuple]:
"""Run brutal force optimization"""
settings: List[Dict] = optimization_setting.generate_settings()
output(f"开始执行穷举算法优化")
output(f"参数优化空间:{len(settings)}")
start: int = perf_counter()
with ProcessPoolExecutor(
max_workers,
mp_context=get_context("spawn")
) as executor:
results: List[Tuple] = list(executor.map(evaluate_func, settings))
results.sort(reverse=True, key=key_func)
end: int = perf_counter()
cost: int = int((end - start))
output(f"穷举算法优化完成,耗时{cost}秒")
return results
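# Illustrative sketch (not part of the original module): the evaluate and key
# functions below are placeholders for a real backtest evaluation and ranking
# key; they are defined at module level so the process pool can pickle them.
def _demo_evaluate(setting: dict) -> dict:
    return setting
def _demo_key(result) -> float:
    return 0.0
def _example_run_bf_optimization(optimization_setting: OptimizationSetting) -> List[Tuple]:
    return run_bf_optimization(_demo_evaluate, optimization_setting, _demo_key, max_workers=2)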
def run_ga_optimization(
evaluate_func: EVALUATE_FUNC,
optimization_setting: OptimizationSetting,
key_func: KEY_FUNC,
max_workers: int = None,
population_size: int = 100,
ngen_size: int = 30,
output: OUTPUT_FUNC = print
) -> List[Tuple]:
"""Run genetic algorithm optimization"""
    # Define functions to generate parameters randomly
buf: List[Dict] = optimization_setting.generate_settings()
settings: List[Tuple] = [list(d.items()) for d in buf]
def generate_parameter() -> list:
""""""
return choice(settings)
def mutate_individual(individual: list, indpb: float) -> tuple:
""""""
size = len(individual)
paramlist = generate_parameter()
for i in range(size):
if random() < indpb:
individual[i] = paramlist[i]
return individual,
# Set up multiprocessing Pool and Manager
with Manager() as manager, Pool(max_workers) as pool:
# Create shared dict for result cache
cache: Dict[Tuple, Tuple] = manager.dict()
# Set up toolbox
toolbox = base.Toolbox()
toolbox.register("individual", tools.initIterate, creator.Individual, generate_parameter)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", mutate_individual, indpb=1)
toolbox.register("select", tools.selNSGA2)
toolbox.register("map", pool.map)
toolbox.register(
"evaluate",
ga_evaluate,
cache,
evaluate_func,
key_func
)
total_size: int = len(settings)
pop_size: int = population_size # number of individuals in each generation
lambda_: int = pop_size # number of children to produce at each generation
mu: int = int(pop_size * 0.8) # number of individuals to select for the next generation
cxpb: float = 0.95 # probability that an offspring is produced by crossover
mutpb: float = 1 - cxpb # probability that an offspring is produced by mutation
ngen: int = ngen_size # number of generation
pop: list = toolbox.population(pop_size)
# Run ga optimization
output(f"开始执行遗传算法优化")
output(f"参数优化空间:{total_size}")
output(f"每代族群总数:{pop_size}")
output(f"优良筛选个数:{mu}")
output(f"迭代次数:{ngen}")
output(f"交叉概率:{cxpb:.0%}")
output(f"突变概率:{mutpb:.0%}")
start: int = perf_counter()
algorithms.eaMuPlusLambda(
pop,
toolbox,
mu,
lambda_,
cxpb,
mutpb,
ngen,
verbose=False
)
end: int = perf_counter()
cost: int = int((end - start))
output(f"遗传算法优化完成,耗时{cost}秒")
results: list = list(cache.values())
results.sort(reverse=True, key=key_func)
return results
def ga_evaluate(
cache: dict,
evaluate_func: callable,
key_func: callable,
parameters: list
) -> float:
"""
Functions to be run in genetic algorithm optimization.
"""
tp: tuple = tuple(parameters)
if tp in cache:
result: tuple = cache[tp]
else:
setting: dict = dict(parameters)
result: dict = evaluate_func(setting)
cache[tp] = result
value: float = key_func(result)
return (value, )
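# Illustrative sketch (not part of the original module): the cache key used in
# ga_evaluate is simply the tuple form of the (name, value) parameter list,
# which is hashable and therefore usable as a dictionary key.
def _example_cache_key() -> tuple:
    parameters = [("fast_window", 5), ("slow_window", 20)]
    return tuple(parameters)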
| 28.979253
| 118
| 0.609393
|
1938307d37aa6fe55b018a2eb22602a6af53cbbf
| 4,336
|
py
|
Python
|
nuitka/codegen/LoaderCodes.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | null | null | null |
nuitka/codegen/LoaderCodes.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | 1
|
2019-03-01T11:33:40.000Z
|
2019-03-01T11:33:40.000Z
|
nuitka/codegen/LoaderCodes.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | 1
|
2019-03-26T16:56:21.000Z
|
2019-03-26T16:56:21.000Z
|
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Code to generate and interact with module loaders.
This is for generating the look-up table for the modules included in a binary
or distribution folder.
"""
from nuitka.ModuleRegistry import getUncompiledNonTechnicalModules
from . import ConstantCodes
from .Indentation import indented
from .templates.CodeTemplatesLoader import (
template_metapath_loader_body,
template_metapath_loader_bytecode_module_entry,
template_metapath_loader_compiled_module_entry,
template_metapath_loader_compiled_package_entry,
template_metapath_loader_shlib_module_entry,
)
def getModuleMetapathLoaderEntryCode(
module_name, module_identifier, is_shlib, is_package
):
if is_shlib:
assert module_name != "__main__"
assert not is_package
return template_metapath_loader_shlib_module_entry % {
"module_name": module_name
}
elif is_package:
return template_metapath_loader_compiled_package_entry % {
"module_name": module_name,
"module_identifier": module_identifier,
}
else:
return template_metapath_loader_compiled_module_entry % {
"module_name": module_name,
"module_identifier": module_identifier,
}
stream_data = ConstantCodes.stream_data
def getMetapathLoaderBodyCode(other_modules):
metapath_loader_inittab = []
metapath_module_decls = []
for other_module in other_modules:
if other_module.isUncompiledPythonModule():
code_data = other_module.getByteCode()
is_package = other_module.isUncompiledPythonPackage()
flags = ["NUITKA_BYTECODE_FLAG"]
if is_package:
flags.append("NUITKA_PACKAGE_FLAG")
metapath_loader_inittab.append(
template_metapath_loader_bytecode_module_entry
% {
"module_name": other_module.getFullName(),
"bytecode": stream_data.getStreamDataOffset(code_data),
"size": len(code_data),
"flags": " | ".join(flags),
}
)
else:
metapath_loader_inittab.append(
getModuleMetapathLoaderEntryCode(
module_name=other_module.getFullName(),
module_identifier=other_module.getCodeName(),
is_shlib=other_module.isPythonShlibModule(),
is_package=other_module.isCompiledPythonPackage(),
)
)
if other_module.isCompiledPythonModule():
metapath_module_decls.append(
"MOD_INIT_DECL( %s );" % other_module.getCodeName()
)
for uncompiled_module in getUncompiledNonTechnicalModules():
code_data = uncompiled_module.getByteCode()
is_package = uncompiled_module.isUncompiledPythonPackage()
flags = ["NUITKA_BYTECODE_FLAG"]
if is_package:
flags.append("NUITKA_PACKAGE_FLAG")
metapath_loader_inittab.append(
template_metapath_loader_bytecode_module_entry
% {
"module_name": uncompiled_module.getFullName(),
"bytecode": stream_data.getStreamDataOffset(code_data),
"size": len(code_data),
"flags": " | ".join(flags),
}
)
return template_metapath_loader_body % {
"metapath_module_decls": indented(metapath_module_decls, 0),
"metapath_loader_inittab": indented(metapath_loader_inittab),
}
| 35.540984
| 78
| 0.6619
|
9571126f36283075c946000e425b039186b58383
| 1,494
|
py
|
Python
|
src/sentry/security/emails.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | 1
|
2019-05-28T06:18:03.000Z
|
2019-05-28T06:18:03.000Z
|
src/sentry/security/emails.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | 6
|
2018-10-19T10:04:23.000Z
|
2019-12-09T20:29:12.000Z
|
src/sentry/security/emails.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function
from django.utils import timezone
from sentry.utils.email import MessageBuilder
def generate_security_email(account, type, actor, ip_address, context=None, current_datetime=None):
if current_datetime is None:
current_datetime = timezone.now()
subject = 'Security settings changed'
if type == 'mfa-removed':
assert 'authenticator' in context
template = 'sentry/emails/mfa-removed.txt'
html_template = 'sentry/emails/mfa-removed.html'
elif type == 'mfa-added':
assert 'authenticator' in context
template = 'sentry/emails/mfa-added.txt'
html_template = 'sentry/emails/mfa-added.html'
elif type == 'password-changed':
template = 'sentry/emails/password-changed.txt'
html_template = 'sentry/emails/password-changed.html'
elif type == 'recovery-codes-regenerated':
template = 'sentry/emails/recovery-codes-regenerated.txt'
html_template = 'sentry/emails/recovery-codes-regenerated.html'
else:
raise ValueError('unknown type: {}'.format(type))
new_context = {
'account': account,
'actor': actor,
'ip_address': ip_address,
'datetime': current_datetime,
}
if context:
new_context.update(context)
return MessageBuilder(
subject=subject,
context=new_context,
template=template,
html_template=html_template,
type=type
)
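# Illustrative usage sketch (not part of the original module); `user`,
# `authenticator` and `remote_addr` stand in for a real account, Authenticator
# instance and client IP taken from the request.
def _example_mfa_added_email(user, authenticator, remote_addr):
    return generate_security_email(
        account=user,
        type='mfa-added',
        actor=user,
        ip_address=remote_addr,
        context={'authenticator': authenticator},
    )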
| 32.478261
| 99
| 0.670013
|
825291feb09758aa141b1cad46c67f89da369652
| 2,589
|
py
|
Python
|
var/spack/repos/builtin/packages/gdb/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/gdb/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-06T19:11:46.000Z
|
2018-07-06T19:12:28.000Z
|
var/spack/repos/builtin/packages/gdb/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-03-06T11:04:37.000Z
|
2020-03-06T11:04:37.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gdb(AutotoolsPackage, GNUMirrorPackage):
"""GDB, the GNU Project debugger, allows you to see what is going on
'inside' another program while it executes -- or what another
program was doing at the moment it crashed.
"""
homepage = "https://www.gnu.org/software/gdb"
gnu_mirror_path = "gdb/gdb-7.10.tar.gz"
version('9.1', sha256='fcda54d4f35bc53fb24b50009a71ca98410d71ff2620942e3c829a7f5d614252')
version('8.3.1', sha256='26ce655216cd03f4611518a7a1c31d80ec8e884c16715e9ba8b436822e51434b')
version('8.3', sha256='b2266ec592440d0eec18ee1790f8558b3b8a2845b76cc83a872e39b501ce8a28')
version('8.2.1', sha256='0107985f1edb8dddef6cdd68a4f4e419f5fec0f488cc204f0b7d482c0c6c9282')
version('8.2', sha256='847e4b65e5a7b872e86019dd59659029e2b06cae962e0ef345f169dcb4b851b8')
version('8.1', sha256='e54a2322da050e4b00785370a282b9b8f0b25861ec7cfbbce0115e253eea910e')
version('8.0.1', sha256='52017d33cab5b6a92455a1a904046d075357abf24153470178c0aadca2d479c5')
version('8.0', sha256='8968a19e14e176ee026f0ca777657c43456514ad41bb2bc7273e8c4219555ac9')
version('7.12.1', sha256='142057eacecfb929d52b561eb47a1103c7d504cec3f659dd8a5ae7bc378f7e77')
version('7.11', sha256='9382f5534aa0754169e1e09b5f1a3b77d1fa8c59c1e57617e06af37cb29c669a')
version('7.10.1', sha256='ff14f8050e6484508c73cbfa63731e57901478490ca1672dc0b5e2b03f6af622')
version('7.10', sha256='50690e6d6b7917a6544190ec9401eaafb555e3cef8981709ea9870296c383ce5')
version('7.9.1', sha256='4994ad986726ac4128a6f1bd8020cd672e9a92aa76b80736563ef992992764ef')
version('7.9', sha256='d282508cb7df0cb8b2cf659032ce1bede7b5725796e3ac90f3cd9d65844a65f2')
version('7.8.2', sha256='fd9a9784ca24528aac8a4e6b8d7ae7e8cf0784e128cd67a185c986deaf6b9929')
variant('python', default=True, description='Compile with Python support')
variant('xz', default=True, description='Compile with lzma support')
# Required dependency
depends_on('texinfo', type='build')
# Optional dependencies
depends_on('python', when='+python')
depends_on('xz', when='+xz')
build_directory = 'spack-build'
def configure_args(self):
args = []
if '+python' in self.spec:
args.append('--with-python')
args.append('LDFLAGS={0}'.format(
self.spec['python'].libs.ld_flags))
return args
| 48.849057
| 96
| 0.760525
|
226f9aa206dcfca89f0b6df0ed18bcff735b39d8
| 5,949
|
py
|
Python
|
hopper/utils/git/watcher.py
|
Xilinx/hopper
|
77b35f0744cde4608fe051ec41535ca0d2ca01c6
|
[
"MIT"
] | 2
|
2015-09-06T02:31:34.000Z
|
2017-08-04T07:13:19.000Z
|
hopper/utils/git/watcher.py
|
Xilinx/hopper
|
77b35f0744cde4608fe051ec41535ca0d2ca01c6
|
[
"MIT"
] | null | null | null |
hopper/utils/git/watcher.py
|
Xilinx/hopper
|
77b35f0744cde4608fe051ec41535ca0d2ca01c6
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import urlparse
import shutil
import datetime
from hopper.utils.logger import *
import hopper.utils.git.tasks
import hopper.utils.git.repo
import hopper.source.meta
import threading
class Watcher:
class GroupState:
def __init__(self, layers):
self.layers = layers
self.refstate = {}
def getRefPairs(self):
pairs = []
for i in self.layers:
if i.source and isinstance(i.source, hopper.source.meta.GitSource):
if i.source.canFetch():
# TODO: handle ref/tag/commit properly below
pairs.append((i.source.remote, "refs/heads/" + i.source.ref))
return pairs
def filterPairs(self, remoterefs):
filteredrefs = {}
for p in self.getRefPairs():
if p[0] in remoterefs:
for i in remoterefs[p[0]].iteritems():
if i[0] == p[1]:
if p[0] not in filteredrefs:
filteredrefs[p[0]] = {}
filteredrefs[p[0]][i[0]] = i[1]
return filteredrefs
def update(self, remoterefs, trigger = False):
rrefs = self.filterPairs(remoterefs)
pairs = self.getRefPairs()
changed = False
oldrefstate = self.refstate
newrefstate = {}
for i in pairs:
if i[0] in rrefs:
if i[1] in rrefs[i[0]]:
newcommit = rrefs[i[0]][i[1]]
if i[0] not in newrefstate:
newrefstate[i[0]] = {}
newrefstate[i[0]][i[1]] = newcommit
log("remote: %s, ref: %s, value = %s" % (i[0], i[1], newcommit))
if trigger:
changed = True
if oldrefstate != None:
if i[0] in oldrefstate and i[1] in oldrefstate[i[0]]:
if newrefstate[i[0]][i[1]] != oldrefstate[i[0]][i[1]]:
changed = True
self.refstate = newrefstate
return changed
def cloneRefPin(self, remoterefs):
filtered = self.filterPairs(remoterefs)
# create layers that match the layers object, fill in pinned refs
pinnedlayers = hopper.source.meta.LayerCollection(self.layers.defaultversion)
for i in self.layers:
if isinstance(i.source, hopper.source.meta.GitSource):
				# TODO: fix up picking of the ref name
refname = "refs/heads/" + i.source.ref
refpin = None
if i.source.remote in filtered:
refs = filtered[i.source.remote]
if refname in refs:
refpin = refs[refname]
newsource = hopper.source.meta.GitSource(i.source.remote, refpin)
else:
newsource = i.source
pinnedlayers.add(i.getFullName(), newsource)
return pinnedlayers
def __init__(self, environment):
self.environment = environment
self.stop = threading.Event()
self.thread = None
self.interval = 0
self.lock = threading.RLock()
self.groups = []
self.changeevent = threading.Condition()
self.changequeue = []
def addLayers(self, layers):
group = Watcher.GroupState(layers)
self.groups.append(group)
def start(self, interval = 30):
if self.thread and self.thread.isAlive():
return
self.interval = interval
self.thread = threading.Thread(target = self.__worker__)
		self.thread.daemon = True
self.thread.start()
def stop(self):
if self.thread and self.thread.isAlive():
self.stop.set()
self.thread.join()
def alive(self):
if self.thread and self.thread.isAlive():
return True
return False
def trigger(self):
self.__check__(True)
def __check__(self, trigger = False):
with self.lock:
haschanges = False
remotes = []
for i in self.groups:
for p in i.getRefPairs():
if p[0] not in remotes:
remotes.append(p[0])
self.environment.debug("need to update for the following remotes -> %s" % remotes)
refstate = {}
for i in remotes:
self.environment.log("Grabbing refs from remote for %s" % i)
result = hopper.utils.git.tasks.GitTask.run(["ls-remote", i], environment = self.environment)
if result[0] == 0:
refstate[i] = {}
for r in result[1].splitlines():
parts = r.split()
refstate[i][parts[1]] = parts[0]
self.environment.debug("got refs -> %s" % repr(refstate[i]))
else:
self.environment.error("Failed to get remote state for '%s' error message = %s" % (i, result[1]))
return
haschanges = False
for i in self.groups:
if i.update(refstate, trigger):
self.environment.log("Changes have happened since last check, pinning")
changes = i.cloneRefPin(refstate)
self.changequeue.append((i.layers, changes, datetime.datetime.utcnow()))
haschanges = True
if haschanges:
with self.changeevent:
self.changeevent.notifyAll()
def __worker__(self):
while not self.stop.wait(self.interval):
self.__check__()
def wait(self):
if self.alive():
if self.hasnext():
return
with self.changeevent:
self.changeevent.wait()
def hasnext(self):
with self.lock:
if len(self.changequeue) != 0:
return True
return False
def getnext(self):
with self.lock:
if len(self.changequeue) != 0:
return self.changequeue.pop()
return None
| 29.019512
| 102
| 0.680114
|
5255fb08a4194089eb5c25082f883ba8a140c4ed
| 1,198
|
py
|
Python
|
myawards/forms.py
|
ucynthy12/awards
|
51c993b2e5b779103f1a43246d939a66364a187c
|
[
"Unlicense"
] | 1
|
2021-02-15T16:53:39.000Z
|
2021-02-15T16:53:39.000Z
|
myawards/forms.py
|
ucynthy12/awards
|
51c993b2e5b779103f1a43246d939a66364a187c
|
[
"Unlicense"
] | null | null | null |
myawards/forms.py
|
ucynthy12/awards
|
51c993b2e5b779103f1a43246d939a66364a187c
|
[
"Unlicense"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile,Rate,Project
from cloudinary.forms import CloudinaryFileField
class SignUpForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
class UpdateUserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
class UpdateUserProfileForm(forms.ModelForm):
profile_picture = CloudinaryFileField(
options = {
'tags': "directly_uploaded",
'crop': 'limit', 'width': 1000, 'height': 1000,
'eager': [{ 'crop': 'fill', 'width': 150, 'height': 100 }]
})
class Meta:
model = Profile
fields = [ 'profile_picture', 'bio']
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
widgets = {
'details': forms.Textarea(attrs={'rows':4, 'cols':30}),
}
fields = ('title','details','image', 'url')
class RateForm(forms.ModelForm):
class Meta:
model = Rate
fields = ['design','usability','content','creativity']
| 24.958333
| 65
| 0.624374
|
82445e9e685df030a7d17d9cc0c0adac922d7eef
| 345
|
py
|
Python
|
malleefowl/processes/__init__.py
|
Ouranosinc/malleefowl
|
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
|
[
"Apache-2.0"
] | null | null | null |
malleefowl/processes/__init__.py
|
Ouranosinc/malleefowl
|
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
|
[
"Apache-2.0"
] | 4
|
2017-09-21T17:14:45.000Z
|
2020-11-11T03:20:42.000Z
|
malleefowl/processes/__init__.py
|
Ouranosinc/malleefowl
|
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
|
[
"Apache-2.0"
] | null | null | null |
from .wps_esgsearch import ESGSearchProcess
from .wps_download import Download
from .wps_thredds import ThreddsDownload
from .wps_workflow import DispelWorkflow
from .wps_custom_workflow import DispelCustomWorkflow
processes = [
ESGSearchProcess(),
Download(),
ThreddsDownload(),
DispelWorkflow(),
DispelCustomWorkflow(),
]
| 24.642857
| 53
| 0.788406
|
bf8daf1d81200afaf91d98de1ac95b91d3211fdc
| 868
|
py
|
Python
|
xlsxwriter/test/comparison/test_image28.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_image28.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_image28.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image28.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(0, 6, self.image_dir + 'red_208.png', {'x_offset': 46, 'y_offset': 1})
workbook.close()
self.assertExcelEqual()
| 24.8
| 101
| 0.619816
|
7611aab81b7d6f766310a43a2729304debf4e4fc
| 16,471
|
py
|
Python
|
project/venv/Lib/site-packages/IPython/core/application.py
|
Maleb44/Django2
|
b58b6d6345a0fce158c2a5d5a4cc6161e5ca62ef
|
[
"MIT"
] | null | null | null |
project/venv/Lib/site-packages/IPython/core/application.py
|
Maleb44/Django2
|
b58b6d6345a0fce158c2a5d5a4cc6161e5ca62ef
|
[
"MIT"
] | null | null | null |
project/venv/Lib/site-packages/IPython/core/application.py
|
Maleb44/Django2
|
b58b6d6345a0fce158c2a5d5a4cc6161e5ca62ef
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
An application for IPython.
All top-level applications should use the classes in this module for
handling configuration and creating configurables.
The job of an :class:`Application` is to create the master configuration
object and then create the configurable objects, passing the config to them.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
from copy import deepcopy
import glob
import logging
import os
import shutil
import sys
from traitlets.config.application import Application, catch_config_error
from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
from IPython.core import release, crashhandler
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.paths import get_ipython_dir, get_ipython_package_dir
from IPython.utils.path import ensure_dir_exists
from IPython.utils import py3compat
from traitlets import List, Unicode, Type, Bool, Dict, Set, Instance, Undefined
if os.name == 'nt':
programdata = os.environ.get('PROGRAMDATA', None)
if programdata:
SYSTEM_CONFIG_DIRS = [os.path.join(programdata, 'ipython')]
else: # PROGRAMDATA is not defined by default on XP.
SYSTEM_CONFIG_DIRS = []
else:
SYSTEM_CONFIG_DIRS = [
"/usr/local/etc/ipython",
"/etc/ipython",
]
# aliases and flags
base_aliases = {
'profile-dir' : 'ProfileDir.location',
'profile' : 'BaseIPythonApplication.profile',
'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
'log-level' : 'Application.log_level',
'config' : 'BaseIPythonApplication.extra_config_file',
}
base_flags = dict(
debug = ({'Application' : {'log_level' : logging.DEBUG}},
"set log level to logging.DEBUG (maximize logging output)"),
quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
"set log level to logging.CRITICAL (minimize logging output)"),
init = ({'BaseIPythonApplication' : {
'copy_config_files' : True,
'auto_create' : True}
}, """Initialize profile with default config files. This is equivalent
to running `ipython profile create <profile>` prior to startup.
""")
)
class ProfileAwareConfigLoader(PyFileConfigLoader):
"""A Python file config loader that is aware of IPython profiles."""
def load_subconfig(self, fname, path=None, profile=None):
if profile is not None:
try:
profile_dir = ProfileDir.find_profile_dir_by_name(
get_ipython_dir(),
profile,
)
except ProfileDirError:
return
path = profile_dir.location
return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
class BaseIPythonApplication(Application):
name = Unicode(u'ipython')
description = Unicode(u'IPython: an enhanced interactive Python shell.')
version = Unicode(release.version)
aliases = Dict(base_aliases)
flags = Dict(base_flags)
classes = List([ProfileDir])
# enable `load_subconfig('cfg.py', profile='name')`
python_config_loader_class = ProfileAwareConfigLoader
# Track whether the config_file has changed,
# because some logic happens only if we aren't using the default.
config_file_specified = Set()
config_file_name = Unicode()
def _config_file_name_default(self):
return self.name.replace('-','_') + u'_config.py'
def _config_file_name_changed(self, name, old, new):
if new != old:
self.config_file_specified.add(new)
# The directory that contains IPython's builtin profiles.
builtin_profile_dir = Unicode(
os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
)
config_file_paths = List(Unicode())
def _config_file_paths_default(self):
return [py3compat.getcwd()]
extra_config_file = Unicode(config=True,
help="""Path to an extra config file to load.
If specified, load this config file in addition to any other IPython config.
""")
def _extra_config_file_changed(self, name, old, new):
try:
self.config_files.remove(old)
except ValueError:
pass
self.config_file_specified.add(new)
self.config_files.append(new)
profile = Unicode(u'default', config=True,
help="""The IPython profile to use."""
)
def _profile_changed(self, name, old, new):
self.builtin_profile_dir = os.path.join(
get_ipython_package_dir(), u'config', u'profile', new
)
ipython_dir = Unicode(config=True,
help="""
The name of the IPython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
is usually $HOME/.ipython. This option can also be specified through
the environment variable IPYTHONDIR.
"""
)
def _ipython_dir_default(self):
d = get_ipython_dir()
self._ipython_dir_changed('ipython_dir', d, d)
return d
_in_init_profile_dir = False
profile_dir = Instance(ProfileDir, allow_none=True)
def _profile_dir_default(self):
# avoid recursion
if self._in_init_profile_dir:
return
# profile_dir requested early, force initialization
self.init_profile_dir()
return self.profile_dir
overwrite = Bool(False, config=True,
help="""Whether to overwrite existing config files when copying""")
auto_create = Bool(False, config=True,
help="""Whether to create profile dir if it doesn't exist""")
config_files = List(Unicode())
def _config_files_default(self):
return [self.config_file_name]
copy_config_files = Bool(False, config=True,
help="""Whether to install the default config files into the profile dir.
If a new profile is being created, and IPython contains config files for that
profile, then they will be staged into the new directory. Otherwise,
default config files will be automatically generated.
""")
verbose_crash = Bool(False, config=True,
help="""Create a massive crash report when IPython encounters what may be an
internal error. The default is to append a short message to the
usual traceback""")
# The class to use as the crash handler.
crash_handler_class = Type(crashhandler.CrashHandler)
@catch_config_error
def __init__(self, **kwargs):
super(BaseIPythonApplication, self).__init__(**kwargs)
# ensure current working directory exists
try:
py3compat.getcwd()
except:
# exit if cwd doesn't exist
self.log.error("Current working directory doesn't exist.")
self.exit(1)
#-------------------------------------------------------------------------
# Various stages of Application creation
#-------------------------------------------------------------------------
deprecated_subcommands = {}
def initialize_subcommand(self, subc, argv=None):
if subc in self.deprecated_subcommands:
import time
self.log.warning("Subcommand `ipython {sub}` is deprecated and will be removed "
"in future versions.".format(sub=subc))
self.log.warning("You likely want to use `jupyter {sub}` in the "
"future".format(sub=subc))
return super(BaseIPythonApplication, self).initialize_subcommand(subc, argv)
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler)
def excepthook(self, etype, evalue, tb):
"""this is sys.excepthook after init_crashhandler
set self.verbose_crash=True to use our full crashhandler, instead of
a regular traceback with a short message (crash_handler_lite)
"""
if self.verbose_crash:
return self.crash_handler(etype, evalue, tb)
else:
return crashhandler.crash_handler_lite(etype, evalue, tb)
def _ipython_dir_changed(self, name, old, new):
if old is not Undefined:
str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
sys.getfilesystemencoding()
)
if str_old in sys.path:
sys.path.remove(str_old)
str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
sys.getfilesystemencoding()
)
sys.path.append(str_path)
ensure_dir_exists(new)
readme = os.path.join(new, 'README')
readme_src = os.path.join(get_ipython_package_dir(), u'config', u'profile', 'README')
if not os.path.exists(readme) and os.path.exists(readme_src):
shutil.copy(readme_src, readme)
for d in ('extensions', 'nbextensions'):
path = os.path.join(new, d)
try:
ensure_dir_exists(path)
except OSError as e:
# this will not be EEXIST
self.log.error("couldn't create path %s: %s", path, e)
self.log.debug("IPYTHONDIR set to: %s" % new)
def load_config_file(self, suppress_errors=True):
"""Load the config file.
By default, errors in loading config are handled, and a warning
printed on screen. For testing, the suppress_errors option is set
to False, so errors will make tests fail.
"""
self.log.debug("Searching path %s for config files", self.config_file_paths)
base_config = 'ipython_config.py'
self.log.debug("Attempting to load config file: %s" %
base_config)
try:
Application.load_config_file(
self,
base_config,
path=self.config_file_paths
)
except ConfigFileNotFound:
# ignore errors loading parent
self.log.debug("Config file %s not found", base_config)
pass
for config_file_name in self.config_files:
if not config_file_name or config_file_name == base_config:
continue
self.log.debug("Attempting to load config file: %s" %
self.config_file_name)
try:
Application.load_config_file(
self,
config_file_name,
path=self.config_file_paths
)
except ConfigFileNotFound:
# Only warn if the default config file was NOT being used.
if config_file_name in self.config_file_specified:
msg = self.log.warning
else:
msg = self.log.debug
msg("Config file not found, skipping: %s", config_file_name)
except Exception:
# For testing purposes.
if not suppress_errors:
raise
self.log.warning("Error loading config file: %s" %
self.config_file_name, exc_info=True)
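    # In short, the effective config precedence arranged above and in initialize() is:
    #   1. ipython_config.py found on config_file_paths
    #   2. the profile-specific / extra config files listed in self.config_files
    #   3. command-line options, re-applied last via update_config(cl_config) so they win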
def init_profile_dir(self):
"""initialize the profile dir"""
self._in_init_profile_dir = True
if self.profile_dir is not None:
# already ran
return
if 'ProfileDir.location' not in self.config:
# location not specified, find by profile name
try:
p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
# not found, maybe create it (always create default profile)
if self.auto_create or self.profile == 'default':
try:
p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile: %r"%self.profile)
self.exit(1)
else:
self.log.info("Created profile dir: %r"%p.location)
else:
self.log.fatal("Profile %r not found."%self.profile)
self.exit(1)
else:
self.log.debug("Using existing profile dir: %r"%p.location)
else:
location = self.config.ProfileDir.location
# location is fully specified
try:
p = ProfileDir.find_profile_dir(location, self.config)
except ProfileDirError:
# not found, maybe create it
if self.auto_create:
try:
p = ProfileDir.create_profile_dir(location, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile directory: %r"%location)
self.exit(1)
else:
self.log.debug("Creating new profile dir: %r"%location)
else:
self.log.fatal("Profile directory %r not found."%location)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%location)
# if profile_dir is specified explicitly, set profile name
dir_name = os.path.basename(p.location)
if dir_name.startswith('profile_'):
self.profile = dir_name[8:]
self.profile_dir = p
self.config_file_paths.append(p.location)
self._in_init_profile_dir = False
def init_config_files(self):
"""[optionally] copy default config files into profile dir."""
self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
# copy config files
path = self.builtin_profile_dir
if self.copy_config_files:
src = self.profile
cfg = self.config_file_name
if path and os.path.exists(os.path.join(path, cfg)):
self.log.warning("Staging %r from %s into %r [overwrite=%s]"%(
cfg, src, self.profile_dir.location, self.overwrite)
)
self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
else:
self.stage_default_config_file()
else:
# Still stage *bundled* config files, but not generated ones
# This is necessary for `ipython profile=sympy` to load the profile
# on the first go
files = glob.glob(os.path.join(path, '*.py'))
for fullpath in files:
cfg = os.path.basename(fullpath)
if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
# file was copied
self.log.warning("Staging bundled %s from %s into %r"%(
cfg, self.profile, self.profile_dir.location)
)
def stage_default_config_file(self):
"""auto generate default config file, and stage it into the profile."""
s = self.generate_config_file()
fname = os.path.join(self.profile_dir.location, self.config_file_name)
if self.overwrite or not os.path.exists(fname):
self.log.warning("Generating default config file: %r"%(fname))
with open(fname, 'w') as f:
f.write(s)
@catch_config_error
def initialize(self, argv=None):
# don't hook up crash handler before parsing command-line
self.parse_command_line(argv)
self.init_crash_handler()
if self.subapp is not None:
# stop here if subapp is taking over
return
# save a copy of CLI config to re-load after config files
# so that it has highest priority
cl_config = deepcopy(self.config)
self.init_profile_dir()
self.init_config_files()
self.load_config_file()
# enforce cl-opts override configfile opts:
self.update_config(cl_config)
| 40.075426
| 110
| 0.607249
|
80ea1130d3b5945516725e8112e82a47b5976bb5
| 1,477
|
py
|
Python
|
mindspore/python/mindspore/ops/_op_impl/tbe/log.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
mindspore/python/mindspore/ops/_op_impl/tbe/log.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
mindspore/python/mindspore/ops/_op_impl/tbe/log.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Log op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
log_op_info = TBERegOp("Log") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("log.so") \
.compute_cost(10) \
.kernel_name("log") \
.partial_flag(True) \
.attr("base", "optional", "float", "all", "-1.0") \
.attr("scale", "optional", "float", "all", "1.0") \
.attr("shift", "optional", "float", "all", "0.0") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("formatAgnostic") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(log_op_info)
def _log_tbe():
"""Log TBE register"""
return
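# Informally: TBERegOp above builds the op-info description for the Log kernel
# ("log" in "log.so", optional base/scale/shift attributes, float16/float32 in a
# format-agnostic layout), and @op_info_register attaches that description so the
# framework can match the Log primitive to this TBE implementation.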
| 36.02439
| 79
| 0.64455
|
bb3f5c6b046af3c9d190cead2fc8eb3f3e3b4eeb
| 1,363
|
py
|
Python
|
HW2/merge_sort_06123901.py
|
Maddiezheng/MyLearningNote
|
aec12ad3936bbdd377500256b9cf7628c16c3dc7
|
[
"MIT"
] | null | null | null |
HW2/merge_sort_06123901.py
|
Maddiezheng/MyLearningNote
|
aec12ad3936bbdd377500256b9cf7628c16c3dc7
|
[
"MIT"
] | null | null | null |
HW2/merge_sort_06123901.py
|
Maddiezheng/MyLearningNote
|
aec12ad3936bbdd377500256b9cf7628c16c3dc7
|
[
"MIT"
] | 1
|
2019-10-23T08:30:57.000Z
|
2019-10-23T08:30:57.000Z
|
class Solution():
    def MergeSingle(self, left_arr, right_arr):
        i = 0                   # index into the left array
        j = 0                   # index into the right array
        a = []                  # empty result list
        mid = len(left_arr)     # length of the left array (one past its last index)
        end = len(right_arr)    # length of the right array (one past its last index)
        while i < mid and j < end:          # compare while both indices are still in range
            if left_arr[i] <= right_arr[j]:
                a.append(left_arr[i])       # the left value is smaller or equal, so append it to a
                i += 1                      # advance the left index and keep comparing
            else:
                a.append(right_arr[j])
                j += 1
        if j == end:            # the right array is exhausted, so append the rest of the left array
            a = a + left_arr[i:]
        else:                   # otherwise the left array is exhausted, so append the rest of the right array
            a = a + right_arr[j:]
        return a
    def merge_sort(self, arr):
        if len(arr) <= 1:       # a list of length 0 or 1 is already sorted, return it as-is
            return arr
        mid = len(arr) // 2     # integer division (drops the fraction) to find the midpoint
        arr1 = self.merge_sort(arr[:mid])   # recursively sort the left half
        arr2 = self.merge_sort(arr[mid:])   # recursively sort the right half
        return self.MergeSingle(arr1, arr2) # single merge pass
arr = [10, 3, 6, 5, 14, 8]
Solution().merge_sort(arr)
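# Hand-checked expectation for the example above:
# Solution().merge_sort([10, 3, 6, 5, 14, 8]) -> [3, 5, 6, 8, 10, 14]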
| 35.868421
| 93
| 0.464417
|
d61538d3537aacc068a2c2bc48d4c906f44aea7b
| 124,606
|
py
|
Python
|
Babylon.js-2.4.0/Exporters/Blender/io_export_babylon.py
|
Chaseshak/RetroVRCade
|
7e999af33941f159f40f8e15f648ae04d66c1f8f
|
[
"Apache-2.0"
] | null | null | null |
Babylon.js-2.4.0/Exporters/Blender/io_export_babylon.py
|
Chaseshak/RetroVRCade
|
7e999af33941f159f40f8e15f648ae04d66c1f8f
|
[
"Apache-2.0"
] | null | null | null |
Babylon.js-2.4.0/Exporters/Blender/io_export_babylon.py
|
Chaseshak/RetroVRCade
|
7e999af33941f159f40f8e15f648ae04d66c1f8f
|
[
"Apache-2.0"
] | null | null | null |
bl_info = {
'name': 'Babylon.js',
'author': 'David Catuhe, Jeff Palmer',
'version': (4, 6, 1),
'blender': (2, 75, 0),
'location': 'File > Export > Babylon.js (.babylon)',
'description': 'Export Babylon.js scenes (.babylon)',
'wiki_url': 'https://github.com/BabylonJS/Babylon.js/tree/master/Exporters/Blender',
'tracker_url': '',
'category': 'Import-Export'}
import base64
import bpy
import bpy_extras.io_utils
import time
import io
import math
import mathutils
import os
import shutil
import sys, traceback # for writing errors to log file
#===============================================================================
# Registration of the call to the INFO_MT_file_export file selector
def menu_func(self, context):
self.layout.operator(Main.bl_idname, text = 'Babylon.js [.babylon] ver ' + format_version())
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == '__main__':
register()
#===============================================================================
def format_version():
version = bl_info['version']
return str(version[0]) + '.' + str(version[1]) + '.' + str(version[2])
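    # e.g. version == (4, 6, 1) yields '4.6.1'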
# output related constants
MAX_VERTEX_ELEMENTS = 65535
MAX_VERTEX_ELEMENTS_32Bit = 16777216
VERTEX_OUTPUT_PER_LINE = 100
MAX_FLOAT_PRECISION_INT = 4
MAX_FLOAT_PRECISION = '%.' + str(MAX_FLOAT_PRECISION_INT) + 'f'
COMPRESS_MATRIX_INDICES = True # this is True for .babylon exporter & False for TOB
FRAME_BASED_ANIMATION = True # this is only able to be turned off by the TOB exporter right now
# used in World constructor, defined in BABYLON.Scene
#FOGMODE_NONE = 0
#FOGMODE_EXP = 1
#FOGMODE_EXP2 = 2
FOGMODE_LINEAR = 3
# used in Mesh & Node constructors, defined in BABYLON.AbstractMesh
BILLBOARDMODE_NONE = 0
#BILLBOARDMODE_X = 1
#BILLBOARDMODE_Y = 2
#BILLBOARDMODE_Z = 4
BILLBOARDMODE_ALL = 7
# used in Mesh constructor, defined in BABYLON.PhysicsEngine
SPHERE_IMPOSTER = 1
BOX_IMPOSTER = 2
#PLANE_IMPOSTER = 3
MESH_IMPOSTER = 4
CAPSULE_IMPOSTER = 5
CONE_IMPOSTER = 6
CYLINDER_IMPOSTER = 7
CONVEX_HULL_IMPOSTER = 8
# camera class names, never formally defined in Babylon, but used in babylonFileLoader
ARC_ROTATE_CAM = 'ArcRotateCamera'
DEV_ORIENT_CAM = 'DeviceOrientationCamera'
FOLLOW_CAM = 'FollowCamera'
FREE_CAM = 'FreeCamera'
GAMEPAD_CAM = 'GamepadCamera'
TOUCH_CAM = 'TouchCamera'
V_JOYSTICKS_CAM = 'VirtualJoysticksCamera'
VR_DEV_ORIENT_FREE_CAM ='VRDeviceOrientationFreeCamera'
WEB_VR_FREE_CAM = 'WebVRFreeCamera'
# 3D camera rigs, defined in BABYLON.Camera, must be strings to be in 'dropdown'
RIG_MODE_NONE = '0'
RIG_MODE_STEREOSCOPIC_ANAGLYPH = '10'
RIG_MODE_STEREOSCOPIC_SIDEBYSIDE_PARALLEL = '11'
RIG_MODE_STEREOSCOPIC_SIDEBYSIDE_CROSSEYED = '12'
RIG_MODE_STEREOSCOPIC_OVERUNDER = '13'
RIG_MODE_VR = '20'
# used in Light constructor, never formally defined in Babylon, but used in babylonFileLoader
POINT_LIGHT = 0
DIRECTIONAL_LIGHT = 1
SPOT_LIGHT = 2
HEMI_LIGHT = 3
#used in ShadowGenerators
NO_SHADOWS = 'NONE'
STD_SHADOWS = 'STD'
POISSON_SHADOWS = 'POISSON'
VARIANCE_SHADOWS = 'VARIANCE'
BLUR_VARIANCE_SHADOWS = 'BLUR_VARIANCE'
# used in Texture constructor, defined in BABYLON.Texture
CLAMP_ADDRESSMODE = 0
WRAP_ADDRESSMODE = 1
MIRROR_ADDRESSMODE = 2
# used in Texture constructor, defined in BABYLON.Texture
EXPLICIT_MODE = 0
SPHERICAL_MODE = 1
#PLANAR_MODE = 2
CUBIC_MODE = 3
#PROJECTION_MODE = 4
#SKYBOX_MODE = 5
DEFAULT_MATERIAL_NAMESPACE = 'Same as Filename'
# passed to Animation constructor from animatable objects, defined in BABYLON.Animation
#ANIMATIONTYPE_FLOAT = 0
ANIMATIONTYPE_VECTOR3 = 1
ANIMATIONTYPE_QUATERNION = 2
ANIMATIONTYPE_MATRIX = 3
#ANIMATIONTYPE_COLOR3 = 4
# passed to Animation constructor from animatable objects, defined in BABYLON.Animation
#ANIMATIONLOOPMODE_RELATIVE = 0
ANIMATIONLOOPMODE_CYCLE = 1
#ANIMATIONLOOPMODE_CONSTANT = 2
#===============================================================================
# Panel displayed in Scene Tab of properties, so settings can be saved in a .blend file
class ExporterSettingsPanel(bpy.types.Panel):
bl_label = 'Exporter Settings ' + format_version()
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'scene'
bpy.types.Scene.export_onlySelectedLayer = bpy.props.BoolProperty(
name="Export only selected layers",
description="Export only selected layers",
default = False,
)
bpy.types.Scene.export_flatshadeScene = bpy.props.BoolProperty(
name="Flat shade entire scene",
description="Use face normals on all meshes. Increases vertices.",
default = False,
)
bpy.types.Scene.attachedSound = bpy.props.StringProperty(
name='Sound',
description='',
default = ''
)
bpy.types.Scene.loopSound = bpy.props.BoolProperty(
name='Loop sound',
description='',
default = True
)
bpy.types.Scene.autoPlaySound = bpy.props.BoolProperty(
name='Auto play sound',
description='',
default = True
)
bpy.types.Scene.inlineTextures = bpy.props.BoolProperty(
name="inline",
description="turn textures into encoded strings, for direct inclusion into source code",
default = False,
)
bpy.types.Scene.textureDir = bpy.props.StringProperty(
name='sub-directory',
description='The path below the output directory to write texture files (any separators OS dependent)',
default = ''
)
bpy.types.Scene.ignoreIKBones = bpy.props.BoolProperty(
name="Ignore IK Bones",
description="Do not export bones with either '.ik' or 'ik.'(not case sensitive) in the name",
default = False,
)
def draw(self, context):
layout = self.layout
scene = context.scene
layout.prop(scene, "export_onlySelectedLayer")
layout.prop(scene, "export_flatshadeScene")
layout.prop(scene, "ignoreIKBones")
box = layout.box()
box.label(text='Texture Location:')
box.prop(scene, "inlineTextures")
row = box.row()
row.enabled = not scene.inlineTextures
row.prop(scene, "textureDir")
box = layout.box()
box.prop(scene, 'attachedSound')
box.prop(scene, 'autoPlaySound')
box.prop(scene, 'loopSound')
#===============================================================================
class Main(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
    bl_idname = 'scene.babylon'          # module will not load without it, also must have a dot
bl_label = 'Export Babylon.js scene' # used on the label of the actual 'save' button
filename_ext = '.babylon' # required to have one, although not really used
filepath = bpy.props.StringProperty(subtype = 'FILE_PATH') # assigned once the file selector returns
log_handler = None # assigned in execute
nameSpace = None # assigned in execute
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
nWarnings = 0
@staticmethod
def warn(msg, numTabIndent = 1, noNewLine = False):
Main.log('WARNING: ' + msg, numTabIndent, noNewLine)
Main.nWarnings += 1
@staticmethod
def log(msg, numTabIndent = 1, noNewLine = False):
for i in range(numTabIndent):
Main.log_handler.write('\t')
Main.log_handler.write(msg)
if not noNewLine: Main.log_handler.write('\n')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getMaterial(self, baseMaterialId):
fullName = Main.nameSpace + '.' + baseMaterialId
for material in self.materials:
if material.name == fullName:
return material
return None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getSourceMeshInstance(self, dataName):
for mesh in self.meshesAndNodes:
# nodes have no 'dataName', cannot be instanced in any case
if hasattr(mesh, 'dataName') and mesh.dataName == dataName:
return mesh
return None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def execute(self, context):
scene = context.scene
self.scene = scene # reference for passing
try:
start_time = time.time()
filepathDotExtension = self.filepath.rpartition('.')
self.filepathMinusExtension = filepathDotExtension[0]
# assign nameSpace, based on OS
if self.filepathMinusExtension.find('\\') != -1:
Main.nameSpace = legal_js_identifier(self.filepathMinusExtension.rpartition('\\')[2])
else:
Main.nameSpace = legal_js_identifier(self.filepathMinusExtension.rpartition('/')[2])
# explicitly reset globals, in case there was an earlier export this session
Main.nWarnings = 0
Main.log_handler = io.open(self.filepathMinusExtension + '.log', 'w', encoding='utf8')
version = bl_info['version']
Main.log('Exporter version: ' + format_version() + ', Blender version: ' + bpy.app.version_string)
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode = 'OBJECT')
# assign texture location, purely temporary if inlining
self.textureDir = os.path.dirname(self.filepath)
if not scene.inlineTextures:
self.textureDir = os.path.join(self.textureDir, scene.textureDir)
if not os.path.isdir(self.textureDir):
os.makedirs(self.textureDir)
Main.warn("Texture sub-directory did not already exist, created: " + self.textureDir)
Main.log('========= Conversion from Blender to Babylon.js =========', 0)
Main.log('Scene settings used:', 1)
Main.log('selected layers only: ' + format_bool(scene.export_onlySelectedLayer), 2)
Main.log('flat shading entire scene: ' + format_bool(scene.export_flatshadeScene), 2)
Main.log('inline textures: ' + format_bool(scene.inlineTextures), 2)
if not scene.inlineTextures:
Main.log('texture directory: ' + self.textureDir, 2)
self.world = World(scene)
bpy.ops.screen.animation_cancel()
currentFrame = bpy.context.scene.frame_current
# Active camera
if scene.camera != None:
self.activeCamera = scene.camera.name
else:
Main.warn('No active camera has been assigned, or is not in a currently selected Blender layer')
self.cameras = []
self.lights = []
self.shadowGenerators = []
self.skeletons = []
skeletonId = 0
self.meshesAndNodes = []
self.materials = []
self.multiMaterials = []
self.sounds = []
# Scene level sound
if scene.attachedSound != '':
self.sounds.append(Sound(scene.attachedSound, scene.autoPlaySound, scene.loopSound))
# separate loop doing all skeletons, so available in Mesh to make skipping IK bones possible
for object in [object for object in scene.objects]:
scene.frame_set(currentFrame)
if object.type == 'ARMATURE': #skeleton.pose.bones
if object.is_visible(scene):
self.skeletons.append(Skeleton(object, scene, skeletonId))
skeletonId += 1
else:
Main.warn('The following armature not visible in scene thus ignored: ' + object.name)
            # exclude lamps in this pass, so the ShadowGenerator constructor can be passed meshesAndNodes
for object in [object for object in scene.objects]:
scene.frame_set(currentFrame)
if object.type == 'CAMERA':
if object.is_visible(scene): # no isInSelectedLayer() required, is_visible() handles this for them
self.cameras.append(Camera(object))
else:
Main.warn('The following camera not visible in scene thus ignored: ' + object.name)
elif object.type == 'MESH':
forcedParent = None
nameID = ''
nextStartFace = 0
                    while self.isInSelectedLayer(object, scene):
mesh = Mesh(object, scene, nextStartFace, forcedParent, nameID, self)
if hasattr(mesh, 'instances'):
self.meshesAndNodes.append(mesh)
else:
break
if object.data.attachedSound != '':
self.sounds.append(Sound(object.data.attachedSound, object.data.autoPlaySound, object.data.loopSound, object))
nextStartFace = mesh.offsetFace
if nextStartFace == 0:
break
if forcedParent is None:
nameID = 0
forcedParent = object
Main.warn('The following mesh has exceeded the maximum # of vertex elements & will be broken into multiple Babylon meshes: ' + object.name)
nameID = nameID + 1
elif object.type == 'EMPTY':
self.meshesAndNodes.append(Node(object))
elif object.type != 'LAMP' and object.type != 'ARMATURE':
Main.warn('The following object (type - ' + object.type + ') is not currently exportable thus ignored: ' + object.name)
            # Lamp / shadow generator pass; meshesAndNodes complete & forcedParents included
for object in [object for object in scene.objects]:
scene.frame_set(currentFrame)
if object.type == 'LAMP':
if object.is_visible(scene): # no isInSelectedLayer() required, is_visible() handles this for them
bulb = Light(object)
self.lights.append(bulb)
if object.data.shadowMap != 'NONE':
if bulb.light_type == DIRECTIONAL_LIGHT or bulb.light_type == SPOT_LIGHT:
self.shadowGenerators.append(ShadowGenerator(object, self.meshesAndNodes, scene))
else:
Main.warn('Only directional (sun) and spot types of lamp are valid for shadows thus ignored: ' + object.name)
else:
Main.warn('The following lamp not visible in scene thus ignored: ' + object.name)
bpy.context.scene.frame_set(currentFrame)
# output file
self.to_scene_file ()
except:# catch *all* exceptions
ex = sys.exc_info()
Main.log('========= An error was encountered =========', 0)
stack = traceback.format_tb(ex[2])
for line in stack:
Main.log_handler.write(line) # avoid tabs & extra newlines by not calling log() inside catch
Main.log_handler.write('ERROR: ' + str(ex[1]) + '\n')
raise
finally:
Main.log('========= end of processing =========', 0)
elapsed_time = time.time() - start_time
minutes = math.floor(elapsed_time / 60)
seconds = elapsed_time - (minutes * 60)
Main.log('elapsed time: ' + str(minutes) + ' min, ' + format_f(seconds) + ' secs', 0)
Main.log_handler.close()
if (Main.nWarnings > 0):
self.report({'WARNING'}, 'Processing completed, but ' + str(Main.nWarnings) + ' WARNINGS were raised, see log file.')
return {'FINISHED'}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self):
Main.log('========= Writing of scene file started =========', 0)
# Open file
file_handler = io.open(self.filepathMinusExtension + '.babylon', 'w', encoding='utf8')
file_handler.write('{')
file_handler.write('"producer":{"name":"Blender","version":"' + bpy.app.version_string + '","exporter_version":"' + format_version() + '","file":"' + Main.nameSpace + '.babylon"},\n')
self.world.to_scene_file(file_handler)
# Materials
file_handler.write(',\n"materials":[')
first = True
for material in self.materials:
if first != True:
file_handler.write(',\n')
first = False
material.to_scene_file(file_handler)
file_handler.write(']')
# Multi-materials
file_handler.write(',\n"multiMaterials":[')
first = True
for multimaterial in self.multiMaterials:
if first != True:
file_handler.write(',')
first = False
multimaterial.to_scene_file(file_handler)
file_handler.write(']')
# Armatures/Bones
file_handler.write(',\n"skeletons":[')
first = True
for skeleton in self.skeletons:
if first != True:
file_handler.write(',')
first = False
skeleton.to_scene_file(file_handler)
file_handler.write(']')
# Meshes
file_handler.write(',\n"meshes":[')
first = True
for m in range(0, len(self.meshesAndNodes)):
mesh = self.meshesAndNodes[m]
if first != True:
file_handler.write(',')
first = False
mesh.to_scene_file(file_handler)
file_handler.write(']')
# Cameras
file_handler.write(',\n"cameras":[')
first = True
for camera in self.cameras:
if hasattr(camera, 'fatalProblem'): continue
if first != True:
file_handler.write(',')
first = False
camera.update_for_target_attributes(self.meshesAndNodes)
camera.to_scene_file(file_handler)
file_handler.write(']')
# Active camera
if hasattr(self, 'activeCamera'):
write_string(file_handler, 'activeCamera', self.activeCamera)
# Lights
file_handler.write(',\n"lights":[')
first = True
for light in self.lights:
if first != True:
file_handler.write(',')
first = False
light.to_scene_file(file_handler)
file_handler.write(']')
# Shadow generators
file_handler.write(',\n"shadowGenerators":[')
first = True
for shadowGen in self.shadowGenerators:
if first != True:
file_handler.write(',')
first = False
shadowGen.to_scene_file(file_handler)
file_handler.write(']')
# Sounds
if len(self.sounds) > 0:
file_handler.write('\n,"sounds":[')
first = True
for sound in self.sounds:
if first != True:
file_handler.write(',')
first = False
sound.to_scene_file(file_handler)
file_handler.write(']')
# Closing
file_handler.write('\n}')
file_handler.close()
Main.log('========= Writing of scene file completed =========', 0)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def isInSelectedLayer(self, obj, scene):
if not scene.export_onlySelectedLayer:
return True
for l in range(0, len(scene.layers)):
if obj.layers[l] and scene.layers[l]:
return True
return False
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_skeleton(self, name):
for skeleton in self.skeletons:
if skeleton.name == name:
return skeleton
#really cannot happen, will cause exception in caller
return None
#===============================================================================
class World:
def __init__(self, scene):
self.autoClear = True
world = scene.world
if world:
self.ambient_color = world.ambient_color
self.clear_color = world.horizon_color
else:
self.ambient_color = mathutils.Color((0.2, 0.2, 0.3))
self.clear_color = mathutils.Color((0.0, 0.0, 0.0))
self.gravity = scene.gravity
if world and world.mist_settings.use_mist:
self.fogMode = FOGMODE_LINEAR
self.fogColor = world.horizon_color
self.fogStart = world.mist_settings.start
self.fogEnd = world.mist_settings.depth
self.fogDensity = 0.1
Main.log('Python World class constructor completed')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
write_bool(file_handler, 'autoClear', self.autoClear, True)
write_color(file_handler, 'clearColor', self.clear_color)
write_color(file_handler, 'ambientColor', self.ambient_color)
write_vector(file_handler, 'gravity', self.gravity)
if hasattr(self, 'fogMode'):
write_int(file_handler, 'fogMode', self.fogMode)
write_color(file_handler, 'fogColor', self.fogColor)
write_float(file_handler, 'fogStart', self.fogStart)
write_float(file_handler, 'fogEnd', self.fogEnd)
write_float(file_handler, 'fogDensity', self.fogDensity)
#===============================================================================
class Sound:
def __init__(self, name, autoplay, loop, connectedMesh = None):
        self.name = name
self.autoplay = autoplay
self.loop = loop
if connectedMesh != None:
self.connectedMeshId = connectedMesh.name
self.maxDistance = connectedMesh.data.maxSoundDistance
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_bool(file_handler, 'autoplay', self.autoplay)
write_bool(file_handler, 'loop', self.loop)
if hasattr(self, 'connectedMeshId'):
write_string(file_handler, 'connectedMeshId', self.connectedMeshId)
write_float(file_handler, 'maxDistance', self.maxDistance)
file_handler.write('}')
#===============================================================================
class FCurveAnimatable:
def define_animations(self, object, supportsRotation, supportsPosition, supportsScaling, xOffsetForRotation = 0):
# just because a sub-class can be animatable does not mean it is
self.animationsPresent = object.animation_data and object.animation_data.action
if (self.animationsPresent):
Main.log('animation processing begun', 2)
# instance each type of animation support regardless of whether there is any data for it
if supportsRotation:
if object.rotation_mode == 'QUATERNION':
if object.type == 'CAMERA':
# if it's a camera, convert quaternions to euler XYZ
rotAnimation = QuaternionToEulerAnimation(object, 'rotation', 'rotation_quaternion', -1, xOffsetForRotation)
else:
rotAnimation = QuaternionAnimation(object, 'rotationQuaternion', 'rotation_quaternion', 1, xOffsetForRotation)
else:
rotAnimation = VectorAnimation(object, 'rotation', 'rotation_euler', -1, xOffsetForRotation)
if supportsPosition:
posAnimation = VectorAnimation(object, 'position', 'location')
if supportsScaling:
scaleAnimation = VectorAnimation(object, 'scaling', 'scale')
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(object, action, False, frameOffset)
if animationRange is None:
continue
if supportsRotation:
hasData = rotAnimation.append_range(object, animationRange)
if supportsPosition:
hasData |= posAnimation.append_range(object, animationRange)
if supportsScaling:
hasData |= scaleAnimation.append_range(object, animationRange)
if hasData:
Main.log('processing action ' + animationRange.to_string(), 3)
self.ranges.append(animationRange)
frameOffset = animationRange.frame_end
#Set Animations
self.animations = []
if supportsRotation and len(rotAnimation.frames) > 0:
self.animations.append(rotAnimation)
if supportsPosition and len(posAnimation.frames) > 0:
self.animations.append(posAnimation)
if supportsScaling and len(scaleAnimation.frames) > 0:
self.animations.append(scaleAnimation)
if (hasattr(object.data, "autoAnimate") and object.data.autoAnimate):
self.autoAnimate = True
self.autoAnimateFrom = bpy.context.scene.frame_end
self.autoAnimateTo = 0
for animation in self.animations:
if self.autoAnimateFrom > animation.get_first_frame():
self.autoAnimateFrom = animation.get_first_frame()
if self.autoAnimateTo < animation.get_last_frame():
self.autoAnimateTo = animation.get_last_frame()
self.autoAnimateLoop = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
if (self.animationsPresent):
file_handler.write('\n,"animations":[')
first = True
for animation in self.animations:
if first == False:
file_handler.write(',')
animation.to_scene_file(file_handler)
first = False
file_handler.write(']')
file_handler.write(',"ranges":[')
first = True
for range in self.ranges:
if first != True:
file_handler.write(',')
first = False
range.to_scene_file(file_handler)
file_handler.write(']')
if (hasattr(self, "autoAnimate") and self.autoAnimate):
write_bool(file_handler, 'autoAnimate', self.autoAnimate)
write_int(file_handler, 'autoAnimateFrom', self.autoAnimateFrom)
write_int(file_handler, 'autoAnimateTo', self.autoAnimateTo)
write_bool(file_handler, 'autoAnimateLoop', self.autoAnimateLoop)
#===============================================================================
class Mesh(FCurveAnimatable):
def __init__(self, object, scene, startFace, forcedParent, nameID, exporter):
self.name = object.name + str(nameID)
Main.log('processing begun of mesh: ' + self.name)
        self.define_animations(object, True, True, True)  # should animations be done when forcedParent is set?
self.isVisible = not object.hide_render
self.isEnabled = not object.data.loadDisabled
useFlatShading = scene.export_flatshadeScene or object.data.useFlatShading
self.checkCollisions = object.data.checkCollisions
self.receiveShadows = object.data.receiveShadows
self.castShadows = object.data.castShadows
self.freezeWorldMatrix = object.data.freezeWorldMatrix
# hasSkeleton detection & skeletonID determination
hasSkeleton = False
objArmature = None # if there's an armature, this will be the one!
if len(object.vertex_groups) > 0:
objArmature = object.find_armature()
if objArmature != None:
hasSkeleton = True
# used to get bone index, since could be skipping IK bones
skeleton = exporter.get_skeleton(objArmature.name)
i = 0
for obj in scene.objects:
if obj.type == "ARMATURE":
if obj == objArmature:
self.skeletonId = i
break
else:
i += 1
# determine Position, rotation, & scaling
if forcedParent is None:
# Use local matrix
locMatrix = object.matrix_local
if objArmature != None:
# unless the armature is the parent
if object.parent and object.parent == objArmature:
locMatrix = object.matrix_world * object.parent.matrix_world.inverted()
loc, rot, scale = locMatrix.decompose()
self.position = loc
if object.rotation_mode == 'QUATERNION':
self.rotationQuaternion = rot
else:
self.rotation = scale_vector(rot.to_euler('XYZ'), -1)
self.scaling = scale
else:
# use defaults when not None
self.position = mathutils.Vector((0, 0, 0))
self.rotation = scale_vector(mathutils.Vector((0, 0, 0)), 1) # isn't scaling 0's by 1 same as 0?
self.scaling = mathutils.Vector((1, 1, 1))
# determine parent & dataName
if forcedParent is None:
            self.dataName = object.data.name # used to support shared vertex instances in later passes
if object.parent and object.parent.type != 'ARMATURE':
self.parentId = object.parent.name
else:
self.dataName = self.name
self.parentId = forcedParent.name
        # Determine whether this will be an instance of another mesh, before processing materials, to avoid multi-bakes
sourceMesh = exporter.getSourceMeshInstance(self.dataName)
if sourceMesh is not None:
#need to make sure rotation mode matches, since value initially copied in InstancedMesh constructor
if hasattr(sourceMesh, 'rotationQuaternion'):
instRot = None
instRotq = rot
else:
instRot = scale_vector(rot.to_euler('XYZ'), -1)
instRotq = None
instance = MeshInstance(self.name, self.position, instRot, instRotq, self.scaling, self.freezeWorldMatrix)
sourceMesh.instances.append(instance)
Main.log('mesh is an instance of : ' + sourceMesh.name + '. Processing halted.', 2)
return
else:
self.instances = []
# Physics
if object.rigid_body != None:
shape_items = {'SPHERE' : SPHERE_IMPOSTER,
'BOX' : BOX_IMPOSTER,
'MESH' : MESH_IMPOSTER,
'CAPSULE' : CAPSULE_IMPOSTER,
'CONE' : CONE_IMPOSTER,
'CYLINDER' : CYLINDER_IMPOSTER,
'CONVEX_HULL': CONVEX_HULL_IMPOSTER}
shape_type = shape_items[object.rigid_body.collision_shape]
self.physicsImpostor = shape_type
mass = object.rigid_body.mass
if mass < 0.005:
mass = 0
self.physicsMass = mass
self.physicsFriction = object.rigid_body.friction
self.physicsRestitution = object.rigid_body.restitution
# process all of the materials required
maxVerts = MAX_VERTEX_ELEMENTS # change for multi-materials
recipe = BakingRecipe(object)
self.billboardMode = recipe.billboardMode
if recipe.needsBaking:
if recipe.multipleRenders:
Main.warn('Mixing of Cycles & Blender Render in same mesh not supported. No materials exported.', 2)
else:
bakedMat = BakedMaterial(exporter, object, recipe)
exporter.materials.append(bakedMat)
self.materialId = bakedMat.name
else:
bjs_material_slots = []
for slot in object.material_slots:
                # None will be returned either on the first encounter, or when the material must be unique due to baked textures
material = exporter.getMaterial(slot.name)
if (material != None):
Main.log('registered as also a user of material: ' + slot.name, 2)
else:
material = StdMaterial(slot, exporter, object)
exporter.materials.append(material)
bjs_material_slots.append(material)
if len(bjs_material_slots) == 1:
self.materialId = bjs_material_slots[0].name
elif len(bjs_material_slots) > 1:
multimat = MultiMaterial(bjs_material_slots, len(exporter.multiMaterials))
self.materialId = multimat.name
exporter.multiMaterials.append(multimat)
maxVerts = MAX_VERTEX_ELEMENTS_32Bit
else:
Main.warn('No materials have been assigned: ', 2)
# Get mesh
mesh = object.to_mesh(scene, True, 'PREVIEW')
# Triangulate mesh if required
Mesh.mesh_triangulate(mesh)
# Getting vertices and indices
self.positions = []
self.normals = []
self.uvs = [] # not always used
self.uvs2 = [] # not always used
self.colors = [] # not always used
self.indices = []
self.subMeshes = []
hasUV = len(mesh.tessface_uv_textures) > 0
if hasUV:
which = len(mesh.tessface_uv_textures) - 1 if recipe.needsBaking else 0
UVmap = mesh.tessface_uv_textures[which].data
hasUV2 = len(mesh.tessface_uv_textures) > 1 and not recipe.needsBaking
if hasUV2:
UV2map = mesh.tessface_uv_textures[1].data
hasVertexColor = len(mesh.vertex_colors) > 0
if hasVertexColor:
Colormap = mesh.tessface_vertex_colors.active.data
if hasSkeleton:
weightsPerVertex = []
indicesPerVertex = []
            influenceCounts = [0, 0, 0, 0, 0, 0, 0, 0, 0] # 9 slots, so accessed origin 1; index 0 used for all counts greater than 8
totalInfluencers = 0
highestInfluenceObserved = 0
        # used for tracking vertices as they are received
alreadySavedVertices = []
vertices_Normals = []
vertices_UVs = []
vertices_UV2s = []
vertices_Colors = []
vertices_indices = []
vertices_sk_weights = []
vertices_sk_indices = []
self.offsetFace = 0
for v in range(0, len(mesh.vertices)):
alreadySavedVertices.append(False)
vertices_Normals.append([])
vertices_UVs.append([])
vertices_UV2s.append([])
vertices_Colors.append([])
vertices_indices.append([])
vertices_sk_weights.append([])
vertices_sk_indices.append([])
materialsCount = 1 if recipe.needsBaking else max(1, len(object.material_slots))
verticesCount = 0
indicesCount = 0
for materialIndex in range(materialsCount):
if self.offsetFace != 0:
break
subMeshVerticesStart = verticesCount
subMeshIndexStart = indicesCount
for faceIndex in range(startFace, len(mesh.tessfaces)): # For each face
face = mesh.tessfaces[faceIndex]
if face.material_index != materialIndex and not recipe.needsBaking:
continue
if verticesCount + 3 > maxVerts:
self.offsetFace = faceIndex
break
for v in range(3): # For each vertex in face
vertex_index = face.vertices[v]
vertex = mesh.vertices[vertex_index]
position = vertex.co
normal = face.normal if useFlatShading else vertex.normal
#skeletons
if hasSkeleton:
matricesWeights = []
matricesIndices = []
# Getting influences
for group in vertex.groups:
index = group.group
weight = group.weight
# do not need boneIndex; using skeleton.get_index_of_bone()
for boneIndex, bone in enumerate(objArmature.pose.bones):
if object.vertex_groups[index].name == bone.name:
matricesWeights.append(weight)
matricesIndices.append(skeleton.get_index_of_bone(bone.name))
# Texture coordinates
if hasUV:
vertex_UV = UVmap[face.index].uv[v]
if hasUV2:
vertex_UV2 = UV2map[face.index].uv[v]
# Vertex color
if hasVertexColor:
if v == 0:
vertex_Color = Colormap[face.index].color1
if v == 1:
vertex_Color = Colormap[face.index].color2
if v == 2:
vertex_Color = Colormap[face.index].color3
# Check if the current vertex is already saved
alreadySaved = alreadySavedVertices[vertex_index] and not useFlatShading
if alreadySaved:
alreadySaved = False
# UV
index_UV = 0
for savedIndex in vertices_indices[vertex_index]:
vNormal = vertices_Normals[vertex_index][index_UV]
if (normal.x != vNormal.x or normal.y != vNormal.y or normal.z != vNormal.z):
                                continue
if hasUV:
vUV = vertices_UVs[vertex_index][index_UV]
if (vUV[0] != vertex_UV[0] or vUV[1] != vertex_UV[1]):
continue
if hasUV2:
vUV2 = vertices_UV2s[vertex_index][index_UV]
if (vUV2[0] != vertex_UV2[0] or vUV2[1] != vertex_UV2[1]):
continue
if hasVertexColor:
vColor = vertices_Colors[vertex_index][index_UV]
if (vColor.r != vertex_Color.r or vColor.g != vertex_Color.g or vColor.b != vertex_Color.b):
continue
if hasSkeleton:
vSkWeight = vertices_sk_weights[vertex_index]
vSkIndices = vertices_sk_indices[vertex_index]
if not same_array(vSkWeight[index_UV], matricesWeights) or not same_array(vSkIndices[index_UV], matricesIndices):
continue
if vertices_indices[vertex_index][index_UV] >= subMeshVerticesStart:
alreadySaved = True
break
index_UV += 1
if (alreadySaved):
# Reuse vertex
index = vertices_indices[vertex_index][index_UV]
else:
# Export new one
index = verticesCount
alreadySavedVertices[vertex_index] = True
vertices_Normals[vertex_index].append(normal)
self.normals.append(normal)
if hasUV:
vertices_UVs[vertex_index].append(vertex_UV)
self.uvs.append(vertex_UV[0])
self.uvs.append(vertex_UV[1])
if hasUV2:
vertices_UV2s[vertex_index].append(vertex_UV2)
self.uvs2.append(vertex_UV2[0])
self.uvs2.append(vertex_UV2[1])
if hasVertexColor:
vertices_Colors[vertex_index].append(vertex_Color)
self.colors.append(vertex_Color.r)
self.colors.append(vertex_Color.g)
self.colors.append(vertex_Color.b)
self.colors.append(1.0)
if hasSkeleton:
vertices_sk_weights[vertex_index].append(matricesWeights)
vertices_sk_indices[vertex_index].append(matricesIndices)
nInfluencers = len(matricesWeights)
totalInfluencers += nInfluencers
if nInfluencers <= 8:
influenceCounts[nInfluencers] += 1
else:
influenceCounts[0] += 1
highestInfluenceObserved = nInfluencers if nInfluencers > highestInfluenceObserved else highestInfluenceObserved
weightsPerVertex.append(matricesWeights)
indicesPerVertex.append(matricesIndices)
vertices_indices[vertex_index].append(index)
self.positions.append(position)
verticesCount += 1
self.indices.append(index)
indicesCount += 1
self.subMeshes.append(SubMesh(materialIndex, subMeshVerticesStart, subMeshIndexStart, verticesCount - subMeshVerticesStart, indicesCount - subMeshIndexStart))
if verticesCount > MAX_VERTEX_ELEMENTS:
Main.warn('Due to multi-materials / Shapekeys & this meshes size, 32bit indices must be used. This may not run on all hardware.', 2)
BakedMaterial.meshBakingClean(object)
Main.log('num positions : ' + str(len(self.positions)), 2)
Main.log('num normals : ' + str(len(self.normals )), 2)
Main.log('num uvs : ' + str(len(self.uvs )), 2)
Main.log('num uvs2 : ' + str(len(self.uvs2 )), 2)
Main.log('num colors : ' + str(len(self.colors )), 2)
Main.log('num indices : ' + str(len(self.indices )), 2)
if hasSkeleton:
Main.log('Skeleton stats: ', 2)
self.toFixedInfluencers(weightsPerVertex, indicesPerVertex, object.data.maxInfluencers, highestInfluenceObserved)
if (COMPRESS_MATRIX_INDICES):
self.skeletonIndices = Mesh.packSkeletonIndices(self.skeletonIndices)
if (self.numBoneInfluencers > 4):
self.skeletonIndicesExtra = Mesh.packSkeletonIndices(self.skeletonIndicesExtra)
Main.log('Total Influencers: ' + format_f(totalInfluencers), 3)
Main.log('Avg # of influencers per vertex: ' + format_f(totalInfluencers / len(self.positions)), 3)
Main.log('Highest # of influencers observed: ' + str(highestInfluenceObserved) + ', num vertices with this: ' + format_int(influenceCounts[highestInfluenceObserved if highestInfluenceObserved < 9 else 0]), 3)
Main.log('exported as ' + str(self.numBoneInfluencers) + ' influencers', 3)
nWeights = len(self.skeletonWeights) + (len(self.skeletonWeightsExtra) if hasattr(self, 'skeletonWeightsExtra') else 0)
Main.log('num skeletonWeights and skeletonIndices: ' + str(nWeights), 3)
numZeroAreaFaces = self.find_zero_area_faces()
if numZeroAreaFaces > 0:
Main.warn('# of 0 area faces found: ' + str(numZeroAreaFaces), 2)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def find_zero_area_faces(self):
nFaces = int(len(self.indices) / 3)
nZeroAreaFaces = 0
for f in range(0, nFaces):
faceOffset = f * 3
p1 = self.positions[self.indices[faceOffset ]]
p2 = self.positions[self.indices[faceOffset + 1]]
p3 = self.positions[self.indices[faceOffset + 2]]
if same_vertex(p1, p2) or same_vertex(p1, p3) or same_vertex(p2, p3): nZeroAreaFaces += 1
return nZeroAreaFaces
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def mesh_triangulate(mesh):
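        # Triangulates the mesh in place through a temporary bmesh, then rebuilds
        # the tessfaces; if bmesh is unavailable the mesh is silently left as-is.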
try:
import bmesh
bm = bmesh.new()
bm.from_mesh(mesh)
bmesh.ops.triangulate(bm, faces = bm.faces)
bm.to_mesh(mesh)
mesh.calc_tessface()
bm.free()
except:
pass
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def toFixedInfluencers(self, weightsPerVertex, indicesPerVertex, maxInfluencers, highestObserved):
if (maxInfluencers > 8 or maxInfluencers < 1):
maxInfluencers = 8
Main.warn('Maximum # of influencers invalid, set to 8', 3)
self.numBoneInfluencers = maxInfluencers if maxInfluencers < highestObserved else highestObserved
needExtras = self.numBoneInfluencers > 4
maxInfluencersExceeded = 0
fixedWeights = []
fixedIndices = []
fixedWeightsExtra = []
fixedIndicesExtra = []
for i in range(len(weightsPerVertex)):
weights = weightsPerVertex[i]
indices = indicesPerVertex[i]
nInfluencers = len(weights)
if (nInfluencers > self.numBoneInfluencers):
maxInfluencersExceeded += 1
Mesh.sortByDescendingInfluence(weights, indices)
for j in range(4):
fixedWeights.append(weights[j] if nInfluencers > j else 0.0)
fixedIndices.append(indices[j] if nInfluencers > j else 0 )
if needExtras:
for j in range(4, 8):
fixedWeightsExtra.append(weights[j] if nInfluencers > j else 0.0)
fixedIndicesExtra.append(indices[j] if nInfluencers > j else 0 )
self.skeletonWeights = fixedWeights
self.skeletonIndices = fixedIndices
if needExtras:
self.skeletonWeightsExtra = fixedWeightsExtra
self.skeletonIndicesExtra = fixedIndicesExtra
if maxInfluencersExceeded > 0:
Main.warn('Maximum # of influencers exceeded for ' + format_int(maxInfluencersExceeded) + ' vertices, extras ignored', 3)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# sorts one set of weights & indices by descending weight, by reference
    # not shown to help with MakeHuman, but did not hurt; kept in just so it is not lost for the future.
@staticmethod
def sortByDescendingInfluence(weights, indices):
notSorted = True
while(notSorted):
notSorted = False
for idx in range(1, len(weights)):
if weights[idx - 1] < weights[idx]:
tmp = weights[idx]
weights[idx ] = weights[idx - 1]
weights[idx - 1] = tmp
tmp = indices[idx]
indices[idx ] = indices[idx - 1]
indices[idx - 1] = tmp
notSorted = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume that toFixedInfluencers has already run, which ensures indices length is a multiple of 4
@staticmethod
def packSkeletonIndices(indices):
compressedIndices = []
for i in range(math.floor(len(indices) / 4)):
idx = i * 4
matricesIndicesCompressed = indices[idx ]
matricesIndicesCompressed += indices[idx + 1] << 8
matricesIndicesCompressed += indices[idx + 2] << 16
matricesIndicesCompressed += indices[idx + 3] << 24
compressedIndices.append(matricesIndicesCompressed)
return compressedIndices
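    # Worked example of the packing above: indices [1, 2, 3, 4] compress to
    # [1 + (2 << 8) + (3 << 16) + (4 << 24)] == [67305985], i.e. one int per 4 influencers.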
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
if hasattr(self, 'parentId'): write_string(file_handler, 'parentId', self.parentId)
if hasattr(self, 'materialId'): write_string(file_handler, 'materialId', self.materialId)
write_int(file_handler, 'billboardMode', self.billboardMode)
write_vector(file_handler, 'position', self.position)
if hasattr(self, "rotationQuaternion"):
write_quaternion(file_handler, 'rotationQuaternion', self.rotationQuaternion)
else:
write_vector(file_handler, 'rotation', self.rotation)
write_vector(file_handler, 'scaling', self.scaling)
write_bool(file_handler, 'isVisible', self.isVisible)
write_bool(file_handler, 'freezeWorldMatrix', self.freezeWorldMatrix)
write_bool(file_handler, 'isEnabled', self.isEnabled)
write_bool(file_handler, 'checkCollisions', self.checkCollisions)
write_bool(file_handler, 'receiveShadows', self.receiveShadows)
if hasattr(self, 'physicsImpostor'):
write_int(file_handler, 'physicsImpostor', self.physicsImpostor)
write_float(file_handler, 'physicsMass', self.physicsMass)
write_float(file_handler, 'physicsFriction', self.physicsFriction)
write_float(file_handler, 'physicsRestitution', self.physicsRestitution)
# Geometry
if hasattr(self, 'skeletonId'):
write_int(file_handler, 'skeletonId', self.skeletonId)
write_int(file_handler, 'numBoneInfluencers', self.numBoneInfluencers)
write_vector_array(file_handler, 'positions', self.positions)
write_vector_array(file_handler, 'normals' , self.normals )
if len(self.uvs) > 0:
write_array(file_handler, 'uvs', self.uvs)
if len(self.uvs2) > 0:
write_array(file_handler, 'uvs2', self.uvs2)
if len(self.colors) > 0:
write_array(file_handler, 'colors', self.colors)
if hasattr(self, 'skeletonWeights'):
write_array(file_handler, 'matricesWeights', self.skeletonWeights)
write_array(file_handler, 'matricesIndices', self.skeletonIndices)
if hasattr(self, 'skeletonWeightsExtra'):
write_array(file_handler, 'matricesWeightsExtra', self.skeletonWeightsExtra)
write_array(file_handler, 'matricesIndicesExtra', self.skeletonIndicesExtra)
write_array(file_handler, 'indices', self.indices)
# Sub meshes
file_handler.write('\n,"subMeshes":[')
first = True
for subMesh in self.subMeshes:
if first == False:
file_handler.write(',')
subMesh.to_scene_file(file_handler)
first = False
file_handler.write(']')
super().to_scene_file(file_handler) # Animations
# Instances
first = True
file_handler.write('\n,"instances":[')
for instance in self.instances:
if first == False:
file_handler.write(',')
instance.to_scene_file(file_handler)
first = False
file_handler.write(']')
# Close mesh
file_handler.write('}\n')
self.alreadyExported = True
#===============================================================================
class MeshInstance:
def __init__(self, name, position, rotation, rotationQuaternion, scaling, freezeWorldMatrix):
self.name = name
self.position = position
if rotation is not None:
self.rotation = rotation
if rotationQuaternion is not None:
self.rotationQuaternion = rotationQuaternion
self.scaling = scaling
self.freezeWorldMatrix = freezeWorldMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_vector(file_handler, 'position', self.position)
if hasattr(self, 'rotation'):
write_vector(file_handler, 'rotation', self.rotation)
else:
write_quaternion(file_handler, 'rotationQuaternion', self.rotationQuaternion)
write_vector(file_handler, 'scaling', self.scaling)
# freeze World Matrix currently ignored for instances
write_bool(file_handler, 'freezeWorldMatrix', self.freezeWorldMatrix)
file_handler.write('}')
#===============================================================================
class Node(FCurveAnimatable):
def __init__(self, node):
Main.log('processing begun of node: ' + node.name)
        self.define_animations(node, True, True, True)  # should animations be done when forcedParent is set?
self.name = node.name
if node.parent and node.parent.type != 'ARMATURE':
self.parentId = node.parent.name
loc, rot, scale = node.matrix_local.decompose()
self.position = loc
if node.rotation_mode == 'QUATERNION':
self.rotationQuaternion = rot
else:
self.rotation = scale_vector(rot.to_euler('XYZ'), -1)
self.scaling = scale
self.isVisible = False
self.isEnabled = True
self.checkCollisions = False
self.billboardMode = BILLBOARDMODE_NONE
self.castShadows = False
self.receiveShadows = False
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
if hasattr(self, 'parentId'): write_string(file_handler, 'parentId', self.parentId)
write_vector(file_handler, 'position', self.position)
if hasattr(self, "rotationQuaternion"):
write_quaternion(file_handler, "rotationQuaternion", self.rotationQuaternion)
else:
write_vector(file_handler, 'rotation', self.rotation)
write_vector(file_handler, 'scaling', self.scaling)
write_bool(file_handler, 'isVisible', self.isVisible)
write_bool(file_handler, 'isEnabled', self.isEnabled)
write_bool(file_handler, 'checkCollisions', self.checkCollisions)
write_int(file_handler, 'billboardMode', self.billboardMode)
write_bool(file_handler, 'receiveShadows', self.receiveShadows)
super().to_scene_file(file_handler) # Animations
file_handler.write('}')
#===============================================================================
class SubMesh:
def __init__(self, materialIndex, verticesStart, indexStart, verticesCount, indexCount):
self.materialIndex = materialIndex
self.verticesStart = verticesStart
self.indexStart = indexStart
self.verticesCount = verticesCount
self.indexCount = indexCount
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_int(file_handler, 'materialIndex', self.materialIndex, True)
write_int(file_handler, 'verticesStart', self.verticesStart)
write_int(file_handler, 'verticesCount', self.verticesCount)
write_int(file_handler, 'indexStart' , self.indexStart)
write_int(file_handler, 'indexCount' , self.indexCount)
file_handler.write('}')
#===============================================================================
class Bone:
def __init__(self, bone, skeleton, bonesSoFar):
self.index = len(bonesSoFar)
Main.log('processing begun of bone: ' + bone.name + ', index: '+ str(self.index), 2)
self.name = bone.name
self.length = bone.length
self.posedBone = bone # record so can be used by get_matrix, called by append_animation_pose
self.parentBone = bone.parent
self.matrix_world = skeleton.matrix_world
self.matrix = self.get_bone_matrix(True)
self.parentBoneIndex = Skeleton.get_bone(bone.parent.name, bonesSoFar).index if bone.parent else -1
#animation
if (skeleton.animation_data):
self.animation = Animation(ANIMATIONTYPE_MATRIX, ANIMATIONLOOPMODE_CYCLE, 'anim', '_matrix')
self.previousBoneMatrix = None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def append_animation_pose(self, frame, force = False):
currentBoneMatrix = self.get_bone_matrix(True)
if (force or not same_matrix4(currentBoneMatrix, self.previousBoneMatrix)):
self.animation.frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult):
return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_matrix(bone, matrix_world, doParentMult):
SystemMatrix = mathutils.Matrix.Scale(-1, 4, mathutils.Vector((0, 0, 1))) * mathutils.Matrix.Rotation(math.radians(-90), 4, 'X')
if (bone.parent and doParentMult):
return (SystemMatrix * matrix_world * bone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bone.matrix)
else:
return SystemMatrix * matrix_world * bone.matrix
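# Interpretation (hedged, not stated by the original code): SystemMatrix appears to flip the Z axis and
# rotate -90 degrees about X, converting Blender's right-handed Z-up space into the left-handed Y-up
# space the .babylon format expects; for parented bones the result is expressed relative to the parent
# by pre-multiplying with the inverse of the parent's converted matrix.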
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('\n{')
write_string(file_handler, 'name', self.name, True)
write_int(file_handler, 'index', self.index)
write_matrix4(file_handler, 'matrix', self.matrix)
write_matrix4(file_handler, 'rest', self.rest)
write_int(file_handler, 'parentBoneIndex', self.parentBoneIndex)
write_float(file_handler, 'length', self.length)
#animation
if hasattr(self, 'animation'):
file_handler.write('\n,"animation":')
self.animation.to_scene_file(file_handler)
file_handler.write('}')
#===============================================================================
class Skeleton:
def __init__(self, skeleton, scene, id):
Main.log('processing begun of skeleton: ' + skeleton.name + ', id: '+ str(id))
self.name = skeleton.name
self.id = id
self.bones = []
for bone in skeleton.pose.bones:
if scene.ignoreIKBones and ('.ik' in bone.name.lower() or 'ik.' in bone.name.lower() ):
Main.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, skeleton, self.bones))
if (skeleton.animation_data):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(skeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Main.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set's only work when there is an active object, switch bones to edit mode to rest position
scene.objects.active = skeleton
bpy.ops.object.mode_set(mode='EDIT')
# dimensions when in edit mode, are those at rest
self.dimensions = skeleton.dimensions
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in skeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones could be being skipped, looking up index of bone in second pass of mesh required
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in skeleton')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_int(file_handler, 'id', self.id) # keep int for legacy of original exporter
write_vector(file_handler, 'dimensionsAtRest', self.dimensions)
file_handler.write(',"bones":[')
first = True
for bone in self.bones:
if first != True:
file_handler.write(',')
first = False
bone.to_scene_file(file_handler)
file_handler.write(']')
if hasattr(self, 'ranges'):
file_handler.write('\n,"ranges":[')
first = True
for range in self.ranges:
if first != True:
file_handler.write(',')
first = False
range.to_scene_file(file_handler)
file_handler.write(']')
file_handler.write('}')
#===============================================================================
class Camera(FCurveAnimatable):
def __init__(self, camera):
if camera.parent and camera.parent.type != 'ARMATURE':
self.parentId = camera.parent.name
self.CameraType = camera.data.CameraType
self.name = camera.name
Main.log('processing begun of camera (' + self.CameraType + '): ' + self.name)
self.define_animations(camera, True, True, False, math.pi / 2)
self.position = camera.location
# for quaternions, convert to euler XYZ, otherwise, use the default rotation_euler
eul = camera.rotation_quaternion.to_euler("XYZ") if camera.rotation_mode == 'QUATERNION' else camera.rotation_euler
self.rotation = mathutils.Vector((-eul[0] + math.pi / 2, eul[1], -eul[2]))
self.fov = camera.data.angle
self.minZ = camera.data.clip_start
self.maxZ = camera.data.clip_end
self.speed = 1.0
self.inertia = 0.9
self.checkCollisions = camera.data.checkCollisions
self.applyGravity = camera.data.applyGravity
self.ellipsoid = camera.data.ellipsoid
self.Camera3DRig = camera.data.Camera3DRig
self.interaxialDistance = camera.data.interaxialDistance
for constraint in camera.constraints:
if constraint.type == 'TRACK_TO':
self.lockedTargetId = constraint.target.name
break
if self.CameraType == ARC_ROTATE_CAM or self.CameraType == FOLLOW_CAM:
if not hasattr(self, 'lockedTargetId'):
Main.warn('Camera type with mandatory target specified, but no target to track set. Ignored', 2)
self.fatalProblem = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def update_for_target_attributes(self, meshesAndNodes):
if not hasattr(self, 'lockedTargetId'): return
# find the actual mesh being tracked, so properties can be derived
targetFound = False
for mesh in meshesAndNodes:
if mesh.name == self.lockedTargetId:
targetMesh = mesh
targetFound = True
break
xApart = 3 if not targetFound else self.position.x - targetMesh.position.x
yApart = 3 if not targetFound else self.position.y - targetMesh.position.y
zApart = 3 if not targetFound else self.position.z - targetMesh.position.z
distance3D = math.sqrt(xApart * xApart + yApart * yApart + zApart * zApart)
alpha = math.atan2(yApart, xApart)
beta = math.atan2(yApart, zApart)
if self.CameraType == FOLLOW_CAM:
self.followHeight = zApart
self.followDistance = distance3D
self.followRotation = 90 + (alpha * 180 / math.pi)
elif self.CameraType == ARC_ROTATE_CAM:
self.arcRotAlpha = alpha
self.arcRotBeta = beta
self.arcRotRadius = distance3D
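# Illustrative example (hypothetical numbers, not from the source): if the camera sits at (3, 4, 0)
# and the tracked mesh at the origin, then xApart = 3, yApart = 4, zApart = 0, distance3D = 5 and
# alpha = atan2(4, 3) ~ 0.927 rad (~53.1 degrees); a FOLLOW_CAM would get followDistance = 5 and
# followRotation = 90 + 53.1 = 143.1, while an ARC_ROTATE_CAM would get arcRotAlpha ~ 0.927 and
# arcRotRadius = 5.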
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
write_vector(file_handler, 'position', self.position)
write_vector(file_handler, 'rotation', self.rotation)
write_float(file_handler, 'fov', self.fov)
write_float(file_handler, 'minZ', self.minZ)
write_float(file_handler, 'maxZ', self.maxZ)
write_float(file_handler, 'speed', self.speed)
write_float(file_handler, 'inertia', self.inertia)
write_bool(file_handler, 'checkCollisions', self.checkCollisions)
write_bool(file_handler, 'applyGravity', self.applyGravity)
write_array3(file_handler, 'ellipsoid', self.ellipsoid)
# always assign rig, even when none, Reason: Could have VR camera with different Rig than default
write_int(file_handler, 'cameraRigMode', self.Camera3DRig)
write_float(file_handler, 'interaxial_distance', self.interaxialDistance)
write_string(file_handler, 'type', self.CameraType)
if hasattr(self, 'parentId'): write_string(file_handler, 'parentId', self.parentId)
if self.CameraType == FOLLOW_CAM:
write_float(file_handler, 'heightOffset', self.followHeight)
write_float(file_handler, 'radius', self.followDistance)
write_float(file_handler, 'rotationOffset', self.followRotation)
elif self.CameraType == ARC_ROTATE_CAM:
write_float(file_handler, 'alpha', self.arcRotAlpha)
write_float(file_handler, 'beta', self.arcRotBeta)
write_float(file_handler, 'radius', self.arcRotRadius)
if hasattr(self, 'lockedTargetId'):
write_string(file_handler, 'lockedTargetId', self.lockedTargetId)
super().to_scene_file(file_handler) # Animations
file_handler.write('}')
#===============================================================================
class Light(FCurveAnimatable):
def __init__(self, light):
if light.parent and light.parent.type != 'ARMATURE':
self.parentId = light.parent.name
self.name = light.name
Main.log('processing begun of light (' + light.data.type + '): ' + self.name)
self.define_animations(light, False, True, False)
light_type_items = {'POINT': POINT_LIGHT, 'SUN': DIRECTIONAL_LIGHT, 'SPOT': SPOT_LIGHT, 'HEMI': HEMI_LIGHT, 'AREA': POINT_LIGHT}
self.light_type = light_type_items[light.data.type]
if self.light_type == POINT_LIGHT:
self.position = light.location
if hasattr(light.data, 'use_sphere'):
if light.data.use_sphere:
self.range = light.data.distance
elif self.light_type == DIRECTIONAL_LIGHT:
self.position = light.location
self.direction = Light.get_direction(light.matrix_local)
elif self.light_type == SPOT_LIGHT:
self.position = light.location
self.direction = Light.get_direction(light.matrix_local)
self.angle = light.data.spot_size
self.exponent = light.data.spot_blend * 2
if light.data.use_sphere:
self.range = light.data.distance
else:
# Hemi
matrix_local = light.matrix_local.copy()
matrix_local.translation = mathutils.Vector((0, 0, 0))
self.direction = (mathutils.Vector((0, 0, -1)) * matrix_local)
self.direction = scale_vector(self.direction, -1)
self.groundColor = mathutils.Color((0, 0, 0))
self.intensity = light.data.energy
self.diffuse = light.data.color if light.data.use_diffuse else mathutils.Color((0, 0, 0))
self.specular = light.data.color if light.data.use_specular else mathutils.Color((0, 0, 0))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
write_float(file_handler, 'type', self.light_type)
if hasattr(self, 'parentId' ): write_string(file_handler, 'parentId' , self.parentId )
if hasattr(self, 'position' ): write_vector(file_handler, 'position' , self.position )
if hasattr(self, 'direction' ): write_vector(file_handler, 'direction' , self.direction )
if hasattr(self, 'angle' ): write_float (file_handler, 'angle' , self.angle )
if hasattr(self, 'exponent' ): write_float (file_handler, 'exponent' , self.exponent )
if hasattr(self, 'groundColor'): write_color (file_handler, 'groundColor', self.groundColor)
if hasattr(self, 'range' ): write_float (file_handler, 'range' , self.range )
write_float(file_handler, 'intensity', self.intensity)
write_color(file_handler, 'diffuse', self.diffuse)
write_color(file_handler, 'specular', self.specular)
super().to_scene_file(file_handler) # Animations
file_handler.write('}')
@staticmethod
def get_direction(matrix):
return (matrix.to_3x3() * mathutils.Vector((0.0, 0.0, -1.0))).normalized()
#===============================================================================
class ShadowGenerator:
def __init__(self, lamp, meshesAndNodes, scene):
Main.log('processing begun of shadows for light: ' + lamp.name)
self.lightId = lamp.name
self.mapSize = lamp.data.shadowMapSize
self.shadowBias = lamp.data.shadowBias
if lamp.data.shadowMap == VARIANCE_SHADOWS:
self.useVarianceShadowMap = True
elif lamp.data.shadowMap == POISSON_SHADOWS:
self.usePoissonSampling = True
elif lamp.data.shadowMap == BLUR_VARIANCE_SHADOWS:
self.useBlurVarianceShadowMap = True
self.shadowBlurScale = lamp.data.shadowBlurScale
self.shadowBlurBoxOffset = lamp.data.shadowBlurBoxOffset
# .babylon specific section
self.shadowCasters = []
for mesh in meshesAndNodes:
if (mesh.castShadows):
self.shadowCasters.append(mesh.name)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_int(file_handler, 'mapSize', self.mapSize, True)
write_string(file_handler, 'lightId', self.lightId)
write_float(file_handler, 'bias', self.shadowBias)
if hasattr(self, 'useVarianceShadowMap') :
write_bool(file_handler, 'useVarianceShadowMap', self.useVarianceShadowMap)
elif hasattr(self, 'usePoissonSampling'):
write_bool(file_handler, 'usePoissonSampling', self.usePoissonSampling)
elif hasattr(self, 'useBlurVarianceShadowMap'):
write_bool(file_handler, 'useBlurVarianceShadowMap', self.useBlurVarianceShadowMap)
write_int(file_handler, 'blurScale', self.shadowBlurScale)
write_int(file_handler, 'blurBoxOffset', self.shadowBlurBoxOffset)
file_handler.write(',"renderList":[')
first = True
for caster in self.shadowCasters:
if first != True:
file_handler.write(',')
first = False
file_handler.write('"' + caster + '"')
file_handler.write(']')
file_handler.write('}')
#===============================================================================
class MultiMaterial:
def __init__(self, material_slots, idx):
self.name = Main.nameSpace + '.' + 'Multimaterial#' + str(idx)
Main.log('processing begun of multimaterial: ' + self.name, 2)
self.material_slots = material_slots
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
file_handler.write(',"materials":[')
first = True
for material in self.material_slots:
if first != True:
file_handler.write(',')
file_handler.write('"' + material.name +'"')
first = False
file_handler.write(']')
file_handler.write('}')
#===============================================================================
class Texture:
def __init__(self, slot, level, textureOrImage, mesh, exporter):
wasBaked = not hasattr(textureOrImage, 'uv_layer')
if wasBaked:
image = textureOrImage
texture = None
repeat = False
self.hasAlpha = False
self.coordinatesIndex = 0
else:
texture = textureOrImage
image = texture.texture.image
repeat = texture.texture.extension == 'REPEAT'
self.hasAlpha = texture.texture.use_alpha
usingMap = texture.uv_layer
if len(usingMap) == 0:
usingMap = mesh.data.uv_textures[0].name
Main.log('Image texture found, type: ' + slot + ', mapped using: "' + usingMap + '"', 4)
if mesh.data.uv_textures[0].name == usingMap:
self.coordinatesIndex = 0
elif mesh.data.uv_textures[1].name == usingMap:
self.coordinatesIndex = 1
else:
Main.warn('Texture is not mapped to UV or UV2; assigned to the first UV map', 5)
self.coordinatesIndex = 0
# always write the file out, since base64 encoding is easiest from a file
try:
imageFilepath = os.path.normpath(bpy.path.abspath(image.filepath))
basename = os.path.basename(imageFilepath)
internalImage = image.packed_file or wasBaked
# when coming from either a packed image or a baked image, then save_render
if internalImage:
if exporter.scene.inlineTextures:
textureFile = os.path.join(exporter.textureDir, basename + "temp")
else:
textureFile = os.path.join(exporter.textureDir, basename)
image.save_render(textureFile)
# when backed by an actual file, copy to target dir, unless inlining
else:
textureFile = bpy.path.abspath(image.filepath)
if not exporter.scene.inlineTextures:
shutil.copy(textureFile, exporter.textureDir)
except:
ex = sys.exc_info()
Main.warn('Error encountered processing image file: ' + image.filepath + ', Error: ' + str(ex[1]))
if exporter.scene.inlineTextures:
# base64 is easiest from a file, so sometimes a temp file was made above; need to delete those
with open(textureFile, "rb") as image_file:
asString = base64.b64encode(image_file.read()).decode()
self.encoded_URI = 'data:image/' + image.file_format + ';base64,' + asString
if internalImage:
os.remove(textureFile)
# capture texture attributes
self.slot = slot
self.name = basename
self.level = level
if (texture and texture.mapping == 'CUBE'):
self.coordinatesMode = CUBIC_MODE
elif (texture and texture.mapping == 'SPHERE'):
self.coordinatesMode = SPHERICAL_MODE
else:
self.coordinatesMode = EXPLICIT_MODE
self.uOffset = texture.offset.x if texture else 0.0
self.vOffset = texture.offset.y if texture else 0.0
self.uScale = texture.scale.x if texture else 1.0
self.vScale = texture.scale.y if texture else 1.0
self.uAng = 0
self.vAng = 0
self.wAng = 0
if (repeat):
if (texture.texture.use_mirror_x):
self.wrapU = MIRROR_ADDRESSMODE
else:
self.wrapU = WRAP_ADDRESSMODE
if (texture.texture.use_mirror_y):
self.wrapV = MIRROR_ADDRESSMODE
else:
self.wrapV = WRAP_ADDRESSMODE
else:
self.wrapU = CLAMP_ADDRESSMODE
self.wrapV = CLAMP_ADDRESSMODE
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write(', \n"' + self.slot + '":{')
write_string(file_handler, 'name', self.name, True)
write_float(file_handler, 'level', self.level)
write_float(file_handler, 'hasAlpha', self.hasAlpha)
write_int(file_handler, 'coordinatesMode', self.coordinatesMode)
write_float(file_handler, 'uOffset', self.uOffset)
write_float(file_handler, 'vOffset', self.vOffset)
write_float(file_handler, 'uScale', self.uScale)
write_float(file_handler, 'vScale', self.vScale)
write_float(file_handler, 'uAng', self.uAng)
write_float(file_handler, 'vAng', self.vAng)
write_float(file_handler, 'wAng', self.wAng)
write_int(file_handler, 'wrapU', self.wrapU)
write_int(file_handler, 'wrapV', self.wrapV)
write_int(file_handler, 'coordinatesIndex', self.coordinatesIndex)
if hasattr(self,'encoded_URI'):
write_string(file_handler, 'base64String', self.encoded_URI)
file_handler.write('}')
#===============================================================================
# need to evaluate the need to bake a mesh before even starting; class also stores specific types of bakes
class BakingRecipe:
def __init__(self, mesh, forceBaking = False):
# initialize all members
self.needsBaking = forceBaking
self.diffuseBaking = forceBaking
self.ambientBaking = False
self.opacityBaking = False
self.reflectionBaking = False
self.emissiveBaking = False
self.bumpBaking = False
self.specularBaking = False
# need to determine whether only a single renderer is in use
self.cyclesRender = False
blenderRender = False
# transfer from Mesh custom properties
self.bakeSize = mesh.data.bakeSize
self.bakeQuality = mesh.data.bakeQuality # for lossy compression formats
# accumulators set by Blender Game
self.backFaceCulling = True # used only when baking
self.billboardMode = BILLBOARDMODE_ALL if len(mesh.material_slots) == 1 and mesh.material_slots[0].material.game_settings.face_orientation == 'BILLBOARD' else BILLBOARDMODE_NONE
# Cycles specific, need to get the node trees of each material
self.nodeTrees = []
for material_slot in mesh.material_slots:
# a material slot is not a reference to an actual material; need to look up
material = material_slot.material
self.backFaceCulling &= material.game_settings.use_backface_culling
# testing for Cycles renderer has to be different
if material.use_nodes == True:
self.needsBaking = True
self.cyclesRender = True
self.nodeTrees.append(material.node_tree)
for node in material.node_tree.nodes:
id = node.bl_idname
if id == 'ShaderNodeBsdfDiffuse':
self.diffuseBaking = True
if id == 'ShaderNodeAmbientOcclusion':
self.ambientBaking = True
# there is no opacity baking for Cycles AFAIK
if id == '':
self.opacityBaking = True
if id == 'ShaderNodeEmission':
self.emissiveBaking = True
if id == 'ShaderNodeNormal' or id == 'ShaderNodeNormalMap':
self.bumpBaking = True
if id == '':
self.specularBaking = True
else:
blenderRender = True
nDiffuseImages = 0
nReflectionImages = 0
nAmbientImages = 0
nOpacityImages = 0
nEmissiveImages = 0
nBumpImages = 0
nSpecularImages = 0
textures = [mtex for mtex in material.texture_slots if mtex and mtex.texture]
for mtex in textures:
# ignore empty slots
if mtex.texture.type == 'NONE':
continue
# for images, just need to make sure there is only 1 per type
if mtex.texture.type == 'IMAGE' and not forceBaking:
if mtex.use_map_diffuse or mtex.use_map_color_diffuse:
if mtex.texture_coords == 'REFLECTION':
nReflectionImages += 1
else:
nDiffuseImages += 1
if mtex.use_map_ambient:
nAmbientImages += 1
if mtex.use_map_alpha:
nOpacityImages += 1
if mtex.use_map_emit:
nEmissiveImages += 1
if mtex.use_map_normal:
nBumpImages += 1
if mtex.use_map_color_spec:
nSpecularImages += 1
else:
self.needsBaking = True
if mtex.use_map_diffuse or mtex.use_map_color_diffuse:
if mtex.texture_coords == 'REFLECTION':
self.reflectionBaking = True
else:
self.diffuseBaking = True
if mtex.use_map_ambient:
self.ambientBaking = True
if mtex.use_map_alpha:
self.opacityBaking = True
if mtex.use_map_emit:
self.emissiveBaking = True
if mtex.use_map_normal:
self.bumpBaking = True
if mtex.use_map_color_spec:
self.specularBaking = True
# 2nd pass to check for multiples of a given image type
if nDiffuseImages > 1:
self.needsBaking = self.diffuseBaking = True
if nReflectionImages > 1:
self.needsBaking = self.reflectionBaking = True
if nAmbientImages > 1:
self.needsBaking = self.ambientBaking = True
if nOpacityImages > 1:
self.needsBaking = self.opacityBaking = True
if nEmissiveImages > 1:
self.needsBaking = self.emissiveBaking = True
if nBumpImages > 1:
self.needsBaking = self.bumpBaking = True
if nSpecularImages > 1:
self.needsBaking = self.specularBaking = True
self.multipleRenders = blenderRender and self.cyclesRender
# check for a really old .blend file, e.g. 2.49, to ensure that everything required exists
if self.needsBaking and bpy.data.screens.find('UV Editing') == -1:
Main.warn('Contains material requiring baking, but resources not available. Probably .blend very old', 2)
self.needsBaking = False
#===============================================================================
# Not intended to be instanced directly
class Material:
def __init__(self, checkReadyOnlyOnce):
self.checkReadyOnlyOnce = checkReadyOnlyOnce
# first pass of textures, either appending image type or recording types of bakes to do
self.textures = []
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
write_color(file_handler, 'ambient', self.ambient)
write_color(file_handler, 'diffuse', self.diffuse)
write_color(file_handler, 'specular', self.specular)
write_color(file_handler, 'emissive', self.emissive)
write_float(file_handler, 'specularPower', self.specularPower)
write_float(file_handler, 'alpha', self.alpha)
write_bool(file_handler, 'backFaceCulling', self.backFaceCulling)
write_bool(file_handler, 'checkReadyOnlyOnce', self.checkReadyOnlyOnce)
for texSlot in self.textures:
texSlot.to_scene_file(file_handler)
file_handler.write('}')
#===============================================================================
class StdMaterial(Material):
def __init__(self, material_slot, exporter, mesh):
super().__init__(mesh.data.checkReadyOnlyOnce)
nameSpace = Main.nameSpace if mesh.data.materialNameSpace == DEFAULT_MATERIAL_NAMESPACE else mesh.data.materialNameSpace
self.name = nameSpace + '.' + material_slot.name
Main.log('processing begun of Standard material: ' + material_slot.name, 2)
# a material slot is not a reference to an actual material; need to look up
material = material_slot.material
self.ambient = material.ambient * material.diffuse_color
self.diffuse = material.diffuse_intensity * material.diffuse_color
self.specular = material.specular_intensity * material.specular_color
self.emissive = material.emit * material.diffuse_color
self.specularPower = material.specular_hardness
self.alpha = material.alpha
self.backFaceCulling = material.game_settings.use_backface_culling
textures = [mtex for mtex in material.texture_slots if mtex and mtex.texture]
for mtex in textures:
# test should be unnecessary, since this should be a BakedMaterial; just for completeness
if (mtex.texture.type != 'IMAGE'):
continue
elif not mtex.texture.image:
Main.warn('Material has un-assigned image texture: "' + mtex.name + '" ignored', 3)
continue
elif len(mesh.data.uv_textures) == 0:
Main.warn('Mesh has no UV maps, material: "' + mtex.name + '" ignored', 3)
continue
if mtex.use_map_diffuse or mtex.use_map_color_diffuse:
if mtex.texture_coords == 'REFLECTION':
Main.log('Reflection texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('reflectionTexture', mtex.diffuse_color_factor, mtex, mesh, exporter))
else:
Main.log('Diffuse texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('diffuseTexture', mtex.diffuse_color_factor, mtex, mesh, exporter))
if mtex.use_map_ambient:
Main.log('Ambient texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('ambientTexture', mtex.ambient_factor, mtex, mesh, exporter))
if mtex.use_map_alpha:
if self.alpha > 0:
Main.log('Opacity texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('opacityTexture', mtex.alpha_factor, mtex, mesh, exporter))
else:
Main.warn('Non-standard way to indicate opacity; set material alpha > 0 to also use an opacity texture', 4)
self.alpha = 1
if mtex.use_map_emit:
Main.log('Emissive texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('emissiveTexture', mtex.emit_factor, mtex, mesh, exporter))
if mtex.use_map_normal:
Main.log('Bump texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('bumpTexture', 1.0 / mtex.normal_factor, mtex, mesh, exporter))
if mtex.use_map_color_spec:
Main.log('Specular texture found "' + mtex.name + '"', 3)
self.textures.append(Texture('specularTexture', mtex.specular_color_factor, mtex, mesh, exporter))
#===============================================================================
class BakedMaterial(Material):
def __init__(self, exporter, mesh, recipe):
super().__init__(mesh.data.checkReadyOnlyOnce)
nameSpace = Main.nameSpace if mesh.data.materialNameSpace == DEFAULT_MATERIAL_NAMESPACE else mesh.data.materialNameSpace
self.name = nameSpace + '.' + mesh.name
Main.log('processing begun of baked material: ' + mesh.name, 2)
# any baking already took the values into account. Do not want to apply them again, but do want shadows to show.
# These are the default values from StandardMaterials
self.ambient = mathutils.Color((0, 0, 0))
self.diffuse = mathutils.Color((0.8, 0.8, 0.8)) # needed for shadows, but does not change anything else
self.specular = mathutils.Color((1, 1, 1))
self.emissive = mathutils.Color((0, 0, 0))
self.specularPower = 64
self.alpha = 1.0
self.backFaceCulling = recipe.backFaceCulling
# texture is baked from selected mesh(es); need to ensure this mesh is the only one selected
bpy.ops.object.select_all(action='DESELECT')
mesh.select = True
# store setting to restore
engine = exporter.scene.render.engine
# mode_set's only work when there is an active object
exporter.scene.objects.active = mesh
# UV unwrap operates on a mesh only in edit mode; procedurals can also give a 'no images to be found' error when not done
# select all vertices of the mesh, since smart_project works only with selected vertices
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
# a mesh needs a UV map in order to bake an image. This is not required for procedural textures, so one may not exist
# need to look if it might already be created, if so use the first one
uv = mesh.data.uv_textures[0] if len(mesh.data.uv_textures) > 0 else None
if uv == None:
mesh.data.uv_textures.new('BakingUV')
uv = mesh.data.uv_textures['BakingUV']
uv.active = True
uv.active_render = True
bpy.ops.uv.smart_project(angle_limit = 66.0, island_margin = 0.0, user_area_weight = 1.0, use_aspect = True)
uvName = 'BakingUV' # issues with cycles when not done this way
else:
uvName = uv.name
# create a temporary image & link it to the UV/Image Editor so bake_image works
bpy.data.images.new(name = mesh.name + '_BJS_BAKE', width = recipe.bakeSize, height = recipe.bakeSize, alpha = False, float_buffer = False)
image = bpy.data.images[mesh.name + '_BJS_BAKE']
image.file_format = 'JPEG'
image.mapping = 'UV' # default value
image_settings = exporter.scene.render.image_settings
image_settings.file_format = 'JPEG'
image_settings.quality = recipe.bakeQuality # for lossy compression formats
# image_settings.compression = 100 # Amount of time to determine best compression: 0 = no compression with fast file output, 100 = maximum lossless compression with slow file output
# now go thru all the textures that need to be baked
if recipe.diffuseBaking:
self.bake('diffuseTexture', 'DIFFUSE_COLOR', 'TEXTURE', image, mesh, uvName, exporter, recipe)
if recipe.ambientBaking:
self.bake('ambientTexture', 'AO', 'AO', image, mesh, uvName, exporter, recipe)
if recipe.opacityBaking: # no equivalent found for cycles
self.bake('opacityTexture', None, 'ALPHA', image, mesh, uvName, exporter, recipe)
if recipe.reflectionBaking:
self.bake('reflectionTexture', 'REFLECTION', 'MIRROR_COLOR', image, mesh, uvName, exporter, recipe)
if recipe.emissiveBaking:
self.bake('emissiveTexture', 'EMIT', 'EMIT', image, mesh, uvName, exporter, recipe)
if recipe.bumpBaking:
self.bake('bumpTexture', 'NORMAL', 'NORMALS', image, mesh, uvName, exporter, recipe)
if recipe.specularBaking:
self.bake('specularTexture', 'SPECULAR', 'SPEC_COLOR', image, mesh, uvName, exporter, recipe)
# Toggle vertex selection & mode, if setting changed their value
bpy.ops.mesh.select_all(action='TOGGLE') # still in edit mode toggle select back to previous
bpy.ops.object.mode_set(toggle=True) # change back to Object
bpy.ops.object.select_all(action='TOGGLE') # change scene selection back, not seeming to work
exporter.scene.render.engine = engine
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def bake(self, bjs_type, cycles_type, internal_type, image, mesh, uvName, exporter, recipe):
if recipe.cyclesRender:
if cycles_type is None:
return
self.bakeCycles(cycles_type, image, uvName, recipe.nodeTrees)
else:
self.bakeInternal(internal_type, image, uvName)
self.textures.append(Texture(bjs_type, 1.0, image, mesh, exporter))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def bakeInternal(self, bake_type, image, uvName):
Main.log('Internal baking texture, type: ' + bake_type + ', mapped using: ' + uvName, 3)
# need to use the legal name, since this will become the file name, chars like ':' not legal
legalName = legal_js_identifier(self.name)
image.filepath = legalName + '_' + bake_type + '.jpg'
scene = bpy.context.scene
scene.render.engine = 'BLENDER_RENDER'
scene.render.bake_type = bake_type
# assign the image to the UV Editor, which does not have to be shown
bpy.data.screens['UV Editing'].areas[1].spaces[0].image = image
renderer = scene.render
renderer.use_bake_selected_to_active = False
renderer.use_bake_to_vertex_color = False
renderer.use_bake_clear = True
renderer.bake_quad_split = 'AUTO'
renderer.bake_margin = 5
renderer.use_file_extension = True
renderer.use_bake_normalize = True
renderer.use_bake_antialiasing = True
bpy.ops.object.bake_image()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def bakeCycles(self, bake_type, image, uvName, nodeTrees):
Main.log('Cycles baking texture, type: ' + bake_type + ', mapped using: ' + uvName, 3)
legalName = legal_js_identifier(self.name)
image.filepath = legalName + '_' + bake_type + '.jpg'
scene = bpy.context.scene
scene.render.engine = 'CYCLES'
# create an unlinked temporary node to bake to for each material
for tree in nodeTrees:
bakeNode = tree.nodes.new(type='ShaderNodeTexImage')
bakeNode.image = image
bakeNode.select = True
tree.nodes.active = bakeNode
bpy.ops.object.bake(type = bake_type, use_clear = True, margin = 5, use_selected_to_active = False)
for tree in nodeTrees:
tree.nodes.remove(tree.nodes.active)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def meshBakingClean(mesh):
for uvMap in mesh.data.uv_textures:
if uvMap.name == 'BakingUV':
mesh.data.uv_textures.remove(uvMap)
break
# remove an image if it was baked
for image in bpy.data.images:
if image.name == mesh.name + '_BJS_BAKE':
image.user_clear() # cannot remove image unless 0 references
bpy.data.images.remove(image)
break
#===============================================================================
class AnimationRange:
# constructor called by the static actionPrep method
def __init__(self, name, frames, frameOffset):
# process input args to members
self.name = name
self.frames_in = frames
self.frame_start = AnimationRange.nextStartingFrame(frameOffset)
self.frames_out = []
for frame in self.frames_in:
self.frames_out.append(self.frame_start + frame)
highest_idx = len(self.frames_in) - 1
self.highest_frame_in = self.frames_in [highest_idx]
self.frame_end = self.frames_out[highest_idx]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_string(self):
return self.name + ': ' + ' in[' + format_int(self.frames_in[0]) + ' - ' + format_int(self.highest_frame_in) + '], out[' + format_int(self.frame_start) + ' - ' + format_int(self.frame_end) + ']'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_int(file_handler, 'from', self.frame_start)
write_int(file_handler, 'to', self.frame_end)
file_handler.write('}')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def actionPrep(object, action, includeAllFrames, frameOffset):
# assign the action & test if there is any data for that action for this object
object.animation_data.action = action
if len(object.animation_data.action.fcurves) == 0:
return None
if includeAllFrames:
frame_start = int(action.frame_range[0])
frame_end = int(action.frame_range[1])
frames = range(frame_start, frame_end + 1) # range is not inclusive with 2nd arg
else:
# capture built up from fcurves
frames = dict()
for fcurve in object.animation_data.action.fcurves:
for key in fcurve.keyframe_points:
frame = key.co.x
frames[frame] = True
frames = sorted(frames)
return AnimationRange(action.name, frames, frameOffset)
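# Illustrative example (hypothetical action, not from the source): with includeAllFrames False and an
# action whose fcurves have keyframes at frames 1, 10 and 10 (on two different curves), the dict
# collapses the duplicate and sorted() yields frames == [1.0, 10.0], which become the frames_in of the
# returned AnimationRange.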
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def nextStartingFrame(frameOffset):
if frameOffset == 0: return 0
# ensure a gap of at least 5 frames, starting on an even multiple of 10
frameOffset += 4
remainder = frameOffset % 10
return frameOffset + 10 - remainder
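# Worked example (hypothetical offset, not from the source): a frameOffset of 37 becomes 37 + 4 = 41,
# remainder 41 % 10 = 1, and the method returns 41 + 10 - 1 = 50, i.e. the next multiple of 10 that
# leaves a gap of at least 5 frames after the previous range.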
#===============================================================================
class Animation:
def __init__(self, dataType, loopBehavior, name, propertyInBabylon, attrInBlender = None, mult = 1, xOffset = 0):
self.dataType = dataType
self.framePerSecond = bpy.context.scene.render.fps
self.loopBehavior = loopBehavior
self.name = name
self.propertyInBabylon = propertyInBabylon
# these never get used by Bones, so optional in constructor args
self.attrInBlender = attrInBlender
self.mult = mult
self.xOffset = xOffset
#keys
self.frames = []
self.values = [] # vector3 for ANIMATIONTYPE_VECTOR3 & matrices for ANIMATIONTYPE_MATRIX
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# a separate method outside of the constructor, so it can be called once for each Blender Action the object participates in
def append_range(self, object, animationRange):
# action already assigned, always using poses, not every frame, build up again filtering by attrInBlender
for idx in range(len(animationRange.frames_in)):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
self.frames.append(animationRange.frames_out[idx])
self.values.append(self.get_attr(object))
return len(animationRange.frames_in) > 0
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# for auto animate
def get_first_frame(self):
return self.frames[0] if len(self.frames) > 0 else -1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# for auto animate
def get_last_frame(self):
return self.frames[len(self.frames) - 1] if len(self.frames) > 0 else -1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_scene_file(self, file_handler):
file_handler.write('{')
write_int(file_handler, 'dataType', self.dataType, True)
write_int(file_handler, 'framePerSecond', self.framePerSecond)
file_handler.write(',"keys":[')
first = True
for frame_idx in range(len(self.frames)):
if first != True:
file_handler.write(',')
first = False
file_handler.write('\n{')
write_int(file_handler, 'frame', self.frames[frame_idx], True)
value_idx = self.values[frame_idx]
if self.dataType == ANIMATIONTYPE_MATRIX:
write_matrix4(file_handler, 'values', value_idx)
elif self.dataType == ANIMATIONTYPE_QUATERNION:
write_quaternion(file_handler, 'values', value_idx)
else:
write_vector(file_handler, 'values', value_idx)
file_handler.write('}')
file_handler.write(']') # close keys
# put this at the end to make less crazy looking ]}]]]}}}}}}}]]]],
# since animation is also at the end of the bone, mesh, camera, or light
write_int(file_handler, 'loopBehavior', self.loopBehavior)
write_string(file_handler, 'name', self.name)
write_string(file_handler, 'property', self.propertyInBabylon)
file_handler.write('}')
#===============================================================================
class VectorAnimation(Animation):
def __init__(self, object, propertyInBabylon, attrInBlender, mult = 1, xOffset = 0):
super().__init__(ANIMATIONTYPE_VECTOR3, ANIMATIONLOOPMODE_CYCLE, propertyInBabylon + ' animation', propertyInBabylon, attrInBlender, mult, xOffset)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_attr(self, object):
return scale_vector(getattr(object, self.attrInBlender), self.mult, self.xOffset)
#===============================================================================
class QuaternionAnimation(Animation):
def __init__(self, object, propertyInBabylon, attrInBlender, mult = 1, xOffset = 0):
super().__init__(ANIMATIONTYPE_QUATERNION, ANIMATIONLOOPMODE_CYCLE, propertyInBabylon + ' animation', propertyInBabylon, attrInBlender, mult, xOffset)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_attr(self, object):
return post_rotate_quaternion(getattr(object, self.attrInBlender), self.xOffset)
#===============================================================================
class QuaternionToEulerAnimation(Animation):
def __init__(self, object, propertyInBabylon, attrInBlender, mult = 1, xOffset = 0):
super().__init__(ANIMATIONTYPE_VECTOR3, ANIMATIONLOOPMODE_CYCLE, propertyInBabylon + ' animation', propertyInBabylon, attrInBlender, mult, xOffset)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_attr(self, object):
quat = getattr(object, self.attrInBlender)
eul = quat.to_euler("XYZ")
return scale_vector(eul, self.mult, self.xOffset)
#===============================================================================
# module level formatting methods, called from multiple classes
#===============================================================================
def legal_js_identifier(input):
out = ''
prefix = ''
for char in input:
if len(out) == 0:
if char in '0123456789':
# cannot take the chance that chopping off leading numbers causes name conflicts, e.g. (01.R & 02.R)
prefix += char
continue
elif char.upper() not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
continue
legal = char if char.upper() in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' else '_'
out += legal
if len(prefix) > 0:
out += '_' + prefix
return out
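# Worked example (hypothetical name, not from the source): legal_js_identifier('01.Bone.R') moves the
# leading digits into prefix, maps '.' to '_', and appends the prefix at the end, returning
# 'Bone_R_01'; this keeps names like 01.R and 02.R from colliding once leading digits are removed.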
def format_f(num):
s = MAX_FLOAT_PRECISION % num # rounds to N decimal places while changing to string
s = s.rstrip('0') # ignore trailing zeroes
s = s.rstrip('.') # ignore trailing .
return '0' if s == '-0' else s
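# Worked example (assumes MAX_FLOAT_PRECISION, defined elsewhere in this exporter, is a pattern like
# '%.4f' for illustration): format_f(0.2500) -> '0.25', format_f(3.0) -> '3', and a tiny negative
# value such as -0.00001 rounds to '-0.0000', strips to '-0', and is returned as '0'.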
def format_matrix4(matrix):
tempMatrix = matrix.copy()
tempMatrix.transpose()
ret = ''
first = True
for vect in tempMatrix:
if (first != True):
ret += ','
first = False
ret += format_f(vect[0]) + ',' + format_f(vect[1]) + ',' + format_f(vect[2]) + ',' + format_f(vect[3])
return ret
def format_array3(array):
return format_f(array[0]) + ',' + format_f(array[1]) + ',' + format_f(array[2])
def format_array(array, max_per_line = MAX_VERTEX_ELEMENTS, indent = ''):
ret = ''
first = True
nOnLine = 0
for element in array:
if (first != True):
ret += ','
first = False
ret += format_f(element)
nOnLine += 1
if nOnLine >= max_per_line:
ret += '\n' + indent
nOnLine = 0
return ret
def format_color(color):
return format_f(color.r) + ',' + format_f(color.g) + ',' + format_f(color.b)
def format_vector(vector):
return format_f(vector.x) + ',' + format_f(vector.z) + ',' + format_f(vector.y)
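# Interpretation (hedged): swapping y and z here converts Blender's right-handed Z-up coordinates into
# the left-handed Y-up convention of .babylon files, e.g. a Blender vector (1, 2, 3) is written as
# '1,3,2'; format_quaternion further down applies the same swap and also negates w.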
def format_vector_array(vectorArray, max_per_line = MAX_VERTEX_ELEMENTS, indent = ''):
ret = ''
first = True
nOnLine = 0
for vector in vectorArray:
if (first != True):
ret += ','
first = False
ret += format_vector(vector)
nOnLine += 3
if nOnLine >= max_per_line:
ret += '\n' + indent
nOnLine = 0
return ret
def format_quaternion(quaternion):
return format_f(quaternion.x) + ',' + format_f(quaternion.z) + ',' + format_f(quaternion.y) + ',' + format_f(-quaternion.w)
def format_int(int):
candidate = str(int) # works whether int is a number or already a string of an int
if '.' in candidate:
return format_f(math.floor(int)) # format_f removes unnecessary precision
else:
return candidate
def format_bool(bool):
if bool:
return 'true'
else:
return 'false'
def scale_vector(vector, mult, xOffset = 0):
ret = vector.copy()
ret.x *= mult
ret.x += xOffset
ret.z *= mult
ret.y *= mult
return ret
def same_matrix4(matA, matB):
if(matA is None or matB is None): return False
if (len(matA) != len(matB)): return False
for i in range(len(matA)):
if (round(matA[i][0], MAX_FLOAT_PRECISION_INT) != round(matB[i][0], MAX_FLOAT_PRECISION_INT) or
round(matA[i][1], MAX_FLOAT_PRECISION_INT) != round(matB[i][1], MAX_FLOAT_PRECISION_INT) or
round(matA[i][2], MAX_FLOAT_PRECISION_INT) != round(matB[i][2], MAX_FLOAT_PRECISION_INT) or
round(matA[i][3], MAX_FLOAT_PRECISION_INT) != round(matB[i][3], MAX_FLOAT_PRECISION_INT)):
return False
return True
def same_vertex(vertA, vertB):
if(vertA is None or vertB is None): return False
return vertA.x == vertB.x and vertA.y == vertB.y and vertA.z == vertB.z
def same_array(arrayA, arrayB):
if(arrayA is None or arrayB is None): return False
if len(arrayA) != len(arrayB): return False
for i in range(len(arrayA)):
if arrayA[i] != arrayB[i] : return False
return True
#===============================================================================
# module level methods for writing JSON (.babylon) files
#===============================================================================
def write_matrix4(file_handler, name, matrix):
file_handler.write(',"' + name + '":[' + format_matrix4(matrix) + ']')
def write_array(file_handler, name, array):
file_handler.write('\n,"' + name + '":[' + format_array(array) + ']')
def write_array3(file_handler, name, array):
file_handler.write(',"' + name + '":[' + format_array3(array) + ']')
def write_color(file_handler, name, color):
file_handler.write(',"' + name + '":[' + format_color(color) + ']')
def write_vector(file_handler, name, vector):
file_handler.write(',"' + name + '":[' + format_vector(vector) + ']')
def write_vector_array(file_handler, name, vectorArray):
file_handler.write('\n,"' + name + '":[' + format_vector_array(vectorArray) + ']')
def write_quaternion(file_handler, name, quaternion):
file_handler.write(',"' + name +'":[' + format_quaternion(quaternion) + ']')
def write_string(file_handler, name, string, noComma = False):
if noComma == False:
file_handler.write(',')
file_handler.write('"' + name + '":"' + string + '"')
def write_float(file_handler, name, float):
file_handler.write(',"' + name + '":' + format_f(float))
def write_int(file_handler, name, int, noComma = False):
if noComma == False:
file_handler.write(',')
file_handler.write('"' + name + '":' + format_int(int))
def write_bool(file_handler, name, bool, noComma = False):
if noComma == False:
file_handler.write(',')
file_handler.write('"' + name + '":' + format_bool(bool))
#===============================================================================
# custom properties definition and display
#===============================================================================
bpy.types.Mesh.autoAnimate = bpy.props.BoolProperty(
name='Auto launch animations',
description='',
default = False
)
bpy.types.Mesh.useFlatShading = bpy.props.BoolProperty(
name='Use Flat Shading',
description='Use face normals. Increases vertices.',
default = False
)
bpy.types.Mesh.checkCollisions = bpy.props.BoolProperty(
name='Check Collisions',
description='Indicates mesh should be checked that it does not run into anything.',
default = False
)
bpy.types.Mesh.castShadows = bpy.props.BoolProperty(
name='Cast Shadows',
description='',
default = False
)
bpy.types.Mesh.receiveShadows = bpy.props.BoolProperty(
name='Receive Shadows',
description='',
default = False
)
bpy.types.Mesh.bakeSize = bpy.props.IntProperty(
name='Texture Size',
description='',
default = 1024
)
bpy.types.Mesh.bakeQuality = bpy.props.IntProperty(
name='Quality 1-100',
description='The trade-off between quality and file size (100 = highest quality)',
default = 50, min = 1, max = 100
)
bpy.types.Mesh.materialNameSpace = bpy.props.StringProperty(
name='Name Space',
description='Prefix to use for materials for sharing across .blends.',
default = DEFAULT_MATERIAL_NAMESPACE
)
bpy.types.Mesh.checkReadyOnlyOnce = bpy.props.BoolProperty(
name='Check Ready Only Once',
description='When checked better CPU utilization. Advanced user option.',
default = False
)
bpy.types.Mesh.freezeWorldMatrix = bpy.props.BoolProperty(
name='Freeze World Matrix',
description='Indicate the position, rotation, & scale do not change for performance reasons',
default = False
)
bpy.types.Mesh.loadDisabled = bpy.props.BoolProperty(
name='Load Disabled',
description='Indicate this mesh & children should not be active until enabled by code.',
default = False
)
bpy.types.Mesh.attachedSound = bpy.props.StringProperty(
name='Sound',
description='',
default = ''
)
bpy.types.Mesh.loopSound = bpy.props.BoolProperty(
name='Loop sound',
description='',
default = True
)
bpy.types.Mesh.autoPlaySound = bpy.props.BoolProperty(
name='Auto play sound',
description='',
default = True
)
bpy.types.Mesh.maxSoundDistance = bpy.props.FloatProperty(
name='Max sound distance',
description='',
default = 100
)
bpy.types.Mesh.maxInfluencers = bpy.props.IntProperty(
name='Max bone Influencers / Vertex',
description='When fewer than this are observed, the lower value is used.',
default = 8, min = 1, max = 8
)
#===============================================================================
bpy.types.Camera.autoAnimate = bpy.props.BoolProperty(
name='Auto launch animations',
description='',
default = False
)
bpy.types.Camera.CameraType = bpy.props.EnumProperty(
name='Camera Type',
description='',
# ONLY Append, or existing .blends will have their camera changed
items = (
(V_JOYSTICKS_CAM , 'Virtual Joysticks' , 'Use Virtual Joysticks Camera'),
(TOUCH_CAM , 'Touch' , 'Use Touch Camera'),
(GAMEPAD_CAM , 'Gamepad' , 'Use Gamepad Camera'),
(FREE_CAM , 'Free' , 'Use Free Camera'),
(FOLLOW_CAM , 'Follow' , 'Use Follow Camera'),
(DEV_ORIENT_CAM , 'Device Orientation' , 'Use Device Orientation Camera'),
(ARC_ROTATE_CAM , 'Arc Rotate' , 'Use Arc Rotate Camera'),
(VR_DEV_ORIENT_FREE_CAM , 'VR Dev Orientation Free' , 'Use VR Dev Orientation Free Camera'),
(WEB_VR_FREE_CAM , 'Web VR Free' , 'Use Web VR Free Camera')
),
default = FREE_CAM
)
bpy.types.Camera.checkCollisions = bpy.props.BoolProperty(
name='Check Collisions',
description='',
default = False
)
bpy.types.Camera.applyGravity = bpy.props.BoolProperty(
name='Apply Gravity',
description='',
default = False
)
bpy.types.Camera.ellipsoid = bpy.props.FloatVectorProperty(
name='Ellipsoid',
description='',
default = mathutils.Vector((0.2, 0.9, 0.2))
)
bpy.types.Camera.Camera3DRig = bpy.props.EnumProperty(
name='Rig',
description='',
items = (
(RIG_MODE_NONE , 'None' , 'No 3D effects'),
(RIG_MODE_STEREOSCOPIC_ANAGLYPH , 'Anaglyph' , 'Stereoscopic Anaglyph'),
(RIG_MODE_STEREOSCOPIC_SIDEBYSIDE_PARALLEL , 'side-by-side Parallel' , 'Stereoscopic side-by-side parallel'),
(RIG_MODE_STEREOSCOPIC_SIDEBYSIDE_CROSSEYED, 'side-by-side crosseyed', 'Stereoscopic side-by-side crosseyed'),
(RIG_MODE_STEREOSCOPIC_OVERUNDER , 'over-under' , 'Stereoscopic over-under'),
(RIG_MODE_VR , 'VR distortion' , 'Use Web VR Free Camera')
),
default = RIG_MODE_NONE
)
bpy.types.Camera.interaxialDistance = bpy.props.FloatProperty(
name='Interaxial Distance',
description='Distance between cameras. Used by all but VR 3D rigs.',
default = 0.0637
)
#===============================================================================
bpy.types.Lamp.autoAnimate = bpy.props.BoolProperty(
name='Auto launch animations',
description='',
default = False
)
bpy.types.Lamp.shadowMap = bpy.props.EnumProperty(
name='Shadow Map',
description='',
items = ((NO_SHADOWS , 'None' , 'No Shadow Maps'),
(STD_SHADOWS , 'Standard' , 'Use Standard Shadow Maps'),
(POISSON_SHADOWS , 'Poisson' , 'Use Poisson Sampling'),
(VARIANCE_SHADOWS , 'Variance' , 'Use Variance Shadow Maps'),
(BLUR_VARIANCE_SHADOWS, 'Blur Variance', 'Use Blur Variance Shadow Maps')
),
default = NO_SHADOWS
)
bpy.types.Lamp.shadowMapSize = bpy.props.IntProperty(
name='Shadow Map Size',
description='',
default = 512
)
bpy.types.Lamp.shadowBias = bpy.props.FloatProperty(
name='Shadow Bias',
description='',
default = 0.00005
)
bpy.types.Lamp.shadowBlurScale = bpy.props.IntProperty(
name='Blur Scale',
description='',
default = 2
)
bpy.types.Lamp.shadowBlurBoxOffset = bpy.props.IntProperty(
name='Blur Box Offset',
description='',
default = 0
)
class ObjectPanel(bpy.types.Panel):
bl_label = 'Babylon.js ' + format_version()
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
def draw(self, context):
ob = context.object
if not ob or not ob.data:
return
layout = self.layout
isMesh = isinstance(ob.data, bpy.types.Mesh)
isCamera = isinstance(ob.data, bpy.types.Camera)
isLight = isinstance(ob.data, bpy.types.Lamp)
if isMesh:
row = layout.row()
row.prop(ob.data, 'useFlatShading')
row.prop(ob.data, 'checkCollisions')
row = layout.row()
row.prop(ob.data, 'castShadows')
row.prop(ob.data, 'receiveShadows')
row = layout.row()
row.prop(ob.data, 'freezeWorldMatrix')
row.prop(ob.data, 'loadDisabled')
layout.prop(ob.data, 'autoAnimate')
layout.prop(ob.data, 'maxInfluencers')
box = layout.box()
box.label(text='Materials')
box.prop(ob.data, 'materialNameSpace')
box.prop(ob.data, 'checkReadyOnlyOnce')
box = layout.box()
box.label(text='Procedural Texture / Cycles Baking')
box.prop(ob.data, 'bakeSize')
box.prop(ob.data, 'bakeQuality')
box = layout.box()
box.prop(ob.data, 'attachedSound')
box.prop(ob.data, 'autoPlaySound')
box.prop(ob.data, 'loopSound')
box.prop(ob.data, 'maxSoundDistance')
elif isCamera:
layout.prop(ob.data, 'CameraType')
layout.prop(ob.data, 'checkCollisions')
layout.prop(ob.data, 'applyGravity')
layout.prop(ob.data, 'ellipsoid')
box = layout.box()
box.label(text="3D Camera Rigs")
box.prop(ob.data, 'Camera3DRig')
box.prop(ob.data, 'interaxialDistance')
layout.prop(ob.data, 'autoAnimate')
elif isLight:
layout.prop(ob.data, 'shadowMap')
layout.prop(ob.data, 'shadowMapSize')
layout.prop(ob.data, 'shadowBias')
box = layout.box()
box.label(text="Blur Variance Shadows")
box.prop(ob.data, 'shadowBlurScale')
box.prop(ob.data, 'shadowBlurBoxOffset')
layout.prop(ob.data, 'autoAnimate')
| 44.327997
| 223
| 0.558256
|
2415915c52d59dc820b62bed1b3bbe9604bce025
| 4,641
|
py
|
Python
|
instagram/settings.py
|
Ombae/neighbor
|
b4d3461bf24eb25259fa25f6e2d6564afb22ca5f
|
[
"MIT"
] | null | null | null |
instagram/settings.py
|
Ombae/neighbor
|
b4d3461bf24eb25259fa25f6e2d6564afb22ca5f
|
[
"MIT"
] | 5
|
2020-06-05T22:44:26.000Z
|
2021-09-08T01:16:21.000Z
|
instagram/settings.py
|
Ombae/neighbor
|
b4d3461bf24eb25259fa25f6e2d6564afb22ca5f
|
[
"MIT"
] | null | null | null |
"""
Django settings for instagram project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config('MODE', default="dev")
SECRET_KEY = config('SECRET_KEY')
# DEBUG = config('DEBUG', default=False, cast=bool)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '6(d1m=h9ydgwk63amqc+emgngy)yvh!3r*mn3l-f9n&h9(swk='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'bootstrap4',
'tinymce',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'instagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instagram.wsgi.application'
# Email configuration; remember to install python-decouple
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
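# Hypothetical .env entries consumed by the config() calls above (placeholder values, not from the
# project):
# EMAIL_USE_TLS=True
# EMAIL_HOST=smtp.gmail.com
# EMAIL_PORT=587
# EMAIL_HOST_USER=user@example.com
# EMAIL_HOST_PASSWORD=app-password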
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'instaclone',
# 'USER': 'seth',
# 'PASSWORD': 'SethOmbae1',
# 'HOST': 'localhost', # the missing piece of the puzzle
# 'PORT': ''
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# STATIC_URL = '/static/'
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "static"),
# ]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals())
| 26.52
| 91
| 0.702435
|
8de12b7758e5b288df7514735fdde365c38c031d
| 31,242
|
py
|
Python
|
evaluate/previous_works/HoHoNet/lib/misc/pano_lsd_align.py
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | 11
|
2021-11-01T05:40:19.000Z
|
2022-03-28T17:59:44.000Z
|
evaluate/previous_works/HoHoNet/lib/misc/pano_lsd_align.py
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | null | null | null |
evaluate/previous_works/HoHoNet/lib/misc/pano_lsd_align.py
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | 1
|
2022-03-29T10:13:47.000Z
|
2022-03-29T10:13:47.000Z
|
'''
This script contains helper functions for preprocessing.
Most of the code is converted from LayoutNet's official MATLAB code.
All functions, naming rules and data flow follow the official implementation
for easier conversion and comparison.
The code is not optimized for Python or NumPy yet.
Author: Cheng Sun
Email : chengsun@gapp.nthu.edu.tw
'''
import sys
import numpy as np
from scipy.ndimage import map_coordinates
import cv2
def computeUVN(n, in_, planeID):
'''
compute v given u and normal.
'''
if planeID == 2:
n = np.array([n[1], n[2], n[0]])
elif planeID == 3:
n = np.array([n[2], n[0], n[1]])
bc = n[0] * np.sin(in_) + n[1] * np.cos(in_)
bs = n[2]
out = np.arctan(-bc / (bs + 1e-9))
return out
def computeUVN_vec(n, in_, planeID):
'''
vectorization version of computeUVN
@n N x 3
@in_ MN x 1
@planeID N
'''
n = n.copy()
if (planeID == 2).sum():
n[planeID == 2] = np.roll(n[planeID == 2], 2, axis=1)
if (planeID == 3).sum():
n[planeID == 3] = np.roll(n[planeID == 3], 1, axis=1)
n = np.repeat(n, in_.shape[0] // n.shape[0], axis=0)
assert n.shape[0] == in_.shape[0]
bc = n[:, [0]] * np.sin(in_) + n[:, [1]] * np.cos(in_)
bs = n[:, [2]]
out = np.arctan(-bc / (bs + 1e-9))
return out
def xyz2uvN(xyz, planeID=1):
ID1 = (int(planeID) - 1 + 0) % 3
ID2 = (int(planeID) - 1 + 1) % 3
ID3 = (int(planeID) - 1 + 2) % 3
normXY = np.sqrt(xyz[:, [ID1]] ** 2 + xyz[:, [ID2]] ** 2)
normXY[normXY < 0.000001] = 0.000001
normXYZ = np.sqrt(xyz[:, [ID1]] ** 2 + xyz[:, [ID2]] ** 2 + xyz[:, [ID3]] ** 2)
v = np.arcsin(xyz[:, [ID3]] / normXYZ)
u = np.arcsin(xyz[:, [ID1]] / normXY)
valid = (xyz[:, [ID2]] < 0) & (u >= 0)
u[valid] = np.pi - u[valid]
valid = (xyz[:, [ID2]] < 0) & (u <= 0)
u[valid] = -np.pi - u[valid]
uv = np.hstack([u, v])
uv[np.isnan(uv[:, 0]), 0] = 0
return uv
def uv2xyzN(uv, planeID=1):
ID1 = (int(planeID) - 1 + 0) % 3
ID2 = (int(planeID) - 1 + 1) % 3
ID3 = (int(planeID) - 1 + 2) % 3
xyz = np.zeros((uv.shape[0], 3))
xyz[:, ID1] = np.cos(uv[:, 1]) * np.sin(uv[:, 0])
xyz[:, ID2] = np.cos(uv[:, 1]) * np.cos(uv[:, 0])
xyz[:, ID3] = np.sin(uv[:, 1])
return xyz
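# Illustrative sketch added for clarity (not part of the original module): a quick
# round-trip check of the xyz <-> (u, v) conversions above, assuming unit-norm points
# and the default planeID=1 convention. It is documentation, not a shipped test.
def _example_uv_xyz_roundtrip():
    pts = np.array([[0.0, 1.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0]])
    uv = xyz2uvN(pts, planeID=1)
    back = uv2xyzN(uv, planeID=1)
    # back should match pts up to floating point error
    return np.allclose(pts, back, atol=1e-6)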
def uv2xyzN_vec(uv, planeID):
'''
vectorization version of uv2xyzN
@uv N x 2
@planeID N
'''
assert (planeID.astype(int) != planeID).sum() == 0
planeID = planeID.astype(int)
ID1 = (planeID - 1 + 0) % 3
ID2 = (planeID - 1 + 1) % 3
ID3 = (planeID - 1 + 2) % 3
ID = np.arange(len(uv))
xyz = np.zeros((len(uv), 3))
xyz[ID, ID1] = np.cos(uv[:, 1]) * np.sin(uv[:, 0])
xyz[ID, ID2] = np.cos(uv[:, 1]) * np.cos(uv[:, 0])
xyz[ID, ID3] = np.sin(uv[:, 1])
return xyz
def warpImageFast(im, XXdense, YYdense, order=1):
minX = max(1., np.floor(XXdense.min()) - 1)
minY = max(1., np.floor(YYdense.min()) - 1)
maxX = min(im.shape[1], np.ceil(XXdense.max()) + 1)
maxY = min(im.shape[0], np.ceil(YYdense.max()) + 1)
im = im[int(round(minY-1)):int(round(maxY)),
int(round(minX-1)):int(round(maxX))]
assert XXdense.shape == YYdense.shape
out_shape = XXdense.shape
coordinates = [
(YYdense - minY).reshape(-1),
(XXdense - minX).reshape(-1),
]
im_warp = np.stack([
map_coordinates(im[..., c], coordinates, order=order).reshape(out_shape)
for c in range(im.shape[-1])],
axis=-1)
return im_warp
def rotatePanorama(img, vp=None, R=None, order=1):
'''
Rotate panorama
        if R is given, vp (vanishing point) will be ignored;
otherwise R is computed from vp
'''
sphereH, sphereW, C = img.shape
# new uv coordinates
TX, TY = np.meshgrid(range(1, sphereW + 1), range(1, sphereH + 1))
TX = TX.reshape(-1, 1, order='F')
TY = TY.reshape(-1, 1, order='F')
ANGx = (TX - sphereW/2 - 0.5) / sphereW * np.pi * 2
ANGy = -(TY - sphereH/2 - 0.5) / sphereH * np.pi
uvNew = np.hstack([ANGx, ANGy])
xyzNew = uv2xyzN(uvNew, 1)
# rotation matrix
if R is None:
R = np.linalg.inv(vp.T)
xyzOld = np.linalg.solve(R, xyzNew.T).T
uvOld = xyz2uvN(xyzOld, 1)
Px = (uvOld[:, 0] + np.pi) / (2*np.pi) * sphereW + 0.5
Py = (-uvOld[:, 1] + np.pi/2) / np.pi * sphereH + 0.5
Px = Px.reshape(sphereH, sphereW, order='F')
Py = Py.reshape(sphereH, sphereW, order='F')
# boundary
imgNew = np.zeros((sphereH+2, sphereW+2, C), np.float64)
imgNew[1:-1, 1:-1, :] = img
imgNew[1:-1, 0, :] = img[:, -1, :]
imgNew[1:-1, -1, :] = img[:, 0, :]
imgNew[0, 1:sphereW//2+1, :] = img[0, sphereW-1:sphereW//2-1:-1, :]
imgNew[0, sphereW//2+1:-1, :] = img[0, sphereW//2-1::-1, :]
imgNew[-1, 1:sphereW//2+1, :] = img[-1, sphereW-1:sphereW//2-1:-1, :]
    imgNew[-1, sphereW//2+1:-1, :] = img[-1, sphereW//2-1::-1, :]
imgNew[0, 0, :] = img[0, 0, :]
imgNew[-1, -1, :] = img[-1, -1, :]
imgNew[0, -1, :] = img[0, -1, :]
imgNew[-1, 0, :] = img[-1, 0, :]
rotImg = warpImageFast(imgNew, Px+1, Py+1, order)
return rotImg
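# Illustrative sketch added for clarity (not part of the original module): the vp/R
# convention documented above means rotating with R = inv(vp.T) and then with its
# inverse should (up to interpolation error) restore the panorama. `pano` is assumed
# to be an H x W x C float array.
def _example_rotate_and_restore(pano, vp):
    R = np.linalg.inv(vp.T)
    aligned = rotatePanorama(pano.astype(np.float64), R=R)
    restored = rotatePanorama(aligned, R=np.linalg.inv(R))
    return aligned, restored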
def imgLookAt(im, CENTERx, CENTERy, new_imgH, fov):
sphereH = im.shape[0]
sphereW = im.shape[1]
warped_im = np.zeros((new_imgH, new_imgH, 3))
TX, TY = np.meshgrid(range(1, new_imgH + 1), range(1, new_imgH + 1))
TX = TX.reshape(-1, 1, order='F')
TY = TY.reshape(-1, 1, order='F')
TX = TX - 0.5 - new_imgH/2
TY = TY - 0.5 - new_imgH/2
r = new_imgH / 2 / np.tan(fov/2)
# convert to 3D
R = np.sqrt(TY ** 2 + r ** 2)
ANGy = np.arctan(- TY / r)
ANGy = ANGy + CENTERy
X = np.sin(ANGy) * R
Y = -np.cos(ANGy) * R
Z = TX
INDn = np.nonzero(np.abs(ANGy) > np.pi/2)
# project back to sphere
ANGx = np.arctan(Z / -Y)
RZY = np.sqrt(Z ** 2 + Y ** 2)
ANGy = np.arctan(X / RZY)
ANGx[INDn] = ANGx[INDn] + np.pi
ANGx = ANGx + CENTERx
INDy = np.nonzero(ANGy < -np.pi/2)
ANGy[INDy] = -np.pi - ANGy[INDy]
ANGx[INDy] = ANGx[INDy] + np.pi
INDx = np.nonzero(ANGx <= -np.pi); ANGx[INDx] = ANGx[INDx] + 2 * np.pi
INDx = np.nonzero(ANGx > np.pi); ANGx[INDx] = ANGx[INDx] - 2 * np.pi
INDx = np.nonzero(ANGx > np.pi); ANGx[INDx] = ANGx[INDx] - 2 * np.pi
INDx = np.nonzero(ANGx > np.pi); ANGx[INDx] = ANGx[INDx] - 2 * np.pi
Px = (ANGx + np.pi) / (2*np.pi) * sphereW + 0.5
Py = ((-ANGy) + np.pi/2) / np.pi * sphereH + 0.5
INDxx = np.nonzero(Px < 1)
Px[INDxx] = Px[INDxx] + sphereW
im = np.concatenate([im, im[:, :2]], 1)
Px = Px.reshape(new_imgH, new_imgH, order='F')
Py = Py.reshape(new_imgH, new_imgH, order='F')
warped_im = warpImageFast(im, Px, Py)
return warped_im
def separatePano(panoImg, fov, x, y, imgSize=320):
'''cut a panorama image into several separate views'''
assert x.shape == y.shape
if not isinstance(fov, np.ndarray):
fov = fov * np.ones_like(x)
sepScene = [
{
'img': imgLookAt(panoImg.copy(), xi, yi, imgSize, fovi),
'vx': xi,
'vy': yi,
'fov': fovi,
'sz': imgSize,
}
for xi, yi, fovi in zip(x, y, fov)
]
return sepScene
def lsdWrap(img, LSD=None, **kwargs):
'''
Opencv implementation of
Rafael Grompone von Gioi, Jérémie Jakubowicz, Jean-Michel Morel, and Gregory Randall,
LSD: a Line Segment Detector, Image Processing On Line, vol. 2012.
[Rafael12] http://www.ipol.im/pub/art/2012/gjmr-lsd/?utm_source=doi
@img
input image
@LSD
        Constructed by cv2.createLineSegmentDetector
https://docs.opencv.org/3.0-beta/modules/imgproc/doc/feature_detection.html#linesegmentdetector
if LSD is given, kwargs will be ignored
@kwargs
is used to construct LSD
        works only if @LSD is not given
'''
if LSD is None:
LSD = cv2.createLineSegmentDetector(**kwargs)
#LSD = cv2.ximgproc.createFastLineDetector(_do_merge=True)
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
lines, width, prec, nfa = LSD.detect(img)
#lines = LSD.detect(img)
if lines is None:
return np.zeros_like(img), np.array([])
edgeMap = LSD.drawSegments(np.zeros_like(img), lines)[..., -1]
lines = np.squeeze(lines, 1)
edgeList = np.concatenate([lines, width, prec, nfa], 1)
#edgeList = np.concatenate([lines, np.ones([len(lines), 1])], 1)
return edgeMap, edgeList
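# Illustrative sketch added for clarity (not part of the original module): building one
# detector and reusing it across several views, which mirrors what panoEdgeDetection
# does further below. The parameter values are placeholders, not tuned settings.
def _example_lsd_usage(img_view):
    detector = cv2.createLineSegmentDetector(_refine=cv2.LSD_REFINE_ADV, _quant=0.7)
    edge_map, edge_list = lsdWrap(img_view, LSD=detector)
    # each edge_list row is [x1, y1, x2, y2, width, precision, nfa]
    return edge_map, edge_list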
def edgeFromImg2Pano(edge):
edgeList = edge['edgeLst']
if len(edgeList) == 0:
return np.array([])
vx = edge['vx']
vy = edge['vy']
fov = edge['fov']
imH, imW = edge['img'].shape
R = (imW/2) / np.tan(fov/2)
# im is the tangent plane, contacting with ball at [x0 y0 z0]
x0 = R * np.cos(vy) * np.sin(vx)
y0 = R * np.cos(vy) * np.cos(vx)
z0 = R * np.sin(vy)
vecposX = np.array([np.cos(vx), -np.sin(vx), 0])
vecposY = np.cross(np.array([x0, y0, z0]), vecposX)
vecposY = vecposY / np.sqrt(vecposY @ vecposY.T)
vecposX = vecposX.reshape(1, -1)
vecposY = vecposY.reshape(1, -1)
Xc = (0 + imW-1) / 2
Yc = (0 + imH-1) / 2
vecx1 = edgeList[:, [0]] - Xc
vecy1 = edgeList[:, [1]] - Yc
vecx2 = edgeList[:, [2]] - Xc
vecy2 = edgeList[:, [3]] - Yc
vec1 = np.tile(vecx1, [1, 3]) * vecposX + np.tile(vecy1, [1, 3]) * vecposY
vec2 = np.tile(vecx2, [1, 3]) * vecposX + np.tile(vecy2, [1, 3]) * vecposY
coord1 = [[x0, y0, z0]] + vec1
coord2 = [[x0, y0, z0]] + vec2
normal = np.cross(coord1, coord2, axis=1)
normal = normal / np.linalg.norm(normal, axis=1, keepdims=True)
panoList = np.hstack([normal, coord1, coord2, edgeList[:, [-1]]])
return panoList
def _intersection(range1, range2):
if range1[1] < range1[0]:
range11 = [range1[0], 1]
range12 = [0, range1[1]]
else:
range11 = range1
range12 = [0, 0]
if range2[1] < range2[0]:
range21 = [range2[0], 1]
range22 = [0, range2[1]]
else:
range21 = range2
range22 = [0, 0]
b = max(range11[0], range21[0]) < min(range11[1], range21[1])
if b:
return b
b2 = max(range12[0], range22[0]) < min(range12[1], range22[1])
b = b or b2
return b
def _insideRange(pt, range):
if range[1] > range[0]:
b = pt >= range[0] and pt <= range[1]
else:
b1 = pt >= range[0] and pt <= 1
b2 = pt >= 0 and pt <= range[1]
b = b1 or b2
return b
def combineEdgesN(edges):
'''
    Combine small line segments into longer ones; this should be very conservative
OUTPUT
lines: combined line segments
ori_lines: original line segments
line format [nx ny nz projectPlaneID umin umax LSfov score]
'''
arcList = []
for edge in edges:
panoLst = edge['panoLst']
if len(panoLst) == 0:
continue
arcList.append(panoLst)
arcList = np.vstack(arcList)
# ori lines
numLine = len(arcList)
ori_lines = np.zeros((numLine, 8))
areaXY = np.abs(arcList[:, 2])
areaYZ = np.abs(arcList[:, 0])
areaZX = np.abs(arcList[:, 1])
planeIDs = np.argmax(np.stack([areaXY, areaYZ, areaZX], -1), 1) + 1 # XY YZ ZX
for i in range(numLine):
ori_lines[i, :3] = arcList[i, :3]
ori_lines[i, 3] = planeIDs[i]
coord1 = arcList[i, 3:6]
coord2 = arcList[i, 6:9]
uv = xyz2uvN(np.stack([coord1, coord2]), planeIDs[i])
umax = uv[:, 0].max() + np.pi
umin = uv[:, 0].min() + np.pi
if umax - umin > np.pi:
ori_lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
else:
ori_lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi
ori_lines[i, 6] = np.arccos((
np.dot(coord1, coord2) / (np.linalg.norm(coord1) * np.linalg.norm(coord2))
).clip(-1, 1))
ori_lines[i, 7] = arcList[i, 9]
# additive combination
lines = ori_lines.copy()
for _ in range(3):
numLine = len(lines)
valid_line = np.ones(numLine, bool)
for i in range(numLine):
if not valid_line[i]:
continue
dotProd = (lines[:, :3] * lines[[i], :3]).sum(1)
valid_curr = np.logical_and((np.abs(dotProd) > np.cos(np.pi / 180)), valid_line)
valid_curr[i] = False
for j in np.nonzero(valid_curr)[0]:
range1 = lines[i, 4:6]
range2 = lines[j, 4:6]
valid_rag = _intersection(range1, range2)
if not valid_rag:
continue
# combine
I = np.argmax(np.abs(lines[i, :3]))
if lines[i, I] * lines[j, I] > 0:
nc = lines[i, :3] * lines[i, 6] + lines[j, :3] * lines[j, 6]
else:
nc = lines[i, :3] * lines[i, 6] - lines[j, :3] * lines[j, 6]
nc = nc / np.linalg.norm(nc)
if _insideRange(range1[0], range2):
nrmin = range2[0]
else:
nrmin = range1[0]
if _insideRange(range1[1], range2):
nrmax = range2[1]
else:
nrmax = range1[1]
u = np.array([[nrmin], [nrmax]]) * 2 * np.pi - np.pi
v = computeUVN(nc, u, lines[i, 3])
xyz = uv2xyzN(np.hstack([u, v]), lines[i, 3])
l = np.arccos(np.dot(xyz[0, :], xyz[1, :]).clip(-1, 1))
scr = (lines[i,6]*lines[i,7] + lines[j,6]*lines[j,7]) / (lines[i,6]+lines[j,6])
lines[i] = [*nc, lines[i, 3], nrmin, nrmax, l, scr]
valid_line[j] = False
lines = lines[valid_line]
return lines, ori_lines
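# Illustrative sketch added for clarity (not part of the original module): unpacking one
# combined line following the [nx ny nz planeID umin umax arclen score] layout that the
# docstring above describes.
def _example_unpack_line(line_row):
    normal = line_row[:3]           # unit normal of the great circle through the segment
    plane_id = int(line_row[3])     # projection plane used for the (u, v) parametrisation
    umin, umax = line_row[4:6]      # u-range normalised to [0, 1]
    arc_len, score = line_row[6:8]  # angular length (rad) and LSD-derived score
    return normal, plane_id, (umin, umax), arc_len, score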
def icosahedron2sphere(level):
    # this function uses an icosahedron to sample points uniformly on a sphere
a = 2 / (1 + np.sqrt(5))
M = np.array([
0, a, -1, a, 1, 0, -a, 1, 0,
0, a, 1, -a, 1, 0, a, 1, 0,
0, a, 1, 0, -a, 1, -1, 0, a,
0, a, 1, 1, 0, a, 0, -a, 1,
0, a, -1, 0, -a, -1, 1, 0, -a,
0, a, -1, -1, 0, -a, 0, -a, -1,
0, -a, 1, a, -1, 0, -a, -1, 0,
0, -a, -1, -a, -1, 0, a, -1, 0,
-a, 1, 0, -1, 0, a, -1, 0, -a,
-a, -1, 0, -1, 0, -a, -1, 0, a,
a, 1, 0, 1, 0, -a, 1, 0, a,
a, -1, 0, 1, 0, a, 1, 0, -a,
0, a, 1, -1, 0, a, -a, 1, 0,
0, a, 1, a, 1, 0, 1, 0, a,
0, a, -1, -a, 1, 0, -1, 0, -a,
0, a, -1, 1, 0, -a, a, 1, 0,
0, -a, -1, -1, 0, -a, -a, -1, 0,
0, -a, -1, a, -1, 0, 1, 0, -a,
0, -a, 1, -a, -1, 0, -1, 0, a,
0, -a, 1, 1, 0, a, a, -1, 0])
coor = M.T.reshape(3, 60, order='F').T
coor, idx = np.unique(coor, return_inverse=True, axis=0)
tri = idx.reshape(3, 20, order='F').T
# extrude
coor = list(coor / np.tile(np.linalg.norm(coor, axis=1, keepdims=True), (1, 3)))
for _ in range(level):
triN = []
for t in range(len(tri)):
n = len(coor)
coor.append((coor[tri[t, 0]] + coor[tri[t, 1]]) / 2)
coor.append((coor[tri[t, 1]] + coor[tri[t, 2]]) / 2)
coor.append((coor[tri[t, 2]] + coor[tri[t, 0]]) / 2)
triN.append([n, tri[t, 0], n+2])
triN.append([n, tri[t, 1], n+1])
triN.append([n+1, tri[t, 2], n+2])
triN.append([n, n+1, n+2])
tri = np.array(triN)
# uniquefy
coor, idx = np.unique(coor, return_inverse=True, axis=0)
tri = idx[tri]
# extrude
coor = list(coor / np.tile(np.sqrt(np.sum(coor * coor, 1, keepdims=True)), (1, 3)))
return np.array(coor), np.array(tri)
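# Illustrative sketch added for clarity (not part of the original module): each
# subdivision level splits every triangle into four, so the face count is 20 * 4**level
# and every returned vertex lies on the unit sphere.
def _example_icosahedron_counts(level=2):
    coor, tri = icosahedron2sphere(level)
    assert len(tri) == 20 * 4 ** level
    assert np.allclose(np.linalg.norm(coor, axis=1), 1.0)
    return coor.shape, tri.shape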
def curveFitting(inputXYZ, weight):
'''
@inputXYZ: N x 3
@weight : N x 1
'''
l = np.linalg.norm(inputXYZ, axis=1, keepdims=True)
inputXYZ = inputXYZ / l
weightXYZ = inputXYZ * weight
XX = np.sum(weightXYZ[:, 0] ** 2)
YY = np.sum(weightXYZ[:, 1] ** 2)
ZZ = np.sum(weightXYZ[:, 2] ** 2)
XY = np.sum(weightXYZ[:, 0] * weightXYZ[:, 1])
YZ = np.sum(weightXYZ[:, 1] * weightXYZ[:, 2])
ZX = np.sum(weightXYZ[:, 2] * weightXYZ[:, 0])
A = np.array([
[XX, XY, ZX],
[XY, YY, YZ],
[ZX, YZ, ZZ]])
U, S, Vh = np.linalg.svd(A)
outputNM = Vh[-1, :]
outputNM = outputNM / np.linalg.norm(outputNM)
return outputNM
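# Illustrative sketch added for clarity (not part of the original module): points spread
# on the unit circle in the XY plane should be fitted with a normal close to +/- Z.
def _example_fit_plane_normal():
    theta = np.linspace(0, 2 * np.pi, 32, endpoint=False)
    pts = np.stack([np.cos(theta), np.sin(theta), np.zeros_like(theta)], axis=1)
    normal = curveFitting(pts, np.ones((len(pts), 1)))
    return np.abs(normal[2]) > 0.99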
def sphereHoughVote(segNormal, segLength, segScores, binRadius, orthTolerance, candiSet, force_unempty=True):
# initial guess
numLinesg = len(segNormal)
voteBinPoints = candiSet.copy()
voteBinPoints = voteBinPoints[~(voteBinPoints[:,2] < 0)]
reversValid = (segNormal[:, 2] < 0).reshape(-1)
segNormal[reversValid] = -segNormal[reversValid]
voteBinUV = xyz2uvN(voteBinPoints)
numVoteBin = len(voteBinPoints)
voteBinValues = np.zeros(numVoteBin)
for i in range(numLinesg):
tempNorm = segNormal[[i]]
tempDots = (voteBinPoints * tempNorm).sum(1)
valid = np.abs(tempDots) < np.cos((90 - binRadius) * np.pi / 180)
voteBinValues[valid] = voteBinValues[valid] + segScores[i] * segLength[i]
checkIDs1 = np.nonzero(voteBinUV[:, [1]] > np.pi / 3)[0]
voteMax = 0
checkID1Max = 0
checkID2Max = 0
checkID3Max = 0
for j in range(len(checkIDs1)):
checkID1 = checkIDs1[j]
vote1 = voteBinValues[checkID1]
if voteBinValues[checkID1] == 0 and force_unempty:
continue
checkNormal = voteBinPoints[[checkID1]]
dotProduct = (voteBinPoints * checkNormal).sum(1)
checkIDs2 = np.nonzero(np.abs(dotProduct) < np.cos((90 - orthTolerance) * np.pi / 180))[0]
for i in range(len(checkIDs2)):
checkID2 = checkIDs2[i]
if voteBinValues[checkID2] == 0 and force_unempty:
continue
vote2 = vote1 + voteBinValues[checkID2]
cpv = np.cross(voteBinPoints[checkID1], voteBinPoints[checkID2]).reshape(1, 3)
cpn = np.linalg.norm(cpv)
dotProduct = (voteBinPoints * cpv).sum(1) / cpn
checkIDs3 = np.nonzero(np.abs(dotProduct) > np.cos(orthTolerance * np.pi / 180))[0]
for k in range(len(checkIDs3)):
checkID3 = checkIDs3[k]
if voteBinValues[checkID3] == 0 and force_unempty:
continue
vote3 = vote2 + voteBinValues[checkID3]
if vote3 > voteMax:
lastStepCost = vote3 - voteMax
if voteMax != 0:
tmp = (voteBinPoints[[checkID1Max, checkID2Max, checkID3Max]] * \
voteBinPoints[[checkID1, checkID2, checkID3]]).sum(1)
lastStepAngle = np.arccos(tmp.clip(-1, 1))
else:
lastStepAngle = np.zeros(3)
checkID1Max = checkID1
checkID2Max = checkID2
checkID3Max = checkID3
voteMax = vote3
if checkID1Max == 0:
print('[WARN] sphereHoughVote: no orthogonal voting exist', file=sys.stderr)
return None, 0, 0
initXYZ = voteBinPoints[[checkID1Max, checkID2Max, checkID3Max]]
# refine
refiXYZ = np.zeros((3, 3))
dotprod = (segNormal * initXYZ[[0]]).sum(1)
valid = np.abs(dotprod) < np.cos((90 - binRadius) * np.pi / 180)
validNm = segNormal[valid]
validWt = segLength[valid] * segScores[valid]
validWt = validWt / validWt.max()
refiNM = curveFitting(validNm, validWt)
refiXYZ[0] = refiNM.copy()
dotprod = (segNormal * initXYZ[[1]]).sum(1)
valid = np.abs(dotprod) < np.cos((90 - binRadius) * np.pi / 180)
validNm = segNormal[valid]
validWt = segLength[valid] * segScores[valid]
validWt = validWt / validWt.max()
validNm = np.vstack([validNm, refiXYZ[[0]]])
validWt = np.vstack([validWt, validWt.sum(0, keepdims=1) * 0.1])
refiNM = curveFitting(validNm, validWt)
refiXYZ[1] = refiNM.copy()
refiNM = np.cross(refiXYZ[0], refiXYZ[1])
refiXYZ[2] = refiNM / np.linalg.norm(refiNM)
return refiXYZ, lastStepCost, lastStepAngle
def findMainDirectionEMA(lines):
'''compute vp from set of lines'''
# initial guess
segNormal = lines[:, :3]
segLength = lines[:, [6]]
segScores = np.ones((len(lines), 1))
shortSegValid = (segLength < 5 * np.pi / 180).reshape(-1)
segNormal = segNormal[~shortSegValid, :]
segLength = segLength[~shortSegValid]
segScores = segScores[~shortSegValid]
numLinesg = len(segNormal)
candiSet, tri = icosahedron2sphere(3)
ang = np.arccos((candiSet[tri[0,0]] * candiSet[tri[0,1]]).sum().clip(-1, 1)) / np.pi * 180
binRadius = ang / 2
initXYZ, score, angle = sphereHoughVote(segNormal, segLength, segScores, 2*binRadius, 2, candiSet)
if initXYZ is None:
print('[WARN] findMainDirectionEMA: initial failed', file=sys.stderr)
return None, score, angle
# iterative refine
iter_max = 3
candiSet, tri = icosahedron2sphere(5)
numCandi = len(candiSet)
angD = np.arccos((candiSet[tri[0, 0]] * candiSet[tri[0, 1]]).sum().clip(-1, 1)) / np.pi * 180
binRadiusD = angD / 2
curXYZ = initXYZ.copy()
tol = np.linspace(4*binRadius, 4*binRadiusD, iter_max) # shrink down ls and candi
for it in range(iter_max):
dot1 = np.abs((segNormal * curXYZ[[0]]).sum(1))
dot2 = np.abs((segNormal * curXYZ[[1]]).sum(1))
dot3 = np.abs((segNormal * curXYZ[[2]]).sum(1))
valid1 = dot1 < np.cos((90 - tol[it]) * np.pi / 180)
valid2 = dot2 < np.cos((90 - tol[it]) * np.pi / 180)
valid3 = dot3 < np.cos((90 - tol[it]) * np.pi / 180)
valid = valid1 | valid2 | valid3
if np.sum(valid) == 0:
print('[WARN] findMainDirectionEMA: zero line segments for voting', file=sys.stderr)
break
subSegNormal = segNormal[valid]
subSegLength = segLength[valid]
subSegScores = segScores[valid]
dot1 = np.abs((candiSet * curXYZ[[0]]).sum(1))
dot2 = np.abs((candiSet * curXYZ[[1]]).sum(1))
dot3 = np.abs((candiSet * curXYZ[[2]]).sum(1))
valid1 = dot1 > np.cos(tol[it] * np.pi / 180)
valid2 = dot2 > np.cos(tol[it] * np.pi / 180)
valid3 = dot3 > np.cos(tol[it] * np.pi / 180)
valid = valid1 | valid2 | valid3
if np.sum(valid) == 0:
print('[WARN] findMainDirectionEMA: zero line segments for voting', file=sys.stderr)
break
subCandiSet = candiSet[valid]
tcurXYZ, _, _ = sphereHoughVote(subSegNormal, subSegLength, subSegScores, 2*binRadiusD, 2, subCandiSet)
if tcurXYZ is None:
print('[WARN] findMainDirectionEMA: no answer found', file=sys.stderr)
break
curXYZ = tcurXYZ.copy()
mainDirect = curXYZ.copy()
mainDirect[0] = mainDirect[0] * np.sign(mainDirect[0,2])
mainDirect[1] = mainDirect[1] * np.sign(mainDirect[1,2])
mainDirect[2] = mainDirect[2] * np.sign(mainDirect[2,2])
uv = xyz2uvN(mainDirect)
I1 = np.argmax(uv[:,1])
J = np.setdiff1d(np.arange(3), I1)
I2 = np.argmin(np.abs(np.sin(uv[J,0])))
I2 = J[I2]
I3 = np.setdiff1d(np.arange(3), np.hstack([I1, I2]))
mainDirect = np.vstack([mainDirect[I1], mainDirect[I2], mainDirect[I3]])
mainDirect[0] = mainDirect[0] * np.sign(mainDirect[0,2])
mainDirect[1] = mainDirect[1] * np.sign(mainDirect[1,1])
mainDirect[2] = mainDirect[2] * np.sign(mainDirect[2,0])
mainDirect = np.vstack([mainDirect, -mainDirect])
return mainDirect, score, angle
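# Illustrative sketch added for clarity (not part of the original module): the typical
# call chain used by panoEdgeDetection below -- combine the per-view segments, then vote
# for the three orthogonal vanishing directions.
def _example_estimate_vp(edges):
    lines, _ = combineEdgesN(edges)
    mainDirect, score, _ = findMainDirectionEMA(lines)
    if mainDirect is None:  # voting can fail when too few segments survive filtering
        return None, score
    # mainDirect stacks the three orthogonal directions and their negatives (6 x 3)
    return mainDirect[:3], score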
def multi_linspace(start, stop, num):
div = (num - 1)
y = np.arange(0, num, dtype=np.float64)
steps = (stop - start) / div
return steps.reshape(-1, 1) * y + start.reshape(-1, 1)
def assignVanishingType(lines, vp, tol, area=10):
numLine = len(lines)
numVP = len(vp)
typeCost = np.zeros((numLine, numVP))
# perpendicular
for vid in range(numVP):
cosint = (lines[:, :3] * vp[[vid]]).sum(1)
typeCost[:, vid] = np.arcsin(np.abs(cosint).clip(-1, 1))
# infinity
u = np.stack([lines[:, 4], lines[:, 5]], -1)
u = u.reshape(-1, 1) * 2 * np.pi - np.pi
v = computeUVN_vec(lines[:, :3], u, lines[:, 3])
xyz = uv2xyzN_vec(np.hstack([u, v]), np.repeat(lines[:, 3], 2))
xyz = multi_linspace(xyz[0::2].reshape(-1), xyz[1::2].reshape(-1), 100)
xyz = np.vstack([blk.T for blk in np.split(xyz, numLine)])
xyz = xyz / np.linalg.norm(xyz, axis=1, keepdims=True)
for vid in range(numVP):
ang = np.arccos(np.abs((xyz * vp[[vid]]).sum(1)).clip(-1, 1))
notok = (ang < area * np.pi / 180).reshape(numLine, 100).sum(1) != 0
typeCost[notok, vid] = 100
I = typeCost.min(1)
tp = typeCost.argmin(1)
tp[I > tol] = numVP + 1
return tp, typeCost
def refitLineSegmentB(lines, vp, vpweight=0.1):
'''
Refit direction of line segments
INPUT:
lines: original line segments
        vp: vanishing point
vpweight: if set to 0, lines will not change; if set to inf, lines will
be forced to pass vp
'''
numSample = 100
numLine = len(lines)
xyz = np.zeros((numSample+1, 3))
wei = np.ones((numSample+1, 1))
wei[numSample] = vpweight * numSample
lines_ali = lines.copy()
for i in range(numLine):
n = lines[i, :3]
sid = lines[i, 4] * 2 * np.pi
eid = lines[i, 5] * 2 * np.pi
if eid < sid:
x = np.linspace(sid, eid + 2 * np.pi, numSample) % (2 * np.pi)
else:
x = np.linspace(sid, eid, numSample)
u = -np.pi + x.reshape(-1, 1)
v = computeUVN(n, u, lines[i, 3])
xyz[:numSample] = uv2xyzN(np.hstack([u, v]), lines[i, 3])
xyz[numSample] = vp
outputNM = curveFitting(xyz, wei)
lines_ali[i, :3] = outputNM
return lines_ali
def paintParameterLine(parameterLine, width, height):
lines = parameterLine.copy()
panoEdgeC = np.zeros((height, width))
num_sample = max(height, width)
for i in range(len(lines)):
n = lines[i, :3]
sid = lines[i, 4] * 2 * np.pi
eid = lines[i, 5] * 2 * np.pi
if eid < sid:
x = np.linspace(sid, eid + 2 * np.pi, num_sample)
x = x % (2 * np.pi)
else:
x = np.linspace(sid, eid, num_sample)
u = -np.pi + x.reshape(-1, 1)
v = computeUVN(n, u, lines[i, 3])
xyz = uv2xyzN(np.hstack([u, v]), lines[i, 3])
uv = xyz2uvN(xyz, 1)
m = np.minimum(np.floor((uv[:,0] + np.pi) / (2 * np.pi) * width) + 1,
width).astype(np.int32)
n = np.minimum(np.floor(((np.pi / 2) - uv[:, 1]) / np.pi * height) + 1,
height).astype(np.int32)
panoEdgeC[n-1, m-1] = i
return panoEdgeC
def panoEdgeDetection(img, viewSize=320, qError=0.7, refineIter=3):
'''
line detection on panorama
INPUT:
        img: image to run detection on, double type, range 0~1
        viewSize: image size of cropped views
        qError: set smaller if more line segments are wanted
OUTPUT:
oLines: detected line segments
vp: vanishing point
views: separate views of panorama
edges: original detection of line segments in separate views
        panoEdge: image for visualizing line segments
'''
cutSize = viewSize
fov = np.pi / 3
xh = np.arange(-np.pi, np.pi*5/6, np.pi/6)
yh = np.zeros(xh.shape[0])
xp = np.array([-3/3, -2/3, -1/3, 0/3, 1/3, 2/3, -3/3, -2/3, -1/3, 0/3, 1/3, 2/3]) * np.pi
yp = np.array([ 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, -1/4, -1/4, -1/4, -1/4, -1/4, -1/4]) * np.pi
x = np.concatenate([xh, xp, [0, 0]])
y = np.concatenate([yh, yp, [np.pi/2., -np.pi/2]])
sepScene = separatePano(img.copy(), fov, x, y, cutSize)
edge = []
LSD = cv2.createLineSegmentDetector(_refine=cv2.LSD_REFINE_ADV, _quant=qError)
#LSD = cv2.ximgproc.createFastLineDetector(_do_merge=True)
for i, scene in enumerate(sepScene):
edgeMap, edgeList = lsdWrap(scene['img'], LSD)
edge.append({
'img': edgeMap,
'edgeLst': edgeList,
'vx': scene['vx'],
'vy': scene['vy'],
'fov': scene['fov'],
})
edge[-1]['panoLst'] = edgeFromImg2Pano(edge[-1])
lines, olines = combineEdgesN(edge)
clines = lines.copy()
for _ in range(refineIter):
mainDirect, score, angle = findMainDirectionEMA(clines)
tp, typeCost = assignVanishingType(lines, mainDirect[:3], 0.1, 10)
lines1 = lines[tp==0]
lines2 = lines[tp==1]
lines3 = lines[tp==2]
lines1rB = refitLineSegmentB(lines1, mainDirect[0], 0)
lines2rB = refitLineSegmentB(lines2, mainDirect[1], 0)
lines3rB = refitLineSegmentB(lines3, mainDirect[2], 0)
clines = np.vstack([lines1rB, lines2rB, lines3rB])
panoEdge1r = paintParameterLine(lines1rB, img.shape[1], img.shape[0])
panoEdge2r = paintParameterLine(lines2rB, img.shape[1], img.shape[0])
panoEdge3r = paintParameterLine(lines3rB, img.shape[1], img.shape[0])
panoEdger = np.stack([panoEdge1r, panoEdge2r, panoEdge3r], -1)
# output
olines = clines
vp = mainDirect
views = sepScene
edges = edge
panoEdge = panoEdger
return olines, vp, views, edges, panoEdge, score, angle
if __name__ == '__main__':
# disable OpenCV3's non thread safe OpenCL option
cv2.ocl.setUseOpenCL(False)
import os
import argparse
import PIL
from PIL import Image
import time
parser = argparse.ArgumentParser()
parser.add_argument('--i', required=True)
parser.add_argument('--o_prefix', required=True)
parser.add_argument('--qError', default=0.7, type=float)
parser.add_argument('--refineIter', default=3, type=int)
args = parser.parse_args()
# Read image
img_ori = np.array(Image.open(args.i).resize((1024, 512)))
# Vanishing point estimation & Line segments detection
s_time = time.time()
olines, vp, views, edges, panoEdge, score, angle = panoEdgeDetection(img_ori,
qError=args.qError,
refineIter=args.refineIter)
print('Elapsed time: %.2f' % (time.time() - s_time))
panoEdge = (panoEdge > 0)
print('Vanishing point:')
for v in vp[2::-1]:
print('%.6f %.6f %.6f' % tuple(v))
# Visualization
edg = rotatePanorama(panoEdge.astype(np.float64), vp[2::-1])
img = rotatePanorama(img_ori / 255.0, vp[2::-1])
one = img.copy() * 0.5
one[(edg > 0.5).sum(-1) > 0] = 0
one[edg[..., 0] > 0.5, 0] = 1
one[edg[..., 1] > 0.5, 1] = 1
one[edg[..., 2] > 0.5, 2] = 1
Image.fromarray((edg * 255).astype(np.uint8)).save('%s_edg.png' % args.o_prefix)
Image.fromarray((img * 255).astype(np.uint8)).save('%s_img.png' % args.o_prefix)
Image.fromarray((one * 255).astype(np.uint8)).save('%s_one.png' % args.o_prefix)
| 33.775135
| 111
| 0.547404
|
fc5f685f1be1348c6b9f9c6e8dbbd846f615c755
| 2,234
|
py
|
Python
|
src/aoc2021/day4/solution.py
|
marcelotrevisani/advent-of-code-2021
|
3855ecab180c23039ed4cb09e0645bf9bc5da38c
|
[
"Apache-2.0"
] | null | null | null |
src/aoc2021/day4/solution.py
|
marcelotrevisani/advent-of-code-2021
|
3855ecab180c23039ed4cb09e0645bf9bc5da38c
|
[
"Apache-2.0"
] | null | null | null |
src/aoc2021/day4/solution.py
|
marcelotrevisani/advent-of-code-2021
|
3855ecab180c23039ed4cb09e0645bf9bc5da38c
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from typing import Optional
import numpy as np
from numpy import ndarray
def parse_input(file_path):
with open(file_path, "r") as input_file:
numbers_draw = np.loadtxt(input_file, dtype=int, delimiter=",", max_rows=1)
all_boards = np.loadtxt(input_file, dtype=int, skiprows=1)
all_boards = all_boards.reshape((all_boards.shape[0] // 5, 5, 5))
return numbers_draw, all_boards
def get_sum_board_winner_value(
board_flag, all_boards, axis=0, ref_winners=None
) -> Optional[ndarray]:
    if ref_winners is None:
        ref_winners = set()
    result = None
for dimension in range(all_boards.shape[0]):
if dimension in ref_winners:
continue
one_dimension_flag = board_flag[dimension, :, :]
winner = np.all(one_dimension_flag, axis=axis)
if np.any(winner):
ref_winners.add(dimension)
dimension_board = all_boards[dimension, :, :]
result = result or np.sum(
                dimension_board[~one_dimension_flag]
)
return result
def solve1(numbers_draw, all_boards):
return generic_solver(numbers_draw, all_boards, 1)[0]
def generic_solver(numbers_draw, all_boards, stop_winner=None):
board_flag = np.full(all_boards.shape, False, dtype=bool)
result_values = []
winner_boards = set()
for i in numbers_draw:
positions = np.where(all_boards == i)
board_flag[positions] = True
if winner_value := (
get_sum_board_winner_value(
board_flag, all_boards, axis=0, ref_winners=winner_boards
)
or get_sum_board_winner_value(
board_flag, all_boards, axis=1, ref_winners=winner_boards
)
):
result_values.append(winner_value * i)
if len(result_values) == stop_winner:
return result_values
return result_values
def solve2(numbers_draw, all_boards):
return generic_solver(numbers_draw, all_boards)[-1]
if __name__ == "__main__":
numbers_draw, all_boards = parse_input(Path(__file__).parent / "data" / "input.txt")
print(f"Problem 1: {solve1(numbers_draw, all_boards)}")
print(f"Problem 2: {solve2(numbers_draw, all_boards)}")
| 33.343284
| 88
| 0.657117
|
c851915dd12d3bfc70de652c555fd112550cfc92
| 21,260
|
py
|
Python
|
tests/unit/test_req_file.py
|
lyw07/kolibripip
|
e7039eca92b61827faa754311f1489e89a11519d
|
[
"MIT"
] | null | null | null |
tests/unit/test_req_file.py
|
lyw07/kolibripip
|
e7039eca92b61827faa754311f1489e89a11519d
|
[
"MIT"
] | null | null | null |
tests/unit/test_req_file.py
|
lyw07/kolibripip
|
e7039eca92b61827faa754311f1489e89a11519d
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import textwrap
import pytest
from mock import Mock, patch
from pretend import stub
import pip
from pip.download import PipSession
from pip.exceptions import InstallationError, RequirementsFileParseError
from pip.index import PackageFinder
from pip.req.req_file import (
break_args_options, ignore_comments, join_lines, parse_requirements,
preprocess, process_line, skip_regex
)
from pip.req.req_install import InstallRequirement
from tests.lib import requirements_file
@pytest.fixture
def session():
return PipSession()
@pytest.fixture
def finder(session):
return PackageFinder([], [], session=session)
@pytest.fixture
def options(session):
return stub(
isolated_mode=False, index_url='default_url',
skip_requirements_regex=False,
format_control=pip.index.FormatControl(set(), set()))
class TestPreprocess(object):
"""tests for `preprocess`"""
def test_comments_and_joins_case1(self):
content = textwrap.dedent("""\
req1 \\
# comment \\
req2
""")
result = preprocess(content, None)
assert list(result) == [(1, 'req1'), (3, 'req2')]
def test_comments_and_joins_case2(self):
content = textwrap.dedent("""\
req1\\
# comment
""")
result = preprocess(content, None)
assert list(result) == [(1, 'req1')]
def test_comments_and_joins_case3(self):
content = textwrap.dedent("""\
req1 \\
# comment
req2
""")
result = preprocess(content, None)
assert list(result) == [(1, 'req1'), (3, 'req2')]
def test_skip_regex_after_joining_case1(self, options):
content = textwrap.dedent("""\
patt\\
ern
line2
""")
options.skip_requirements_regex = 'pattern'
result = preprocess(content, options)
assert list(result) == [(3, 'line2')]
def test_skip_regex_after_joining_case2(self, options):
content = textwrap.dedent("""\
pattern \\
line2
line3
""")
options.skip_requirements_regex = 'pattern'
result = preprocess(content, options)
assert list(result) == [(3, 'line3')]
class TestIgnoreComments(object):
"""tests for `ignore_comment`"""
def test_ignore_line(self):
lines = [(1, ''), (2, 'req1'), (3, 'req2')]
result = ignore_comments(lines)
assert list(result) == [(2, 'req1'), (3, 'req2')]
def test_ignore_comment(self):
lines = [(1, 'req1'), (2, '# comment'), (3, 'req2')]
result = ignore_comments(lines)
assert list(result) == [(1, 'req1'), (3, 'req2')]
def test_strip_comment(self):
lines = [(1, 'req1'), (2, 'req # comment'), (3, 'req2')]
result = ignore_comments(lines)
assert list(result) == [(1, 'req1'), (2, 'req'), (3, 'req2')]
class TestJoinLines(object):
"""tests for `join_lines`"""
def test_join_lines(self):
lines = enumerate([
'line 1',
'line 2:1 \\',
'line 2:2',
'line 3:1 \\',
'line 3:2 \\',
'line 3:3',
'line 4'
], start=1)
expect = [
(1, 'line 1'),
(2, 'line 2:1 line 2:2'),
(4, 'line 3:1 line 3:2 line 3:3'),
(7, 'line 4'),
]
assert expect == list(join_lines(lines))
def test_last_line_with_escape(self):
lines = enumerate([
'line 1',
'line 2 \\',
], start=1)
expect = [
(1, 'line 1'),
(2, 'line 2 '),
]
assert expect == list(join_lines(lines))
class TestSkipRegex(object):
"""tests for `skip_reqex``"""
def test_skip_regex_pattern_match(self):
options = stub(skip_requirements_regex='.*Bad.*')
line = '--extra-index-url Bad'
assert [] == list(skip_regex(enumerate([line]), options))
def test_skip_regex_pattern_not_match(self):
options = stub(skip_requirements_regex='.*Bad.*')
line = '--extra-index-url Good'
assert [(0, line)] == list(skip_regex(enumerate([line]), options))
def test_skip_regex_no_options(self):
options = None
line = '--extra-index-url Good'
assert [(0, line)] == list(skip_regex(enumerate([line]), options))
def test_skip_regex_no_skip_option(self):
options = stub(skip_requirements_regex=None)
line = '--extra-index-url Good'
assert [(0, line)] == list(skip_regex(enumerate([line]), options))
class TestProcessLine(object):
"""tests for `process_line`"""
def test_parser_error(self):
with pytest.raises(RequirementsFileParseError):
list(process_line("--bogus", "file", 1))
def test_only_one_req_per_line(self):
# pkg_resources raises the ValueError
with pytest.raises(InstallationError):
list(process_line("req1 req2", "file", 1))
def test_yield_line_requirement(self):
line = 'SomeProject'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
def test_yield_line_constraint(self):
line = 'SomeProject'
filename = 'filename'
comes_from = '-c %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(
line, comes_from=comes_from, constraint=True)
found_req = list(process_line(line, filename, 1, constraint=True))[0]
assert repr(found_req) == repr(req)
assert found_req.constraint is True
def test_yield_line_requirement_with_spaces_in_specifier(self):
line = 'SomeProject >= 2'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
assert str(req.req.specifier) == '>=2'
def test_yield_editable_requirement(self):
url = 'git+https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(url, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
def test_yield_editable_constraint(self):
url = 'git+https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
comes_from = '-c %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(
url, comes_from=comes_from, constraint=True)
found_req = list(process_line(line, filename, 1, constraint=True))[0]
assert repr(found_req) == repr(req)
assert found_req.constraint is True
def test_nested_requirements_file(self, monkeypatch):
line = '-r another_file'
req = InstallRequirement.from_line('SomeProject')
import pip.req.req_file
def stub_parse_requirements(req_url, finder, comes_from, options,
session, wheel_cache, constraint):
return [(req, constraint)]
parse_requirements_stub = stub(call=stub_parse_requirements)
monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
parse_requirements_stub.call)
assert list(process_line(line, 'filename', 1)) == [(req, False)]
def test_nested_constraints_file(self, monkeypatch):
line = '-c another_file'
req = InstallRequirement.from_line('SomeProject')
import pip.req.req_file
def stub_parse_requirements(req_url, finder, comes_from, options,
session, wheel_cache, constraint):
return [(req, constraint)]
parse_requirements_stub = stub(call=stub_parse_requirements)
monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
parse_requirements_stub.call)
assert list(process_line(line, 'filename', 1)) == [(req, True)]
def test_options_on_a_requirement_line(self):
line = 'SomeProject --install-option=yo1 --install-option yo2 '\
'--global-option="yo3" --global-option "yo4"'
filename = 'filename'
req = list(process_line(line, filename, 1))[0]
assert req.options == {
'global_options': ['yo3', 'yo4'],
'install_options': ['yo1', 'yo2']}
def test_hash_options(self):
"""Test the --hash option: mostly its value storage.
        Make sure it reads and preserves multiple hashes.
"""
line = ('SomeProject --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b1'
'61e5c1fa7425e73043362938b9824 '
'--hash=sha384:59e1748777448c69de6b800d7a33bbfb9ff1b463e44354c'
'3553bcdb9c666fa90125a3c79f90397bdf5f6a13de828684f '
'--hash=sha256:486ea46224d1bb4fb680f34f7c9ad96a8f24ec88be73ea8'
'e5a6c65260e9cb8a7')
filename = 'filename'
req = list(process_line(line, filename, 1))[0]
assert req.options == {'hashes': {
'sha256': ['2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e730433'
'62938b9824',
'486ea46224d1bb4fb680f34f7c9ad96a8f24ec88be73ea8e5a6c65'
'260e9cb8a7'],
'sha384': ['59e1748777448c69de6b800d7a33bbfb9ff1b463e44354c3553bcd'
'b9c666fa90125a3c79f90397bdf5f6a13de828684f']}}
def test_set_isolated(self, options):
line = 'SomeProject'
filename = 'filename'
options.isolated_mode = True
result = process_line(line, filename, 1, options=options)
assert list(result)[0].isolated
def test_set_finder_no_index(self, finder):
list(process_line("--no-index", "file", 1, finder=finder))
assert finder.index_urls == []
def test_set_finder_index_url(self, finder):
list(process_line("--index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_set_finder_find_links(self, finder):
list(process_line("--find-links=url", "file", 1, finder=finder))
assert finder.find_links == ['url']
def test_set_finder_extra_index_urls(self, finder):
list(process_line("--extra-index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_set_finder_trusted_host(self, finder):
list(process_line("--trusted-host=url", "file", 1, finder=finder))
assert finder.secure_origins == [('*', 'url', '*')]
def test_noop_always_unzip(self, finder):
# noop, but confirm it can be set
list(process_line("--always-unzip", "file", 1, finder=finder))
def test_set_finder_allow_all_prereleases(self, finder):
list(process_line("--pre", "file", 1, finder=finder))
assert finder.allow_all_prereleases
def test_relative_local_find_links(self, finder, monkeypatch):
"""
Test a relative find_links path is joined with the req file directory
"""
# Make sure the test also passes on windows
req_file = os.path.normcase(os.path.abspath(
os.path.normpath('/path/req_file.txt')))
nested_link = os.path.normcase(os.path.abspath(
os.path.normpath('/path/rel_path')))
exists_ = os.path.exists
def exists(path):
if path == nested_link:
return True
else:
                return exists_(path)
monkeypatch.setattr(os.path, 'exists', exists)
list(process_line("--find-links=rel_path", req_file, 1,
finder=finder))
assert finder.find_links == [nested_link]
def test_relative_http_nested_req_files(self, finder, monkeypatch):
"""
Test a relative nested req file path is joined with the req file url
"""
req_file = 'http://me.com/me/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == 'http://me.com/me/reqs.txt'
def test_relative_local_nested_req_files(self, finder, monkeypatch):
"""
Test a relative nested req file path is joined with the req file dir
"""
req_file = os.path.normpath('/path/req_file.txt')
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == os.path.normpath('/path/reqs.txt')
def test_absolute_local_nested_req_files(self, finder, monkeypatch):
"""
Test an absolute nested req file path
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r /other/reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == '/other/reqs.txt'
def test_absolute_http_nested_req_file_in_local(self, finder, monkeypatch):
"""
Test a nested req file url in a local req file
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r http://me.com/me/reqs.txt", req_file, 1,
finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == 'http://me.com/me/reqs.txt'
def test_set_finder_process_dependency_links(self, finder):
list(process_line(
"--process-dependency-links", "file", 1, finder=finder))
assert finder.process_dependency_links
class TestBreakOptionsArgs(object):
def test_no_args(self):
assert ('', '--option') == break_args_options('--option')
def test_no_options(self):
assert ('arg arg', '') == break_args_options('arg arg')
def test_args_short_options(self):
result = break_args_options('arg arg -s')
assert ('arg arg', '-s') == result
def test_args_long_options(self):
result = break_args_options('arg arg --long')
assert ('arg arg', '--long') == result
class TestOptionVariants(object):
# this suite is really just testing optparse, but added it anyway
def test_variant1(self, finder):
list(process_line("-i url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant2(self, finder):
list(process_line("-i 'url'", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant3(self, finder):
list(process_line("--index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant4(self, finder):
list(process_line("--index-url url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant5(self, finder):
list(process_line("--index-url='url'", "file", 1, finder=finder))
assert finder.index_urls == ['url']
class TestParseRequirements(object):
"""tests for `parse_requirements`"""
@pytest.mark.network
def test_remote_reqs_parse(self):
"""
Test parsing a simple remote requirements file
"""
# this requirements file just contains a comment previously this has
# failed in py3: https://github.com/pypa/pip/issues/760
for req in parse_requirements(
'https://raw.githubusercontent.com/pypa/'
'pip-test-package/master/'
'tests/req_just_comment.txt', session=PipSession()):
pass
def test_multiple_appending_options(self, tmpdir, finder, options):
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url url1 \n")
fp.write("--extra-index-url url2 ")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
session=PipSession(), options=options))
assert finder.index_urls == ['url1', 'url2']
def test_skip_regex(self, tmpdir, finder, options):
options.skip_requirements_regex = '.*Bad.*'
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url Bad \n")
fp.write("--extra-index-url Good ")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
options=options, session=PipSession()))
assert finder.index_urls == ['Good']
def test_join_lines(self, tmpdir, finder):
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url url1 \\\n--extra-index-url url2")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
session=PipSession()))
assert finder.index_urls == ['url1', 'url2']
def test_req_file_parse_no_only_binary(self, data, finder):
list(parse_requirements(
data.reqfiles.join("supported_options2.txt"), finder,
session=PipSession()))
expected = pip.index.FormatControl(set(['fred']), set(['wilma']))
assert finder.format_control == expected
def test_req_file_parse_comment_start_of_line(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("# Comment ")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert not reqs
def test_req_file_parse_comment_end_of_line_with_url(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("https://example.com/foo.tar.gz # Comment ")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert len(reqs) == 1
assert reqs[0].link.url == "https://example.com/foo.tar.gz"
def test_req_file_parse_egginfo_end_of_line_with_url(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("https://example.com/foo.tar.gz#egg=wat")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert len(reqs) == 1
assert reqs[0].name == "wat"
def test_req_file_no_finder(self, tmpdir):
"""
Test parsing a requirements file without a finder
"""
with open(tmpdir.join("req.txt"), "w") as fp:
fp.write("""
--find-links https://example.com/
--index-url https://example.com/
--extra-index-url https://two.example.com/
--no-use-wheel
--no-index
""")
parse_requirements(tmpdir.join("req.txt"), session=PipSession())
def test_install_requirements_with_options(self, tmpdir, finder, session,
options):
global_option = '--dry-run'
install_option = '--prefix=/opt'
content = '''
--only-binary :all:
INITools==2.0 --global-option="{global_option}" \
--install-option "{install_option}"
'''.format(global_option=global_option, install_option=install_option)
with requirements_file(content, tmpdir) as reqs_file:
req = next(parse_requirements(reqs_file.abspath,
finder=finder,
options=options,
session=session))
req.source_dir = os.curdir
with patch.object(subprocess, 'Popen') as popen:
popen.return_value.stdout.readline.return_value = b""
try:
req.install([])
except:
pass
last_call = popen.call_args_list[-1]
args = last_call[0][0]
assert (
0 < args.index(global_option) < args.index('install') <
args.index(install_option)
)
assert options.format_control.no_binary == set([':all:'])
assert options.format_control.only_binary == set([])
| 36.52921
| 79
| 0.599718
|
55a18ebeea2ea8a7f9a50ceb3c719c8772f16ea3
| 1,010
|
py
|
Python
|
tests/ops/test_ssm_gp.py
|
Capri2014/pyro
|
546f9010aeb2308ae566726b1cec67a7b4fda9c2
|
[
"MIT"
] | null | null | null |
tests/ops/test_ssm_gp.py
|
Capri2014/pyro
|
546f9010aeb2308ae566726b1cec67a7b4fda9c2
|
[
"MIT"
] | null | null | null |
tests/ops/test_ssm_gp.py
|
Capri2014/pyro
|
546f9010aeb2308ae566726b1cec67a7b4fda9c2
|
[
"MIT"
] | 1
|
2020-01-06T03:19:17.000Z
|
2020-01-06T03:19:17.000Z
|
import pytest
import torch
from pyro.ops.ssm_gp import MaternKernel
from tests.common import assert_equal
@pytest.mark.parametrize('num_gps', [1, 2, 3])
@pytest.mark.parametrize('nu', [0.5, 1.5, 2.5])
def test_matern_kernel(num_gps, nu):
mk = MaternKernel(nu=nu, num_gps=num_gps, length_scale_init=0.1 + torch.rand(num_gps))
dt = torch.rand(1).item()
forward = mk.transition_matrix(dt)
backward = mk.transition_matrix(-dt)
forward_backward = torch.matmul(forward, backward)
# going forward dt in time and then backward dt in time should bring us back to the identity
eye = torch.eye(mk.state_dim).unsqueeze(0).expand(num_gps, mk.state_dim, mk.state_dim)
assert_equal(forward_backward, eye)
# let's just check that these are PSD
mk.stationary_covariance().cholesky()
mk.process_covariance(forward).cholesky()
# evolving forward infinitesimally should yield the identity
nudge = mk.transition_matrix(torch.tensor([1.0e-9]))
assert_equal(nudge, eye)
| 34.827586
| 96
| 0.731683
|
eb56ef17ed2bea19ab06173d75533b2071ba5767
| 450
|
py
|
Python
|
binarysearch.io/265_look_and_say.py
|
mishrakeshav/Competitive-Programming
|
b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2
|
[
"MIT"
] | 2
|
2020-06-25T21:10:32.000Z
|
2020-12-10T06:53:45.000Z
|
binarysearch.io/265_look_and_say.py
|
mishrakeshav/Competitive-Programming
|
b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2
|
[
"MIT"
] | null | null | null |
binarysearch.io/265_look_and_say.py
|
mishrakeshav/Competitive-Programming
|
b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2
|
[
"MIT"
] | 3
|
2020-05-15T14:17:09.000Z
|
2021-07-25T13:18:20.000Z
|
class Solution:
def solve(self, n):
num = '1'
for i in range(n-1):
count = 0
ans = ''
new = num[0]
for a in num:
if new == a:
count += 1
else:
ans += str(count) + new
new = a
count = 1
ans += str(count) + new
num = ans
return num
| 25
| 43
| 0.304444
|
763cdfda3839290e46dc67efe3f3d87490e334fa
| 623
|
py
|
Python
|
pdvega/tests/utils.py
|
Casyfill/pdvega
|
ff43a5313127a78e69e859261a5358f0b6ad2264
|
[
"MIT"
] | 251
|
2018-01-12T09:35:06.000Z
|
2018-08-06T16:28:11.000Z
|
pdvega/tests/utils.py
|
Casyfill/pdvega
|
ff43a5313127a78e69e859261a5358f0b6ad2264
|
[
"MIT"
] | 28
|
2018-01-24T18:56:31.000Z
|
2018-08-21T17:46:24.000Z
|
pdvega/tests/utils.py
|
Casyfill/pdvega
|
ff43a5313127a78e69e859261a5358f0b6ad2264
|
[
"MIT"
] | 23
|
2018-01-12T06:51:39.000Z
|
2018-08-01T00:16:18.000Z
|
IGNORE = object()
def check_encodings(chart, **fields):
edict = chart.encoding.to_dict()
assert set(edict.keys()) == set(fields.keys())
for encoding, expected_field in fields.items():
if expected_field is IGNORE:
continue
actual_field = edict[encoding]['field']
if actual_field != expected_field:
raise ValueError("Expected '{0}' encoding to be '{1}'; got '{2}'"
"".format(encoding, expected_field, actual_field))
def get_data(chart):
return chart.data
def validate_vegalite(chart):
assert chart.to_dict(validate=True)
| 27.086957
| 79
| 0.629213
|
4b756d40a8014c7a6c12dcb1a149a189abf7a170
| 97
|
py
|
Python
|
src/syncsign/__init__.py
|
syncsign/syncsign-python-sdk
|
0bbfd46684f6805536623b1f42dffbf903f6d2ab
|
[
"Apache-2.0"
] | 2
|
2021-03-17T07:59:03.000Z
|
2021-03-22T16:40:31.000Z
|
src/syncsign/__init__.py
|
syncsign/syncsign-python-sdk
|
0bbfd46684f6805536623b1f42dffbf903f6d2ab
|
[
"Apache-2.0"
] | null | null | null |
src/syncsign/__init__.py
|
syncsign/syncsign-python-sdk
|
0bbfd46684f6805536623b1f42dffbf903f6d2ab
|
[
"Apache-2.0"
] | 1
|
2021-08-24T07:11:32.000Z
|
2021-08-24T07:11:32.000Z
|
__all__ = [
'api_helper',
'client',
'configuration',
'decorators',
'http',
]
| 12.125
| 20
| 0.515464
|
79f935d8a4ed89b9ab1161b1b373bc8b7279edfe
| 430
|
py
|
Python
|
test/unittests/test_Withdrawal.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/test_Withdrawal.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 6
|
2018-07-24T22:46:28.000Z
|
2018-07-29T19:13:09.000Z
|
test/unittests/test_Withdrawal.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:22:01.000Z
|
2018-07-24T18:22:01.000Z
|
import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.Input.WaterBudget import Withdrawal
class TestWithdrawal(VariableUnitTest):
def test_Withdrawal(self):
z = self.z
np.testing.assert_array_almost_equal(
Withdrawal.Withdrawal_f(z.NYrs, z.StreamWithdrawal, z.GroundWithdrawal),
Withdrawal.Withdrawal(z.NYrs, z.StreamWithdrawal, z.GroundWithdrawal), decimal=7)
| 33.076923
| 93
| 0.751163
|
a19f0daabc52b61d6275633474e4cc57463dc401
| 1,241
|
py
|
Python
|
dsa/patterns/dp/lcs/longest_repeating_sequence.py
|
bksahu/dsa
|
4b36abbb3e00ce449c435c44260316f46d6d35ec
|
[
"MIT"
] | null | null | null |
dsa/patterns/dp/lcs/longest_repeating_sequence.py
|
bksahu/dsa
|
4b36abbb3e00ce449c435c44260316f46d6d35ec
|
[
"MIT"
] | 4
|
2019-10-02T14:24:54.000Z
|
2020-03-26T07:06:15.000Z
|
dsa/patterns/dp/lcs/longest_repeating_sequence.py
|
bksahu/dsa
|
4b36abbb3e00ce449c435c44260316f46d6d35ec
|
[
"MIT"
] | 2
|
2019-10-02T15:57:51.000Z
|
2020-04-10T07:22:06.000Z
|
"""
Longest Repeating Subsequence
Given a string, print the longest repeating subsequence such that the two subsequences don’t
share the same character at the same position, i.e., any i’th character in the two subsequences
shouldn’t have the same index in the original string.
Example:
Input: str = "aab"
Output: "a"
The two subsequence are 'a'(first) and 'a'
(second). Note that 'b' cannot be considered
as part of subsequence as it would be at same
index in both.
"""
def longest_repeating_sequence(s):
n = len(s)
dp = [[0 for _ in range(n+1)] for _ in range(n+1)]
for i in range(1, n+1):
for j in range(1, n+1):
if s[i-1] == s[j-1] and i != j:
dp[i][j] = 1 + dp[i-1][j-1]
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
m, n = n, n
lrs = ""
while n != 0 and m != 0:
        # take the character only when it comes from a genuine (i != j) match
        if s[n-1] == s[m-1] and n != m and dp[n][m] == dp[n-1][m-1] + 1:
            lrs = s[n-1] + lrs
            n -= 1
            m -= 1
        elif dp[n-1][m] >= dp[n][m-1]:
n -= 1
else:
m -= 1
return lrs
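# Worked example added for clarity (not in the original file): for s = "aab" the filled
# dp table over i, j = 1..3 is
#       j=1 j=2 j=3
#   i=1  0   1   1
#   i=2  1   1   1
#   i=3  1   1   1
# dp[3][3] == 1 and the traceback above recovers the single repeated subsequence "a".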
if __name__ == "__main__":
print(longest_repeating_sequence("aab"))
print(longest_repeating_sequence("aabbebbcc"))
| 28.204545
| 103
| 0.542305
|
7b8a958a350fccd0794ff31dcc37d62832f01d3c
| 814
|
py
|
Python
|
cfb-cursos/radiobutton.py
|
joseluizbrits/sobre-python
|
316143c341e5a44070a3b13877419082774bd730
|
[
"MIT"
] | null | null | null |
cfb-cursos/radiobutton.py
|
joseluizbrits/sobre-python
|
316143c341e5a44070a3b13877419082774bd730
|
[
"MIT"
] | null | null | null |
cfb-cursos/radiobutton.py
|
joseluizbrits/sobre-python
|
316143c341e5a44070a3b13877419082774bd730
|
[
"MIT"
] | null | null | null |
from tkinter import *
def imprimirEsporte():
ve = vesporte.get()
if ve == 'f':
print('Esporte Futebol')
elif ve == 'v':
print('Esporte Vôlei')
elif ve == 'b':
print('Esporte Basquete')
else:
print('Selecione um esporte')
app = Tk()
app.title('BLOCO')
app.geometry('500x300')
vesporte = StringVar()
lb_esportes = Label(app, text='Esportes')
lb_esportes.pack()
rb_futebol = Radiobutton(app, text='Futebol', value='f', variable=vesporte)
rb_futebol.pack()
rb_volei = Radiobutton(app, text='Vôlei', value='v', variable=vesporte)
rb_volei.pack()
rb_basquete = Radiobutton(app, text='Basquete', value='b', variable=vesporte)
rb_basquete.pack()
btn_esporte = Button(app, text='Esporte selecionado', command=imprimirEsporte)
btn_esporte.pack()
app.mainloop()
| 21.421053
| 78
| 0.674447
|
608f488e8a567116f73981c0f5674d12a682554f
| 678
|
py
|
Python
|
arelle/examples/plugin/importTestImported1.py
|
jukrupa/Arelle-master
|
0f8108e60fa86c8e324c5aa453765c44766f882f
|
[
"Apache-2.0"
] | null | null | null |
arelle/examples/plugin/importTestImported1.py
|
jukrupa/Arelle-master
|
0f8108e60fa86c8e324c5aa453765c44766f882f
|
[
"Apache-2.0"
] | null | null | null |
arelle/examples/plugin/importTestImported1.py
|
jukrupa/Arelle-master
|
0f8108e60fa86c8e324c5aa453765c44766f882f
|
[
"Apache-2.0"
] | null | null | null |
'''
pluginPackages test case
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
# this module would raise a SystemError due to PEP 366 after Python 3.4.3
from . import importTestImported11
def foo():
print ("imported unpackaged plug-in relative imported 1")
__pluginInfo__ = {
'name': 'Unpackaged Relative Import 1',
'version': '0.9',
'description': "This is a unpackaged relative imported plugin.",
'license': 'Apache-2',
'author': 'Mark V Systems',
'copyright': '(c) Copyright 2015 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'Import.Unpackaged.Entry6': foo,
# imported plugins
}
| 29.478261
| 83
| 0.69469
|
29d481f7b4f9de7eaa6cd69efae1197e5fe13efb
| 978
|
py
|
Python
|
test/test_playback_reporting_api_custom_query.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_playback_reporting_api_custom_query.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_playback_reporting_api_custom_query.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import embyapi
from embyapi.models.playback_reporting_api_custom_query import PlaybackReportingApiCustomQuery # noqa: E501
from embyapi.rest import ApiException
class TestPlaybackReportingApiCustomQuery(unittest.TestCase):
"""PlaybackReportingApiCustomQuery unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPlaybackReportingApiCustomQuery(self):
"""Test PlaybackReportingApiCustomQuery"""
# FIXME: construct object with mandatory attributes with example values
# model = embyapi.models.playback_reporting_api_custom_query.PlaybackReportingApiCustomQuery() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.45
| 116
| 0.734151
|
28ffe0f5fa8efad4e267111564084e65004179d3
| 2,619
|
py
|
Python
|
restkit/handlers/http_user_handlers/http_report_handlers/report_csv_delete.py
|
ppolxda/restkit
|
eeb6177ccd75f8ba7b2faa252116f1e745d0f91b
|
[
"MIT"
] | null | null | null |
restkit/handlers/http_user_handlers/http_report_handlers/report_csv_delete.py
|
ppolxda/restkit
|
eeb6177ccd75f8ba7b2faa252116f1e745d0f91b
|
[
"MIT"
] | null | null | null |
restkit/handlers/http_user_handlers/http_report_handlers/report_csv_delete.py
|
ppolxda/restkit
|
eeb6177ccd75f8ba7b2faa252116f1e745d0f91b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@create: 2019-09-28 18:18:54.
@author: name
@desc: report_csv_delete
"""
from tornado import gen
from restkit import rest
from restkit.transactions import trans_func
from restkit.transactions import Transaction
from restkit.handlers.http_user_conns.http_sessions import HttpSessionBaseHandler # noqa
from restkit.tools.error_info import _N
from restkit.tools.error_info import _
from restkit.tools.error_info import error_info
try:
from report_task.tasks import TaskError
from report_task.tasks import EnumStatus
except ImportError:
TaskError = None
    EnumStatus = None
class HttpReportCsvDeleteHandler(HttpSessionBaseHandler):
delete_keys = ['taskid']
@rest.delete(api_key='report_csv[DELETE]',
path='/report_csv', data_types=[],
rest_path=_N('/').join([_('99.Report Csv Manage'),
_('Delete Csv File')]),
query_define={})
def delete_req(self):
req_data = self.decode_json_from_body()
req_data = self.filter_whitelist(req_data, self.delete_keys)
trans = Transaction(
trans=[
self.delete_check(req_data) == error_info.ERROR_SUCESS,
self.delete_processing(req_data) == error_info.ERROR_SUCESS,
],
success=self.delete_sucess,
failure=self.delete_failure
)
yield self.trans_spawn(trans)
@trans_func
def delete_check(self, req_data):
if 'taskid' not in req_data:
raise gen.Return(self.package_rsp(
error_info.ERROR_KEY_INVAILD,
{'key': 'taskid'})
)
return error_info.ERROR_SUCESS
@trans_func
def delete_processing(self, req_data):
try:
task = self.gsettings.csvtask.get_task_by_id(req_data['taskid'])
except TaskError:
raise gen.Return(self.package_rsp(
error_info.ERROR_OP_OBJECT_NOT_FOUND,
{'desc': 'taskid'})
)
taskinfo = task.to_task_dict()
if taskinfo['userid'] != self.session_userid_str():
raise gen.Return(self.package_rsp(
error_info.ERROR_OP_OBJECT_NOT_FOUND,
{'desc': 'taskid2'})
)
task.delete_tasks()
raise gen.Return(error_info.ERROR_SUCESS)
@gen.coroutine
def delete_sucess(self, last_result):
self.write_error_json(error_info.ERROR_SUCESS)
@gen.coroutine
def delete_failure(self, last_result):
self.write_error_json(last_result)
| 30.453488
| 89
| 0.633066
|
84063d8f3bd7802c258521eb18d053f38b446a43
| 8,936
|
py
|
Python
|
mbuild/formats/json_formats.py
|
chrisiacovella/mbuild
|
f4b880e0605c03f0612e087712df4836c47bb29e
|
[
"MIT"
] | null | null | null |
mbuild/formats/json_formats.py
|
chrisiacovella/mbuild
|
f4b880e0605c03f0612e087712df4836c47bb29e
|
[
"MIT"
] | 1
|
2019-09-18T04:59:45.000Z
|
2019-09-18T04:59:45.000Z
|
mbuild/formats/json_formats.py
|
chrisiacovella/mbuild
|
f4b880e0605c03f0612e087712df4836c47bb29e
|
[
"MIT"
] | null | null | null |
import json
from collections import OrderedDict
import mbuild as mb
from mbuild.exceptions import MBuildError
def compound_from_json(json_file):
"""
Convert the given json file into a mb.Compound
Given an input json file, this method scans for the particles, bond information
as well as other hierarchical information regarding the compound and returns a
mb.Compound.
Parameters
-----------
json_file: (path, str) Path of the json file
Returns
-------
    parent: mb.Compound, the compound equivalent of the json file
Raises
------
ValueError: This is raised when the JSON file cannot be parsed by python's json module
    MBuildError: This is raised on version incompatibility and missing JSON keys, when trying to
    convert the JSON to a compound.
"""
with open(json_file, 'r') as cmpdfile:
try:
cmpd_dict_and_meta = json.load(cmpdfile)
except ValueError as e:
raise e
try:
_perform_sanity_check(cmpd_dict_and_meta)
except MBuildError as e:
raise e
compound_dict = cmpd_dict_and_meta['Compound']
converted_dict = {}
parent = _dict_to_mb(compound_dict)
converted_dict[compound_dict['id']] = parent
for sub_compound, compound in _dict_successors(compound_dict):
if compound['id'] not in converted_dict:
parent_compound = _dict_to_mb(compound)
converted_dict[compound['id']] = parent_compound
parent_compound = converted_dict[compound['id']]
if sub_compound['id'] not in converted_dict:
sub_cmpd = _dict_to_mb(sub_compound)
converted_dict[sub_compound['id']] = sub_cmpd
sub_cmpd = converted_dict[sub_compound['id']]
label_str = sub_compound['label']
label_list = compound.get('label_list', {})
for key, vals in label_list.items():
if not parent_compound.labels.get(key, None):
parent_compound.labels[key] = list()
if sub_compound['id'] in vals:
parent_compound.labels[key].append(sub_cmpd)
parent_compound.add(sub_cmpd, label=label_str)
_add_ports(compound_dict, converted_dict)
_add_bonds(compound_dict, parent, converted_dict)
return parent
def compound_to_json(cmpd, file_path, include_ports=False):
"""Convert the mb.Compound into equivelent json representation
This method takes in the mb.Compound and tries to save the hierarchical
information of the mb.Compound into a json file.
Parameters
----------
cmpd: mb.Compound
file_path: str, path to save the JSON file.
include_ports: bool, whether to dump port information, default False
"""
# Maintain a bookkeeping dict, to do the nesting of children correctly
version = mb.version
cmpd_info = {}
compound_dict = _particle_info(cmpd, include_ports)
cmpd_info[cmpd] = compound_dict
# Iteratively collect all the information for the children/successors
for sub_compound in cmpd.successors():
if not sub_compound.port_particle:
parent_compound = sub_compound.parent
sub_compound_dict = _particle_info(sub_compound, include_ports)
sub_compound_dict['parent_id'] = id(parent_compound)
sub_compound_dict['is_port'] = False
sub_compound_dict['label'] = None
for key, val in sub_compound.parent.labels.items():
if val == sub_compound:
sub_compound_dict['label'] = key
if isinstance(val, list):
if not cmpd_info[sub_compound.parent].get('label_list', None):
cmpd_info[sub_compound.parent]['label_list'] = OrderedDict()
cmpd_info[sub_compound.parent]['label_list'][key] = [id(x) for x in val]
if not cmpd_info[parent_compound].get('children', False):
cmpd_info[parent_compound]['children'] = list()
cmpd_info[parent_compound]['children'].append(sub_compound_dict)
cmpd_info[sub_compound] = sub_compound_dict
# Should this be nested as well? Not sure...
compound_dict['bonds'] = _bond_info(cmpd)
compound_json = OrderedDict()
compound_json['mbuild-version'] = version
compound_json['type'] = 'Compound'
compound_json['Compound'] = compound_dict
with open(file_path, 'w') as datafile:
json.dump(compound_json, datafile, indent=2)
def _particle_info(cmpd, include_ports=False):
"""Return information about a particle, in a JSON serializable OrderedDict"""
particle_dict = OrderedDict()
particle_dict['id'] = id(cmpd)
particle_dict['name'] = cmpd.name
particle_dict['pos'] = list(cmpd.pos)
particle_dict['charge'] = cmpd.charge
particle_dict['periodicity'] = list(cmpd.periodicity)
if include_ports:
particle_dict['ports'] = list()
for port in cmpd.available_ports():
port_info = OrderedDict()
if port.anchor is not None:
port_info['anchor'] = id(port.anchor)
else:
port_info['anchor'] = None
port_info['label'] = None
# Is this the most efficient way?
for key, val in cmpd.labels.items():
if (val == port) and val.port_particle:
port_info['label'] = key
particle_dict['ports'].append(port_info)
return particle_dict
def _bond_info(cmpd):
"""Given a compound, return the bond information"""
bond_list = list()
for bond in cmpd.bonds():
bond_list.append((id(bond[0]), id(bond[1])))
return bond_list
def _dict_to_mb(compound_dict):
"""Given a dictionary, return the equivelent mb.Compound."""
name = compound_dict.get('name', "Compound")
pos = compound_dict.get('pos', [0.0, 0.0, 0.0])
charge = compound_dict.get('charge', 0.0)
periodicity = compound_dict.get('periodicity', [0.0, 0.0, 0.0])
this_particle = mb.Compound(name=name, pos=pos, charge=charge, periodicity=periodicity)
return this_particle
def _dict_successors(compound_dict):
"""This is a recursive method to get all successors of a given compound and its subcompounds
Notes
-----
    This implementation borrows the concept from protobuf.py's _proto_successors()
"""
if not compound_dict.get('children', False):
return
else:
for sub_compund in compound_dict['children']:
yield sub_compund, compound_dict
for sub_sub_compound, parent_compound in _dict_successors(sub_compund):
yield (sub_sub_compound, parent_compound)
def _add_ports(compound_dict, converted_dict):
"""After adding all particles, this method will add ports if any exists"""
for subcompound, compound in _dict_successors(compound_dict):
ports = compound.get('ports', None)
if ports:
for port in ports:
label_str = port['label']
port_to_add = mb.Port(anchor=converted_dict[port['anchor']])
converted_dict[compound['id']].add(port_to_add, label_str)
# Not necessary to add same port twice
compound['ports'] = None
ports = subcompound.get('ports', None)
if ports:
for port in ports:
label_str = port['label']
port_to_add = mb.Port(anchor=converted_dict[port['anchor']])
converted_dict[subcompound['id']].add(port_to_add, label_str)
subcompound['ports'] = None
def _add_bonds(compound_dict, parent, converted_dict):
"""Add bonds from the json files to the compound"""
for bond in compound_dict['bonds']:
parent.add_bond(particle_pair=(converted_dict[bond[0]], converted_dict[bond[1]]))
def _perform_sanity_check(json_dict):
"""Perform Sanity Check on the JSON File"""
from warnings import warn
warning_msg = "This Json was written using {0}, current mbuild version is {1}."
this_version = mb.version
json_mbuild_version = json_dict.get('mbuild-version', None)
if not json_mbuild_version:
raise MBuildError("Error. The uploaded JSON file doesn't seem to be correctly formatted")
json_mb_type = json_dict.get('type', None)
if (not json_mb_type) or (json_mb_type != 'Compound'):
raise MBuildError("Error. Cannot convert JSON of type: {}".format(json_mb_type))
[major, minor, patch] = json_mbuild_version.split('.')
[this_major, this_minor, this_patch] = this_version.split('.')
if major != this_major:
raise MBuildError(warning_msg.format(json_mbuild_version, this_version) + " Cannot Convert JSON to compound")
if minor != this_minor:
warn(warning_msg.format(json_mbuild_version, this_version) + " Will Proceed.")
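# The round-trip sketch below is illustrative only (it is not part of the original
# module): it exercises compound_to_json / compound_from_json defined above, assuming
# mbuild is installed; the compound, file name and assertion are made up for the example.
if __name__ == "__main__":
    water = mb.Compound(name="H2O", pos=[0.0, 0.0, 0.0], charge=0.0)
    compound_to_json(water, "water.json", include_ports=False)
    loaded = compound_from_json("water.json")
    assert loaded.name == water.name  # particle data survives the JSON round trip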
| 39.715556
| 117
| 0.649396
|
01e1f6b44893eec6dd3ee1674bbc477874284a3d
| 21,279
|
py
|
Python
|
experiment.py
|
mtanti/rnn-role
|
89e77bbb226ee2ecc645591455d892fe03e5c271
|
[
"MIT"
] | 14
|
2017-08-13T21:37:55.000Z
|
2020-12-06T07:11:47.000Z
|
experiment.py
|
mtanti/rnn-role
|
89e77bbb226ee2ecc645591455d892fe03e5c271
|
[
"MIT"
] | 1
|
2017-09-22T10:19:40.000Z
|
2017-09-28T07:57:41.000Z
|
experiment.py
|
mtanti/rnn-role
|
89e77bbb226ee2ecc645591455d892fe03e5c271
|
[
"MIT"
] | 5
|
2017-08-14T06:47:15.000Z
|
2021-03-26T16:27:43.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip
import tensorflow as tf
import json
import numpy as np
import scipy.io
import sys, os, shutil
import random
import timeit
import collections
import heapq
max_epochs = 100
num_runs = 3
minibatch_size = 50
results_data_dir = 'results'
mscoco_dir = '.../coco-caption-master' #directory to mscoco evaluation code downloaded from https://github.com/tylin/coco-caption
def get_raw_input_data_dir(dataset):
return '.../'+dataset #directory to karpathy flickr8k or flickr30k dataset downloaded from http://cs.stanford.edu/people/karpathy/deepimagesent/
sys.path.append(mscoco_dir)
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
##################################################################################################################################
def generate_sequence_beamsearch(predictions_function, beam_width=3, clip_len=20):
prev_beam = Beam(beam_width)
prev_beam.add(np.array(1.0, 'float64'), False, [ edge_index ])
while True:
curr_beam = Beam(beam_width)
        #Carry over complete sentences that do not yet have the best probability to the current beam; prepare to add more words to the incomplete ones.
prefix_batch = list()
prob_batch = list()
for (prefix_prob, complete, prefix) in prev_beam:
if complete == True:
curr_beam.add(prefix_prob, True, prefix)
else:
prefix_batch.append(prefix)
prob_batch.append(prefix_prob)
#Get probability of each possible next word for each incomplete prefix.
indexes_distributions = predictions_function(prefix_batch)
#Add next words
for (prefix_prob, prefix, indexes_distribution) in zip(prob_batch, prefix_batch, indexes_distributions):
for (next_index, next_prob) in enumerate(indexes_distribution):
if next_index == unknown_index: #skip unknown tokens
pass
elif next_index == edge_index: #if next word is the end token then mark prefix as complete and leave out the end token
curr_beam.add(prefix_prob*next_prob, True, prefix)
else: #if next word is a non-end token then mark prefix as incomplete
curr_beam.add(prefix_prob*next_prob, False, prefix+[next_index])
(best_prob, best_complete, best_prefix) = max(curr_beam)
if best_complete == True or len(best_prefix)-1 == clip_len: #if the length of the most probable prefix exceeds the clip length (ignoring the start token) then return it as is
return ' '.join(index_to_token[index] for index in best_prefix[1:]) #return best sentence without the start token
prev_beam = curr_beam
class Beam(object):
#For comparison of prefixes, the tuple (prefix_probability, complete_sentence) is used.
#This is so that if two prefixes have equal probabilities then a complete sentence is preferred over an incomplete one since (0.5, False) < (0.5, True)
#################################################################
def __init__(self, beam_width):
self.heap = list()
self.beam_width = beam_width
#################################################################
def add(self, prob, complete, prefix):
heapq.heappush(self.heap, (prob, complete, prefix))
if len(self.heap) > self.beam_width:
heapq.heappop(self.heap)
#################################################################
def __iter__(self):
return iter(self.heap)
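    #################################################################
    # Illustrative use of Beam (not part of the original script): with beam_width=2
    # only the two highest-probability prefixes survive each push.
    #   b = Beam(2)
    #   b.add(0.5, False, [1]); b.add(0.2, False, [2]); b.add(0.7, False, [3])
    #   sorted(p for (p, _, _) in b)  ->  [0.5, 0.7]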
with open(results_data_dir+'/results.txt', 'w', encoding='utf-8') as f:
print('dataset', 'min_token_freq', 'vocab_size', 'vocab_used', 'layer_size', 'num_params', 'method', 'run', 'CIDEr', 'Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'METEOR', 'ROUGE_L', sep='\t', file=f)
################################################################
for dataset in [ 'flickr8k', 'flickr30k' ]:
raw_input_data_dir = get_raw_input_data_dir(dataset)
################################################################
print('Loading raw data...')
print(dataset)
with open(raw_input_data_dir+'/dataset.json', 'r', encoding='utf-8') as captions_f:
captions_data = json.load(captions_f)['images']
    features = scipy.io.loadmat(raw_input_data_dir+'/vgg_feats.mat')['feats'].T #image features matrix is transposed
raw_dataset = {
'train': { 'filenames': list(), 'images': list(), 'captions': list() },
'val': { 'filenames': list(), 'images': list(), 'captions': list() },
'test': { 'filenames': list(), 'images': list(), 'captions': list() },
}
for (image_id, (caption_data, image)) in enumerate(zip(captions_data, features)):
assert caption_data['sentences'][0]['imgid'] == image_id
split = caption_data['split']
if split == 'restval':
continue
filename = caption_data['filename']
caption_group = [ caption['tokens'] for caption in caption_data['sentences'] ]
image = image/np.linalg.norm(image)
raw_dataset[split]['filenames'].append(filename)
raw_dataset[split]['images'].append(image)
raw_dataset[split]['captions'].append(caption_group)
with open(mscoco_dir+'/annotations/captions.json', 'w', encoding='utf-8') as f:
print(str(json.dumps({
'info': {
'description': None,
'url': None,
'version': None,
'year': None,
'contributor': None,
'date_created': None,
},
'images': [
{
'license': None,
'url': None,
'file_name': None,
'id': image_id,
'width': None,
'date_captured': None,
'height': None
}
for image_id in range(len(raw_dataset['test']['images']))
],
'licenses': [
],
'type': 'captions',
'annotations': [
{
'image_id': image_id,
'id': caption_id,
'caption': ' '.join(caption)
}
for (caption_id, (image_id, caption)) in enumerate((image_id, caption) for (image_id, caption_group) in enumerate(raw_dataset['test']['captions']) for caption in caption_group)
]
})), file=f)
for min_token_freq in [ 3, 4, 5 ]:
all_tokens = (token for caption_group in raw_dataset['train']['captions'] for caption in caption_group for token in caption)
token_freqs = collections.Counter(all_tokens)
vocab = sorted(token_freqs.keys(), key=lambda token:(-token_freqs[token], token))
while token_freqs[vocab[-1]] < min_token_freq:
vocab.pop()
vocab_size = len(vocab) + 2 # + edge and unknown tokens
print('vocab:', vocab_size)
token_to_index = { token: i+2 for (i, token) in enumerate(vocab) }
index_to_token = { i+2: token for (i, token) in enumerate(vocab) }
edge_index = 0
unknown_index = 1
def parse(data):
indexes = list()
lens = list()
images = list()
for (caption_group, img) in zip(data['captions'], data['images']):
for caption in caption_group:
indexes_ = [ token_to_index.get(token, unknown_index) for token in caption ]
indexes.append(indexes_)
lens.append(len(indexes_)+1) #add 1 due to edge token
images.append(img)
maxlen = max(lens)
in_mat = np.zeros((len(indexes), maxlen), np.int32)
out_mat = np.zeros((len(indexes), maxlen), np.int32)
for (row, indexes_) in enumerate(indexes):
in_mat [row,:len(indexes_)+1] = [edge_index]+indexes_
out_mat[row,:len(indexes_)+1] = indexes_+[edge_index]
return (in_mat, out_mat, np.array(lens, np.int32), np.array(images))
(train_captions_in, train_captions_out, train_captions_len, train_images) = parse(raw_dataset['train'])
(val_captions_in, val_captions_out, val_captions_len, val_images) = parse(raw_dataset['val'])
(test_captions_in, test_captions_out, test_captions_len, test_images) = parse(raw_dataset['test'])
################################################################
print('Training...')
for layer_size in [ 128, 256, 512 ]:
for method in [ 'merge', 'inject' ]:
for run in range(1, num_runs+1):
model_name = '_'.join([ str(x) for x in [ method, dataset, min_token_freq, layer_size, run ] ])
os.makedirs(results_data_dir+'/'+model_name)
print()
print('-'*100)
print(dataset, min_token_freq, layer_size, method, run)
print()
tf.reset_default_graph()
                    #Sequence of token indexes generated thus far, including the start token (or the full correct sequence during training).
seq_in = tf.placeholder(tf.int32, shape=[None, None], name='seq_in') #[seq, token index]
#Length of sequence in seq_in.
seq_len = tf.placeholder(tf.int32, shape=[None], name='seq_len') #[seq len]
#Images
image = tf.placeholder(tf.float32, shape=[None, 4096], name='image') #[seq, image feature]
#Correct sequence to generate during training without start token but with end token
seq_target = tf.placeholder(tf.int32, shape=[None, None], name='seq_target') #[seq, token index]
#Number of sequences to process at once.
batch_size = tf.shape(seq_in)[0]
#Number of tokens in generated sequence.
num_steps = tf.shape(seq_in)[1]
with tf.variable_scope('image'):
#Project image vector into a smaller vector.
W = tf.get_variable('W', [ 4096, layer_size ], tf.float32, tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b', [ layer_size ], tf.float32, tf.zeros_initializer())
post_image = tf.matmul(image, W) + b
with tf.variable_scope('prefix_encoder'):
#Encode each generated sequence prefix into a vector.
#Embedding matrix for token vocabulary.
embeddings = tf.get_variable('embeddings', [ vocab_size, layer_size ], tf.float32, tf.contrib.layers.xavier_initializer()) #[vocabulary token, token feature]
#3tensor of tokens in sequences replaced with their corresponding embedding.
embedded = tf.nn.embedding_lookup(embeddings, seq_in) #[seq, token, token feature]
if method == 'inject':
rnn_input = tf.concat([ embedded, tf.tile(tf.expand_dims(post_image, 1), [1,num_steps,1]) ], axis=2)
else:
rnn_input = embedded
#Use an LSTM to encode the generated prefix.
init_state = tf.contrib.rnn.LSTMStateTuple(c=tf.zeros([ batch_size, layer_size ]), h=tf.zeros([ batch_size, layer_size ]))
cell = tf.contrib.rnn.BasicLSTMCell(layer_size)
(prefix_vectors, _) = tf.nn.dynamic_rnn(cell, rnn_input, sequence_length=seq_len, initial_state=init_state) #[seq, prefix position, prefix feature]
#Mask of which positions in the matrix of sequences are actual labels as opposed to padding.
token_mask = tf.cast(tf.sequence_mask(seq_len, num_steps), tf.float32) #[seq, token flag]
with tf.variable_scope('softmax'):
#Output a probability distribution over the token vocabulary (including the end token)
if method == 'merge':
softmax_input = tf.concat([ prefix_vectors, tf.tile(tf.expand_dims(post_image, 1), [1,num_steps,1]) ], axis=2)
softmax_input_size = layer_size + layer_size #state + image
else:
softmax_input = prefix_vectors
softmax_input_size = layer_size
W = tf.get_variable('W', [ softmax_input_size, vocab_size ], tf.float32, tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b', [ vocab_size ], tf.float32, tf.zeros_initializer())
logits = tf.reshape(tf.matmul(tf.reshape(softmax_input, [ -1, softmax_input_size ]), W) + b, [ batch_size, num_steps, vocab_size ])
predictions = tf.nn.softmax(logits) #[seq, prefix position, token probability]
last_prediction = predictions[:,-1]
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=seq_target, logits=logits) * token_mask
total_loss = tf.reduce_sum(losses)
train_step = tf.train.AdamOptimizer().minimize(total_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
num_params = 0
for v in sess.graph.get_collection('trainable_variables'):
num_params += np.prod(v.get_shape()).value
print('epoch', 'val loss', 'duration', sep='\t')
run_start = start = timeit.default_timer()
validation_loss = 0
for i in range(len(val_images)//minibatch_size):
minibatch_validation_loss = sess.run(total_loss, feed_dict={
seq_in: val_captions_in [i*minibatch_size:(i+1)*minibatch_size],
seq_len: val_captions_len[i*minibatch_size:(i+1)*minibatch_size],
seq_target: val_captions_out[i*minibatch_size:(i+1)*minibatch_size],
image: val_images[i*minibatch_size:(i+1)*minibatch_size]
})
validation_loss += minibatch_validation_loss
print(0, round(validation_loss, 3), round(timeit.default_timer() - start), sep='\t')
last_validation_loss = validation_loss
trainingset_indexes = list(range(len(train_images)))
for epoch in range(1, max_epochs+1):
random.shuffle(trainingset_indexes)
start = timeit.default_timer()
for i in range(len(trainingset_indexes)//minibatch_size):
minibatch_indexes = trainingset_indexes[i*minibatch_size:(i+1)*minibatch_size]
sess.run(train_step, feed_dict={
seq_in: train_captions_in [minibatch_indexes],
seq_len: train_captions_len[minibatch_indexes],
seq_target: train_captions_out[minibatch_indexes],
image: train_images[minibatch_indexes]
})
validation_loss = 0
for i in range(len(val_images)//minibatch_size):
minibatch_validation_loss = sess.run(total_loss, feed_dict={
seq_in: val_captions_in [i*minibatch_size:(i+1)*minibatch_size],
seq_len: val_captions_len[i*minibatch_size:(i+1)*minibatch_size],
seq_target: val_captions_out[i*minibatch_size:(i+1)*minibatch_size],
image: val_images[i*minibatch_size:(i+1)*minibatch_size]
})
validation_loss += minibatch_validation_loss
print(epoch, round(validation_loss, 3), round(timeit.default_timer() - start), sep='\t')
if validation_loss > last_validation_loss:
break
last_validation_loss = validation_loss
saver.save(sess, results_data_dir+'/'+model_name+'/model')
saver.restore(sess, tf.train.latest_checkpoint(results_data_dir+'/'+model_name))
print()
print('evaluating...')
print()
captions = list()
for (i, image_input) in enumerate(raw_dataset['test']['images']):
caption = generate_sequence_beamsearch(lambda prefixes:sess.run(last_prediction, feed_dict={
seq_in: prefixes,
seq_len: [ len(p) for p in prefixes ],
image: image_input.reshape([1,-1]).repeat(len(prefixes), axis=0)
}))
captions.append(caption)
vocab_used = len({ word for caption in captions for word in caption.split(' ') })
with open(results_data_dir+'/'+model_name+'/generated_captions.json', 'w', encoding='utf-8') as f:
print(str(json.dumps([
{
'image_id': image_id,
'caption': caption
}
for (image_id, caption) in enumerate(captions)
])), file=f)
shutil.copyfile(results_data_dir+'/'+model_name+'/generated_captions.json', mscoco_dir+'/results/generated_captions.json')
coco = COCO(mscoco_dir+'/annotations/captions.json')
cocoRes = coco.loadRes(mscoco_dir+'/results/generated_captions.json')
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.evaluate()
gen_result = [ cocoEval.eval[metric] for metric in [ 'CIDEr', 'Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'METEOR', 'ROUGE_L' ] ]
with open(results_data_dir+'/results.txt', 'a', encoding='utf-8') as f:
print(*[ str(x) for x in [dataset, min_token_freq, vocab_size, vocab_used, layer_size, num_params, method, run]+gen_result ], sep='\t', file=f)
print()
print('Duration:', round(timeit.default_timer() - run_start), 's')
print()
| 58.944598
| 200
| 0.492457
|
d860093fe9475d180dde2d94aa2e5ce5d237bc74
| 3,602
|
py
|
Python
|
usaspending_api/etl/management/commands/es_configure.py
|
gaybro8777/usaspending-api
|
fe9d730acd632401bbbefa168e3d86d59560314b
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/etl/management/commands/es_configure.py
|
gaybro8777/usaspending-api
|
fe9d730acd632401bbbefa168e3d86d59560314b
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/etl/management/commands/es_configure.py
|
gaybro8777/usaspending-api
|
fe9d730acd632401bbbefa168e3d86d59560314b
|
[
"CC0-1.0"
] | null | null | null |
import json
import subprocess
from django.core.management.base import BaseCommand
from usaspending_api import settings
from usaspending_api.etl.es_etl_helpers import VIEW_COLUMNS
CURL_STATEMENT = 'curl -XPUT "{url}" -H "Content-Type: application/json" -d \'{data}\''
CURL_COMMANDS = {
"template": "{host}/_template/{name}?pretty",
"cluster": "{host}/_cluster/settings?pretty",
"settings": "{host}/_settings?pretty",
}
FILES = {
"template": settings.APP_DIR / "etl" / "es_transaction_template.json",
"settings": settings.APP_DIR / "etl" / "es_config_objects.json",
}
class Command(BaseCommand):
help = """
This script applies configuration changes to an Elasticsearch cluster.
Requires env var ES_HOSTNAME to be set
"""
def add_arguments(self, parser):
parser.add_argument(
"--template-only",
action="store_true",
help="When this flag is set, skip the cluster and index settings. Useful when creating a new index",
)
def handle(self, *args, **options):
if not settings.ES_HOSTNAME:
raise SystemExit("Fatal error: $ES_HOSTNAME is not set.")
cluster, index_settings = get_elasticsearch_settings()
template = get_index_template()
if not options["template_only"]:
run_curl_cmd(payload=cluster, url=CURL_COMMANDS["cluster"], host=settings.ES_HOSTNAME)
run_curl_cmd(payload=index_settings, url=CURL_COMMANDS["settings"], host=settings.ES_HOSTNAME)
run_curl_cmd(
payload=template, url=CURL_COMMANDS["template"], host=settings.ES_HOSTNAME, name="transaction_template"
)
def run_curl_cmd(**kwargs):
url = kwargs["url"].format(**kwargs)
cmd = CURL_STATEMENT.format(url=url, data=json.dumps(kwargs["payload"]))
print("Running: {}\n\n".format(cmd))
subprocess.Popen(cmd, shell=True).wait()
print("\n\n---------------------------------------------------------------")
return
def get_elasticsearch_settings():
es_config = return_json_from_file(FILES["settings"])
es_config["settings"]["index.max_result_window"] = settings.ES_TRANSACTIONS_MAX_RESULT_WINDOW
return es_config["cluster"], es_config["settings"]
def get_index_template():
template = return_json_from_file(FILES["template"])
template["index_patterns"] = ["*{}".format(settings.ES_TRANSACTIONS_NAME_SUFFIX)]
template["settings"]["index.max_result_window"] = settings.ES_TRANSACTIONS_MAX_RESULT_WINDOW
validate_known_fields(template)
return template
def return_json_from_file(path):
"""Read and parse file as JSON
Library performs JSON validation which is helpful before sending to ES
"""
filepath = str(path)
if not path.exists():
raise SystemExit("Fatal error: file {} does not exist.".format(filepath))
print("Reading file: {}".format(filepath))
with open(filepath, "r") as f:
json_to_dict = json.load(f)
return json_to_dict
def validate_known_fields(template):
defined_fields = set([field for field in template["mappings"]["transaction_mapping"]["properties"]])
load_columns = set(VIEW_COLUMNS)
if defined_fields ^ load_columns: # check if any fields are not in both sets
raise RuntimeError("Mismatch between template and fields in ETL! Resolve before continuing!")
def retrieve_transaction_index_template():
"""This function is used for test configuration"""
with open(str(FILES["template"])) as f:
mapping_dict = json.load(f)
template = json.dumps(mapping_dict)
return template
| 33.981132
| 115
| 0.684064
|
a333ed50794ace428a5bd18043c0deaaaadf30a2
| 2,696
|
py
|
Python
|
scratch_explorer/tests/test_search.py
|
mattjohndavidson/scratch_analysis
|
319423ecdbdb57b870d2e99851f90760496498c2
|
[
"MIT"
] | null | null | null |
scratch_explorer/tests/test_search.py
|
mattjohndavidson/scratch_analysis
|
319423ecdbdb57b870d2e99851f90760496498c2
|
[
"MIT"
] | 8
|
2020-12-05T03:11:04.000Z
|
2020-12-18T01:04:34.000Z
|
scratch_explorer/tests/test_search.py
|
mattjohndavidson/scratch_analysis
|
319423ecdbdb57b870d2e99851f90760496498c2
|
[
"MIT"
] | null | null | null |
"""Unit tests for search.py"""
import unittest
import pandas as pd
from scratch_explorer import search
class test_search(unittest.TestCase):
"""Includes unit tests for search.py"""
data = pd.read_csv('scratch_explorer/data/scratch_sample.csv',
low_memory=False).drop(columns=['Unnamed: 0'])
def test1(self):
"""column input is not a string"""
with self.assertRaises(ValueError):
data = self.data
column = 4
block_search = None
search.search_data(data, column, block_search)
def test2(self):
"""column input is not an option"""
with self.assertRaises(ValueError):
data = self.data
column = 'Number'
block_search = None
search.search_data(data, column, block_search)
def test3(self):
"""not all blocks are block types"""
with self.assertRaises(ValueError):
data = self.data
column = 'block-type'
block_search = ['&', 'not-a-block']
search.search_data(data, column, block_search)
def test4(self):
"""not all blocks are strings"""
with self.assertRaises(ValueError):
data = self.data
column = 'block-type'
block_search = [51, 31, 23]
search.search_data(data, column, block_search)
def test5(self):
"""No block types to search by"""
with self.assertRaises(ValueError):
data = self.data
column = 'block-type'
block_search = []
search.search_data(data, column, block_search)
def test6(self):
"""One shot test: only search for 1 block type"""
data = self.data
column = 'block-type'
block_search = ['&']
result = search.search_data(data, column, block_search)
self.assertEqual(result.iloc[0].p_ID, 99457867)
def test7(self):
"""One shot test: search for 2 block, already been searched"""
data = self.data
column = 'block-type'
block_search = ['&', '+']
block_search_sum = sum([data[block] for block in block_search])
data.insert(len(data.columns), 'search_sum', block_search_sum)
result = search.search_data(data, column, block_search)
self.assertEqual(result.iloc[0].p_ID, 98955356)
def test8(self):
"""One shot test: search for a column"""
data = self.data
column = 'Mastery'
block_search = None
result = search.search_data(data, column, block_search)
self.assertEqual(result.iloc[0].p_ID, 98578463)
if __name__ == '__main__':
unittest.main()
| 32.878049
| 71
| 0.591617
|
66459cb396ee87dd002288d76d721e4bb9aea3bc
| 5,911
|
py
|
Python
|
djgeojson/views.py
|
dennereed/paleocore
|
d6da6c39cde96050ee4b9e7213ec1200530cbeee
|
[
"MIT"
] | 1
|
2021-02-05T19:50:13.000Z
|
2021-02-05T19:50:13.000Z
|
djgeojson/views.py
|
dennereed/paleocore
|
d6da6c39cde96050ee4b9e7213ec1200530cbeee
|
[
"MIT"
] | 59
|
2020-06-17T22:21:51.000Z
|
2022-02-10T05:00:01.000Z
|
djgeojson/views.py
|
dennereed/paleocore
|
d6da6c39cde96050ee4b9e7213ec1200530cbeee
|
[
"MIT"
] | 2
|
2020-07-01T14:11:09.000Z
|
2020-08-10T17:27:26.000Z
|
import math
import django
from django.core.exceptions import ImproperlyConfigured
try:
from django.contrib.gis.db.models.functions import Intersection
except (ImportError, ImproperlyConfigured):
Intersection = None
from django.views.generic import ListView
from django.utils.decorators import method_decorator
from django.views.decorators.gzip import gzip_page
from django.core.exceptions import SuspiciousOperation
from django.core.exceptions import ImproperlyConfigured
try:
from django.contrib.gis.geos import Polygon
except (ImportError, ImproperlyConfigured):
try:
from django.contrib.gis.geos.geometry import Polygon
except (ImportError, ImproperlyConfigured):
from .nogeos import Polygon
try:
from django.contrib.gis.db.models import PointField
except (ImportError, ImproperlyConfigured):
from .fields import PointField
from .http import HttpGeoJSONResponse
from .serializers import Serializer as GeoJSONSerializer
from . import GEOJSON_DEFAULT_SRID
class GeoJSONResponseMixin(object):
"""
A mixin that can be used to render a GeoJSON response.
"""
response_class = HttpGeoJSONResponse
""" Select fields for properties """
properties = []
""" Limit float precision """
precision = None
""" Simplify geometries """
simplify = None
""" Change projection of geometries """
srid = GEOJSON_DEFAULT_SRID
""" Geometry field to serialize """
geometry_field = 'geom'
""" Force 2D """
force2d = False
""" bbox """
bbox = None
""" bbox auto """
bbox_auto = False
use_natural_keys = False
with_modelname = True
crs_type = 'name'
def render_to_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
serializer = GeoJSONSerializer()
response = self.response_class(**response_kwargs)
queryset = self.get_queryset()
options = dict(properties=self.properties,
precision=self.precision,
simplify=self.simplify,
srid=self.srid,
geometry_field=self.geometry_field,
force2d=self.force2d,
bbox=self.bbox,
bbox_auto=self.bbox_auto,
use_natural_keys=self.use_natural_keys,
with_modelname=self.with_modelname,
crs_type=self.crs_type)
serializer.serialize(queryset, stream=response, ensure_ascii=False,
**options)
return response
class GeoJSONLayerView(GeoJSONResponseMixin, ListView):
"""
A generic view to serve a model as a layer.
"""
@method_decorator(gzip_page)
def dispatch(self, *args, **kwargs):
return super(GeoJSONLayerView, self).dispatch(*args, **kwargs)
class TiledGeoJSONLayerView(GeoJSONLayerView):
width = 256
height = 256
tile_srid = 3857
trim_to_boundary = True
"""Simplify geometries by zoom level (dict <int:float>)"""
simplifications = None
def tile_coord(self, xtile, ytile, zoom):
"""
This returns the NW-corner of the square. Use the function
with xtile+1 and/or ytile+1 to get the other corners.
With xtile+0.5 & ytile+0.5 it will return the center of the tile.
http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Tile_numbers_to_lon..2Flat._2
"""
assert self.tile_srid == 3857, 'Custom tile projection not supported yet'
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lon_deg, lat_deg)
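    # Worked example for tile_coord (illustrative values, not from the original
    # module): at zoom 1 the world is a 2x2 tile grid, so tile x=1, y=1 has its
    # NW corner at lon/lat (0.0, 0.0):
    #   n = 2.0 ** 1 = 2
    #   lon_deg = 1 / 2 * 360.0 - 180.0 = 0.0
    #   lat_deg = degrees(atan(sinh(pi * (1 - 2 * 1 / 2)))) = degrees(atan(0)) = 0.0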
def _parse_args(self):
try:
return [int(v) for v in (self.args[0], self.args[1], self.args[2])]
except (ValueError, IndexError):
try:
return [int(v) for v in (self.kwargs['z'],
self.kwargs['x'],
self.kwargs['y'])]
except (ValueError, TypeError, KeyError):
# Raise suspicious, Django will return ``400 Bad Request``.
error_msg = "Tile (z, x, y) parameters could not be processed."
raise SuspiciousOperation(error_msg)
def get_queryset(self):
"""
Inspired by Glen Roberton's django-geojson-tiles view
"""
self.z, self.x, self.y = self._parse_args()
nw = self.tile_coord(self.x, self.y, self.z)
se = self.tile_coord(self.x + 1, self.y + 1, self.z)
bbox = Polygon((nw, (se[0], nw[1]),
se, (nw[0], se[1]), nw))
bbox.srid = self.srid
qs = super(TiledGeoJSONLayerView, self).get_queryset()
qs = qs.filter(**{
'%s__intersects' % self.geometry_field: bbox
})
self.bbox = bbox.extent
# Simplification dict by zoom level
simplifications = self.simplifications or {}
z = self.z
self.simplify = simplifications.get(z)
while self.simplify is None and z < 32:
z += 1
self.simplify = simplifications.get(z)
# Won't trim point geometries to a boundary
model_field = qs.model._meta.get_field(self.geometry_field)
self.trim_to_boundary = (self.trim_to_boundary and
not isinstance(model_field, PointField) and
Intersection is not None)
if self.trim_to_boundary:
if django.VERSION < (1, 9):
qs = qs.intersection(bbox)
else:
qs = qs.annotate(intersection=Intersection(self.geometry_field, bbox))
self.geometry_field = 'intersection'
return qs
| 35.39521
| 93
| 0.610726
|
8daa2d45f237e47c2966783fcc5a5c54b7ab7069
| 6,399
|
py
|
Python
|
tests/test_07_render/test_712_render_curved_dimension.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | null | null | null |
tests/test_07_render/test_712_render_curved_dimension.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | null | null | null |
tests/test_07_render/test_712_render_curved_dimension.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import math
import pytest
import ezdxf
from ezdxf.document import Drawing
from ezdxf.math import Vec2, arc_angle_span_deg
from ezdxf.render.dim_curved import detect_closer_defpoint, _CurvedDimensionLine
@pytest.fixture(scope="module")
def doc():
return ezdxf.new(setup=True)
class TestDetectCloserDefpoints:
@pytest.mark.parametrize(
"d, offset", # d=direction
[
(Vec2(1, 0), Vec2(0, 0)), # +x direction
(Vec2(0, 1), Vec2(1, -1)), # +y direction
(Vec2(-1, 0), Vec2(-2, 3)), # -x direction
(Vec2(0, -1), Vec2(2, -4)), # -y direction
(Vec2(2, -1), Vec2(20, 45)), # angled
],
ids=["(+x)", "(+y)", "(-x)", "(-y)", "angled"],
)
@pytest.mark.parametrize(
"base",
[
0,
0.5,
1.0,
1.5,
2.0, # equal -> p1
-0.5, # every base left of p1 is closer to p1
-1.0,
-100.0,
],
)
def test_p1_is_closer_to_base(self, base, d, offset):
# e.g. for base=(-1, 0), d=(1, 0):
# base p1 p2
# (-x) <---2---(1)---0---(1)---2---(3)---> (+x)
        # By equality p1 is preferred over p2!
# Shift system by an arbitrary offset!
p1 = d * 1 + offset
p2 = d * 3 + offset
base = d * base + offset
assert detect_closer_defpoint(d, base, p1, p2) is p1
@pytest.mark.parametrize(
"d, offset", # d=direction
[
(Vec2(1, 0), Vec2(0, -1)), # +x direction
(Vec2(0, 1), Vec2(2, -2)), # +y direction
(Vec2(-1, 0), Vec2(2, 5)), # -x direction
(Vec2(0, -1), Vec2(1, 0)), # -y direction
(Vec2(2, -1), Vec2(20, 45)), # angled
],
ids=["(+x)", "(+y)", "(-x)", "(-y)", "angled"],
)
@pytest.mark.parametrize(
"base",
[
2.5,
3.0,
4.0, # every base right of p2 is closer to p2
100.0,
],
)
def test_p2_is_closer_to_base(self, base, d, offset):
# e.g. for base=(4.0, 0), d=(1, 0):
# p1 p2 base
# (-x) <---2---1---0---(1)---2---(3)---(4)---> (+x)
        # By equality p1 is preferred over p2!
# Shift system by an arbitrary offset!
p1 = d * 1 + offset
p2 = d * 3 + offset
base = d * base + offset
assert detect_closer_defpoint(d, base, p1, p2) is p2
@pytest.mark.parametrize(
"s,e",
[
[60, 120],
[300, 240], # passes 0
[240, 300],
[300, 30], # passes 0
],
)
def test_dimension_line_divided_by_measurement_text(doc: Drawing, s, e):
"""Vertical centered measurement text should hide the part of the
dimension line beneath the text. This creates two arcs instead of one.
"""
msp = doc.modelspace()
dim = msp.add_angular_dim_cra(
center=Vec2(),
radius=5,
start_angle=s,
end_angle=e,
distance=2,
override={"dimtad": 0}, # vertical centered text
)
dim.render()
arcs = dim.dimension.get_geometry_block().query("ARC")
assert len(arcs) == 2
assert sum(
arc_angle_span_deg(arc.dxf.start_angle, arc.dxf.end_angle)
for arc in arcs
) < arc_angle_span_deg(
s, e
), "sum of visual arcs should be smaller than the full arc"
def measure_fixed_angle(msp, angle: float):
x_dist = 15
radius = 3
distance = 1
delta = angle / 2.0
for dimtad, y_dist in [[0, 0], [1, 20], [4, 40]]:
for count in range(8):
center = Vec2(x_dist * count, y_dist)
main_angle = 45.0 * count
start_angle = main_angle - delta
end_angle = main_angle + delta
yield msp.add_angular_dim_cra(
center,
radius,
start_angle,
end_angle,
distance,
override={"dimtad": dimtad},
)
def test_text_and_arrows_fit_between_extension_lines(doc: Drawing):
"""There is enough space between extension lines is to place text and
arrows.
"""
for dim in measure_fixed_angle(doc.modelspace(), angle=20):
render_obj = dim.render()
assert isinstance(render_obj, _CurvedDimensionLine)
assert render_obj.arrows_outside is False
assert render_obj.measurement.is_wide_text is False
assert render_obj.measurement.text_is_outside is False
@pytest.mark.parametrize("angle", [3, 6])
def test_has_outside_text_and_arrows(doc: Drawing, angle):
"""The space between extension lines is too narrow to place text and arrows."""
for dim in measure_fixed_angle(doc.modelspace(), angle=angle):
render_obj = dim.render()
assert isinstance(render_obj, _CurvedDimensionLine)
assert render_obj.arrows_outside is True
assert render_obj.measurement.text_is_outside is True
assert render_obj.measurement.is_wide_text is True
def test_has_outside_text_and_arrows_but_not_a_wide_text(doc: Drawing):
"""The space between extension lines is too narrow to place text and arrows,
but the text alone has enough space.
"""
for dim in measure_fixed_angle(doc.modelspace(), angle=9):
render_obj = dim.render()
assert isinstance(render_obj, _CurvedDimensionLine)
assert render_obj.arrows_outside is True
assert render_obj.measurement.text_is_outside is True
assert render_obj.measurement.is_wide_text is False
def test_fixed_length_extension_lines(doc: Drawing):
msp = doc.modelspace()
dim = msp.add_angular_dim_cra(
center=(0, 0),
radius=5,
distance=2,
start_angle=0,
end_angle=90,
override={
"dimfxlon": 1, # use fixed length extension lines
"dimexe": 0.7, # length "above" the dimension line
"dimfxl": 0.5, # length "below" the dimension line
},
).render()
# only the extension lines are LINE entities:
for line in dim.dimension.get_geometry_block().query("LINE"):
length = line.dxf.start.distance(line.dxf.end)
assert length == pytest.approx(0.5 + 0.7)
if __name__ == "__main__":
pytest.main([__file__])
| 32.647959
| 83
| 0.564776
|
bac73378b84e01c637ac97d3326b9cd792a2768b
| 4,603
|
py
|
Python
|
django/core/serializers/__init__.py
|
pomarec/django
|
98514849dce07acfaa224a90a784bba9d97249e5
|
[
"BSD-3-Clause"
] | 1
|
2015-06-14T07:55:29.000Z
|
2015-06-14T07:55:29.000Z
|
django/core/serializers/__init__.py
|
pomarec/django
|
98514849dce07acfaa224a90a784bba9d97249e5
|
[
"BSD-3-Clause"
] | null | null | null |
django/core/serializers/__init__.py
|
pomarec/django
|
98514849dce07acfaa224a90a784bba9d97249e5
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
import importlib
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml" : "django.core.serializers.xml_serializer",
"python" : "django.core.serializers.python",
"json" : "django.core.serializers.json",
"yaml" : "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
    m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
| 30.686667
| 92
| 0.705192
|
79611c5c2de8ef5098feb8fdd8f0271f18113662
| 317
|
py
|
Python
|
Course-2:Graphs/shortest_path.py
|
karenk1010/Coursera-Algorithms-Specialization
|
5d293ff6e74e7d6f2090696d21d282e1734f396a
|
[
"MIT"
] | null | null | null |
Course-2:Graphs/shortest_path.py
|
karenk1010/Coursera-Algorithms-Specialization
|
5d293ff6e74e7d6f2090696d21d282e1734f396a
|
[
"MIT"
] | null | null | null |
Course-2:Graphs/shortest_path.py
|
karenk1010/Coursera-Algorithms-Specialization
|
5d293ff6e74e7d6f2090696d21d282e1734f396a
|
[
"MIT"
] | 2
|
2021-02-04T22:20:15.000Z
|
2021-02-11T13:27:24.000Z
|
#!/usr/bin/python3
"""
Implement Dijkstra's shortest path algorithm using 'dijkstraData.txt' graph.
Source Vertex = 1.
Report shortest path distance to the following vertices in the same order:
7,37,59,82,99,115,133,165,188,197
Problem answer: [2599, 2610, 2947, 2052, 2367, 2399, 2029, 2442, 2505, 3068]
"""
| 24.384615
| 76
| 0.722397
|
1f79eb0f8d5ad76a71568edd3e0b2269a683e236
| 726
|
py
|
Python
|
livereload/cli.py
|
agustinhenze/python-livereload.debian
|
b2c6cc0882962251da14cf94b94d83b4c63ee786
|
[
"BSD-3-Clause"
] | null | null | null |
livereload/cli.py
|
agustinhenze/python-livereload.debian
|
b2c6cc0882962251da14cf94b94d83b4c63ee786
|
[
"BSD-3-Clause"
] | null | null | null |
livereload/cli.py
|
agustinhenze/python-livereload.debian
|
b2c6cc0882962251da14cf94b94d83b4c63ee786
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
from livereload.server import Server
parser = argparse.ArgumentParser(description='Start a `livereload` server')
parser.add_argument(
'--host',
help='Hostname to run `livereload` server on',
type=str,
default='127.0.0.1'
)
parser.add_argument(
'-p', '--port',
help='Port to run `livereload` server on',
type=int,
default=35729
)
parser.add_argument(
'directory',
help='Directory to watch for changes',
type=str,
default='.',
nargs='?'
)
def main(argv=None):
args = parser.parse_args()
# Create a new application
server = Server()
server.watcher.watch(args.directory)
server.serve(host=args.host, port=args.port, root=args.directory)
| 21.352941
| 75
| 0.666667
|
ffbeb4061d6dd866c1995804264ba8beca9493b9
| 1,132
|
py
|
Python
|
server/util/wordembeddings.py
|
Yunicorn228/web-tools
|
056d2d8310f3096c8be90638342bb3cc5715a89f
|
[
"Apache-2.0"
] | 32
|
2017-06-12T15:53:14.000Z
|
2020-08-31T15:23:38.000Z
|
server/util/wordembeddings.py
|
Yunicorn228/web-tools
|
056d2d8310f3096c8be90638342bb3cc5715a89f
|
[
"Apache-2.0"
] | 1,316
|
2017-05-04T17:14:15.000Z
|
2020-09-28T18:32:00.000Z
|
server/util/wordembeddings.py
|
Yunicorn228/web-tools
|
056d2d8310f3096c8be90638342bb3cc5715a89f
|
[
"Apache-2.0"
] | 20
|
2017-10-10T20:07:07.000Z
|
2020-08-30T14:03:06.000Z
|
import requests
import json
from server import config
# Helpers for accessing data from the Media Cloud Word Embeddings server
def google_news_2d(words):
results = _query_for_json("/api/v2/google-news/2d",
{'words[]': words})
return results
def topic_2d(topics_id, snapshots_id, words):
results = _query_for_json("/api/v2/topics/{}/snapshots/{}/2d".format(topics_id, snapshots_id),
{'words[]': words})
return results
def topic_similar_words(topics_id, snapshots_id, words):
results = _query_for_json("/api/v2/topics/{}/snapshots/{}/similar-words".format(topics_id, snapshots_id),
{'words[]': words})
return results
def _query_for_json(endpoint, data):
response = requests.post("{}{}".format(config.get('WORD_EMBEDDINGS_SERVER_URL'), endpoint), data=data)
try:
response_json = response.json()
if 'results' in response_json:
return response_json['results']
except json.decoder.JSONDecodeError:
# this happens in non-english cases
return []
return []
| 30.594595
| 109
| 0.644876
|
01db4af09803a6f73501d2f4599d5a7ddce2e736
| 4,477
|
py
|
Python
|
XrayDataPlots/plotFcalcsrResolutionBin.py
|
MooersLab/jupyterlabcctbxsnips
|
c5f0947b4e8c4e5839b9b6b15c81c62915103155
|
[
"MIT"
] | null | null | null |
XrayDataPlots/plotFcalcsrResolutionBin.py
|
MooersLab/jupyterlabcctbxsnips
|
c5f0947b4e8c4e5839b9b6b15c81c62915103155
|
[
"MIT"
] | null | null | null |
XrayDataPlots/plotFcalcsrResolutionBin.py
|
MooersLab/jupyterlabcctbxsnips
|
c5f0947b4e8c4e5839b9b6b15c81c62915103155
|
[
"MIT"
] | null | null | null |
'''
This script reads in a phenix.refine mtz file.
It plots the R-factor by resolution bin.
It also plots the correlation coefficients by resolution bin.
The plots are made with matplotlib using miller arrays.
This script was adapted from an example script in iotbx:
Source: https://github.com/cctbx/cctbx_project/blob/master/
iotbx/examples/recalculate_phenix_refine_r_factors.py
'''
# get_ipython().run_line_magic("matplotlib", "inline")
from __future__ import absolute_import, division, print_function
from iotbx.reflection_file_utils import get_r_free_flags_scores
from iotbx.file_reader import any_file
import matplotlib
import matplotlib.pyplot as plt
def compute_r_factors(fobs, fmodel, flags):
fmodel, fobs = fmodel.common_sets(other=fobs)
fmodel, flags = fmodel.common_sets(other=flags)
fc_work = fmodel.select(~(flags.data()))
fo_work = fobs.select(~(flags.data()))
fc_test = fmodel.select(flags.data())
fo_test = fobs.select(flags.data())
r_work = fo_work.r1_factor(fc_work)
r_free = fo_test.r1_factor(fc_test)
print("r_work = %.4f" % r_work)
print("r_free = %.4f" % r_free)
print("")
binner = flags.setup_binner(n_bins=20)
d_star_power = 1.618034
centers = binner.bin_centers(d_star_power)
d_centers = list(centers**(-1 / d_star_power))
# for i in d_centers:
# print(i)
fo_work.use_binning_of(flags)
fc_work.use_binner_of(fo_work)
fo_test.use_binning_of(fo_work)
fc_test.use_binning_of(fo_work)
r_work_list = []
r_free_list = []
cc_work_list = []
cc_free_list = []
for i_bin in fo_work.binner().range_all():
sel_work = fo_work.binner().selection(i_bin)
sel_test = fo_test.binner().selection(i_bin)
fo_work_bin = fo_work.select(sel_work)
fc_work_bin = fc_work.select(sel_work)
fo_test_bin = fo_test.select(sel_test)
fc_test_bin = fc_test.select(sel_test)
if fc_test_bin.size() == 0 : continue
r_work_bin = fo_work_bin.r1_factor(other=fc_work_bin,
assume_index_matching=True)
r_work_list.append(r_work_bin)
r_free_bin = fo_test_bin.r1_factor(other=fc_test_bin,
assume_index_matching=True)
r_free_list.append(r_free_bin)
cc_work_bin = fo_work_bin.correlation(fc_work_bin).coefficient()
cc_work_list.append(cc_work_bin)
cc_free_bin = fo_test_bin.correlation(fc_test_bin).coefficient()
cc_free_list.append(cc_free_bin)
legend = flags.binner().bin_legend(i_bin, show_counts=False)
print("%s %8d %8d %.4f %.4f %.3f %.3f" % (legend, fo_work_bin.size(),
fo_test_bin.size(), r_work_bin, r_free_bin, cc_work_bin, cc_free_bin))
return d_centers, r_work_list, r_free_list, cc_work_list, cc_free_list
def plot_r_factors(d_centers, r_work_list, r_free_list):
    plt.scatter(d_centers, r_work_list, label=r"$\mathit{R_{work}}$")
    plt.scatter(d_centers, r_free_list, label=r"$\mathit{R_{free}}$")
    plt.xlabel(r"Resolution ($\mathrm{\AA}$)")
plt.ylabel(r"R-factor (%)")
plt.legend(loc="upper right")
plt.savefig("Rs.pdf")
plt.close()
def plot_cc(d_centers, cc_work_list, cc_free_list):
    plt.scatter(d_centers, cc_work_list, label=r"$\mathit{CC_{work}}$")
    plt.scatter(d_centers, cc_free_list, label=r"$\mathit{CC_{free}}$")
    plt.xlabel(r"Resolution ($\mathrm{\AA}$)")
    plt.ylabel(r"Correlation Coefficient Fo vs Fc (%)")
plt.legend(loc="lower right")
plt.savefig("CCs.pdf")
def run(input_mtz):
mtz_in = any_file(input_mtz)
ma = mtz_in.file_server.miller_arrays
flags = fmodel = fobs = None
# select the output arrays from phenix.refine. This could easily be modified
# to handle MTZ files from other programs.
for array in ma :
labels = array.info().label_string()
if labels.startswith("R-free-flags"):
flags = array
elif labels.startswith("F-model"):
fmodel = abs(array)
elif labels.startswith("F-obs-filtered"):
fobs = array
if (None in [flags, fobs, fmodel]):
raise RuntimeError("Not a valid phenix.refine output file")
scores = get_r_free_flags_scores([flags], None)
test_flag_value = scores.test_flag_values[0]
flags = flags.customized_copy(data=flags.data()==test_flag_value)
(d_centers,
r_work_list,
r_free_list,
cc_work_list,
cc_free_list) = compute_r_factors(fobs, fmodel, flags)
plot_r_factors(d_centers, r_work_list, r_free_list)
plot_cc(d_centers, cc_work_list, cc_free_list)
if (__name__ == "__main__"):
run(input_mtz="28molrepEdited_5_refine_001.mtz")
| 33.410448
| 79
| 0.726603
|
2ae005b12c4abf394935a156f5e38edd13ee56e3
| 3,078
|
py
|
Python
|
libs/ts/ptp.py
|
scambra/HTPC-Manager
|
1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d
|
[
"MIT"
] | 422
|
2015-01-08T14:08:08.000Z
|
2022-02-07T11:47:37.000Z
|
libs/ts/ptp.py
|
scambra/HTPC-Manager
|
1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d
|
[
"MIT"
] | 581
|
2015-01-01T08:07:16.000Z
|
2022-02-23T11:44:37.000Z
|
libs/ts/ptp.py
|
scambra/HTPC-Manager
|
1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d
|
[
"MIT"
] | 115
|
2015-01-08T14:41:00.000Z
|
2022-02-13T12:31:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import urllib
import logging
import htpc
def search(q, cat):
logger = logging.getLogger('modules.torrentsearch')
logger.info('Searching for %s on ptp' % q)
username = htpc.settings.get('torrents_ptp_username', '')
password = htpc.settings.get('torrents_ptp_password', '')
passkey = htpc.settings.get('torrents_ptp_passkey', '')
if not username or not password or not passkey:
logger.error('Check your settings, username, password or passkey is missing')
return []
urls = {
'detail': 'https://tls.passthepopcorn.me/torrents.php?torrentid=%s',
'login': 'https://tls.passthepopcorn.me/ajax.php?action=login',
'search': 'https://tls.passthepopcorn.me/torrents.php?action=search&searchstr=%s&json=noredirect',
'download': 'http://passthepopcorn.me/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s'
}
d = {
'username': username,
'password': password,
'passkey': passkey,
'keeplogged': '1',
'login': 'Login'
}
try:
s = requests.Session()
login = s.post(urls['login'], data=d, timeout=10)
if login.json()['Result'] == 'Ok':
logger.debug('Logged into PTP')
else:
logger.error('%s' % login.json()['Result']['Message'])
if login.json()['Result']['Attempts'] == 1:
                # Let's not get banned.
                logger.info('Wiped PTP username to prevent ban, please check your settings')
htpc.settings.set('torrents_ptp_username', '')
return []
r = s.get(urls['search'] % urllib.quote_plus(q), timeout=10)
result_list = []
if r.ok:
result = r.json()
authkey = result['AuthKey']
logger.debug('Found %s movies with %s' % (len(result['Movies']), q))
for torrent in result['Movies']:
logger.debug('Found %s torrents to %s' % (len(torrent['Torrents']), torrent['Title']))
for t in torrent['Torrents']:
r = {
'Provider': 'passtp',
'BrowseURL': urls['detail'] % t['Id'],
'DownloadURL': urls['download'] % (t['Id'], authkey, passkey),
'ReleaseName': t['ReleaseName'],
'Seeders': t['Seeders'],
'Leechers': t['Leechers'],
'Size': int(t['Size']),
'Source': t['Source'],
'Resolution': 'N/A',
'Container': t['Container'],
'Codec': t['Codec'],
'Snatched': t['Snatched'],
}
result_list.append(r)
logger.debug('Found %s torrents from PTP' % len(result_list))
return result_list
except Exception as e:
logger.error('Error while fetching torrents from PTP %s' % e)
return []
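# A hypothetical standalone usage sketch (HTPC-Manager normally calls search()
# from its torrent-search module; the query below is an assumption, and `cat`
# is accepted but unused here):
#
#   for hit in search('Blade Runner', cat=None):
#       print(hit['ReleaseName'], hit['Seeders'], hit['Size'])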
| 35.790698
| 108
| 0.521442
|
b3a342974c28169e89793e8f93e7d695c9e5d2a1
| 31,257
|
py
|
Python
|
tests/python_package_test/test_engine.py
|
zsh-89/LightGBM
|
2df6d9a9bce01a642a3150dc054812eda97391ca
|
[
"MIT"
] | 1
|
2018-09-11T12:38:09.000Z
|
2018-09-11T12:38:09.000Z
|
tests/python_package_test/test_engine.py
|
LisaJing/LightGBM
|
9ed4a9552589c57af85b786534933152599cd1e7
|
[
"MIT"
] | null | null | null |
tests/python_package_test/test_engine.py
|
LisaJing/LightGBM
|
9ed4a9552589c57af85b786534933152599cd1e7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# pylint: skip-file
import copy
import math
import os
import unittest
import lightgbm as lgb
import random
import numpy as np
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
load_iris, load_svmlight_file)
from sklearn.metrics import log_loss, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, TimeSeriesSplit
from scipy.sparse import csr_matrix
try:
import cPickle as pickle
except ImportError:
import pickle
def multi_logloss(y_true, y_pred):
return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])
class TestEngine(unittest.TestCase):
def test_binary(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1,
'num_iteration': 50 # test num_iteration in dict here
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = log_loss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.15)
self.assertEqual(len(evals_result['valid_0']['binary_logloss']), 50)
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
def test_rf(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'boosting_type': 'rf',
'objective': 'binary',
'bagging_freq': 1,
'bagging_fraction': 0.5,
'feature_fraction': 0.5,
'num_leaves': 50,
'metric': 'binary_logloss',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = log_loss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.25)
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
def test_regression(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'metric': 'l2',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = mean_squared_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 16)
self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)
def test_missing_value_handle(self):
X_train = np.zeros((1000, 1))
y_train = np.zeros(1000)
trues = random.sample(range(1000), 200)
for idx in trues:
X_train[idx, 0] = np.nan
y_train[idx] = 1
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'metric': 'l2',
'verbose': -1,
'boost_from_average': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
ret = mean_squared_error(y_train, gbm.predict(X_train))
self.assertLess(ret, 0.005)
self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)
def test_missing_value_handle_na(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [1, 1, 1, 1, 0, 0, 0, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'zero_as_missing': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_missing_value_handle_zero(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'zero_as_missing': True
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_missing_value_handle_none(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'use_missing': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
self.assertAlmostEqual(pred[0], pred[1], places=5)
self.assertAlmostEqual(pred[-1], pred[0], places=5)
def test_categorical_handle(self):
x = [0, 1, 2, 3, 4, 5, 6, 7]
y = [0, 1, 0, 1, 0, 1, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'min_data_per_group': 1,
'cat_smooth': 1,
'cat_l2': 0,
'max_cat_to_onehot': 1,
'zero_as_missing': True,
'categorical_column': 0
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_categorical_handle2(self):
x = [0, np.nan, 0, np.nan, 0, np.nan]
y = [0, 1, 0, 1, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'min_data_per_group': 1,
'cat_smooth': 1,
'cat_l2': 0,
'max_cat_to_onehot': 1,
'zero_as_missing': False,
'categorical_column': 0
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_multiclass(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 10,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.2)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_multiclass_rf(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'boosting_type': 'rf',
'objective': 'multiclass',
'metric': 'multi_logloss',
'bagging_freq': 1,
'bagging_fraction': 0.6,
'feature_fraction': 0.6,
'num_class': 10,
'num_leaves': 50,
'min_data': 1,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=100,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.4)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_multiclass_prediction_early_stopping(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 10,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
self.assertLess(ret, 0.8)
self.assertGreater(ret, 0.5) # loss will be higher than when evaluating the full model
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 5.5}
ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
self.assertLess(ret, 0.2)
def test_early_stopping(self):
X, y = load_breast_cancer(True)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
valid_set_name = 'valid_set'
# no early stopping
gbm = lgb.train(params, lgb_train,
num_boost_round=10,
valid_sets=lgb_eval,
valid_names=valid_set_name,
verbose_eval=False,
early_stopping_rounds=5)
self.assertEqual(gbm.best_iteration, 10)
self.assertIn(valid_set_name, gbm.best_score)
self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
# early stopping occurs
gbm = lgb.train(params, lgb_train,
valid_sets=lgb_eval,
valid_names=valid_set_name,
verbose_eval=False,
early_stopping_rounds=5)
self.assertLessEqual(gbm.best_iteration, 100)
self.assertIn(valid_set_name, gbm.best_score)
self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
def test_continue_train(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'regression',
'metric': 'l1',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
model_name = 'model.txt'
init_gbm.save_model(model_name)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=30,
valid_sets=lgb_eval,
verbose_eval=False,
# test custom eval metrics
feval=(lambda p, d: ('mae', mean_absolute_error(p, d.get_label()), False)),
evals_result=evals_result,
init_model='model.txt')
ret = mean_absolute_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 3.5)
self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)
for l1, mae in zip(evals_result['valid_0']['l1'], evals_result['valid_0']['mae']):
self.assertAlmostEqual(l1, mae, places=5)
os.remove(model_name)
def test_continue_train_multiclass(self):
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 3,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=30,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result,
init_model=init_gbm)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 1.5)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_cv(self):
X, y = load_boston(True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train)
# shuffle = False, override metric in params
params_with_metric = {'metric': 'l2', 'verbose': -1}
lgb.cv(params_with_metric, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=False,
metrics='l1', verbose_eval=False)
# shuffle = True, callbacks
lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=True,
metrics='l1', verbose_eval=False,
callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
# self defined folds
tss = TimeSeriesSplit(3)
folds = tss.split(X_train)
lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds, stratified=False, verbose_eval=False)
# lambdarank
X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train'))
q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train.query'))
params_lambdarank = {'objective': 'lambdarank', 'verbose': -1, 'eval_at': 3}
lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
# ... with NDCG (default) metric
cv_res = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, stratified=False, verbose_eval=False)
self.assertEqual(len(cv_res), 2)
self.assertFalse(np.isnan(cv_res['ndcg@3-mean']).any())
# ... with l2 metric
cv_res = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, stratified=False, metrics='l2', verbose_eval=False)
self.assertEqual(len(cv_res), 2)
self.assertFalse(np.isnan(cv_res['l2-mean']).any())
def test_feature_name(self):
X, y = load_boston(True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train)
feature_names = ['f_' + str(i) for i in range(13)]
gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)
self.assertListEqual(feature_names, gbm.feature_name())
# test feature_names with whitespaces
feature_names_with_space = ['f ' + str(i) for i in range(13)]
gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space)
self.assertListEqual(feature_names, gbm.feature_name())
def test_save_load_copy_pickle(self):
def test_template(init_model=None, return_model=False):
X, y = load_boston(True)
params = {
'objective': 'regression',
'metric': 'l2',
'verbose': -1
}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train)
gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))
gbm = test_template(return_model=True)
ret_origin = test_template(init_model=gbm)
other_ret = []
gbm.save_model('lgb.model')
other_ret.append(test_template(init_model='lgb.model'))
gbm_load = lgb.Booster(model_file='lgb.model')
other_ret.append(test_template(init_model=gbm_load))
other_ret.append(test_template(init_model=copy.copy(gbm)))
other_ret.append(test_template(init_model=copy.deepcopy(gbm)))
with open('lgb.pkl', 'wb') as f:
pickle.dump(gbm, f)
with open('lgb.pkl', 'rb') as f:
gbm_pickle = pickle.load(f)
other_ret.append(test_template(init_model=gbm_pickle))
gbm_pickles = pickle.loads(pickle.dumps(gbm))
other_ret.append(test_template(init_model=gbm_pickles))
for ret in other_ret:
self.assertAlmostEqual(ret_origin, ret, places=5)
@unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed')
def test_pandas_categorical(self):
import pandas as pd
X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
"B": np.random.permutation([1, 2, 3] * 100), # int
"C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float
"D": np.random.permutation([True, False] * 150)}) # bool
y = np.random.permutation([0, 1] * 150)
X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),
"B": np.random.permutation([1, 3] * 30),
"C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": np.random.permutation([True, False] * 30)})
for col in ["A", "B", "C", "D"]:
X[col] = X[col].astype('category')
X_test[col] = X_test[col].astype('category')
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
lgb_train = lgb.Dataset(X, y)
gbm0 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False)
pred0 = list(gbm0.predict(X_test))
lgb_train = lgb.Dataset(X, pd.DataFrame(y)) # also test that label can be one-column pd.DataFrame
gbm1 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=[0])
pred1 = list(gbm1.predict(X_test))
lgb_train = lgb.Dataset(X, pd.Series(y)) # also test that label can be pd.Series
gbm2 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=['A'])
pred2 = list(gbm2.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm3 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=['A', 'B', 'C', 'D'])
pred3 = list(gbm3.predict(X_test))
gbm3.save_model('categorical.model')
gbm4 = lgb.Booster(model_file='categorical.model')
pred4 = list(gbm4.predict(X_test))
np.testing.assert_almost_equal(pred0, pred1)
np.testing.assert_almost_equal(pred0, pred2)
np.testing.assert_almost_equal(pred0, pred3)
np.testing.assert_almost_equal(pred0, pred4)
def test_reference_chain(self):
X = np.random.normal(size=(100, 2))
y = np.random.normal(size=100)
tmp_dat = lgb.Dataset(X, y)
# take subsets and train
tmp_dat_train = tmp_dat.subset(np.arange(80))
tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18))
params = {'objective': 'regression_l2', 'metric': 'rmse'}
gbm = lgb.train(params, tmp_dat_train, num_boost_round=20, valid_sets=[tmp_dat_train, tmp_dat_val])
def test_contribs(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1,
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
self.assertLess(np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1)), 1e-4)
def test_sliced_data(self):
def train_and_get_predictions(features, labels):
dataset = lgb.Dataset(features, label=labels)
lgb_params = {
'application': 'binary',
'verbose': -1,
'min_data': 5,
}
lgbm_model = lgb.train(
params=lgb_params,
train_set=dataset,
num_boost_round=10,
)
predictions = lgbm_model.predict(features)
return predictions
num_samples = 100
features = np.random.rand(num_samples, 5)
positive_samples = int(num_samples * 0.25)
labels = np.append(
np.ones(positive_samples, dtype=np.float32),
np.zeros(num_samples - positive_samples, dtype=np.float32),
)
# test sliced labels
origin_pred = train_and_get_predictions(features, labels)
stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32)))
sliced_labels = stacked_labels[:, 0]
sliced_pred = train_and_get_predictions(features, sliced_labels)
np.testing.assert_almost_equal(origin_pred, sliced_pred)
# append some columns
stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), features))
stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), stacked_features))
stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
# append some rows
stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
# test sliced 2d matrix
sliced_features = stacked_features[2:102, 2:7]
self.assertTrue(np.all(sliced_features == features))
sliced_pred = train_and_get_predictions(sliced_features, sliced_labels)
np.testing.assert_almost_equal(origin_pred, sliced_pred)
# test sliced CSR
stacked_csr = csr_matrix(stacked_features)
sliced_csr = stacked_csr[2:102, 2:7]
self.assertTrue(np.all(sliced_csr == features))
sliced_pred = train_and_get_predictions(sliced_csr, sliced_labels)
np.testing.assert_almost_equal(origin_pred, sliced_pred)
def test_monotone_constraint(self):
def is_increasing(y):
return np.count_nonzero(np.diff(y) < 0.0) == 0
def is_decreasing(y):
return np.count_nonzero(np.diff(y) > 0.0) == 0
def is_correctly_constrained(learner):
n = 200
variable_x = np.linspace(0, 1, n).reshape((n, 1))
fixed_xs_values = np.linspace(0, 1, n)
for i in range(n):
fixed_x = fixed_xs_values[i] * np.ones((n, 1))
monotonically_increasing_x = np.column_stack((variable_x, fixed_x))
monotonically_increasing_y = learner.predict(monotonically_increasing_x)
monotonically_decreasing_x = np.column_stack((fixed_x, variable_x))
monotonically_decreasing_y = learner.predict(monotonically_decreasing_x)
if not (is_increasing(monotonically_increasing_y) and is_decreasing(monotonically_decreasing_y)):
return False
return True
number_of_dpoints = 3000
x1_positively_correlated_with_y = np.random.random(size=number_of_dpoints)
x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)
x = np.column_stack((x1_positively_correlated_with_y, x2_negatively_correlated_with_y))
zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints)
y = (5 * x1_positively_correlated_with_y
+ np.sin(10 * np.pi * x1_positively_correlated_with_y)
- 5 * x2_negatively_correlated_with_y
- np.cos(10 * np.pi * x2_negatively_correlated_with_y)
+ zs)
trainset = lgb.Dataset(x, label=y)
params = {
'min_data': 20,
'num_leaves': 20,
'monotone_constraints': '1,-1'
}
constrained_model = lgb.train(params, trainset)
self.assertTrue(is_correctly_constrained(constrained_model))
def test_refit(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1,
'min_data': 10
}
lgb_train = lgb.Dataset(X_train, y_train)
gbm = lgb.train(params, lgb_train,
num_boost_round=20)
err_pred = log_loss(y_test, gbm.predict(X_test))
new_gbm = gbm.refit(X_test, y_test)
new_err_pred = log_loss(y_test, new_gbm.predict(X_test))
self.assertGreater(err_pred, new_err_pred)
def test_mape_rf(self):
X, y = load_boston(True)
params = {
'boosting_type': 'rf',
'objective': 'mape',
'verbose': -1,
'bagging_freq': 1,
'bagging_fraction': 0.8,
'feature_fraction': 0.8,
'boost_from_average': False
}
lgb_train = lgb.Dataset(X, y)
gbm = lgb.train(params, lgb_train,
num_boost_round=20)
pred = gbm.predict(X)
pred_mean = pred.mean()
self.assertGreater(pred_mean, 20)
def test_mape_dart(self):
X, y = load_boston(True)
params = {
'boosting_type': 'dart',
'objective': 'mape',
'verbose': -1,
'bagging_freq': 1,
'bagging_fraction': 0.8,
'feature_fraction': 0.8,
'boost_from_average': False
}
lgb_train = lgb.Dataset(X, y)
gbm = lgb.train(params, lgb_train,
num_boost_round=40)
pred = gbm.predict(X)
pred_mean = pred.mean()
self.assertGreater(pred_mean, 18)
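# The pattern every test above exercises, reduced to a minimal sketch
# (feature/label names are illustrative assumptions):
#
#   train_set = lgb.Dataset(X_train, y_train)
#   booster = lgb.train({'objective': 'binary', 'verbose': -1}, train_set,
#                       num_boost_round=20)
#   preds = booster.predict(X_test)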
| 43.232365
| 144
| 0.576991
|
bfe85b7f03066a999bf2e7041df9e2c181f1a8c2
| 8,238
|
py
|
Python
|
hyper_internal_service/worker.py
|
intellivoid/Hyper-Internal-Service
|
16a13fe0a10a12007d286d7f30d7b72dab81d73f
|
[
"Unlicense"
] | null | null | null |
hyper_internal_service/worker.py
|
intellivoid/Hyper-Internal-Service
|
16a13fe0a10a12007d286d7f30d7b72dab81d73f
|
[
"Unlicense"
] | null | null | null |
hyper_internal_service/worker.py
|
intellivoid/Hyper-Internal-Service
|
16a13fe0a10a12007d286d7f30d7b72dab81d73f
|
[
"Unlicense"
] | null | null | null |
"""Async gunicorn worker for hyper_internal_service.web"""
import asyncio
import os
import re
import signal
import sys
from types import FrameType
from typing import Any, Awaitable, Callable, Optional, Union # noqa
from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat
from gunicorn.workers import base
from hyper_internal_service import web
from .helpers import set_result
from .web_app import Application
from .web_log import AccessLogger
try:
import ssl
SSLContext = ssl.SSLContext # noqa
except ImportError: # pragma: no cover
ssl = None # type: ignore
SSLContext = object # type: ignore
__all__ = ('GunicornWebWorker',
'GunicornUVLoopWebWorker',
'GunicornTokioWebWorker')
class GunicornWebWorker(base.Worker):
DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT
DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default
def __init__(self, *args: Any, **kw: Any) -> None: # pragma: no cover
super().__init__(*args, **kw)
self._task = None # type: Optional[asyncio.Task[None]]
self.exit_code = 0
self._notify_waiter = None # type: Optional[asyncio.Future[bool]]
def init_process(self) -> None:
# create new event_loop after fork
asyncio.get_event_loop().close()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
super().init_process()
def run(self) -> None:
self._task = self.loop.create_task(self._run())
try: # ignore all finalization problems
self.loop.run_until_complete(self._task)
except Exception:
self.log.exception("Exception in gunicorn worker")
if sys.version_info >= (3, 6):
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
sys.exit(self.exit_code)
async def _run(self) -> None:
if isinstance(self.wsgi, Application):
app = self.wsgi
elif asyncio.iscoroutinefunction(self.wsgi):
app = await self.wsgi()
else:
raise RuntimeError("wsgi app should be either Application or "
"async function returning Application, got {}"
.format(self.wsgi))
access_log = self.log.access_log if self.cfg.accesslog else None
runner = web.AppRunner(app,
logger=self.log,
keepalive_timeout=self.cfg.keepalive,
access_log=access_log,
access_log_format=self._get_valid_log_format(
self.cfg.access_log_format))
await runner.setup()
ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
assert runner is not None
server = runner.server
assert server is not None
for sock in self.sockets:
site = web.SockSite(
runner, sock, ssl_context=ctx,
shutdown_timeout=self.cfg.graceful_timeout / 100 * 95)
await site.start()
# If our parent changed then we shut down.
pid = os.getpid()
try:
while self.alive: # type: ignore
self.notify()
cnt = server.requests_count
if self.cfg.max_requests and cnt > self.cfg.max_requests:
self.alive = False
self.log.info("Max requests, shutting down: %s", self)
elif pid == os.getpid() and self.ppid != os.getppid():
self.alive = False
self.log.info("Parent changed, shutting down: %s", self)
else:
await self._wait_next_notify()
except BaseException:
pass
await runner.cleanup()
def _wait_next_notify(self) -> 'asyncio.Future[bool]':
self._notify_waiter_done()
loop = self.loop
assert loop is not None
self._notify_waiter = waiter = loop.create_future()
self.loop.call_later(1.0, self._notify_waiter_done, waiter)
return waiter
    def _notify_waiter_done(self, waiter: Optional['asyncio.Future[bool]'] = None) -> None:
if waiter is None:
waiter = self._notify_waiter
if waiter is not None:
set_result(waiter, True)
if waiter is self._notify_waiter:
self._notify_waiter = None
def init_signals(self) -> None:
# Set up signals through the event loop API.
self.loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,
signal.SIGQUIT, None)
self.loop.add_signal_handler(signal.SIGTERM, self.handle_exit,
signal.SIGTERM, None)
self.loop.add_signal_handler(signal.SIGINT, self.handle_quit,
signal.SIGINT, None)
self.loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,
signal.SIGWINCH, None)
self.loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,
signal.SIGUSR1, None)
self.loop.add_signal_handler(signal.SIGABRT, self.handle_abort,
signal.SIGABRT, None)
# Don't let SIGTERM and SIGUSR1 disturb active requests
# by interrupting system calls
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
def handle_quit(self, sig: int, frame: FrameType) -> None:
self.alive = False
# worker_int callback
self.cfg.worker_int(self)
# wakeup closing process
self._notify_waiter_done()
def handle_abort(self, sig: int, frame: FrameType) -> None:
self.alive = False
self.exit_code = 1
self.cfg.worker_abort(self)
sys.exit(1)
@staticmethod
def _create_ssl_context(cfg: Any) -> 'SSLContext':
""" Creates SSLContext instance for usage in asyncio.create_server.
See ssl.SSLSocket.__init__ for more details.
"""
if ssl is None: # pragma: no cover
raise RuntimeError('SSL is not supported.')
ctx = ssl.SSLContext(cfg.ssl_version)
ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
ctx.verify_mode = cfg.cert_reqs
if cfg.ca_certs:
ctx.load_verify_locations(cfg.ca_certs)
if cfg.ciphers:
ctx.set_ciphers(cfg.ciphers)
return ctx
def _get_valid_log_format(self, source_format: str) -> str:
if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT:
return self.DEFAULT_AIOHTTP_LOG_FORMAT
elif re.search(r'%\([^\)]+\)', source_format):
raise ValueError(
"Gunicorn's style options in form of `%(name)s` are not "
"supported for the log formatting. Please use hyper_internal_service's "
"format specification to configure access log formatting: "
"http://docs.hyper_internal_service.org/en/stable/logging.html"
"#format-specification"
)
else:
return source_format
class GunicornUVLoopWebWorker(GunicornWebWorker):
def init_process(self) -> None:
import uvloop
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup uvloop policy, so that every
# asyncio.get_event_loop() will create an instance
# of uvloop event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
super().init_process()
class GunicornTokioWebWorker(GunicornWebWorker):
def init_process(self) -> None: # pragma: no cover
import tokio
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup tokio policy, so that every
# asyncio.get_event_loop() will create an instance
# of tokio event loop.
asyncio.set_event_loop_policy(tokio.EventLoopPolicy())
super().init_process()
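# A hypothetical invocation sketch (the `mypackage.app:create_app` factory path is
# an assumption, not part of this project); any Application instance or async
# factory returning one is accepted by _run() above:
#
#   gunicorn mypackage.app:create_app \
#       --worker-class hyper_internal_service.worker.GunicornUVLoopWebWorker \
#       --bind 0.0.0.0:8080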
| 33.901235
| 88
| 0.60925
|
c76c73eb2677b2adb45d9b3d1d09a40b410a18a6
| 692
|
py
|
Python
|
tests/xlfunctions_vs_excel/count_test.py
|
ckp95/xlcalculator
|
8f3f04a2a0a2ecb12c3d5837a5b13519137ae427
|
[
"MIT"
] | 54
|
2020-04-26T09:18:29.000Z
|
2022-03-30T08:47:45.000Z
|
tests/xlfunctions_vs_excel/count_test.py
|
st-lo/xlcalculator
|
19190ec90e948b7ab1eceb06448b96cc1728bc51
|
[
"MIT"
] | 41
|
2020-05-04T04:12:36.000Z
|
2022-01-31T02:41:05.000Z
|
tests/xlfunctions_vs_excel/count_test.py
|
st-lo/xlcalculator
|
19190ec90e948b7ab1eceb06448b96cc1728bc51
|
[
"MIT"
] | 21
|
2020-05-21T20:49:23.000Z
|
2022-02-20T14:15:56.000Z
|
from .. import testing
class CountTest(testing.FunctionalTestCase):
filename = "COUNT.xlsx"
def test_evaluation_A1(self):
excel_value = self.evaluator.get_cell_value('Sheet1!A1')
value = self.evaluator.evaluate('Sheet1!A1')
self.assertEqual(excel_value, value)
def test_evaluation_A2(self):
excel_value = self.evaluator.get_cell_value('Sheet1!A2')
value = self.evaluator.evaluate('Sheet1!A2')
self.assertEqual(excel_value, value)
def test_evaluation_A3(self):
excel_value = self.evaluator.get_cell_value('Sheet1!A3')
value = self.evaluator.evaluate('Sheet1!A3')
self.assertEqual(excel_value, value)
| 32.952381
| 64
| 0.695087
|
bdbde3119465c95783820a723b2f45a136448928
| 9,539
|
py
|
Python
|
src/tourbillon_pytorch/tourbillon.py
|
Ramos-Ramos/tourbillon-pytorch
|
b25fbd02ae201569752e321eb5c612c3aa8eaddc
|
[
"MIT"
] | null | null | null |
src/tourbillon_pytorch/tourbillon.py
|
Ramos-Ramos/tourbillon-pytorch
|
b25fbd02ae201569752e321eb5c612c3aa8eaddc
|
[
"MIT"
] | null | null | null |
src/tourbillon_pytorch/tourbillon.py
|
Ramos-Ramos/tourbillon-pytorch
|
b25fbd02ae201569752e321eb5c612c3aa8eaddc
|
[
"MIT"
] | null | null | null |
# Currently only supports one-layer encoders and decoders
from einops.layers.torch import Rearrange
import numpy as np
import torch
from torch import nn
from collections import OrderedDict
from typing import Union, Tuple, List, OrderedDict
def _preprocess_conv_arg(
arg: Union[int, Tuple[int, int]], num_blocks: int
) -> List[Union[int, Tuple[int, int]]]:
"""If `arg` is not a list, repeats the argument in a list of length
`num_layers`
Args:
arg: argument of TourbillonBuildingBlockConv constructor
num_layers: number of TourbillonBuildingBlockConv to construct
Returns:
list of arguments for TourbillonBuildingBlockConv constructors
"""
if type(arg) in (int, tuple):
arg = [arg] * num_blocks
assert len(arg) == num_blocks, 'Number of conv args exceeds number of blocks'
return arg
class TourbillonBuildingBlockBase(nn.Module):
"""Circular autoencoder
Args:
encoder: encoder of autoencoder
decoder: decoder of autoencoder
num_circulations: how many times to cycle through the autoencoder
target_circulation: which circulation to take targets from
output_circulation: which circulation to take outputs from
"""
def __init__(
self,
encoder: nn.Module,
decoder: nn.Module,
num_circulations: int = 2,
target_circulation: int = 0,
output_circulation: int = 1
) -> None:
super().__init__()
self.num_circulations = num_circulations
self.target_circulation = target_circulation
self.output_circulation = output_circulation
self.encoder = encoder
self.decoder = decoder
def forward(self, x: torch.Tensor) -> OrderedDict[str, torch.Tensor]:
"""Forward pass
Args:
x: input tensor
Returns:
ordered dictionary with the following content:
"enc_target": encoder output during target_circulation
"enc_output": encoder output during output_circulation
"dec_target": decoder output during target_circulation
"dec_output": decoder output during output_circulation
"""
outputs = OrderedDict()
for i in range(self.num_circulations):
x = self.encoder(x)
if i == self.output_circulation:
outputs['enc_output'] = x
x = x.detach()
if i == self.target_circulation:
outputs['enc_target'] = x
x = self.decoder(x)
if i == self.output_circulation:
outputs['dec_output'] = x
x = x.detach()
if i == self.target_circulation:
outputs['dec_target'] = x
return outputs
class TourbillonBuildingBlockLinear(TourbillonBuildingBlockBase):
"""Circular autoencoder with feed-forward layers
Args:
in_features: number of input features
hidden_features: number of hidden features to project to
num_circulations: how many times to cycle through the autoencoder
target_circulation: which circulation to take targets from
output_circulation: which circulation to take outputs from
"""
def __init__(
self,
in_features: int,
hidden_features: int,
num_circulations: int = 2,
target_circulation: int = 0,
output_circulation: int = 1
) -> None:
self.in_features = in_features
self.hidden_features = hidden_features
encoder = nn.Sequential(
nn.Linear(in_features, hidden_features),
nn.Tanh()
)
decoder = nn.Sequential(
nn.Linear(hidden_features, in_features),
nn.Sigmoid()
)
super().__init__(encoder, decoder, num_circulations, target_circulation, output_circulation)
class TourbillonBuildingBlockConv(TourbillonBuildingBlockBase):
"""Circular cutoencoder with convolutional layers
Args:
in_features: number of input channels
hidden_features: number of hidden channels to project to
kernel_size: encoder and decoder kernel size
stride: encoder and decoder stride
padding: encoder and decoder padding
num_circulations: how many times to cycle through the autoencoder
target_circulation: which circulation to take targets from
output_circulation: which circulation to take outputs from
"""
def __init__(
self,
in_channels: int,
hidden_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
num_circulations: int = 2,
target_circulation: int = 0,
output_circulation: int = 1
) -> None:
self.in_channels = in_channels
self.hidden_channels = hidden_channels
encoder = nn.Sequential(
nn.Conv2d(in_channels, hidden_channels, kernel_size, stride, padding),
nn.Tanh()
)
decoder = nn.Sequential(
nn.ConvTranspose2d(hidden_channels, in_channels, kernel_size, stride, padding),
nn.Sigmoid()
)
super().__init__(encoder, decoder, num_circulations, target_circulation, output_circulation)
class TourbillonBase(nn.Module):
"""Stack of circular autoencoders
Args:
blocks: list of Tourbillon building blocks
last_hidden_size: hidden size of the last building block
classes: Number of classes. Can be set to `0` to return final block output
instead of class scores.
neck: module for processing output of blocks before final classifier
"""
def __init__(
self,
blocks: nn.ModuleList,
last_hidden_size: int,
classes: int,
neck: nn.Module = nn.Identity()
):
super().__init__()
self.blocks = blocks
self.neck = neck
self.head = nn.Linear(last_hidden_size, classes) if classes > 0 \
else nn.Identity()
if classes > 0:
nn.init.xavier_uniform_(self.head.weight) #bias too?
def forward(self, x: torch.Tensor) -> OrderedDict[str, torch.Tensor]:
"""Forward pass
Args:
x: input tensor
Returns:
ordered dictionary with the following content:
"output": final output
for each block i:
"enc_target_i": encoder output during block i's target_circulation
"enc_output_i": encoder output during block i's output_circulation
"dec_target_i": decoder output during block i's target_circulation
"dec_output_i": decoder output during block i's output_circulation
"""
outputs = OrderedDict()
for i, block in enumerate(self.blocks):
x = block(x)
outputs.update({f'{k}_{i}': v for k, v in x.items()})
x = x['enc_output']
x = x.detach()
x = self.neck(x)
x = self.head(x)
outputs['output'] = x
return outputs
class TourbillonLinear(TourbillonBase):
"""Stack of circular autoencoders with feed-forward layers
Args:
sizes: list consisting of input size followed by hidden size of each block
classes: Number of classes. Can be set to `0` to return final block output
instead of class scores.
num_circulations: how many times to cycle through each autoencoder
target_circulation: which circulation to take targets from
output_circulation: which circulation to take outputs from
"""
def __init__(
self,
sizes: List[int],
classes: int,
num_circulations: int = 2,
target_circulation: int = 0,
output_circulation: int = 1
) -> None:
blocks = nn.ModuleList()
for in_size, out_size in zip(sizes[:-1], sizes[1:]):
blocks.append(TourbillonBuildingBlockLinear(in_size, out_size, num_circulations, target_circulation, output_circulation))
super().__init__(blocks, sizes[-1], classes)
class TourbillonConv(TourbillonBase):
"""Stack of circular autoencoders with convolutional layers
Args:
    input_size: size of input image
channels: list consisting of input channels followed by the hidden channels
of each block
kernel_sizes: list of kernel sizes for each block
classes: Number of classes. Can be set to `0` to return final block output
instead of class scores.
strides: list of strides for each block
paddings: list of paddings for each block
num_circulations: how many times to cycle through each autoencoder
target_circulation: which circulation to take targets from
output_circulation: which circulation to take outputs from
"""
def __init__(
self,
input_size: Tuple[int, int],
channels: List[int],
kernel_sizes: Union[int, List[Union[int, Tuple[int, int]]]],
    classes: int,
strides: Union[int, List[Union[int, Tuple[int, int]]]] = 1,
paddings: Union[int, List[Union[int, Tuple[int, int]]]] = 0,
num_circulations: int = 2,
target_circulation: int = 0,
output_circulation: int = 1
) -> None:
kernel_sizes = _preprocess_conv_arg(kernel_sizes, len(channels) - 1)
strides = _preprocess_conv_arg(strides, len(channels) - 1)
paddings = _preprocess_conv_arg(paddings, len(channels) - 1)
curr_size = np.array(input_size)
layers = nn.ModuleList()
for in_channels, hidden_channels, kernel_size, stride, padding in zip(channels[:-1], channels[1:], kernel_sizes, strides, paddings):
      # standard conv output size: floor((in + 2*pad - kernel) / stride) + 1
      curr_size = (curr_size + 2 * np.array(padding) - np.array(kernel_size)) // np.array(stride) + 1
layers.append(TourbillonBuildingBlockConv(in_channels, hidden_channels, kernel_size, stride, padding, num_circulations, target_circulation, output_circulation))
last_hidden_size = channels[-1] * np.product(curr_size)
neck = Rearrange('b c h w -> b (c h w)')
super().__init__(layers, last_hidden_size, classes, neck)
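# A minimal usage sketch; the layer sizes, batch shape and class count below are
# illustrative assumptions, not part of the original module.
if __name__ == '__main__':
  model = TourbillonLinear(sizes=[784, 256, 64], classes=10)
  x = torch.rand(32, 784)           # batch of 32 flattened 28x28 inputs
  out = model(x)
  print(out['output'].shape)        # torch.Size([32, 10]) -- class scores
  print(out['enc_output_0'].shape)  # torch.Size([32, 256]) -- first block's code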
| 32.893103
| 166
| 0.688961
|
2075e9d5a41cb2558188101bbf21202b478ea644
| 13,311
|
py
|
Python
|
tests/scripts/test_serialization.py
|
drehak/leapp
|
062c76859e6b4a68592c6a387e44a2c1d36949ff
|
[
"Apache-2.0"
] | null | null | null |
tests/scripts/test_serialization.py
|
drehak/leapp
|
062c76859e6b4a68592c6a387e44a2c1d36949ff
|
[
"Apache-2.0"
] | 3
|
2022-01-31T10:24:53.000Z
|
2022-03-29T12:30:04.000Z
|
tests/scripts/test_serialization.py
|
drehak/leapp
|
062c76859e6b4a68592c6a387e44a2c1d36949ff
|
[
"Apache-2.0"
] | null | null | null |
import json
from datetime import datetime
import pytest
import six
from leapp.models import Model, fields
from leapp.topics import Topic
class BadBuiltinField(fields.BuiltinField):
pass
class ModelTestTopic(Topic):
name = 'model-test-topic'
class BasicModel(Model):
topic = ModelTestTopic
message = fields.String(default='Default Value')
class WithStringListModel(Model):
topic = ModelTestTopic
messages = fields.List(fields.String())
class WithNestedModel(Model):
topic = ModelTestTopic
basic = fields.Model(BasicModel)
class WithNullableNestedModel(Model):
topic = ModelTestTopic
basic = fields.Nullable(fields.Model(BasicModel))
class WithNestedListModel(Model):
topic = ModelTestTopic
items = fields.List(fields.Model(BasicModel))
class AllFieldTypesModel(Model):
topic = ModelTestTopic
float_field = fields.Float(default=3.14)
number_int_field = fields.Number(default=1.2)
number_float_field = fields.Number(default=2)
integer_field = fields.Integer(default=1)
str_field = fields.String(default='string')
unicode_field = fields.String(default=u'Unicode string')
date_field = fields.DateTime(default=datetime.utcnow())
bool_field = fields.Boolean(default=True)
class RequiredFieldModel(Model):
topic = ModelTestTopic
field = fields.String()
def test_builtin_needs_override():
with pytest.raises(NotImplementedError):
fields.Nullable(BadBuiltinField()).to_builtin(None, '', None)
def test_base_usage():
with pytest.raises(fields.ModelMisuseError):
fields.Field()
def test_basic_model():
m = BasicModel(message='Some message')
m2 = BasicModel.create(m.dump())
assert m.message == m2.message
def test_string_list_model():
m = WithStringListModel(messages=['Some message'])
m2 = WithStringListModel.create(m.dump())
assert m.messages == m2.messages
m2.messages = 'str'
with pytest.raises(fields.ModelViolationError):
m2.dump()
with pytest.raises(fields.ModelViolationError):
WithStringListModel(messages='str')
def test_string_fields_violations():
f = fields.String()
with pytest.raises(fields.ModelViolationError):
f._validate_model_value(1, 'test_value')
with pytest.raises(fields.ModelViolationError):
f._validate_builtin_value(1, 'test_value')
def test_nested_model():
m = WithNestedModel(basic=BasicModel(message='Some message'))
m2 = WithNestedModel.create(m.dump())
assert m.basic == m2.basic
with pytest.raises(fields.ModelMisuseError):
fields.Model(fields.String())
with pytest.raises(fields.ModelMisuseError):
fields.Model(fields.String)
with pytest.raises(fields.ModelViolationError):
WithNestedModel(basic='Some message')
m = WithNullableNestedModel()
m.basic = None
m.dump()
with pytest.raises(fields.ModelViolationError):
x = WithNestedModel(basic=BasicModel(message='Some message'))
x.basic = None
x.dump()
with pytest.raises(fields.ModelViolationError):
WithNestedModel.create(dict(basic=None))
with pytest.raises(fields.ModelViolationError):
WithNestedModel(basic=None)
assert WithNestedModel.create({'basic': {'message': 'test-message'}}).basic.message == 'test-message'
assert WithNestedModel(basic=BasicModel(message='test-message')).basic.message == 'test-message'
def test_nested_list_model():
m = WithNestedListModel(items=[BasicModel(message='Some message')])
m2 = WithNestedListModel.create(m.dump())
assert m.items == m2.items
def test_field_types():
m = AllFieldTypesModel()
m2 = AllFieldTypesModel.create(m.dump())
assert m == m2
def test_misuse_wrong_list_element_parameter():
with pytest.raises(fields.ModelMisuseError):
class RaisesNonFieldType(Model):
topic = ModelTestTopic
boo = fields.List('')
with pytest.raises(fields.ModelMisuseError):
class RaisesNonFieldInstance(Model):
topic = ModelTestTopic
boo = fields.List(str)
with pytest.raises(fields.ModelMisuseError):
class RaisesNonInstance(Model):
topic = ModelTestTopic
boo = fields.List(fields.String)
def test_list_field():
with pytest.raises(fields.ModelViolationError):
fields.List(fields.String())._validate_builtin_value('something', 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.List(fields.String())._convert_to_model(None, 'test-value')
fields.Nullable(fields.List(fields.String()))._convert_to_model(None, 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.List(fields.String())._convert_from_model(None, 'test-value')
fields.Nullable(fields.List(fields.String()))._convert_from_model(None, 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.Nullable(fields.List(fields.Integer(), minimum=1))._validate_builtin_value([], 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.Nullable(fields.List(fields.Integer(), minimum=1))._validate_model_value([], 'test-value')
fields.List(fields.Integer(), minimum=1)._validate_builtin_value([1], 'test-value')
fields.List(fields.Integer(), minimum=1)._validate_builtin_value([1, 2], 'test-value')
fields.List(fields.Integer(), minimum=1)._validate_model_value([1], 'test-value')
fields.List(fields.Integer(), minimum=1)._validate_model_value([1, 2], 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.List(fields.Integer(), minimum=1, maximum=1)._validate_builtin_value([1, 2], 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.List(fields.Integer(), minimum=1, maximum=1)._validate_model_value([1, 2], 'test-value')
fields.List(fields.Integer(), maximum=2)._validate_builtin_value([1], 'test-value')
fields.List(fields.Integer(), maximum=2)._validate_builtin_value([1, 2], 'test-value')
fields.List(fields.Integer(), maximum=2)._validate_model_value([1], 'test-value')
fields.List(fields.Integer(), maximum=2)._validate_model_value([1, 2], 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.List(fields.Integer(), maximum=3)._validate_builtin_value([1, 2, 3, 4], 'test-value')
def test_datetime_field():
with pytest.raises(fields.ModelViolationError):
fields.DateTime()._convert_to_model('something', 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.DateTime()._convert_to_model(None, 'test-value')
fields.Nullable(fields.DateTime())._convert_to_model(None, 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.DateTime()._convert_from_model(None, 'test-value')
fields.Nullable(fields.DateTime())._convert_from_model(None, 'test-value')
def test_nested_field():
with pytest.raises(fields.ModelViolationError):
fields.Model(BasicModel)._convert_to_model('something', 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.Model(BasicModel)._convert_to_model(None, 'test-value')
fields.Nullable(fields.Model(BasicModel))._convert_to_model(None, 'test-value')
def test_required_field_types():
# all fields which are not nullable are required
with pytest.raises(fields.ModelViolationError):
m = RequiredFieldModel(field='str')
m.field = None
m.dump()
with pytest.raises(fields.ModelViolationError):
RequiredFieldModel()
RequiredFieldModel(field='str')
with pytest.raises(fields.ModelViolationError):
fields.String()._validate_model_value(None, 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.String()._validate_model_value(None, 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.String()._validate_builtin_value(None, 'test-value')
with pytest.raises(fields.ModelViolationError):
fields.String()._validate_builtin_value(None, 'test-value')
def test_not_required_field_types():
fields.Nullable(fields.String())._validate_model_value(None, 'test-value')
fields.Nullable(fields.String())._validate_model_value(None, 'test-value')
fields.Nullable(fields.String())._validate_builtin_value(None, 'test-value')
fields.Nullable(fields.String())._validate_builtin_value(None, 'test-value')
def _make_object(instance, value):
def init(self):
setattr(self, instance.name, value)
return type('Dynamic' + instance.field_type.__name__, (object,), {'__init__': init})()
def _make_dict(instance, value):
return {instance.name: value}
def create_fixture(field, value, name, fail_values=None):
def init(self):
setattr(self, 'field_type', field)
return type('Fixture', (object,), {
'value': value,
'name': name,
'fail_values': fail_values,
'make_object': _make_object,
'make_dict': _make_dict,
'__init__': init
})()
def _create_field_string_list(**kwargs):
return fields.List(fields.String(), **kwargs)
def _create_nested_base_model_field(**kwargs):
return fields.Model(BasicModel, **kwargs)
def _create_string_enum_(*choices):
def _fun(**kwargs):
return fields.StringEnum(choices=choices, **kwargs)
return _fun
BASIC_TYPE_FIXTURES = (
create_fixture(fields.String, 'Test String Value', 'string_value'),
create_fixture(fields.Boolean, True, 'boolean_value'),
create_fixture(fields.Integer, 1, 'integer_value'),
create_fixture(fields.Float, 3.14, 'float_value'),
create_fixture(fields.Number, 3.14, 'number_float_value'),
create_fixture(fields.Number, 2, 'number_integer_value'),
create_fixture(fields.DateTime, datetime.utcnow(), 'datetime_value'),
create_fixture(_create_field_string_list, ['a', 'b', 'c'], 'string_list_value'),
create_fixture(_create_nested_base_model_field, BasicModel(message='Test message'), 'nested_model_value'),
create_fixture(_create_string_enum_('YES', 'NO', 'MAYBE'), 'YES', 'string_enum_value', fail_values=('Woot',)),
create_fixture(fields.JSON, 2, 'json_integral_value'),
create_fixture(fields.JSON, 3.14, 'json_float_value'),
create_fixture(fields.JSON, 'some string', 'json_string_value'),
create_fixture(fields.JSON, ['a', 'b', 1, 2], 'json_list_value'),
create_fixture(fields.JSON, ('a', 'b', 1, 2), 'json_tuple_value'),
create_fixture(fields.JSON, {'a': [None, {'b': 'c'}], 'b': 2}, 'json_dict_value'),
create_fixture(fields.JSON, True, 'json_bool_value'),
)
def test_choices_wrong_value():
with pytest.raises(fields.ModelMisuseError):
fields.StringEnum(choices='abc')
fields.StringEnum(choices=('abc',))
with pytest.raises(fields.ModelMisuseError):
fields.IntegerEnum(choices=1)
fields.IntegerEnum(choices=(1,))
with pytest.raises(fields.ModelMisuseError):
fields.FloatEnum(choices=1.2)
fields.FloatEnum(choices=(1.2,))
with pytest.raises(fields.ModelMisuseError):
fields.NumberEnum(choices=3.14)
fields.NumberEnum(choices=(3.14,))
def test_list_field_default():
fields.List(fields.StringEnum(choices=('1', '2', '3')), default=['1', '2'])
@pytest.mark.parametrize("case", BASIC_TYPE_FIXTURES)
def test_basic_types_sanity(case):
source = case.make_object(case.value)
target = {}
case.field_type().to_builtin(source, case.name, target)
json.dumps(target)
source = case.make_object(None)
target = {}
case.field_type().as_nullable().to_builtin(source, case.name, target)
assert case.name in target and target[case.name] is None
with pytest.raises(fields.ModelViolationError):
source = case.make_object(None)
target = {}
# Should raise an exception because the field is required and null is not allowed
case.field_type().to_builtin(source, case.name, target)
with pytest.raises(fields.ModelViolationError):
source = case.make_object(None)
target = {}
        # Should raise an exception because null is not allowed for the field
case.field_type().to_builtin(source, case.name, target)
source = case.make_object(case.value)
target = {}
field = case.field_type()
field.to_builtin(source, case.name, target)
assert target.get(case.name) == field._convert_from_model(case.value, case.name)
json.dumps(target)
source = case.make_object(None)
target = {}
field = case.field_type().as_nullable()
field.to_builtin(source, case.name, target)
assert target.get(case.name) is None
json.dumps(target)
if case.fail_values:
for fail_value in case.fail_values:
source = case.make_object(fail_value)
target = {case.name: fail_value}
field = case.field_type()
with pytest.raises(fields.ModelViolationError):
field.to_builtin(source, case.name, target)
target = case.make_object(case.value)
source = {case.name: fail_value}
with pytest.raises(fields.ModelViolationError):
field.to_model(source, case.name, target)
assert isinstance(field.help, six.string_types)
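# A minimal serialization round-trip sketch, mirroring test_basic_model and
# test_nested_model above (illustrative only):
#
#   m = WithNestedModel(basic=BasicModel(message='hi'))
#   data = m.dump()                    # plain builtins, safe to json.dumps()
#   assert WithNestedModel.create(data).basic.message == 'hi'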
| 34.306701
| 114
| 0.704455
|
d8e5ae34bb4e1d4a53f8e67b01c262a653f369af
| 1,751
|
py
|
Python
|
build/android/PRESUBMIT.py
|
nagineni/chromium-crosswalk
|
5725642f1c67d0f97e8613ec1c3e8107ab53fdf8
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231
|
2015-01-08T09:04:44.000Z
|
2021-12-30T03:03:10.000Z
|
build/android/PRESUBMIT.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8
|
2015-08-31T06:39:59.000Z
|
2021-12-04T14:53:28.000Z
|
build/android/PRESUBMIT.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268
|
2015-01-21T05:53:28.000Z
|
2022-03-25T22:09:01.000Z
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for android buildbot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
_DELETIONS_ONLY_FILES = (
'build/android/findbugs_filter/findbugs_known_bugs.txt',
)
def _CheckDeletionsOnlyFiles(input_api, output_api):
"""Check that a certain listed files only have deletions.
"""
warnings = []
for f in input_api.AffectedFiles():
if f.LocalPath() in _DELETIONS_ONLY_FILES:
if f.ChangedContents():
warnings.append(f.LocalPath())
results = []
if warnings:
results.append(output_api.PresubmitPromptWarning(
'Following files should only contain deletions.', warnings))
return results
def CommonChecks(input_api, output_api):
output = []
def J(*dirs):
"""Returns a path relative to presubmit directory."""
return input_api.os_path.join(input_api.PresubmitLocalPath(), *dirs)
output.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
white_list=[r'PRESUBMIT\.py$', r'buildbot/.*\.py$'],
extra_paths_list=[
J(), J('..', '..', 'third_party', 'android_testrunner'),
J('buildbot')]))
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, J('buildbot', 'tests')))
output.extend(_CheckDeletionsOnlyFiles(input_api, output_api))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
| 30.189655
| 79
| 0.723587
|
3bcace88c42c337aaf9d215ed68d604cd964bbaa
| 2,810
|
py
|
Python
|
random-forest/python/random_forest.py
|
benhsu75/sigopt-examples
|
8ed99fc81fee2aa4d3e69aef03d3a4ec051a073a
|
[
"MIT"
] | null | null | null |
random-forest/python/random_forest.py
|
benhsu75/sigopt-examples
|
8ed99fc81fee2aa4d3e69aef03d3a4ec051a073a
|
[
"MIT"
] | 29
|
2020-01-28T22:11:43.000Z
|
2022-03-11T23:48:02.000Z
|
random-forest/python/random_forest.py
|
benhsu75/sigopt-examples
|
8ed99fc81fee2aa4d3e69aef03d3a4ec051a073a
|
[
"MIT"
] | null | null | null |
# Use SigOpt to tune a Random Forest Classifier in Python
# Learn more about SigOpt's Python Client:
# https://sigopt.com/docs/overview/python
# Run `pip install sigopt` to download the python API client
# Run `pip install sklearn` to install scikit-learn, a machine learning
# library in Python (http://scikit-learn.org)
from sigopt import Connection
from sklearn import datasets
from sklearn.model_selection import cross_val_score, ShuffleSplit
from sklearn.ensemble import RandomForestClassifier
import numpy
# Learn more about authenticating the SigOpt API:
# https://sigopt.com/docs/overview/authentication
conn = Connection(client_token=SIGOPT_API_TOKEN)
# Load dataset
# We are using the iris dataset as an example
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Create a SigOpt experiment for the Random Forest parameters
experiment = conn.experiments().create(
name="Random Forest (Python)",
project="sigopt-examples",
parameters=[
dict(name="max_features", type="int", bounds=dict(min=1, max=len(iris)-2)),
dict(name="n_estimators", type="int", bounds=dict(min=1, max=100)),
dict(name="min_samples_leaf", type="int", bounds=dict(min=1, max=10))
]
)
print("Created experiment: https://sigopt.com/experiment/" + experiment.id)
# Our objective metric is the mean of cross-validated accuracies
# We use cross validation to prevent overfitting
def evaluate_model(assignments, X, y):
# evaluate cross folds for accuracy
cv = ShuffleSplit(
n_splits=5,
test_size=0.3,
)
classifier = RandomForestClassifier(
n_estimators=assignments['n_estimators'],
max_features=assignments['max_features'],
min_samples_leaf=assignments['min_samples_leaf']
)
cv_accuracies = cross_val_score(classifier, X, y, cv=cv)
return (numpy.mean(cv_accuracies), numpy.std(cv_accuracies))
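# Hedged aside, not part of the original SigOpt example: evaluate_model can be
# smoke-tested locally with a hand-picked assignments dict before involving the
# optimizer. The parameter values below are arbitrary illustrations.
example_assignments = {'n_estimators': 10, 'max_features': 2, 'min_samples_leaf': 1}
example_mean, example_std = evaluate_model(example_assignments, X, y)
print("Sanity-check accuracy: %.3f +/- %.3f" % (example_mean, example_std))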
# Run the Optimization Loop between 10x - 20x the number of parameters
for _ in range(60):
# Receive a Suggestion from SigOpt
suggestion = conn.experiments(experiment.id).suggestions().create()
# Evaluate the model locally
(value, std) = evaluate_model(suggestion.assignments, X, y)
# Report an Observation (with standard deviation) back to SigOpt
conn.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
value_stddev=std,
)
# Re-fetch the best observed value and assignments
best_assignments = conn.experiments(experiment.id).best_assignments().fetch().data[0].assignments
# To wrap up the Experiment, fit the RandomForest on the best assignments
# and train on all available data
rf = RandomForestClassifier(
n_estimators=best_assignments['n_estimators'],
max_features=best_assignments['max_features'],
min_samples_leaf=best_assignments['min_samples_leaf']
)
rf.fit(X, y)
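# Hedged aside, not part of the original example: the refit classifier exposes
# the usual scikit-learn prediction API. Re-predicting on the training data here
# is purely illustrative.
predictions = rf.predict(X)
print("Training accuracy of best model: %.3f" % numpy.mean(predictions == y))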
| 36.025641
| 97
| 0.752669
|
20821c7cec6f0591d0ecab95c59711456008178c
| 14,573
|
py
|
Python
|
asks/sessions.py
|
dahlia/asks
|
69f4416c99d35bea10032d138e6f823469e53cc4
|
[
"MIT"
] | null | null | null |
asks/sessions.py
|
dahlia/asks
|
69f4416c99d35bea10032d138e6f823469e53cc4
|
[
"MIT"
] | null | null | null |
asks/sessions.py
|
dahlia/asks
|
69f4416c99d35bea10032d138e6f823469e53cc4
|
[
"MIT"
] | null | null | null |
'''
The disparate session (Session) is for making requests to multiple locations.
'''
from abc import ABCMeta, abstractmethod
from copy import copy
from functools import partialmethod
from urllib.parse import urlparse, urlunparse
import ssl
from h11 import RemoteProtocolError
from anyio import connect_tcp, create_semaphore
from .cookie_utils import CookieTracker
from .errors import BadHttpResponse
from .req_structs import SocketQ
from .request_object import RequestProcessor
from .utils import get_netloc_port, timeout_manager
__all__ = ['Session']
class BaseSession(metaclass=ABCMeta):
'''
The base class for asks' sessions.
Contains methods for creating sockets, figuring out which type of
socket to create, and all of the HTTP methods ('GET', 'POST', etc.)
'''
def __init__(self, headers=None, ssl_context=None):
'''
Args:
headers (dict): Headers to be applied to all requests.
headers set by http method call will take precedence and
overwrite headers set by the headers arg.
ssl_context (ssl.SSLContext): SSL context to use for https connections.
'''
if headers is not None:
self.headers = headers
else:
self.headers = {}
self.ssl_context = ssl_context
self.encoding = None
self.source_address = None
self._cookie_tracker = None
@property
@abstractmethod
def sema(self):
"""
A semaphore-like context manager.
"""
...
async def _open_connection_http(self, location):
'''
Creates a normal async socket, returns it.
Args:
location (tuple(str, int)): A tuple of net location (eg
'127.0.0.1' or 'example.org') and port (eg 80 or 25000).
'''
sock = await connect_tcp(location[0], location[1], bind_host=self.source_address)
sock._active = True
return sock
async def _open_connection_https(self, location):
'''
Creates an async SSL socket, returns it.
Args:
location (tuple(str, int)): A tuple of net location (eg
'127.0.0.1' or 'example.org') and port (eg 80 or 25000).
'''
sock = await connect_tcp(location[0],
location[1],
ssl_context=self.ssl_context,
bind_host=self.source_address,
autostart_tls=True,
tls_standard_compatible=False)
sock._active = True
return sock
async def _connect(self, host_loc):
'''
Simple enough stuff to figure out where we should connect, and creates
the appropriate connection.
'''
scheme, host, path, parameters, query, fragment = urlparse(
host_loc)
if parameters or query or fragment:
raise TypeError('Supplied info beyond scheme, host.' +
' Host should be top level only: ', path)
host, port = get_netloc_port(scheme, host)
if scheme == 'http':
return await self._open_connection_http(
(host, int(port))), port
else:
return await self._open_connection_https(
(host, int(port))), port
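    # Illustrative note, not part of asks: _connect leans on the standard
    # library's urlparse to split the scheme from the host, e.g.
    #
    #   urlparse('https://example.org:8443')
    #   # ParseResult(scheme='https', netloc='example.org:8443', path='',
    #   #             params='', query='', fragment='')
    #
    # so an 'http' scheme is routed to _open_connection_http and anything
    # else (normally 'https') to _open_connection_https.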
async def request(self, method, url=None, *, path='', retries=1,
connection_timeout=60, **kwargs):
'''
This is the template for all of the `http method` methods for
the Session.
Args:
method (str): A http method, such as 'GET' or 'POST'.
url (str): The url the request should be made to.
path (str): An optional kw-arg for use in Session method calls,
for specifying a particular path. Usually to be used in
conjunction with the base_location/endpoint paradigm.
kwargs: Any number of the following:
data (dict or str): Info to be processed as a
body-bound query.
params (dict or str): Info to be processed as a
url-bound query.
headers (dict): User HTTP headers to be used in the
request.
encoding (str): The str representation of the codec to
process the request under.
json (dict): A dict to be formatted as json and sent in
the request body.
files (dict): A dict of `filename:filepath`s to be sent
as multipart.
cookies (dict): A dict of `name:value` cookies to be
passed in request.
callback (func): A callback function to be called on
                    each bytechunk of the response body.
timeout (int or float): A numeric representation of the
longest time to wait on a complete response once a
request has been sent.
retries (int): The number of attempts to try against
connection errors.
max_redirects (int): The maximum number of redirects
allowed.
persist_cookies (True or None): Passing True
instantiates a CookieTracker object to manage the
return of cookies to the server under the relevant
domains.
auth (child of AuthBase): An object for handling auth
construction.
stream (bool): Whether or not to return a StreamResponse
vs Response
When you call something like Session.get() or asks.post(), you're
really calling a partial method that has the 'method' argument
pre-completed.
'''
ALLOWED_KWARGS = {
"data",
"params",
"headers",
"encoding",
"json",
"files",
"cookies",
"callback",
"timeout",
"retries",
"max_redirects",
"persist_cookies",
"auth",
"stream",
}
        unknown_kwarg = next((k for k in kwargs if k not in ALLOWED_KWARGS), None)
        if unknown_kwarg is not None:
            raise TypeError("request() got an unexpected keyword argument " +
                            repr(unknown_kwarg))
timeout = kwargs.get('timeout', None)
req_headers = kwargs.pop('headers', None)
if self.headers is not None:
headers = copy(self.headers)
if req_headers is not None:
headers.update(req_headers)
req_headers = headers
async with self.sema:
if url is None:
url = self._make_url() + path
retry = False
sock = None
try:
sock = await timeout_manager(
connection_timeout, self._grab_connection, url)
port = sock.port
req_obj = RequestProcessor(
self,
method,
url,
port,
headers=req_headers,
encoding=self.encoding,
sock=sock,
persist_cookies=self._cookie_tracker,
**kwargs
)
try:
if timeout is None:
sock, r = await req_obj.make_request()
else:
sock, r = await timeout_manager(timeout, req_obj.make_request)
except BadHttpResponse:
if timeout is None:
sock, r = await req_obj.make_request()
else:
sock, r = await timeout_manager(timeout, req_obj.make_request)
if sock is not None:
try:
if r.headers['connection'].lower() == 'close':
sock._active = False
await sock.close()
except KeyError:
pass
await self.return_to_pool(sock)
# ConnectionErrors are special. They are the only kind of exception
# we ever want to suppress. All other exceptions are re-raised or
# raised through another exception.
except ConnectionError as e:
if retries > 0:
retry = True
retries -= 1
else:
raise e
except Exception as e:
if sock:
await self._handle_exception(e, sock)
raise
# any BaseException is considered unlawful murder, and
# Session.cleanup should be called to tidy up sockets.
except BaseException as e:
if sock:
await sock.close()
raise e
if retry:
return (await self.request(method,
url,
path=path,
retries=retries,
headers=headers,
**kwargs))
return r
# These be the actual http methods!
# They are partial methods of `request`. See the `request` docstring
# above for information.
get = partialmethod(request, 'GET')
head = partialmethod(request, 'HEAD')
post = partialmethod(request, 'POST')
put = partialmethod(request, 'PUT')
delete = partialmethod(request, 'DELETE')
options = partialmethod(request, 'OPTIONS')
patch = partialmethod(request, 'PATCH')
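    # Illustrative note, not part of asks: functools.partialmethod simply
    # pre-fills the leading arguments of a method. A toy equivalent:
    #
    #   from functools import partialmethod
    #
    #   class Greeter:
    #       def greet(self, greeting, name):
    #           return '%s, %s!' % (greeting, name)
    #       hello = partialmethod(greet, 'Hello')
    #
    #   Greeter().hello('world')  # 'Hello, world!'
    #
    # Likewise, Session.get(url) ends up calling request('GET', url).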
async def _handle_exception(self, e, sock):
"""
Given an exception, we want to handle it appropriately. Some exceptions we
prefer to shadow with an asks exception, and some we want to raise directly.
In all cases we clean up the underlying socket.
"""
if isinstance(e, (RemoteProtocolError, AssertionError)):
await sock.close()
raise BadHttpResponse('Invalid HTTP response from server.') from e
if isinstance(e, Exception):
await sock.close()
raise e
@abstractmethod
def _make_url(self):
"""
        A method whose result is concatenated with a URI path.
"""
...
@abstractmethod
async def _grab_connection(self, url):
"""
A method that will return a socket-like object.
"""
...
@abstractmethod
async def return_to_pool(self, sock):
"""
A method that will accept a socket-like object.
"""
...
class Session(BaseSession):
'''
The Session class, for handling piles of requests.
This class inherits from BaseSession, where all of the 'http method'
methods are defined.
'''
def __init__(self,
base_location=None,
endpoint=None,
headers=None,
encoding='utf-8',
persist_cookies=None,
ssl_context=None,
connections=1):
'''
Args:
encoding (str): The encoding asks'll try to use on response bodies.
persist_cookies (bool): Passing True turns on browserishlike
stateful cookie behaviour, returning cookies to the host when
appropriate.
connections (int): The max number of concurrent connections to the
                host that asks will allow itself to have. The default number of
connections is 1. You may increase this value as you see fit.
'''
super().__init__(headers, ssl_context)
self.encoding = encoding
self.base_location = base_location
self.endpoint = endpoint
if persist_cookies is True:
self._cookie_tracker = CookieTracker()
else:
self._cookie_tracker = persist_cookies
self._conn_pool = SocketQ()
self._sema = None
self._connections = connections
@property
def sema(self):
if self._sema is None:
self._sema = create_semaphore(self._connections)
return self._sema
def _checkout_connection(self, host_loc):
try:
index = self._conn_pool.index(host_loc)
except ValueError:
return None
sock = self._conn_pool.pull(index)
return sock
async def return_to_pool(self, sock):
if sock._active:
self._conn_pool.appendleft(sock)
async def _make_connection(self, host_loc):
sock, port = await self._connect(host_loc)
sock.host, sock.port = host_loc, port
return sock
async def _grab_connection(self, url):
'''
The connection pool handler. Returns a connection
to the caller. If there are no connections ready, and
as many connections checked out as there are available total,
we yield control to the event loop.
If there is a connection ready or space to create a new one, we
pop/create it, register it as checked out, and return it.
Args:
url (str): breaks the url down and uses the top level location
info to see if we have any connections to the location already
lying around.
'''
scheme, host, _, _, _, _ = urlparse(url)
host_loc = urlunparse((scheme, host, '', '', '', ''))
sock = self._checkout_connection(host_loc)
if sock is None:
sock = await self._make_connection(host_loc)
return sock
def _make_url(self):
'''
Puts together the hostloc and current endpoint for use in request uri.
'''
return (self.base_location or '') + (self.endpoint or '')
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
async def close(self):
await self._conn_pool.free_pool()
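# A minimal usage sketch, not part of this module; it assumes asks is installed
# together with an anyio-compatible event loop, and the URL is a placeholder.
#
#   import anyio
#
#   async def main():
#       async with Session(connections=4) as s:
#           r = await s.get('https://example.org', timeout=10)
#           print(r.status_code)
#
#   anyio.run(main)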
| 35.285714
| 89
| 0.537432
|
a42159940096953ef92144cd532b623516183fbb
| 4,900
|
py
|
Python
|
app/controllers/subscribed_lists/forms.py
|
palazzem/gello
|
19fe9e4aa8de485dd829a87047ec64f89b5fa7ee
|
[
"Apache-2.0"
] | 44
|
2018-03-28T14:22:23.000Z
|
2022-03-15T07:25:06.000Z
|
app/controllers/subscribed_lists/forms.py
|
palazzem/gello
|
19fe9e4aa8de485dd829a87047ec64f89b5fa7ee
|
[
"Apache-2.0"
] | 44
|
2018-03-28T14:19:03.000Z
|
2022-02-16T10:24:57.000Z
|
app/controllers/subscribed_lists/forms.py
|
palazzem/gello
|
19fe9e4aa8de485dd829a87047ec64f89b5fa7ee
|
[
"Apache-2.0"
] | 12
|
2018-03-28T14:15:43.000Z
|
2021-07-19T17:33:20.000Z
|
# -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""subscribed_lists/forms.py
SubscribedList-related forms.
"""
import textwrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from ...models import List, TrelloMember, SubscribedList
class NewForm(FlaskForm):
"""Form for creating a subscribed_list."""
list_name = StringField(
'List Name',
validators=[Required()],
description=textwrap.dedent(
"""
The name of a Trello list associated with the Trello board
subscribed
"""
)
)
trello_username = StringField(
'Trello Member Username',
description=textwrap.dedent(
"""
An optional field to specify the Trello username for a member to be
automatically assigned to any Trello cards created on this list
"""
)
)
submit = SubmitField('Create')
def __init__(self, board_id, repo_id):
"""Sets the `board_id` for the form."""
FlaskForm.__init__(self)
self._board_id = board_id
self._repo_id = repo_id
def validate(self):
"""Performs validations of the form field values.
- Validates the `list_id` attribute is a `List.trello_list_id`
belonging to the `Board` with `board_id`.
        - Validates the `trello_member_id` attribute belongs to a
`TrelloMember`
"""
list_name = self.list_name.data.strip()
trello_username = self.trello_username.data.strip()
trello_list = List.query.filter_by(
name=list_name, board_id=self._board_id
).first()
if trello_list is None:
self._error_message = textwrap.dedent(
f"""
                Trello List '{list_name}' does not exist
"""
)
return False
# Validate the `SubscribedList` does not already exist
subscribed_list = SubscribedList.query.get(
[self._board_id, self._repo_id, trello_list.trello_list_id]
)
if subscribed_list is not None:
self._error_message = textwrap.dedent(
f"""
Subscribed List exists for {self._board_id}, {self._repo_id},
{list_name}
"""
)
return False
# Get the `list_id` to return back to `views.py`
self._list_id = trello_list.trello_list_id
# `trello_member_id` is optional
if not trello_username:
return True
trello_member = TrelloMember.query.filter_by(
username=trello_username
).first()
if trello_member is None:
self._error_message = textwrap.dedent(
f"""
                Trello Member '{trello_username}' does not exist
"""
)
return False
# Get the `trello_member_id` to return back to `views.py`
self._trello_member_id = trello_member.trello_member_id
# All custom validations passed
return True
def get_list_id(self):
return self._list_id
def get_trello_member_id(self):
return self._trello_member_id
def get_error_message(self):
return self._error_message
class UpdateForm(FlaskForm):
"""Form for updating an existing subscribed lists."""
trello_update_username = StringField('Trello Member Username')
submit = SubmitField('Update')
def validate(self):
"""Performs validations of the form field values.
        - Validates the `trello_member_id` attribute belongs to a
`TrelloMember`
"""
trello_username = self.trello_update_username.data.strip()
# `trello_member_id` is optional
if not trello_username:
return True
trello_member = TrelloMember.query.filter_by(
username=trello_username
).first()
if trello_member is None:
self._error_message = textwrap.dedent(
f"""
                Trello Member '{trello_username}' does not exist
"""
)
return False
# Get the `trello_member_id` to return back to `views.py`
self._trello_member_id = trello_member.trello_member_id
# All custom validations passed
return True
def get_trello_member_id(self):
return self._trello_member_id
def get_error_message(self):
return self._error_message
class DeleteForm(FlaskForm):
"""Form for deleting an existing subscribed_list."""
submit = SubmitField('Delete')
| 28.654971
| 79
| 0.611224
|
6f070b6c31e33946916ec7d15e14e0312a8818a5
| 24,422
|
py
|
Python
|
qtconsole/jupyter_widget.py
|
hmaarrfk/qtconsole
|
2379cb7903730de343a917ceb91edf8bfde2659e
|
[
"BSD-3-Clause"
] | 2
|
2020-09-30T00:11:09.000Z
|
2021-10-04T13:00:38.000Z
|
qtconsole/jupyter_widget.py
|
hmaarrfk/qtconsole
|
2379cb7903730de343a917ceb91edf8bfde2659e
|
[
"BSD-3-Clause"
] | 3
|
2020-03-24T17:46:11.000Z
|
2021-08-23T20:23:16.000Z
|
qtconsole/jupyter_widget.py
|
hmaarrfk/qtconsole
|
2379cb7903730de343a917ceb91edf8bfde2659e
|
[
"BSD-3-Clause"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
"""A FrontendWidget that emulates a repl for a Jupyter kernel.
This supports the additional functionality provided by a Jupyter kernel.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
import os.path
import re
from subprocess import Popen
import sys
import time
from textwrap import dedent
from warnings import warn
from qtconsole.qt import QtCore, QtGui
from IPython.lib.lexers import IPythonLexer, IPython3Lexer
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from qtconsole import __version__
from traitlets import Bool, Unicode, observe, default
from .frontend_widget import FrontendWidget
from . import styles
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Default strings to build and display input and output prompts (and separators
# in between)
default_in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
default_out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
default_input_sep = '\n'
default_output_sep = ''
default_output_sep2 = ''
# Base path for most payload sources.
zmq_shell_source = 'ipykernel.zmqshell.ZMQInteractiveShell'
if sys.platform.startswith('win'):
default_editor = 'notepad'
else:
default_editor = ''
#-----------------------------------------------------------------------------
# JupyterWidget class
#-----------------------------------------------------------------------------
class IPythonWidget(FrontendWidget):
"""Dummy class for config inheritance. Destroyed below."""
class JupyterWidget(IPythonWidget):
"""A FrontendWidget for a Jupyter kernel."""
# If set, the 'custom_edit_requested(str, int)' signal will be emitted when
# an editor is needed for a file. This overrides 'editor' and 'editor_line'
# settings.
custom_edit = Bool(False)
custom_edit_requested = QtCore.Signal(object, object)
editor = Unicode(default_editor, config=True,
help="""
A command for invoking a GUI text editor. If the string contains a
{filename} format specifier, it will be used. Otherwise, the filename
        will be appended to the end of the command. To use a terminal text editor,
the command should launch a new terminal, e.g.
``"gnome-terminal -- vim"``.
""")
editor_line = Unicode(config=True,
help="""
The editor command to use when a specific line number is requested. The
string should contain two format specifiers: {line} and {filename}. If
this parameter is not specified, the line number option to the %edit
magic will be ignored.
""")
style_sheet = Unicode(config=True,
help="""
A CSS stylesheet. The stylesheet can contain classes for:
1. Qt: QPlainTextEdit, QFrame, QWidget, etc
2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
3. QtConsole: .error, .in-prompt, .out-prompt, etc
""")
syntax_style = Unicode(config=True,
help="""
If not empty, use this Pygments style for syntax highlighting.
Otherwise, the style sheet is queried for Pygments style
information.
""")
# Prompts.
in_prompt = Unicode(default_in_prompt, config=True)
out_prompt = Unicode(default_out_prompt, config=True)
input_sep = Unicode(default_input_sep, config=True)
output_sep = Unicode(default_output_sep, config=True)
output_sep2 = Unicode(default_output_sep2, config=True)
# JupyterWidget protected class variables.
_PromptBlock = namedtuple('_PromptBlock', ['block', 'length', 'number'])
_payload_source_edit = 'edit_magic'
_payload_source_exit = 'ask_exit'
_payload_source_next_input = 'set_next_input'
_payload_source_page = 'page'
_retrying_history_request = False
_starting = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
super(JupyterWidget, self).__init__(*args, **kw)
# JupyterWidget protected variables.
self._payload_handlers = {
self._payload_source_edit : self._handle_payload_edit,
self._payload_source_exit : self._handle_payload_exit,
self._payload_source_page : self._handle_payload_page,
self._payload_source_next_input : self._handle_payload_next_input }
self._previous_prompt_obj = None
self._keep_kernel_on_exit = None
# Initialize widget styling.
if self.style_sheet:
self._style_sheet_changed()
self._syntax_style_changed()
else:
self.set_default_style()
# Initialize language name.
self.language_name = None
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#
# For JupyterWidget, override FrontendWidget methods which implement the
# BaseFrontend Mixin abstract interface
#---------------------------------------------------------------------------
def _handle_complete_reply(self, rep):
"""Support Jupyter's improved completion machinery.
"""
self.log.debug("complete: %s", rep.get('content', ''))
cursor = self._get_cursor()
info = self._request_info.get('complete')
if (info and info.id == rep['parent_header']['msg_id']
and info.pos == self._get_input_buffer_cursor_pos()
and info.code == self.input_buffer):
content = rep['content']
matches = content['matches']
start = content['cursor_start']
end = content['cursor_end']
start = max(start, 0)
end = max(end, start)
# Move the control's cursor to the desired end point
cursor_pos = self._get_input_buffer_cursor_pos()
if end < cursor_pos:
cursor.movePosition(QtGui.QTextCursor.Left,
n=(cursor_pos - end))
elif end > cursor_pos:
cursor.movePosition(QtGui.QTextCursor.Right,
n=(end - cursor_pos))
# This line actually applies the move to control's cursor
self._control.setTextCursor(cursor)
offset = end - start
# Move the local cursor object to the start of the match and
# complete.
cursor.movePosition(QtGui.QTextCursor.Left, n=offset)
self._complete_with_items(cursor, matches)
def _handle_execute_reply(self, msg):
"""Support prompt requests.
"""
msg_id = msg['parent_header'].get('msg_id')
info = self._request_info['execute'].get(msg_id)
if info and info.kind == 'prompt':
content = msg['content']
if content['status'] == 'aborted':
self._show_interpreter_prompt()
else:
number = content['execution_count'] + 1
self._show_interpreter_prompt(number)
self._request_info['execute'].pop(msg_id)
else:
super(JupyterWidget, self)._handle_execute_reply(msg)
def _handle_history_reply(self, msg):
""" Handle history tail replies, which are only supported
by Jupyter kernels.
"""
content = msg['content']
if 'history' not in content:
self.log.error("History request failed: %r"%content)
if content.get('status', '') == 'aborted' and \
not self._retrying_history_request:
# a *different* action caused this request to be aborted, so
# we should try again.
self.log.error("Retrying aborted history request")
# prevent multiple retries of aborted requests:
self._retrying_history_request = True
# wait out the kernel's queue flush, which is currently timed at 0.1s
time.sleep(0.25)
self.kernel_client.history(hist_access_type='tail',n=1000)
else:
self._retrying_history_request = False
return
# reset retry flag
self._retrying_history_request = False
history_items = content['history']
self.log.debug("Received history reply with %i entries", len(history_items))
items = []
last_cell = u""
for _, _, cell in history_items:
cell = cell.rstrip()
if cell != last_cell:
items.append(cell)
last_cell = cell
self._set_history(items)
def _insert_other_input(self, cursor, content):
"""Insert function for input from other frontends"""
n = content.get('execution_count', 0)
prompt = self._make_in_prompt(n, remote=True)
cont_prompt = self._make_continuation_prompt(self._prompt, remote=True)
cursor.insertText('\n')
for i, line in enumerate(content['code'].strip().split('\n')):
if i == 0:
self._insert_html(cursor, prompt)
else:
self._insert_html(cursor, cont_prompt)
self._insert_plain_text(cursor, line + '\n')
# Update current prompt number
self._update_prompt(n + 1)
def _handle_execute_input(self, msg):
"""Handle an execute_input message"""
self.log.debug("execute_input: %s", msg.get('content', ''))
if self.include_output(msg):
self._append_custom(self._insert_other_input, msg['content'], before_prompt=True)
def _handle_execute_result(self, msg):
"""Handle an execute_result message"""
self.log.debug("execute_result: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
if 'text/plain' in data:
self._append_plain_text(self.output_sep, before_prompt=True)
self._append_html(
self._make_out_prompt(prompt_number, remote=not self.from_here(msg)),
before_prompt=True
)
text = data['text/plain']
# If the repr is multiline, make sure we start on a new line,
# so that its lines are aligned.
if "\n" in text and not self.output_sep.endswith("\n"):
self._append_plain_text('\n', before_prompt=True)
self._append_plain_text(text + self.output_sep2, before_prompt=True)
if not self.from_here(msg):
self._append_plain_text('\n', before_prompt=True)
def _handle_display_data(self, msg):
"""The base handler for the ``display_data`` message."""
# For now, we don't display data from other frontends, but we
# eventually will as this allows all frontends to monitor the display
# data. But we need to figure out how to handle this in the GUI.
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# In the regular JupyterWidget, we simply print the plain text
# representation.
if 'text/plain' in data:
text = data['text/plain']
self._append_plain_text(text, True)
# This newline seems to be needed for text and html output.
self._append_plain_text(u'\n', True)
def _handle_kernel_info_reply(self, rep):
"""Handle kernel info replies."""
content = rep['content']
self.language_name = content['language_info']['name']
pygments_lexer = content['language_info'].get('pygments_lexer', '')
try:
# Other kernels with pygments_lexer info will have to be
# added here by hand.
if pygments_lexer == 'ipython3':
lexer = IPython3Lexer()
elif pygments_lexer == 'ipython2':
lexer = IPythonLexer()
else:
lexer = get_lexer_by_name(self.language_name)
self._highlighter._lexer = lexer
except ClassNotFound:
pass
self.kernel_banner = content.get('banner', '')
if self._starting:
# finish handling started channels
self._starting = False
super(JupyterWidget, self)._started_channels()
def _started_channels(self):
"""Make a history request"""
self._starting = True
self.kernel_client.kernel_info()
self.kernel_client.history(hist_access_type='tail', n=1000)
#---------------------------------------------------------------------------
# 'FrontendWidget' protected interface
#---------------------------------------------------------------------------
def _process_execute_error(self, msg):
"""Handle an execute_error message"""
self.log.debug("execute_error: %s", msg.get('content', ''))
content = msg['content']
traceback = '\n'.join(content['traceback']) + '\n'
if False:
# FIXME: For now, tracebacks come as plain text, so we can't
# use the html renderer yet. Once we refactor ultratb to
# produce properly styled tracebacks, this branch should be the
# default
            traceback = traceback.replace(' ', '&nbsp;')
traceback = traceback.replace('\n', '<br/>')
ename = content['ename']
ename_styled = '<span class="error">%s</span>' % ename
traceback = traceback.replace(ename, ename_styled)
self._append_html(traceback)
else:
# This is the fallback for now, using plain text with ansi
# escapes
self._append_plain_text(traceback, before_prompt=not self.from_here(msg))
def _process_execute_payload(self, item):
""" Reimplemented to dispatch payloads to handler methods.
"""
handler = self._payload_handlers.get(item['source'])
if handler is None:
# We have no handler for this type of payload, simply ignore it
return False
else:
handler(item)
return True
def _show_interpreter_prompt(self, number=None):
""" Reimplemented for IPython-style prompts.
"""
# If a number was not specified, make a prompt number request.
if number is None:
msg_id = self.kernel_client.execute('', silent=True)
info = self._ExecutionRequest(msg_id, 'prompt')
self._request_info['execute'][msg_id] = info
return
# Show a new prompt and save information about it so that it can be
# updated later if the prompt number turns out to be wrong.
self._prompt_sep = self.input_sep
self._show_prompt(self._make_in_prompt(number), html=True)
block = self._control.document().lastBlock()
length = len(self._prompt)
self._previous_prompt_obj = self._PromptBlock(block, length, number)
# Update continuation prompt to reflect (possibly) new prompt length.
self._set_continuation_prompt(
self._make_continuation_prompt(self._prompt), html=True)
def _update_prompt(self, new_prompt_number):
"""Replace the last displayed prompt with a new one."""
block = self._previous_prompt_obj.block
# Make sure the prompt block has not been erased.
if block.isValid() and block.text():
# Remove the old prompt and insert a new prompt.
cursor = QtGui.QTextCursor(block)
cursor.movePosition(QtGui.QTextCursor.Right,
QtGui.QTextCursor.KeepAnchor,
self._previous_prompt_obj.length)
prompt = self._make_in_prompt(new_prompt_number)
self._prompt = self._insert_html_fetching_plain_text(
cursor, prompt)
# When the HTML is inserted, Qt blows away the syntax
# highlighting for the line, so we need to rehighlight it.
self._highlighter.rehighlightBlock(cursor.block())
# Update the prompt cursor
self._prompt_cursor.setPosition(cursor.position() - 1)
def _show_interpreter_prompt_for_reply(self, msg):
""" Reimplemented for IPython-style prompts.
"""
# Update the old prompt number if necessary.
content = msg['content']
# abort replies do not have any keys:
if content['status'] == 'aborted':
if self._previous_prompt_obj:
previous_prompt_number = self._previous_prompt_obj.number
else:
previous_prompt_number = 0
else:
previous_prompt_number = content['execution_count']
if self._previous_prompt_obj and \
self._previous_prompt_obj.number != previous_prompt_number:
self._update_prompt(previous_prompt_number)
self._previous_prompt_obj = None
# Show a new prompt with the kernel's estimated prompt number.
self._show_interpreter_prompt(previous_prompt_number + 1)
#---------------------------------------------------------------------------
# 'JupyterWidget' interface
#---------------------------------------------------------------------------
def set_default_style(self, colors='lightbg'):
""" Sets the widget style to the class defaults.
Parameters
----------
colors : str, optional (default lightbg)
Whether to use the default light background or dark
background or B&W style.
"""
colors = colors.lower()
if colors=='lightbg':
self.style_sheet = styles.default_light_style_sheet
self.syntax_style = styles.default_light_syntax_style
elif colors=='linux':
self.style_sheet = styles.default_dark_style_sheet
self.syntax_style = styles.default_dark_syntax_style
elif colors=='nocolor':
self.style_sheet = styles.default_bw_style_sheet
self.syntax_style = styles.default_bw_syntax_style
else:
raise KeyError("No such color scheme: %s"%colors)
#---------------------------------------------------------------------------
# 'JupyterWidget' protected interface
#---------------------------------------------------------------------------
def _edit(self, filename, line=None):
""" Opens a Python script for editing.
Parameters
----------
filename : str
A path to a local system file.
line : int, optional
A line of interest in the file.
"""
if self.custom_edit:
self.custom_edit_requested.emit(filename, line)
elif not self.editor:
self._append_plain_text('No default editor available.\n'
'Specify a GUI text editor in the `JupyterWidget.editor` '
'configurable to enable the %edit magic')
else:
try:
filename = '"%s"' % filename
if line and self.editor_line:
command = self.editor_line.format(filename=filename,
line=line)
else:
try:
command = self.editor.format()
except KeyError:
command = self.editor.format(filename=filename)
else:
command += ' ' + filename
except KeyError:
self._append_plain_text('Invalid editor command.\n')
else:
try:
Popen(command, shell=True)
except OSError:
msg = 'Opening editor with command "%s" failed.\n'
self._append_plain_text(msg % command)
def _make_in_prompt(self, number, remote=False):
""" Given a prompt number, returns an HTML In prompt.
"""
try:
body = self.in_prompt % number
except TypeError:
# allow in_prompt to leave out number, e.g. '>>> '
from xml.sax.saxutils import escape
body = escape(self.in_prompt)
if remote:
body = self.other_output_prefix + body
return '<span class="in-prompt">%s</span>' % body
def _make_continuation_prompt(self, prompt, remote=False):
""" Given a plain text version of an In prompt, returns an HTML
continuation prompt.
"""
end_chars = '...: '
space_count = len(prompt.lstrip('\n')) - len(end_chars)
if remote:
space_count += len(self.other_output_prefix.rsplit('\n')[-1])
body = ' ' * space_count + end_chars
return '<span class="in-prompt">%s</span>' % body
def _make_out_prompt(self, number, remote=False):
""" Given a prompt number, returns an HTML Out prompt.
"""
try:
body = self.out_prompt % number
except TypeError:
# allow out_prompt to leave out number, e.g. '<<< '
from xml.sax.saxutils import escape
body = escape(self.out_prompt)
if remote:
body = self.other_output_prefix + body
return '<span class="out-prompt">%s</span>' % body
#------ Payload handlers --------------------------------------------------
# Payload handlers with a generic interface: each takes the opaque payload
# dict, unpacks it and calls the underlying functions with the necessary
# arguments.
def _handle_payload_edit(self, item):
self._edit(item['filename'], item['line_number'])
def _handle_payload_exit(self, item):
self._keep_kernel_on_exit = item['keepkernel']
self.exit_requested.emit(self)
def _handle_payload_next_input(self, item):
self.input_buffer = item['text']
def _handle_payload_page(self, item):
# Since the plain text widget supports only a very small subset of HTML
# and we have no control over the HTML source, we only page HTML
# payloads in the rich text widget.
data = item['data']
if 'text/html' in data and self.kind == 'rich':
self._page(data['text/html'], html=True)
else:
self._page(data['text/plain'], html=False)
#------ Trait change handlers --------------------------------------------
@observe('style_sheet')
def _style_sheet_changed(self, changed=None):
""" Set the style sheets of the underlying widgets.
"""
self.setStyleSheet(self.style_sheet)
if self._control is not None:
self._control.document().setDefaultStyleSheet(self.style_sheet)
if self._page_control is not None:
self._page_control.document().setDefaultStyleSheet(self.style_sheet)
@observe('syntax_style')
def _syntax_style_changed(self, changed=None):
""" Set the style for the syntax highlighter.
"""
if self._highlighter is None:
# ignore premature calls
return
if self.syntax_style:
self._highlighter.set_style(self.syntax_style)
self._ansi_processor.set_background_color(self.syntax_style)
else:
self._highlighter.set_style_sheet(self.style_sheet)
#------ Trait default initializers -----------------------------------------
@default('banner')
def _banner_default(self):
return "Jupyter QtConsole {version}\n".format(version=__version__)
# Clobber IPythonWidget above:
class IPythonWidget(JupyterWidget):
"""Deprecated class; use JupyterWidget."""
def __init__(self, *a, **kw):
warn("IPythonWidget is deprecated; use JupyterWidget",
DeprecationWarning)
super(IPythonWidget, self).__init__(*a, **kw)
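# A minimal embedding sketch, not part of this module, following the common
# qtconsole pattern. It assumes a running Qt application and an installed
# 'python3' kernelspec, and is illustrative rather than exact.
#
#   from qtconsole.manager import QtKernelManager
#
#   km = QtKernelManager(kernel_name='python3')
#   km.start_kernel()
#   kc = km.client()
#   kc.start_channels()
#
#   widget = JupyterWidget()
#   widget.kernel_manager = km
#   widget.kernel_client = kc
#   widget.set_default_style('linux')  # dark-background styling, see above
#   widget.show()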
| 40.433775
| 93
| 0.58132
|
f73e26be5e8339103201e497c6e376922c92d858
| 13,682
|
py
|
Python
|
django/contrib/gis/db/models/fields.py
|
Perlence/django
|
4f7328ce8a35160d155c41d362c3d674f8ef4d2d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 4
|
2017-01-09T10:51:20.000Z
|
2020-06-30T14:00:41.000Z
|
django/contrib/gis/db/models/fields.py
|
Perlence/django
|
4f7328ce8a35160d155c41d362c3d674f8ef4d2d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 10
|
2016-05-19T21:54:42.000Z
|
2019-08-09T15:59:50.000Z
|
django/contrib/gis/db/models/fields.py
|
Perlence/django
|
4f7328ce8a35160d155c41d362c3d674f8ef4d2d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2016-08-02T20:16:08.000Z
|
2020-01-07T19:45:38.000Z
|
from collections import defaultdict, namedtuple
from django.contrib.gis import forms, gdal
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.geos import (
GeometryCollection, GEOSException, GEOSGeometry, LineString,
MultiLineString, MultiPoint, MultiPolygon, Point, Polygon,
)
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields import Field
from django.utils.translation import gettext_lazy as _
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = defaultdict(dict)
SRIDCacheEntry = namedtuple('SRIDCacheEntry', ['units', 'units_name', 'spheroid', 'geodetic'])
def get_srid_info(srid, connection):
"""
Return the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
from django.contrib.gis.gdal import SpatialReference
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
SpatialRefSys = None
alias, get_srs = (
(connection.alias, lambda srid: SpatialRefSys.objects.using(connection.alias).get(srid=srid).srs)
if SpatialRefSys else
(None, SpatialReference)
)
if srid not in _srid_cache[alias]:
srs = get_srs(srid)
units, units_name = srs.units
_srid_cache[alias][srid] = SRIDCacheEntry(
units=units,
units_name=units_name,
spheroid='SPHEROID["%s",%s,%s]' % (srs['spheroid'], srs.semi_major, srs.inverse_flattening),
geodetic=srs.geographic,
)
return _srid_cache[alias][srid]
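# Illustrative note, not part of Django: _srid_cache is keyed first by database
# alias and then by SRID, and each leaf is an SRIDCacheEntry. Shape only; the
# values below are examples for illustration.
#
#   _srid_cache['default'][4326] == SRIDCacheEntry(
#       units=..., units_name='degree',
#       spheroid='SPHEROID["WGS 84",6378137,298.257223563]',
#       geodetic=True,
#   )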
class BaseSpatialField(Field):
"""
The Base GIS Field.
It's used as a base class for GeometryField and RasterField. Defines
properties that are common to all GIS fields such as the characteristics
of the spatial reference system of the field.
"""
description = _("The base GIS field.")
empty_strings_allowed = False
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
"""
The initialization function for base spatial fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
super().__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Always include SRID for less fragility; include spatial index if it's
# not the default value.
kwargs['srid'] = self.srid
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
return name, path, args, kwargs
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def spheroid(self, connection):
return get_srid_info(self.srid, connection).spheroid
def units(self, connection):
return get_srid_info(self.srid, connection).units
def units_name(self, connection):
return get_srid_info(self.srid, connection).units_name
def geodetic(self, connection):
"""
Return true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return get_srid_info(self.srid, connection).geodetic
def get_placeholder(self, value, compiler, connection):
"""
Return the placeholder for the spatial column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value, compiler)
def get_srid(self, obj):
"""
Return the default SRID for the given geometry or raster, taking into
account the SRID set for the field. For example, if the input geometry
or raster doesn't have an SRID, then the SRID of the field will be
returned.
"""
srid = obj.srid # SRID of given geometry.
if srid is None or self.srid == -1 or (srid == -1 and self.srid != -1):
return self.srid
else:
return srid
def get_db_prep_value(self, value, connection, *args, **kwargs):
if value is None:
return None
return connection.ops.Adapter(
super().get_db_prep_value(value, connection, *args, **kwargs),
**({'geography': True} if self.geography and connection.ops.geography else {})
)
def get_raster_prep_value(self, value, is_candidate):
"""
Return a GDALRaster if conversion is successful, otherwise return None.
"""
if isinstance(value, gdal.GDALRaster):
return value
elif is_candidate:
try:
return gdal.GDALRaster(value)
except GDALException:
pass
elif isinstance(value, dict):
try:
return gdal.GDALRaster(value)
except GDALException:
raise ValueError("Couldn't create spatial object from lookup value '%s'." % value)
def get_prep_value(self, value):
obj = super().get_prep_value(value)
if obj is None:
return None
# When the input is not a geometry or raster, attempt to construct one
# from the given string input.
if isinstance(obj, GEOSGeometry):
pass
else:
# Check if input is a candidate for conversion to raster or geometry.
is_candidate = isinstance(obj, (bytes, str)) or hasattr(obj, '__geo_interface__')
# Try to convert the input to raster.
raster = self.get_raster_prep_value(obj, is_candidate)
if raster:
obj = raster
elif is_candidate:
try:
obj = GEOSGeometry(obj)
except (GEOSException, GDALException):
raise ValueError("Couldn't create spatial object from lookup value '%s'." % obj)
else:
raise ValueError('Cannot use object with type %s for a spatial lookup parameter.' % type(obj).__name__)
# Assigning the SRID value.
obj.srid = self.get_srid(obj)
return obj
class GeometryField(BaseSpatialField):
"""
The base Geometry field -- maps to the OpenGIS Specification Geometry type.
"""
description = _('The base Geometry field — maps to the OpenGIS Specification Geometry type.')
form_class = forms.GeometryField
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
geom_class = None
def __init__(self, verbose_name=None, dim=2, geography=False, *, extent=(-180.0, -90.0, 180.0, 90.0),
tolerance=0.05, **kwargs):
"""
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments:
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the dimension of the geometry field.
self.dim = dim
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = extent
self._tolerance = tolerance
super().__init__(verbose_name=verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Include kwargs if they're not the default values.
if self.dim != 2:
kwargs['dim'] = self.dim
if self.geography is not False:
kwargs['geography'] = self.geography
if self._extent != (-180.0, -90.0, 180.0, 90.0):
kwargs['extent'] = self._extent
if self._tolerance != 0.05:
kwargs['tolerance'] = self._tolerance
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, SpatialProxy(self.geom_class or GEOSGeometry, self, load_func=GEOSGeometry))
def formfield(self, **kwargs):
defaults = {
'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
**kwargs,
}
if self.dim > 2 and not getattr(defaults['form_class'].widget, 'supports_3d', False):
defaults.setdefault('widget', forms.Textarea)
return super().formfield(**defaults)
def select_format(self, compiler, sql, params):
"""
Return the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKB.
"""
return compiler.connection.ops.select % sql, params
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
geom_class = Point
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
geom_class = LineString
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
geom_class = Polygon
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
geom_class = MultiPoint
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
geom_class = MultiLineString
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
geom_class = MultiPolygon
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
geom_class = GeometryCollection
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
class ExtentField(Field):
"Used as a return value from an extent aggregate"
description = _("Extent Aggregate Field")
def get_internal_type(self):
return "ExtentField"
def select_format(self, compiler, sql, params):
select = compiler.connection.ops.select_extent
return select % sql if select else sql, params
class RasterField(BaseSpatialField):
"""
Raster field for GeoDjango -- evaluates into GDALRaster objects.
"""
description = _("Raster Field")
geom_type = 'RASTER'
geography = False
def _check_connection(self, connection):
# Make sure raster fields are used only on backends with raster support.
if not connection.features.gis_enabled or not connection.features.supports_raster:
raise ImproperlyConfigured('Raster fields require backends with raster support.')
def db_type(self, connection):
self._check_connection(connection)
return super().db_type(connection)
def from_db_value(self, value, expression, connection):
return connection.ops.parse_raster(value)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Raster object. For large querysets, the
# instantiation of all GDALRasters can potentially be expensive. This
# delays the instantiation of the objects to the moment of evaluation
# of the raster attribute.
setattr(cls, self.attname, SpatialProxy(gdal.GDALRaster, self))
def get_transform(self, name):
from django.contrib.gis.db.models.lookups import RasterBandTransform
try:
band_index = int(name)
return type(
'SpecificRasterBandTransform',
(RasterBandTransform,),
{'band_index': band_index}
)
except ValueError:
pass
return super().get_transform(name)
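# Typical usage sketch, not part of this module: declaring a geometry column on
# a model inside an installed GeoDjango app.
#
#   from django.contrib.gis.db import models
#
#   class City(models.Model):
#       name = models.CharField(max_length=50)
#       location = models.PointField(srid=4326)  # srid=4326 is also the default
#
# Lookup values passed to spatial filters, e.g.
# City.objects.filter(location__intersects='POINT(5 23)'), are coerced through
# get_prep_value() above, which accepts WKT strings as well as GEOSGeometry
# objects.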
| 35.816754
| 119
| 0.656044
|
486b8cc1bfb57047caf90aff322176dfb8e80733
| 2,896
|
py
|
Python
|
nicos_mlz/puma/setups/ecradle.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_mlz/puma/setups/ecradle.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_mlz/puma/setups/ecradle.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
description = 'Eulerian cradle'
group = 'lowlevel'
includes = ['system', 'motorbus1', 'motorbus2', 'motorbus5']
excludes = ['euler']
devices = dict(
st_echi = device('nicos.devices.vendor.ipc.Motor',
bus = 'motorbus2',
addr = 61,
slope = 200,
unit = 'deg',
abslimits = (-1000000, 1000000),
zerosteps = 500000,
visibility = (),
),
co_echi = device('nicos.devices.vendor.ipc.Coder',
bus = 'motorbus1',
addr = 130,
slope = -8192,
# zerosteps = 5334445,
zerosteps = 0,
unit = 'deg',
circular = -360,
visibility = (),
),
echi = device('nicos.devices.generic.Axis',
description = 'euler cradle rotation',
motor = 'st_echi',
coder = 'co_echi',
# offset = -189.99926762282576,
offset = 0,
maxtries = 10,
precision = 0.01,
loopdelay = 1,
),
st_echi1 = device('nicos.devices.vendor.ipc.Motor',
bus = 'motorbus2',
addr = 61,
slope = 1,
unit = 'deg',
abslimits = (1, 999999),
zerosteps = 0,
visibility = (),
),
echi1 = device('nicos.devices.generic.Axis',
description = 'euler cradle rotation',
motor = 'st_echi1',
# offset = -189.99926762282576,
abslimits = (1, 999999),
offset = 0,
maxtries = 10,
precision = 0.01,
loopdelay = 1,
),
st_ephi = device('nicos.devices.vendor.ipc.Motor',
bus = 'motorbus5',
addr = 84,
slope = -100,
unit = 'deg',
abslimits = (-1000000, 1000000),
zerosteps = 500000,
visibility = (),
),
co_ephi = device('nicos.devices.vendor.ipc.Coder',
bus = 'motorbus1',
addr = 136,
slope = 4096,
zerosteps = 0,
unit = 'deg',
circular = -360,
visibility = (),
confbyte = 148,
),
ephi = device('nicos.devices.generic.Axis',
description = 'euler cradle rotation',
motor = 'st_ephi',
coder = 'co_ephi',
offset = 0,
maxtries = 10,
precision = 0.01,
loopdelay = 1,
),
st_ephi1 = device('nicos.devices.vendor.ipc.Motor',
bus = 'motorbus5',
addr = 84,
slope = 1,
unit = 'deg',
abslimits = (1, 999999),
zerosteps = 0,
visibility = (),
),
ephi1 = device('nicos.devices.generic.Axis',
description = 'euler cradle rotation',
motor = 'st_ephi1',
offset = 0,
maxtries = 10,
precision = 0.01,
loopdelay = 1,
),
ec = device('nicos.devices.tas.ecradle.EulerianCradle',
description = 'Eulerian cradle',
cell = 'Sample',
tas = 'puma',
chi = 'echi',
omega = 'ephi'
),
)
| 25.857143
| 60
| 0.501381
|
278f545d11e23c71123536cc4e67d68553d8f8ff
| 8,381
|
py
|
Python
|
src/mist/api/clouds/views.py
|
cc-daveloper/mist.io_mist.api
|
d3f9b8d478f23bf811c0bc6d3078e512aa975f86
|
[
"Apache-2.0"
] | 1
|
2019-04-10T11:37:25.000Z
|
2019-04-10T11:37:25.000Z
|
src/mist/api/clouds/views.py
|
d-mo/mist.api
|
d3f9b8d478f23bf811c0bc6d3078e512aa975f86
|
[
"Apache-2.0"
] | 3
|
2021-04-07T23:15:17.000Z
|
2021-09-23T23:21:45.000Z
|
src/mist/api/clouds/views.py
|
cc-daveloper/mist.io_mist.api
|
d3f9b8d478f23bf811c0bc6d3078e512aa975f86
|
[
"Apache-2.0"
] | null | null | null |
import logging
from pyramid.response import Response
from mist.api.clouds.models import Cloud
from mist.api.auth.methods import auth_context_from_request
from mist.api.helpers import trigger_session_update
from mist.api.helpers import view_config, params_from_request
from mist.api.exceptions import BadRequestError
from mist.api.exceptions import RequiredParameterMissingError, NotFoundError
from mist.api.clouds.methods import filter_list_clouds, add_cloud_v_2
from mist.api.clouds.methods import rename_cloud as m_rename_cloud
from mist.api.clouds.methods import delete_cloud as m_delete_cloud
from mist.api.tag.methods import add_tags_to_resource
from mist.api import config
logging.basicConfig(level=config.PY_LOG_LEVEL,
format=config.PY_LOG_FORMAT,
datefmt=config.PY_LOG_FORMAT_DATE)
log = logging.getLogger(__name__)
OK = Response("OK", 200)
@view_config(route_name='api_v1_clouds', request_method='GET', renderer='json')
def list_clouds(request):
"""
Request a list of all added clouds.
READ permission required on cloud.
---
"""
auth_context = auth_context_from_request(request)
    # check the READ permission once up front instead of iterating over every cloud
auth_context.check_perm("cloud", "read", None)
return filter_list_clouds(auth_context)
@view_config(route_name='api_v1_clouds',
request_method='POST', renderer='json')
def add_cloud(request):
"""
Add a new cloud
Adds a new cloud to the user and returns the cloud_id
ADD permission required on cloud.
---
api_key:
type: string
api_secret:
type: string
apiurl:
type: string
docker_port:
type: string
machine_key:
type: string
machine_port:
type: string
machine_user:
type: string
provider:
description: The id of the cloud provider.
enum:
- vcloud
- bare_metal
- docker
- libvirt
- openstack
- vsphere
- ec2
- rackspace
- nephoscale
- digitalocean
- softlayer
- gce
- azure
- azure_arm
- linode
- onapp
- indonesian_vcloud
- hostvirtual
- vultr
required: true
type: string
remove_on_error:
type: string
tenant_name:
type: string
title:
description: The human readable title of the cloud.
required: true
type: string
"""
auth_context = auth_context_from_request(request)
cloud_tags = auth_context.check_perm("cloud", "add", None)
owner = auth_context.owner
params = params_from_request(request)
    # Strip whitespace from the start/end of string fields; stray spaces are often
    # included when pasting keys and would otherwise prevent a successful
    # connection with the cloud.
for key in params.keys():
if type(params[key]) in [unicode, str]:
params[key] = params[key].rstrip().lstrip()
# api_version = request.headers.get('Api-Version', 1)
title = params.get('title', '')
provider = params.get('provider', '')
if not provider:
raise RequiredParameterMissingError('provider')
monitoring = None
ret = add_cloud_v_2(owner, title, provider, params)
cloud_id = ret['cloud_id']
monitoring = ret.get('monitoring')
cloud = Cloud.objects.get(owner=owner, id=cloud_id)
# If insights enabled on org, set poller with half hour period.
if auth_context.org.insights_enabled:
cloud.ctl.set_polling_interval(1800)
if cloud_tags:
add_tags_to_resource(owner, cloud, cloud_tags.items())
c_count = Cloud.objects(owner=owner, deleted=None).count()
ret = cloud.as_dict()
ret['index'] = c_count - 1
if monitoring:
ret['monitoring'] = monitoring
return ret
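# Illustrative sketch (not part of the original module): a minimal params dict that
# add_cloud above would accept; all field values are hypothetical placeholders.
def _example_add_cloud_params():
    """Return an example request body for add_cloud (title and provider are required)."""
    return {
        'title': 'My EC2 cloud',         # human readable title of the cloud
        'provider': 'ec2',               # one of the provider ids listed in the docstring
        'api_key': 'EXAMPLE_KEY',        # placeholder credentials
        'api_secret': 'EXAMPLE_SECRET',
    }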
@view_config(route_name='api_v1_cloud_action', request_method='DELETE')
def delete_cloud(request):
"""
Delete a cloud
Deletes cloud with given cloud_id.
REMOVE permission required on cloud.
---
cloud:
in: path
required: true
type: string
"""
auth_context = auth_context_from_request(request)
cloud_id = request.matchdict['cloud']
try:
Cloud.objects.get(owner=auth_context.owner, id=cloud_id, deleted=None)
except Cloud.DoesNotExist:
raise NotFoundError('Cloud does not exist')
auth_context.check_perm('cloud', 'remove', cloud_id)
m_delete_cloud(auth_context.owner, cloud_id)
return OK
@view_config(route_name='api_v1_cloud_action', request_method='PUT')
def rename_cloud(request):
"""
Rename a cloud
Renames cloud with given cloud_id.
EDIT permission required on cloud.
---
cloud:
in: path
required: true
type: string
new_name:
      description: 'New name for the cloud'
type: string
"""
auth_context = auth_context_from_request(request)
cloud_id = request.matchdict['cloud']
try:
Cloud.objects.get(owner=auth_context.owner, id=cloud_id, deleted=None)
except Cloud.DoesNotExist:
raise NotFoundError('Cloud does not exist')
params = params_from_request(request)
new_name = params.get('new_name', '')
if not new_name:
raise RequiredParameterMissingError('new_name')
auth_context.check_perm('cloud', 'edit', cloud_id)
m_rename_cloud(auth_context.owner, cloud_id, new_name)
return OK
@view_config(route_name='api_v1_cloud_action', request_method='PATCH')
def update_cloud(request):
"""
UPDATE cloud with given cloud_id.
EDIT permission required on cloud.
Not all fields need to be specified, only the ones being modified
---
cloud:
in: path
required: true
type: string
"""
auth_context = auth_context_from_request(request)
cloud_id = request.matchdict['cloud']
try:
cloud = Cloud.objects.get(owner=auth_context.owner,
id=cloud_id, deleted=None)
except Cloud.DoesNotExist:
raise NotFoundError('Cloud does not exist')
params = params_from_request(request)
creds = params
if not creds:
raise BadRequestError("You should provide your new cloud settings")
auth_context.check_perm('cloud', 'edit', cloud_id)
log.info("Updating cloud: %s", cloud_id)
fail_on_error = params.pop('fail_on_error', True)
fail_on_invalid_params = params.pop('fail_on_invalid_params', True)
polling_interval = params.pop('polling_interval', None)
# Edit the cloud
cloud.ctl.update(fail_on_error=fail_on_error,
fail_on_invalid_params=fail_on_invalid_params, **creds)
try:
polling_interval = int(polling_interval)
except (ValueError, TypeError):
pass
else:
cloud.ctl.set_polling_interval(polling_interval)
log.info("Cloud with id '%s' updated successfully.", cloud.id)
trigger_session_update(auth_context.owner, ['clouds'])
return OK
@view_config(route_name='api_v1_cloud_action', request_method='POST')
def toggle_cloud(request):
"""
Toggle a cloud
Toggles cloud with given cloud_id.
EDIT permission required on cloud.
---
cloud:
in: path
required: true
type: string
new_state:
enum:
- '0'
- '1'
type: string
"""
auth_context = auth_context_from_request(request)
cloud_id = request.matchdict['cloud']
try:
cloud = Cloud.objects.get(owner=auth_context.owner,
id=cloud_id, deleted=None)
except Cloud.DoesNotExist:
raise NotFoundError('Cloud does not exist')
auth_context.check_perm('cloud', 'edit', cloud_id)
new_state = params_from_request(request).get('new_state', None)
dns_enabled = params_from_request(request).get('dns_enabled', None)
if new_state == '1':
cloud.ctl.enable()
elif new_state == '0':
cloud.ctl.disable()
elif new_state:
raise BadRequestError('Invalid cloud state')
if dns_enabled == 1:
cloud.ctl.dns_enable()
elif dns_enabled == 0:
cloud.ctl.dns_disable()
elif dns_enabled:
raise BadRequestError('Invalid DNS state')
if new_state is None and dns_enabled is None:
raise RequiredParameterMissingError('new_state or dns_enabled')
trigger_session_update(auth_context.owner, ['clouds'])
return OK
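# Illustrative sketch (assumption): building the body for toggle_cloud above; new_state
# is the string '1' to enable or '0' to disable, as the docstring's enum indicates.
def _example_toggle_cloud_body(enable=True):
    return {'new_state': '1' if enable else '0'}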
| 28.604096 | 79 | 0.673547 |
1deef6a5518d7404f568dc417326a691bc7e95d9 | 5,106 | py | Python | py_main/ReadData.py | ChampionApe/GamsPythonModels | aaa234b2627cda2b92e478e8e8503bf9778aebeb | ["MIT"] | null | null | null | py_main/ReadData.py | ChampionApe/GamsPythonModels | aaa234b2627cda2b92e478e8e8503bf9778aebeb | ["MIT"] | null | null | null | py_main/ReadData.py | ChampionApe/GamsPythonModels | aaa234b2627cda2b92e478e8e8503bf9778aebeb | ["MIT"] | null | null | null |
import os, numpy as np, pandas as pd, DataBase
def clean(db,clean_data):
for var in db.variables['variables']:
db[var] = db[var][(x not in clean_data for x in db[var])]
if np.nan in clean_data:
db[var] = db[var].dropna()
return db
class read_data:
def main(data,export_to=False,clean_data=[np.nan,'NA',0],components=['domestic','trade','tax','invest'],balanced_data_check=True):
"""
Read in production values/prices/quantities from 'data', and export to 'export_to'.
"""
dbs = {}
dbs['domestic'] = read_data.domestic(data)
if 'trade' in components:
dbs['trade'] = read_data.trade(data)
if 'tax' in components:
dbs['tax'] = read_data.tax(data)
if 'invest' in components:
dbs['invest'] = read_data.invest(data,dbs['domestic'])
db = read_data.mergedbs([db_i for db_i in dbs.values()])
clean(db,clean_data)
# Define dummies:
for var in db.variables['variables']:
dummy_name = 'd_'+var
db[dummy_name] = db[var].index
# Assert that data is balanced:
if balanced_data_check is True:
assert max(abs(db['vS'].groupby('s').sum()-db['vD'].groupby('s').sum()))<1e-9, "Data is not balanced."
# Read in prices:
if 'invest' in components:
db.read_from_excel(data['Production_p'], {'vars_panel': {'sheets': {'sec_goods': 2, 'sec_invest_S': 2, 'sec_invest_D': 2}, 'names': {}}})
else:
db.read_from_excel(data['Production_p'], {'vars_panel': {'sheets': {'sec_goods': 2}, 'names': {}}})
# clean data:
clean(db,clean_data)
# quantities:
db['qD'] = db['vD']/db['PwT']
db['qS'] = db['vS']/db['PwT']
if 'invest' in components:
db['qID'] = db['vID']/db['pID']
db['qID'].name = 'qID'
db['qIS'] = db['vIS']/db['pIS']
db['qIS'].name = 'qIS'
for x in ('qD','qS'):
db[x].name = x
# clean data:
clean(db,clean_data)
# export data:
if export_to is not False:
db.merge_internal()
db.db_Gdx.export(export_to)
return db
@staticmethod
def domestic(data):
""" Input/output table, domestic sectors/goods"""
db_dom = DataBase.py_db()
db_dom.read_from_excel(data['Production_v'],{'vars_panel': {'sheets': {'sec_domestic': 2}, 'names': {}}})
db_dom.upd_sets_from_vars()
db_dom['s_prod'] = db_dom['s']
return db_dom
@staticmethod
def trade(data):
""" Trade"""
db_trade = DataBase.py_db()
db_trade.read_from_excel(data['Production_v'],{'vars_panel': {'sheets': {'sec_trade': 2}, 'names': {}}})
db_trade.upd_sets_from_vars()
db_trade['n_for'] = db_trade['n']
db_trade['s_for'] = pd.Index(db_trade['n'],name='s')
return db_trade
@staticmethod
def tax(data):
""" taxes """
db_tax = DataBase.py_db()
db_tax.read_from_excel(data['Production_v'],{'vars_panel': {'sheets': {'sec_tax': 2}, 'names': {}}})
db_tax.upd_sets_from_vars()
db_tax['n_tax'] = db_tax['n']
return db_tax
@staticmethod
def invest(data,db_dom):
""" Investment components"""
db_invest = DataBase.py_db()
db_invest.read_from_excel(data['Production_v'],{'vars_panel': {'sheets': {'sec_invest_S': 2}, 'names': {}}})
db_invest.upd_sets_from_vars()
db_invest['n_dur'] = pd.Index(db_dom['n'].intersection(db_invest['itype']), name='n')
# Investment, demand components:
db_invest.read_from_excel(data['Production_v'],{'vars_panel': {'sheets': {'sec_invest_D': 2}, 'names': {}}})
db_invest.upd_sets_from_vars()
return db_invest
@staticmethod
def mergedbs(dbs):
db = DataBase.py_db(alias=pd.MultiIndex.from_tuples([('s','ss'), ('n','nn'),('n','nnn')]))
for db_i in dbs:
db.merge_dbs(db,db_i)
return db
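# Illustrative sketch (assumption): read_data.main expects 'data' to map the keys
# 'Production_v' and 'Production_p' to the value and price workbooks; the file names
# below are hypothetical placeholders.
def _example_read_data(folder):
    data = {'Production_v': folder + '/Production_v.xlsx',
            'Production_p': folder + '/Production_p.xlsx'}
    return read_data.main(data, export_to=False)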
def PE_from_GE(db_GE,setvalue,setname='s'):
db_new = DataBase.py_db()
if 'alias_set' in db_GE:
if not (len(db_GE['alias_set'])==1 and (db_GE['alias_set']==setname).any()):
db_new['alias_set'] = db_GE['alias_set'][db_GE['alias_set']!=setname]
db_new['alias_map2'] = db_GE['alias_map2'][~db_GE['alias_map2'].isin(db_GE.alias_all[setname])]
db_new['alias_'] = db_GE['alias_'][db_GE['alias_'].get_level_values(0)!=setname]
for set_ in (set(db_GE.sets['sets'])-set(db_GE.alias_all[setname])-set(setname)-set(['alias_set','alias_map2'])):
db_new[set_] = db_GE[set_]
for set_ in db_GE.sets['subsets']:
if set_ not in db_new and db_GE[set_].name!=setname:
db_new[set_] = db_GE[set_]
for set_ in db_GE.sets['mappings']:
if set_!='alias_':
db_new[set_] = db_GE[set_] if setname not in db_GE[set_].names else db_GE[set_][db_GE[set_].get_level_values(setname)!=setvalue].droplevel(level=setname).unique()
for scalar in db_GE.variables['scalar_variables']:
db_new[scalar] = db_GE[scalar]
for scalar in db_GE.parameters['scalar_parameters']:
db_new[scalar] = db_GE[scalar]
for var in db_GE.variables['variables']:
db_new[var] = db_GE[var] if setname not in db_GE[var].index.names else db_GE[var][db_GE[var].index.get_level_values(setname)==setvalue].droplevel(setname)
db_new[var].attrs['type'] = 'variable'
for par in db_GE.parameters['parameters']:
db_new[par] = db_GE[par] if setname not in db_GE[par].index.names else db_GE[par][db_GE[par].index.get_level_values(setname)==setvalue].droplevel(setname)
db_new[par].attrs['type'] = 'parameter'
return db_new
| 39.581395 | 165 | 0.675088 |
c214a33169eeadc2e30af2157f4c1efd2be384a9 | 13,969 | py | Python | mijisou-devel/searx/preferences.py | xu1991/seartest | beb2d5e282d88b29544d2d17add2be6b97454ec1 | ["CC0-1.0"] | 1 | 2020-12-24T16:24:31.000Z | 2020-12-24T16:24:31.000Z | mijisou-devel/searx/preferences.py | xu1991/seartest | beb2d5e282d88b29544d2d17add2be6b97454ec1 | ["CC0-1.0"] | 6 | 2021-03-31T19:23:49.000Z | 2021-12-13T20:17:36.000Z | mijisou-devel/searx/preferences.py | xu1991/seartest | beb2d5e282d88b29544d2d17add2be6b97454ec1 | ["CC0-1.0"] | 1 | 2020-09-05T11:45:21.000Z | 2020-09-05T11:45:21.000Z |
from base64 import urlsafe_b64encode, urlsafe_b64decode
from zlib import compress, decompress
from sys import version
from searx import settings, autocomplete
from searx.languages import language_codes as languages
from searx.url_utils import parse_qs, urlencode
if version[0] == '3':
unicode = str
COOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 5 # 5 years
LANGUAGE_CODES = [l[0] for l in languages]
DISABLED = 0
ENABLED = 1
DOI_RESOLVERS = list(settings['doi_resolvers'])
class MissingArgumentException(Exception):
pass
class ValidationException(Exception):
pass
class Setting(object):
"""Base class of user settings"""
def __init__(self, default_value, **kwargs):
super(Setting, self).__init__()
self.value = default_value
for key, value in kwargs.items():
setattr(self, key, value)
self._post_init()
def _post_init(self):
pass
def parse(self, data):
self.value = data
def get_value(self):
return self.value
def save(self, name, resp):
resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)
class StringSetting(Setting):
"""Setting of plain string values"""
pass
class EnumStringSetting(Setting):
"""Setting of a value which can only come from the given choices"""
def _validate_selection(self, selection):
if selection not in self.choices:
raise ValidationException('Invalid value: "{0}"'.format(selection))
def _post_init(self):
if not hasattr(self, 'choices'):
raise MissingArgumentException('Missing argument: choices')
self._validate_selection(self.value)
def parse(self, data):
self._validate_selection(data)
self.value = data
class MultipleChoiceSetting(EnumStringSetting):
"""Setting of values which can only come from the given choices"""
def _validate_selections(self, selections):
for item in selections:
if item not in self.choices:
raise ValidationException('Invalid value: "{0}"'.format(selections))
def _post_init(self):
if not hasattr(self, 'choices'):
raise MissingArgumentException('Missing argument: choices')
self._validate_selections(self.value)
def parse(self, data):
if data == '':
self.value = []
return
elements = data.split(',')
self._validate_selections(elements)
self.value = elements
def parse_form(self, data):
self.value = []
for choice in data:
if choice in self.choices and choice not in self.value:
self.value.append(choice)
def save(self, name, resp):
resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)
class SearchLanguageSetting(EnumStringSetting):
"""Available choices may change, so user's value may not be in choices anymore"""
def parse(self, data):
if data not in self.choices and data != self.value:
# hack to give some backwards compatibility with old language cookies
data = str(data).replace('_', '-')
lang = data.split('-')[0]
if data in self.choices:
pass
elif lang in self.choices:
data = lang
else:
data = self.value
self.value = data
class MapSetting(Setting):
"""Setting of a value that has to be translated in order to be storable"""
def _post_init(self):
if not hasattr(self, 'map'):
raise MissingArgumentException('missing argument: map')
if self.value not in self.map.values():
raise ValidationException('Invalid default value')
def parse(self, data):
if data not in self.map:
raise ValidationException('Invalid choice: {0}'.format(data))
self.value = self.map[data]
self.key = data
def save(self, name, resp):
if hasattr(self, 'key'):
resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)
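# Illustrative sketch (assumption): a MapSetting translating the cookie strings '0'/'1'
# into booleans, mirroring how the image_proxy preference is declared further below.
def _example_map_setting():
    setting = MapSetting(False, map={'0': False, '1': True})
    setting.parse('1')          # stores the mapped value True and remembers the raw key
    return setting.get_value()  # -> True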
class SwitchableSetting(Setting):
""" Base class for settings that can be turned on && off"""
def _post_init(self):
self.disabled = set()
self.enabled = set()
if not hasattr(self, 'choices'):
raise MissingArgumentException('missing argument: choices')
def transform_form_items(self, items):
return items
def transform_values(self, values):
return values
def parse_cookie(self, data):
if data[DISABLED] != '':
self.disabled = set(data[DISABLED].split(','))
if data[ENABLED] != '':
self.enabled = set(data[ENABLED].split(','))
def parse_form(self, items):
items = self.transform_form_items(items)
self.disabled = set()
self.enabled = set()
for choice in self.choices:
if choice['default_on']:
if choice['id'] in items:
self.disabled.add(choice['id'])
else:
if choice['id'] not in items:
self.enabled.add(choice['id'])
def save(self, resp):
resp.set_cookie('disabled_{0}'.format(self.value), ','.join(self.disabled), max_age=COOKIE_MAX_AGE)
resp.set_cookie('enabled_{0}'.format(self.value), ','.join(self.enabled), max_age=COOKIE_MAX_AGE)
def get_disabled(self):
disabled = self.disabled
for choice in self.choices:
if not choice['default_on'] and choice['id'] not in self.enabled:
disabled.add(choice['id'])
return self.transform_values(disabled)
def get_enabled(self):
enabled = self.enabled
for choice in self.choices:
if choice['default_on'] and choice['id'] not in self.disabled:
enabled.add(choice['id'])
return self.transform_values(enabled)
class EnginesSetting(SwitchableSetting):
def _post_init(self):
super(EnginesSetting, self)._post_init()
transformed_choices = []
for engine_name, engine in self.choices.items():
for category in engine.categories:
transformed_choice = dict()
transformed_choice['default_on'] = not engine.disabled
transformed_choice['id'] = '{}__{}'.format(engine_name, category)
transformed_choices.append(transformed_choice)
self.choices = transformed_choices
def transform_form_items(self, items):
return [item[len('engine_'):].replace('_', ' ').replace(' ', '__') for item in items]
def transform_values(self, values):
if len(values) == 1 and next(iter(values)) == '':
return list()
transformed_values = []
for value in values:
engine, category = value.split('__')
transformed_values.append((engine, category))
return transformed_values
class PluginsSetting(SwitchableSetting):
def _post_init(self):
super(PluginsSetting, self)._post_init()
transformed_choices = []
for plugin in self.choices:
transformed_choice = dict()
transformed_choice['default_on'] = plugin.default_on
transformed_choice['id'] = plugin.id
transformed_choices.append(transformed_choice)
self.choices = transformed_choices
def transform_form_items(self, items):
return [item[len('plugin_'):] for item in items]
class Preferences(object):
"""Validates and saves preferences to cookies"""
def __init__(self, themes, categories, engines, plugins):
super(Preferences, self).__init__()
self.key_value_settings = {'categories': MultipleChoiceSetting(['general'], choices=categories + ['none']),
'language': SearchLanguageSetting(settings['search']['language'],
choices=LANGUAGE_CODES),
'locale': EnumStringSetting(settings['ui']['default_locale'],
choices=list(settings['locales'].keys()) + ['']),
'autocomplete': EnumStringSetting(settings['search']['autocomplete'],
choices=list(autocomplete.backends.keys()) + ['']),
'image_proxy': MapSetting(settings['server']['image_proxy'],
map={'': settings['server']['image_proxy'],
'0': False,
'1': True,
'True': True,
'False': False}),
'method': EnumStringSetting('GET', choices=('GET', 'POST')),
'safesearch': MapSetting(settings['search']['safe_search'], map={'0': 0,
'1': 1,
'2': 2}),
'theme': EnumStringSetting(settings['ui']['default_theme'], choices=themes),
'results_on_new_tab': MapSetting(True, map={'0': False,
'1': True,
'False': False,
'True': True}),
'doi_resolver': MultipleChoiceSetting(['oadoi.org'], choices=DOI_RESOLVERS),
'oscar-style': EnumStringSetting(
settings['ui'].get('theme_args', {}).get('oscar_style', 'logicodev'),
choices=['', 'logicodev', 'logicodev-dark', 'pointhi']),
}
self.engines = EnginesSetting('engines', choices=engines)
self.plugins = PluginsSetting('plugins', choices=plugins)
self.unknown_params = {}
def get_as_url_params(self):
settings_kv = {}
for k, v in self.key_value_settings.items():
if isinstance(v, MultipleChoiceSetting):
settings_kv[k] = ','.join(v.get_value())
else:
settings_kv[k] = v.get_value()
settings_kv['disabled_engines'] = ','.join(self.engines.disabled)
settings_kv['enabled_engines'] = ','.join(self.engines.enabled)
settings_kv['disabled_plugins'] = ','.join(self.plugins.disabled)
settings_kv['enabled_plugins'] = ','.join(self.plugins.enabled)
return urlsafe_b64encode(compress(urlencode(settings_kv).encode('utf-8'))).decode('utf-8')
def parse_encoded_data(self, input_data):
decoded_data = decompress(urlsafe_b64decode(input_data.encode('utf-8')))
self.parse_dict({x: y[0] for x, y in parse_qs(unicode(decoded_data)).items()})
def parse_dict(self, input_data):
for user_setting_name, user_setting in input_data.items():
if user_setting_name in self.key_value_settings:
self.key_value_settings[user_setting_name].parse(user_setting)
elif user_setting_name == 'disabled_engines':
self.engines.parse_cookie((input_data.get('disabled_engines', ''),
input_data.get('enabled_engines', '')))
elif user_setting_name == 'disabled_plugins':
self.plugins.parse_cookie((input_data.get('disabled_plugins', ''),
input_data.get('enabled_plugins', '')))
elif not any(user_setting_name.startswith(x) for x in [
'enabled_',
'disabled_',
'engine_',
'category_',
'plugin_']):
self.unknown_params[user_setting_name] = user_setting
def parse_form(self, input_data):
disabled_engines = []
enabled_categories = []
disabled_plugins = []
for user_setting_name, user_setting in input_data.items():
if user_setting_name in self.key_value_settings:
self.key_value_settings[user_setting_name].parse(user_setting)
elif user_setting_name.startswith('engine_'):
disabled_engines.append(user_setting_name)
elif user_setting_name.startswith('category_'):
enabled_categories.append(user_setting_name[len('category_'):])
elif user_setting_name.startswith('plugin_'):
disabled_plugins.append(user_setting_name)
else:
self.unknown_params[user_setting_name] = user_setting
self.key_value_settings['categories'].parse_form(enabled_categories)
self.engines.parse_form(disabled_engines)
self.plugins.parse_form(disabled_plugins)
# cannot be used in case of engines or plugins
def get_value(self, user_setting_name):
if user_setting_name in self.key_value_settings:
return self.key_value_settings[user_setting_name].get_value()
if user_setting_name in self.unknown_params:
return self.unknown_params[user_setting_name]
def save(self, resp):
for user_setting_name, user_setting in self.key_value_settings.items():
user_setting.save(user_setting_name, resp)
self.engines.save(resp)
self.plugins.save(resp)
for k, v in self.unknown_params.items():
resp.set_cookie(k, v, max_age=COOKIE_MAX_AGE)
return resp
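# Illustrative sketch (assumption): get_as_url_params() encodes the current settings as a
# urlsafe base64 blob of a zlib-compressed query string, and parse_encoded_data() is its
# intended inverse.
def _example_preferences_url_blob(preferences):
    blob = preferences.get_as_url_params()  # compact, URL-safe representation of the settings
    preferences.parse_encoded_data(blob)    # re-applies the encoded settings
    return blob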
| 40.140805 | 120 | 0.569046 |
8aa0b1bbfb97d81f3f4da7f244c3d2f5412b3a65 | 2,934 | py | Python | src/oci/analytics/models/virtual_cloud_network.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | src/oci/analytics/models/virtual_cloud_network.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | src/oci/analytics/models/virtual_cloud_network.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class VirtualCloudNetwork(object):
"""
Virtual Cloud Network definition.
"""
def __init__(self, **kwargs):
"""
Initializes a new VirtualCloudNetwork object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this VirtualCloudNetwork.
:type id: str
:param whitelisted_ips:
The value to assign to the whitelisted_ips property of this VirtualCloudNetwork.
:type whitelisted_ips: list[str]
"""
self.swagger_types = {
'id': 'str',
'whitelisted_ips': 'list[str]'
}
self.attribute_map = {
'id': 'id',
'whitelisted_ips': 'whitelistedIps'
}
self._id = None
self._whitelisted_ips = None
@property
def id(self):
"""
**[Required]** Gets the id of this VirtualCloudNetwork.
The Virtual Cloud Network OCID.
:return: The id of this VirtualCloudNetwork.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this VirtualCloudNetwork.
The Virtual Cloud Network OCID.
:param id: The id of this VirtualCloudNetwork.
:type: str
"""
self._id = id
@property
def whitelisted_ips(self):
"""
Gets the whitelisted_ips of this VirtualCloudNetwork.
        Source IP addresses or IP address ranges for ingress rules.
:return: The whitelisted_ips of this VirtualCloudNetwork.
:rtype: list[str]
"""
return self._whitelisted_ips
@whitelisted_ips.setter
def whitelisted_ips(self, whitelisted_ips):
"""
Sets the whitelisted_ips of this VirtualCloudNetwork.
        Source IP addresses or IP address ranges for ingress rules.
:param whitelisted_ips: The whitelisted_ips of this VirtualCloudNetwork.
:type: list[str]
"""
self._whitelisted_ips = whitelisted_ips
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
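# Illustrative sketch (assumption): the init_model_state_from_kwargs decorator allows the
# model to be populated directly from keyword arguments; the OCID below is a placeholder.
def _example_virtual_cloud_network():
    vcn = VirtualCloudNetwork(id='ocid1.vcn.oc1..exampleuniqueid',
                              whitelisted_ips=['10.0.0.0/16'])
    return vcn.id, vcn.whitelisted_ips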
| 28.764706 | 245 | 0.639059 |
753ac3dcb7676f90afc23b493eaab5283806af61 | 3,417 | py | Python | tests/test_universal_kepler.py | benitocm/my-orbits | a8ca434af3295c8bcb04bb43fc9fb703deda0087 | ["Apache-2.0"] | 1 | 2021-10-18T13:23:21.000Z | 2021-10-18T13:23:21.000Z | tests/test_universal_kepler.py | benitocm/my-orbits | a8ca434af3295c8bcb04bb43fc9fb703deda0087 | ["Apache-2.0"] | null | null | null | tests/test_universal_kepler.py | benitocm/my-orbits | a8ca434af3295c8bcb04bb43fc9fb703deda0087 | ["Apache-2.0"] | null | null | null |
"""
This module contains tests for the two-body and universal-Kepler ephemeris calculations.
"""
# Standard library imports
# Third party imports
from pytest import approx
#https://www.scivision.dev/pytest-approx-equal-assert-allclose/
import numpy as np
from pathlib import Path
import sys
# Local application imports
from myorbit.util.timeut import EQX_B1950, EQX_J2000
import myorbit.data_catalog as dc
from myorbit.ephemeris_input import EphemrisInput
import myorbit.util.timeut as ut
from myorbit.two_body import calc_eph_twobody, calc_eph_minor_body_perturbed, calc_eph_twobody_universal
from myorbit.pert_cowels import calc_eph_by_cowells
from myorbit.pert_enckes import calc_eph_by_enckes
# The configuration file is shared between general config and logging config
CONFIG_INI=Path(__file__).resolve().parents[1].joinpath('conf','config.ini')
print (CONFIG_INI)
# For logging configuration
import logging.config
logging.config.fileConfig(CONFIG_INI, disable_existing_loggers=False)
from common import check_df, TEST_DATA_PATH
# Because the Enckes calculations take a lot of time, this flag controls
# whether they are run at all
TEST_ENCKES = False
# The predictions for this one are very bad (1 minute precision)
def test_C_2007_M5_SOHO():
fn = TEST_DATA_PATH.joinpath('jpl_C_2007_M5_SOHO.csv')
exp_df = dc.read_jpl_data(fn)
EXP_DIFF = 492319
EXP_DIFF_UNI = 492301
EXP_DIFF_COWELLS = 257628.2
EXP_DIFF_ENKES = 243121.9
FUNC_NAME=sys._getframe().f_code.co_name
obj=dc.C_2007_M5_SOHO
eph = EphemrisInput(from_date="2006.04.01.0",
to_date = "2008.09.01.0",
step_dd_hh_hhh = "02 00.0",
equinox_name = EQX_J2000)
df = calc_eph_twobody(obj, eph)
method=FUNC_NAME+":calc_eph_twobody"
check_df(df, exp_df, EXP_DIFF, method)
dfu = calc_eph_twobody_universal(obj, eph)
method=FUNC_NAME+":calc_eph_twobody_universal"
check_df(dfu, exp_df, EXP_DIFF_UNI,method)
dfc = calc_eph_by_cowells(obj, eph)
method=FUNC_NAME+":calc_eph_by_cowells"
check_df(dfc, exp_df, EXP_DIFF_COWELLS,method)
if TEST_ENCKES:
dfc = calc_eph_by_enckes(obj, eph)
method=FUNC_NAME+":calc_eph_by_enckes"
check_df(dfc, exp_df, EXP_DIFF_ENKES,method)
def test_C_2003_M3_SOHO():
fn = TEST_DATA_PATH.joinpath('jpl_C_2003_M3_SOHO.csv')
exp_df = dc.read_jpl_data(fn)
EXP_DIFF = 74286.96
EXP_DIFF_UNI = 74285.66
EXP_DIFF_COWELLS = 38009.9
EXP_DIFF_ENKES = 37911.4
FUNC_NAME=sys._getframe().f_code.co_name
obj= dc.C_2003_M3_SOHO
eph = EphemrisInput(from_date="2001.03.01.0",
to_date = "2005.08.31.0",
step_dd_hh_hhh = "02 00.0",
equinox_name = EQX_J2000)
df = calc_eph_twobody(obj, eph)
method=FUNC_NAME+":calc_eph_twobody"
check_df(df, exp_df, EXP_DIFF, method)
dfu = calc_eph_twobody_universal(obj, eph)
method=FUNC_NAME+":calc_eph_twobody_universal"
check_df(dfu, exp_df, EXP_DIFF_UNI,method)
dfc = calc_eph_by_cowells(obj, eph)
method=FUNC_NAME+":calc_eph_by_cowells"
check_df(dfc, exp_df, EXP_DIFF_COWELLS,method)
if TEST_ENCKES:
dfc = calc_eph_by_enckes(obj, eph)
method=FUNC_NAME+":calc_eph_by_enckes"
check_df(dfc, exp_df, EXP_DIFF_ENKES,method)
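# Illustrative note (assumption): a single comparison can be run with pytest's -k filter,
# e.g. `pytest tests/test_universal_kepler.py -k C_2003_M3`; the slow Enckes checks only
# execute when TEST_ENCKES above is set to True.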
| 32.235849 | 104 | 0.707638 |
04aa5acd8050c5e0db0907ab5a9bb1ca3cfbbe93 | 1,664 | py | Python | _validate_init.py | potter420/dash-editor-components | 871c2f3677259c5563797eeada127dbc4d8a95b6 | ["MIT"] | 26 | 2019-09-19T04:43:23.000Z | 2021-08-09T14:17:16.000Z | _validate_init.py | potter420/dash-editor-components | 871c2f3677259c5563797eeada127dbc4d8a95b6 | ["MIT"] | 8 | 2019-09-20T14:38:19.000Z | 2021-06-28T19:24:15.000Z | _validate_init.py | potter420/dash-editor-components | 871c2f3677259c5563797eeada127dbc4d8a95b6 | ["MIT"] | 3 | 2020-03-12T16:18:41.000Z | 2021-07-15T07:50:03.000Z |
"""
DO NOT MODIFY
This file is used to validate your publish settings.
"""
from __future__ import print_function
import os
import sys
import importlib
components_package = 'dash_editor_components'
components_lib = importlib.import_module(components_package)
missing_dist_msg = 'Warning {} was not found in `{}.__init__.{}`!!!'
missing_manifest_msg = '''
Warning {} was not found in `MANIFEST.in`!
It will not be included in the build!
'''
with open('MANIFEST.in', 'r') as f:
manifest = f.read()
def check_dist(dist, filename):
# Support the dev bundle.
if filename.endswith('dev.js'):
return True
return any(
filename in x
for d in dist
for x in (
[d.get('relative_package_path')]
if not isinstance(d.get('relative_package_path'), list)
else d.get('relative_package_path')
)
)
def check_manifest(filename):
return filename in manifest
def check_file(dist, filename):
if not check_dist(dist, filename):
print(
missing_dist_msg.format(filename, components_package, '_js_dist'),
file=sys.stderr
)
if not check_manifest(filename):
print(missing_manifest_msg.format(filename),
file=sys.stderr)
for cur, _, files in os.walk(components_package):
for f in files:
if f.endswith('js'):
# noinspection PyProtectedMember
check_file(components_lib._js_dist, f)
elif f.endswith('css'):
# noinspection PyProtectedMember
check_file(components_lib._css_dist, f)
elif not f.endswith('py'):
check_manifest(f)
| 24.470588 | 78 | 0.644832 |
bdc6c9902c07a5515d5cb9e7531b2f1be3301e56 | 4,605 | py | Python | rp/IoT_SmartFridge.py | pranjalag19/freezls | 7d8dfab659b9e09c351fc9e68d995d4de6e774c1 | ["Apache-2.0"] | 3 | 2021-02-20T08:38:50.000Z | 2021-11-01T16:54:30.000Z | rp/IoT_SmartFridge.py | pranjalag19/freezls | 7d8dfab659b9e09c351fc9e68d995d4de6e774c1 | ["Apache-2.0"] | null | null | null | rp/IoT_SmartFridge.py | pranjalag19/freezls | 7d8dfab659b9e09c351fc9e68d995d4de6e774c1 | ["Apache-2.0"] | 1 | 2021-06-09T12:15:58.000Z | 2021-06-09T12:15:58.000Z |
# import necessary packages
import RPi.GPIO as GPIO
import Adafruit_DHT
from Lcd import *
from imutils.video import VideoStream
from pyzbar import pyzbar
import cv2
import imutils
import multiprocessing as mp
import API
import datetime
import time
# setup GPIO mode
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# GPIO pin number
pin_op_sensor = 17 # optical_path_sensor. GPIO17, no.11
pin_ht_sensor = 23 # humidity_temperature_sensor. GPIO23, no.16
pin_led_green = 19 # GPIO19, no.35
pin_led_red = 26 # GPIO26, no.37
pin_buzzer = 10 # GPIO10, no.19
door_close_time = time.time()
# Initialize LCD
lcd = Lcd()
lcd.clear()
#
def get_pin_state(pin):
GPIO.setup(pin, GPIO.IN)
return GPIO.input(pin)
def set_pin_state(pin, state):
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, state)
def beep(pin, x_time):
GPIO.setup(pin, GPIO.OUT)
Buzz = GPIO.PWM(pin,200)
Buzz.start(60)
time.sleep(x_time)
Buzz.stop()
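# Illustrative sketch: using the GPIO helpers above, e.g. flashing the green LED for one
# second (the pin constants and imports are the ones defined at the top of this script).
def _example_flash_green_led():
    set_pin_state(pin_led_green, GPIO.HIGH)
    time.sleep(1)
    set_pin_state(pin_led_green, GPIO.LOW)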
#
def sensor_task():
while True:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, pin_ht_sensor)
if humidity is not None and temperature is not None:
sensor_data = {
'timestamp': int(time.time()*1000),
'temperature': float(temperature),
'humidity':float(humidity/100.0)
}
# upload sensor data
API.upload_sensor_data(sensor_data)
print(datetime.datetime.now(), 'uploaded sensor data')
def monitor_door():
global door_close_time
while True:
door_is_closed = get_pin_state(pin_op_sensor)
if door_is_closed == 0:
time_interval = time.time() - door_close_time
# light on
if time_interval < 60:
set_pin_state(pin_led_green, GPIO.HIGH)
set_pin_state(pin_led_red, GPIO.LOW)
else:
set_pin_state(pin_led_green, GPIO.LOW)
set_pin_state(pin_led_red, GPIO.HIGH)
# lcd display
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, pin_ht_sensor)
lcd.display_string(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),1)
if humidity is not None and temperature is not None:
lcd.display_string(("Temperature: " + str(temperature) + "*C"),2)
lcd.display_string(("Humidity: " + str(humidity) + " %"),3)
else:
door_close_time = time.time()
# light off
set_pin_state(pin_led_green, GPIO.LOW)
set_pin_state(pin_led_red, GPIO.LOW)
# lcd clear
lcd.clear()
def monitor_camera():
vs = None
while True:
camera_status = 1 if get_pin_state(pin_op_sensor) == 0 else 0
if camera_status == 1:
if vs == None:
vs = VideoStream(usePiCamera=True).start()
time.sleep(1.0)
# print("[INFO] .......starting video stream...")
frame = vs.read()
frame = imutils.resize(frame, width=400)
#cv2.imshow("QRcode Scanner", frame)
#key = cv2.waitKey(1) & 0xFF
qrcodes = pyzbar.decode(frame)
for qrcode in qrcodes:
qrcodeData = qrcode.data.decode("utf-8")
beep(pin_buzzer, 0.3)
ingredient = {
'qrcode': qrcodeData
}
# check item name and status
item_name_status = API.check_qr_code(qrcodeData)
item_name = "{:<6}".format(str(item_name_status[0]))
print("3...", datetime.datetime.now(), item_name_status)
# remove/add item
if item_name_status[1] == True:
API.remove_ingredient(ingredient)
#lcd.display_string("Removed item: " + str(item_name_status[0]), 4)
lcd.display_string("Removed item: " + str(item_name), 4)
elif item_name_status[1] == False:
API.add_ingredient(ingredient)
#lcd.display_string("Added item: " + str(item_name_status[0]), 4)
lcd.display_string("Added item: " + str(item_name), 4)
else:
pass
else:
if vs is not None:
#print("[INFO] cleaning up...")
#cv2.destroyAllWindows()
vs.stop()
vs = None
# setup & run a list of processes
targets = [sensor_task, monitor_door, monitor_camera]
for i in targets:
mp.Process(target=i,args=()).start()
| 36.259843 | 94 | 0.578502 |
ce5a13456c8ed8679043cf87ea77107615a7c8c7 | 95,952 | py | Python | python/getdist/mcsamples.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | ["MIT"] | null | null | null | python/getdist/mcsamples.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | ["MIT"] | null | null | null | python/getdist/mcsamples.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | ["MIT"] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import os
import glob
import logging
import copy
import pickle
import math
import time
import numpy as np
from scipy.stats import norm
import getdist
from getdist import chains, types, covmat, ParamInfo, IniFile
from getdist.densities import Density1D, Density2D
from getdist.chains import Chains, chainFiles, lastModified
from getdist.convolve import convolve1D, convolve2D
import getdist.kde_bandwidth as kde
from getdist.parampriors import ParamBounds
import six
pickle_version = 21
class MCSamplesError(Exception):
"""
An Exception that is raised when there is an error inside the MCSamples class.
"""
pass
class SettingError(MCSamplesError):
"""
An Exception that indicates bad settings.
"""
pass
class ParamError(MCSamplesError):
"""
An Exception that indicates a bad parameter.
"""
pass
def loadMCSamples(file_root, ini=None, jobItem=None, no_cache=False, settings={}, dist_settings={}):
"""
Loads a set of samples from a file or files.
Sample files are plain text (*file_root.txt*) or a set of files (*file_root_1.txt*, *file_root_2.txt*, etc.).
Auxiliary files **file_root.paramnames** gives the parameter names
and (optionally) **file_root.ranges** gives hard prior parameter ranges.
For a description of the various analysis settings and default values see
`analysis_defaults.ini <http://getdist.readthedocs.org/en/latest/analysis_settings.html>`_.
:param file_root: The root name of the files to read (no extension)
:param ini: The name of a .ini file with analysis settings to use
:param jobItem: an optional grid jobItem instance for a CosmoMC grid output
    :param no_cache: Indicates whether or not loaded samples should be cached in a pickle
:param settings: dictionary of analysis settings to override defaults
:param dist_settings: (old) alias for settings
:return: The :class:`MCSamples` instance
"""
if settings and dist_settings: raise ValueError('Use settings or dist_settings')
if dist_settings: settings = dist_settings
files = chainFiles(file_root)
path, name = os.path.split(file_root)
path = getdist.cache_dir or path
if not os.path.exists(path): os.mkdir(path)
cachefile = os.path.join(path, name) + '.py_mcsamples'
samples = MCSamples(file_root, jobItem=jobItem, ini=ini, settings=settings)
allfiles = files + [file_root + '.ranges', file_root + '.paramnames', file_root + '.properties.ini']
if not no_cache and os.path.exists(cachefile) and lastModified(allfiles) < os.path.getmtime(cachefile):
try:
with open(cachefile, 'rb') as inp:
cache = pickle.load(inp)
if cache.version == pickle_version and samples.ignore_rows == cache.ignore_rows:
changed = len(samples.contours) != len(cache.contours) or \
np.any(np.array(samples.contours) != np.array(cache.contours))
cache.updateSettings(ini=ini, settings=settings, doUpdate=changed)
return cache
except:
pass
if not len(files):
raise IOError('No chains found: ' + file_root)
samples.readChains(files)
samples.savePickle(cachefile)
return samples
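# Illustrative sketch (assumption): loading chains by root name with a custom burn-in
# fraction; 'chains/test' is a placeholder file root.
def _example_load_samples(file_root='chains/test'):
    samples = loadMCSamples(file_root, settings={'ignore_rows': 0.3})
    return samples.getNumSampleSummaryText()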
class Kernel1D(object):
def __init__(self, winw, h):
self.winw = winw
self.h = h
self.x = np.arange(-winw, winw + 1)
Win = np.exp(-(self.x / h) ** 2 / 2.)
self.Win = Win / np.sum(Win)
# =============================================================================
class MCSamples(Chains):
"""
The main high-level class for a collection of parameter samples.
Derives from :class:`.chains.Chains`, adding high-level functions including Kernel Density estimates, parameter ranges and custom settings.
"""
def __init__(self, root=None, jobItem=None, ini=None, settings=None, ranges=None, **kwargs):
"""
For a description of the various analysis settings and default values see
`analysis_defaults.ini <http://getdist.readthedocs.org/en/latest/analysis_settings.html>`_.
:param root: A root file name when loading from file
:param jobItem: Optional paramgrid.batchjob.jobItem instance if a member of a parameter grid
:param ini: a .ini file to use for custom analysis settings
:param settings: a dictionary of custom analysis settings
:param ranges: a dictionary giving any additional hard prior bounds for parameters, eg. {'x':[0, 1], 'y':[None,2]}
:param kwargs: keyword arguments passed to inherited classes, e.g. to manually make a samples object from sample arrays in memory:
- **paramNamesFile**: optional name of .paramnames file with parameter names
- **samples**: array of parameter values for each sample, passed to :meth:`setSamples`
- **weights**: array of weights for samples
- **loglikes**: array of -log(Likelihood) for samples
- **names**: list of names for the parameters
- **labels**: list of latex labels for the parameters
- **ignore_rows**:
            - if int >=1: The number of rows to skip at the beginning of the file
- if float <1: The fraction of rows to skip at the beginning of the file
- **name_tag**: a name tag for this instance
"""
Chains.__init__(self, root, jobItem=jobItem, **kwargs)
self.version = pickle_version
self.markers = {}
self.ini = ini
self._readRanges()
if ranges:
self.setRanges(ranges)
# Other variables
self.range_ND_contour = 1
self.range_confidence = 0.001
self.num_bins = 128
self.fine_bins = 1024
self.num_bins_2D = 40
self.fine_bins_2D = 256
self.smooth_scale_1D = -1.
self.smooth_scale_2D = -1.
self.boundary_correction_order = 1
self.mult_bias_correction_order = 1
self.max_corr_2D = 0.95
self.contours = np.array([0.68, 0.95])
self.max_scatter_points = 2000
self.credible_interval_threshold = 0.05
self.shade_likes_is_mean_loglikes = False
self.likeStats = None
self.max_mult = 0
self.mean_mult = 0
self.plot_data_dir = ""
if root:
self.rootname = os.path.basename(root)
else:
self.rootname = ""
self.rootdirname = ""
self.indep_thin = 0
self.ignore_rows = float(kwargs.get('ignore_rows', 0))
self.subplot_size_inch = 4.0
self.subplot_size_inch2 = self.subplot_size_inch
self.subplot_size_inch3 = 6.0
self.plot_output = getdist.default_plot_output
self.out_dir = ""
self.max_split_tests = 4
self.force_twotail = False
self.corr_length_thin = 0
self.corr_length_steps = 15
self.converge_test_limit = 0.95
self.done_1Dbins = False
self.density1D = dict()
self.updateSettings(ini=ini, settings=settings)
if root and os.path.exists(root + '.properties.ini'):
# any settings in properties.ini override settings for this specific chain
self.properties = IniFile(root + '.properties.ini')
self._setBurnOptions(self.properties)
if self.properties.bool('burn_removed', False):
self.ignore_frac = 0.
self.ignore_lines = 0
else:
self.properties = None
def setRanges(self, ranges):
"""
        Sets parameter ranges (e.g. hard priors on positivity). If a min or max value is None, it is assumed to be unbounded.
:param ranges: A list or a tuple of [min,max] values for each parameter,
or a dictionary giving [min,max] values for specific parameter names
"""
if isinstance(ranges, (list, tuple)):
for i, minmax in enumerate(ranges):
self.ranges.setRange(self.parName(i), minmax)
elif isinstance(ranges, dict):
for key, value in six.iteritems(ranges):
self.ranges.setRange(key, value)
else:
raise ValueError('MCSamples ranges parameter must be list or dict')
self.needs_update = True
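    # Illustrative usage (assumption; the parameter names 'x' and 'y' are hypothetical):
    #     samples.setRanges({'x': [0, None]})      # dict form: x >= 0, unbounded above
    #     samples.setRanges([[0, 1], [None, 2]])   # list form: one [min, max] per parameter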
def parName(self, i, starDerived=False):
"""
Gets the name of i'th parameter
:param i: The index of the parameter
:param starDerived: add a star at the end of the name if the parameter is derived
:return: The name of the parameter (string)
"""
return self.paramNames.name(i, starDerived)
def parLabel(self, i):
"""
Gets the latex label of the parameter
:param i: The index or name of a parameter.
:return: The parameter's label.
"""
if isinstance(i, six.string_types):
return self.paramNames.parWithName(i).label
else:
return self.paramNames.names[i].label
def _setBurnOptions(self, ini):
"""
Sets the ignore_rows value from configuration.
:param ini: The :class:`.inifile.IniFile` to be used
"""
ini.setAttr('ignore_rows', self)
self.ignore_lines = int(self.ignore_rows)
if not self.ignore_lines:
self.ignore_frac = self.ignore_rows
else:
self.ignore_frac = 0
def initParameters(self, ini):
"""
Initializes settings.
Gets parameters from :class:`~.inifile.IniFile`.
:param ini: The :class:`~.inifile.IniFile` to be used
"""
self._setBurnOptions(ini)
ini.setAttr('range_ND_contour', self)
ini.setAttr('range_confidence', self)
ini.setAttr('num_bins', self)
ini.setAttr('fine_bins', self)
ini.setAttr('num_bins_2D', self)
ini.setAttr('fine_bins_2D', self)
ini.setAttr('smooth_scale_1D', self)
ini.setAttr('smooth_scale_2D', self)
ini.setAttr('boundary_correction_order', self, 1)
ini.setAttr('mult_bias_correction_order', self, 1)
ini.setAttr('max_scatter_points', self)
ini.setAttr('credible_interval_threshold', self)
ini.setAttr('subplot_size_inch', self)
ini.setAttr('subplot_size_inch2', self)
ini.setAttr('subplot_size_inch3', self)
ini.setAttr('plot_output', self)
ini.setAttr('force_twotail', self)
if self.force_twotail: logging.warning('Computing two tail limits')
ini.setAttr('max_corr_2D', self)
if ini.hasKey('contours'):
ini.setAttr('contours', self)
elif ini.hasKey('num_contours'):
num_contours = ini.int('num_contours', 2)
self.contours = np.array([ini.float('contour' + str(i + 1)) for i in range(num_contours)])
# how small the end bin must be relative to max to use two tail
self.max_frac_twotail = []
for i, contour in enumerate(self.contours):
max_frac = np.exp(-1.0 * math.pow(norm.ppf((1 - contour) / 2), 2) / 2)
if ini:
max_frac = ini.float('max_frac_twotail' + str(i + 1), max_frac)
self.max_frac_twotail.append(max_frac)
ini.setAttr('converge_test_limit', self, self.contours[-1])
ini.setAttr('corr_length_thin', self)
ini.setAttr('corr_length_steps', self)
def _initLimits(self, ini=None):
bin_limits = ""
if ini: bin_limits = ini.string('all_limits', '')
self.markers = {}
for par in self.paramNames.names:
if bin_limits:
line = bin_limits
else:
line = ''
if ini and 'limits[%s]' % par.name in ini.params:
line = ini.string('limits[%s]' % par.name)
if line:
limits = line.split()
if len(limits) == 2:
self.ranges.setRange(par.name, limits)
par.limmin = self.ranges.getLower(par.name)
par.limmax = self.ranges.getUpper(par.name)
par.has_limits_bot = par.limmin is not None
par.has_limits_top = par.limmax is not None
if ini and 'marker[%s]' % par.name in ini.params:
line = ini.string('marker[%s]' % par.name)
if line:
self.markers[par.name] = float(line)
def updateSettings(self, settings=None, ini=None, doUpdate=True):
"""
Updates settings from a .ini file or dictionary
:param settings: The a dict containing settings to set, taking preference over any values in ini
:param ini: The name of .ini file to get settings from, or an :class:`~.inifile.IniFile` instance; by default uses current settings
:param doUpdate: True if should update internal computed values, False otherwise (e.g. if want to make other changes first)
"""
assert (settings is None or isinstance(settings, dict))
if not ini:
ini = self.ini
elif isinstance(ini, six.string_types):
ini = IniFile(ini)
else:
ini = copy.deepcopy(ini)
if not ini: ini = IniFile(getdist.default_getdist_settings)
if settings:
ini.params.update(settings)
self.ini = ini
if ini: self.initParameters(ini)
if doUpdate and self.samples is not None: self.updateBaseStatistics()
def readChains(self, chain_files):
"""
Loads samples from a list of files, removing burn in, deleting fixed parameters, and combining into one self.samples array
:param chain_files: The list of file names to read
:return: self.
"""
self.loadChains(self.root, chain_files)
if self.ignore_frac and (not self.jobItem or
(not self.jobItem.isImportanceJob and not self.jobItem.isBurnRemoved())):
self.removeBurnFraction(self.ignore_frac)
if chains.print_load_details: print('Removed %s as burn in' % self.ignore_frac)
else:
if chains.print_load_details: print('Removed no burn in')
self.deleteFixedParams()
# Make a single array for chains
self.makeSingle()
self.updateBaseStatistics()
return self
def updateBaseStatistics(self):
"""
Updates basic computed statistics (means, covariance etc), e.g. after a change in samples or weights
:return: self
"""
super(MCSamples, self).updateBaseStatistics()
mult_max = (self.mean_mult * self.numrows) / min(self.numrows // 2, 500)
outliers = np.sum(self.weights > mult_max)
if outliers != 0:
logging.warning('outlier fraction %s ', float(outliers) / self.numrows)
self.indep_thin = 0
self._setCov()
self.done_1Dbins = False
self.density1D = dict()
self._initLimits(self.ini)
# Get ND confidence region
self._setLikeStats()
return self
def makeSingleSamples(self, filename="", single_thin=None):
"""
Make file of weight-1 samples by choosing samples
with probability given by their weight.
        :param filename: The filename to write to; leave empty if no output file is needed
:param single_thin: factor to thin by; if not set generates as many samples as it can
:return: numpy array of selected weight-1 samples
"""
if single_thin is None:
single_thin = max(1, self.norm / self.max_mult / self.max_scatter_points)
rand = np.random.random_sample(self.numrows)
if filename:
with open(filename, 'w') as f:
for i, r in enumerate(rand):
if r <= self.weights[i] / self.max_mult / single_thin:
f.write("%16.7E" % 1.0)
f.write("%16.7E" % (self.loglikes[i]))
for j in range(self.n):
f.write("%16.7E" % (self.samples[i][j]))
f.write("\n")
else:
# return data
return self.samples[rand <= self.weights / (self.max_mult * single_thin)]
def writeThinData(self, fname, thin_ix, cool=1):
"""
Writes samples at thin_ix to file
:param fname: The filename to write to.
:param thin_ix: Indices of the samples to write
:param cool: if not 1, cools the samples by this factor
"""
nparams = self.samples.shape[1]
if cool != 1: logging.info('Cooled thinned output with temp: %s', cool)
MaxL = np.max(self.loglikes)
with open(fname, 'w') as f:
i = 0
for thin in thin_ix:
if cool != 1:
newL = self.loglikes[thin] * cool
f.write("%16.7E" % (
np.exp(-(newL - self.loglikes[thin]) - MaxL * (1 - cool))))
f.write("%16.7E" % newL)
                    for j in range(nparams):
f.write("%16.7E" % (self.samples[i][j]))
else:
f.write("%f" % 1.)
f.write("%f" % (self.loglikes[thin]))
                    for j in range(nparams):
f.write("%16.7E" % (self.samples[i][j]))
i += 1
print('Wrote ', len(thin_ix), ' thinned samples')
def getCovMat(self):
"""
Gets the CovMat instance containing covariance matrix for all the non-derived parameters
(for example useful for subsequent MCMC runs to orthogonalize the parameters)
:return: A :class:`~.covmat.CovMat` object holding the covariance
"""
nparamNonDerived = self.paramNames.numNonDerived()
return covmat.CovMat(matrix=self.fullcov[:nparamNonDerived, :nparamNonDerived],
paramNames=self.paramNames.list()[:nparamNonDerived])
def writeCovMatrix(self, filename=None):
"""
        Writes the covariance matrix of non-derived parameters to a file.
:param filename: The filename to write to; default is file_root.covmat
"""
filename = filename or self.rootdirname + ".covmat"
self.getCovMat().saveToFile(filename)
def writeCorrelationMatrix(self, filename=None):
"""
Write the correlation matrix to a file
        :param filename: The file to write to; if None, writes to file_root.corr
"""
filename = filename or self.rootdirname + ".corr"
np.savetxt(filename, self.getCorrelationMatrix(), fmt="%15.7E")
def getFractionIndices(self, weights, n):
"""
Calculates the indices of weights that split the weights into sets of equal 1/n fraction of the total weight
:param weights: array of weights
:param n: number of groups to split in to
:return: array of indices of the boundary rows in the weights array
"""
cumsum = np.cumsum(weights)
fraction_indices = np.append(np.searchsorted(cumsum, np.linspace(0, 1, n, endpoint=False) * self.norm),
self.weights.shape[0])
return fraction_indices
def PCA(self, params, param_map=None, normparam=None, writeDataToFile=False, filename=None, conditional_params=[]):
"""
        Perform principal component analysis (PCA). In other words,
get eigenvectors and eigenvalues for normalized variables
with optional (log modulus) mapping to find power law fits.
:param params: List of names of the parameters to use
:param param_map: A transformation to apply to parameter values;
A list or string containing either N (no transformation) or L (for log transform) for each parameter.
By default uses log if no parameter values cross zero
:param normparam: optional name of parameter to normalize result (i.e. this parameter will have unit power)
:param writeDataToFile: True if should write the output to file.
:param filename: The filename to write, by default root_name.PCA.
:param conditional_params: optional list of parameters to treat as fixed, i.e. for PCA conditional on fixed values of these parameters
:return: a string description of the output of the PCA
"""
logging.info('Doing PCA for %s parameters', len(params))
if len(conditional_params): logging.info('conditional %u fixed parameters', len(conditional_params))
PCAtext = 'PCA for parameters:\n'
params = [name for name in params if self.paramNames.parWithName(name)]
nparams = len(params)
indices = [self.index[param] for param in params]
conditional_params = [self.index[param] for param in conditional_params]
indices += conditional_params
if normparam:
if normparam in params:
normparam = params.index(normparam)
else:
normparam = -1
else:
normparam = -1
n = len(indices)
PCdata = self.samples[:, indices].copy()
PClabs = []
PCmean = np.zeros(n)
sd = np.zeros(n)
newmean = np.zeros(n)
newsd = np.zeros(n)
if param_map is None:
param_map = ''
for par in self.paramNames.parsWithNames(params):
self._initParamRanges(par.name)
if par.param_max < 0 or par.param_min < par.param_max - par.param_min:
param_map += 'N'
else:
param_map += 'L'
doexp = False
for i, parix in enumerate(indices):
if i < nparams:
label = self.parLabel(parix)
if param_map[i] == 'L':
doexp = True
PCdata[:, i] = np.log(PCdata[:, i])
PClabs.append("ln(" + label + ")")
elif param_map[i] == 'M':
doexp = True
PCdata[:, i] = np.log(-1.0 * PCdata[:, i])
PClabs.append("ln(-" + label + ")")
else:
PClabs.append(label)
PCAtext += "%10s :%s\n" % (str(parix + 1), str(PClabs[i]))
PCmean[i] = np.dot(self.weights, PCdata[:, i]) / self.norm
PCdata[:, i] -= PCmean[i]
sd[i] = np.sqrt(np.dot(self.weights, PCdata[:, i] ** 2) / self.norm)
if sd[i] != 0: PCdata[:, i] /= sd[i]
PCAtext += "\n"
PCAtext += 'Correlation matrix for reduced parameters\n'
correlationMatrix = np.ones((n, n))
for i in range(n):
for j in range(i):
correlationMatrix[j][i] = np.dot(self.weights, PCdata[:, i] * PCdata[:, j]) / self.norm
correlationMatrix[i][j] = correlationMatrix[j][i]
for i in range(nparams):
PCAtext += '%12s :' % params[i]
for j in range(n):
PCAtext += '%8.4f' % correlationMatrix[j][i]
PCAtext += '\n'
if len(conditional_params):
u = np.linalg.inv(correlationMatrix)
u = u[np.ix_(list(range(len(params))), list(range(len(params))))]
u = np.linalg.inv(u)
n = nparams
PCdata = PCdata[:, :nparams]
else:
u = correlationMatrix
evals, evects = np.linalg.eig(u)
isorted = evals.argsort()
u = np.transpose(evects[:, isorted]) # redefining u
PCAtext += '\n'
PCAtext += 'e-values of correlation matrix\n'
for i in range(n):
isort = isorted[i]
PCAtext += 'PC%2i: %8.4f\n' % (i + 1, evals[isort])
PCAtext += '\n'
PCAtext += 'e-vectors\n'
for j in range(n):
PCAtext += '%3i:' % (indices[j] + 1)
for i in range(n):
isort = isorted[i]
PCAtext += '%8.4f' % (evects[j][isort])
PCAtext += '\n'
if normparam != -1:
# Set so parameter normparam has exponent 1
for i in range(n):
u[i, :] = u[i, :] / u[i, normparam] * sd[normparam]
else:
# Normalize so main component has exponent 1
for i in range(n):
maxi = np.abs(u[i, :]).argmax()
u[i, :] = u[i, :] / u[i, maxi] * sd[maxi]
nrows = PCdata.shape[0]
for i in range(nrows):
PCdata[i, :] = np.dot(u, PCdata[i, :])
if doexp: PCdata[i, :] = np.exp(PCdata[i, :])
PCAtext += '\n'
        PCAtext += 'Principal components\n'
for i in range(n):
isort = isorted[i]
PCAtext += 'PC%i (e-value: %f)\n' % (i + 1, evals[isort])
for j in range(n):
label = self.parLabel(indices[j])
if param_map[j] in ['L', 'M']:
expo = "%f" % (1.0 / sd[j] * u[i][j])
if param_map[j] == "M":
div = "%f" % (-np.exp(PCmean[j]))
else:
div = "%f" % (np.exp(PCmean[j]))
PCAtext += '[%f] (%s/%s)^{%s}\n' % (u[i][j], label, div, expo)
else:
expo = "%f" % (sd[j] / u[i][j])
if doexp:
PCAtext += '[%f] exp((%s-%f)/%s)\n' % (u[i][j], label, PCmean[j], expo)
else:
PCAtext += '[%f] (%s-%f)/%s)\n' % (u[i][j], label, PCmean[j], expo)
newmean[i] = self.mean(PCdata[:, i])
newsd[i] = np.sqrt(self.mean((PCdata[:, i] - newmean[i]) ** 2))
PCAtext += ' = %f +- %f\n' % (newmean[i], newsd[i])
PCAtext += '\n'
# Find out how correlated these components are with other parameters
        PCAtext += 'Correlations of principal components\n'
l = ["%8i" % i for i in range(1, n + 1)]
PCAtext += '%s\n' % ("".join(l))
for i in range(n):
PCdata[:, i] = (PCdata[:, i] - newmean[i]) / newsd[i]
for j in range(n):
PCAtext += 'PC%2i' % (j + 1)
for i in range(n):
PCAtext += '%8.3f' % (self.mean(PCdata[:, i] * PCdata[:, j]))
PCAtext += '\n'
for j in range(self.n):
PCAtext += '%4i' % (j + 1)
for i in range(n):
PCAtext += '%8.3f' % (
np.sum(self.weights * PCdata[:, i]
* (self.samples[:, j] - self.means[j]) / self.sddev[j]) / self.norm)
PCAtext += ' (%s)\n' % (self.parLabel(j))
if writeDataToFile:
with open(filename or self.rootdirname + ".PCA", "w") as f:
f.write(PCAtext)
return PCAtext
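    # Illustrative usage (assumption; the parameter names below are hypothetical):
    #     text = samples.PCA(['omegabh2', 'omegach2'], param_map='LL', normparam='omegabh2')
    # With writeDataToFile=True the same text is also written to file_root.PCA.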
def getNumSampleSummaryText(self):
"""
Returns a summary text describing numbers of parameters and samples,
and various measures of the effective numbers of samples.
:return: The summary text as a string.
"""
lines = 'using %s rows, %s parameters; mean weight %s, tot weight %s\n' % (
self.numrows, self.paramNames.numParams(), self.mean_mult, self.norm)
if self.indep_thin != 0:
lines += 'Approx indep samples (N/corr length): %s\n' % (round(self.norm / self.indep_thin))
lines += 'Equiv number of single samples (sum w)/max(w): %s\n' % (round(self.norm / self.max_mult))
lines += 'Effective number of weighted samples (sum w)^2/sum(w^2): %s\n' % (
int(self.norm ** 2 / np.dot(self.weights, self.weights)))
return lines
def getConvergeTests(self, test_confidence=0.95, writeDataToFile=False,
what=['MeanVar', 'GelmanRubin', 'SplitTest', 'RafteryLewis', 'CorrLengths'],
filename=None, feedback=False):
"""
Do convergence tests.
:param test_confidence: confidence limit to test for convergence (two-tail, only applies to some tests)
:param writeDataToFile: True if should write output to a file
:param what: The tests to run. Should be a list of any of the following:
- 'MeanVar': Gelman-Rubin sqrt(var(chain mean)/mean(chain var)) test in individual parameters (multiple chains only)
- 'GelmanRubin': Gelman-Rubin test for the worst orthogonalized parameter (multiple chains only)
- 'SplitTest': Crude test for variation in confidence limits when samples are split up into subsets
- 'RafteryLewis': `Raftery-Lewis test <http://www.stat.washington.edu/tech.reports/raftery-lewis2.ps>`_ (integer weight samples only)
- 'CorrLengths': Sample correlation lengths
- 'CorrSteps': Parameter auto-correlations as a function of step separation (not included in the default list)
:param filename: The filename to write to, default is file_root.converge
:param feedback: If set to True, Prints the output as well as returning it.
:return: text giving the output of the tests
"""
lines = ''
nparam = self.n
chainlist = self.getSeparateChains()
num_chains_used = len(chainlist)
if num_chains_used > 1 and feedback:
print('Number of chains used = ', num_chains_used)
for chain in chainlist: chain.setDiffs()
parForm = self.paramNames.parFormat()
parNames = [parForm % self.parName(j) for j in range(nparam)]
limits = np.array([1 - (1 - test_confidence) / 2, (1 - test_confidence) / 2])
if 'CorrLengths' in what:
lines += "Parameter autocorrelation lengths (effective number of samples N_eff = tot weight/weight length)\n"
lines += "\n"
lines += parForm % "" + '%15s %15s %15s\n' % ('Weight Length', 'Sample length', 'N_eff')
maxoff = np.min([chain.weights.size // 10 for chain in chainlist])
maxN = 0
for j in range(nparam):
corr = np.zeros(maxoff + 1)
for chain in chainlist:
corr += chain.getAutocorrelation(j, maxoff, normalized=False) * chain.norm
corr /= self.norm * self.vars[j]
ix = np.argmin(corr > 0.05 * corr[0])
N = corr[0] + 2 * np.sum(corr[1:ix])
maxN = max(N, maxN)
form = '%15.2E'
if self.mean_mult > 1: form = '%15.2f'
lines += parNames[j] + form % N + ' %15.2f %15i\n' % (N / self.mean_mult, self.norm / N)
self.indep_thin = maxN
lines += "\n"
if num_chains_used > 1 and 'MeanVar' in what:
lines += "\n"
lines += "mean convergence stats using remaining chains\n"
lines += "param sqrt(var(chain mean)/mean(chain var))\n"
lines += "\n"
between_chain_var = np.zeros(nparam)
in_chain_var = np.zeros(nparam)
for chain in chainlist:
between_chain_var += (chain.means - self.means) ** 2
between_chain_var /= (num_chains_used - 1)
for j in range(nparam):
# Get stats for individual chains - the variance of the means over the mean of the variances
for chain in chainlist:
in_chain_var[j] += np.dot(chain.weights, chain.diffs[j] ** 2)
in_chain_var[j] /= self.norm
lines += parNames[j] + "%10.4f %s\n" % (
math.sqrt(between_chain_var[j] / in_chain_var[j]), self.parLabel(j))
lines += "\n"
nparamMC = self.paramNames.numNonDerived()
if num_chains_used > 1 and nparamMC > 0 and 'GelmanRubin' in what:
D = self.getGelmanRubinEigenvalues(chainlist=chainlist)
if D is not None:
self.GelmanRubin = np.max(D)
lines += "var(mean)/mean(var) for eigenvalues of covariance of means of orthonormalized parameters\n"
for jj, Di in enumerate(D):
lines += "%3i%13.5f\n" % (jj + 1, Di)
GRSummary = " var(mean)/mean(var), remaining chains, worst e-value: R-1 = %13.5F" % self.GelmanRubin
else:
self.GelmanRubin = None
GRSummary = 'Gelman-Rubin covariance not invertible (parameter not moved?)'
logging.warning(GRSummary)
if feedback: print(GRSummary)
lines += "\n"
if 'SplitTest' in what:
# Do tests for robustness under using splits of the samples
# Return the rms ([change in upper/lower quantile]/[standard deviation])
# when data split into 2, 3,.. sets
lines += "Split tests: rms_n([delta(upper/lower quantile)]/sd) n={2,3,4}, limit=%.0f%%:\n" % (
100 * self.converge_test_limit)
lines += "i.e. mean sample splitting change in the quantiles in units of the st. dev.\n"
lines += "\n"
frac_indices = []
for i in range(self.max_split_tests - 1):
frac_indices.append(self.getFractionIndices(self.weights, i + 2))
for j in range(nparam):
split_tests = np.zeros((self.max_split_tests - 1, 2))
confids = self.confidence(self.samples[:, j], limits)
for ix, frac in enumerate(frac_indices):
split_n = 2 + ix
for f1, f2 in zip(frac[:-1], frac[1:]):
split_tests[ix, :] += (self.confidence(self.samples[:, j], limits, start=f1,
end=f2) - confids) ** 2
split_tests[ix, :] = np.sqrt(split_tests[ix, :] / split_n) / self.sddev[j]
for endb, typestr in enumerate(['upper', 'lower']):
lines += parNames[j]
for ix in range(self.max_split_tests - 1):
lines += "%9.4f" % (split_tests[ix, endb])
lines += " %s\n" % typestr
lines += "\n"
class LoopException(Exception):
pass
if np.all(np.abs(self.weights - self.weights.astype(np.int)) < 1e-4 / self.max_mult):
if 'RafteryLewis' in what:
# Raftery and Lewis method
# See http://www.stat.washington.edu/tech.reports/raftery-lewis2.ps
# Raw non-importance sampled chains only
thin_fac = np.empty(num_chains_used, dtype=np.int)
epsilon = 0.001
nburn = np.zeros(num_chains_used, dtype=np.int)
markov_thin = np.zeros(num_chains_used, dtype=np.int)
hardest = -1
hardestend = 0
for ix, chain in enumerate(chainlist):
thin_fac[ix] = int(round(np.max(chain.weights)))
try:
for j in range(nparamMC):
# Get binary chain depending on whether above or below confidence value
confids = self.confidence(chain.samples[:, j], limits, weights=chain.weights)
for endb in [0, 1]:
u = confids[endb]
while True:
thin_ix = self.thin_indices(thin_fac[ix], chain.weights)
thin_rows = len(thin_ix)
if thin_rows < 2: break
binchain = np.ones(thin_rows, dtype=np.int)
binchain[chain.samples[thin_ix, j] >= u] = 0
indexes = binchain[:-2] * 4 + binchain[1:-1] * 2 + binchain[2:]
# Estimate transition probabilities for the second-order process
tran = np.bincount(indexes, minlength=8).reshape((2, 2, 2))
# tran[:, :, :] = 0
# for i in range(2, thin_rows):
# tran[binchain[i - 2]][binchain[i - 1]][binchain[i]] += 1
# Test whether 2nd order is better than Markov using BIC statistic
g2 = 0
for i1 in [0, 1]:
for i2 in [0, 1]:
for i3 in [0, 1]:
if tran[i1][i2][i3] != 0:
fitted = float(
(tran[i1][i2][0] + tran[i1][i2][1]) *
(tran[0][i2][i3] + tran[1][i2][i3])) \
/ float(tran[0][i2][0] + tran[0][i2][1] +
tran[1][i2][0] + tran[1][i2][1])
focus = float(tran[i1][i2][i3])
g2 += math.log(focus / fitted) * focus
g2 *= 2
if g2 - math.log(float(thin_rows - 2)) * 2 < 0: break
thin_fac[ix] += 1
# Get Markov transition probabilities for binary processes
if np.sum(tran[:, 0, 1]) == 0 or np.sum(tran[:, 1, 0]) == 0:
thin_fac[ix] = 0
raise LoopException()
alpha = np.sum(tran[:, 0, 1]) / float(np.sum(tran[:, 0, 0]) + np.sum(tran[:, 0, 1]))
beta = np.sum(tran[:, 1, 0]) / float(np.sum(tran[:, 1, 0]) + np.sum(tran[:, 1, 1]))
probsum = alpha + beta
tmp1 = math.log(probsum * epsilon / max(alpha, beta)) / math.log(abs(1.0 - probsum))
if int(tmp1 + 1) * thin_fac[ix] > nburn[ix]:
nburn[ix] = int(tmp1 + 1) * thin_fac[ix]
hardest = j
hardestend = endb
markov_thin[ix] = thin_fac[ix]
# Get thin factor to have independent samples rather than Markov
hardest = max(hardest, 0)
u = self.confidence(self.samples[:, hardest], (1 - test_confidence) / 2, hardestend == 0)
while True:
thin_ix = self.thin_indices(thin_fac[ix], chain.weights)
thin_rows = len(thin_ix)
if thin_rows < 2: break
binchain = np.ones(thin_rows, dtype=np.int)
binchain[chain.samples[thin_ix, hardest] >= u] = 0
indexes = binchain[:-1] * 2 + binchain[1:]
# Estimate transition probabilities for the first-order (Markov) process
tran2 = np.bincount(indexes, minlength=4).reshape(2, 2)
# tran2[:, :] = 0
# for i in range(1, thin_rows):
# tran2[binchain[i - 1]][binchain[i]] += 1
# Test whether independence is better than Markov using BIC statistic
g2 = 0
for i1 in [0, 1]:
for i2 in [0, 1]:
if tran2[i1][i2] != 0:
fitted = float(
(tran2[i1][0] + tran2[i1][1]) *
(tran2[0][i2] + tran2[1][i2])) / float(thin_rows - 1)
focus = float(tran2[i1][i2])
if fitted <= 0 or focus <= 0:
print('Raftery and Lewis estimator had problems')
return
g2 += np.log(focus / fitted) * focus
g2 *= 2
if g2 - np.log(float(thin_rows - 1)) < 0: break
thin_fac[ix] += 1
except LoopException:
pass
except:
thin_fac[ix] = 0
if thin_fac[ix] and thin_rows < 2: thin_fac[ix] = 0
lines += "Raftery&Lewis statistics\n"
lines += "\n"
lines += "chain markov_thin indep_thin nburn\n"
for ix in range(num_chains_used):
if thin_fac[ix] == 0:
lines += "%4i Failed/not enough samples\n" % ix
else:
lines += "%4i%12i%12i%12i\n" % (
ix, markov_thin[ix], thin_fac[ix], nburn[ix])
self.RL_indep_thin = np.max(thin_fac)
if feedback:
if not np.all(thin_fac != 0):
print('RL: Not enough samples to estimate convergence stats')
else:
print('RL: Thin for Markov: ', np.max(markov_thin))
print('RL: Thin for indep samples: ', str(self.RL_indep_thin))
print('RL: Estimated burn in steps: ', np.max(nburn), ' (',
int(round(np.max(nburn) / self.mean_mult)), ' rows)')
lines += "\n"
if 'CorrSteps' in what:
# Get correlation lengths. We ignore the fact that there are jumps between chains, so this is a slight underestimate
lines += "Parameter auto-correlations as function of step separation\n"
lines += "\n"
if self.corr_length_thin != 0:
autocorr_thin = self.corr_length_thin
else:
if self.indep_thin == 0:
autocorr_thin = 20
elif self.indep_thin <= 30:
autocorr_thin = 5
else:
autocorr_thin = int(5 * (self.indep_thin / 30))
thin_ix = self.thin_indices(autocorr_thin)
thin_rows = len(thin_ix)
maxoff = int(min(self.corr_length_steps, thin_rows // (2 * num_chains_used)))
if maxoff > 0:
if False:
# ignore ends of chains
corrs = np.zeros([maxoff, nparam])
for j in range(nparam):
diff = self.samples[thin_ix, j] - self.means[j]
for off in range(1, maxoff + 1):
corrs[off - 1][j] = np.dot(diff[off:], diff[:-off]) / (thin_rows - off) / self.vars[j]
lines += parForm % ""
for i in range(maxoff):
lines += "%8i" % ((i + 1) * autocorr_thin)
lines += "\n"
for j in range(nparam):
label = self.parLabel(j)
lines += parNames[j]
for i in range(maxoff):
lines += "%8.3f" % corrs[i][j]
lines += " %s\n" % label
else:
corrs = np.zeros([maxoff, nparam])
for chain in chainlist:
thin_ix = chain.thin_indices(autocorr_thin)
thin_rows = len(thin_ix)
maxoff = min(maxoff, thin_rows // autocorr_thin)
for j in range(nparam):
diff = chain.diffs[j][thin_ix]
for off in range(1, maxoff + 1):
corrs[off - 1][j] += np.dot(diff[off:], diff[:-off]) / (thin_rows - off) / \
self.vars[j]
corrs /= len(chainlist)
lines += parForm % ""
for i in range(maxoff):
lines += "%8i" % ((i + 1) * autocorr_thin)
lines += "\n"
for j in range(nparam):
label = self.parLabel(j)
lines += parNames[j]
for i in range(maxoff):
lines += "%8.3f" % corrs[i][j]
lines += " %s\n" % label
if writeDataToFile:
with open(filename or (self.rootdirname + '.converge'), 'w') as f:
f.write(lines)
return lines
def _get1DNeff(self, par, param):
N_eff = getattr(par, 'N_eff_kde', None)
if N_eff is None:
par.N_eff_kde = self.getEffectiveSamplesGaussianKDE(param, scale=par.sigma_range)
N_eff = par.N_eff_kde
return N_eff
def getAutoBandwidth1D(self, bins, par, param, mult_bias_correction_order=None, kernel_order=1, N_eff=None):
"""
Get the optimized kernel density bandwidth (in units of the range of the bins).
Based on the optimal Improved Sheather-Jones bandwidth for the basic Parzen kernel, then scaled if a higher-order method is being used.
For details see the `notes <http://cosmologist.info/notes/GetDist.pdf>`_.
:param bins: numpy array of binned weights for the samples
:param par: A :class:`~.paramnames.ParamInfo` instance for the parameter to analyse
:param param: index of the parameter to use
:param mult_bias_correction_order: order of multiplicative bias correction (0 is basic Parzen kernel); by default taken from instance settings.
:param kernel_order: order of the kernel (0 is Parzen, 1 does linear boundary correction, 2 is a higher-order kernel)
:param N_eff: effective number of samples. If not specified, it is estimated using the weights, autocorrelations, and a fiducial bandwidth
:return: kernel density bandwidth (in units of the range of the bins)
"""
if N_eff is None:
N_eff = self._get1DNeff(par, param)
h = kde.gaussian_kde_bandwidth_binned(bins, Neff=N_eff)
par.kde_h = h
m = mult_bias_correction_order
if m is None: m = self.mult_bias_correction_order
if kernel_order > 1: m = max(m, 1)
if m:
# higher order method
# e.g. http://biomet.oxfordjournals.org/content/82/2/327.full.pdf+html
# some prefactors given in http://eprints.whiterose.ac.uk/42950/6/taylorcc2%5D.pdf
# Here we just take unit prefactor relative to Gaussian
# and rescale the optimal h for standard KDE to account for the higher-order scaling
# Should be about 1.3 x larger for Gaussian, but smaller in some other cases
return h * N_eff ** (1. / 5 - 1. / (4 * m + 5))
else:
return h
def getAutoBandwidth2D(self, bins, parx, pary, paramx, paramy, corr, rangex, rangey, base_fine_bins_2D,
mult_bias_correction_order=None, min_corr=0.2, N_eff=None):
"""
Get optimized kernel density bandwidth matrix in parameter units, using Improved Sheather Jones method in sheared parameters.
For details see the `notes <http://cosmologist.info/notes/GetDist.pdf>`_.
:param bins: 2D numpy array of binned weights
:param parx: A :class:`~.paramnames.ParamInfo` instance for the x parameter
:param pary: A :class:`~.paramnames.ParamInfo` instance for the y parameter
:param paramx: index of the x parameter
:param paramy: index of the y parameter
:param corr: correlation of the samples
:param rangex: scale in the x parameter
:param rangey: scale in the y parameter
:param base_fine_bins_2D: number of bins to use for re-binning in rotated parameter space
:param mult_bias_correction_order: multiplicative bias correction order (0 is Parzen kernel); by default taken from instance settings
:param min_corr: minimum correlation value at which to bother de-correlating the parameters
:param N_eff: effective number of samples. If not specified, currently uses crude estimate from effective numbers in x and y separately
:return: kernel density bandwidth matrix in parameter units
"""
if N_eff is None:
N_eff = max(self._get1DNeff(parx, paramx), self._get1DNeff(pary, paramy)) # todo: write _get2DNeff
logging.debug('%s %s AutoBandwidth2D: N_eff=%s, corr=%s', parx.name, pary.name, N_eff, corr)
has_limits = parx.has_limits or pary.has_limits
do_correlated = not parx.has_limits or not pary.has_limits
if min_corr < abs(corr) <= self.max_corr_2D and do_correlated:
# 'shear' the data so fairly uncorrelated, making sure shear keeps any bounds on one parameter unchanged
# the binning step will rescale to make roughly isotropic as assumed by the 2D kernel optimizer psi_{ab} derivatives
i, j = paramx, paramy
imax, imin = None, None
if parx.has_limits_bot:
imin = parx.range_min
if parx.has_limits_top:
imax = parx.range_max
if pary.has_limits:
i, j = j, i
if pary.has_limits_bot:
imin = pary.range_min
if pary.has_limits_top:
imax = pary.range_max
cov = self.getCov(pars=[i, j])
S = np.linalg.cholesky(cov)
ichol = np.linalg.inv(S)
S *= ichol[0, 0]
r = ichol[1, :] / ichol[0, 0]
p1 = self.samples[:, i]
p2 = r[0] * self.samples[:, i] + r[1] * self.samples[:, j]
bin1, R1 = kde.bin_samples(p1, nbins=base_fine_bins_2D, range_min=imin, range_max=imax)
bin2, R2 = kde.bin_samples(p2, nbins=base_fine_bins_2D)
rotbins, _ = self._make2Dhist(bin1, bin2, base_fine_bins_2D, base_fine_bins_2D)
opt = kde.KernelOptimizer2D(rotbins, N_eff, 0, do_correlation=not has_limits)
hx, hy, c = opt.get_h()
hx *= R1
hy *= R2
kernelC = S.dot(np.array([[hx ** 2, hx * hy * c], [hx * hy * c, hy ** 2]])).dot(S.T)
hx, hy, c = np.sqrt(kernelC[0, 0]), np.sqrt(kernelC[1, 1]), kernelC[0, 1] / np.sqrt(
kernelC[0, 0] * kernelC[1, 1])
if pary.has_limits:
hx, hy = hy, hx
# print 'derotated pars', hx, hy, c
elif abs(corr) > self.max_corr_2D or not do_correlated and corr > 0.8:
c = max(min(corr, self.max_corr_2D), -self.max_corr_2D)
hx = parx.sigma_range / N_eff ** (1. / 6)
hy = pary.sigma_range / N_eff ** (1. / 6)
else:
opt = kde.KernelOptimizer2D(bins, N_eff, corr, do_correlation=not has_limits)
hx, hy, c = opt.get_h()
hx *= rangex
hy *= rangey
if mult_bias_correction_order is None: mult_bias_correction_order = self.mult_bias_correction_order
logging.debug('hx/sig, hy/sig, corr =%s, %s, %s', hx / parx.err, hy / pary.err, c)
if mult_bias_correction_order:
scale = 1.1 * N_eff ** (1. / 6 - 1. / (2 + 4 * (1 + mult_bias_correction_order)))
hx *= scale
hy *= scale
logging.debug('hx/sig, hy/sig, corr, scale =%s, %s, %s, %s', hx / parx.err, hy / pary.err, c, scale)
return hx, hy, c
def _initParamRanges(self, j, paramConfid=None):
if isinstance(j, six.string_types): j = self.index[j]
paramVec = self.samples[:, j]
return self._initParam(self.paramNames.names[j], paramVec, self.means[j], self.sddev[j], paramConfid)
def _initParam(self, par, paramVec, mean=None, sddev=None, paramConfid=None):
if mean is None: mean = paramVec.mean()
if sddev is None: sddev = paramVec.std()
par.err = sddev
par.mean = mean
par.param_min = np.min(paramVec)
par.param_max = np.max(paramVec)
paramConfid = paramConfid or self.initParamConfidenceData(paramVec)
# sigma_range is estimate related to shape of structure in the distribution = std dev for Gaussian
# search for peaks using quantiles, e.g. like simplified version of Janssen 95 (http://dx.doi.org/10.1080/10485259508832654)
confid_points = np.linspace(0.1, 0.9, 9)
confids = self.confidence(paramConfid,
np.array([self.range_confidence, 1 - self.range_confidence] + list(confid_points)))
par.range_min, par.range_max = confids[0:2]
confids[1:-1] = confids[2:]
confids[0] = par.param_min
confids[-1] = par.param_max
diffs = confids[4:] - confids[:-4]
scale = np.min(diffs) / 1.049
if np.all(diffs > par.err * 1.049) and np.all(diffs < scale * 1.5):
# very flat, can use bigger
par.sigma_range = scale
else:
par.sigma_range = min(par.err, scale)
if self.range_ND_contour >= 0 and self.likeStats:
if self.range_ND_contour >= par.ND_limit_bot.size:
raise SettingError("range_ND_contour should be -1 (off), or 0, 1 for first or second contour level")
par.range_min = min(max(par.range_min - par.err, par.ND_limit_bot[self.range_ND_contour]), par.range_min)
par.range_max = max(max(par.range_max + par.err, par.ND_limit_top[self.range_ND_contour]), par.range_max)
smooth_1D = par.sigma_range * 0.4
if par.has_limits_bot:
if par.range_min - par.limmin > 2 * smooth_1D and par.param_min - par.limmin > smooth_1D:
# long way from limit
par.has_limits_bot = False
else:
par.range_min = par.limmin
if par.has_limits_top:
if par.limmax - par.range_max > 2 * smooth_1D and par.limmax - par.param_max > smooth_1D:
par.has_limits_top = False
else:
par.range_max = par.limmax
if not par.has_limits_bot:
par.range_min -= smooth_1D * 2
if not par.has_limits_top:
par.range_max += smooth_1D * 2
par.has_limits = par.has_limits_top or par.has_limits_bot
return par
def _binSamples(self, paramVec, par, num_fine_bins, borderfrac=0.1):
# High resolution density (sampled many times per smoothing scale). First and last bins are half width
border = (par.range_max - par.range_min) * borderfrac
binmin = min(par.param_min, par.range_min)
if not par.has_limits_bot:
binmin -= border
binmax = max(par.param_max, par.range_max)
if not par.has_limits_top:
binmax += border
fine_width = (binmax - binmin) / (num_fine_bins - 1)
ix = ((paramVec - binmin) / fine_width + 0.5).astype(np.int)
return ix, fine_width, binmin, binmax
def get1DDensity(self, name, **kwargs):
"""
Returns a :class:`~.densities.Density1D` instance for parameter with given name. Result is cached.
:param name: name of the parameter
:param kwargs: arguments for :func:`~MCSamples.get1DDensityGridData`
:return: A :class:`~.densities.Density1D` instance for parameter with given name
"""
if self.needs_update: self.updateBaseStatistics()
if not kwargs:
density = self.density1D.get(name, None)
if density is not None: return density
return self.get1DDensityGridData(name, get_density=True, **kwargs)
def get1DDensityGridData(self, j, writeDataToFile=False, get_density=False, paramConfid=None, meanlikes=False,
**kwargs):
"""
Low-level function to get a :class:`~.densities.Density1D` instance for the marginalized 1D density of a parameter. Result is not cached.
:param j: a name or index of the parameter
:param writeDataToFile: True if should write to text file.
:param get_density: return a :class:`~.densities.Density1D` instance only, does not write out or calculate mean likelihoods for plots
:param paramConfid: optional cached :class:`ParamConfidenceData` instance
:param meanlikes: include mean likelihoods
:param kwargs: optional settings to override instance settings of the same name (see `analysis_settings`):
- **smooth_scale_1D**
- **boundary_correction_order**
- **mult_bias_correction_order**
- **fine_bins**
- **num_bins**
:return: A :class:`~.densities.Density1D` instance
"""
j = self._parAndNumber(j)[0]
if j is None: return None
par = self._initParamRanges(j, paramConfid)
num_bins = kwargs.get('num_bins', self.num_bins)
smooth_scale_1D = kwargs.get('smooth_scale_1D', self.smooth_scale_1D)
boundary_correction_order = kwargs.get('boundary_correction_order', self.boundary_correction_order)
mult_bias_correction_order = kwargs.get('mult_bias_correction_order', self.mult_bias_correction_order)
fine_bins = kwargs.get('fine_bins', self.fine_bins)
paramrange = par.range_max - par.range_min
if paramrange == 0: raise MCSamplesError('Parameter range is zero: ' + par.name)
width = paramrange / (num_bins - 1)
bin_indices, fine_width, binmin, binmax = self._binSamples(self.samples[:, j], par, fine_bins)
bins = np.bincount(bin_indices, weights=self.weights, minlength=fine_bins)
if meanlikes:
if self.shade_likes_is_mean_loglikes:
w = self.weights * self.loglikes
else:
w = self.weights * np.exp((self.mean_loglike - self.loglikes))
finebinlikes = np.bincount(bin_indices, weights=w, minlength=fine_bins)
if smooth_scale_1D <= 0:
# Set automatically.
smooth_1D = self.getAutoBandwidth1D(bins, par, j, mult_bias_correction_order, boundary_correction_order) \
* (binmax - binmin) * abs(smooth_scale_1D) / fine_width
elif smooth_scale_1D < 1.0:
smooth_1D = smooth_scale_1D * par.err / fine_width
else:
smooth_1D = smooth_scale_1D * width / fine_width
if smooth_1D < 2:
logging.warning('fine_bins not large enough to well sample smoothing scale - ' + par.name)
smooth_1D = min(max(1., smooth_1D), fine_bins // 2)
logging.debug("%s 1D sigma_range, std: %s, %s; smooth_1D_bins: %s ", par.name, par.sigma_range, par.err,
smooth_1D)
winw = min(int(round(2.5 * smooth_1D)), fine_bins // 2 - 2)
Kernel = Kernel1D(winw, smooth_1D)
cache = {}
conv = convolve1D(bins, Kernel.Win, 'same', cache=cache)
fine_x = np.linspace(binmin, binmax, fine_bins)
density1D = Density1D(fine_x, P=conv, view_ranges=[par.range_min, par.range_max])
if meanlikes: rawbins = conv.copy()
if par.has_limits and boundary_correction_order >= 0:
# correct for cuts allowing for normalization over window
prior_mask = np.ones(fine_bins + 2 * winw)
if par.has_limits_bot:
prior_mask[winw] = 0.5
prior_mask[: winw] = 0
if par.has_limits_top:
prior_mask[-(winw + 1)] = 0.5
prior_mask[-winw:] = 0
a0 = convolve1D(prior_mask, Kernel.Win, 'valid', cache=cache)
ix = np.nonzero(a0 * density1D.P)
a0 = a0[ix]
normed = density1D.P[ix] / a0
if boundary_correction_order == 0:
density1D.P[ix] = normed
elif boundary_correction_order <= 2:
# linear boundary kernel, e.g. Jones 1993, Jones and Foster 1996
# www3.stat.sinica.edu.tw/statistica/oldpdf/A6n414.pdf after Eq 1b, expressed for general prior mask
# cf arXiv:1411.5528
xWin = Kernel.Win * Kernel.x
a1 = convolve1D(prior_mask, xWin, 'valid', cache=cache)[ix]
a2 = convolve1D(prior_mask, xWin * Kernel.x, 'valid', cache=cache)[ix]
xP = convolve1D(bins, xWin, 'same', cache=cache)[ix]
if boundary_correction_order == 1:
corrected = (density1D.P[ix] * a2 - xP * a1) / (a0 * a2 - a1 ** 2)
else:
# quadratic correction
a3 = convolve1D(prior_mask, xWin * Kernel.x ** 2, 'valid', cache=cache)[ix]
a4 = convolve1D(prior_mask, xWin * Kernel.x ** 3, 'valid', cache=cache)[ix]
x2P = convolve1D(bins, xWin * Kernel.x, 'same', cache=cache)[ix]
denom = a4 * a2 * a0 - a4 * a1 ** 2 - a2 ** 3 - a3 ** 2 * a0 + 2 * a1 * a2 * a3
A = a4 * a2 - a3 ** 2
B = a2 * a3 - a4 * a1
C = a3 * a1 - a2 ** 2
corrected = (density1D.P[ix] * A + xP * B + x2P * C) / denom
density1D.P[ix] = normed * np.exp(np.minimum(corrected / normed, 4) - 1)
else:
raise SettingError('Unknown boundary_correction_order (expected 0, 1, 2)')
elif boundary_correction_order == 2:
# higher order kernel
# eg. see http://www.jstor.org/stable/2965571
xWin2 = Kernel.Win * Kernel.x ** 2
x2P = convolve1D(bins, xWin2, 'same', cache=cache)
a2 = np.sum(xWin2)
a4 = np.dot(xWin2, Kernel.x ** 2)
corrected = (density1D.P * a4 - a2 * x2P) / (a4 - a2 ** 2)
ix = density1D.P > 0
density1D.P[ix] *= np.exp(np.minimum(corrected[ix] / density1D.P[ix], 2) - 1)
if mult_bias_correction_order:
prior_mask = np.ones(fine_bins)
if par.has_limits_bot:
prior_mask[0] *= 0.5
if par.has_limits_top:
prior_mask[-1] *= 0.5
a0 = convolve1D(prior_mask, Kernel.Win, 'same', cache=cache)
for _ in range(mult_bias_correction_order):
# estimate using flattened samples to remove second order biases
# mostly good performance, see http://www.jstor.org/stable/2965571 method 3,1 for first order
prob1 = density1D.P.copy()
prob1[prob1 == 0] = 1
fine = bins / prob1
conv = convolve1D(fine, Kernel.Win, 'same', cache=cache)
density1D.setP(density1D.P * conv)
density1D.P /= a0
density1D.normalize('max', in_place=True)
if not kwargs: self.density1D[par.name] = density1D
if get_density: return density1D
if meanlikes:
ix = density1D.P > 0
finebinlikes[ix] /= density1D.P[ix]
binlikes = convolve1D(finebinlikes, Kernel.Win, 'same', cache=cache)
binlikes[ix] *= density1D.P[ix] / rawbins[ix]
if self.shade_likes_is_mean_loglikes:
maxbin = np.min(binlikes)
binlikes = np.where((binlikes - maxbin) < 30, np.exp(-(binlikes - maxbin)), 0)
binlikes[rawbins == 0] = 0
binlikes /= np.max(binlikes)
density1D.likes = binlikes
else:
density1D.likes = None
if writeDataToFile:
# get thinner grid over restricted range for plotting
x = par.range_min + np.arange(num_bins) * width
bincounts = density1D.Prob(x)
if meanlikes:
likeDensity = Density1D(fine_x, P=binlikes)
likes = likeDensity.Prob(x)
else:
likes = None
fname = self.rootname + "_p_" + par.name
filename = os.path.join(self.plot_data_dir, fname + ".dat")
with open(filename, 'w') as f:
for xval, binval in zip(x, bincounts):
f.write("%16.7E%16.7E\n" % (xval, binval))
if meanlikes:
filename_like = os.path.join(self.plot_data_dir, fname + ".likes")
with open(filename_like, 'w') as f:
for xval, binval in zip(x, likes):
f.write("%16.7E%16.7E\n" % (xval, binval))
density = Density1D(x, bincounts)
density.likes = likes
return density
else:
return density1D
def _setEdgeMask2D(self, parx, pary, prior_mask, winw, alledge=False):
if parx.has_limits_bot:
prior_mask[:, winw] /= 2
prior_mask[:, :winw] = 0
if parx.has_limits_top:
prior_mask[:, -(winw + 1)] /= 2
prior_mask[:, -winw:] = 0
if pary.has_limits_bot:
prior_mask[winw, :] /= 2
prior_mask[:winw, :] = 0
if pary.has_limits_top:
prior_mask[-(winw + 1), :] /= 2
prior_mask[-winw:, :] = 0
if alledge:
prior_mask[:, :winw] = 0
prior_mask[:, -winw:] = 0
prior_mask[:winw, :] = 0
prior_mask[-winw:, :] = 0
def _getScaleForParam(self, par):
# Also ensures that the 1D limits are initialized
density = self.get1DDensity(par)
mn, mx, lim_bot, lim_top = density.getLimits(0.5, accuracy_factor=1)
if lim_bot or lim_top:
scale = (mx - mn) / 0.675
else:
scale = (mx - mn) / (2 * 0.675)
return scale
def _parAndNumber(self, name):
if isinstance(name, ParamInfo): name = name.name
if isinstance(name, six.string_types):
name = self.index.get(name, None)
if name is None: return None, None
if isinstance(name, six.integer_types):
return name, self.paramNames.names[name]
raise ParamError("Unknown parameter type %s" % name)
def _make2Dhist(self, ixs, iys, xsize, ysize):
flatix = ixs + iys * xsize
# note arrays are indexed y,x
return np.bincount(flatix, weights=self.weights,
minlength=xsize * ysize).reshape((ysize, xsize)), flatix
def get2DDensity(self, x, y, normalized=False, **kwargs):
"""
Returns a :class:`~.densities.Density2D` instance with the marginalized 2D density.
:param x: index or name of x parameter
:param y: index or name of y parameter
:param kwargs: keyword arguments for the :func:`get2DDensityGridData` function
:param normalized: if False, is normalized so the maximum is 1, if True, density is normalized
:return: :class:`~.densities.Density2D` instance
"""
if self.needs_update: self.updateBaseStatistics()
density = self.get2DDensityGridData(x, y, get_density=True, **kwargs)
if normalized:
density.normalize(in_place=True)
return density
def get2DDensityGridData(self, j, j2, writeDataToFile=False,
num_plot_contours=None, get_density=False, meanlikes=False, **kwargs):
"""
Low-level function to get 2D plot marginalized density and optional additional plot data.
:param j: name or index of the x parameter
:param j2: name or index of the y parameter.
:param writeDataToFile: True if should write data to file
:param num_plot_contours: number of contours to calculate and return in density.contours
:param get_density: only get the 2D marginalized density, no additional plot data
:param meanlikes: calculate mean likelihoods as well as marginalized density (returned as array in density.likes)
:param kwargs: optional settings to override instance settings of the same name (see `analysis_settings`):
- **fine_bins_2D**
- **boundary_correction_order**
- **mult_bias_correction_order**
- **smooth_scale_2D**
:return: a :class:`~.densities.Density2D` instance
"""
if self.needs_update: self.updateBaseStatistics()
start = time.time()
j, parx = self._parAndNumber(j)
j2, pary = self._parAndNumber(j2)
if j is None or j2 is None: return None
self._initParamRanges(j)
self._initParamRanges(j2)
base_fine_bins_2D = kwargs.get('fine_bins_2D', self.fine_bins_2D)
boundary_correction_order = kwargs.get('boundary_correction_order', self.boundary_correction_order)
mult_bias_correction_order = kwargs.get('mult_bias_correction_order', self.mult_bias_correction_order)
smooth_scale_2D = float(kwargs.get('smooth_scale_2D', self.smooth_scale_2D))
has_prior = parx.has_limits or pary.has_limits
corr = self.getCorrelationMatrix()[j2][j]
if corr == 1: logging.warning('Parameters are 100%% correlated: %s, %s', parx.name, pary.name)
logging.debug('Doing 2D: %s - %s', parx.name, pary.name)
logging.debug('sample x_err, y_err, correlation: %s, %s, %s', parx.err, pary.err, corr)
# keep things simple unless obvious degeneracy
if abs(self.max_corr_2D) > 1: raise SettingError('max_corr_2D cannot be >=1')
if abs(corr) < 0.1: corr = 0.
# for tight degeneracies increase bin density
angle_scale = max(0.2, np.sqrt(1 - min(self.max_corr_2D, abs(corr)) ** 2))
nbin2D = int(round(self.num_bins_2D / angle_scale))
fine_bins_2D = base_fine_bins_2D
if corr:
scaled = 192 * int(3 / angle_scale) // 3
if base_fine_bins_2D < scaled and int(1 / angle_scale) > 1:
fine_bins_2D = scaled
ixs, finewidthx, xbinmin, xbinmax = self._binSamples(self.samples[:, j], parx, fine_bins_2D)
iys, finewidthy, ybinmin, ybinmax = self._binSamples(self.samples[:, j2], pary, fine_bins_2D)
xsize = fine_bins_2D
ysize = fine_bins_2D
histbins, flatix = self._make2Dhist(ixs, iys, xsize, ysize)
if meanlikes:
likeweights = self.weights * np.exp(self.mean_loglike - self.loglikes)
finebinlikes = np.bincount(flatix, weights=likeweights,
minlength=xsize * ysize).reshape((ysize, xsize))
# smooth_x and smooth_y should be in rotated bin units
if smooth_scale_2D < 0:
rx, ry, corr = self.getAutoBandwidth2D(histbins, parx, pary, j, j2, corr, xbinmax - xbinmin,
ybinmax - ybinmin,
base_fine_bins_2D,
mult_bias_correction_order=mult_bias_correction_order)
rx = rx * abs(smooth_scale_2D) / finewidthx
ry = ry * abs(smooth_scale_2D) / finewidthy
elif smooth_scale_2D < 1.0:
rx = smooth_scale_2D * parx.err / finewidthx
ry = smooth_scale_2D * pary.err / finewidthy
else:
rx = smooth_scale_2D * fine_bins_2D / nbin2D
ry = smooth_scale_2D * fine_bins_2D / nbin2D
smooth_scale = float(max(rx, ry))
logging.debug('corr, rx, ry: %s, %s, %s', corr, rx, ry)
if smooth_scale < 2:
logging.warning('fine_bins_2D not large enough for optimal density')
winw = int(round(2.5 * smooth_scale))
Cinv = np.linalg.inv(np.array([[ry ** 2, rx * ry * corr], [rx * ry * corr, rx ** 2]]))
ix1, ix2 = np.mgrid[-winw:winw + 1, -winw:winw + 1]
Win = np.exp(-(ix1 ** 2 * Cinv[0, 0] + ix2 ** 2 * Cinv[1, 1] + 2 * Cinv[1, 0] * ix1 * ix2) / 2)
Win /= np.sum(Win)
logging.debug('time 2D binning and bandwidth: %s ; bins: %s', time.time() - start, fine_bins_2D)
start = time.time()
cache = {}
convolvesize = xsize + 2 * winw + Win.shape[0]
bins2D = convolve2D(histbins, Win, 'same', largest_size=convolvesize, cache=cache)
if meanlikes:
bin2Dlikes = convolve2D(finebinlikes, Win, 'same', largest_size=convolvesize, cache=cache)
if mult_bias_correction_order:
ix = bin2Dlikes > 0
finebinlikes[ix] /= bin2Dlikes[ix]
likes2 = convolve2D(finebinlikes, Win, 'same', largest_size=convolvesize, cache=cache)
likes2[ix] *= bin2Dlikes[ix]
bin2Dlikes = likes2
del finebinlikes
mx = 1e-4 * np.max(bins2D)
bin2Dlikes[bins2D > mx] /= bins2D[bins2D > mx]
bin2Dlikes[bins2D <= mx] = 0
else:
bin2Dlikes = None
if has_prior and boundary_correction_order >= 0:
# Correct for edge effects
prior_mask = np.ones((ysize + 2 * winw, xsize + 2 * winw))
self._setEdgeMask2D(parx, pary, prior_mask, winw)
a00 = convolve2D(prior_mask, Win, 'valid', largest_size=convolvesize, cache=cache)
ix = a00 * bins2D > np.max(bins2D) * 1e-8
a00 = a00[ix]
normed = bins2D[ix] / a00
if boundary_correction_order == 1:
# linear boundary correction
indexes = np.arange(-winw, winw + 1)
y = np.empty(Win.shape)
for i in range(Win.shape[0]):
y[:, i] = indexes
winx = Win * indexes
winy = Win * y
a10 = convolve2D(prior_mask, winx, 'valid', largest_size=convolvesize, cache=cache)[ix]
a01 = convolve2D(prior_mask, winy, 'valid', largest_size=convolvesize, cache=cache)[ix]
a20 = convolve2D(prior_mask, winx * indexes, 'valid', largest_size=convolvesize, cache=cache)[ix]
a02 = convolve2D(prior_mask, winy * y, 'valid', largest_size=convolvesize, cache=cache)[ix]
a11 = convolve2D(prior_mask, winy * indexes, 'valid', largest_size=convolvesize, cache=cache)[ix]
xP = convolve2D(histbins, winx, 'same', largest_size=convolvesize, cache=cache)[ix]
yP = convolve2D(histbins, winy, 'same', largest_size=convolvesize, cache=cache)[ix]
denom = (a20 * a01 ** 2 + a10 ** 2 * a02 - a00 * a02 * a20 + a11 ** 2 * a00 - 2 * a01 * a10 * a11)
A = a11 ** 2 - a02 * a20
Ax = a10 * a02 - a01 * a11
Ay = a01 * a20 - a10 * a11
corrected = (bins2D[ix] * A + xP * Ax + yP * Ay) / denom
bins2D[ix] = normed * np.exp(np.minimum(corrected / normed, 4) - 1)
elif boundary_correction_order == 0:
# simple boundary correction by normalization
bins2D[ix] = normed
else:
raise SettingError('unknown boundary_correction_order (expected 0 or 1)')
if mult_bias_correction_order:
prior_mask = np.ones((ysize + 2 * winw, xsize + 2 * winw))
self._setEdgeMask2D(parx, pary, prior_mask, winw, alledge=True)
a00 = convolve2D(prior_mask, Win, 'valid', largest_size=convolvesize, cache=cache)
for _ in range(mult_bias_correction_order):
box = histbins.copy() # careful with cache in convolve2D.
ix2 = bins2D > np.max(bins2D) * 1e-8
box[ix2] /= bins2D[ix2]
bins2D *= convolve2D(box, Win, 'same', largest_size=convolvesize, cache=cache)
bins2D /= a00
x = np.linspace(xbinmin, xbinmax, xsize)
y = np.linspace(ybinmin, ybinmax, ysize)
density = Density2D(x, y, bins2D,
view_ranges=[(parx.range_min, parx.range_max), (pary.range_min, pary.range_max)])
density.normalize('max', in_place=True)
if get_density: return density
ncontours = len(self.contours)
if num_plot_contours: ncontours = min(num_plot_contours, ncontours)
contours = self.contours[:ncontours]
logging.debug('time 2D convolutions: %s', time.time() - start)
# Get contour containing contours(:) of the probability
density.contours = density.getContourLevels(contours)
# now make smaller num_bins grid between ranges for plotting
# x = parx.range_min + np.arange(nbin2D + 1) * widthx
# y = pary.range_min + np.arange(nbin2D + 1) * widthy
# bins2D = density.Prob(x, y)
# bins2D[bins2D < 1e-30] = 0
if meanlikes:
bin2Dlikes /= np.max(bin2Dlikes)
density.likes = bin2Dlikes
else:
density.likes = None
if writeDataToFile:
# note store things in confusing transpose form
# if meanlikes:
# filedensity = Density2D(x, y, bin2Dlikes)
# bin2Dlikes = filedensity.Prob(x, y)
plotfile = self.rootname + "_2D_%s_%s" % (parx.name, pary.name)
filename = os.path.join(self.plot_data_dir, plotfile)
np.savetxt(filename, bins2D.T, "%16.7E")
np.savetxt(filename + "_y", x, "%16.7E")
np.savetxt(filename + "_x", y, "%16.7E")
np.savetxt(filename + "_cont", np.atleast_2d(density.contours), "%16.7E")
if meanlikes:
np.savetxt(filename + "_likes", bin2Dlikes.T, "%16.7E")
# res = Density2D(x, y, bins2D)
# res.contours = density.contours
# res.likes = bin2Dlikes
return density
def _setLikeStats(self):
"""
Get and store LikeStats (see :func:`MCSamples.getLikeStats`)
"""
if self.loglikes is None:
self.likeStats = None
return None
m = types.LikeStats()
bestfit_ix = np.argmin(self.loglikes)
maxlike = self.loglikes[bestfit_ix]
m.logLike_sample = maxlike
if np.max(self.loglikes) - maxlike < 30:
m.logMeanInvLike = np.log(self.mean(np.exp(self.loglikes - maxlike))) + maxlike
else:
m.logMeanInvLike = None
m.meanLogLike = self.mean_loglike
m.logMeanLike = -np.log(self.mean(np.exp(-(self.loglikes - maxlike)))) + maxlike
m.names = self.paramNames.names
# get N-dimensional confidence region
indexes = self.loglikes.argsort()
cumsum = np.cumsum(self.weights[indexes])
m.ND_cont1, m.ND_cont2 = np.searchsorted(cumsum, self.norm * self.contours[0:2])
for j, par in enumerate(self.paramNames.names):
region1 = self.samples[indexes[:m.ND_cont1], j]
region2 = self.samples[indexes[:m.ND_cont2], j]
par.ND_limit_bot = np.array([np.min(region1), np.min(region2)])
par.ND_limit_top = np.array([np.max(region1), np.max(region2)])
par.bestfit_sample = self.samples[bestfit_ix][j]
self.likeStats = m
return m
def _readRanges(self):
if self.root:
ranges_file = self.root + '.ranges'
if os.path.isfile(ranges_file):
self.ranges = ParamBounds(ranges_file)
return
self.ranges = ParamBounds()
def getBounds(self):
"""
Returns the bounds in the form of a :class:`~.parampriors.ParamBounds` instance, for example for determining plot ranges
Bounds are not the same as self.ranges: if the samples are not near the range boundary, the bound is set to None.
:return: a :class:`~.parampriors.ParamBounds` instance
"""
bounds = ParamBounds()
bounds.names = self.paramNames.list()
for par in self.paramNames.names:
if par.has_limits_bot:
bounds.lower[par.name] = par.limmin
if par.has_limits_top:
bounds.upper[par.name] = par.limmax
return bounds
def getUpper(self, name):
"""
Return the upper limit of the parameter with the given name.
:param name: parameter name
:return: The upper limit if name exists, None otherwise.
"""
par = self.paramNames.parWithName(name)
if par:
return par.limmax
return None
def getLower(self, name):
"""
Return the lower limit of the parameter with the given name.
:param name: parameter name
:return: The lower limit if name exists, None otherwise.
"""
par = self.paramNames.parWithName(name)
if par:
return par.limmin
return None
def getMargeStats(self, include_bestfit=False):
"""
Returns a :class:`~.types.MargeStats` object with marginalized 1D parameter constraints
:param include_bestfit: if True, set best fit values by loading from root_name.minimum file (assuming it exists)
:return: A :class:`~.types.MargeStats` instance
"""
self._setDensitiesandMarge1D()
m = types.MargeStats()
m.hasBestFit = False
m.limits = self.contours
m.names = self.paramNames.names
if include_bestfit:
bf_file = self.root + '.minimum'
if os.path.exists(bf_file):
return types.BestFit(bf_file)
else:
raise MCSamplesError(
'Best fit can only be included if loaded from file and file_root.minimum exists (cannot be calculated from samples)')
return m
def getLikeStats(self):
"""
Get best fit sample and n-D confidence limits, and various likelihood based statistics
:return: a :class:`~.types.LikeStats` instance storing N-D limits for parameter i in result.names[i].ND_limit_top,
result.names[i].ND_limit_bot, and best-fit sample value in result.names[i].bestfit_sample
"""
return self.likeStats or self._setLikeStats()
def getTable(self, columns=1, include_bestfit=False, **kwargs):
"""
Creates and returns a :class:`~.types.ResultTable` instance.
:param columns: number of columns in the table
:param include_bestfit: True if should include the bestfit parameter values (assuming set)
:param kwargs: arguments for :class:`~.types.ResultTable` constructor.
:return: A :class:`~.types.ResultTable` instance
"""
return types.ResultTable(columns, [self.getMargeStats(include_bestfit)], **kwargs)
def getLatex(self, params=None, limit=1):
"""
Get tex snippet for constraints on a list of parameters
:param params: list of parameter names
:param limit: which limit to get, 1 is the first (default 68%), 2 is the second (limits array specified by self.contours)
:return: labels, texs: a list of parameter labels, and a list of tex snippets
"""
marge = self.getMargeStats()
if params is None: params = marge.list()
formatter = types.NoLineTableFormatter()
texs = []
labels = []
for par in params:
tex = marge.texValues(formatter, par, limit=limit)
if tex is not None:
texs.append(tex[0])
labels.append(marge.parWithName(par).getLabel())
else:
texs.append(None)
labels.append(None)
return labels, texs
def getInlineLatex(self, param, limit=1):
r"""
Get snippet like: A=x\\pm y. Will adjust appropriately for one and two tail limits.
:param param: The name of the parameter
:param limit: which limit to get, 1 is the first (default 68%), 2 is the second (limits array specified by self.contours)
:return: The tex snippet.
"""
labels, texs = self.getLatex([param], limit)
if not texs[0][0] in ['<', '>']:
return labels[0] + ' = ' + texs[0]
else:
return labels[0] + ' ' + texs[0]
def _setDensitiesandMarge1D(self, max_frac_twotail=None, writeDataToFile=False, meanlikes=False):
"""
Get all the 1D densities; result is cached.
:param max_frac_twotail: optional override for self.max_frac_twotail
:param writeDataToFile: True if should write to file
:param meanlikes: include mean likelihoods
"""
if self.done_1Dbins: return
for j in range(self.n):
paramConfid = self.initParamConfidenceData(self.samples[:, j])
self.get1DDensityGridData(j, writeDataToFile, get_density=not writeDataToFile, paramConfid=paramConfid,
meanlikes=meanlikes)
self._setMargeLimits(self.paramNames.names[j], paramConfid, max_frac_twotail)
self.done_1Dbins = True
def _setMargeLimits(self, par, paramConfid, max_frac_twotail=None, density1D=None):
"""
Get limits, one or two tail depending on whether posterior
goes to zero at the limits or not
:param par: The :class:`~.paramnames.ParamInfo` to set limits for
:param paramConfid: :class:`~.chains.ParamConfidenceData` instance
:param max_frac_twotail: optional override for self.max_frac_twotail
:param density1D: any existing density 1D instance to use
"""
if max_frac_twotail is None:
max_frac_twotail = self.max_frac_twotail
par.limits = []
density1D = density1D or self.get1DDensity(par.name)
interpGrid = None
for ix1, contour in enumerate(self.contours):
marge_limits_bot = par.has_limits_bot and \
not self.force_twotail and density1D.P[0] > max_frac_twotail[ix1]
marge_limits_top = par.has_limits_top and \
not self.force_twotail and density1D.P[-1] > max_frac_twotail[ix1]
if not marge_limits_bot or not marge_limits_top:
# give limit
if not interpGrid: interpGrid = density1D.initLimitGrids()
tail_limit_bot, tail_limit_top, marge_limits_bot, marge_limits_top = density1D.getLimits(contour,
interpGrid)
limfrac = 1 - contour
if marge_limits_bot:
# fix to end of prior range
tail_limit_bot = par.range_min
elif marge_limits_top:
# 1 tail limit
tail_limit_bot = self.confidence(paramConfid, limfrac, upper=False)
else:
# 2 tail limit
tail_confid_bot = self.confidence(paramConfid, limfrac / 2, upper=False)
if marge_limits_top:
tail_limit_top = par.range_max
elif marge_limits_bot:
tail_limit_top = self.confidence(paramConfid, limfrac, upper=True)
else:
tail_confid_top = self.confidence(paramConfid, limfrac / 2, upper=True)
if not marge_limits_bot and not marge_limits_top:
# Two tail, check if limits are at very different density
if (math.fabs(density1D.Prob(tail_confid_top) -
density1D.Prob(tail_confid_bot)) < self.credible_interval_threshold):
tail_limit_top = tail_confid_top
tail_limit_bot = tail_confid_bot
lim = [tail_limit_bot, tail_limit_top]
else:
# no limit
lim = [par.range_min, par.range_max]
if marge_limits_bot and marge_limits_top:
tag = 'none'
elif marge_limits_bot:
tag = '>'
elif marge_limits_top:
tag = '<'
else:
tag = 'two'
par.limits.append(types.ParamLimit(lim, tag))
def getCorrelatedVariable2DPlots(self, num_plots=12, nparam=None):
"""
Gets a list of most correlated variable pair names.
:param num_plots: The number of plots
:param nparam: maximum number of parameters to consider (default: all non-derived parameters)
:return: list of [x,y] pair names
"""
nparam = nparam or self.paramNames.numNonDerived()
try_t = 1e5
x, y = 0, 0
cust2DPlots = []
correlationMatrix = self.correlationMatrix
for _ in range(num_plots):
try_b = -1e5
for ix1 in range(nparam):
for ix2 in range(ix1 + 1, nparam):
if try_b < abs(correlationMatrix[ix1][ix2]) < try_t:
try_b = abs(correlationMatrix[ix1][ix2])
x, y = ix1, ix2
if try_b == -1e5:
break
try_t = try_b
cust2DPlots.append([self.parName(x), self.parName(y)])
return cust2DPlots
def saveAsText(self, root, chain_index=None, make_dirs=False):
"""
Saves samples as text file, including .ranges and .paramnames.
:param root: The root file name to use.
:param chain_index: optional index to be used for the filename.
:param make_dirs: True if should create the directories
"""
super(MCSamples, self).saveAsText(root, chain_index, make_dirs)
if not chain_index:
self.ranges.saveToFile(root + '.ranges')
# Write functions for GetDist.py
def writeScriptPlots1D(self, filename, plotparams=None, ext=None):
"""
Write a script that generates a 1D plot. Only intended for use by GetDist.py script.
:param filename: The filename to write to.
:param plotparams: The list of parameters to plot (default: all)
:param ext: The extension for the filename, Default if None
"""
text = 'markers=' + str(self.markers) + '\n'
if plotparams:
text += 'g.plots_1d(roots,[' + ",".join(['\'' + par + '\'' for par in plotparams]) + '], markers=markers)'
else:
text += 'g.plots_1d(roots, markers=markers)'
self._WritePlotFile(filename, self.subplot_size_inch, text, '', ext)
def writeScriptPlots2D(self, filename, plot_2D_param=None, cust2DPlots=[], writeDataToFile=False, ext=None,
shade_meanlikes=False):
"""
Write a script that generates 2D plots. Only intended for use by the GetDist.py script.
:param filename: The filename to write to.
:param plot_2D_param: parameter to plot other variables against
:param cust2DPlots: list of parts of parameter names to plot
:param writeDataToFile: True if should write to file
:param ext: The extension for the filename, Default if None
:param shade_meanlikes: shade by mean likelihoods
:return: A dictionary indexed by pairs of parameters where 2D densities have been calculated
"""
done2D = {}
text = 'pairs=[]\n'
plot_num = 0
if len(cust2DPlots):
cuts = [par1 + '__' + par2 for par1, par2 in cust2DPlots]
for j, par1 in enumerate(self.paramNames.list()):
if plot_2D_param or cust2DPlots:
if par1 == plot_2D_param: continue
j2min = 0
else:
j2min = j + 1
for j2 in range(j2min, self.n):
par2 = self.parName(j2)
if plot_2D_param and par2 != plot_2D_param: continue
if len(cust2DPlots) and (par1 + '__' + par2) not in cuts: continue
plot_num += 1
done2D[(par1, par2)] = True
if writeDataToFile:
self.get2DDensityGridData(j, j2, writeDataToFile=True, meanlikes=shade_meanlikes)
text += "pairs.append(['%s','%s'])\n" % (par1, par2)
text += 'g.plots_2d(roots,param_pairs=pairs)'
self._WritePlotFile(filename, self.subplot_size_inch2, text, '_2D', ext)
return done2D
def writeScriptPlotsTri(self, filename, triangle_params, ext=None):
"""
Write a script that generates a triangle plot. Only intended for use by GetDist.py script.
:param filename: The filename to write to.
:param triangle_params: list of parameter names to plot
:param ext: The extension for the filename, Default if None
"""
text = 'g.triangle_plot(roots, %s)' % triangle_params
self._WritePlotFile(filename, self.subplot_size_inch, text, '_tri', ext)
def writeScriptPlots3D(self, filename, plot_3D, ext=None):
"""
Writes a script that generates a 3D (coloured-scatter) plot. Only intended for use by GetDist.py script.
:param filename: The filename to write to
:param plot_3D: list of [x,y,z] parameters for the 3 Dimensional plots
:param ext: The extension for the filename, Default if None
"""
text = 'sets=[]\n'
for pars in plot_3D:
text += "sets.append(['%s','%s','%s'])\n" % tuple(pars)
text += 'g.plots_3d(roots,sets)'
self._WritePlotFile(filename, self.subplot_size_inch3, text, '_3D', ext)
def _WritePlotFile(self, filename, subplot_size, text, tag, ext=None):
"""
Write plot file.
Used by other functions
:param filename: The filename to write to
:param subplot_size: The size of the subplot.
:param text: The text to write after the headers.
:param tag: Tag used for the filename the created file will export to.
:param ext: The extension for the filename, Default if None
"""
with open(filename, 'w') as f:
f.write("import getdist.plots as plots, os\n")
if self.plot_data_dir:
f.write("g=plots.GetDistPlotter(plot_data=r'%s')\n" % self.plot_data_dir)
else:
f.write("g=plots.GetDistPlotter(chain_dir=r'%s')\n" % os.path.dirname(self.root))
f.write("g.settings.setWithSubplotSize(%s)\n" % subplot_size)
f.write("roots = ['%s']\n" % self.rootname)
f.write(text + '\n')
ext = ext or self.plot_output
fname = self.rootname + tag + '.' + ext
f.write("g.export(os.path.join(r'%s',r'%s'))\n" % (self.out_dir, fname))
# ==============================================================================
# Useful functions
def GetChainRootFiles(rootdir):
"""
Gets the root names of all chain files in a directory.
:param rootdir: The root directory to check
:return: The root names
"""
pattern = os.path.join(rootdir, '*.paramnames')
files = [os.path.splitext(f)[0] for f in glob.glob(pattern)]
files.sort()
return files
def GetRootFileName(rootdir):
"""
Gets the root name of chains in given directory (assuming only one set of chain files).
:param rootdir: The directory to check
:return: The root file name.
"""
rootFileName = ""
pattern = os.path.join(rootdir, '*_*.txt')
chain_files = glob.glob(pattern)
chain_files.sort()
if chain_files:
chain_file0 = chain_files[0]
rindex = chain_file0.rindex('_')
rootFileName = chain_file0[:rindex]
return rootFileName
# ==============================================================================
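# Illustrative usage sketch (editor addition, not part of the module above). It assumes
# the MCSamples constructor accepts in-memory arrays via the samples=/names=/labels=
# keyword arguments, which is not shown in this file and is therefore an assumption;
# the methods called below (getNumSampleSummaryText, getLatex, get1DDensity) are the
# ones defined above.
if __name__ == '__main__':
    import numpy as np
    # Toy chain: two correlated Gaussian parameters with unit weights
    toy_samples = np.random.multivariate_normal([0.0, 0.0], [[1.0, 0.3], [0.3, 1.0]], size=20000)
    toy = MCSamples(samples=toy_samples, names=['x1', 'x2'], labels=['x_1', 'x_2'])
    print(toy.getNumSampleSummaryText())
    labels, texs = toy.getLatex(['x1', 'x2'], limit=1)
    for label, tex in zip(labels, texs):
        print(label, '=', tex)
    density = toy.get1DDensity('x1')  # cached Density1D, e.g. for plotting
    # GetChainRootFiles('./chains') and GetRootFileName('./chains') locate on-disk chain roots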
| 45.324516
| 151
| 0.558373
|
553aa9aadf4e1b292f02d3cd5090f278d40ab4fb
| 3,303
|
py
|
Python
|
tools/runners/util.py
|
GerHobbelt/jerryscript
|
483c867cb238df1b3d00ada3ee4648fc202dadb0
|
[
"Apache-2.0"
] | null | null | null |
tools/runners/util.py
|
GerHobbelt/jerryscript
|
483c867cb238df1b3d00ada3ee4648fc202dadb0
|
[
"Apache-2.0"
] | null | null | null |
tools/runners/util.py
|
GerHobbelt/jerryscript
|
483c867cb238df1b3d00ada3ee4648fc202dadb0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import codecs
import signal
import subprocess
import sys
TERM_NORMAL = '\033[0m'
TERM_RED = '\033[1;31m'
TERM_GREEN = '\033[1;32m'
def set_timezone(timezone):
assert sys.platform == 'win32', "set_timezone is Windows only function"
subprocess.call(['cmd', '/S', '/C', 'tzutil', '/s', timezone])
def set_timezone_and_exit(timezone):
assert sys.platform == 'win32', "set_timezone_and_exit is Windows only function"
set_timezone(timezone)
sys.exit(1)
def get_timezone():
assert sys.platform == 'win32', "get_timezone is Windows only function"
return subprocess.check_output(['cmd', '/S', '/C', 'tzutil', '/g'], universal_newlines=True)
def set_sighdl_to_reset_timezone(timezone):
assert sys.platform == 'win32', "install_signal_handler_to_restore_timezone is Windows only function"
signal.signal(signal.SIGINT, lambda signal, frame: set_timezone_and_exit(timezone))
def setup_stdio():
(out_stream, err_stream) = (sys.stdout, sys.stderr)
if sys.version_info.major >= 3:
(out_stream, err_stream) = (sys.stdout.buffer, sys.stderr.buffer)
# For a tty use the native encoding, otherwise (pipe) use 'utf-8'
encoding = sys.stdout.encoding if sys.stdout.isatty() else 'utf-8'
# Always override it to avoid encoding errors
sys.stdout = codecs.getwriter(encoding)(out_stream, 'xmlcharrefreplace')
sys.stderr = codecs.getwriter(encoding)(err_stream, 'xmlcharrefreplace')
def print_test_summary(summary_string, total, passed, failed):
print("\n[summary] %s\n" % summary_string)
print("TOTAL: %d" % total)
print("%sPASS: %d%s" % (TERM_GREEN, passed, TERM_NORMAL))
print("%sFAIL: %d%s\n" % (TERM_RED, failed, TERM_NORMAL))
success_color = TERM_GREEN if passed == total else TERM_RED
print("%sSuccess: %d%%%s" % (success_color, passed*100/total, TERM_NORMAL))
def print_test_result(tested, total, is_passed, passed_string, test_path, is_snapshot_generation=None):
if is_snapshot_generation is None:
snapshot_string = ''
elif is_snapshot_generation:
snapshot_string = ' (generate snapshot)'
else:
snapshot_string = ' (execute snapshot)'
color = TERM_GREEN if is_passed else TERM_RED
print("[%4d/%4d] %s%s: %s%s%s" % (tested, total, color, passed_string, test_path, snapshot_string, TERM_NORMAL))
def get_platform_cmd_prefix():
if sys.platform == 'win32':
return ['cmd', '/S', '/C']
return []
def get_python_cmd_prefix():
# Python scripts don't have execute permission on the GitHub Actions Windows runner
return get_platform_cmd_prefix() + [sys.executable or 'python']
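# Illustrative usage sketch (editor addition, not part of the original helper module):
# exercises the reporting helpers defined above on a made-up three-test run; the test
# paths and pass/fail outcomes are assumptions for demonstration only.
if __name__ == '__main__':
    setup_stdio()
    print_test_result(1, 3, True, 'PASS', 'tests/pass-1.js')
    print_test_result(2, 3, True, 'PASS', 'tests/pass-2.js')
    print_test_result(3, 3, False, 'FAIL', 'tests/fail-1.js')
    print_test_summary('dummy run', 3, 2, 1)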
| 36.296703
| 116
| 0.717832
|
9b4c3e59f83a9d37a9f63839fabae16d9b3a4ede
| 1,609
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/slow_requests_based_trigger.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/models/slow_requests_based_trigger.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/models/slow_requests_based_trigger.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SlowRequestsBasedTrigger(Model):
"""
SlowRequestsBasedTrigger
:param time_taken: TimeTaken
:type time_taken: str
:param count: Count
:type count: int
:param time_interval: TimeInterval
:type time_interval: str
"""
_attribute_map = {
'time_taken': {'key': 'timeTaken', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(self, time_taken=None, count=None, time_interval=None):
self.time_taken = time_taken
self.count = count
self.time_interval = time_interval
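# --- Illustrative sketch (not part of the generated file) ---
# A minimal, hypothetical example of constructing this model; the values
# below are placeholders and follow the attribute names declared in
# _attribute_map above.
if __name__ == '__main__':
    trigger = SlowRequestsBasedTrigger(
        time_taken='00:00:10',    # requests slower than 10 seconds ...
        count=5,                  # ... at least 5 of them ...
        time_interval='00:05:00'  # ... within a 5 minute window
    )
    print(trigger.time_taken, trigger.count, trigger.time_interval)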
avg_line_length: 34.234043 | max_line_length: 76 | alphanum_fraction: 0.634556

hexsha: 415c2b2da383b904a8c11359a13dcaf7a2288e7c | size: 670 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: exer037.py
max_stars/max_issues/max_forks_repo_name: fabionunesdeparis/Fundamentos-em-python3
max_stars/max_issues/max_forks_repo_head_hexsha: b21d06b44d5b18e99419cd06b4e08363c1f7a7ce
max_stars/max_issues/max_forks_repo_licenses: ["MIT"]
max_stars_count / max_issues_count / max_forks_count and event datetimes: null
# @ Fábio C. Nunes 11/05/20
numero = int(input('Enter an integer: '))
print('[1] - Binary\n[2] - Octal\n[3] - Hexadecimal')
opcao = int(input('Enter the desired option: '))
if opcao == 1:
    binario = str(bin(numero))
    print('The number entered, {}, converted to binary is {}'.format(numero, binario[2:]))
elif opcao == 2:
    octal = str(oct(numero))
    print('The number entered, {}, converted to octal is {}'.format(numero, octal[2:]))
elif opcao == 3:
    hexadecimal = str(hex(numero))
    print('The number entered, {}, converted to hexadecimal is {}'.format(numero, hexadecimal[2:]))
else:
    print('\033[31mThe option entered is not in the list!')
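# --- Illustrative sketch (not part of the original exercise) ---
# The same conversions can be written with format specifiers instead of
# slicing off the '0b'/'0o'/'0x' prefixes. This helper is hypothetical and
# is not called by the script above.
def convert_base(value, option):
    formats = {1: 'b', 2: 'o', 3: 'x'}  # 1=binary, 2=octal, 3=hexadecimal
    return format(value, formats[option]) if option in formats else None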
avg_line_length: 44.666667 | max_line_length: 99 | alphanum_fraction: 0.656716

hexsha: 62196a73332a490a8dada1f7eb830f4d7abd9b01 | size: 15,745 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: sdk/communication/azure-communication-identity/azure/communication/identity/_generated/aio/operations/_communication_identity_operations.py
max_stars_repo_name: jalauzon-msft/azure-sdk-for-python | max_stars_repo_head_hexsha: 15967f5c6d3376f2334a382486ba86339786e028 | max_stars_repo_licenses: ["MIT"]
max_stars_count: 1 | stars_event_min_datetime: 2022-02-01T18:50:12.000Z | stars_event_max_datetime: 2022-02-01T18:50:12.000Z
max_issues/max_forks_repo_name: ellhe-blaster/azure-sdk-for-python | max_issues/max_forks_repo_head_hexsha: 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | max_issues/max_forks_repo_licenses: ["MIT"]
max_issues_count / max_forks_count and event datetimes: null
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._communication_identity_operations import build_create_request, build_delete_request, build_exchange_teams_user_access_token_request, build_issue_access_token_request, build_revoke_access_tokens_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CommunicationIdentityOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.communication.identity.aio.CommunicationIdentityClient`'s
:attr:`communication_identity` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
args = list(args)
self._client = args.pop(0) if args else kwargs.pop("client")
self._config = args.pop(0) if args else kwargs.pop("config")
self._serialize = args.pop(0) if args else kwargs.pop("serializer")
self._deserialize = args.pop(0) if args else kwargs.pop("deserializer")
@distributed_trace_async
async def create(
self,
create_token_with_scopes: Optional[List[Union[str, "_models.CommunicationTokenScope"]]] = None,
**kwargs: Any
) -> "_models.CommunicationIdentityAccessTokenResult":
"""Create a new identity, and optionally, an access token.
Create a new identity, and optionally, an access token.
:param create_token_with_scopes: Also create access token for the created identity. Default
value is None.
:type create_token_with_scopes: list[str or
~azure.communication.identity.models.CommunicationTokenScope]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CommunicationIdentityAccessTokenResult, or the result of cls(response)
:rtype: ~azure.communication.identity.models.CommunicationIdentityAccessTokenResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CommunicationIdentityAccessTokenResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_body = _models.CommunicationIdentityCreateRequest(create_token_with_scopes=create_token_with_scopes)
if _body is not None:
_json = self._serialize.body(_body, 'CommunicationIdentityCreateRequest')
else:
_json = None
request = build_create_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CommunicationErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CommunicationIdentityAccessTokenResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': "/identities"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
id: str,
**kwargs: Any
) -> None:
"""Delete the identity, revoke all tokens for the identity and delete all associated data.
Delete the identity, revoke all tokens for the identity and delete all associated data.
:param id: Identifier of the identity to be deleted.
:type id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-06-01") # type: str
request = build_delete_request(
id=id,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CommunicationErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/identities/{id}"} # type: ignore
@distributed_trace_async
async def revoke_access_tokens( # pylint: disable=inconsistent-return-statements
self,
id: str,
**kwargs: Any
) -> None:
"""Revoke all access tokens for the specific identity.
Revoke all access tokens for the specific identity.
:param id: Identifier of the identity.
:type id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-06-01") # type: str
request = build_revoke_access_tokens_request(
id=id,
api_version=api_version,
template_url=self.revoke_access_tokens.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CommunicationErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
revoke_access_tokens.metadata = {'url': "/identities/{id}/:revokeAccessTokens"} # type: ignore
@distributed_trace_async
async def exchange_teams_user_access_token(
self,
token: str,
app_id: str,
user_id: str,
**kwargs: Any
) -> "_models.CommunicationIdentityAccessToken":
"""Exchange an Azure Active Directory (Azure AD) access token of a Teams user for a new
Communication Identity access token with a matching expiration time.
Exchange an Azure Active Directory (Azure AD) access token of a Teams user for a new
Communication Identity access token with a matching expiration time.
:param token: Azure AD access token of a Teams User to acquire a new Communication Identity
access token.
:type token: str
:param app_id: Client ID of an Azure AD application to be verified against the appid claim in
the Azure AD access token.
:type app_id: str
:param user_id: Object ID of an Azure AD user (Teams User) to be verified against the oid claim
in the Azure AD access token.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CommunicationIdentityAccessToken, or the result of cls(response)
:rtype: ~azure.communication.identity.models.CommunicationIdentityAccessToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CommunicationIdentityAccessToken"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_body = _models.TeamsUserExchangeTokenRequest(token=token, app_id=app_id, user_id=user_id)
_json = self._serialize.body(_body, 'TeamsUserExchangeTokenRequest')
request = build_exchange_teams_user_access_token_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.exchange_teams_user_access_token.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CommunicationErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CommunicationIdentityAccessToken', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
exchange_teams_user_access_token.metadata = {'url': "/teamsUser/:exchangeAccessToken"} # type: ignore
@distributed_trace_async
async def issue_access_token(
self,
id: str,
scopes: List[Union[str, "_models.CommunicationTokenScope"]],
**kwargs: Any
) -> "_models.CommunicationIdentityAccessToken":
"""Issue a new token for an identity.
Issue a new token for an identity.
:param id: Identifier of the identity to issue token for.
:type id: str
:param scopes: List of scopes attached to the token.
:type scopes: list[str or ~azure.communication.identity.models.CommunicationTokenScope]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CommunicationIdentityAccessToken, or the result of cls(response)
:rtype: ~azure.communication.identity.models.CommunicationIdentityAccessToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CommunicationIdentityAccessToken"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_body = _models.CommunicationIdentityAccessTokenRequest(scopes=scopes)
_json = self._serialize.body(_body, 'CommunicationIdentityAccessTokenRequest')
request = build_issue_access_token_request(
id=id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.issue_access_token.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CommunicationErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CommunicationIdentityAccessToken', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
issue_access_token.metadata = {'url': "/identities/{id}/:issueAccessToken"} # type: ignore
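# --- Illustrative sketch (not part of the generated file) ---
# The class docstring above says these operations are reached through the
# generated client's 'communication_identity' attribute rather than by
# instantiating this class directly. A rough async usage sketch, with the
# client object and the identity id treated as placeholders:
#
#   async def example(client):
#       # create a new identity together with a "chat"-scoped access token
#       result = await client.communication_identity.create(
#           create_token_with_scopes=["chat"])
#       # ... later, revoke every access token issued for some identity
#       await client.communication_identity.revoke_access_tokens("<identity-id>")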
avg_line_length: 43.614958 | max_line_length: 221 | alphanum_fraction: 0.671578

hexsha: 3d68f0b3009610881e49643f6d47a34182772d75 | size: 77,967 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: dev-tools/scripts/releaseWizard.py
max_stars/max_issues/max_forks_repo_name: solr8/solr
max_stars/max_issues/max_forks_repo_head_hexsha: c2f26ac784945dca6d096b58f2d0e98196562894
max_stars/max_issues/max_forks_repo_licenses: ["Apache-2.0"]
max_stars_count / max_issues_count / max_forks_count and event datetimes: null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is the Release Manager's best friend, ensuring all details of a release are handled correctly.
# It will walk you through the steps of the release process, asking for decisions or input along the way.
# CAUTION: You still need to use your head! Please read the HELP section in the main menu.
#
# Requirements:
# Install requirements with this command:
# pip3 install -r requirements.txt
#
# Usage:
# releaseWizard.py [-h] [--dry-run] [--root PATH]
#
# optional arguments:
# -h, --help show this help message and exit
# --dry-run Do not execute any commands, but echo them instead. Display
# extra debug info
# --root PATH Specify different root folder than ~/.lucene-releases
import argparse
import copy
import fcntl
import json
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import textwrap
import time
import urllib
from collections import OrderedDict
from datetime import datetime
from datetime import timedelta
try:
import holidays
import yaml
from ics import Calendar, Event
from jinja2 import Environment
except:
print("You lack some of the module dependencies to run this script.")
print("Please run 'pip3 install -r requirements.txt' and try again.")
sys.exit(1)
import scriptutil
from consolemenu import ConsoleMenu
from consolemenu.items import FunctionItem, SubmenuItem, ExitItem
from consolemenu.screen import Screen
from scriptutil import BranchType, Version, check_ant, download, run
# Solr-to-Java version mapping
java_versions = {6: 8, 7: 8, 8: 8, 9: 11}
editor = None
# Edit this to add other global jinja2 variables or filters
def expand_jinja(text, vars=None):
global_vars = OrderedDict({
'script_version': state.script_version,
'release_version': state.release_version,
'release_version_underscore': state.release_version.replace('.', '_'),
'release_date': state.get_release_date(),
'ivy2_folder': os.path.expanduser("~/.ivy2/"),
'config_path': state.config_path,
'rc_number': state.rc_number,
'script_branch': state.script_branch,
'release_folder': state.get_release_folder(),
'git_checkout_folder': state.get_git_checkout_folder(),
'git_website_folder': state.get_website_git_folder(),
'dist_url_base': 'https://dist.apache.org/repos/dist/dev/lucene',
'm2_repository_url': 'https://repository.apache.org/service/local/staging/deploy/maven2',
'dist_file_path': state.get_dist_folder(),
'rc_folder': state.get_rc_folder(),
'base_branch': state.get_base_branch_name(),
'release_branch': state.release_branch,
'stable_branch': state.get_stable_branch_name(),
'minor_branch': state.get_minor_branch_name(),
'release_type': state.release_type,
'is_feature_release': state.release_type in ['minor', 'major'],
'release_version_major': state.release_version_major,
'release_version_minor': state.release_version_minor,
'release_version_bugfix': state.release_version_bugfix,
'release_version_refguide': state.get_refguide_release() ,
'state': state,
'gpg_key' : state.get_gpg_key(),
'epoch': unix_time_millis(datetime.utcnow()),
'get_next_version': state.get_next_version(),
'current_git_rev': state.get_current_git_rev(),
'keys_downloaded': keys_downloaded(),
'editor': get_editor(),
'rename_cmd': 'ren' if is_windows() else 'mv',
'vote_close_72h': vote_close_72h_date().strftime("%Y-%m-%d %H:00 UTC"),
'vote_close_72h_epoch': unix_time_millis(vote_close_72h_date()),
'vote_close_72h_holidays': vote_close_72h_holidays(),
'lucene_news_file': lucene_news_file,
'solr_news_file': solr_news_file,
'load_lines': load_lines,
'set_java_home': set_java_home,
'latest_version': state.get_latest_version(),
'latest_lts_version': state.get_latest_lts_version(),
'master_version': state.get_master_version(),
'mirrored_versions': state.get_mirrored_versions(),
'mirrored_versions_to_delete': state.get_mirrored_versions_to_delete(),
'home': os.path.expanduser("~")
})
global_vars.update(state.get_todo_states())
if vars:
global_vars.update(vars)
filled = replace_templates(text)
try:
env = Environment(lstrip_blocks=True, keep_trailing_newline=False, trim_blocks=True)
env.filters['path_join'] = lambda paths: os.path.join(*paths)
env.filters['expanduser'] = lambda path: os.path.expanduser(path)
env.filters['formatdate'] = lambda date: (datetime.strftime(date, "%-d %B %Y") if date else "<date>" )
template = env.from_string(str(filled), globals=global_vars)
filled = template.render()
except Exception as e:
print("Exception while rendering jinja template %s: %s" % (str(filled)[:10], e))
return filled
def replace_templates(text):
tpl_lines = []
for line in text.splitlines():
if line.startswith("(( template="):
match = re.search(r"^\(\( template=(.+?) \)\)", line)
name = match.group(1)
tpl_lines.append(replace_templates(templates[name].strip()))
else:
tpl_lines.append(line)
return "\n".join(tpl_lines)
def getScriptVersion():
topLevelDir = os.path.join(os.path.abspath("%s/" % script_path), os.path.pardir, os.path.pardir)
reBaseVersion = re.compile(r'version\.base\s*=\s*(\d+\.\d+\.\d+)')
return reBaseVersion.search(open('%s/lucene/version.properties' % topLevelDir).read()).group(1)
def get_editor():
global editor
if editor is None:
if 'EDITOR' in os.environ:
if os.environ['EDITOR'] in ['vi', 'vim', 'nano', 'pico', 'emacs']:
print("WARNING: You have EDITOR set to %s, which will not work when launched from this tool. Please use an editor that launches a separate window/process" % os.environ['EDITOR'])
editor = os.environ['EDITOR']
elif is_windows():
editor = 'notepad.exe'
elif is_mac():
editor = 'open -a TextEdit'
else:
sys.exit("On Linux you have to set EDITOR variable to a command that will start an editor in its own window")
return editor
def check_prerequisites(todo=None):
if sys.version_info < (3, 4):
sys.exit("Script requires Python v3.4 or later")
try:
gpg_ver = run("gpg --version").splitlines()[0]
except:
sys.exit("You will need gpg installed")
if not check_ant().startswith('1.8'):
print("WARNING: This script will work best with ant 1.8. The script buildAndPushRelease.py may have problems with PGP password input under ant 1.10")
if not 'GPG_TTY' in os.environ:
print("WARNING: GPG_TTY environment variable is not set, GPG signing may not work correctly (try 'export GPG_TTY=$(tty)'")
if not 'JAVA8_HOME' in os.environ or not 'JAVA11_HOME' in os.environ:
sys.exit("Please set environment variables JAVA8_HOME and JAVA11_HOME")
try:
asciidoc_ver = run("asciidoctor -V").splitlines()[0]
except:
asciidoc_ver = ""
print("WARNING: In order to export asciidoc version to HTML, you will need asciidoctor installed")
try:
git_ver = run("git --version").splitlines()[0]
except:
sys.exit("You will need git installed")
try:
svn_ver = run("svn --version").splitlines()[0]
except:
sys.exit("You will need svn installed")
if not 'EDITOR' in os.environ:
print("WARNING: Environment variable $EDITOR not set, using %s" % get_editor())
if todo:
print("%s\n%s\n%s\n" % (gpg_ver, asciidoc_ver, git_ver))
return True
epoch = datetime.utcfromtimestamp(0)
def unix_time_millis(dt):
return int((dt - epoch).total_seconds() * 1000.0)
def bootstrap_todos(todo_list):
# Establish links from commands to to_do for finding todo vars
for tg in todo_list:
if dry_run:
print("Group %s" % tg.id)
for td in tg.get_todos():
if dry_run:
print(" Todo %s" % td.id)
cmds = td.commands
if cmds:
if dry_run:
print(" Commands")
cmds.todo_id = td.id
for cmd in cmds.commands:
if dry_run:
print(" Command %s" % cmd.cmd)
cmd.todo_id = td.id
print("Loaded TODO definitions from releaseWizard.yaml")
return todo_list
def maybe_remove_rc_from_svn():
todo = state.get_todo_by_id('import_svn')
if todo and todo.is_done():
print("import_svn done")
Commands(state.get_git_checkout_folder(),
"""Looks like you uploaded artifacts for {{ build_rc.git_rev | default("<git_rev>", True) }} to svn which needs to be removed.""",
[Command(
"""svn -m "Remove cancelled Lucene/Solr {{ release_version }} RC{{ rc_number }}" rm {{ dist_url }}""",
logfile="svn_rm.log",
tee=True,
vars={
'dist_folder': """lucene-solr-{{ release_version }}-RC{{ rc_number }}-rev{{ build_rc.git_rev | default("<git_rev>", True) }}""",
'dist_url': "{{ dist_url_base }}/{{ dist_folder }}"
}
)],
enable_execute=True, confirm_each_command=False).run()
# To be able to hide fields when dumping Yaml
class SecretYamlObject(yaml.YAMLObject):
hidden_fields = []
@classmethod
def to_yaml(cls,dumper,data):
print("Dumping object %s" % type(data))
new_data = copy.deepcopy(data)
for item in cls.hidden_fields:
if item in new_data.__dict__:
del new_data.__dict__[item]
for item in data.__dict__:
if item in new_data.__dict__ and new_data.__dict__[item] is None:
del new_data.__dict__[item]
return dumper.represent_yaml_object(cls.yaml_tag, new_data, cls,
flow_style=cls.yaml_flow_style)
def str_presenter(dumper, data):
if len(data.split('\n')) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
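# --- Illustrative sketch (not part of the original script) ---
# str_presenter() only takes effect once it is registered with PyYAML, as
# dump_yaml() does further down. A minimal standalone example:
#
#   yaml.add_representer(str, str_presenter)
#   yaml.dump({'note': 'line one\nline two'})
#   # multi-line strings are now emitted in block (|) style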
class ReleaseState:
def __init__(self, config_path, release_version, script_version):
self.script_version = script_version
self.config_path = config_path
self.todo_groups = None
self.todos = None
self.latest_version = None
self.previous_rcs = {}
self.rc_number = 1
self.start_date = unix_time_millis(datetime.utcnow())
self.script_branch = run("git rev-parse --abbrev-ref HEAD").strip()
self.mirrored_versions = None
try:
self.script_branch_type = scriptutil.find_branch_type()
except:
print("WARNING: This script shold (ideally) run from the release branch, not a feature branch (%s)" % self.script_branch)
self.script_branch_type = 'feature'
self.set_release_version(release_version)
def set_release_version(self, version):
self.validate_release_version(self.script_branch_type, self.script_branch, version)
self.release_version = version
v = Version.parse(version)
self.release_version_major = v.major
self.release_version_minor = v.minor
self.release_version_bugfix = v.bugfix
self.release_branch = "branch_%s_%s" % (v.major, v.minor)
if v.is_major_release():
self.release_type = 'major'
elif v.is_minor_release():
self.release_type = 'minor'
else:
self.release_type = 'bugfix'
def is_released(self):
return self.get_todo_by_id('announce_lucene').is_done()
def get_gpg_key(self):
gpg_task = self.get_todo_by_id('gpg')
if gpg_task.is_done():
return gpg_task.get_state()['gpg_key']
else:
return None
def get_release_date(self):
publish_task = self.get_todo_by_id('publish_maven')
if publish_task.is_done():
return unix_to_datetime(publish_task.get_state()['done_date'])
else:
return None
def get_release_date_iso(self):
release_date = self.get_release_date()
if release_date is None:
return "yyyy-mm-dd"
else:
return release_date.isoformat()[:10]
def get_latest_version(self):
if self.latest_version is None:
versions = self.get_mirrored_versions()
latest = versions[0]
for ver in versions:
if Version.parse(ver).gt(Version.parse(latest)):
latest = ver
self.latest_version = latest
self.save()
return state.latest_version
def get_mirrored_versions(self):
if state.mirrored_versions is None:
releases_str = load("https://projects.apache.org/json/foundation/releases.json", "utf-8")
releases = json.loads(releases_str)['lucene']
state.mirrored_versions = [ r for r in list(map(lambda y: y[7:], filter(lambda x: x.startswith('lucene-'), list(releases.keys())))) ]
return state.mirrored_versions
def get_mirrored_versions_to_delete(self):
versions = self.get_mirrored_versions()
to_keep = versions
if state.release_type == 'major':
to_keep = [self.release_version, self.get_latest_version()]
if state.release_type == 'minor':
to_keep = [self.release_version, self.get_latest_lts_version()]
if state.release_type == 'bugfix':
if Version.parse(state.release_version).major == Version.parse(state.get_latest_version()).major:
to_keep = [self.release_version, self.get_latest_lts_version()]
elif Version.parse(state.release_version).major == Version.parse(state.get_latest_lts_version()).major:
to_keep = [self.get_latest_version(), self.release_version]
else:
raise Exception("Release version %s must have same major version as current minor or lts release")
return [ver for ver in versions if ver not in to_keep]
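    # --- Illustrative note (not part of the original script) ---
    # Example of the keep rule above: for a bugfix release 8.4.1 where 8.4.0
    # is the latest mirrored release and 7.7.2 the latest LTS release, only
    # 8.4.1 and 7.7.2 are kept and every other mirrored version is returned
    # for deletion from the mirrors.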
def get_master_version(self):
v = Version.parse(self.get_latest_version())
return "%s.%s.%s" % (v.major + 1, 0, 0)
def get_latest_lts_version(self):
versions = self.get_mirrored_versions()
latest = self.get_latest_version()
lts_prefix = "%s." % (Version.parse(latest).major - 1)
lts_versions = list(filter(lambda x: x.startswith(lts_prefix), versions))
latest_lts = lts_versions[0]
for ver in lts_versions:
if Version.parse(ver).gt(Version.parse(latest_lts)):
latest_lts = ver
return latest_lts
def validate_release_version(self, branch_type, branch, release_version):
ver = Version.parse(release_version)
# print("release_version=%s, ver=%s" % (release_version, ver))
if branch_type == BranchType.release:
if not branch.startswith('branch_'):
sys.exit("Incompatible branch and branch_type")
if not ver.is_bugfix_release():
sys.exit("You can only release bugfix releases from an existing release branch")
elif branch_type == BranchType.stable:
            if not (branch.startswith('branch_') and branch.endswith('x')):
sys.exit("Incompatible branch and branch_type")
if not ver.is_minor_release():
sys.exit("You can only release minor releases from an existing stable branch")
elif branch_type == BranchType.unstable:
if not branch == 'master':
sys.exit("Incompatible branch and branch_type")
if not ver.is_major_release():
sys.exit("You can only release a new major version from master branch")
if not getScriptVersion() == release_version:
print("WARNING: Expected release version %s when on branch %s, but got %s" % (
getScriptVersion(), branch, release_version))
def get_base_branch_name(self):
v = Version.parse(self.release_version)
if v.is_major_release():
return 'master'
elif v.is_minor_release():
return self.get_stable_branch_name()
elif v.major == Version.parse(self.get_latest_version()).major:
return self.get_minor_branch_name()
else:
return self.release_branch
def clear_rc(self):
if ask_yes_no("Are you sure? This will clear and restart RC%s" % self.rc_number):
maybe_remove_rc_from_svn()
dict = {}
for g in list(filter(lambda x: x.in_rc_loop(), self.todo_groups)):
for t in g.get_todos():
t.clear()
print("Cleared RC TODO state")
try:
shutil.rmtree(self.get_rc_folder())
print("Cleared folder %s" % self.get_rc_folder())
except Exception as e:
print("WARN: Failed to clear %s, please do it manually with higher privileges" % self.get_rc_folder())
self.save()
def new_rc(self):
if ask_yes_no("Are you sure? This will abort current RC"):
maybe_remove_rc_from_svn()
dict = {}
for g in list(filter(lambda x: x.in_rc_loop(), self.todo_groups)):
for t in g.get_todos():
if t.applies(self.release_type):
dict[t.id] = copy.deepcopy(t.state)
t.clear()
self.previous_rcs["RC%d" % self.rc_number] = dict
self.rc_number += 1
self.save()
def to_dict(self):
tmp_todos = {}
for todo_id in self.todos:
t = self.todos[todo_id]
tmp_todos[todo_id] = copy.deepcopy(t.state)
dict = {
'script_version': self.script_version,
'release_version': self.release_version,
'start_date': self.start_date,
'rc_number': self.rc_number,
'script_branch': self.script_branch,
'todos': tmp_todos,
'previous_rcs': self.previous_rcs
}
if self.latest_version:
dict['latest_version'] = self.latest_version
return dict
def restore_from_dict(self, dict):
self.script_version = dict['script_version']
assert dict['release_version'] == self.release_version
if 'start_date' in dict:
self.start_date = dict['start_date']
if 'latest_version' in dict:
self.latest_version = dict['latest_version']
else:
self.latest_version = None
self.rc_number = dict['rc_number']
self.script_branch = dict['script_branch']
self.previous_rcs = copy.deepcopy(dict['previous_rcs'])
for todo_id in dict['todos']:
if todo_id in self.todos:
t = self.todos[todo_id]
for k in dict['todos'][todo_id]:
t.state[k] = dict['todos'][todo_id][k]
else:
print("Warning: Could not restore state for %s, Todo definition not found" % todo_id)
def load(self):
if os.path.exists(os.path.join(self.config_path, self.release_version, 'state.yaml')):
state_file = os.path.join(self.config_path, self.release_version, 'state.yaml')
with open(state_file, 'r') as fp:
try:
dict = yaml.load(fp, Loader=yaml.Loader)
self.restore_from_dict(dict)
print("Loaded state from %s" % state_file)
except Exception as e:
print("Failed to load state from %s: %s" % (state_file, e))
def save(self):
print("Saving")
if not os.path.exists(os.path.join(self.config_path, self.release_version)):
print("Creating folder %s" % os.path.join(self.config_path, self.release_version))
os.makedirs(os.path.join(self.config_path, self.release_version))
with open(os.path.join(self.config_path, self.release_version, 'state.yaml'), 'w') as fp:
yaml.dump(self.to_dict(), fp, sort_keys=False, default_flow_style=False)
def clear(self):
self.previous_rcs = {}
self.rc_number = 1
for t_id in self.todos:
t = self.todos[t_id]
t.state = {}
self.save()
def get_rc_number(self):
return self.rc_number
def get_current_git_rev(self):
try:
return run("git rev-parse HEAD", cwd=self.get_git_checkout_folder()).strip()
except:
return "<git-rev>"
def get_group_by_id(self, id):
lst = list(filter(lambda x: x.id == id, self.todo_groups))
if len(lst) == 1:
return lst[0]
else:
return None
def get_todo_by_id(self, id):
lst = list(filter(lambda x: x.id == id, self.todos.values()))
if len(lst) == 1:
return lst[0]
else:
return None
def get_todo_state_by_id(self, id):
lst = list(filter(lambda x: x.id == id, self.todos.values()))
if len(lst) == 1:
return lst[0].state
else:
return {}
def get_release_folder(self):
folder = os.path.join(self.config_path, self.release_version)
if not os.path.exists(folder):
print("Creating folder %s" % folder)
os.makedirs(folder)
return folder
def get_rc_folder(self):
folder = os.path.join(self.get_release_folder(), "RC%d" % self.rc_number)
if not os.path.exists(folder):
print("Creating folder %s" % folder)
os.makedirs(folder)
return folder
def get_dist_folder(self):
folder = os.path.join(self.get_rc_folder(), "dist")
return folder
def get_git_checkout_folder(self):
folder = os.path.join(self.get_release_folder(), "lucene-solr")
return folder
def get_website_git_folder(self):
folder = os.path.join(self.get_release_folder(), "lucene-site")
return folder
def get_minor_branch_name(self):
latest = state.get_latest_version()
if latest is not None:
v = Version.parse(latest)
return "branch_%s_%s" % (v.major, v.minor)
else:
raise Exception("Cannot find latest version")
def get_stable_branch_name(self):
v = Version.parse(self.get_latest_version())
return "branch_%sx" % v.major
def get_next_version(self):
if self.release_type == 'major':
return "%s.0" % (self.release_version_major + 1)
if self.release_type == 'minor':
return "%s.%s" % (self.release_version_major, self.release_version_minor + 1)
if self.release_type == 'bugfix':
return "%s.%s.%s" % (self.release_version_major, self.release_version_minor, self.release_version_bugfix + 1)
def get_refguide_release(self):
return "%s_%s" % (self.release_version_major, self.release_version_minor)
def get_java_home(self):
return self.get_java_home_for_version(self.release_version)
def get_java_home_for_version(self, version):
v = Version.parse(version)
java_ver = java_versions[v.major]
java_home_var = "JAVA%s_HOME" % java_ver
if java_home_var in os.environ:
return os.environ.get(java_home_var)
else:
raise Exception("Script needs environment variable %s" % java_home_var )
def get_java_cmd_for_version(self, version):
return os.path.join(self.get_java_home_for_version(version), "bin", "java")
def get_java_cmd(self):
return os.path.join(self.get_java_home(), "bin", "java")
def get_todo_states(self):
states = {}
if self.todos:
for todo_id in self.todos:
t = self.todos[todo_id]
states[todo_id] = copy.deepcopy(t.state)
return states
def init_todos(self, groups):
self.todo_groups = groups
self.todos = {}
for g in self.todo_groups:
for t in g.get_todos():
self.todos[t.id] = t
class TodoGroup(SecretYamlObject):
yaml_tag = u'!TodoGroup'
hidden_fields = []
def __init__(self, id, title, description, todos, is_in_rc_loop=None, depends=None):
self.id = id
self.title = title
self.description = description
self.depends = depends
self.is_in_rc_loop = is_in_rc_loop
self.todos = todos
@classmethod
def from_yaml(cls, loader, node):
fields = loader.construct_mapping(node, deep = True)
return TodoGroup(**fields)
def num_done(self):
        return sum(1 for x in self.todos if x.is_done())
def num_applies(self):
count = sum(1 for x in self.todos if x.applies(state.release_type))
# print("num_applies=%s" % count)
return count
def is_done(self):
# print("Done=%s, applies=%s" % (self.num_done(), self.num_applies()))
return self.num_done() >= self.num_applies()
def get_title(self):
# print("get_title: %s" % self.is_done())
prefix = ""
if self.is_done():
prefix = "✓ "
return "%s%s (%d/%d)" % (prefix, self.title, self.num_done(), self.num_applies())
def get_submenu(self):
menu = UpdatableConsoleMenu(title=self.title, subtitle=self.get_subtitle, prologue_text=self.get_description(),
screen=MyScreen())
menu.exit_item = CustomExitItem("Return")
for todo in self.get_todos():
if todo.applies(state.release_type):
menu.append_item(todo.get_menu_item())
return menu
def get_menu_item(self):
item = UpdatableSubmenuItem(self.get_title, self.get_submenu())
return item
def get_todos(self):
return self.todos
def in_rc_loop(self):
return self.is_in_rc_loop is True
def get_description(self):
desc = self.description
if desc:
return expand_jinja(desc)
else:
return None
def get_subtitle(self):
if self.depends:
ret_str = ""
for dep in ensure_list(self.depends):
g = state.get_group_by_id(dep)
if not g:
g = state.get_todo_by_id(dep)
if g and not g.is_done():
ret_str += "NOTE: Please first complete '%s'\n" % g.title
return ret_str.strip()
return None
class Todo(SecretYamlObject):
yaml_tag = u'!Todo'
hidden_fields = ['state']
def __init__(self, id, title, description=None, post_description=None, done=None, types=None, links=None,
commands=None, user_input=None, depends=None, vars=None, asciidoc=None, persist_vars=None,
function=None):
self.id = id
self.title = title
self.description = description
self.asciidoc = asciidoc
self.types = types
self.depends = depends
self.vars = vars
self.persist_vars = persist_vars
self.function = function
self.user_input = user_input
self.commands = commands
self.post_description = post_description
self.links = links
self.state = {}
self.set_done(done)
if self.types:
self.types = ensure_list(self.types)
for t in self.types:
if not t in ['minor', 'major', 'bugfix']:
sys.exit("Wrong Todo config for '%s'. Type needs to be either 'minor', 'major' or 'bugfix'" % self.id)
if commands:
self.commands.todo_id = self.id
for c in commands.commands:
c.todo_id = self.id
@classmethod
def from_yaml(cls, loader, node):
fields = loader.construct_mapping(node, deep = True)
return Todo(**fields)
def get_vars(self):
myvars = {}
if self.vars:
for k in self.vars:
val = self.vars[k]
if callable(val):
myvars[k] = expand_jinja(val(), vars=myvars)
else:
myvars[k] = expand_jinja(val, vars=myvars)
return myvars
def set_done(self, is_done):
if is_done:
self.state['done_date'] = unix_time_millis(datetime.utcnow())
if self.persist_vars:
for k in self.persist_vars:
self.state[k] = self.get_vars()[k]
else:
self.state.clear()
self.state['done'] = is_done
def applies(self, type):
if self.types:
return type in self.types
return True
def is_done(self):
return 'done' in self.state and self.state['done'] is True
def get_title(self):
prefix = ""
if self.is_done():
prefix = "✓ "
return expand_jinja("%s%s" % (prefix, self.title), self.get_vars_and_state())
def display_and_confirm(self):
try:
if self.depends:
ret_str = ""
for dep in ensure_list(self.depends):
g = state.get_group_by_id(dep)
if not g:
g = state.get_todo_by_id(dep)
if not g.is_done():
print("This step depends on '%s'. Please complete that first\n" % g.title)
return
desc = self.get_description()
if desc:
print("%s" % desc)
try:
if self.function and not self.is_done():
if not eval(self.function)(self):
return
except Exception as e:
print("Function call to %s for todo %s failed: %s" % (self.function, self.id, e))
raise e
if self.user_input and not self.is_done():
ui_list = ensure_list(self.user_input)
for ui in ui_list:
ui.run(self.state)
print()
if self.links:
print("\nLinks:\n")
for link in self.links:
print("- %s" % expand_jinja(link, self.get_vars_and_state()))
print()
cmds = self.get_commands()
if cmds:
if not self.is_done():
if not cmds.logs_prefix:
cmds.logs_prefix = self.id
cmds.run()
else:
print("This step is already completed. You have to first set it to 'not completed' in order to execute commands again.")
print()
if self.post_description:
print("%s" % self.get_post_description())
todostate = self.get_state()
if self.is_done() and len(todostate) > 2:
print("Variables registered\n")
for k in todostate:
if k == 'done' or k == 'done_date':
continue
print("* %s = %s" % (k, todostate[k]))
print()
completed = ask_yes_no("Mark task '%s' as completed?" % self.get_title())
self.set_done(completed)
state.save()
except Exception as e:
print("ERROR while executing todo %s (%s)" % (self.get_title(), e))
def get_menu_item(self):
return UpdatableFunctionItem(self.get_title, self.display_and_confirm)
def clone(self):
clone = Todo(self.id, self.title, description=self.description)
clone.state = copy.deepcopy(self.state)
return clone
def clear(self):
self.state.clear()
self.set_done(False)
def get_state(self):
return self.state
def get_description(self):
desc = self.description
if desc:
return expand_jinja(desc, vars=self.get_vars_and_state())
else:
return None
def get_post_description(self):
if self.post_description:
return expand_jinja(self.post_description, vars=self.get_vars_and_state())
else:
return None
def get_commands(self):
cmds = self.commands
return cmds
def get_asciidoc(self):
if self.asciidoc:
return expand_jinja(self.asciidoc, vars=self.get_vars_and_state())
else:
return None
def get_vars_and_state(self):
d = self.get_vars().copy()
d.update(self.get_state())
return d
def get_release_version():
v = str(input("Which version are you releasing? (x.y.z) "))
try:
version = Version.parse(v)
except:
print("Not a valid version %s" % v)
return get_release_version()
return str(version)
def get_subtitle():
applying_groups = list(filter(lambda x: x.num_applies() > 0, state.todo_groups))
done_groups = sum(1 for x in applying_groups if x.is_done())
return "Please complete the below checklist (Complete: %s/%s)" % (done_groups, len(applying_groups))
def get_todo_menuitem_title():
return "Go to checklist (RC%d)" % (state.rc_number)
def get_releasing_text():
return "Releasing Lucene/Solr %s RC%d" % (state.release_version, state.rc_number)
def get_start_new_rc_menu_title():
return "Abort RC%d and start a new RC%d" % (state.rc_number, state.rc_number + 1)
def start_new_rc():
state.new_rc()
print("Started RC%d" % state.rc_number)
def reset_state():
global state
if ask_yes_no("Are you sure? This will erase all current progress"):
maybe_remove_rc_from_svn()
shutil.rmtree(os.path.join(state.config_path, state.release_version))
state.clear()
def template(name, vars=None):
return expand_jinja(templates[name], vars=vars)
def help():
print(template('help'))
pause()
def ensure_list(o):
if o is None:
return []
if not isinstance(o, list):
return [o]
else:
return o
def open_file(filename):
print("Opening file %s" % filename)
if platform.system().startswith("Win"):
run("start %s" % filename)
else:
run("open %s" % filename)
def expand_multiline(cmd_txt, indent=0):
return re.sub(r' +', " %s\n %s" % (Commands.cmd_continuation_char, " "*indent), cmd_txt)
def unix_to_datetime(unix_stamp):
return datetime.utcfromtimestamp(unix_stamp / 1000)
def generate_asciidoc():
base_filename = os.path.join(state.get_release_folder(),
"lucene_solr_release_%s"
                                 % (state.release_version.replace(".", "_")))
filename_adoc = "%s.adoc" % base_filename
filename_html = "%s.html" % base_filename
fh = open(filename_adoc, "w")
fh.write("= Lucene/Solr Release %s\n\n" % state.release_version)
fh.write("(_Generated by releaseWizard.py v%s at %s_)\n\n"
% (getScriptVersion(), datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")))
fh.write(":numbered:\n\n")
fh.write("%s\n\n" % template('help'))
for group in state.todo_groups:
if group.num_applies() == 0:
continue
fh.write("== %s\n\n" % group.get_title())
fh.write("%s\n\n" % group.get_description())
for todo in group.get_todos():
if not todo.applies(state.release_type):
continue
fh.write("=== %s\n\n" % todo.get_title())
if todo.is_done():
fh.write("_Completed %s_\n\n" % unix_to_datetime(todo.state['done_date']).strftime(
"%Y-%m-%d %H:%M UTC"))
if todo.get_asciidoc():
fh.write("%s\n\n" % todo.get_asciidoc())
else:
desc = todo.get_description()
if desc:
fh.write("%s\n\n" % desc)
state_copy = copy.deepcopy(todo.state)
state_copy.pop('done', None)
state_copy.pop('done_date', None)
if len(state_copy) > 0 or todo.user_input is not None:
fh.write(".Variables collected in this step\n")
fh.write("|===\n")
fh.write("|Variable |Value\n")
mykeys = set()
for e in ensure_list(todo.user_input):
mykeys.add(e.name)
for e in state_copy.keys():
mykeys.add(e)
for key in mykeys:
val = "(not set)"
if key in state_copy:
val = state_copy[key]
fh.write("\n|%s\n|%s\n" % (key, val))
fh.write("|===\n\n")
cmds = todo.get_commands()
if cmds:
if cmds.commands_text:
fh.write("%s\n\n" % cmds.get_commands_text())
fh.write("[source,sh]\n----\n")
if cmds.env:
for key in cmds.env:
val = cmds.env[key]
if is_windows():
fh.write("SET %s=%s\n" % (key, val))
else:
fh.write("export %s=%s\n" % (key, val))
fh.write(abbreviate_homedir("cd %s\n" % cmds.get_root_folder()))
cmds2 = ensure_list(cmds.commands)
for c in cmds2:
for line in c.display_cmd():
fh.write("%s\n" % line)
fh.write("----\n\n")
if todo.post_description and not todo.get_asciidoc():
fh.write("\n%s\n\n" % todo.get_post_description())
if todo.links:
fh.write("Links:\n\n")
for l in todo.links:
fh.write("* %s\n" % expand_jinja(l))
fh.write("\n")
fh.close()
print("Wrote file %s" % os.path.join(state.get_release_folder(), filename_adoc))
print("Running command 'asciidoctor %s'" % filename_adoc)
run_follow("asciidoctor %s" % filename_adoc)
if os.path.exists(filename_html):
open_file(filename_html)
else:
print("Failed generating HTML version, please install asciidoctor")
pause()
def load_rc():
lucenerc = os.path.expanduser("~/.lucenerc")
try:
with open(lucenerc, 'r') as fp:
return json.load(fp)
except:
return None
def store_rc(release_root, release_version=None):
lucenerc = os.path.expanduser("~/.lucenerc")
dict = {}
dict['root'] = release_root
if release_version:
dict['release_version'] = release_version
with open(lucenerc, "w") as fp:
json.dump(dict, fp, indent=2)
def release_other_version():
if not state.is_released():
maybe_remove_rc_from_svn()
store_rc(state.config_path, None)
print("Please restart the wizard")
sys.exit(0)
def file_to_string(filename):
with open(filename, encoding='utf8') as f:
return f.read().strip()
def download_keys():
download('KEYS', "https://archive.apache.org/dist/lucene/KEYS", state.config_path)
def keys_downloaded():
return os.path.exists(os.path.join(state.config_path, "KEYS"))
def dump_yaml():
file = open(os.path.join(script_path, "releaseWizard.yaml"), "w")
yaml.add_representer(str, str_presenter)
yaml.Dumper.ignore_aliases = lambda *args : True
dump_obj = {'templates': templates,
'groups': state.todo_groups}
yaml.dump(dump_obj, width=180, stream=file, sort_keys=False, default_flow_style=False)
def parse_config():
description = 'Script to guide a RM through the whole release process'
parser = argparse.ArgumentParser(description=description, epilog="Go push that release!",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--dry-run', dest='dry', action='store_true', default=False,
help='Do not execute any commands, but echo them instead. Display extra debug info')
parser.add_argument('--init', action='store_true', default=False,
help='Re-initialize root and version')
config = parser.parse_args()
return config
def load(urlString, encoding="utf-8"):
try:
content = urllib.request.urlopen(urlString).read().decode(encoding)
except Exception as e:
print('Retrying download of url %s after exception: %s' % (urlString, e))
content = urllib.request.urlopen(urlString).read().decode(encoding)
return content
def configure_pgp(gpg_todo):
print("Based on your Apache ID we'll lookup your key online\n"
"and through this complete the 'gpg' prerequisite task.\n")
gpg_state = gpg_todo.get_state()
id = str(input("Please enter your Apache id: (ENTER=skip) "))
if id.strip() == '':
return False
key_url = "https://home.apache.org/keys/committer/%s.asc" % id.strip()
committer_key = load(key_url)
lines = committer_key.splitlines()
keyid_linenum = None
for idx, line in enumerate(lines):
if line == 'ASF ID: %s' % id:
keyid_linenum = idx+1
break
if keyid_linenum:
keyid_line = lines[keyid_linenum]
assert keyid_line.startswith('LDAP PGP key: ')
gpg_id = keyid_line[14:].replace(" ", "")[-8:]
print("Found gpg key id %s on file at Apache (%s)" % (gpg_id, key_url))
else:
print(textwrap.dedent("""\
Could not find your GPG key from Apache servers.
Please make sure you have registered your key ID in
id.apache.org, see links for more info."""))
gpg_id = str(input("Enter your key ID manually, 8 last characters (ENTER=skip): "))
if gpg_id.strip() == '':
return False
elif len(gpg_id) != 8:
print("gpg id must be the last 8 characters of your key id")
gpg_id = gpg_id.upper()
try:
res = run("gpg --list-secret-keys %s" % gpg_id)
print("Found key %s on your private gpg keychain" % gpg_id)
# Check rsa and key length >= 4096
match = re.search(r'^sec +((rsa|dsa)(\d{4})) ', res)
type = "(unknown)"
length = -1
if match:
type = match.group(2)
length = int(match.group(3))
else:
match = re.search(r'^sec +((\d{4})([DR])/.*?) ', res)
if match:
type = 'rsa' if match.group(3) == 'R' else 'dsa'
length = int(match.group(2))
else:
print("Could not determine type and key size for your key")
print("%s" % res)
if not ask_yes_no("Is your key of type RSA and size >= 2048 (ideally 4096)? "):
print("Sorry, please generate a new key, add to KEYS and register with id.apache.org")
return False
if not type == 'rsa':
print("We strongly recommend RSA type key, your is '%s'. Consider generating a new key." % type.upper())
if length < 2048:
print("Your key has key length of %s. Cannot use < 2048, please generate a new key before doing the release" % length)
return False
if length < 4096:
print("Your key length is < 4096, Please generate a stronger key.")
print("Alternatively, follow instructions in http://www.apache.org/dev/release-signing.html#note")
if not ask_yes_no("Have you configured your gpg to avoid SHA-1?"):
print("Please either generate a strong key or reconfigure your client")
return False
print("Validated that your key is of type RSA and has a length >= 2048 (%s)" % length)
except:
print(textwrap.dedent("""\
Key not found on your private gpg keychain. In order to sign the release you'll
need to fix this, then try again"""))
return False
try:
lines = run("gpg --check-signatures %s" % gpg_id).splitlines()
sigs = 0
apache_sigs = 0
for line in lines:
if line.startswith("sig") and not gpg_id in line:
sigs += 1
if '@apache.org' in line:
apache_sigs += 1
print("Your key has %s signatures, of which %s are by committers (@apache.org address)" % (sigs, apache_sigs))
if apache_sigs < 1:
print(textwrap.dedent("""\
Your key is not signed by any other committer.
Please review http://www.apache.org/dev/openpgp.html#apache-wot
and make sure to get your key signed until next time.
You may want to run 'gpg --refresh-keys' to refresh your keychain."""))
uses_apacheid = is_code_signing_key = False
for line in lines:
if line.startswith("uid") and "%s@apache" % id in line:
uses_apacheid = True
if 'CODE SIGNING KEY' in line.upper():
is_code_signing_key = True
if not uses_apacheid:
print("WARNING: Your key should use your apache-id email address, see http://www.apache.org/dev/release-signing.html#user-id")
if not is_code_signing_key:
print("WARNING: You code signing key should be labeled 'CODE SIGNING KEY', see http://www.apache.org/dev/release-signing.html#key-comment")
except Exception as e:
print("Could not check signatures of your key: %s" % e)
download_keys()
keys_text = file_to_string(os.path.join(state.config_path, "KEYS"))
if gpg_id in keys_text or "%s %s" % (gpg_id[:4], gpg_id[-4:]) in keys_text:
print("Found your key ID in official KEYS file. KEYS file is not cached locally.")
else:
print(textwrap.dedent("""\
Could not find your key ID in official KEYS file.
Please make sure it is added to https://dist.apache.org/repos/dist/release/lucene/KEYS
and committed to svn. Then re-try this initialization"""))
if not ask_yes_no("Do you want to continue without fixing KEYS file? (not recommended) "):
return False
gpg_state['apache_id'] = id
gpg_state['gpg_key'] = gpg_id
return True
def pause(fun=None):
if fun:
fun()
input("\nPress ENTER to continue...")
# Custom classes for ConsoleMenu, to make menu texts dynamic
# Needed until https://github.com/aegirhall/console-menu/pull/25 is released
# See https://pypi.org/project/console-menu/ for other docs
class UpdatableConsoleMenu(ConsoleMenu):
def __repr__(self):
return "%s: %s. %d items" % (self.get_title(), self.get_subtitle(), len(self.items))
def draw(self):
"""
Refreshes the screen and redraws the menu. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.printf(self.formatter.format(title=self.get_title(), subtitle=self.get_subtitle(), items=self.items,
prologue_text=self.get_prologue_text(), epilogue_text=self.get_epilogue_text()))
# Getters to get text in case method reference
def get_title(self):
return self.title() if callable(self.title) else self.title
def get_subtitle(self):
return self.subtitle() if callable(self.subtitle) else self.subtitle
def get_prologue_text(self):
return self.prologue_text() if callable(self.prologue_text) else self.prologue_text
def get_epilogue_text(self):
return self.epilogue_text() if callable(self.epilogue_text) else self.epilogue_text
class UpdatableSubmenuItem(SubmenuItem):
def __init__(self, text, submenu, menu=None, should_exit=False):
"""
:ivar ConsoleMenu self.submenu: The submenu to be opened when this item is selected
"""
super(SubmenuItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
self.submenu = submenu
if menu:
self.get_submenu().parent = menu
def show(self, index):
return "%2d - %s" % (index + 1, self.get_text())
# Getters to get text in case method reference
def get_text(self):
return self.text() if callable(self.text) else self.text
def set_menu(self, menu):
"""
Sets the menu of this item.
Should be used instead of directly accessing the menu attribute for this class.
:param ConsoleMenu menu: the menu
"""
self.menu = menu
self.get_submenu().parent = menu
def action(self):
"""
This class overrides this method
"""
self.get_submenu().start()
def clean_up(self):
"""
This class overrides this method
"""
self.get_submenu().join()
self.menu.clear_screen()
self.menu.resume()
def get_return(self):
"""
:return: The returned value in the submenu
"""
return self.get_submenu().returned_value
def get_submenu(self):
"""
We unwrap the submenu variable in case it is a reference to a method that returns a submenu
"""
return self.submenu if not callable(self.submenu) else self.submenu()
class UpdatableFunctionItem(FunctionItem):
def show(self, index):
return "%2d - %s" % (index + 1, self.get_text())
# Getters to get text in case method reference
def get_text(self):
return self.text() if callable(self.text) else self.text
class MyScreen(Screen):
def clear(self):
return
class CustomExitItem(ExitItem):
def show(self, index):
return super(ExitItem, self).show(index)
def get_return(self):
return ""
def main():
global state
global dry_run
global templates
print("Lucene/Solr releaseWizard v%s" % getScriptVersion())
c = parse_config()
if c.dry:
print("Entering dry-run mode where all commands will be echoed instead of executed")
dry_run = True
release_root = os.path.expanduser("~/.lucene-releases")
if not load_rc() or c.init:
print("Initializing")
dir_ok = False
root = str(input("Choose root folder: [~/.lucene-releases] "))
if os.path.exists(root) and (not os.path.isdir(root) or not os.access(root, os.W_OK)):
sys.exit("Root %s exists but is not a directory or is not writable" % root)
if not root == '':
if root.startswith("~/"):
release_root = os.path.expanduser(root)
else:
release_root = os.path.abspath(root)
if not os.path.exists(release_root):
try:
print("Creating release root %s" % release_root)
os.makedirs(release_root)
except Exception as e:
sys.exit("Error while creating %s: %s" % (release_root, e))
release_version = get_release_version()
else:
conf = load_rc()
release_root = conf['root']
if 'release_version' in conf:
release_version = conf['release_version']
else:
release_version = get_release_version()
store_rc(release_root, release_version)
check_prerequisites()
try:
y = yaml.load(open(os.path.join(script_path, "releaseWizard.yaml"), "r"), Loader=yaml.Loader)
templates = y.get('templates')
todo_list = y.get('groups')
state = ReleaseState(release_root, release_version, getScriptVersion())
state.init_todos(bootstrap_todos(todo_list))
state.load()
except Exception as e:
sys.exit("Failed initializing. %s" % e)
state.save()
    # Smoketester requires JAVA_HOME to point to JAVA8 and JAVA11_HOME to point to Java11
os.environ['JAVA_HOME'] = state.get_java_home()
os.environ['JAVACMD'] = state.get_java_cmd()
global lucene_news_file
global solr_news_file
lucene_news_file = os.path.join(state.get_website_git_folder(), 'content', 'core', 'core_news',
"%s-%s-available.md" % (state.get_release_date_iso(), state.release_version.replace(".", "-")))
solr_news_file = os.path.join(state.get_website_git_folder(), 'content', 'solr', 'solr_news',
"%s-%s-available.md" % (state.get_release_date_iso(), state.release_version.replace(".", "-")))
website_folder = state.get_website_git_folder()
main_menu = UpdatableConsoleMenu(title="Lucene/Solr ReleaseWizard",
subtitle=get_releasing_text,
prologue_text="Welcome to the release wizard. From here you can manage the process including creating new RCs. "
"All changes are persisted, so you can exit any time and continue later. Make sure to read the Help section.",
epilogue_text="® 2020 The Lucene/Solr project. Licensed under the Apache License 2.0\nScript version v%s)" % getScriptVersion(),
screen=MyScreen())
todo_menu = UpdatableConsoleMenu(title=get_releasing_text,
subtitle=get_subtitle,
prologue_text=None,
epilogue_text=None,
screen=MyScreen())
todo_menu.exit_item = CustomExitItem("Return")
for todo_group in state.todo_groups:
if todo_group.num_applies() >= 0:
menu_item = todo_group.get_menu_item()
menu_item.set_menu(todo_menu)
todo_menu.append_item(menu_item)
main_menu.append_item(UpdatableSubmenuItem(get_todo_menuitem_title, todo_menu, menu=main_menu))
main_menu.append_item(UpdatableFunctionItem(get_start_new_rc_menu_title, start_new_rc))
main_menu.append_item(UpdatableFunctionItem('Clear and restart current RC', state.clear_rc))
main_menu.append_item(UpdatableFunctionItem("Clear all state, restart the %s release" % state.release_version, reset_state))
main_menu.append_item(UpdatableFunctionItem('Start release for a different version', release_other_version))
main_menu.append_item(UpdatableFunctionItem('Generate Asciidoc guide for this release', generate_asciidoc))
# main_menu.append_item(UpdatableFunctionItem('Dump YAML', dump_yaml))
main_menu.append_item(UpdatableFunctionItem('Help', help))
main_menu.show()
sys.path.append(os.path.dirname(__file__))
current_git_root = os.path.abspath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, os.path.pardir))
dry_run = False
major_minor = ['major', 'minor']
script_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_path)
def git_checkout_folder():
return state.get_git_checkout_folder()
def tail_file(file, lines):
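    # Print roughly the last `lines` lines of `file` without reading the whole
    # file into memory: seek backwards in growing bufsize steps until enough
    # lines (or the start of the file) have been buffered.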
bufsize = 8192
fsize = os.stat(file).st_size
with open(file) as f:
if bufsize >= fsize:
bufsize = fsize
idx = 0
while True:
idx += 1
seek_pos = fsize - bufsize * idx
if seek_pos < 0:
seek_pos = 0
f.seek(seek_pos)
data = []
data.extend(f.readlines())
if len(data) >= lines or f.tell() == 0 or seek_pos == 0:
if not seek_pos == 0:
print("Tailing last %d lines of file %s" % (lines, file))
print(''.join(data[-lines:]))
break
def run_with_log_tail(command, cwd, logfile=None, tail_lines=10, tee=False, live=False, shell=None):
fh = sys.stdout
if logfile:
logdir = os.path.dirname(logfile)
if not os.path.exists(logdir):
os.makedirs(logdir)
fh = open(logfile, 'w')
rc = run_follow(command, cwd, fh=fh, tee=tee, live=live, shell=shell)
if logfile:
fh.close()
if not tee and tail_lines and tail_lines > 0:
tail_file(logfile, tail_lines)
return rc
def ask_yes_no(text):
answer = None
while answer not in ['y', 'n']:
answer = str(input("\nQ: %s (y/n): " % text))
print("\n")
return answer == 'y'
def abbreviate_line(line, width):
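    # Shorten a long line so progress output fits on one terminal row: keep the
    # first (width // 2 - 5) characters, insert ".....", then the last
    # (width // 2) characters; shorter lines are right-padded with spaces instead.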
line = line.rstrip()
if len(line) > width:
line = "%s.....%s" % (line[:(width / 2 - 5)], line[-(width / 2):])
else:
line = "%s%s" % (line, " " * (width - len(line) + 2))
return line
def print_line_cr(line, linenum, stdout=True, tee=False):
if not tee:
if not stdout:
print("[line %s] %s" % (linenum, abbreviate_line(line, 80)), end='\r')
else:
if line.endswith("\r"):
print(line.rstrip(), end='\r')
else:
print(line.rstrip())
def run_follow(command, cwd=None, fh=sys.stdout, tee=False, live=False, shell=None):
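    # Run `command` and stream its output as it appears: both stdout and stderr
    # are switched to non-blocking mode with fcntl so the loop below can poll
    # them, echoing progress to the console and writing output to `fh`
    # (stderr lines are collected and appended at the end).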
doShell = '&&' in command or '&' in command or shell is not None
if not doShell and not isinstance(command, list):
command = shlex.split(command)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd,
universal_newlines=True, bufsize=0, close_fds=True, shell=doShell)
lines_written = 0
fl = fcntl.fcntl(process.stdout, fcntl.F_GETFL)
fcntl.fcntl(process.stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
flerr = fcntl.fcntl(process.stderr, fcntl.F_GETFL)
fcntl.fcntl(process.stderr, fcntl.F_SETFL, flerr | os.O_NONBLOCK)
endstdout = endstderr = False
errlines = []
while not (endstderr and endstdout):
lines_before = lines_written
if not endstdout:
try:
if live:
chars = process.stdout.read()
if chars == '' and process.poll() is not None:
endstdout = True
else:
fh.write(chars)
fh.flush()
if '\n' in chars:
lines_written += 1
else:
line = process.stdout.readline()
if line == '' and process.poll() is not None:
endstdout = True
else:
fh.write("%s\n" % line.rstrip())
fh.flush()
lines_written += 1
print_line_cr(line, lines_written, stdout=(fh == sys.stdout), tee=tee)
except Exception as ioe:
pass
if not endstderr:
try:
if live:
chars = process.stderr.read()
if chars == '' and process.poll() is not None:
endstderr = True
else:
fh.write(chars)
fh.flush()
if '\n' in chars:
lines_written += 1
else:
line = process.stderr.readline()
if line == '' and process.poll() is not None:
endstderr = True
else:
errlines.append("%s\n" % line.rstrip())
lines_written += 1
print_line_cr(line, lines_written, stdout=(fh == sys.stdout), tee=tee)
except Exception as e:
pass
if not lines_written > lines_before:
# if no output then sleep a bit before checking again
time.sleep(0.1)
print(" " * 80)
rc = process.poll()
if len(errlines) > 0:
for line in errlines:
fh.write("%s\n" % line.rstrip())
fh.flush()
return rc
def is_windows():
return platform.system().startswith("Win")
def is_mac():
return platform.system().startswith("Darwin")
def is_linux():
return platform.system().startswith("Linux")
class Commands(SecretYamlObject):
yaml_tag = u'!Commands'
hidden_fields = ['todo_id']
cmd_continuation_char = "^" if is_windows() else "\\"
def __init__(self, root_folder, commands_text=None, commands=None, logs_prefix=None, run_text=None, enable_execute=None,
confirm_each_command=None, env=None, vars=None, todo_id=None, remove_files=None):
self.root_folder = root_folder
self.commands_text = commands_text
self.vars = vars
self.env = env
self.run_text = run_text
self.remove_files = remove_files
self.todo_id = todo_id
self.logs_prefix = logs_prefix
self.enable_execute = enable_execute
self.confirm_each_command = confirm_each_command
self.commands = commands
for c in self.commands:
c.todo_id = todo_id
@classmethod
def from_yaml(cls, loader, node):
fields = loader.construct_mapping(node, deep = True)
return Commands(**fields)
def run(self):
root = self.get_root_folder()
if self.commands_text:
print(self.get_commands_text())
if self.env:
for key in self.env:
val = self.jinjaify(self.env[key])
os.environ[key] = val
if is_windows():
print("\n SET %s=%s" % (key, val))
else:
print("\n export %s=%s" % (key, val))
print(abbreviate_homedir("\n cd %s" % root))
commands = ensure_list(self.commands)
for cmd in commands:
for line in cmd.display_cmd():
print(" %s" % line)
print()
if not self.enable_execute is False:
if self.run_text:
print("\n%s\n" % self.get_run_text())
if len(commands) > 1:
if not self.confirm_each_command is False:
print("You will get prompted before running each individual command.")
else:
print(
"You will not be prompted for each command but will see the ouput of each. If one command fails the execution will stop.")
success = True
if ask_yes_no("Do you want me to run these commands now?"):
if self.remove_files:
for _f in ensure_list(self.get_remove_files()):
f = os.path.join(root, _f)
if os.path.exists(f):
filefolder = "File" if os.path.isfile(f) else "Folder"
if ask_yes_no("%s %s already exists. Shall I remove it now?" % (filefolder, f)) and not dry_run:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
index = 0
log_folder = self.logs_prefix if len(commands) > 1 else None
for cmd in commands:
index += 1
if len(commands) > 1:
log_prefix = "%02d_" % index
else:
log_prefix = self.logs_prefix if self.logs_prefix else ''
if not log_prefix[-1:] == '_':
log_prefix += "_"
cwd = root
if cmd.cwd:
cwd = os.path.join(root, cmd.cwd)
folder_prefix = ''
if cmd.cwd:
folder_prefix = cmd.cwd + "_"
if self.confirm_each_command is False or len(commands) == 1 or ask_yes_no("Shall I run '%s' in folder '%s'" % (cmd, cwd)):
if self.confirm_each_command is False:
print("------------\nRunning '%s' in folder '%s'" % (cmd, cwd))
logfilename = cmd.logfile
logfile = None
cmd_to_run = "%s%s" % ("echo Dry run, command is: " if dry_run else "", cmd.get_cmd())
if cmd.redirect:
try:
out = run(cmd_to_run, cwd=cwd)
mode = 'a' if cmd.redirect_append is True else 'w'
with open(os.path.join(root, cwd, cmd.get_redirect()), mode) as outfile:
outfile.write(out)
outfile.flush()
print("Wrote %s bytes to redirect file %s" % (len(out), cmd.get_redirect()))
except Exception as e:
print("Command %s failed: %s" % (cmd_to_run, e))
success = False
break
else:
if not cmd.stdout:
if not log_folder:
log_folder = os.path.join(state.get_rc_folder(), "logs")
elif not os.path.isabs(log_folder):
log_folder = os.path.join(state.get_rc_folder(), "logs", log_folder)
if not logfilename:
logfilename = "%s.log" % re.sub(r"\W", "_", cmd.get_cmd())
logfile = os.path.join(log_folder, "%s%s%s" % (log_prefix, folder_prefix, logfilename))
if cmd.tee:
print("Output of command will be printed (logfile=%s)" % logfile)
elif cmd.live:
print("Output will be shown live byte by byte")
logfile = None
else:
print("Wait until command completes... Full log in %s\n" % logfile)
if cmd.comment:
print("# %s\n" % cmd.get_comment())
start_time = time.time()
returncode = run_with_log_tail(cmd_to_run, cwd, logfile=logfile, tee=cmd.tee, tail_lines=25,
live=cmd.live, shell=cmd.shell)
elapsed = time.time() - start_time
if not returncode == 0:
if cmd.should_fail:
print("Command failed, which was expected")
success = True
else:
print("WARN: Command %s returned with error" % cmd.get_cmd())
success = False
break
else:
if cmd.should_fail and not dry_run:
print("Expected command to fail, but it succeeded.")
success = False
break
else:
if elapsed > 30:
print("Command completed in %s seconds" % elapsed)
if not success:
print("WARNING: One or more commands failed, you may want to check the logs")
return success
def get_root_folder(self):
return self.jinjaify(self.root_folder)
def get_commands_text(self):
return self.jinjaify(self.commands_text)
def get_run_text(self):
return self.jinjaify(self.run_text)
def get_remove_files(self):
return self.jinjaify(self.remove_files)
def get_vars(self):
myvars = {}
if self.vars:
for k in self.vars:
val = self.vars[k]
if callable(val):
myvars[k] = expand_jinja(val(), vars=myvars)
else:
myvars[k] = expand_jinja(val, vars=myvars)
return myvars
def jinjaify(self, data, join=False):
if not data:
return None
v = self.get_vars()
if self.todo_id:
v.update(state.get_todo_by_id(self.todo_id).get_vars())
if isinstance(data, list):
if join:
return expand_jinja(" ".join(data), v)
else:
res = []
for rf in data:
res.append(expand_jinja(rf, v))
return res
else:
return expand_jinja(data, v)
def abbreviate_homedir(line):
if is_windows():
if 'HOME' in os.environ:
return re.sub(r'([^/]|\b)%s' % os.path.expanduser('~'), "\\1%HOME%", line)
elif 'USERPROFILE' in os.environ:
return re.sub(r'([^/]|\b)%s' % os.path.expanduser('~'), "\\1%USERPROFILE%", line)
else:
return re.sub(r'([^/]|\b)%s' % os.path.expanduser('~'), "\\1~", line)
class Command(SecretYamlObject):
yaml_tag = u'!Command'
hidden_fields = ['todo_id']
def __init__(self, cmd, cwd=None, stdout=None, logfile=None, tee=None, live=None, comment=None, vars=None,
todo_id=None, should_fail=None, redirect=None, redirect_append=None, shell=None):
self.cmd = cmd
self.cwd = cwd
self.comment = comment
self.logfile = logfile
self.vars = vars
self.tee = tee
self.live = live
self.stdout = stdout
self.should_fail = should_fail
self.shell = shell
self.todo_id = todo_id
self.redirect_append = redirect_append
self.redirect = redirect
if tee and stdout:
self.stdout = None
print("Command %s specifies 'tee' and 'stdout', using only 'tee'" % self.cmd)
if live and stdout:
self.stdout = None
print("Command %s specifies 'live' and 'stdout', using only 'live'" % self.cmd)
if live and tee:
self.tee = None
print("Command %s specifies 'tee' and 'live', using only 'live'" % self.cmd)
if redirect and (tee or stdout or live):
self.tee = self.stdout = self.live = None
print("Command %s specifies 'redirect' and other out options at the same time. Using redirect only" % self.cmd)
@classmethod
def from_yaml(cls, loader, node):
fields = loader.construct_mapping(node, deep = True)
return Command(**fields)
def get_comment(self):
return self.jinjaify(self.comment)
def get_redirect(self):
return self.jinjaify(self.redirect)
def get_cmd(self):
return self.jinjaify(self.cmd, join=True)
def get_vars(self):
myvars = {}
if self.vars:
for k in self.vars:
val = self.vars[k]
if callable(val):
myvars[k] = expand_jinja(val(), vars=myvars)
else:
myvars[k] = expand_jinja(val, vars=myvars)
return myvars
def __str__(self):
return self.get_cmd()
def jinjaify(self, data, join=False):
v = self.get_vars()
if self.todo_id:
v.update(state.get_todo_by_id(self.todo_id).get_vars())
if isinstance(data, list):
if join:
return expand_jinja(" ".join(data), v)
else:
res = []
for rf in data:
res.append(expand_jinja(rf, v))
return res
else:
return expand_jinja(data, v)
def display_cmd(self):
lines = []
pre = post = ''
if self.comment:
if is_windows():
lines.append("REM %s" % self.get_comment())
else:
lines.append("# %s" % self.get_comment())
if self.cwd:
lines.append("pushd %s" % self.cwd)
redir = "" if self.redirect is None else " %s %s" % (">" if self.redirect_append is None else ">>" , self.get_redirect())
line = "%s%s" % (expand_multiline(self.get_cmd(), indent=2), redir)
# Print ~ or %HOME% rather than the full expanded homedir path
line = abbreviate_homedir(line)
lines.append(line)
if self.cwd:
lines.append("popd")
return lines
class UserInput(SecretYamlObject):
yaml_tag = u'!UserInput'
def __init__(self, name, prompt, type=None):
self.type = type
self.prompt = prompt
self.name = name
@classmethod
def from_yaml(cls, loader, node):
fields = loader.construct_mapping(node, deep = True)
return UserInput(**fields)
def run(self, dict=None):
correct = False
while not correct:
try:
result = str(input("%s : " % self.prompt))
if self.type and self.type == 'int':
result = int(result)
correct = True
except Exception as e:
print("Incorrect input: %s, try again" % e)
continue
if dict:
dict[self.name] = result
return result
def create_ical(todo):
if ask_yes_no("Do you want to add a Calendar reminder for the close vote time?"):
c = Calendar()
e = Event()
e.name = "Lucene/Solr %s vote ends" % state.release_version
e.begin = vote_close_72h_date()
e.description = "Remember to sum up votes and continue release :)"
c.events.add(e)
ics_file = os.path.join(state.get_rc_folder(), 'vote_end.ics')
with open(ics_file, 'w') as my_file:
my_file.writelines(c)
open_file(ics_file)
return True
today = datetime.utcnow().date()
sundays = {(today + timedelta(days=x)): 'Sunday' for x in range(10) if (today + timedelta(days=x)).weekday() == 6}
y = datetime.utcnow().year
years = [y, y+1]
non_working = holidays.CA(years=years) + holidays.US(years=years) + holidays.England(years=years) \
+ holidays.DE(years=years) + holidays.NO(years=years) + holidays.IND(years=years) + holidays.RU(years=years)
def vote_close_72h_date():
# Voting open at least 72 hours according to ASF policy
return datetime.utcnow() + timedelta(hours=73)
def vote_close_72h_holidays():
days = 0
day_offset = -1
holidays = []
# Warn RM about major holidays coming up that should perhaps extend the voting deadline
# Warning will be given for Sunday or a public holiday observed by 3 or more [CA, US, EN, DE, NO, IND, RU]
while days < 3:
day_offset += 1
d = today + timedelta(days=day_offset)
if not (d in sundays or (d in non_working and len(non_working[d]) >= 2)):
days += 1
else:
if d in sundays:
holidays.append("%s (Sunday)" % d)
else:
holidays.append("%s (%s)" % (d, non_working[d]))
return holidays if len(holidays) > 0 else None
def prepare_announce_lucene(todo):
if not os.path.exists(lucene_news_file):
lucene_text = expand_jinja("(( template=announce_lucene ))")
with open(lucene_news_file, 'w') as fp:
fp.write(lucene_text)
# print("Wrote Lucene announce draft to %s" % lucene_news_file)
else:
print("Draft already exist, not re-generating")
return True
def prepare_announce_solr(todo):
if not os.path.exists(solr_news_file):
solr_text = expand_jinja("(( template=announce_solr ))")
with open(solr_news_file, 'w') as fp:
fp.write(solr_text)
# print("Wrote Solr announce draft to %s" % solr_news_file)
else:
print("Draft already exist, not re-generating")
return True
def check_artifacts_available(todo):
try:
cdnUrl = expand_jinja("https://dlcdn.apache.org/lucene/java/{{ release_version }}/lucene-{{ release_version }}-src.tgz.asc")
load(cdnUrl)
print("Found %s" % cdnUrl)
except Exception as e:
print("Could not fetch %s (%s)" % (cdnUrl, e))
return False
try:
mavenUrl = expand_jinja("https://repo1.maven.org/maven2/org/apache/lucene/lucene-core/{{ release_version }}/lucene-core-{{ release_version }}.pom.asc")
load(mavenUrl)
print("Found %s" % mavenUrl)
except Exception as e:
print("Could not fetch %s (%s)" % (mavenUrl, e))
return False
return True
def set_java_home(version):
os.environ['JAVA_HOME'] = state.get_java_home_for_version(version)
os.environ['JAVACMD'] = state.get_java_cmd_for_version(version)
def load_lines(file, from_line=0):
if os.path.exists(file):
with open(file, 'r') as fp:
return fp.readlines()[from_line:]
else:
return ["<Please paste the announcement text here>\n"]
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Keyboard interrupt...exiting')
| 38.712512
| 192
| 0.58004
|
21eadc785c19591fc4ce1584c835e2140c2b7c81
| 3,324
|
py
|
Python
|
src/settings.py
|
ititifad/daraja
|
05a6d47ab2d645d1eb5479343a33889ccfb43234
|
[
"MIT"
] | null | null | null |
src/settings.py
|
ititifad/daraja
|
05a6d47ab2d645d1eb5479343a33889ccfb43234
|
[
"MIT"
] | null | null | null |
src/settings.py
|
ititifad/daraja
|
05a6d47ab2d645d1eb5479343a33889ccfb43234
|
[
"MIT"
] | null | null | null |
"""
Django settings for src project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%ig(1(qxxj&8j*-r$v@um50+04-8=r7j+h&yoy2j(3d1_0==m_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly'
]
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.569231
| 91
| 0.695848
|
1f5eee47162c7ff0384ff8ede95fee931e5d70eb
| 658
|
py
|
Python
|
app/migrations/0004_auto_20200916_1301.py
|
mono57/students.archives.fs
|
14e20b7696f6a0d6481a3312b2ad8b7ba197eb59
|
[
"MIT"
] | null | null | null |
app/migrations/0004_auto_20200916_1301.py
|
mono57/students.archives.fs
|
14e20b7696f6a0d6481a3312b2ad8b7ba197eb59
|
[
"MIT"
] | 9
|
2021-03-30T14:09:12.000Z
|
2022-03-12T00:46:23.000Z
|
app/migrations/0004_auto_20200916_1301.py
|
mono57/students.archives.fs
|
14e20b7696f6a0d6481a3312b2ad8b7ba197eb59
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-09-16 12:01
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20200916_1043'),
]
operations = [
migrations.AlterField(
model_name='student',
name='region_of_origin',
field=models.CharField(blank=True, max_length=100, verbose_name="Region d'origine"),
),
migrations.AlterField(
model_name='student',
name='uid',
field=models.UUIDField(blank=True, default=uuid.UUID('dd149708-242d-4187-bb4e-918dcda6447a')),
),
]
| 26.32
| 106
| 0.613982
|
724fa0d223ee126bdc4223ed3802633f384d3d60
| 802
|
py
|
Python
|
users/migrations/0001_initial.py
|
Abezzam10/Wappp
|
94f6f669ab0589b2398e42225eee4063b3213b59
|
[
"Apache-2.0"
] | null | null | null |
users/migrations/0001_initial.py
|
Abezzam10/Wappp
|
94f6f669ab0589b2398e42225eee4063b3213b59
|
[
"Apache-2.0"
] | null | null | null |
users/migrations/0001_initial.py
|
Abezzam10/Wappp
|
94f6f669ab0589b2398e42225eee4063b3213b59
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-21 22:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 30.846154
| 122
| 0.625935
|
f3338b95bc70748da460e948e035dec3265d940a
| 190
|
py
|
Python
|
lg_keyboard/src/lg_keyboard/__init__.py
|
carlosvquezada/lg_ros_nodes
|
7560e99272d06ef5c80a5444131dad72c078a718
|
[
"Apache-2.0"
] | null | null | null |
lg_keyboard/src/lg_keyboard/__init__.py
|
carlosvquezada/lg_ros_nodes
|
7560e99272d06ef5c80a5444131dad72c078a718
|
[
"Apache-2.0"
] | null | null | null |
lg_keyboard/src/lg_keyboard/__init__.py
|
carlosvquezada/lg_ros_nodes
|
7560e99272d06ef5c80a5444131dad72c078a718
|
[
"Apache-2.0"
] | null | null | null |
from onboard import ROS_NODE_NAME
from onboard import OnboardViewportException
from onboard import OnboardLauncher
from onboard import OnboardConfig
from onboard_router import OnboardRouter
| 31.666667
| 44
| 0.894737
|
26ef676624805cb910607d31d4ec081a0aa44648
| 68
|
py
|
Python
|
scripts/hello_streamlit.py
|
R-fred/awesome-streamlit
|
10f2b132bc8e61a82edfacb4b3bb36d0da6c63d3
|
[
"CC0-1.0"
] | 1,194
|
2019-10-09T06:15:27.000Z
|
2022-03-31T14:53:00.000Z
|
scripts/hello_streamlit.py
|
R-fred/awesome-streamlit
|
10f2b132bc8e61a82edfacb4b3bb36d0da6c63d3
|
[
"CC0-1.0"
] | 55
|
2019-10-09T12:08:39.000Z
|
2022-02-10T00:48:53.000Z
|
scripts/hello_streamlit.py
|
R-fred/awesome-streamlit
|
10f2b132bc8e61a82edfacb4b3bb36d0da6c63d3
|
[
"CC0-1.0"
] | 272
|
2019-10-09T12:04:31.000Z
|
2022-03-29T02:43:30.000Z
|
import streamlit as st
st.title("Hello Streamlit again and again")
| 17
| 43
| 0.779412
|
b2ef7fd209032909347028e076ce2b708133239e
| 1,403
|
py
|
Python
|
iszigzag.py
|
pclumson1/Python3_Algorithms
|
7b8106af1641aeb09b6bb1ac1881b8beecde1184
|
[
"MIT"
] | null | null | null |
iszigzag.py
|
pclumson1/Python3_Algorithms
|
7b8106af1641aeb09b6bb1ac1881b8beecde1184
|
[
"MIT"
] | null | null | null |
iszigzag.py
|
pclumson1/Python3_Algorithms
|
7b8106af1641aeb09b6bb1ac1881b8beecde1184
|
[
"MIT"
] | null | null | null |
# Let's say a triple (a, b, c) is a zigzag if either a < b > c or a > b < c.
# Given an array of integers numbers, your task is to check all the triples of its consecutive elements for being a zigzag. More formally, your task is to construct an array of length numbers.length - 2, where the ith element of the output array equals 1 if the triple (numbers[i], numbers[i + 1], numbers[i + 2]) is a zigzag, and 0 otherwise.
# Example
# For numbers = [1, 2, 1, 3, 4], the output should be isZigzag(numbers) = [1, 1, 0].
# (numbers[0], numbers[1], numbers[2]) = (1, 2, 1) is a zigzag, because 1 < 2 > 1;
# (numbers[1], numbers[2] , numbers[3]) = (2, 1, 3) is a zigzag, because 2 > 1 < 3;
# (numbers[2], numbers[3] , numbers[4]) = (1, 3, 4) is not a zigzag, because 1 < 3 < 4;
# For numbers = [1, 2, 3, 4], the output should be isZigzag(numbers) = [0, 0];
# Since all the elements of numbers are increasing, there are no zigzags.
# For numbers = [1000000000, 1000000000, 1000000000], the output should be isZigzag(numbers) = [0].
# Since all the elements of numbers are the same, there are no zigzags.
def isZigzag(numbers):
arr = []
for i in range(len(numbers) - 2):
a, b, c = numbers[i], numbers[i + 1], numbers[i + 2]
if a < b > c or a > b < c:
arr.append(1)
else:
arr.append(0)
return arr
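# A minimal usage sketch (not part of the original solution), re-checking the
# worked examples from the comments above; it assumes isZigzag is defined as above.
if __name__ == "__main__":
    assert isZigzag([1, 2, 1, 3, 4]) == [1, 1, 0]
    assert isZigzag([1, 2, 3, 4]) == [0, 0]
    assert isZigzag([1000000000, 1000000000, 1000000000]) == [0]
    print("All example cases pass")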
| 45.258065
| 344
| 0.605132
|
3d631939f616a86d9597eb0cdfeb16f420c6144c
| 10,053
|
py
|
Python
|
arviz/tests/base_tests/test_rcparams.py
|
smit-s/arviz
|
6a51574efc2dfa652d489091121a3c46da64d12e
|
[
"Apache-2.0"
] | null | null | null |
arviz/tests/base_tests/test_rcparams.py
|
smit-s/arviz
|
6a51574efc2dfa652d489091121a3c46da64d12e
|
[
"Apache-2.0"
] | null | null | null |
arviz/tests/base_tests/test_rcparams.py
|
smit-s/arviz
|
6a51574efc2dfa652d489091121a3c46da64d12e
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=redefined-outer-name
import os
import numpy as np
import pytest
from xarray.core.indexing import MemoryCachedArray
from ...data import load_arviz_data, datasets
from ...stats import compare
from ...rcparams import (
rcParams,
rc_context,
_make_validate_choice,
_make_validate_choice_regex,
make_iterable_validator,
_validate_float_or_none,
_validate_positive_int_or_none,
_validate_probability,
read_rcfile,
)
from ..helpers import models # pylint: disable=unused-import
### Test rcparams classes ###
def test_rc_context_dict():
rcParams["data.load"] = "lazy"
with rc_context(rc={"data.load": "eager"}):
assert rcParams["data.load"] == "eager"
assert rcParams["data.load"] == "lazy"
def test_rc_context_file():
path = os.path.dirname(os.path.abspath(__file__))
rcParams["data.load"] = "lazy"
with rc_context(fname=os.path.join(path, "../test.rcparams")):
assert rcParams["data.load"] == "eager"
assert rcParams["data.load"] == "lazy"
def test_bad_rc_file():
"""Test bad value raises error."""
path = os.path.dirname(os.path.abspath(__file__))
with pytest.raises(ValueError, match="Bad val "):
read_rcfile(os.path.join(path, "../bad.rcparams"))
def test_warning_rc_file(caplog):
"""Test invalid lines and duplicated keys log warnings and bad value raises error."""
path = os.path.dirname(os.path.abspath(__file__))
read_rcfile(os.path.join(path, "../test.rcparams"))
records = caplog.records
assert len(records) == 1
assert records[0].levelname == "WARNING"
assert "Duplicate key" in caplog.text
def test_bad_key():
"""Test the error when using unexistent keys in rcParams is correct."""
with pytest.raises(KeyError, match="bad_key is not a valid rc"):
rcParams["bad_key"] = "nothing"
def test_del_key_error():
"""Check that rcParams keys cannot be deleted."""
with pytest.raises(TypeError, match="keys cannot be deleted"):
del rcParams["data.load"]
def test_clear_error():
"""Check that rcParams cannot be cleared."""
with pytest.raises(TypeError, match="keys cannot be deleted"):
rcParams.clear()
def test_pop_error():
"""Check rcParams pop error."""
with pytest.raises(TypeError, match=r"keys cannot be deleted.*get\(key\)"):
rcParams.pop("data.load")
def test_popitem_error():
"""Check rcParams popitem error."""
with pytest.raises(TypeError, match=r"keys cannot be deleted.*get\(key\)"):
rcParams.popitem()
def test_setdefaults_error():
"""Check rcParams popitem error."""
with pytest.raises(TypeError, match="Use arvizrc"):
rcParams.setdefault("data.load", "eager")
def test_rcparams_find_all():
data_rcparams = rcParams.find_all("data")
assert len(data_rcparams)
def test_rcparams_repr_str():
"""Check both repr and str print all keys."""
repr_str = rcParams.__repr__()
str_str = rcParams.__str__()
assert repr_str.startswith("RcParams")
for string in (repr_str, str_str):
assert all([key in string for key in rcParams.keys()])
### Test arvizrc.template file is up to date ###
def test_rctemplate_updated():
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../arvizrc.template")
rc_pars_template = read_rcfile(fname)
assert all([key in rc_pars_template.keys() for key in rcParams.keys()])
assert all([value == rc_pars_template[key] for key, value in rcParams.items()])
### Test validation functions ###
@pytest.mark.parametrize("param", ["data.load", "stats.information_criterion"])
def test_choice_bad_values(param):
"""Test error messages are correct for rcParams validated with _make_validate_choice."""
msg = "{}: bad_value is not one of".format(param.replace(".", r"\."))
with pytest.raises(ValueError, match=msg):
rcParams[param] = "bad_value"
@pytest.mark.parametrize("allow_none", (True, False))
@pytest.mark.parametrize("typeof", (str, int))
@pytest.mark.parametrize("args", [("not one", 10), (False, None), (False, 4)])
def test_make_validate_choice(args, allow_none, typeof):
accepted_values = set(typeof(value) for value in (0, 1, 4, 6))
validate_choice = _make_validate_choice(accepted_values, allow_none=allow_none, typeof=typeof)
raise_error, value = args
if value is None and not allow_none:
raise_error = "not one of" if typeof == str else "Could not convert"
if raise_error:
with pytest.raises(ValueError, match=raise_error):
validate_choice(value)
else:
value = validate_choice(value)
assert value in accepted_values or value is None
@pytest.mark.parametrize("allow_none", (True, False))
@pytest.mark.parametrize(
"args",
[
(False, None),
(False, "row"),
(False, "54row"),
(False, "4column"),
("or in regex", "square"),
],
)
def test_make_validate_choice_regex(args, allow_none):
accepted_values = {"row", "column"}
accepted_values_regex = {r"\d*row", r"\d*column"}
validate_choice = _make_validate_choice_regex(
accepted_values, accepted_values_regex, allow_none=allow_none
)
raise_error, value = args
if value is None and not allow_none:
raise_error = "or in regex"
if raise_error:
with pytest.raises(ValueError, match=raise_error):
validate_choice(value)
else:
value_result = validate_choice(value)
assert value == value_result
@pytest.mark.parametrize("allow_none", (True, False))
@pytest.mark.parametrize("allow_auto", (True, False))
@pytest.mark.parametrize("value", [(1, 2), "auto", None, "(1, 4)"])
def test_make_iterable_validator_none_auto(value, allow_auto, allow_none):
scalar_validator = _validate_float_or_none
validate_iterable = make_iterable_validator(
scalar_validator, allow_auto=allow_auto, allow_none=allow_none
)
raise_error = False
if value is None and not allow_none:
raise_error = "Only ordered iterable"
if value == "auto" and not allow_auto:
raise_error = "Could not convert"
if raise_error:
with pytest.raises(ValueError, match=raise_error):
validate_iterable(value)
else:
value = validate_iterable(value)
assert np.iterable(value) or value is None or value == "auto"
@pytest.mark.parametrize("length", (2, None))
@pytest.mark.parametrize("value", [(1, 5), (1, 3, 5), "(3, 4, 5)"])
def test_make_iterable_validator_length(value, length):
scalar_validator = _validate_float_or_none
validate_iterable = make_iterable_validator(scalar_validator, length=length)
raise_error = False
if length is not None and len(value) != length:
raise_error = "Iterable must be of length"
if raise_error:
with pytest.raises(ValueError, match=raise_error):
validate_iterable(value)
else:
value = validate_iterable(value)
assert np.iterable(value)
@pytest.mark.parametrize(
"args",
[
("Only ordered iterable", set(["a", "b", "c"])),
("Could not convert", "johndoe"),
("Only ordered iterable", 15),
],
)
def test_make_iterable_validator_illegal(args):
scalar_validator = _validate_float_or_none
validate_iterable = make_iterable_validator(scalar_validator)
raise_error, value = args
with pytest.raises(ValueError, match=raise_error):
validate_iterable(value)
@pytest.mark.parametrize(
"args",
[("Only positive", -1), ("Could not convert", "1.3"), (False, "2"), (False, None), (False, 1)],
)
def test_validate_positive_int_or_none(args):
raise_error, value = args
if raise_error:
with pytest.raises(ValueError, match=raise_error):
_validate_positive_int_or_none(value)
else:
value = _validate_positive_int_or_none(value)
assert isinstance(value, int) or value is None
@pytest.mark.parametrize(
"args",
[
("Only.+between 0 and 1", -1),
("Only.+between 0 and 1", "1.3"),
("not convert to float", "word"),
(False, "0.6"),
(False, 0),
(False, 1),
],
)
def test_validate_probability(args):
raise_error, value = args
if raise_error:
with pytest.raises(ValueError, match=raise_error):
_validate_probability(value)
else:
value = _validate_probability(value)
assert isinstance(value, float)
### Test integration of rcParams in ArviZ ###
def test_data_load():
rcParams["data.load"] = "lazy"
idata_lazy = load_arviz_data("centered_eight")
assert isinstance(
idata_lazy.posterior.mu.variable._data, # pylint: disable=protected-access
MemoryCachedArray,
)
assert rcParams["data.load"] == "lazy"
rcParams["data.load"] = "eager"
idata_eager = load_arviz_data("centered_eight")
assert isinstance(
idata_eager.posterior.mu.variable._data, np.ndarray # pylint: disable=protected-access
)
assert rcParams["data.load"] == "eager"
def test_stats_information_criterion(models):
rcParams["stats.information_criterion"] = "waic"
df_comp = compare({"model1": models.model_1, "model2": models.model_2})
assert "waic" in df_comp.columns
rcParams["stats.information_criterion"] = "loo"
df_comp = compare({"model1": models.model_1, "model2": models.model_2})
assert "loo" in df_comp.columns
def test_http_type_request(models, monkeypatch):
def _urlretrive(url, _):
raise Exception("URL Retrieved: {}".format(url))
# Hijack url retrieve to inspect url passed
monkeypatch.setattr(datasets, "urlretrieve", _urlretrive)
# Test HTTPS default
with pytest.raises(Exception) as error:
datasets.load_arviz_data("radon")
assert "https://" in str(error)
# Test HTTP setting
with pytest.raises(Exception) as error:
rcParams["data.http_protocol"] = "http"
datasets.load_arviz_data("radon")
assert "http://" in str(error)
| 33.51
| 99
| 0.676614
|
88c2ff1f70d5e7f39e74cd48ea49d34967f4f958
| 23,898
|
py
|
Python
|
trio/_abc.py
|
matham/trio
|
ef7a32110b9565dd6f3a6132c105cec63a707968
|
[
"Apache-2.0",
"MIT"
] | 4
|
2017-03-01T22:14:46.000Z
|
2020-07-31T07:18:18.000Z
|
trio/_abc.py
|
matham/trio
|
ef7a32110b9565dd6f3a6132c105cec63a707968
|
[
"Apache-2.0",
"MIT"
] | 81
|
2017-01-22T11:58:29.000Z
|
2017-05-27T22:17:49.000Z
|
trio/_abc.py
|
matham/trio
|
ef7a32110b9565dd6f3a6132c105cec63a707968
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-05-28T19:38:09.000Z
|
2020-05-28T19:38:09.000Z
|
from abc import ABCMeta, abstractmethod
from typing import Generic, TypeVar
from ._util import aiter_compat
import trio
# We use ABCMeta instead of ABC, plus set __slots__=(), so as not to force a
# __dict__ onto subclasses.
class Clock(metaclass=ABCMeta):
"""The interface for custom run loop clocks.
"""
__slots__ = ()
@abstractmethod
def start_clock(self):
"""Do any setup this clock might need.
Called at the beginning of the run.
"""
@abstractmethod
def current_time(self):
"""Return the current time, according to this clock.
This is used to implement functions like :func:`trio.current_time` and
:func:`trio.move_on_after`.
Returns:
float: The current time.
"""
@abstractmethod
def deadline_to_sleep_time(self, deadline):
"""Compute the real time until the given deadline.
This is called before we enter a system-specific wait function like
:func:`select.select`, to get the timeout to pass.
For a clock using wall-time, this should be something like::
return deadline - self.current_time()
but of course it may be different if you're implementing some kind of
virtual clock.
Args:
deadline (float): The absolute time of the next deadline,
according to this clock.
Returns:
float: The number of real seconds to sleep until the given
deadline. May be :data:`math.inf`.
"""
class Instrument(metaclass=ABCMeta):
"""The interface for run loop instrumentation.
Instruments don't have to inherit from this abstract base class, and all
of these methods are optional. This class serves mostly as documentation.
"""
__slots__ = ()
def before_run(self):
"""Called at the beginning of :func:`trio.run`.
"""
def after_run(self):
"""Called just before :func:`trio.run` returns.
"""
def task_spawned(self, task):
"""Called when the given task is created.
Args:
task (trio.hazmat.Task): The new task.
"""
def task_scheduled(self, task):
"""Called when the given task becomes runnable.
It may still be some time before it actually runs, if there are other
runnable tasks ahead of it.
Args:
task (trio.hazmat.Task): The task that became runnable.
"""
def before_task_step(self, task):
"""Called immediately before we resume running the given task.
Args:
task (trio.hazmat.Task): The task that is about to run.
"""
def after_task_step(self, task):
"""Called when we return to the main run loop after a task has yielded.
Args:
task (trio.hazmat.Task): The task that just ran.
"""
def task_exited(self, task):
"""Called when the given task exits.
Args:
task (trio.hazmat.Task): The finished task.
"""
def before_io_wait(self, timeout):
"""Called before blocking to wait for I/O readiness.
Args:
timeout (float): The number of seconds we are willing to wait.
"""
def after_io_wait(self, timeout):
"""Called after handling pending I/O.
Args:
timeout (float): The number of seconds we were willing to
wait. This much time may or may not have elapsed, depending on
whether any I/O was ready.
"""
class HostnameResolver(metaclass=ABCMeta):
"""If you have a custom hostname resolver, then implementing
:class:`HostnameResolver` allows you to register this to be used by Trio.
See :func:`trio.socket.set_custom_hostname_resolver`.
"""
__slots__ = ()
@abstractmethod
async def getaddrinfo(
self, host, port, family=0, type=0, proto=0, flags=0
):
"""A custom implementation of :func:`~trio.socket.getaddrinfo`.
Called by :func:`trio.socket.getaddrinfo`.
If ``host`` is given as a numeric IP address, then
:func:`~trio.socket.getaddrinfo` may handle the request itself rather
than calling this method.
Any required IDNA encoding is handled before calling this function;
your implementation can assume that it will never see U-labels like
``"café.com"``, and only needs to handle A-labels like
``b"xn--caf-dma.com"``.
"""
@abstractmethod
async def getnameinfo(self, sockaddr, flags):
"""A custom implementation of :func:`~trio.socket.getnameinfo`.
Called by :func:`trio.socket.getnameinfo`.
"""
class SocketFactory(metaclass=ABCMeta):
"""If you write a custom class implementing the Trio socket interface,
then you can use a :class:`SocketFactory` to get Trio to use it.
See :func:`trio.socket.set_custom_socket_factory`.
"""
@abstractmethod
def socket(self, family=None, type=None, proto=None):
"""Create and return a socket object.
Your socket object must inherit from :class:`trio.socket.SocketType`,
which is an empty class whose only purpose is to "mark" which classes
should be considered valid Trio sockets.
Called by :func:`trio.socket.socket`.
Note that unlike :func:`trio.socket.socket`, this does not take a
``fileno=`` argument. If a ``fileno=`` is specified, then
:func:`trio.socket.socket` returns a regular Trio socket object
instead of calling this method.
"""
class AsyncResource(metaclass=ABCMeta):
"""A standard interface for resources that needs to be cleaned up, and
where that cleanup may require blocking operations.
This class distinguishes between "graceful" closes, which may perform I/O
and thus block, and a "forceful" close, which cannot. For example, cleanly
shutting down a TLS-encrypted connection requires sending a "goodbye"
message; but if a peer has become non-responsive, then sending this
message might block forever, so we may want to just drop the connection
instead. Therefore the :meth:`aclose` method is unusual in that it
should always close the connection (or at least make its best attempt)
*even if it fails*; failure indicates a failure to achieve grace, not a
failure to close the connection.
Objects that implement this interface can be used as async context
managers, i.e., you can write::
async with create_resource() as some_async_resource:
...
Entering the context manager is synchronous (not a checkpoint); exiting it
calls :meth:`aclose`. The default implementations of
``__aenter__`` and ``__aexit__`` should be adequate for all subclasses.
"""
__slots__ = ()
@abstractmethod
async def aclose(self):
"""Close this resource, possibly blocking.
IMPORTANT: This method may block in order to perform a "graceful"
shutdown. But, if this fails, then it still *must* close any
underlying resources before returning. An error from this method
indicates a failure to achieve grace, *not* a failure to close the
connection.
For example, suppose we call :meth:`aclose` on a TLS-encrypted
connection. This requires sending a "goodbye" message; but if the peer
has become non-responsive, then our attempt to send this message might
block forever, and eventually time out and be cancelled. In this case
the :meth:`aclose` method on :class:`~trio.SSLStream` will
immediately close the underlying transport stream using
:func:`trio.aclose_forcefully` before raising :exc:`~trio.Cancelled`.
If the resource is already closed, then this method should silently
succeed.
Once this method completes, any other pending or future operations on
this resource should generally raise :exc:`~trio.ClosedResourceError`,
unless there's a good reason to do otherwise.
See also: :func:`trio.aclose_forcefully`.
"""
async def __aenter__(self):
return self
async def __aexit__(self, *args):
await self.aclose()
class SendStream(AsyncResource):
"""A standard interface for sending data on a byte stream.
The underlying stream may be unidirectional, or bidirectional. If it's
bidirectional, then you probably want to also implement
:class:`ReceiveStream`, which makes your object a :class:`Stream`.
:class:`SendStream` objects also implement the :class:`AsyncResource`
interface, so they can be closed by calling :meth:`~AsyncResource.aclose`
or using an ``async with`` block.
If you want to send Python objects rather than raw bytes, see
:class:`SendChannel`.
"""
__slots__ = ()
@abstractmethod
async def send_all(self, data):
"""Sends the given data through the stream, blocking if necessary.
Args:
data (bytes, bytearray, or memoryview): The data to send.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
:meth:`HalfCloseableStream.send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`send_all` is running.
Most low-level operations in Trio provide a guarantee: if they raise
:exc:`trio.Cancelled`, this means that they had no effect, so the
system remains in a known state. This is **not true** for
:meth:`send_all`. If this operation raises :exc:`trio.Cancelled` (or
any other exception for that matter), then it may have sent some, all,
or none of the requested data, and there is no way to know which.
"""
@abstractmethod
async def wait_send_all_might_not_block(self):
"""Block until it's possible that :meth:`send_all` might not block.
This method may return early: it's possible that after it returns,
:meth:`send_all` will still block. (In the worst case, if no better
implementation is available, then it might always return immediately
without blocking. It's nice to do better than that when possible,
though.)
This method **must not** return *late*: if it's possible for
:meth:`send_all` to complete without blocking, then it must
return. When implementing it, err on the side of returning early.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
:meth:`HalfCloseableStream.send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`wait_send_all_might_not_block` is running.
Note:
This method is intended to aid in implementing protocols that want
to delay choosing which data to send until the last moment. E.g.,
            suppose you're working on an implementation of a remote display server
like `VNC
<https://en.wikipedia.org/wiki/Virtual_Network_Computing>`__, and
the network connection is currently backed up so that if you call
:meth:`send_all` now then it will sit for 0.5 seconds before actually
sending anything. In this case it doesn't make sense to take a
screenshot, then wait 0.5 seconds, and then send it, because the
screen will keep changing while you wait; it's better to wait 0.5
seconds, then take the screenshot, and then send it, because this
way the data you deliver will be more
up-to-date. Using :meth:`wait_send_all_might_not_block` makes it
possible to implement the better strategy.
If you use this method, you might also want to read up on
``TCP_NOTSENT_LOWAT``.
Further reading:
* `Prioritization Only Works When There's Pending Data to Prioritize
<https://insouciant.org/tech/prioritization-only-works-when-theres-pending-data-to-prioritize/>`__
* WWDC 2015: Your App and Next Generation Networks: `slides
<http://devstreaming.apple.com/videos/wwdc/2015/719ui2k57m/719/719_your_app_and_next_generation_networks.pdf?dl=1>`__,
`video and transcript
<https://developer.apple.com/videos/play/wwdc2015/719/>`__
"""
class ReceiveStream(AsyncResource):
"""A standard interface for receiving data on a byte stream.
The underlying stream may be unidirectional, or bidirectional. If it's
bidirectional, then you probably want to also implement
:class:`SendStream`, which makes your object a :class:`Stream`.
:class:`ReceiveStream` objects also implement the :class:`AsyncResource`
interface, so they can be closed by calling :meth:`~AsyncResource.aclose`
or using an ``async with`` block.
If you want to receive Python objects rather than raw bytes, see
:class:`ReceiveChannel`.
`ReceiveStream` objects can be used in ``async for`` loops. Each iteration
will produce an arbitrary sized chunk of bytes, like calling
`receive_some` with no arguments. Every chunk will contain at least one
byte, and the loop automatically exits when reaching end-of-file.
"""
__slots__ = ()
@abstractmethod
async def receive_some(self, max_bytes=None):
"""Wait until there is data available on this stream, and then return
some of it.
A return value of ``b""`` (an empty bytestring) indicates that the
stream has reached end-of-file. Implementations should be careful that
they return ``b""`` if, and only if, the stream has reached
end-of-file!
Args:
max_bytes (int): The maximum number of bytes to return. Must be
greater than zero. Optional; if omitted, then the stream object
is free to pick a reasonable default.
Returns:
bytes or bytearray: The data received.
Raises:
trio.BusyResourceError: if two tasks attempt to call
:meth:`receive_some` on the same stream at the same time.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`receive_some` is running.
"""
@aiter_compat
def __aiter__(self):
return self
async def __anext__(self):
data = await self.receive_some()
if not data:
raise StopAsyncIteration
return data
class Stream(SendStream, ReceiveStream):
"""A standard interface for interacting with bidirectional byte streams.
A :class:`Stream` is an object that implements both the
:class:`SendStream` and :class:`ReceiveStream` interfaces.
If implementing this interface, you should consider whether you can go one
step further and implement :class:`HalfCloseableStream`.
"""
__slots__ = ()
class HalfCloseableStream(Stream):
"""This interface extends :class:`Stream` to also allow closing the send
part of the stream without closing the receive part.
"""
__slots__ = ()
@abstractmethod
async def send_eof(self):
"""Send an end-of-file indication on this stream, if possible.
The difference between :meth:`send_eof` and
:meth:`~AsyncResource.aclose` is that :meth:`send_eof` is a
*unidirectional* end-of-file indication. After you call this method,
you shouldn't try sending any more data on this stream, and your
remote peer should receive an end-of-file indication (eventually,
after receiving all the data you sent before that). But, they may
continue to send data to you, and you can continue to receive it by
calling :meth:`~ReceiveStream.receive_some`. You can think of it as
calling :meth:`~AsyncResource.aclose` on just the
:class:`SendStream` "half" of the stream object (and in fact that's
literally how :class:`trio.StapledStream` implements it).
Examples:
* On a socket, this corresponds to ``shutdown(..., SHUT_WR)`` (`man
page <https://linux.die.net/man/2/shutdown>`__).
* The SSH protocol provides the ability to multiplex bidirectional
"channels" on top of a single encrypted connection. A Trio
implementation of SSH could expose these channels as
:class:`HalfCloseableStream` objects, and calling :meth:`send_eof`
would send an ``SSH_MSG_CHANNEL_EOF`` request (see `RFC 4254 §5.3
<https://tools.ietf.org/html/rfc4254#section-5.3>`__).
* On an SSL/TLS-encrypted connection, the protocol doesn't provide any
way to do a unidirectional shutdown without closing the connection
entirely, so :class:`~trio.SSLStream` implements
:class:`Stream`, not :class:`HalfCloseableStream`.
If an EOF has already been sent, then this method should silently
succeed.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`~SendStream.send_all`,
:meth:`~SendStream.wait_send_all_might_not_block`, or
:meth:`send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`send_eof` is running.
"""
# A regular invariant generic type
T = TypeVar("T")
# The type of object produced by a ReceiveChannel (covariant because
# ReceiveChannel[Derived] can be passed to someone expecting
# ReceiveChannel[Base])
ReceiveType = TypeVar("ReceiveType", covariant=True)
# The type of object accepted by a SendChannel (contravariant because
# SendChannel[Base] can be passed to someone expecting
# SendChannel[Derived])
SendType = TypeVar("SendType", contravariant=True)
# The type of object produced by a Listener (covariant plus must be
# an AsyncResource)
T_resource = TypeVar("T_resource", bound=AsyncResource, covariant=True)
class Listener(AsyncResource, Generic[T_resource]):
"""A standard interface for listening for incoming connections.
:class:`Listener` objects also implement the :class:`AsyncResource`
interface, so they can be closed by calling :meth:`~AsyncResource.aclose`
or using an ``async with`` block.
"""
__slots__ = ()
@abstractmethod
async def accept(self):
"""Wait until an incoming connection arrives, and then return it.
Returns:
AsyncResource: An object representing the incoming connection. In
practice this is generally some kind of :class:`Stream`,
but in principle you could also define a :class:`Listener` that
returned, say, channel objects.
Raises:
trio.BusyResourceError: if two tasks attempt to call
:meth:`accept` on the same listener at the same time.
trio.ClosedResourceError: if you previously closed this listener
object, or if another task closes this listener object while
:meth:`accept` is running.
Listeners don't generally raise :exc:`~trio.BrokenResourceError`,
because for listeners there is no general condition of "the
network/remote peer broke the connection" that can be handled in a
generic way, like there is for streams. Other errors *can* occur and
be raised from :meth:`accept` – for example, if you run out of file
descriptors then you might get an :class:`OSError` with its errno set
to ``EMFILE``.
"""
class SendChannel(AsyncResource, Generic[SendType]):
"""A standard interface for sending Python objects to some receiver.
`SendChannel` objects also implement the `AsyncResource` interface, so
they can be closed by calling `~AsyncResource.aclose` or using an ``async
with`` block.
If you want to send raw bytes rather than Python objects, see
`SendStream`.
"""
__slots__ = ()
@abstractmethod
async def send(self, value: SendType) -> None:
"""Attempt to send an object through the channel, blocking if necessary.
Args:
value (object): The object to send.
Raises:
trio.BrokenResourceError: if something has gone wrong, and the
channel is broken. For example, you may get this if the receiver
has already been closed.
trio.ClosedResourceError: if you previously closed this
:class:`SendChannel` object, or if another task closes it while
:meth:`send` is running.
trio.BusyResourceError: some channels allow multiple tasks to call
`send` at the same time, but others don't. If you try to call
`send` simultaneously from multiple tasks on a channel that
doesn't support it, then you can get `~trio.BusyResourceError`.
"""
class ReceiveChannel(AsyncResource, Generic[ReceiveType]):
"""A standard interface for receiving Python objects from some sender.
You can iterate over a :class:`ReceiveChannel` using an ``async for``
loop::
async for value in receive_channel:
...
This is equivalent to calling :meth:`receive` repeatedly. The loop exits
without error when `receive` raises `~trio.EndOfChannel`.
`ReceiveChannel` objects also implement the `AsyncResource` interface, so
they can be closed by calling `~AsyncResource.aclose` or using an ``async
with`` block.
If you want to receive raw bytes rather than Python objects, see
`ReceiveStream`.
"""
__slots__ = ()
@abstractmethod
async def receive(self) -> ReceiveType:
"""Attempt to receive an incoming object, blocking if necessary.
Returns:
object: Whatever object was received.
Raises:
trio.EndOfChannel: if the sender has been closed cleanly, and no
more objects are coming. This is not an error condition.
trio.ClosedResourceError: if you previously closed this
:class:`ReceiveChannel` object.
trio.BrokenResourceError: if something has gone wrong, and the
channel is broken.
trio.BusyResourceError: some channels allow multiple tasks to call
`receive` at the same time, but others don't. If you try to call
`receive` simultaneously from multiple tasks on a channel that
doesn't support it, then you can get `~trio.BusyResourceError`.
"""
@aiter_compat
def __aiter__(self):
return self
async def __anext__(self) -> ReceiveType:
try:
return await self.receive()
except trio.EndOfChannel:
raise StopAsyncIteration
class Channel(SendChannel[T], ReceiveChannel[T]):
"""A standard interface for interacting with bidirectional channels.
A `Channel` is an object that implements both the `SendChannel` and
`ReceiveChannel` interfaces, so you can both send and receive objects.
"""
| 36.766154
| 130
| 0.663905
|
4a4621be4f5356af2323f0ee733d1bd3d3ad9388
| 373
|
py
|
Python
|
01.Python/07.Range.py
|
rmatam/Deep-Learning
|
665492254d36d3991e4b58abf6dd62d7fdac8769
|
[
"Apache-2.0"
] | null | null | null |
01.Python/07.Range.py
|
rmatam/Deep-Learning
|
665492254d36d3991e4b58abf6dd62d7fdac8769
|
[
"Apache-2.0"
] | null | null | null |
01.Python/07.Range.py
|
rmatam/Deep-Learning
|
665492254d36d3991e4b58abf6dd62d7fdac8769
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 21 18:08:37 2016
@author: rmatam
"""
import numpy as np
from scipy import stats

# Scores laid out as a 4x7 array: one row per series of observations.
A = np.array([[10, 14, 11, 7, 9.5, 15, 19], [8, 9, 17, 14.5, 12, 18, 15.5],
              [15, 7.5, 11.5, 10, 10.5, 7, 11], [11.5, 11, 9, 12, 14, 12, 7.5]])
B = A.T  # transpose so each column holds one series
print(B)
# a = np.median(B, axis=0)
a = np.ptp(B, axis=0)  # range (max - min) of each column
b = np.ptp(B, axis=1)  # range (max - min) of each row
print(a, b)
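# Hedged companion check: np.ptp is simply max - min along the chosen axis,
# so the column-wise ranges computed above can be verified directly.
check = B.max(axis=0) - B.min(axis=0)
print(np.allclose(a, check))  # expected: True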
| 20.722222
| 69
| 0.544236
|
69cd39fe7ebe8016d05a856c4ff6eaaddcdfabad
| 99,454
|
py
|
Python
|
tests/unit/gapic/service_v1beta1/test_environments.py
|
renovate-bot/python-orchestration-airflow
|
ca7e4935f1509a6e09c3d58fab7dfe4a4eabe782
|
[
"Apache-2.0"
] | 5
|
2021-09-13T21:29:18.000Z
|
2021-12-29T14:30:09.000Z
|
tests/unit/gapic/service_v1beta1/test_environments.py
|
renovate-bot/python-orchestration-airflow
|
ca7e4935f1509a6e09c3d58fab7dfe4a4eabe782
|
[
"Apache-2.0"
] | 21
|
2021-07-23T11:14:26.000Z
|
2022-03-07T17:06:22.000Z
|
tests/unit/gapic/service_v1beta1/test_environments.py
|
renovate-bot/python-orchestration-airflow
|
ca7e4935f1509a6e09c3d58fab7dfe4a4eabe782
|
[
"Apache-2.0"
] | 2
|
2021-07-21T15:43:57.000Z
|
2022-01-29T08:11:34.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.orchestration.airflow.service_v1beta1.services.environments import (
EnvironmentsAsyncClient,
)
from google.cloud.orchestration.airflow.service_v1beta1.services.environments import (
EnvironmentsClient,
)
from google.cloud.orchestration.airflow.service_v1beta1.services.environments import (
pagers,
)
from google.cloud.orchestration.airflow.service_v1beta1.services.environments import (
transports,
)
from google.cloud.orchestration.airflow.service_v1beta1.types import environments
from google.cloud.orchestration.airflow.service_v1beta1.types import operations
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert EnvironmentsClient._get_default_mtls_endpoint(None) is None
assert (
EnvironmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert EnvironmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient,])
def test_environments_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "composer.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.EnvironmentsGrpcTransport, "grpc"),
(transports.EnvironmentsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_environments_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient,])
def test_environments_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "composer.googleapis.com:443"
def test_environments_client_get_transport_class():
transport = EnvironmentsClient.get_transport_class()
available_transports = [
transports.EnvironmentsGrpcTransport,
]
assert transport in available_transports
transport = EnvironmentsClient.get_transport_class("grpc")
assert transport == transports.EnvironmentsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
def test_environments_client_client_options(
client_class, transport_class, transport_name
):
    # Check that if a transport instance is provided, we won't create a new one.
with mock.patch.object(EnvironmentsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
    # Check that if the transport is given as a string, we will create a new one.
with mock.patch.object(EnvironmentsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc", "true"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc", "false"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_environments_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_environments_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_environments_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_environments_client_client_options_from_dict():
with mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.environments.transports.EnvironmentsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = EnvironmentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("request_type", [environments.CreateEnvironmentRequest, dict,])
def test_create_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CreateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
client.create_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CreateEnvironmentRequest()
@pytest.mark.asyncio
async def test_create_environment_async(
transport: str = "grpc_asyncio", request_type=environments.CreateEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CreateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_environment_async_from_dict():
await test_create_environment_async(request_type=dict)
def test_create_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.CreateEnvironmentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.CreateEnvironmentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_environment(
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
def test_create_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_environment(
environments.CreateEnvironmentRequest(),
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_environment(
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_environment(
environments.CreateEnvironmentRequest(),
parent="parent_value",
environment=environments.Environment(name="name_value"),
)
@pytest.mark.parametrize("request_type", [environments.GetEnvironmentRequest, dict,])
def test_get_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = environments.Environment(
name="name_value",
uuid="uuid_value",
state=environments.Environment.State.CREATING,
)
response = client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.GetEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environments.Environment)
assert response.name == "name_value"
assert response.uuid == "uuid_value"
assert response.state == environments.Environment.State.CREATING
def test_get_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
client.get_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.GetEnvironmentRequest()
@pytest.mark.asyncio
async def test_get_environment_async(
transport: str = "grpc_asyncio", request_type=environments.GetEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.Environment(
name="name_value",
uuid="uuid_value",
state=environments.Environment.State.CREATING,
)
)
response = await client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.GetEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environments.Environment)
assert response.name == "name_value"
assert response.uuid == "uuid_value"
assert response.state == environments.Environment.State.CREATING
@pytest.mark.asyncio
async def test_get_environment_async_from_dict():
await test_get_environment_async(request_type=dict)
def test_get_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.GetEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
call.return_value = environments.Environment()
client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.GetEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.Environment()
)
await client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = environments.Environment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_environment(
environments.GetEnvironmentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            environments.Environment()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_environment(
environments.GetEnvironmentRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [environments.ListEnvironmentsRequest, dict,])
def test_list_environments(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environments.ListEnvironmentsResponse(
next_page_token="next_page_token_value",
)
response = client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.ListEnvironmentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEnvironmentsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_environments_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
client.list_environments()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.ListEnvironmentsRequest()
@pytest.mark.asyncio
async def test_list_environments_async(
transport: str = "grpc_asyncio", request_type=environments.ListEnvironmentsRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.ListEnvironmentsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.ListEnvironmentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEnvironmentsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_environments_async_from_dict():
await test_list_environments_async(request_type=dict)
def test_list_environments_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.ListEnvironmentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
call.return_value = environments.ListEnvironmentsResponse()
client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_environments_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.ListEnvironmentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environments.ListEnvironmentsResponse()
)
await client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_environments_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environments.ListEnvironmentsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_environments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_environments_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_environments(
environments.ListEnvironmentsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_environments_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            environments.ListEnvironmentsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_environments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_environments_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_environments(
environments.ListEnvironmentsRequest(), parent="parent_value",
)
def test_list_environments_pager(transport_name: str = "grpc"):
client = EnvironmentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_environments(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, environments.Environment) for i in results)
def test_list_environments_pages(transport_name: str = "grpc"):
client = EnvironmentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
pages = list(client.list_environments(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_environments_async_pager():
    client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
async_pager = await client.list_environments(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, environments.Environment) for i in responses)
@pytest.mark.asyncio
async def test_list_environments_async_pages():
    client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environments.ListEnvironmentsResponse(
environments=[
environments.Environment(),
environments.Environment(),
environments.Environment(),
],
next_page_token="abc",
),
environments.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(),], next_page_token="ghi",
),
environments.ListEnvironmentsResponse(
environments=[environments.Environment(), environments.Environment(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_environments(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [environments.UpdateEnvironmentRequest, dict,])
def test_update_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.UpdateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
client.update_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.UpdateEnvironmentRequest()
@pytest.mark.asyncio
async def test_update_environment_async(
transport: str = "grpc_asyncio", request_type=environments.UpdateEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.UpdateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_environment_async_from_dict():
await test_update_environment_async(request_type=dict)
def test_update_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.UpdateEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.UpdateEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_update_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_environment(
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_environment(
environments.UpdateEnvironmentRequest(),
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_environment(
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].environment
mock_val = environments.Environment(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_environment(
environments.UpdateEnvironmentRequest(),
name="name_value",
environment=environments.Environment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [environments.DeleteEnvironmentRequest, dict,])
def test_delete_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.DeleteEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
client.delete_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.DeleteEnvironmentRequest()
@pytest.mark.asyncio
async def test_delete_environment_async(
transport: str = "grpc_asyncio", request_type=environments.DeleteEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.DeleteEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_environment_async_from_dict():
await test_delete_environment_async(request_type=dict)
def test_delete_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.DeleteEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.DeleteEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_environment_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_environment_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_environment(
environments.DeleteEnvironmentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_environment_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
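        # Note: on the async transport the awaited value must be wrapped in a
        # FakeUnaryUnaryCall; a plain Operation return value would not be
        # awaitable here.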
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_environment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_environment_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_environment(
environments.DeleteEnvironmentRequest(), name="name_value",
)
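# The RestartWebServer tests mirror the structure above: sync, empty-call,
# async, and routing-header coverage. No flattened-argument tests appear for
# this RPC in this file.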
@pytest.mark.parametrize("request_type", [environments.RestartWebServerRequest, dict,])
def test_restart_web_server(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.restart_web_server), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.restart_web_server(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.RestartWebServerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_restart_web_server_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.restart_web_server), "__call__"
) as call:
client.restart_web_server()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.RestartWebServerRequest()
@pytest.mark.asyncio
async def test_restart_web_server_async(
transport: str = "grpc_asyncio", request_type=environments.RestartWebServerRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.restart_web_server), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.restart_web_server(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.RestartWebServerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_restart_web_server_async_from_dict():
await test_restart_web_server_async(request_type=dict)
def test_restart_web_server_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.RestartWebServerRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.restart_web_server), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.restart_web_server(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_restart_web_server_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.RestartWebServerRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.restart_web_server), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.restart_web_server(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [environments.CheckUpgradeRequest, dict,])
def test_check_upgrade(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.check_upgrade), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.check_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CheckUpgradeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_check_upgrade_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.check_upgrade), "__call__") as call:
client.check_upgrade()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CheckUpgradeRequest()
@pytest.mark.asyncio
async def test_check_upgrade_async(
transport: str = "grpc_asyncio", request_type=environments.CheckUpgradeRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.check_upgrade), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.check_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environments.CheckUpgradeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_check_upgrade_async_from_dict():
await test_check_upgrade_async(request_type=dict)
def test_check_upgrade_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.CheckUpgradeRequest()
request.environment = "environment/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.check_upgrade), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.check_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "environment=environment/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_check_upgrade_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environments.CheckUpgradeRequest()
request.environment = "environment/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.check_upgrade), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.check_upgrade(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "environment=environment/value",) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = EnvironmentsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.EnvironmentsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.EnvironmentsGrpcTransport,
transports.EnvironmentsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.EnvironmentsGrpcTransport,)
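# Base transport tests: the abstract EnvironmentsTransport should reject
# conflicting credential arguments, raise NotImplementedError for every RPC
# method and for the LRO operations client, and resolve credentials from a
# file or from ADC when asked.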
def test_environments_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.EnvironmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_environments_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.environments.transports.EnvironmentsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.EnvironmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_environment",
"get_environment",
"list_environments",
"update_environment",
"delete_environment",
"restart_web_server",
"check_upgrade",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_environments_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.environments.transports.EnvironmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EnvironmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_environments_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.orchestration.airflow.service_v1beta1.services.environments.transports.EnvironmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EnvironmentsTransport()
adc.assert_called_once()
def test_environments_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EnvironmentsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EnvironmentsGrpcTransport,
transports.EnvironmentsGrpcAsyncIOTransport,
],
)
def test_environments_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EnvironmentsGrpcTransport, grpc_helpers),
(transports.EnvironmentsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_environments_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"composer.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="composer.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
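# The expected endpoint and channel options asserted above come from the
# service configuration: the client targets composer.googleapis.com:443 and
# sets the gRPC max send/receive message lengths to -1 (no limit).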
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that client_cert_source_for_mtls is used when
    # ssl_channel_credentials is not provided.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
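# Host resolution: when the configured api_endpoint has no port, the client
# appends the default gRPC port 443; an explicitly supplied port (":8000"
# below) is preserved as-is.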
def test_environments_host_no_port():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="composer.googleapis.com"
),
)
assert client.transport._host == "composer.googleapis.com:443"
def test_environments_host_with_port():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="composer.googleapis.com:8000"
),
)
assert client.transport._host == "composer.googleapis.com:8000"
def test_environments_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EnvironmentsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_environments_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EnvironmentsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_environments_grpc_lro_client():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_environments_grpc_lro_async_client():
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
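# The path helper tests below verify the classmethods that build and parse
# canonical resource names. For example, given the template asserted below,
# EnvironmentsClient.environment_path("my-project", "us-central1", "my-env")
# would return "projects/my-project/locations/us-central1/environments/my-env"
# (the project/location/environment values in this comment are illustrative
# only, not taken from the tests).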
def test_environment_path():
project = "squid"
location = "clam"
environment = "whelk"
expected = "projects/{project}/locations/{location}/environments/{environment}".format(
project=project, location=location, environment=environment,
)
actual = EnvironmentsClient.environment_path(project, location, environment)
assert expected == actual
def test_parse_environment_path():
expected = {
"project": "octopus",
"location": "oyster",
"environment": "nudibranch",
}
path = EnvironmentsClient.environment_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_environment_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = EnvironmentsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = EnvironmentsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = EnvironmentsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = EnvironmentsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = EnvironmentsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = EnvironmentsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = EnvironmentsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = EnvironmentsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = EnvironmentsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = EnvironmentsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_location_path(path)
assert expected == actual
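# The final tests check that a caller-supplied ClientInfo reaches the
# transport's _prep_wrapped_messages hook, and that using the client as a
# (sync or async) context manager closes the underlying gRPC channel exactly
# once on exit.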
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.EnvironmentsTransport, "_prep_wrapped_messages"
) as prep:
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.EnvironmentsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = EnvironmentsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| 38.428903
| 138
| 0.689645
|