hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f9e17e9664aad34733bd9fdd9f0df727ab36cda | 7,405 | py | Python | utils/toolkit.py | eLeVeNnN/fastorch | 7b73548b1b25c81dd41e4fc9cc7afb61bf8a5d2d | [
"MIT"
] | null | null | null | utils/toolkit.py | eLeVeNnN/fastorch | 7b73548b1b25c81dd41e4fc9cc7afb61bf8a5d2d | [
"MIT"
] | null | null | null | utils/toolkit.py | eLeVeNnN/fastorch | 7b73548b1b25c81dd41e4fc9cc7afb61bf8a5d2d | [
"MIT"
] | null | null | null | import numpy as np
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
import torch.optim as optim
| 36.840796 | 124 | 0.520324 | import numpy as np
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
import torch.optim as optim
class ProgressBar(object):
    """Renders a textual progress bar for iterative training loops.

    Args:
        max_iter: total number of iterations the bar represents.
        verbose: 1 renders an ASCII ``====>***`` bar; any other value
            renders a reverse-video block bar with a percentage readout.
        bar_nums: total width of the bar in characters.
        untrained_sign: character drawn for the not-yet-reached portion.
        trained_sign: character drawn for the completed portion.
    """

    def __init__(self,
                 max_iter: int = 1,
                 verbose: int = 1,
                 bar_nums: int = 20,
                 untrained_sign: str = '*',
                 trained_sign: str = '='):
        self.max_iter = max_iter
        self.verbose = verbose
        # Reserve one slot for the '>' head drawn in verbose-1 mode.
        self._nums = bar_nums - 1
        self._untrained = untrained_sign
        self._trained = trained_sign
        self.iter = 0

    def update(self, n_iter: int = 1):
        """Advance the bar by ``n_iter`` iterations."""
        self.iter += n_iter

    def get_bar(self) -> str:
        """Return the bar string for the current progress ratio."""
        trained_ratio = self.iter / self.max_iter
        reached_bar_nums = round(trained_ratio * self._nums)
        unreached_bar_nums = self._nums - reached_bar_nums
        if self.verbose == 1:
            bar = reached_bar_nums * self._trained + '>' + unreached_bar_nums * self._untrained
        else:
            percent = str(round(trained_ratio * 100))
            # Fix: parenthesize the repetition.  The original evaluated
            # '%' before '*', so it coloured a single space and then
            # repeated the whole escape sequence instead of colouring a
            # block of ``reached_bar_nums`` spaces.
            bar = '{black} {percent:>{white}}%'.format(
                black="\033[40m%s\033[0m" % (' ' * reached_bar_nums),
                percent=percent, white=unreached_bar_nums)
        return bar
class AverageMeter(object):
    """Tracks the running value, sum and average of a metric.

    Args:
        name: optional metric name, kept only for identification.
        verbose: 0 keeps scalar running statistics; any other value keeps
            the full history of values/sums/averages in lists.
    """

    def __init__(self, name=None, verbose=0):
        self.name = name
        self.val = None
        self.avg = None
        self.sums = None
        self.steps = 0
        self.verbose = verbose
        self.reset()

    def reset(self):
        """Clear all tracked statistics (container shape depends on ``verbose``)."""
        # Fix: the original left ``steps`` untouched, so averages computed
        # after a mid-training reset were divided by a stale step count.
        self.steps = 0
        if self.verbose == 0:
            self.val = 0.
            self.avg = 0.
            self.sums = 0.
        else:
            self.val = []
            self.avg = []
            self.sums = []

    def update(self, val, step=1):
        """Record ``val`` observed over ``step`` samples.

        Passing ``val=None`` marks the meter value as empty and records
        nothing (matches the original sentinel behavior).
        """
        if val is None:
            self.val = None
            return
        self.steps += step
        if self.verbose == 0:
            self.val = val
            self.sums += val * step
            self.avg = self.sums / self.steps
        else:
            # Fix: the original indexed self.sums[-1] unconditionally,
            # raising IndexError on the very first update.
            prev_sum = self.sums[-1] if self.sums else 0.
            self.val.append(val)
            self.sums.append(prev_sum + val * step)
            self.avg.append(self.sums[-1] / self.steps)
def split_data(arrays, start=0, end=None):
    """Slice ``arrays`` (or each array in a list of arrays) from ``start`` to ``end``.

    Args:
        arrays: a single array-like, or a list of array-likes that are
            each sliced individually.
        start: first index kept (inclusive).
        end: first index dropped (exclusive); ``None`` keeps the tail.

    Returns:
        A sliced numpy array, or a list of sliced numpy arrays when a
        list was passed in.
    """
    # Fix: the original converted to np.array *before* the isinstance
    # check, so the per-element list branch below was unreachable.
    if isinstance(arrays, list):
        return [np.array(x)[start:end] for x in arrays]
    # Slicing with end=None is identical to [start:], so one slice
    # expression covers both cases.
    return np.array(arrays)[start:end]
def get_optimizer(optimizer, model):
    """Resolve ``optimizer`` into a torch Optimizer bound to ``model``.

    Args:
        optimizer: an optimizer name ('sgd' or 'adam', case-insensitive)
            or an already-constructed ``torch.optim.Optimizer``.
        model: the module whose parameters will be optimized.

    Returns:
        A ``torch.optim.Optimizer`` instance.

    Raises:
        ValueError: for unrecognized names or argument types.
    """
    if isinstance(optimizer, str):
        optimizer = optimizer.lower()
        if optimizer in ['sgd']:
            # Default learning rate matches the original hard-coded value.
            return optim.SGD(model.parameters(), lr=1e-2)
        elif optimizer in ['adam']:
            return optim.Adam(model.parameters())
        else:
            # Typo fix: "Unknwon" -> "Unknown".
            raise ValueError('Unknown optimizer type!')
    elif isinstance(optimizer, Optimizer):
        return optimizer
    # Fix: the original silently returned None for any other type,
    # hiding misconfiguration from the caller.
    raise ValueError('unknown argument {}'.format(optimizer))
def get_objective(objective):
    """Resolve ``objective`` into a torch loss instance.

    Accepts either a case-insensitive loss name (e.g. 'mse', 'crossentropy')
    or an existing ``_Loss`` instance, which is returned unchanged.

    Raises:
        ValueError: when the name or argument type is not recognized.
    """
    if isinstance(objective, _Loss):
        return objective
    if not isinstance(objective, str):
        raise ValueError('unknown argument {}'.format(objective))
    # Factories are lazy lambdas so deprecated losses (e.g. NLLLoss2d)
    # are only touched when actually requested.
    factories = {
        ('l1', 'l1loss'): lambda: nn.L1Loss(),
        ('nll', 'nllloss'): lambda: nn.NLLLoss(),
        ('nll2d', 'nllloss2d'): lambda: nn.NLLLoss2d(),
        ('poissonnll', 'poissonnllloss'): lambda: nn.PoissonNLLLoss(),
        ('kldiv', 'kldivloss'): lambda: nn.KLDivLoss(),
        ('mse', 'mseloss'): lambda: nn.MSELoss(),
        ('bce', 'bceloss'): lambda: nn.BCELoss(),
        ('smoothl1', 'smoothl1loss'): lambda: nn.SmoothL1Loss(),
        ('crossentropy', 'cross_entropy'): lambda: nn.CrossEntropyLoss(),
        ('ctc', 'ctcloss'): lambda: nn.CTCLoss(),
    }
    lowered = objective.lower()
    for aliases, factory in factories.items():
        if lowered in aliases:
            return factory()
    raise ValueError('unknown argument!')
def console(prog_bar: ProgressBar = None,
verbose: int = 0,
trained_samples: int = None,
total_samples: int = None,
trained_batch: int = 1,
total_batch: int = 1,
trained_time: float = 0.,
batch_loss: float = 0.,
batch_acc: float = 0.,
validation_loss: float = None,
validation_acc: float = None):
if verbose == 0:
return
elif verbose == 1:
formated_trained_time = format_time(trained_time)
formated_per_batch_time = format_time(trained_time / trained_batch)
bar = prog_bar.get_bar()
if validation_loss is None and validation_acc is None:
print('\r {:d}/{:d} [{}] - {} - {}/batch -batch_loss: {:.4f} -batch_acc: {:.4f}'.format(trained_samples,
total_samples, bar,
formated_trained_time,
formated_per_batch_time,
batch_loss,
batch_acc),
flush=True, end='')
else:
print('\r {:d}/{:d} [{}] - {} - {}/batch'
' -batch_loss: {:.4f} -batch_acc: {:.4f} -validation_loss: {:.4f} -validation_acc: {:.4f}'.format(
trained_samples, total_samples, bar, formated_trained_time, formated_per_batch_time, batch_loss,
batch_acc, validation_loss, validation_acc), flush=True, end='')
elif verbose == 2:
batch_time = trained_time / trained_batch
eta = (total_batch - trained_batch) * batch_time
formated_eta = format_time(eta)
bar = prog_bar.get_bar()
if validation_loss is None and validation_acc is None:
print('{} -ETA {} -batch_loss: {:.4f} -batch_acc: {:.4f}'.format(bar, formated_eta, batch_loss, batch_acc))
else:
print(
'{} -ETA {} -batch_loss: {:.4f} -batch_acc: {:.4f} -validation_loss: {:.4f} -validation_acc: {:.4f}'.format(
bar, formated_eta, batch_loss, batch_acc, validation_loss, validation_acc))
else:
raise ValueError('Verbose only supports for 0, 1 and 2 ~')
def format_time(second_time: float) -> str:
    """Format a duration in seconds as a compact human-readable string.

    Sub-second durations render as microseconds or milliseconds; longer
    durations as seconds, minutes and hours (e.g. ``'1h2m5s'``).
    """
    if second_time < 1:
        ms = second_time * 1000
        if ms < 1:
            # Fix: the original multiplied by 1000 here (same as ms),
            # reporting millisecond values labelled as microseconds.
            us = second_time * 1000 * 1000
            return '%dus' % us
        else:
            return '%dms' % ms
    second_time = round(second_time)
    if second_time > 3600:
        # hours
        h = second_time // 3600
        second_time = second_time % 3600
        # minutes
        m = second_time // 60
        second_time = second_time % 60
        return '%dh%dm%ds' % (h, m, second_time)
    elif second_time > 60:
        m = second_time // 60
        second_time = second_time % 60
        return '%dm%ds' % (m, second_time)
    else:
        return '%ds' % second_time
| 6,912 | 11 | 321 |
c0a984092f028bc031c552bd5178bb8eb54691f9 | 5,494 | py | Python | corr/corr_load.py | cmolder/voyager-analysis | a44dcc4df88eaaa83e451eead39846c9eed985f4 | [
"MIT"
] | null | null | null | corr/corr_load.py | cmolder/voyager-analysis | a44dcc4df88eaaa83e451eead39846c9eed985f4 | [
"MIT"
] | null | null | null | corr/corr_load.py | cmolder/voyager-analysis | a44dcc4df88eaaa83e451eead39846c9eed985f4 | [
"MIT"
] | null | null | null | """Compute correlation between access history and next prefetch,
using load traces.
Need to run from above corr/ directory. If you still get an error,
try export PYTHONPATH=.
"""
import argparse
import time
from utils.load import get_open_function
from utils.load_trace import get_instructions
from utils.logging import log_progress
def gather_correlation_data(f, cd, pcd):
"""Wrapper function to gather correlation data
from each address in the load trace."""
# Count number of lines
nlines = 0
for _ in f:
nlines += 1
f.seek(0)
start_time = time.time()
for lnum, inst in enumerate(get_instructions(f)):
# Periodically log progress
log_progress(lnum, nlines, start_time, interval=50000)
# Add load to correlation tracker
addr = inst.addr
cd.add_addr(addr)
pcd.add_addr(addr)
# Print time to run
print('Time to run:', (time.time() - start_time) / 60, 'min')
class CorrelationData(object):
"""Track correlation between address histories (triggers) and the next prefetch address.
depth : how many prefetches to look ahead
- e.g. 1 = next prefetch, 2 = the second prefetch ahead, etc.
max_hist_len : number of prior global load addresses to consider as part of the trigger.
- Track all triggers of length 1 to max_hist_len (inclusive)
shift : number of bits to cut-off for tracking
- 0 : cache line temporal correlation
- 6 : page temporal correlation
"""
def compute_correlation(load_trace, depth, max_hist_len):
"""Main temporal correlation computation"""
correlation_data = CorrelationData(depth, max_hist_len)
page_correlation_data = CorrelationData(depth, max_hist_len, shift=6)
start = time.time()
l_open = get_open_function(load_trace)
with l_open(load_trace, mode='rt', encoding='utf-8') as f:
gather_correlation_data(f, correlation_data, page_correlation_data)
print_freqs(correlation_data.compute_freqs(), 'Cache Lines')
print_freqs(page_correlation_data.compute_freqs(), 'Pages')
print_freqs(correlation_data.compute_freqs(weighted=True), 'Weighted Cache Lines')
print_freqs(page_correlation_data.compute_freqs(weighted=True), 'Weighted Pages')
print('Time to run:', (time.time() - start) / 60, 'min')
if __name__ == '__main__':
args = get_argument_parser()
compute_correlation(args.load_trace, args.depth, args.max_hist_len)
| 36.384106 | 106 | 0.638151 | """Compute correlation between access history and next prefetch,
using load traces.
Need to run from above corr/ directory. If you still get an error,
try export PYTHONPATH=.
"""
import argparse
import time
from utils.load import get_open_function
from utils.load_trace import get_instructions
from utils.logging import log_progress
def gather_correlation_data(f, cd, pcd):
    """Wrapper function to gather correlation data
    from each address in the load trace."""
    # First pass just counts lines so progress percentages can be logged.
    total_lines = sum(1 for _ in f)
    f.seek(0)
    started = time.time()
    for line_no, instruction in enumerate(get_instructions(f)):
        # Periodically report how far through the trace we are.
        log_progress(line_no, total_lines, started, interval=50000)
        # Feed each load address into both correlation trackers.
        address = instruction.addr
        cd.add_addr(address)
        pcd.add_addr(address)
    print('Time to run:', (time.time() - started) / 60, 'min')
class CorrelationData(object):
    """Track correlation between address histories (triggers) and the next prefetch address.
    depth : how many prefetches to look ahead
        - e.g. 1 = next prefetch, 2 = the second prefetch ahead, etc.
    max_hist_len : number of prior global load addresses to consider as part of the trigger.
        - Track all triggers of length 1 to max_hist_len (inclusive)
    shift : number of bits to cut-off for tracking
        - 0 : cache line temporal correlation
        - 6 : page temporal correlation
    """
    def __init__(self, depth, max_hist_len, shift=0):
        self.depth = depth
        # Sliding window of the most recent address tags (trigger source).
        self.hist = []
        # We're considering the correlation for triggers of length 1 to max_hist_len (inclusive)
        self.max_hist_len = max_hist_len
        # data[hist_len][trigger tuple][correlated addr tag] -> occurrence count
        self.data = {i: {} for i in range(1, max_hist_len + 1)}
        # How much extra to cutoff for tracking.
        # 0 corresponds to cache line temporal correlation
        # 6 corresponds to page temporal correlation
        self.shift = shift
    def add_addr(self, addr):
        """Record ``addr``, correlating it against the buffered history window."""
        # Only take some bits of the full address
        addr_tag = self.addr_tag(addr)
        # Correlate only once the window is full (history plus lookahead).
        if len(self.hist) == self.max_hist_len + self.depth - 1:
            # For every history length, keep track of how many times addr_tag shows up
            # given the history
            for hist_len in self.data:
                # tag is the history trigger
                tag = tuple(self.hist[(self.max_hist_len - hist_len):self.max_hist_len])
                if tag not in self.data[hist_len]:
                    self.data[hist_len][tag] = {}
                # Add the current address
                if addr_tag not in self.data[hist_len][tag]:
                    self.data[hist_len][tag][addr_tag] = 0
                self.data[hist_len][tag][addr_tag] += 1
        # Update history with addr_tag
        self.hist.append(addr_tag)
        if len(self.hist) > self.max_hist_len + self.depth - 1:
            self.hist = self.hist[1:]
    def addr_tag(self, addr):
        """Drop the 6 cache-line offset bits plus ``shift`` extra bits from ``addr``."""
        return addr >> (self.shift + 6)
    def compute_freqs(self, weighted=False):
        """Return freqs[hist_len][# unique correlated addrs] -> trigger count.

        When ``weighted`` is True, each trigger contributes its total number
        of recorded address occurrences instead of 1.
        """
        freqs = {}
        for hist_len in self.data:
            freqs[hist_len] = {}
            for tag in self.data[hist_len]:
                # # of unique correlated addresses
                num_unique_correlated_addrs = len(self.data[hist_len][tag])
                if num_unique_correlated_addrs not in freqs[hist_len]:
                    freqs[hist_len][num_unique_correlated_addrs] = 0
                # If we want the frequency to be weighted by # of addresses for this
                # history trigger
                if weighted:
                    freqs[hist_len][num_unique_correlated_addrs] += sum(self.data[hist_len][tag].values())
                else:
                    freqs[hist_len][num_unique_correlated_addrs] += 1
        return freqs
def print_freqs(freqs, suffix=''):
    """Print each history length's frequency table in sorted-key order."""
    for length, table in freqs.items():
        print(length, suffix)
        print({key: table[key] for key in sorted(table)})
def get_argument_parser():
    """Parse the downloader's CLI arguments and echo them for the log."""
    parser = argparse.ArgumentParser()
    parser.add_argument('load_trace')
    parser.add_argument('-d', '--depth', type=int, default=1)
    parser.add_argument('-l', '--max-hist-len', type=int, default=4)
    parsed_args = parser.parse_args()
    print('Arguments:')
    print(' Load trace :', parsed_args.load_trace)
    print(' Depth :', parsed_args.depth)
    print(' Max history len:', parsed_args.max_hist_len)
    return parsed_args
def compute_correlation(load_trace, depth, max_hist_len):
    """Run the temporal-correlation analysis over ``load_trace`` and print results."""
    line_data = CorrelationData(depth, max_hist_len)
    # shift=6 aggregates at page granularity instead of cache-line.
    page_data = CorrelationData(depth, max_hist_len, shift=6)
    started = time.time()
    opener = get_open_function(load_trace)
    with opener(load_trace, mode='rt', encoding='utf-8') as trace_file:
        gather_correlation_data(trace_file, line_data, page_data)
    # Report unweighted then weighted frequency tables for both granularities.
    print_freqs(line_data.compute_freqs(), 'Cache Lines')
    print_freqs(page_data.compute_freqs(), 'Pages')
    print_freqs(line_data.compute_freqs(weighted=True), 'Weighted Cache Lines')
    print_freqs(page_data.compute_freqs(weighted=True), 'Weighted Pages')
    print('Time to run:', (time.time() - started) / 60, 'min')
if __name__ == '__main__':
    # Entry point: parse CLI arguments, then run the correlation analysis.
    args = get_argument_parser()
    compute_correlation(args.load_trace, args.depth, args.max_hist_len)
| 2,829 | 0 | 153 |
8b4addbb43fa421cea80cb0bfe023f54f404e3ad | 6,411 | py | Python | c_mock_generator/generate_mock.py | BjoernLange/C-Mock-Generator | 91f0e331abf54dc5b6706796c02341e23b2e06d7 | [
"MIT"
] | null | null | null | c_mock_generator/generate_mock.py | BjoernLange/C-Mock-Generator | 91f0e331abf54dc5b6706796c02341e23b2e06d7 | [
"MIT"
] | 3 | 2020-06-07T12:48:17.000Z | 2020-07-26T12:52:45.000Z | c_mock_generator/generate_mock.py | BjoernLange/C-Mock-Generator | 91f0e331abf54dc5b6706796c02341e23b2e06d7 | [
"MIT"
] | null | null | null | import argparse
import os
from typing import Iterable, Tuple, List, Optional
from .module_definition import Module, Method, ParameterDocumentation
from .module_definition.exceptions import MockGeneratorError
from .util import CodeBuilder, TemplateFormatter, read_lines
if __name__ == '__main__':
main()
| 34.842391 | 79 | 0.6545 | import argparse
import os
from typing import Iterable, Tuple, List, Optional
from .module_definition import Module, Method, ParameterDocumentation
from .module_definition.exceptions import MockGeneratorError
from .util import CodeBuilder, TemplateFormatter, read_lines
def line_starts_with_documentation(line: str) -> bool:
    """Return True when ``line`` opens a javadoc-style ``/**`` comment block."""
    stripped = line.lstrip()
    return stripped.startswith('/**')
class LineIterator(Iterable):
    """Iterates source lines while supporting multi-line look-ahead parsing."""

    def __init__(self, lines: Iterable[str]) -> None:
        self.iterator = iter(lines)

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.iterator)

    def parse_method(self):
        """Consume a ``/** ... */`` block plus the following declaration."""
        documentation = self.read_until('*/')
        parameter_documentation = \
            ParameterDocumentation.multiple_from_documentation(documentation)
        method_definition = self.read_until(';').rstrip()
        method = Method.from_method_definition(method_definition)
        method.enrich_with_documentation(parameter_documentation)
        return method

    def read_until(self, terminator: str) -> str:
        """Concatenate lines up to and including the one ending in ``terminator``."""
        collected = []
        current = next(self.iterator)
        while not current.rstrip().endswith(terminator):
            collected.append(current)
            current = next(self.iterator)
        collected.append(current)
        return ''.join(collected)
class MockHeaderCodeGenerator:
    """Parses a C header line-by-line, emitting the mock header text and
    collecting documented methods into a ``Module`` model."""
    def __init__(self, module_name: str, source_include_path: Optional[str],
                 lines: Iterable[str]) -> None:
        self.module_name = module_name
        self.source_include_path = source_include_path
        self.line_it = LineIterator(lines)
        self.header_builder = CodeBuilder()
        # Nesting depth of #if/#endif; set_up/tear_down declarations are
        # only injected at preprocessor top level (depth 0).
        self.ifdef_level = 0
        self.methods: List[Method] = []
        self.set_up_tear_down_generated = False
    def generate_header_code(self) -> None:
        """Walk the input, copying lines through and parsing documented methods."""
        line_number = 1
        try:
            for line in self.line_it:
                self.track_ifdef_level(line)
                if self.should_generate_set_up_and_tear_down_declarations():
                    self.append_set_up_tear_down_declaration()
                if line_starts_with_documentation(line):
                    # Consumes multiple lines (doc block + declaration).
                    method = self.line_it.parse_method()
                    self.register_method(method)
                else:
                    self.append_line(line)
                    self.append_newline()
                line_number = line_number + 1
        except MockGeneratorError as error:
            # NOTE(review): line_number counts loop iterations, not source
            # lines -- parse_method consumes several lines per iteration,
            # so the reported position is approximate.
            print('Parsing error at line {}: {}'
                  .format(line_number, str(error)))
            exit(1)
    @property
    def module(self) -> Module:
        # The parsed module model: include path, name, collected methods.
        return Module(self.source_include_path, self.module_name, self.methods)
    @property
    def header_code(self) -> str:
        # The generated mock header text accumulated so far.
        return str(self.header_builder)
    def append_newline(self) -> None:
        self.header_builder.newline()
    def append_line(self, line: str) -> None:
        self.header_builder.append(line.rstrip())
    def should_generate_set_up_and_tear_down_declarations(self) -> bool:
        # Emit only once, and never inside a preprocessor conditional.
        return not self.set_up_tear_down_generated and self.ifdef_level == 0
    def register_method(self, method: Method) -> None:
        # Emit the mock declaration for the method and remember it for the model.
        self.header_builder.append(method.generate_header_content())
        self.methods.append(method)
    def track_ifdef_level(self, line: str) -> None:
        # Matches #if, #ifdef and #ifndef via the common "#if" prefix.
        if line.lstrip().startswith('#if'):
            self.ifdef_level = self.ifdef_level + 1
        elif line.lstrip().startswith('#endif'):
            self.ifdef_level = self.ifdef_level - 1
    def append_set_up_tear_down_declaration(self) -> None:
        # Declares the mock lifecycle hooks: mock_<module>_set_up/_tear_down.
        self.set_up_tear_down_generated = True
        self.header_builder.append('void mock_', self.module_name,
                                   '_set_up(void);').newline()
        self.header_builder.append('void mock_', self.module_name,
                                   '_tear_down(void);').newline().newline()
def generate_mock_header_code(
        module_name: str, source_include_path: Optional[str],
        lines: Iterable[str]) -> Tuple[str, Module]:
    """Generate the mock header text and module model for the given header lines."""
    gen = MockHeaderCodeGenerator(module_name, source_include_path, lines)
    gen.generate_header_code()
    return gen.header_code, gen.module
def generate_mock_source_code(module: Module):
    """Render the mock C source for ``module`` from the bundled template."""
    # The template ships with this package under resource/source_template.c.
    source_template = os.path.join(
        os.path.dirname(__file__), 'resource', 'source_template.c')
    return '\n'.join(TemplateFormatter(
        read_lines(source_template)).format(module))
def generate_mock_code_for_lines(
        module_name: str, source_include_path: Optional[str],
        lines: Iterable[str]) -> Tuple[str, str]:
    """Produce the (mock header, mock source) code pair for a header's lines."""
    header_code, module = generate_mock_header_code(
        module_name, source_include_path, lines)
    return header_code, generate_mock_source_code(module)
def are_args_valid(args) -> bool:
    """Validate CLI paths: the input header must exist; outputs must be new files."""
    # Input must be an existing .h file.
    if not (args.input.endswith('.h') and os.path.isfile(args.input)):
        print('Input needs to be a C header file.')
        return False
    # Each output must carry the right extension and must not exist yet.
    checks = (
        (args.output_header, '.h',
         'Output header needs to be a C header file name and '
         'must not exist.'),
        (args.output_source, '.c',
         'Output source needs to be a C source file name and '
         'must not exist.'),
    )
    for path, suffix, message in checks:
        if not path.endswith(suffix) or os.path.isfile(path):
            print(message)
            return False
    return True
def generate_mock_code(args) -> None:
    """Read the input header and write the generated mock header/source files.

    Silently returns (after printing the reason) when the CLI paths fail
    validation in are_args_valid.
    """
    if not are_args_valid(args):
        return
    with open(args.input) as file:
        # Module name is the header's basename with the '.h' suffix dropped.
        module_name = os.path.basename(args.input)[:-2]
        source_include_path = args.source_include_path
        # Lazily stream the header lines into the generator.
        lines = (x for x in file)
        header, source = generate_mock_code_for_lines(
            module_name, source_include_path, lines)
    with open(args.output_header, 'w') as file:
        file.write(header)
    with open(args.output_source, 'w') as file:
        file.write(source)
def main() -> None:
    """CLI entry point: parse arguments and run mock generation."""
    parser = argparse.ArgumentParser()
    # Required, but flag-style (named) rather than positional arguments.
    required_named = parser.add_argument_group('required named arguments')
    required_named.add_argument('-i', '--input', required=True)
    required_named.add_argument('-oh', '--output-header', required=True)
    required_named.add_argument('-oc', '--output-source', required=True)
    parser.add_argument('-cp', '--source-include-path')
    generate_mock_code(parser.parse_args())
if __name__ == '__main__':
    main()
| 5,437 | 314 | 341 |
3d0efe650cbfc962d3f0b737e1aa9610e7336754 | 15,627 | py | Python | tests/unit/threat_intel_downloader/test_main.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | null | null | null | tests/unit/threat_intel_downloader/test_main.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | 1 | 2018-08-08T03:26:58.000Z | 2018-08-08T03:26:58.000Z | tests/unit/threat_intel_downloader/test_main.py | opsbay/streamalert | 557fb3f604661cdd9bd36486cccc8ce3a34bd1f1 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import os
import boto3
from botocore.exceptions import ClientError
from mock import Mock, patch
from moto import mock_ssm
from nose.tools import (
assert_equal,
raises
)
from stream_alert.shared.config import load_config
from stream_alert.threat_intel_downloader.exceptions import (
ThreatStreamCredsError,
ThreatStreamLambdaInvokeError,
ThreatStreamRequestsError
)
from stream_alert.threat_intel_downloader.main import ThreatStream
from tests.unit.stream_alert_apps.test_helpers import MockLambdaClient
from tests.unit.threat_intel_downloader.test_helpers import get_mock_context, put_mock_params
@patch('time.sleep', Mock())
class TestThreatStream(object):
"""Test class to test ThreatStream functionalities"""
# pylint: disable=protected-access
@patch('stream_alert.threat_intel_downloader.main.load_config',
Mock(return_value=load_config('tests/unit/conf/')))
def setup(self):
"""Setup TestThreatStream"""
# pylint: disable=attribute-defined-outside-init
context = get_mock_context(100000)
self.threatstream = ThreatStream(context.invoked_function_arn,
context.get_remaining_time_in_millis)
@staticmethod
@staticmethod
@patch('stream_alert.threat_intel_downloader.main.load_config',
Mock(return_value=load_config('tests/unit/conf/')))
def test_load_config(self):
"""ThreatStream - Load Config"""
arn = 'arn:aws:lambda:region:123456789012:function:name:development'
expected_config = {
'account_id': '123456789012',
'function_name': 'name',
'qualifier': 'development',
'region': 'region',
'enabled': True,
'excluded_sub_types': [
'bot_ip',
'brute_ip',
'scan_ip',
'spam_ip',
'tor_ip'
],
'ioc_filters': [
'crowdstrike',
'@airbnb.com'
],
'ioc_keys': [
'expiration_ts',
'itype',
'source',
'type',
'value'
],
'ioc_types': [
'domain',
'ip',
'md5'
],
'memory': '128',
'timeout': '60'
}
assert_equal(self.threatstream._load_config(arn), expected_config)
def test_process_data(self):
"""ThreatStream - Process Raw IOC Data"""
raw_data = [
self._get_fake_intel('malicious_domain.com', 'ioc_source'),
self._get_fake_intel('malicious_domain2.com', 'ioc_source2'),
# this will get filtered out
self._get_fake_intel('malicious_domain3.com', 'bad_source_ioc'),
]
self.threatstream._config['ioc_filters'] = {'ioc_source'}
processed_data = self.threatstream._process_data(raw_data)
expected_result = [
{
'value': 'malicious_domain.com',
'itype': 'c2_domain',
'source': 'ioc_source',
'type': 'domain',
'expiration_ts': 1512000062
},
{
'value': 'malicious_domain2.com',
'itype': 'c2_domain',
'source': 'ioc_source2',
'type': 'domain',
'expiration_ts': 1512000062
}
]
assert_equal(processed_data, expected_result)
@mock_ssm
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds(self):
"""ThreatStream - Load API creds from SSM"""
value = {'api_user': 'test_user', 'api_key': 'test_key'}
put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
self.threatstream._load_api_creds()
assert_equal(self.threatstream.api_user, 'test_user')
assert_equal(self.threatstream.api_key, 'test_key')
@mock_ssm
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds_cached(self):
"""ThreatStream - Load API creds from SSM, Cached"""
value = {'api_user': 'test_user', 'api_key': 'test_key'}
put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
self.threatstream._load_api_creds()
assert_equal(self.threatstream.api_user, 'test_user')
assert_equal(self.threatstream.api_key, 'test_key')
self.threatstream._load_api_creds()
@mock_ssm
@raises(ClientError)
def test_load_api_creds_client_errors(self):
"""ThreatStream - Load API creds from SSM, ClientError"""
self.threatstream._load_api_creds()
@patch('boto3.client')
@raises(ThreatStreamCredsError)
def test_load_api_creds_empty_response(self, boto_mock):
"""ThreatStream - Load API creds from SSM, Empty Response"""
boto_mock.return_value.get_parameter.return_value = None
self.threatstream._load_api_creds()
@mock_ssm
@raises(ThreatStreamCredsError)
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds_invalid_json(self):
"""ThreatStream - Load API creds from SSM with invalid JSON"""
boto3.client('ssm').put_parameter(
Name=ThreatStream.CRED_PARAMETER_NAME,
Value='invalid_value',
Type='SecureString',
Overwrite=True
)
self.threatstream._load_api_creds()
@mock_ssm
@raises(ThreatStreamCredsError)
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def test_load_api_creds_no_api_key(self):
"""ThreatStream - Load API creds from SSM, No API Key"""
value = {'api_user': 'test_user', 'api_key': ''}
put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
self.threatstream._load_api_creds()
@patch('stream_alert.threat_intel_downloader.main.datetime')
def test_epoch_now(self, date_mock):
"""ThreatStream - Epoch, Now"""
fake_date_now = datetime(year=2017, month=9, day=1)
date_mock.utcnow.return_value = fake_date_now
date_mock.utcfromtimestamp = datetime.utcfromtimestamp
expected_value = datetime(year=2017, month=11, day=30)
value = self.threatstream._epoch_time(None)
assert_equal(datetime.utcfromtimestamp(value), expected_value)
def test_epoch_from_time(self):
"""ThreatStream - Epoch, From Timestamp"""
expected_value = datetime(year=2017, month=11, day=30)
value = self.threatstream._epoch_time('2017-11-30T00:00:00.000Z')
assert_equal(datetime.utcfromtimestamp(value), expected_value)
@raises(ValueError)
def test_epoch_from_bad_time(self):
"""ThreatStream - Epoch, Error"""
self.threatstream._epoch_time('20171130T00:00:00.000Z')
def test_excluded_sub_types(self):
"""ThreatStream - Excluded Sub Types Property"""
expected_value = ['bot_ip', 'brute_ip', 'scan_ip', 'spam_ip', 'tor_ip']
assert_equal(self.threatstream.excluded_sub_types, expected_value)
def test_ioc_keys(self):
"""ThreatStream - IOC Keys Property"""
expected_value = ['expiration_ts', 'itype', 'source', 'type', 'value']
assert_equal(self.threatstream.ioc_keys, expected_value)
def test_ioc_sources(self):
"""ThreatStream - IOC Sources Property"""
expected_value = ['crowdstrike', '@airbnb.com']
assert_equal(self.threatstream.ioc_sources, expected_value)
def test_ioc_types(self):
"""ThreatStream - IOC Types Property"""
expected_value = ['domain', 'ip', 'md5']
assert_equal(self.threatstream.ioc_types, expected_value)
def test_threshold(self):
"""ThreatStream - Threshold Property"""
assert_equal(self.threatstream.threshold, 499000)
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._finalize')
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect(self, get_mock, finalize_mock):
"""ThreatStream - Connection to ThreatStream.com"""
get_mock.return_value.json.return_value = self._get_http_response()
get_mock.return_value.status_code = 200
self.threatstream._config['ioc_filters'] = {'test_source'}
self.threatstream._connect('previous_url')
expected_intel = [
{
'value': 'malicious_domain2.com',
'itype': 'c2_domain',
'source': 'test_source',
'type': 'domain',
'expiration_ts': 1512000062
}
]
finalize_mock.assert_called_with(expected_intel, None)
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._finalize')
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_next(self, get_mock, finalize_mock):
"""ThreatStream - Connection to ThreatStream.com, with Continuation"""
next_url = 'this_url'
get_mock.return_value.json.return_value = self._get_http_response(next_url)
get_mock.return_value.status_code = 200
self.threatstream._config['ioc_filters'] = {'test_source'}
self.threatstream._connect('previous_url')
expected_intel = [
{
'value': 'malicious_domain2.com',
'itype': 'c2_domain',
'source': 'test_source',
'type': 'domain',
'expiration_ts': 1512000062
}
]
finalize_mock.assert_called_with(expected_intel, next_url)
@raises(ThreatStreamRequestsError)
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_unauthed(self, get_mock):
"""ThreatStream - Connection to ThreatStream.com, Unauthorized Error"""
get_mock.return_value.json.return_value = self._get_http_response()
get_mock.return_value.status_code = 401
self.threatstream._connect('previous_url')
@raises(ThreatStreamRequestsError)
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_retry_error(self, get_mock):
"""ThreatStream - Connection to ThreatStream.com, Retry Error"""
get_mock.return_value.status_code = 500
self.threatstream._connect('previous_url')
@raises(ThreatStreamRequestsError)
@patch('stream_alert.threat_intel_downloader.main.requests.get')
def test_connect_with_unknown_error(self, get_mock):
"""ThreatStream - Connection to ThreatStream.com, Unknown Error"""
get_mock.return_value.status_code = 404
self.threatstream._connect('previous_url')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._load_api_creds')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._connect')
def test_runner(self, connect_mock, _):
"""ThreatStream - Runner"""
expected_url = ('/api/v2/intelligence/?username=user&api_key=key&limit=1000&q='
'(status="active")+AND+(type="domain"+OR+type="ip"+OR+type="md5")+'
'AND+NOT+(itype="bot_ip"+OR+itype="brute_ip"+OR+itype="scan_ip"+'
'OR+itype="spam_ip"+OR+itype="tor_ip")')
self.threatstream.api_key = 'key'
self.threatstream.api_user = 'user'
self.threatstream.runner({'none': 'test'})
connect_mock.assert_called_with(expected_url)
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._write_to_dynamodb_table')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._invoke_lambda_function')
def test_finalize(self, invoke_mock, write_mock):
"""ThreatStream - Finalize with Intel"""
intel = ['foo', 'bar']
self.threatstream._finalize(intel, None)
write_mock.assert_called_with(intel)
invoke_mock.assert_not_called()
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._write_to_dynamodb_table')
@patch('stream_alert.threat_intel_downloader.main.ThreatStream._invoke_lambda_function')
def test_finalize_next_url(self, invoke_mock, write_mock):
"""ThreatStream - Finalize with Next URL"""
intel = ['foo', 'bar']
self.threatstream._finalize(intel, 'next')
write_mock.assert_called_with(intel)
invoke_mock.assert_called_with('next')
@patch('boto3.resource')
def test_write_to_dynamodb_table(self, boto_mock):
"""ThreatStream - Write Intel to DynamoDB Table"""
intel = [self._get_fake_intel('malicious_domain.com', 'test_source')]
expected_intel = {
'expiration_ts': '2017-11-30T00:01:02.123Z',
'source': 'test_source',
'ioc_type': 'domain',
'sub_type': 'c2_domain',
'ioc_value': 'malicious_domain.com'
}
self.threatstream._write_to_dynamodb_table(intel)
batch_writer = boto_mock.return_value.Table.return_value.batch_writer.return_value
batch_writer.__enter__.return_value.put_item.assert_called_with(Item=expected_intel)
@patch('boto3.resource')
@raises(ClientError)
def test_write_to_dynamodb_table_error(self, boto_mock):
"""ThreatStream - Write Intel to DynamoDB Table, Error"""
intel = [self._get_fake_intel('malicious_domain.com', 'test_source')]
err = ClientError({'Error': {'Code': 404}}, 'PutItem')
batch_writer = boto_mock.return_value.Table.return_value.batch_writer.return_value
batch_writer.__enter__.return_value.put_item.side_effect = err
self.threatstream._write_to_dynamodb_table(intel)
@patch('boto3.client')
def test_invoke_lambda_function(self, boto_mock):
"""ThreatStream - Invoke Lambda Function"""
boto_mock.return_value = MockLambdaClient()
self.threatstream._invoke_lambda_function('next_token')
boto_mock.assert_called_once()
@patch('boto3.client', Mock(return_value=MockLambdaClient()))
@raises(ThreatStreamLambdaInvokeError)
def test_invoke_lambda_function_error(self):
"""ThreatStream - Invoke Lambda Function, Error"""
MockLambdaClient._raise_exception = True
self.threatstream._invoke_lambda_function('next_token')
| 41.34127 | 93 | 0.651949 | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import os
import boto3
from botocore.exceptions import ClientError
from mock import Mock, patch
from moto import mock_ssm
from nose.tools import (
assert_equal,
raises
)
from stream_alert.shared.config import load_config
from stream_alert.threat_intel_downloader.exceptions import (
ThreatStreamCredsError,
ThreatStreamLambdaInvokeError,
ThreatStreamRequestsError
)
from stream_alert.threat_intel_downloader.main import ThreatStream
from tests.unit.stream_alert_apps.test_helpers import MockLambdaClient
from tests.unit.threat_intel_downloader.test_helpers import get_mock_context, put_mock_params
@patch('time.sleep', Mock())
class TestThreatStream(object):
    """Test class to test ThreatStream functionalities"""
    # pylint: disable=protected-access

    @patch('stream_alert.threat_intel_downloader.main.load_config',
           Mock(return_value=load_config('tests/unit/conf/')))
    def setup(self):
        """Setup TestThreatStream"""
        # pylint: disable=attribute-defined-outside-init
        # Build the object under test against a mocked Lambda context.
        context = get_mock_context(100000)
        self.threatstream = ThreatStream(context.invoked_function_arn,
                                         context.get_remaining_time_in_millis)

    @staticmethod
    def _get_fake_intel(value, source):
        # Helper: a single raw intel record shaped like the ThreatStream API output.
        return {
            'value': value,
            'itype': 'c2_domain',
            'source': source,
            'type': 'domain',
            'expiration_ts': '2017-11-30T00:01:02.123Z',
            'key1': 'value1',
            'key2': 'value2'
        }

    @staticmethod
    def _get_http_response(next_url=None):
        # Helper: a fake paginated JSON payload from the ThreatStream API.
        return {
            'key1': 'value1',
            'objects': [
                TestThreatStream._get_fake_intel('malicious_domain.com', 'ioc_source'),
                TestThreatStream._get_fake_intel('malicious_domain2.com', 'test_source')
            ],
            'meta': {
                'next': next_url,
                'offset': 100
            }
        }

    @patch('stream_alert.threat_intel_downloader.main.load_config',
           Mock(return_value=load_config('tests/unit/conf/')))
    def test_load_config(self):
        """ThreatStream - Load Config"""
        # The ARN encodes account, function name, region and qualifier.
        arn = 'arn:aws:lambda:region:123456789012:function:name:development'
        expected_config = {
            'account_id': '123456789012',
            'function_name': 'name',
            'qualifier': 'development',
            'region': 'region',
            'enabled': True,
            'excluded_sub_types': [
                'bot_ip',
                'brute_ip',
                'scan_ip',
                'spam_ip',
                'tor_ip'
            ],
            'ioc_filters': [
                'crowdstrike',
                '@airbnb.com'
            ],
            'ioc_keys': [
                'expiration_ts',
                'itype',
                'source',
                'type',
                'value'
            ],
            'ioc_types': [
                'domain',
                'ip',
                'md5'
            ],
            'memory': '128',
            'timeout': '60'
        }
        assert_equal(self.threatstream._load_config(arn), expected_config)

    def test_process_data(self):
        """ThreatStream - Process Raw IOC Data"""
        raw_data = [
            self._get_fake_intel('malicious_domain.com', 'ioc_source'),
            self._get_fake_intel('malicious_domain2.com', 'ioc_source2'),
            # this will get filtered out
            self._get_fake_intel('malicious_domain3.com', 'bad_source_ioc'),
        ]
        # Only records whose source matches one of the filters survive.
        self.threatstream._config['ioc_filters'] = {'ioc_source'}
        processed_data = self.threatstream._process_data(raw_data)
        # Note the ISO expiration string is converted to an epoch int.
        expected_result = [
            {
                'value': 'malicious_domain.com',
                'itype': 'c2_domain',
                'source': 'ioc_source',
                'type': 'domain',
                'expiration_ts': 1512000062
            },
            {
                'value': 'malicious_domain2.com',
                'itype': 'c2_domain',
                'source': 'ioc_source2',
                'type': 'domain',
                'expiration_ts': 1512000062
            }
        ]
        assert_equal(processed_data, expected_result)

    @mock_ssm
    @patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
    def test_load_api_creds(self):
        """ThreatStream - Load API creds from SSM"""
        value = {'api_user': 'test_user', 'api_key': 'test_key'}
        put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
        self.threatstream._load_api_creds()
        assert_equal(self.threatstream.api_user, 'test_user')
        assert_equal(self.threatstream.api_key, 'test_key')

    @mock_ssm
    @patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
    def test_load_api_creds_cached(self):
        """ThreatStream - Load API creds from SSM, Cached"""
        value = {'api_user': 'test_user', 'api_key': 'test_key'}
        put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
        self.threatstream._load_api_creds()
        assert_equal(self.threatstream.api_user, 'test_user')
        assert_equal(self.threatstream.api_key, 'test_key')
        # Second call should hit the cached values (no SSM round trip).
        self.threatstream._load_api_creds()

    @mock_ssm
    @raises(ClientError)
    def test_load_api_creds_client_errors(self):
        """ThreatStream - Load API creds from SSM, ClientError"""
        # No parameter was stored, so SSM raises a ClientError.
        self.threatstream._load_api_creds()

    @patch('boto3.client')
    @raises(ThreatStreamCredsError)
    def test_load_api_creds_empty_response(self, boto_mock):
        """ThreatStream - Load API creds from SSM, Empty Response"""
        boto_mock.return_value.get_parameter.return_value = None
        self.threatstream._load_api_creds()

    @mock_ssm
    @raises(ThreatStreamCredsError)
    @patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
    def test_load_api_creds_invalid_json(self):
        """ThreatStream - Load API creds from SSM with invalid JSON"""
        boto3.client('ssm').put_parameter(
            Name=ThreatStream.CRED_PARAMETER_NAME,
            Value='invalid_value',
            Type='SecureString',
            Overwrite=True
        )
        self.threatstream._load_api_creds()

    @mock_ssm
    @raises(ThreatStreamCredsError)
    @patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
    def test_load_api_creds_no_api_key(self):
        """ThreatStream - Load API creds from SSM, No API Key"""
        # Empty api_key must be rejected even though the param exists.
        value = {'api_user': 'test_user', 'api_key': ''}
        put_mock_params(ThreatStream.CRED_PARAMETER_NAME, value)
        self.threatstream._load_api_creds()

    @patch('stream_alert.threat_intel_downloader.main.datetime')
    def test_epoch_now(self, date_mock):
        """ThreatStream - Epoch, Now"""
        # Freeze "now" so the default expiration (now + 90 days) is deterministic.
        fake_date_now = datetime(year=2017, month=9, day=1)
        date_mock.utcnow.return_value = fake_date_now
        date_mock.utcfromtimestamp = datetime.utcfromtimestamp
        expected_value = datetime(year=2017, month=11, day=30)
        value = self.threatstream._epoch_time(None)
        assert_equal(datetime.utcfromtimestamp(value), expected_value)

    def test_epoch_from_time(self):
        """ThreatStream - Epoch, From Timestamp"""
        expected_value = datetime(year=2017, month=11, day=30)
        value = self.threatstream._epoch_time('2017-11-30T00:00:00.000Z')
        assert_equal(datetime.utcfromtimestamp(value), expected_value)

    @raises(ValueError)
    def test_epoch_from_bad_time(self):
        """ThreatStream - Epoch, Error"""
        # Missing date separators -> strptime should fail.
        self.threatstream._epoch_time('20171130T00:00:00.000Z')

    def test_excluded_sub_types(self):
        """ThreatStream - Excluded Sub Types Property"""
        expected_value = ['bot_ip', 'brute_ip', 'scan_ip', 'spam_ip', 'tor_ip']
        assert_equal(self.threatstream.excluded_sub_types, expected_value)

    def test_ioc_keys(self):
        """ThreatStream - IOC Keys Property"""
        expected_value = ['expiration_ts', 'itype', 'source', 'type', 'value']
        assert_equal(self.threatstream.ioc_keys, expected_value)

    def test_ioc_sources(self):
        """ThreatStream - IOC Sources Property"""
        expected_value = ['crowdstrike', '@airbnb.com']
        assert_equal(self.threatstream.ioc_sources, expected_value)

    def test_ioc_types(self):
        """ThreatStream - IOC Types Property"""
        expected_value = ['domain', 'ip', 'md5']
        assert_equal(self.threatstream.ioc_types, expected_value)

    def test_threshold(self):
        """ThreatStream - Threshold Property"""
        assert_equal(self.threatstream.threshold, 499000)

    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._finalize')
    @patch('stream_alert.threat_intel_downloader.main.requests.get')
    def test_connect(self, get_mock, finalize_mock):
        """ThreatStream - Connection to ThreatStream.com"""
        get_mock.return_value.json.return_value = self._get_http_response()
        get_mock.return_value.status_code = 200
        self.threatstream._config['ioc_filters'] = {'test_source'}
        self.threatstream._connect('previous_url')
        # Only the record from 'test_source' passes the filter.
        expected_intel = [
            {
                'value': 'malicious_domain2.com',
                'itype': 'c2_domain',
                'source': 'test_source',
                'type': 'domain',
                'expiration_ts': 1512000062
            }
        ]
        finalize_mock.assert_called_with(expected_intel, None)

    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._finalize')
    @patch('stream_alert.threat_intel_downloader.main.requests.get')
    def test_connect_with_next(self, get_mock, finalize_mock):
        """ThreatStream - Connection to ThreatStream.com, with Continuation"""
        # A non-None 'next' URL in the payload must be forwarded to _finalize.
        next_url = 'this_url'
        get_mock.return_value.json.return_value = self._get_http_response(next_url)
        get_mock.return_value.status_code = 200
        self.threatstream._config['ioc_filters'] = {'test_source'}
        self.threatstream._connect('previous_url')
        expected_intel = [
            {
                'value': 'malicious_domain2.com',
                'itype': 'c2_domain',
                'source': 'test_source',
                'type': 'domain',
                'expiration_ts': 1512000062
            }
        ]
        finalize_mock.assert_called_with(expected_intel, next_url)

    @raises(ThreatStreamRequestsError)
    @patch('stream_alert.threat_intel_downloader.main.requests.get')
    def test_connect_with_unauthed(self, get_mock):
        """ThreatStream - Connection to ThreatStream.com, Unauthorized Error"""
        get_mock.return_value.json.return_value = self._get_http_response()
        get_mock.return_value.status_code = 401
        self.threatstream._connect('previous_url')

    @raises(ThreatStreamRequestsError)
    @patch('stream_alert.threat_intel_downloader.main.requests.get')
    def test_connect_with_retry_error(self, get_mock):
        """ThreatStream - Connection to ThreatStream.com, Retry Error"""
        # 5xx responses exhaust retries and then raise.
        get_mock.return_value.status_code = 500
        self.threatstream._connect('previous_url')

    @raises(ThreatStreamRequestsError)
    @patch('stream_alert.threat_intel_downloader.main.requests.get')
    def test_connect_with_unknown_error(self, get_mock):
        """ThreatStream - Connection to ThreatStream.com, Unknown Error"""
        get_mock.return_value.status_code = 404
        self.threatstream._connect('previous_url')

    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._load_api_creds')
    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._connect')
    def test_runner(self, connect_mock, _):
        """ThreatStream - Runner"""
        # The runner should assemble the full query URL from config + creds.
        expected_url = ('/api/v2/intelligence/?username=user&api_key=key&limit=1000&q='
                        '(status="active")+AND+(type="domain"+OR+type="ip"+OR+type="md5")+'
                        'AND+NOT+(itype="bot_ip"+OR+itype="brute_ip"+OR+itype="scan_ip"+'
                        'OR+itype="spam_ip"+OR+itype="tor_ip")')
        self.threatstream.api_key = 'key'
        self.threatstream.api_user = 'user'
        self.threatstream.runner({'none': 'test'})
        connect_mock.assert_called_with(expected_url)

    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._write_to_dynamodb_table')
    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._invoke_lambda_function')
    def test_finalize(self, invoke_mock, write_mock):
        """ThreatStream - Finalize with Intel"""
        intel = ['foo', 'bar']
        # No continuation URL -> write intel but do not re-invoke the Lambda.
        self.threatstream._finalize(intel, None)
        write_mock.assert_called_with(intel)
        invoke_mock.assert_not_called()

    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._write_to_dynamodb_table')
    @patch('stream_alert.threat_intel_downloader.main.ThreatStream._invoke_lambda_function')
    def test_finalize_next_url(self, invoke_mock, write_mock):
        """ThreatStream - Finalize with Next URL"""
        intel = ['foo', 'bar']
        # Continuation URL present -> write intel AND re-invoke with the token.
        self.threatstream._finalize(intel, 'next')
        write_mock.assert_called_with(intel)
        invoke_mock.assert_called_with('next')

    @patch('boto3.resource')
    def test_write_to_dynamodb_table(self, boto_mock):
        """ThreatStream - Write Intel to DynamoDB Table"""
        intel = [self._get_fake_intel('malicious_domain.com', 'test_source')]
        # Raw API field names are remapped to the DynamoDB schema.
        expected_intel = {
            'expiration_ts': '2017-11-30T00:01:02.123Z',
            'source': 'test_source',
            'ioc_type': 'domain',
            'sub_type': 'c2_domain',
            'ioc_value': 'malicious_domain.com'
        }
        self.threatstream._write_to_dynamodb_table(intel)
        batch_writer = boto_mock.return_value.Table.return_value.batch_writer.return_value
        batch_writer.__enter__.return_value.put_item.assert_called_with(Item=expected_intel)

    @patch('boto3.resource')
    @raises(ClientError)
    def test_write_to_dynamodb_table_error(self, boto_mock):
        """ThreatStream - Write Intel to DynamoDB Table, Error"""
        intel = [self._get_fake_intel('malicious_domain.com', 'test_source')]
        err = ClientError({'Error': {'Code': 404}}, 'PutItem')
        batch_writer = boto_mock.return_value.Table.return_value.batch_writer.return_value
        batch_writer.__enter__.return_value.put_item.side_effect = err
        self.threatstream._write_to_dynamodb_table(intel)

    @patch('boto3.client')
    def test_invoke_lambda_function(self, boto_mock):
        """ThreatStream - Invoke Lambda Function"""
        boto_mock.return_value = MockLambdaClient()
        self.threatstream._invoke_lambda_function('next_token')
        boto_mock.assert_called_once()

    @patch('boto3.client', Mock(return_value=MockLambdaClient()))
    @raises(ThreatStreamLambdaInvokeError)
    def test_invoke_lambda_function_error(self):
        """ThreatStream - Invoke Lambda Function, Error"""
        # Class attribute toggles the mock client into failure mode.
        MockLambdaClient._raise_exception = True
        self.threatstream._invoke_lambda_function('next_token')
| 670 | 0 | 52 |
ee62108b17d42c736856b8733f3ae4635f8dc080 | 596 | py | Python | qctests/EN_range_check.py | BillMills/AutoQC | cb56fa5bb2115170ec204edd84e2d69ce84be820 | [
"MIT"
] | 17 | 2015-01-31T00:35:58.000Z | 2020-10-26T19:01:46.000Z | qctests/EN_range_check.py | castelao/AutoQC | eb85422c1a6a5ff965a1ef96b3cb29240a66b506 | [
"MIT"
] | 163 | 2015-01-21T03:44:42.000Z | 2022-01-09T22:03:12.000Z | qctests/EN_range_check.py | BillMills/AutoQC | cb56fa5bb2115170ec204edd84e2d69ce84be820 | [
"MIT"
] | 11 | 2015-06-04T14:32:22.000Z | 2021-04-11T05:18:09.000Z | """
Implements the global range check used in the EN quality control
system.
"""
def test(p, parameters):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.
    """
    # NOTE: 'parameters' is accepted for interface uniformity with the other
    # QC checks but is not used by this test.
    # Get temperature values from the profile.
    # (presumably a numpy masked array -- masked levels always pass below)
    t = p.t()
    # Make the quality control decisions. This should
    # return true if the temperature is outside -4 deg C
    # and 40 deg C.
    # (t.mask == False) keeps masked/missing levels flagged as passing.
    qc = (t.mask == False) & ((t.data < -4.0) | (t.data > 40.0))
    return qc
| 24.833333 | 74 | 0.644295 | """
Implements the global range check used in the EN quality control
system.
"""
def test(p, parameters):
"""
Runs the quality control check on profile p and returns a numpy array
of quality control decisions with False where the data value has
passed the check and True where it failed.
"""
# Get temperature values from the profile.
t = p.t()
# Make the quality control decisions. This should
# return true if the temperature is outside -4 deg C
# and 40 deg C.
qc = (t.mask == False) & ((t.data < -4.0) | (t.data > 40.0))
return qc
| 0 | 0 | 0 |
829dcf41a8949e0e449237d9b5a16d2d1449e4c8 | 6,241 | py | Python | threading_util/pausable_thread.py | SDRAST/support | ebe081692fc8c892c1389156e979d5baa7f8f0e6 | [
"Apache-2.0"
] | null | null | null | threading_util/pausable_thread.py | SDRAST/support | ebe081692fc8c892c1389156e979d5baa7f8f0e6 | [
"Apache-2.0"
] | null | null | null | threading_util/pausable_thread.py | SDRAST/support | ebe081692fc8c892c1389156e979d5baa7f8f0e6 | [
"Apache-2.0"
] | null | null | null | import logging
import threading
import time
module_logger = logging.getLogger(__name__)
__all__ = ["iterativeRun", "Pause", "PausableThread", "PausableThreadCallback"]
def iterativeRun(run_fn):
"""
A decorator for running functions repeatedly inside a PausableThread.
Allows one to pause and stop the thread while its repeatedly calling
the overriden run function.
Args:
run_fn (callable): the overridden run function from PausableThread
Returns:
callable: wrapped function
"""
return wrapper
class Pause(object):
"""
A context manager for pausing threads.
This starts by pausing an input thread or threads and unpausing them when
code inside block has been called.
This makes sure that when we unpause the thread when we're done
doing whatever task we needed.
Attributes:
thread (dict): A collection of threads to pause and unpause.
init_pause_status (dict): The initial state of the threads in
the thread attribute.
"""
def __init__(self, pausable_thread):
"""
Args:
pausable_thread (list, PausableThread): An instance, or list of
instances of PausableThread. If we pass ``None``, then this gets
dealt with properly down stream.
"""
self.thread = pausable_thread
if not isinstance(self.thread, dict):
# if the argument is not a dict, make it one
self.thread = {'thread': self.thread}
self.init_pause_status = {}
for name in list(self.thread.keys()):
if self.thread[name]:
self.init_pause_status[name] = self.thread[name].paused()
else:
self.init_pause_status[name] = None
# self.init_pause_status = {name: self.thread[name].paused() for name in self.thread.keys()}
def __enter__(self):
"""
Pause the thread in question, and make sure that whatever
functionality is being performing is actually stopped.
"""
for name in list(self.thread.keys()):
t = self.thread[name]
if t:
# if there really is a thread
if not self.init_pause_status[name]:
# and it is not already paused
t.pause()
else:
pass
# now make sure that they're actually paused.
for name in list(self.thread.keys()):
t = self.thread[name]
if t:
# if there really is a thread
while self.thread[name].running():
# wait until it is no longer running
time.sleep(0.001)
else:
pass
def __exit__(self, *args):
"""
Unpause the thread
"""
for name in list(self.thread.keys()):
t = self.thread[name]
if t:
# if there really is a thread
if not self.init_pause_status[name]:
self.thread[name].unpause()
else:
pass
class PausableThread(threading.Thread):
"""
A pausable stoppable thread.
It also has a running flag that can be used to determine if the process is
still running.
Attributes:
_running ():
name (str): name of thread, if any
logger (logging.getLogger): logging instance.
_lock (threading.Lock): thread's internal lock
_pause (threading.Event): setting and clearing this indicates to
pause or unpause thread.
_stop (threading.Event): setting this stops thread.
_running (threading.Event): setting this indicates thread is
currently executing "run" method.
"""
def __init__(self, name=None, **kwargs):
"""
create a pausable thread
Args:
name (str): name of thread.
**kwargs: To be passed to
"""
threading.Thread.__init__(self)
self.name = name
self.logger = logging.getLogger(__name__)
self._lock = threading.Lock()
# create events for the thread states
self._pause = threading.Event()
self._stop = threading.Event()
self._running = threading.Event()
def stop(self):
"""
Stop the thread from running all together. Make
sure to join this up with threading.Thread.join()
"""
self._stop.set()
class PausableThreadCallback(threading.Thread):
"""
A thread that runs the same callback over an over again, with some
predetermined wait time.
This thread can be paused, unpaused, and stopped in a thread-safe manner.
"""
| 27.986547 | 100 | 0.571703 | import logging
import threading
import time
module_logger = logging.getLogger(__name__)
__all__ = ["iterativeRun", "Pause", "PausableThread", "PausableThreadCallback"]
def iterativeRun(run_fn):
    """
    A decorator for running functions repeatedly inside a PausableThread.

    The wrapped function is invoked in a loop until the owning thread is
    stopped; while the thread is paused, the loop idles (1 ms sleep) without
    calling it. The thread's ``_running`` event is set for the duration of
    each call so other threads (e.g. the ``Pause`` context manager) can tell
    when the body is actually executing.

    Args:
        run_fn (callable): the overridden run function from PausableThread

    Returns:
        callable: wrapped function
    """
    import functools

    # functools.wraps preserves run_fn's __name__/__doc__ on the wrapper so
    # logging and debugging see the original function, not "wrapper".
    @functools.wraps(run_fn)
    def wrapper(self):
        while True:
            if self.stopped():
                # stop() was called -- leave the loop for good
                break
            if self.paused():
                # idle briefly so unpause() takes effect quickly
                time.sleep(0.001)
                continue
            else:
                # flag that the body is executing, run one iteration, clear flag
                self._running.set()
                run_fn(self)
                self._running.clear()
    return wrapper
class Pause(object):
    """
    Context manager that pauses one or more PausableThreads on entry and
    restores their previous pause state on exit.

    Threads that were already paused when the block is entered stay paused
    afterwards; ``None`` entries are tolerated and simply skipped. On entry
    the manager also waits until no managed thread is mid-iteration, so the
    caller knows the paused work has truly quiesced.

    Attributes:
        thread (dict): mapping of name -> thread (or None) being managed.
        init_pause_status (dict): pause state of each thread at entry time.
    """

    def __init__(self, pausable_thread):
        """
        Args:
            pausable_thread: a PausableThread, a dict of them, or None.
                Anything that is not already a dict gets wrapped in one.
        """
        if isinstance(pausable_thread, dict):
            self.thread = pausable_thread
        else:
            self.thread = {'thread': pausable_thread}
        # Remember each thread's pause state so __exit__ can restore it;
        # None marks a missing (falsy) thread entry.
        self.init_pause_status = {}
        for name, thread in self.thread.items():
            self.init_pause_status[name] = thread.paused() if thread else None

    def __enter__(self):
        """Pause every managed thread, then block until none is running."""
        for name, thread in self.thread.items():
            # Only pause threads that exist and were not already paused.
            if thread and not self.init_pause_status[name]:
                thread.pause()
        # Wait for any in-flight iteration to finish before handing control
        # back to the caller.
        for thread in self.thread.values():
            if thread:
                while thread.running():
                    time.sleep(0.001)

    def __exit__(self, *args):
        """Unpause only the threads that this manager paused itself."""
        for name, thread in self.thread.items():
            if thread and not self.init_pause_status[name]:
                thread.unpause()
class PausableThread(threading.Thread):
    """
    A pausable, stoppable thread.

    It also has a running flag that can be used to determine if the process is
    still running.

    Attributes:
        name (str): name of thread, if any
        logger (logging.Logger): logging instance.
        _lock (threading.Lock): thread's internal lock
        _pause (threading.Event): setting and clearing this indicates to
            pause or unpause thread.
        _stop_event (threading.Event): setting this stops thread.
        _running (threading.Event): setting this indicates thread is
            currently executing "run" method.
    """

    def __init__(self, name=None, **kwargs):
        """
        create a pausable thread

        Args:
            name (str): name of thread.
            **kwargs: currently unused; accepted for interface compatibility.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.logger = logging.getLogger(__name__)
        self._lock = threading.Lock()
        # Internal state events. The stop flag is deliberately NOT named
        # "_stop": CPython's threading.Thread defines a private _stop()
        # *method*, and shadowing it with an Event makes Thread.join() raise
        # "TypeError: 'Event' object is not callable" once the thread ends.
        self._pause = threading.Event()
        self._stop_event = threading.Event()
        self._running = threading.Event()

    def stop(self):
        """
        Stop the thread from running all together. Make
        sure to join this up with threading.Thread.join()
        """
        self._stop_event.set()

    def pause(self):
        """Request that the run loop idle until unpause() is called."""
        self._pause.set()

    def unpause(self):
        """Allow a paused run loop to resume."""
        self._pause.clear()

    def stopped(self):
        """Return True if stop() has been requested."""
        return self._stop_event.is_set()

    def paused(self):
        """Return True if the thread is currently flagged as paused."""
        return self._pause.is_set()

    def running(self):
        """Return True while the run body is actually executing."""
        return self._running.is_set()
class PausableThreadCallback(threading.Thread):
    """
    A thread that runs the same callback over and over again, with some
    predetermined wait time.

    This thread can be paused, unpaused, and stopped in a thread-safe manner.
    """

    def __init__(self, callback, name=None, *args):
        """
        Args:
            callback (callable): function invoked repeatedly by run().
            name (str): optional thread name.
            *args: positional arguments passed to the callback on each call.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.callback = callback
        self.args = args
        # The stop flag is deliberately NOT named "_stop": CPython's
        # threading.Thread defines a private _stop() *method*, and shadowing
        # it with an Event makes Thread.join() raise
        # "TypeError: 'Event' object is not callable" once the thread ends.
        self._pause = threading.Event()
        self._stop_event = threading.Event()
        self._running = threading.Event()

    def run(self):
        # Invoke the callback until stop() is requested; idle while paused.
        while True:
            if self.stopped():
                break
            if self.paused():
                time.sleep(0.001)
                continue
            else:
                self._running.set()
                self.callback(*self.args)
                self._running.clear()

    def stop(self):
        """Request the run loop exit; pair with threading.Thread.join()."""
        self._stop_event.set()

    def pause(self):
        """Request that the run loop idle until unpause() is called."""
        self._pause.set()

    def unpause(self):
        """Allow a paused run loop to resume."""
        self._pause.clear()

    def stopped(self):
        """Return True if stop() has been requested."""
        return self._stop_event.is_set()

    def paused(self):
        """Return True if the thread is currently flagged as paused."""
        return self._pause.is_set()

    def running(self):
        """Return True while the callback is actually executing."""
        return self._running.is_set()
| 1,153 | 0 | 377 |
861046f2185daf66208a051d17e44c2d4a82f4e2 | 1,013 | py | Python | python/bin/use_gene_id_for_missing_gene_names.py | jennomics/single-cell | 34c951511e2be071fe3a8e11511d8ff5de8f540f | [
"MIT"
] | 3 | 2020-11-14T02:19:34.000Z | 2022-03-09T17:00:52.000Z | python/bin/use_gene_id_for_missing_gene_names.py | jennomics/single-cell | 34c951511e2be071fe3a8e11511d8ff5de8f540f | [
"MIT"
] | 7 | 2020-06-02T18:51:13.000Z | 2021-07-08T22:48:45.000Z | python/bin/use_gene_id_for_missing_gene_names.py | jennomics/single-cell | 34c951511e2be071fe3a8e11511d8ff5de8f540f | [
"MIT"
] | 2 | 2021-04-21T17:19:10.000Z | 2021-05-22T14:26:13.000Z | #!/bin/env python3
"""
If gene name is simply totally missing, use the gene ID for the gene name.
(Don't confuse this with the case in which there is actualy a gene name
embeded in the gene id, for which you would want to use the utility
'extract_name_embedded_in_gene_id.py'.)
"""
import argparse
import gtfez
# command line interface (making this a modulino)
if __name__ == '__main__':
main(parse_args())
| 25.325 | 85 | 0.662389 | #!/bin/env python3
"""
If gene name is simply totally missing, use the gene ID for the gene name.
(Don't confuse this with the case in which there is actualy a gene name
embeded in the gene id, for which you would want to use the utility
'extract_name_embedded_in_gene_id.py'.)
"""
import argparse
import gtfez
def parse_args():
    """Parse command-line arguments: the GTF file to read (opened for reading)."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'gtf',
        type=argparse.FileType('r'),
        help='location of GTF file',
    )
    return parser.parse_args()
def main(args):
    """Copy the GTF stream to stdout, filling in missing gene_name attributes."""
    for line in args.gtf:
        # Header/comment lines pass through unchanged.
        if line.startswith('#'):
            print(line.rstrip())
            continue
        record = gtfez.GTFRecord(line)
        # If the record has a gene_id but no gene_name at all, reuse the
        # gene_id as the gene_name so downstream tools always see one.
        if ('gene_name' not in record.attributes) and 'gene_id' in record.attributes:
            record.attributes['gene_name'] = record.attributes['gene_id']
        print(record)
# command line interface (making this a modulino)
if __name__ == '__main__':
main(parse_args())
| 551 | 0 | 46 |
22a2ab9b0ebf3d05d6af95bfe871d127bdeb4b36 | 1,334 | py | Python | model/Schedule.py | shionart/test_flask | f27477357bcfe939012f5a38523f238fa84de85b | [
"MIT",
"Unlicense"
] | 1 | 2021-07-22T14:57:31.000Z | 2021-07-22T14:57:31.000Z | model/Schedule.py | shionart/test_flask | f27477357bcfe939012f5a38523f238fa84de85b | [
"MIT",
"Unlicense"
] | null | null | null | model/Schedule.py | shionart/test_flask | f27477357bcfe939012f5a38523f238fa84de85b | [
"MIT",
"Unlicense"
] | null | null | null | from model.Control import Control
from model.Sensor import Sensor
from apscheduler.schedulers.background import BackgroundScheduler
# The "apscheduler." prefix is hard coded
scheduler = BackgroundScheduler({
'apscheduler.executors.default': {
'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
'max_workers': '60'
},
'apscheduler.executors.processpool': {
'type': 'processpool',
'max_workers': '60'
},
'apscheduler.job_defaults.max_instances': '60',
'apscheduler.timezone': 'UTC',
})
class Schedule():
"""
Kelas untuk set scheduler dan fungsi yang berkaitan
"""
def getListControl(self):
"""
Fungsi untuk mengecek secara berkala Control pada tiap Nodes ke Main Web
"""
print("memulai getlistcontrol")
try:
a = Control().read_controls()
if a!=None:#cek local ada nodes atau tidak
for control in a:
# print("Satuan control")
#Membuat obj control tiap baris pada table control
c = Control(id_arduino=control['id_arduino'], id_user=control['id_user'])
c.getControl() #Ngambil data terbaru
else:
print("KOSONG")
except Exception as e:
print(e)
| 32.536585 | 93 | 0.598951 | from model.Control import Control
from model.Sensor import Sensor
from apscheduler.schedulers.background import BackgroundScheduler
# The "apscheduler." prefix is hard coded
# Module-level background scheduler shared by the app; both executors are
# capped at 60 workers and each job may run up to 60 concurrent instances.
scheduler = BackgroundScheduler({
    'apscheduler.executors.default': {
        'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
        'max_workers': '60'
    },
    'apscheduler.executors.processpool': {
        'type': 'processpool',
        'max_workers': '60'
    },
    'apscheduler.job_defaults.max_instances': '60',
    'apscheduler.timezone': 'UTC',
})
class Schedule():
    """
    Class that configures the scheduler and its related periodic functions.

    (Translated from the original Indonesian docstring: "Kelas untuk set
    scheduler dan fungsi yang berkaitan".)
    """
    def getListControl(self):
        """
        Periodically check each Node's Control entries against the main web
        service and refresh them with the latest data.
        """
        print("memulai getlistcontrol")
        try:
            a = Control().read_controls()
            if a!=None:  # check whether any nodes exist locally
                for control in a:
                    # print("Satuan control")
                    # Build a Control object for each row of the control table
                    c = Control(id_arduino=control['id_arduino'], id_user=control['id_user'])
                    c.getControl()  # fetch the latest data
            else:
                print("KOSONG")
        except Exception as e:
            # NOTE(review): broad catch-and-print keeps the scheduled job
            # alive on any failure; errors are only logged to stdout.
            print(e)
| 0 | 0 | 0 |
28be2a632bb1b6e14ddbeb0afac9fa4520012efa | 390 | py | Python | archived/consoletest.py | CubeFlix/emos | 7f84100908e78384c82777ec3bee0cc1b130cefb | [
"MIT"
] | 1 | 2021-05-26T17:41:07.000Z | 2021-05-26T17:41:07.000Z | archived/consoletest.py | CubeFlix/emos | 7f84100908e78384c82777ec3bee0cc1b130cefb | [
"MIT"
] | null | null | null | archived/consoletest.py | CubeFlix/emos | 7f84100908e78384c82777ec3bee0cc1b130cefb | [
"MIT"
] | null | null | null | import sys
import os
size = os.get_terminal_size()
rows, cols = size.lines, size.columns
data_to_add = b'Hello, world!'
data = bytes((rows * cols) * b' ')
data = data_to_add + data[len(data_to_add) : ]
for row in range(rows):
start = row * cols
end = start + cols
write(str(data[start : end], 'utf-8'))
write('\n')
| 17.727273 | 46 | 0.666667 | import sys
import os
def write(string):
    """Write *string* to stdout and flush immediately (no trailing newline)."""
    stream = sys.stdout
    stream.write(string)
    stream.flush()
# Fill the whole terminal with spaces, overlaying a message at the top-left,
# then print it row by row.
size = os.get_terminal_size()
rows, cols = size.lines, size.columns
data_to_add = b'Hello, world!'
# One byte per screen cell, initially all spaces.
data = bytes((rows * cols) * b' ')
# Splice the message over the start of the buffer.
data = data_to_add + data[len(data_to_add) : ]
for row in range(rows):
    # Emit one terminal row (cols bytes) at a time.
    start = row * cols
    end = start + cols
    write(str(data[start : end], 'utf-8'))
write('\n')
| 43 | 0 | 23 |
958f06b3471de3581f85d8b7924fb17c7793dd42 | 4,820 | py | Python | pricelist/models.py | WillieIlus/jobscorner | ed3734468ea0e88a306a1d29bc876562e940f4fb | [
"bzip2-1.0.6"
] | 2 | 2020-04-12T13:18:35.000Z | 2021-04-02T04:18:17.000Z | pricelist/models.py | WillieIlus/jobscorner | ed3734468ea0e88a306a1d29bc876562e940f4fb | [
"bzip2-1.0.6"
] | 3 | 2020-02-11T23:58:53.000Z | 2020-09-06T18:46:17.000Z | pricelist/models.py | WillieIlus/jobscorner | ed3734468ea0e88a306a1d29bc876562e940f4fb | [
"bzip2-1.0.6"
] | 1 | 2020-08-17T08:29:41.000Z | 2020-08-17T08:29:41.000Z | from __future__ import unicode_literals
from builtins import super
import numpy as np
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
# what this moddule does is displaying the price median price, minunum and maximum price and displaying the companies that offer the services
# it goes this way a model to display the activity and the prices
# what is the price of printing and a banner 1 meter squared on roland
# or what is the price of printing a banner 1 meter squared on flora
# what is the price of hosting a meeting for and hour in a hotel
# what is the price of repairing a phone nokia
from accounts.models import User
from company.models import Company
from location.models import Location
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
NEGOTIABLE = (
('yes', 'Yes'),
('no', 'No'),
)
| 32.789116 | 141 | 0.681743 | from __future__ import unicode_literals
from builtins import super
import numpy as np
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
# what this moddule does is displaying the price median price, minunum and maximum price and displaying the companies that offer the services
# it goes this way a model to display the activity and the prices
# what is the price of printing and a banner 1 meter squared on roland
# or what is the price of printing a banner 1 meter squared on flora
# what is the price of hosting a meeting for and hour in a hotel
# what is the price of repairing a phone nokia
from accounts.models import User
from company.models import Company
from location.models import Location
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='published')
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
NEGOTIABLE = (
('yes', 'Yes'),
('no', 'No'),
)
class Type(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, blank=True, null=True, unique=True)
class Meta:
verbose_name_plural = "types"
ordering = ['name']
_metadata = {
'title': 'name',
}
def __str__(self):
return self.name
def save(self, *args, **kwargs):
slug = slugify(self.name)
self.slug = slug
super().save(*args, **kwargs)
class Item(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, blank=True, null=True, unique=True)
type = models.ForeignKey(Type, related_name='item', on_delete=models.CASCADE)
class Meta:
verbose_name_plural = "items"
ordering = ['name']
_metadata = {
'title': 'name',
}
def __str__(self):
return self.name
def save(self, *args, **kwargs):
slug = slugify(self.name)
self.slug = slug
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('item:detail', kwargs={'slug': self.slug})
class Service(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, blank=True, null=True, unique=True)
description = models.TextField(blank=True, null=True)
item = models.ForeignKey(Item, related_name='service', on_delete=models.CASCADE)
type = models.ForeignKey(Type, related_name='service', blank=True, null=True, on_delete=models.PROTECT)
location = models.ForeignKey(Location, related_name='service', blank=True, null=True, on_delete=models.PROTECT)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default="draft")
publish = models.DateTimeField('date published', auto_now_add=True)
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
objects = models.Manager()
published = PublishedManager()
class Meta:
verbose_name_plural = "services"
ordering = ['name', 'publish']
_metadata = {
'title': 'name',
'description': 'description',
}
def __str__(self):
return "The price of %s a %s %s is" % (self.name, self.type, self.item)
def average_price(self):
all_prices = list(map(lambda x: x.amount, self.service.all()))
return np.mean(all_prices)
def save(self, *args, **kwargs):
pricequestion = "The Price of %s %s %s" %(self.name, self.type, self.item)
slug = slugify(pricequestion)
self.slug = slug
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('pricelist:detail', kwargs={'slug': self.slug})
class Price(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
amount = models.FloatField()
service = models.ForeignKey(Service, related_name='service', on_delete=models.CASCADE)
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to="pricelist/item", blank=True, null=True)
company = models.ForeignKey(Company, blank=True, null=True, on_delete=models.PROTECT)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default="draft")
negotiable = models.CharField(max_length=10, choices=NEGOTIABLE, default="yes")
publish = models.DateTimeField('date published', auto_now_add=True)
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
objects = models.Manager()
published = PublishedManager()
class Meta:
verbose_name_plural = "prices"
ordering = ['amount', 'publish']
unique_together = ('service', 'user')
def __str__(self):
return "%s" % (self.amount,)
| 913 | 2,872 | 142 |
9c7547f725e65ef0319f9a63015b38a4e864c966 | 2,138 | py | Python | tests/apps/minimal2/config/settings.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | null | null | null | tests/apps/minimal2/config/settings.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 6 | 2016-11-01T18:42:34.000Z | 2020-11-16T16:52:14.000Z | tests/apps/minimal2/config/settings.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 1 | 2020-01-22T18:20:46.000Z | 2020-01-22T18:20:46.000Z | from __future__ import print_function
from os import path
from blazeweb.config import DefaultSettings
basedir = path.dirname(path.dirname(__file__))
app_package = path.basename(basedir)
| 23.494505 | 78 | 0.661366 | from __future__ import print_function
from os import path
from blazeweb.config import DefaultSettings
basedir = path.dirname(path.dirname(__file__))
app_package = path.basename(basedir)
class Default(DefaultSettings):
def init(self):
self.dirs.base = basedir
self.app_package = app_package
DefaultSettings.init(self)
# since this is a quick start app, we want our views.py file to get
# loaded
self.auto_load_views = True
# don't use exception catching, debuggers, logging, etc.
self.apply_test_settings()
def get_storage_dir(self):
return path.join(basedir, '..', '..', 'test-output', self.app_package)
class Dev(Default):
def init(self):
Default.init(self)
self.apply_dev_settings()
class Dispatching(Default):
def init(self):
Default.init(self)
self.apply_test_settings()
self.static_files.enabled = False
self.add_component(self.app_package, 'internalonly')
self.add_component(self.app_package, 'news')
self.add_component(self.app_package, 'news', 'newscomp4')
self.add_component(self.app_package, 'foo', 'foobwp')
# components should be able to add things to this list
self.some_list = ['from app']
class BeakerSessions(Dispatching):
def init(self):
Dispatching.init(self)
self.beaker.timeout = 2
class EventSettings(Default):
def init(self):
Default.init(self)
self.apply_test_settings()
class Test(Default):
def init(self):
Default.init(self)
print('Test settings')
class Test2(Default):
def init(self):
Default.init(self)
print('Test2 settings')
class TestStorageDir(Default):
def init(self):
Default.init(self)
self.auto_create_writeable_dirs = False
def get_storage_dir(self):
return DefaultSettings.get_storage_dir(self)
class NoAutoImportView(Default):
def init(self):
Default.init(self)
# we just want to make sure turning the setting off works too
self.auto_load_views = False
| 1,391 | 54 | 496 |
742e878ab6e12a4761a497391a8a5daf4ad31b7e | 380 | py | Python | alex/utils/token.py | oplatek/alex | 73af644ec35c8a1cd0c37cd478c2afc1db717e0b | [
"Apache-2.0"
] | 184 | 2015-02-11T04:14:41.000Z | 2022-03-24T21:43:58.000Z | alex/utils/token.py | oplatek/alex | 73af644ec35c8a1cd0c37cd478c2afc1db717e0b | [
"Apache-2.0"
] | 69 | 2015-01-11T04:57:22.000Z | 2019-04-24T10:25:56.000Z | alex/utils/token.py | oplatek/alex | 73af644ec35c8a1cd0c37cd478c2afc1db717e0b | [
"Apache-2.0"
] | 61 | 2015-03-04T10:52:13.000Z | 2022-03-04T12:14:06.000Z | import urllib2
| 31.666667 | 83 | 0.697368 | import urllib2
def get_token(cfg):
token_url = cfg['General'].get('token_url')
curr_session = cfg['Logging']['session_logger'].session_dir_name.value
if token_url is not None:
f_token = urllib2.urlopen(token_url.format(curr_session))
return f_token.read()
else:
raise Exception("Please configure the 'token_url' DM parameter in config.")
| 341 | 0 | 23 |
0d33204beaccc33b5fa77749025dc9e04c89d176 | 2,955 | py | Python | TimeRange.py | SanthoshS20/Sudoku-Challenge | 2e7402c473b5c303e97bfa3535b6e46534bf1d0f | [
"Apache-2.0"
] | null | null | null | TimeRange.py | SanthoshS20/Sudoku-Challenge | 2e7402c473b5c303e97bfa3535b6e46534bf1d0f | [
"Apache-2.0"
] | null | null | null | TimeRange.py | SanthoshS20/Sudoku-Challenge | 2e7402c473b5c303e97bfa3535b6e46534bf1d0f | [
"Apache-2.0"
] | null | null | null | lines = []
with open('Time.txt') as f:
lines = f.readlines()
f.close()
total_lines = len(lines)
TimeTakenInDays = dict()
DifficultLevelInEachDays = dict()
day = 1
for index in range(total_lines):
if(index!=0):
difficultLevel, Time = lines[index].split(" ")
TimeTakenInDays["Day "+str(day-1)] = int(Time)
DifficultLevelInEachDays["Day "+str(day-1)] = difficultLevel
day+=1
easyAverage = 0
# 20 minutes
easyMin = 1200
easyMax = 0
easyCount = 0
mediumAverage = 0
# 60 minutes
mediumMin = 3600
mediumMax = 0
mediumCount = 0
hardAverage = 0
# 2 hours
hardMin = 7200
hardMax = 0
hardCount = 0
for index in range(total_lines-1):
key = "Day {}".format(index+1)
if(DifficultLevelInEachDays[key]=="Easy"):
easyAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>easyMax):
easyMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<easyMin):
easyMin = TimeTakenInDays[key]
easyCount+=1
elif(DifficultLevelInEachDays[key]=="Medium"):
mediumAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>mediumMax):
mediumMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<mediumMin):
mediumMin = TimeTakenInDays[key]
mediumCount+=1
elif(DifficultLevelInEachDays[key]=="Hard"):
hardAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>hardMax):
hardMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<hardMin):
hardMin = TimeTakenInDays[key]
hardCount+=1
lines = []
with open("README.md", "r") as f:
lines = f.readlines()
f.close()
del lines[8:len(lines)]
with open("README.md", "w") as f:
for line in lines:
f.write(line)
f.close()
with open("README.md", "a") as f:
for index in range(total_lines-1):
key = "Day {}".format(index+1)
f.write("\n"+key+" ")
f.write(DifficultLevelInEachDays[key]+" ")
f.write(str(TimeTakenInDays[key]))
f.write("<br />")
f.write("<br /><br />")
if(easyCount!=0):
f.write("\nEasy Level\n")
f.write("\nMinimum Time Taken - "+str(easyMin))
f.write("\n\nMaximum Time Taken - "+str(easyMax))
f.write("\n\nAverage Time Taken - "+str(easyAverage//easyCount))
f.write("<br /><br />")
if(mediumCount!=0):
f.write("\n\nMedium Level\n")
f.write("\nMinimum Time Taken - "+str(mediumMin))
f.write("\n\nMaximum Time Taken - "+str(mediumMax))
f.write("\n\nAverage Time Taken - "+str(mediumAverage//mediumCount))
f.write("<br /><br />")
if(hardCount!=0):
f.write("\n\nHard Level\n")
f.write("\n\nMinimum Time Taken - "+str(hardMin))
f.write("\n\nMaximum Time Taken - "+str(hardMax))
f.write("\n\nAverage Time Taken - "+str(hardAverage//hardCount))
f.close()
| 27.616822 | 85 | 0.607445 | lines = []
with open('Time.txt') as f:
lines = f.readlines()
f.close()
total_lines = len(lines)
TimeTakenInDays = dict()
DifficultLevelInEachDays = dict()
day = 1
for index in range(total_lines):
if(index!=0):
difficultLevel, Time = lines[index].split(" ")
TimeTakenInDays["Day "+str(day-1)] = int(Time)
DifficultLevelInEachDays["Day "+str(day-1)] = difficultLevel
day+=1
easyAverage = 0
# 20 minutes
easyMin = 1200
easyMax = 0
easyCount = 0
mediumAverage = 0
# 60 minutes
mediumMin = 3600
mediumMax = 0
mediumCount = 0
hardAverage = 0
# 2 hours
hardMin = 7200
hardMax = 0
hardCount = 0
for index in range(total_lines-1):
key = "Day {}".format(index+1)
if(DifficultLevelInEachDays[key]=="Easy"):
easyAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>easyMax):
easyMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<easyMin):
easyMin = TimeTakenInDays[key]
easyCount+=1
elif(DifficultLevelInEachDays[key]=="Medium"):
mediumAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>mediumMax):
mediumMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<mediumMin):
mediumMin = TimeTakenInDays[key]
mediumCount+=1
elif(DifficultLevelInEachDays[key]=="Hard"):
hardAverage+=TimeTakenInDays[key]
if(TimeTakenInDays[key]>hardMax):
hardMax = TimeTakenInDays[key]
if(TimeTakenInDays[key]<hardMin):
hardMin = TimeTakenInDays[key]
hardCount+=1
lines = []
with open("README.md", "r") as f:
lines = f.readlines()
f.close()
del lines[8:len(lines)]
with open("README.md", "w") as f:
for line in lines:
f.write(line)
f.close()
with open("README.md", "a") as f:
for index in range(total_lines-1):
key = "Day {}".format(index+1)
f.write("\n"+key+" ")
f.write(DifficultLevelInEachDays[key]+" ")
f.write(str(TimeTakenInDays[key]))
f.write("<br />")
f.write("<br /><br />")
if(easyCount!=0):
f.write("\nEasy Level\n")
f.write("\nMinimum Time Taken - "+str(easyMin))
f.write("\n\nMaximum Time Taken - "+str(easyMax))
f.write("\n\nAverage Time Taken - "+str(easyAverage//easyCount))
f.write("<br /><br />")
if(mediumCount!=0):
f.write("\n\nMedium Level\n")
f.write("\nMinimum Time Taken - "+str(mediumMin))
f.write("\n\nMaximum Time Taken - "+str(mediumMax))
f.write("\n\nAverage Time Taken - "+str(mediumAverage//mediumCount))
f.write("<br /><br />")
if(hardCount!=0):
f.write("\n\nHard Level\n")
f.write("\n\nMinimum Time Taken - "+str(hardMin))
f.write("\n\nMaximum Time Taken - "+str(hardMax))
f.write("\n\nAverage Time Taken - "+str(hardAverage//hardCount))
f.close()
| 0 | 0 | 0 |
267be97d97e9fec907dba78e0d5f8931b294188f | 308 | py | Python | lib/logger.py | webclinic017/bitrush | 5d76c98a17bb830dba1bdd103475c120903ded90 | [
"MIT"
] | 1 | 2022-01-09T21:17:23.000Z | 2022-01-09T21:17:23.000Z | lib/logger.py | webclinic017/bitrush | 5d76c98a17bb830dba1bdd103475c120903ded90 | [
"MIT"
] | null | null | null | lib/logger.py | webclinic017/bitrush | 5d76c98a17bb830dba1bdd103475c120903ded90 | [
"MIT"
] | 1 | 2022-01-09T21:17:17.000Z | 2022-01-09T21:17:17.000Z | from os import environ
from loguru import logger
from sentry_sdk import capture_exception
| 18.117647 | 40 | 0.717532 | from os import environ
from loguru import logger
from sentry_sdk import capture_exception
def info(msg: str):
logger.info(msg)
def error(exception: Exception):
logger.exception(exception)
# to trigger error alerts
if environ.get("STAGE") == "prod":
capture_exception(exception)
| 169 | 0 | 46 |
111a96782e4cecc752b1df49f925b746936f637a | 26,816 | py | Python | agil/Chef/models/form.py | sadekmehri/agil | 60f7143f1360f8f6aaeb659519e7182552ebabc5 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T22:40:44.000Z | 2021-01-07T22:40:44.000Z | agil/Chef/models/form.py | sadekmehri/agil | 60f7143f1360f8f6aaeb659519e7182552ebabc5 | [
"BSD-3-Clause"
] | null | null | null | agil/Chef/models/form.py | sadekmehri/agil | 60f7143f1360f8f6aaeb659519e7182552ebabc5 | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from flask_login import current_user
from flask_wtf import FlaskForm
from sqlalchemy import asc, or_
from wtforms import StringField, FloatField, SelectField, PasswordField, TextAreaField, HiddenField
from wtforms.validators import DataRequired, Length, Regexp, InputRequired, ValidationError, length
from agil.Chef.utils import validation, verifDate, days_between, date_check, days_calc
from agil.Main.utils import FormatString
from agil.models.Absence import Absence
from agil.models.Carburant import Carburant
from agil.models.Citerne import Citerne
from agil.models.Conge import TypeConge, Conge
from agil.models.Employee import Employee
from agil.models.Groupe import Groupe
from agil.models.Lavage import Lavage
from agil.models.Pompe import Pompe
from agil.models.PompeCiterne import PompeCiterne
from agil.models.Role import Role
from agil.models.Station import Station
from agil.models.Voie import Voie
| 47.128295 | 198 | 0.664976 | from datetime import datetime
from flask_login import current_user
from flask_wtf import FlaskForm
from sqlalchemy import asc, or_
from wtforms import StringField, FloatField, SelectField, PasswordField, TextAreaField, HiddenField
from wtforms.validators import DataRequired, Length, Regexp, InputRequired, ValidationError, length
from agil.Chef.utils import validation, verifDate, days_between, date_check, days_calc
from agil.Main.utils import FormatString
from agil.models.Absence import Absence
from agil.models.Carburant import Carburant
from agil.models.Citerne import Citerne
from agil.models.Conge import TypeConge, Conge
from agil.models.Employee import Employee
from agil.models.Groupe import Groupe
from agil.models.Lavage import Lavage
from agil.models.Pompe import Pompe
from agil.models.PompeCiterne import PompeCiterne
from agil.models.Role import Role
from agil.models.Station import Station
from agil.models.Voie import Voie
class ReligionField(SelectField):
def pre_validate(self, form):
for v, _ in self.choices:
if self.data == v:
break
else:
pass
class ToDoForm(FlaskForm):
Date = StringField("Date :", validators=[DataRequired()])
Task = TextAreaField("Event :", render_kw={"rows": 4, "cols": 50}, validators=[DataRequired(), Length(min=3, max=75)])
def validate_Date(self, field):
if not verifDate(str(field.data)) or not date_check(str(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
def validate_Task(self, field):
validation(field.data)
class ToDoFormUpd(ToDoForm):
Id = HiddenField("Id :", validators=[InputRequired()])
class Meta:
csrf = False
class ResetLoginForm(FlaskForm):
Password = PasswordField('Password', validators=[DataRequired()])
class DateRecette(FlaskForm):
Date = StringField('Date :')
def validate_Date(self, field):
if not (verifDate(field.data) and not days_between(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
class Recette(FlaskForm):
Matricule = StringField('Matricule Voiture :', id="matricule", validators=[DataRequired()])
Debut = StringField('Heure Debut : ', id="anytime-time", validators=[DataRequired()])
Fin = StringField('Heure Fin : ', id="anytime-time1", validators=[DataRequired()])
Type = SelectField('Type Lavage :', coerce=int, validators=[InputRequired()])
kilometrage = StringField('kilométrage :', id="km", validators=[DataRequired()])
Prix = FloatField('Prix :', id="price", validators=[DataRequired()])
Date = StringField('Date :', id="Date", validators=[DataRequired()])
Groupe = SelectField('Groupe :', coerce=int, validators=[InputRequired()])
def validate_Prix(self, field):
if float(field.data) <= 0:
raise ValidationError('La valeur du prix doit être > 0.')
def validate_Date(self, field):
if not (verifDate(field.data) and not days_between(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
def validate_Fin(form, field):
FMT = '%H:%M'
tdelta = datetime.strptime(field.data, FMT) - datetime.strptime(form.Debut.data, FMT)
if tdelta.days < 0:
raise ValidationError('L\'heure de fin doit être > Heure de début')
def validate_Type(self, field):
Type = Lavage.query.filter_by(idLavage=field.data).first()
if not Type:
raise ValidationError('Veuillez choisir une option valide.')
def validate_Groupe(self, field):
Gp = Groupe.query.filter_by(idGroupe=field.data).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
def fill_choice_type_lavage(self):
type = list(Lavage.query.with_entities(Lavage.idLavage, Lavage.TypeLavage).all())
self.Type.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in type])
def fill_choice_groupe(self):
Gp = list(Groupe.query.with_entities(Groupe.idGroupe, Groupe.NomGroupe).all())
self.Groupe.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
class Citernes(FlaskForm):
Code = StringField('Code Citerne :', id="code",validators=[DataRequired(), Regexp(regex=r'^[a-zA-Z0-9]*$', message='erreur')])
Car = SelectField('Carburant :', coerce=int, validators=[InputRequired()])
Volume = FloatField('Volume Citerne :', id="citerne", validators=[DataRequired()])
Min = StringField('Stock Min :', id="min", validators=[DataRequired()])
def validate_Code(self, field):
Type = Citerne.query.filter_by(NomCiterne=field.data).filter(Citerne.idStation == current_user.idStation).first()
if Type:
raise ValidationError('Nom Citerne existe déjà.')
def validate_Car(self, field):
Type = Carburant.query.filter_by(idCarburant=field.data).first()
if not Type:
raise ValidationError('Veuillez choisir une option valide.')
def fill_choice_carburant(self):
Gp = list(Carburant.query.with_entities(Carburant.idCarburant, Carburant.NomCarburant).all())
self.Car.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
def validate_Volume(self, field):
if float(field.data) <= 0:
raise ValidationError('La valeur du volume doit être > 0.')
def validate_Min(self, field):
if float(field.data.replace(' %', '')) <= 0:
raise ValidationError('La valeur minimale doit être > 0.')
class UpdCiternes(FlaskForm):
Act = FloatField('Volume Actuelle :', id="volAct", validators=[DataRequired()])
Etat = SelectField('Etat Citerne :', coerce=str, validators=[DataRequired()],choices=[('-1', '-- sélectionnez une option --'), ("0", 'Panne'), ("1", 'Active')])
Code = StringField('Code Citerne :', id="code",validators=[DataRequired(), Regexp(regex=r'^[a-zA-Z0-9]*$', message='erreur')])
Car = StringField('Carburant :')
Volume = FloatField('Volume Citerne :', id="citerne", validators=[DataRequired()])
Min = StringField('Stock Min :', id="min", validators=[DataRequired()])
def validate_Volume(self, field):
if float(field.data) <= 0:
raise ValidationError('La valeur du volume doit être > 0.')
def validate_Min(self, field):
if float(field.data.replace(' %', '')) <= 0:
raise ValidationError('La valeur minimale doit être > 0.')
def validate_Etat(self, field):
if not 1 >= int(field.data) >= 0:
raise ValidationError('Veuillez choisir une option valide.')
def validate_Act(form, field):
if form.Volume.data - field.data < 0:
raise ValidationError('Le volume actuel doit être <= Volume du citerne')
class Pompes(FlaskForm):
Code = StringField('Code Pompe :', id="code", validators=[DataRequired(), Regexp(regex=r'^[A-Za-z0-9]*$', message='Uniquement lettres et chiffres')])
def validate_Code(self, field):
Gp = Pompe.query.filter_by(NomPompe=field.data).filter(Pompe.idStation == current_user.idStation).first()
if Gp:
raise ValidationError('Pompe existe déjà.')
class UpdPompes(Pompes):
Etat = SelectField('Etat Pompe :', coerce=str, validators=[DataRequired()],choices=[("-1", '-- sélectionnez une option --'), ("0", 'Panne'), ("1", 'Active')])
def validate_Etat(self, field):
if not 1 >= int(field.data) >= 0:
raise ValidationError('Veuillez choisir une option valide.')
def validate_Code(form, field):
pass
class PompeCiternes(FlaskForm):
Code = SelectField('Code Pompe :', coerce=int, validators=[InputRequired()])
Cit = SelectField('Nom Citerne :', id="cit", coerce=int, validators=[InputRequired()])
Type = ReligionField('Type Carburant', id="type", coerce=int, choices=[(0, '-- sélectionnez une option --')])
def validate_Cit(form, field):
Gp = Citerne.query.filter_by(idCiterne=field.data).filter(Citerne.idStation == current_user.idStation, Citerne.EtatCiterne == 1).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
Gp = PompeCiterne.query.filter_by(idCiterne=field.data, idPompe=form.Code.data).filter(Citerne.idStation == current_user.idStation).first()
if Gp:
raise ValidationError('La liaison existe déjà.')
def validate_Code(self, field):
Gp = Pompe.query.filter_by(idPompe=field.data).filter(Pompe.idStation == current_user.idStation,Pompe.EtatPompe == 1).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
def fill_choice_Cit(self):
Gp = list(Citerne.query.with_entities(Citerne.idCiterne, Citerne.NomCiterne).filter(
Citerne.idStation == current_user.idStation,
Citerne.EtatCiterne == 1).order_by(asc(Citerne.NomCiterne)).all())
self.Cit.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
def fill_choice_Code(self):
# idStation will be replaced after
Gp = list(Pompe.query.with_entities(Pompe.idPompe, Pompe.NomPompe).filter(Pompe.idStation == current_user.idStation,Pompe.EtatPompe == 1).order_by(asc(Pompe.NomPompe)).all())
self.Code.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
class UpdPompeCiternes(PompeCiternes):
def validate_Cit(form, field):
Gp = Citerne.query.filter_by(idCiterne=field.data).filter(Citerne.idStation == current_user.idStation,Citerne.EtatCiterne == 1).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
class RecetteCar(FlaskForm):
Pmp = SelectField('Pompe :', coerce=int, id="pmp", validators=[InputRequired()])
Cit = ReligionField('Citerne :', coerce=int, id="cit", choices=[(0, '-- select an option --')],validators=[InputRequired()])
Car = ReligionField('Carburant :', coerce=int, id="car", choices=[(0, '-- select an option --')],validators=[InputRequired()])
Voie = ReligionField('Voie :', coerce=int, validators=[InputRequired()])
IndDebut = StringField('Indice Debut :', id="deb",validators=[DataRequired(), Regexp(regex=r'^[0-9]*$', message='Seuls les chiffres')])
IndFin = StringField('Indice Fin :', id="fin",validators=[DataRequired(), Regexp(regex=r'^[0-9]*$', message='Seuls les chiffres')])
Prix = StringField('Prix : (1 litre)', id="prix")
Groupe = SelectField('Groupe :', coerce=int, validators=[InputRequired()])
Date = StringField('Date :', id="Date", validators=[DataRequired()])
def validate_Date(self, field):
if not (verifDate(field.data) and not days_between(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
def validate_Groupe(self, field):
Gp = Groupe.query.filter_by(idGroupe=field.data).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
def fill_choice_groupe(self):
Gp = list(Groupe.query.with_entities(Groupe.idGroupe, Groupe.NomGroupe).all())
self.Groupe.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
def validate_Voie(self, field):
Gp = Voie.query.filter_by(idVoie=field.data).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
def fill_choice_Voie(self):
Gp = list(Voie.query.with_entities(Voie.idVoie, Voie.nomVoie).all())
self.Voie.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
def validate_Pmp(self, field):
Gp = Pompe.query.filter_by(idPompe=field.data).filter(Pompe.idStation == current_user.idStation,Pompe.EtatPompe == 1).first()
if not Gp:
raise ValidationError('Veuillez choisir une option valide.')
def fill_choice_Pmp(self):
Gp = list(Pompe.query.with_entities(Pompe.idPompe, Pompe.NomPompe).filter(Pompe.idStation == current_user.idStation,Pompe.EtatPompe == 1).order_by(asc(Pompe.NomPompe)).all())
self.Pmp.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
def validate_Car(self, field):
Type = Carburant.query.filter_by(idCarburant=field.data).first()
if not Type:
raise ValidationError('Veuillez choisir une option valide.')
def validate_Cit(self, field):
Type = Citerne.query.filter_by(idCiterne=field.data).filter(Citerne.idStation == current_user.idStation).first()
if not Type:
raise ValidationError('Veuillez choisir une option valide.')
def validate_IndFin(form, field):
if float(field.data) - float(form.IndDebut.data) <= 0:
raise ValidationError('Indice Fin devrait être > Indice Debut.')
Cit = Citerne.query.filter_by(idCiterne=form.Cit.data).filter(Citerne.idStation == current_user.idStation).first()
if Cit:
if Cit.Val_Act_Citerne < float(form.IndFin.data) - float(form.IndDebut.data):
raise ValidationError("C'est au-dessus du volume réel de Citerne {} : {} L".format(Cit.NomCiterne,Cit.Val_Act_Citerne))
else:
raise ValidationError("Quelque chose s'est mal passé.")
def validate_IndDeb(self, field):
if field.data <= 0:
raise ValidationError('Indice Debut devrait être > 0.')
def validate_Prix(self, field):
if field.data != "":
if float(field.data) <= 0:
raise ValidationError('Le prix d\'un litre doit être> 0')
class RecetteFilter(FlaskForm):
Grp = ReligionField('Groupe :', coerce=int, choices=[(0, '-- sélectionnez une option --')], validators=[InputRequired()])
Dat = StringField('Date :', id="input-daterange")
def fill_choice_groupe(self):
Gp = list(Groupe.query.with_entities(Groupe.idGroupe, Groupe.NomGroupe).all())
self.Grp.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])
def validate_Dat(self, field):
if field.data != "":
if not (verifDate(field.data) and not days_between(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
def validate_Grp(self, field):
if field.data != 0:
Gp = Groupe.query.filter_by(idGroupe=field.data).first()
if not Gp:
raise ValidationError('Veuillez choisir une date valide.')
class CarFilter(FlaskForm):
Mat = StringField('Matricule Voiture :', id="Mat", validators=[DataRequired()])
class ExpensesForm(FlaskForm):
Date = StringField("Date :",id="Date", validators=[DataRequired()])
Cat = StringField("Catégorie :", validators=[DataRequired(), Length(min=3, max=20)])
Desc = TextAreaField("Description :", render_kw={"rows": 4, "cols": 50}, validators=[DataRequired(), Length(min=3, max=75)])
Mont = StringField("Montant :",id="Mont", validators=[DataRequired()])
def validate_Date(self, field):
if not (verifDate(field.data) and not days_between(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
def validate_Cat(self, field):
validation(field.data)
def validate_Desc(self, field):
validation(field.data)
def validate_Mont(self, field):
if float(field.data) <= 0:
raise ValidationError('Montant doit être supérieur à 0')
class ExpenseFilter(FlaskForm):
Dat = StringField('Date :', id="input-daterange")
def validate_Dat(self, field):
if field.data != "":
if not (verifDate(field.data) and not days_between(field.data)):
raise ValidationError('Veuillez choisir une date valide.')
class EmpStation(FlaskForm):
    """Creation form for a station employee.

    The `validate_<field>` hooks enforce uniqueness of Code/Cin/Tel and of the
    (Nom, Prenom) pair against the Employee table, and referential integrity
    of the Groupe/Role selects. `fill_choice_groupe`/`fill_choice_role` must
    be called before rendering to populate the select widgets.
    """

    Code = StringField('Code :', id="codeEmp", validators=[DataRequired()])
    Cin = StringField('Cin :', id="Cin", validators=[DataRequired(), length(8), Regexp(regex=r'^[0-9]{8}$', message='Seuls les chiffres sont autorisés')])
    Nom = StringField('Nom :', id="Nom", validators=[DataRequired(), length(min=3, max=25), Regexp(regex=r'^[A-Za-z ]+$', message='Seules les lettres sont autorisées')])
    Prenom = StringField('Prenom :', id="Prenom", validators=[DataRequired(), length(min=3, max=25), Regexp(regex=r'^[A-Za-z ]+$', message='Seules les lettres sont autorisées')])
    Tel = StringField('Telephone :', id="Tel", validators=[DataRequired(), length(8), Regexp(regex=r'^[0-9]{8}$', message='Seuls les chiffres sont autorisés')])
    Date = StringField('Date de Naissance :', id="Date", validators=[DataRequired()])
    Sal = StringField('Salaire :', id="Sal", validators=[DataRequired()])
    Groupe = SelectField('Groupe :', coerce=int, validators=[InputRequired()])
    Role = SelectField('Role :', coerce=int, validators=[InputRequired()])

    def validate_Groupe(self, field):
        # The selected id must exist (guards against the placeholder row 0).
        Gp = Groupe.query.filter_by(idGroupe=field.data).first()
        if not Gp:
            raise ValidationError('Veuillez choisir une option valide.')

    def validate_Code(self, field):
        # Employee codes are unique.
        Gp = Employee.query.filter_by(codeEmp=FormatString(field.data)).first()
        if Gp:
            raise ValidationError('Le code employé existe déjà.')

    def validate_Cin(self, field):
        # CIN numbers are unique.
        Gp = Employee.query.filter_by(cinEmp=FormatString(field.data)).first()
        if Gp:
            raise ValidationError('Cin employé existe déjà.')

    def validate_Tel(self, field):
        # Phone numbers are unique.
        Gp = Employee.query.filter_by(telEmp=FormatString(field.data)).first()
        if Gp:
            raise ValidationError('Le téléphone existe déjà.')

    def fill_choice_groupe(self):
        # Placeholder entry (id 0) first, then every (id, name) pair.
        Gp = list(Groupe.query.with_entities(Groupe.idGroupe, Groupe.NomGroupe).all())
        self.Groupe.choices = [(0, '-- sélectionnez une option --')] + Gp

    def validate_Role(self, field):
        Gp = Role.query.filter_by(idRole=field.data).first()
        if not Gp:
            raise ValidationError('Veuillez choisir une option valide.')

    def fill_choice_role(self):
        Gp = list(Role.query.with_entities(Role.idRole, Role.NomRole).all())
        self.Role.choices = [(0, '-- sélectionnez une option --')] + Gp

    def validate_Nom(self, field):
        validation(field.data)

    def validate_Prenom(self, field):
        # Reject an exact (Nom, Prenom) duplicate.
        validation(field.data)
        Gp = Employee.query.filter(Employee.nomEmp == FormatString(self.Nom.data).capitalize(), Employee.prenomEmp == FormatString(field.data).capitalize()).first()
        if Gp:
            raise ValidationError('L\'employé existe déjà! Veuillez en choisir un autre.')

    def validate_Date(self, field):
        # Birth date must parse and not lie in the future -- helper semantics
        # assumed from usage, confirm against verifDate/days_between.
        if not (verifDate(field.data) and not days_between(field.data)):
            raise ValidationError('Veuillez choisir une date valide.')

    def validate_Sal(self, field):
        # float() on free-text input raises ValueError for non-numeric data;
        # turn that into a normal form error instead of a 500.
        try:
            sal = float(field.data)
        except (TypeError, ValueError):
            raise ValidationError('Salaire doit être > 0.')
        if sal <= 0:
            raise ValidationError('Salaire doit être > 0.')
class UpdEmpStation(EmpStation):
    """Update variant of EmpStation.

    Overrides the uniqueness validators with no-ops: when editing an
    existing employee, the current Code/Cin/Tel values would otherwise
    collide with the employee's own database row.
    """

    def validate_Code(self, field):
        pass

    def validate_Cin(self, field):
        pass

    def validate_Tel(self, field):
        pass

    def validate_Prenom(self, field):
        # Also skips the `validation()` text check inherited from EmpStation
        # -- NOTE(review): confirm that is intentional for updates.
        pass
class EmployeeFilter(FlaskForm):
    """Filter form restricting the employee list to a single group."""

    Groupe = SelectField('Groupe :', coerce=int, validators=[InputRequired()])

    def validate_Groupe(self, field):
        # The submitted id must match an existing Groupe row (rejects the
        # placeholder id 0).
        if Groupe.query.filter_by(idGroupe=field.data).first() is None:
            raise ValidationError('Veuillez choisir une option valide.')

    def fill_choice_groupe(self):
        """Populate the select: placeholder entry first, then (id, name) pairs."""
        available = list(Groupe.query.with_entities(Groupe.idGroupe, Groupe.NomGroupe).all())
        self.Groupe.choices = [(0, '-- sélectionnez une option --')] + available
class AbsenceForm(FlaskForm):
    """Form recording one employee absence on a given date."""

    Date = StringField("Date :", id="Date", validators=[DataRequired()])
    Code = StringField("Code ou Cin Employé :", id="Code", validators=[DataRequired()])
    # Name/Prenom/Groupe carry no validators -- presumably display-only
    # echoes filled client-side once the code is resolved; confirm.
    Name = StringField("Nom :", id="Name")
    Prenom = StringField("Prenom :", id="Prenom")
    Groupe = StringField("Groupe :", id="Groupe")
    Desc = TextAreaField("Description :", id="Desc", render_kw={"rows": 4, "cols": 50}, validators=[DataRequired(), Length(min=3, max=75)])

    def validate_Date(self, field):
        # Date must satisfy verifDate and days_between (looks like "parses
        # and is not in the future" -- helper bodies not visible, confirm).
        if not (verifDate(field.data) and not days_between(field.data)):
            raise ValidationError('Veuillez choisir une date valide.')

    def validate_Code(form, field):
        # `form` here is the same object the siblings call `self`.
        validation(field.data)
        # The code/CIN must identify an employee of the current user's station...
        Gp = Employee.query.filter(or_(Employee.codeEmp == FormatString(field.data), Employee.cinEmp == FormatString(field.data)), Employee.idStation == current_user.idStation).first()
        if not Gp:
            raise ValidationError('L\'employé n\'existe pas! Veuillez saisir un autre code.')
        # ...and must not already have an absence recorded for the same date.
        Ab = Absence.query.join(Employee, Employee.idEmp == Absence.idEmp) \
            .join(Station, Station.idStation == Employee.idStation) \
            .filter(Employee.idStation == current_user.idStation, Absence.idStation == current_user.idStation, Employee.idEmp == Gp.idEmp, Absence.DateAbsence == FormatString(form.Date.data)).first()
        if Ab:
            raise ValidationError("Vous avez ajouté cet employé à la liste.")

    def validate_Desc(self, field):
        validation(field.data)
class UpdAbsenceForm(AbsenceForm):
    """Update variant: disables the existence/duplicate check on Code so the
    record being edited does not conflict with itself."""

    def validate_Code(self, field):
        pass
class AbsenceFilter(FlaskForm):
    """Filter form looking up absences of one employee by code or CIN."""

    Code = StringField('Code ou Cin Employé :', id="Code")

    def validate_Code(self, field):
        """Accept only a code/CIN that matches an existing employee."""
        validation(field.data)
        employee = Employee.query.filter(
            or_(Employee.codeEmp == FormatString(field.data),
                Employee.cinEmp == FormatString(field.data))).first()
        if employee is None:
            raise ValidationError('L\'employé n\'existe pas! Veuillez saisir un autre code.')
class CongeForm(FlaskForm):
    """Form creating a leave (congé) for one employee of the current station.

    Besides basic field checks, `validate_Code` rejects a leave whose
    [DatDeb, DatFin] interval overlaps an existing leave of the same employee.
    `fill_choice_type` must be called before rendering the Type select.
    """

    DatDeb = StringField("Date Sortie :", id="DateDeb", validators=[DataRequired()])
    DatFin = StringField("Date Retour :", id="DateFin", validators=[DataRequired()])
    Code = StringField("Code ou Cin Employé :", id="Code", validators=[DataRequired()])
    # Display-only echoes of the resolved employee (no validators).
    Name = StringField("Nom :", id="Name")
    Prenom = StringField("Prenom :", id="Prenom")
    Groupe = StringField("Groupe :", id="Groupe")
    Type = SelectField('Type du congé :', id="Type", coerce=int, validators=[InputRequired()])
    Desc = TextAreaField("Description :", id="Desc", render_kw={"rows": 4, "cols": 50}, validators=[DataRequired(), Length(min=3, max=75)])

    def validate_Code(form, field):
        validation(field.data)
        # The code/CIN must identify an employee of the current user's station.
        Gp = Employee.query.filter(or_(Employee.codeEmp == FormatString(field.data), Employee.cinEmp == FormatString(field.data)), Employee.idStation == current_user.idStation).first()
        if not Gp:
            raise ValidationError('L\'employé n\'existe pas! Veuillez saisir un autre code.')
        # Fetch that employee's leaves starting on/after the requested start
        # date and reject any interval overlap with [DatDeb, DatFin].
        Gp = Conge.query.join(Employee, Employee.idEmp == Conge.idEmp) \
            .join(Station, Station.idStation == Employee.idStation) \
            .filter(or_(Employee.codeEmp == FormatString(field.data), Employee.cinEmp == FormatString(field.data)), Employee.idStation == current_user.idStation) \
            .filter(Conge.idStation == current_user.idStation, Conge.DateDebConge >= FormatString(form.DatDeb.data)).all()
        Test = True
        for record in Gp:
            # Overlap test: either the new interval contains the existing one,
            # or it is NOT fully before / fully after it.
            if ((days_calc(record.DateDebConge, FormatString(form.DatDeb.data)) >= 0 and days_calc(FormatString(form.DatFin.data), record.DateFinConge) >= 0)
                    or (not((days_calc(FormatString(form.DatDeb.data), record.DateDebConge) > 0 and days_calc(FormatString(form.DatFin.data), record.DateDebConge) >= 0)
                            or (days_calc(record.DateFinConge, FormatString(form.DatDeb.data)) > 0 and days_calc(record.DateFinConge, FormatString(form.DatFin.data)) > 0)))):
                Test = False
        if not Test:
            raise ValidationError('L\'employé est en congé ! Veuillez saisir une autre date.')

    def validate_Desc(self, field):
        validation(field.data)

    def validate_Type(self, field):
        # The selected leave type must exist (rejects the placeholder id 0).
        Gp = TypeConge.query.filter_by(idTypeConge=field.data).first()
        if not Gp:
            raise ValidationError('Veuillez choisir une option valide.')

    def fill_choice_type(self):
        Gp = list(TypeConge.query.with_entities(TypeConge.idTypeConge, TypeConge.typeConge).all())
        self.Type.choices = ([(0, '-- sélectionnez une option --')]) + ([i for i in Gp])

    def validate_DatDeb(self, field):
        if not (verifDate(field.data)):
            raise ValidationError('Veuillez choisir une date valide.')

    def validate_DatFin(form, field):
        # Return date must parse and be at least one day after the start.
        if not (verifDate(field.data) and days_calc(form.DatDeb.data, field.data) >= 1):
            # Fixed message: the original compared "date retour" to itself.
            raise ValidationError('Date retour devrait être > date sortie.')
class UpdCongeForm(CongeForm):
    """Update variant: keeps the employee-existence check but drops the
    overlapping-leave check, so the record being edited does not conflict
    with itself."""

    def validate_Code(form, field):
        validation(field.data)
        Gp = Employee.query.filter(or_(Employee.codeEmp == FormatString(field.data), Employee.cinEmp == FormatString(field.data)), Employee.idStation == current_user.idStation).first()
        if not Gp:
            raise ValidationError('L\'employé n\'existe pas! Veuillez saisir un autre code.')
class CongeFormFilter(FlaskForm):
    """Filter form selecting leaves whose start date lies in [DatDeb, DatFin]."""

    DatDeb = StringField("Date Sortie Debut :", id="DateDeb", validators=[DataRequired()])
    DatFin = StringField("Date Sortie Fin :", id="DateFin", validators=[DataRequired()])

    def validate_DatDeb(self, field):
        if not (verifDate(field.data)):
            raise ValidationError('Veuillez choisir une date valide.')

    def validate_DatFin(form, field):
        # End of the range must parse and be at least one day after the start.
        if not (verifDate(field.data) and days_calc(form.DatDeb.data, field.data) >= 1):
            raise ValidationError('Date fin devrait être > date debut.')
class SettingsInfo(FlaskForm):
    """Profile-settings form: personal data of the logged-in user.

    Code and Email carry no validators -- presumably display-only here;
    confirm against the settings view.
    """

    Code = StringField('Code :', id="Code")
    Cin = StringField('Cin :', id="Cin", validators=[DataRequired(), length(8), Regexp(regex=r'^[0-9]{8}$', message='Seuls les chiffres sont autorisés')])
    Nom = StringField('Nom :', id="Nom", validators=[DataRequired(), length(min=3, max=25), Regexp(regex=r'^[A-Za-z ]+$', message='Seules les lettres sont autorisées')])
    Prenom = StringField('Prenom :', id="Prenom", validators=[DataRequired(), length(min=3, max=25), Regexp(regex=r'^[A-Za-z ]+$', message='Seules les lettres sont autorisées')])
    Tel = StringField('Telephone :', id="Tel", validators=[DataRequired(), length(8), Regexp(regex=r'^[0-9]{8}$', message='Seuls les chiffres sont autorisés')])
    # Plain assignment for consistency with the sibling forms; the original
    # stray `Date: StringField = ...` annotation had no runtime effect.
    Date = StringField('Date de Naissance :', id="Date", validators=[DataRequired()])
    Email = StringField('Email :', id="Email")

    def validate_Date(self, field):
        if not (verifDate(field.data) and not days_between(field.data)):
            raise ValidationError('Veuillez choisir une date valide.')

    def validate_Nom(self, field):
        validation(field.data)

    def validate_Prenom(self, field):
        validation(field.data)
| 15,362 | 9,725 | 836 |
1398971302ab0cacdde5283f6e1b051d2130b83f | 2,521 | py | Python | authority/models.py | azizmb/django-authority | 231be8ecdf696eab418ee5f0bf9cfb42722e28eb | [
"BSD-3-Clause"
] | 2 | 2019-10-03T03:38:21.000Z | 2021-09-30T22:47:33.000Z | authority/models.py | jsocol/django-authority | 800c42759db817d49eea535fabdfcc4147d99e6c | [
"BSD-3-Clause"
] | null | null | null | authority/models.py | jsocol/django-authority | 800c42759db817d49eea535fabdfcc4147d99e6c | [
"BSD-3-Clause"
] | 1 | 2021-09-30T22:47:34.000Z | 2021-09-30T22:47:34.000Z | from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from authority.managers import PermissionManager
class Permission(models.Model):
    """
    A granular permission model, per-object permission in other words.
    This kind of permission is associated with a user/group and an object
    of any content type.
    """
    codename = models.CharField(_('codename'), max_length=100)
    # Generic foreign key to the row the permission applies to.
    content_type = models.ForeignKey(ContentType, related_name="row_permissions")
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey('content_type', 'object_id')
    # Holder of the permission: a user and/or a group (both optional).
    user = models.ForeignKey(User, null=True, blank=True, related_name='granted_permissions')
    group = models.ForeignKey(Group, null=True, blank=True)
    # User who granted/approved the permission (set by `approve`).
    creator = models.ForeignKey(User, null=True, blank=True, related_name='created_permissions')
    approved = models.BooleanField(_('approved'), default=False, help_text=_("Designates whether the permission has been approved and treated as active. Unselect this instead of deleting permissions."))
    date_requested = models.DateTimeField(_('date requested'), default=datetime.now)
    date_approved = models.DateTimeField(_('date approved'), blank=True, null=True)

    # Custom manager with permission-specific query helpers.
    objects = PermissionManager()

    def approve(self, creator):
        """
        Approve granular permission request setting a Permission entry as
        approved=True for a specific action from an user on an object instance.
        """
        self.approved = True
        self.creator = creator
        self.save()
| 42.728814 | 202 | 0.711226 | from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from authority.managers import PermissionManager
class Permission(models.Model):
    """
    A granular permission model, per-object permission in other words.
    This kind of permission is associated with a user/group and an object
    of any content type.
    """
    codename = models.CharField(_('codename'), max_length=100)
    # Generic foreign key to the row the permission applies to.
    content_type = models.ForeignKey(ContentType, related_name="row_permissions")
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey('content_type', 'object_id')
    # Holder of the permission: a user and/or a group (both optional).
    user = models.ForeignKey(User, null=True, blank=True, related_name='granted_permissions')
    group = models.ForeignKey(Group, null=True, blank=True)
    # User who granted/approved the permission (set by `approve`).
    creator = models.ForeignKey(User, null=True, blank=True, related_name='created_permissions')
    approved = models.BooleanField(_('approved'), default=False, help_text=_("Designates whether the permission has been approved and treated as active. Unselect this instead of deleting permissions."))
    date_requested = models.DateTimeField(_('date requested'), default=datetime.now)
    date_approved = models.DateTimeField(_('date approved'), blank=True, null=True)

    objects = PermissionManager()

    def __unicode__(self):
        # Python 2 string representation (pre-Django-2.0 era codebase).
        return self.codename

    class Meta:
        # One permission row per (codename, object, user, group) combination.
        unique_together = ("codename", "object_id", "content_type", "user", "group")
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        permissions = (
            ('change_foreign_permissions', 'Can change foreign permissions'),
            ('delete_foreign_permissions', 'Can delete foreign permissions'),
            ('approve_permission_requests', 'Can approve permission requests'),
        )

    def save(self, *args, **kwargs):
        # Make sure the approval date is always set
        if self.approved and not self.date_approved:
            self.date_approved = datetime.now()
        super(Permission, self).save(*args, **kwargs)

    def approve(self, creator):
        """
        Approve granular permission request setting a Permission entry as
        approved=True for a specific action from an user on an object instance.
        """
        self.approved = True
        self.creator = creator
        self.save()
| 248 | 431 | 81 |
13732902169601c5b96f9035d2641c2704e3729a | 1,339 | py | Python | lambda/function/valueextraction/test_classes.py | LouisLoison/deepblooGit | 4691509acc252c887f428e693c72fa30d8f18a47 | [
"Apache-2.0"
] | null | null | null | lambda/function/valueextraction/test_classes.py | LouisLoison/deepblooGit | 4691509acc252c887f428e693c72fa30d8f18a47 | [
"Apache-2.0"
] | null | null | null | lambda/function/valueextraction/test_classes.py | LouisLoison/deepblooGit | 4691509acc252c887f428e693c72fa30d8f18a47 | [
"Apache-2.0"
] | null | null | null | import unittest
from classes import *
# Allow running this test module directly (e.g. `python test_classes.py`).
if __name__ == '__main__':
    unittest.main()
| 34.333333 | 94 | 0.652726 | import unittest
from classes import *
class TestClassUnit(unittest.TestCase):
    """Unit tests for the `Unit` class from `classes`."""

    def setUp(self):
        # One unit built from its full name, one from its abbreviation.
        self.unit1 = Unit("kilowatt", "power")
        self.unit2 = Unit("kW", "power")

    def test_attributes(self):
        """Test whether the attributes were correctly assigned"""
        # Test with unit full name
        self.assertEqual(self.unit1.name, "kilowatt")
        self.assertEqual(self.unit1.entity, "power")
        self.assertEqual(self.unit1.ref_unit, "watt")
        self.assertEqual(self.unit1.uri, "en.m.wikipedia.org/wiki/{}".format(self.unit1.name))
        # Test with abbreviated unit
        self.assertEqual(self.unit2.name, "kW")
        self.assertEqual(self.unit2.entity, "power")
        self.assertEqual(self.unit2.ref_unit, "watt")
        self.assertEqual(self.unit2.uri, "en.m.wikipedia.org/wiki/{}".format(self.unit2.name))
        # Test for inadequate entity
        with self.assertRaises(EntityException):
            self.unit3 = Unit("ampere", "time")
class TestClassMetric(unittest.TestCase):
    """Unit tests for the `Metric` class from `classes`."""

    def setUp(self):
        self.metric = Metric(25.4, "kilowatt", "power", "25kW")

    def test_str(self):
        # str() renders the value with two decimals plus the unit name.
        self.assertEqual(str(self.metric), "25.40 kilowatt")
        # TODO: Assert that units are always written with their full names


# Allow running this test module directly (e.g. `python test_classes.py`).
if __name__ == '__main__':
    unittest.main()
| 276 | 875 | 99 |
e4106635d7619aef4ac144460c96350e6a945f15 | 1,372 | py | Python | localgraphclustering/algorithms/acl_list.py | vishalbelsare/LocalGraphClustering | a6325350997932d548a876deb259c2387fc2c809 | [
"MIT"
] | 106 | 2017-09-06T04:47:02.000Z | 2022-03-30T07:43:27.000Z | localgraphclustering/algorithms/acl_list.py | pmacg/local-bipartite-clusters | d29e8d37c79e27b48e785b7b2c4bad9ea5d66b6d | [
"MIT"
] | 51 | 2017-09-06T02:22:09.000Z | 2021-12-15T11:39:28.000Z | localgraphclustering/algorithms/acl_list.py | vishalbelsare/LocalGraphClustering | a6325350997932d548a876deb259c2387fc2c809 | [
"MIT"
] | 38 | 2017-09-04T21:45:13.000Z | 2022-01-19T09:48:25.000Z | import time
import numpy as np
| 24.945455 | 89 | 0.462099 | import time
import numpy as np
def acl_list(ref_node, g, alpha=0.15, rho=1.0e-5, max_iter=100000, max_time=100):
    """Approximate personalized PageRank via the ACL push algorithm.

    Repeatedly pushes residual mass from queued nodes to their neighbours
    until every residual r[i] drops below rho * degree(i), or an iteration
    or wall-clock budget is exhausted.

    Args:
        ref_node: iterable of seed node indices (each gets residual 1).
        g: graph with ``adjacency_matrix`` (CSR: indptr/indices/data) and
            per-node degree vector ``d``.
        alpha: teleportation parameter of the push step.
        rho: residual tolerance relative to node degree.
        max_iter: maximum number of push operations.
        max_time: wall-clock limit in seconds.

    Returns:
        numpy array ``p`` with the (unnormalized) PageRank estimates.
    """
    from collections import deque

    n = g.adjacency_matrix.shape[0]
    r = np.zeros(n)
    p = np.zeros(n)
    # FIFO work queue; deque gives O(1) popleft where the original
    # `del nodes[0]` on a list was O(len(nodes)) per pop.
    nodes = deque()
    for i in ref_node:
        r[i] = 1
        if r[i] > rho * g.d[i]:
            nodes.append(i)

    num_pushes = 0
    start = time.time()
    while nodes and num_pushes <= max_iter:
        idx = nodes.popleft()
        direction = r[idx]
        # Push: move an alpha fraction of the residual into p, halve-decay
        # the rest between idx and its neighbours.
        p[idx] = p[idx] + alpha * direction
        r[idx] = ((1 - alpha) / 2) * direction
        if r[idx] >= rho * g.d[idx]:
            # Residual still above tolerance: requeue idx at the back
            # (before any neighbours touched below, as in the original).
            nodes.append(idx)
        for u in range(g.adjacency_matrix.indptr[idx], g.adjacency_matrix.indptr[idx + 1]):
            j = g.adjacency_matrix.indices[u]
            update = ((1 - alpha) / 2) * (direction / g.d[idx]) * g.adjacency_matrix.data[u]
            r_new = r[j] + update
            thresh = rho * g.d[j]
            # Enqueue j only on the transition below->above threshold so it
            # appears at most once per crossing.
            if r[j] <= thresh and r_new > thresh:
                nodes.append(j)
            r[j] = r_new
        num_pushes = num_pushes + 1
        if time.time() - start > max_time:
            print("ACL: Maximum running time reached")
            break
    return p
| 1,318 | 0 | 23 |
5dc54e4673a11ed0255507be3766ee629180e1ed | 5,380 | py | Python | mmocr/datasets/base_dataset.py | yuexy/mmocr | 82488024db159266e66ea6b0d6f84a5a18e87362 | [
"Apache-2.0"
] | 2,261 | 2021-04-08T03:45:41.000Z | 2022-03-31T23:37:46.000Z | mmocr/datasets/base_dataset.py | yuexy/mmocr | 82488024db159266e66ea6b0d6f84a5a18e87362 | [
"Apache-2.0"
] | 789 | 2021-04-08T05:40:13.000Z | 2022-03-31T09:42:39.000Z | mmocr/datasets/base_dataset.py | yuexy/mmocr | 82488024db159266e66ea6b0d6f84a5a18e87362 | [
"Apache-2.0"
] | 432 | 2021-04-08T03:56:16.000Z | 2022-03-30T18:44:43.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.utils import print_log
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.pipelines import Compose
from torch.utils.data import Dataset
from mmocr.datasets.builder import build_loader
@DATASETS.register_module()
class BaseDataset(Dataset):
    """Custom dataset for text detection, text recognition, and their
    downstream tasks.

    1. The text detection annotation format is as follows:
       The `annotations` field is optional for testing
       (this is one line of anno_file, with line-json-str
       converted to dict for visualizing only).

        {
            "file_name": "sample.jpg",
            "height": 1080,
            "width": 960,
            "annotations":
                [
                    {
                        "iscrowd": 0,
                        "category_id": 1,
                        "bbox": [357.0, 667.0, 804.0, 100.0],
                        "segmentation": [[361, 667, 710, 670,
                                          72, 767, 357, 763]]
                    }
                ]
        }

    2. The two text recognition annotation formats are as follows:
       The `x1,y1,x2,y2,x3,y3,x4,y4` field is used for online crop
       augmentation during training.

        format1: sample.jpg hello
        format2: sample.jpg 20 20 100 20 100 40 20 40 hello

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        loader (dict): Dictionary to construct loader
            to load annotation infos.
        img_prefix (str, optional): Image prefix to generate full
            image path.
        test_mode (bool, optional): If set True, try...except will
            be turned off in __getitem__.
    """
    # NOTE(review): this variant has no __init__; self.data_infos,
    # self.img_prefix, self.test_mode and self.pipeline are expected to be
    # provided by an initializer not shown here -- confirm.

    def _set_group_flag(self):
        """Set flag."""
        # Uniform zero flags: grouping has no meaning for text tasks.
        self.flag = np.zeros(len(self), dtype=np.uint8)

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix

    def prepare_train_img(self, index):
        """Get training data and annotations from pipeline.

        Args:
            index (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.data_infos[index]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, img_info):
        """Get testing data from pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        return self.prepare_train_img(img_info)

    def _log_error_index(self, index):
        """Logging data info of bad index."""
        try:
            data_info = self.data_infos[index]
            img_prefix = self.img_prefix
            print_log(f'Warning: skip broken file {data_info} '
                      f'with img_prefix {img_prefix}')
        except Exception as e:
            print_log(f'load index {index} with error {e}')

    def _get_next_index(self, index):
        """Get next index from dataset."""
        self._log_error_index(index)
        # Wrap around so a bad sample at the end falls back to the start.
        index = (index + 1) % len(self)
        return index

    def __getitem__(self, index):
        """Get training/test data from pipeline.

        Args:
            index (int): Index of data.

        Returns:
            dict: Training/test data.
        """
        if self.test_mode:
            return self.prepare_test_img(index)

        # Training mode: skip broken samples by advancing to the next index
        # until one goes through the pipeline successfully.
        while True:
            try:
                data = self.prepare_train_img(index)
                if data is None:
                    raise Exception('prepared train data empty')
                break
            except Exception as e:
                print_log(f'prepare index {index} with error {e}')
                index = self._get_next_index(index)
        return data

    def format_results(self, results, **kwargs):
        """Placeholder to format result to dataset-specific output."""
        pass

    def evaluate(self, results, metric=None, logger=None, **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]
        """
        raise NotImplementedError
| 32.02381 | 75 | 0.569331 | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.utils import print_log
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.pipelines import Compose
from torch.utils.data import Dataset
from mmocr.datasets.builder import build_loader
@DATASETS.register_module()
class BaseDataset(Dataset):
    """Custom dataset for text detection, text recognition, and their
    downstream tasks.

    1. The text detection annotation format is as follows:
       The `annotations` field is optional for testing
       (this is one line of anno_file, with line-json-str
       converted to dict for visualizing only).

        {
            "file_name": "sample.jpg",
            "height": 1080,
            "width": 960,
            "annotations":
                [
                    {
                        "iscrowd": 0,
                        "category_id": 1,
                        "bbox": [357.0, 667.0, 804.0, 100.0],
                        "segmentation": [[361, 667, 710, 670,
                                          72, 767, 357, 763]]
                    }
                ]
        }

    2. The two text recognition annotation formats are as follows:
       The `x1,y1,x2,y2,x3,y3,x4,y4` field is used for online crop
       augmentation during training.

        format1: sample.jpg hello
        format2: sample.jpg 20 20 100 20 100 40 20 40 hello

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        loader (dict): Dictionary to construct loader
            to load annotation infos.
        img_prefix (str, optional): Image prefix to generate full
            image path.
        test_mode (bool, optional): If set True, try...except will
            be turned off in __getitem__.
    """

    def __init__(self,
                 ann_file,
                 loader,
                 pipeline,
                 img_prefix='',
                 test_mode=False):
        super().__init__()
        self.test_mode = test_mode
        self.img_prefix = img_prefix
        self.ann_file = ann_file
        # load annotations
        loader.update(ann_file=ann_file)
        self.data_infos = build_loader(loader)
        # processing pipeline
        self.pipeline = Compose(pipeline)
        # set group flag and class, no meaning
        # for text detect and recognize
        self._set_group_flag()
        self.CLASSES = 0

    def __len__(self):
        return len(self.data_infos)

    def _set_group_flag(self):
        """Set flag."""
        self.flag = np.zeros(len(self), dtype=np.uint8)

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix

    def prepare_train_img(self, index):
        """Get training data and annotations from pipeline.

        Args:
            index (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.data_infos[index]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, img_info):
        """Get testing data from pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        return self.prepare_train_img(img_info)

    def _log_error_index(self, index):
        """Logging data info of bad index."""
        try:
            data_info = self.data_infos[index]
            img_prefix = self.img_prefix
            print_log(f'Warning: skip broken file {data_info} '
                      f'with img_prefix {img_prefix}')
        except Exception as e:
            print_log(f'load index {index} with error {e}')

    def _get_next_index(self, index):
        """Get next index from dataset."""
        self._log_error_index(index)
        # Wrap around so a bad sample at the end falls back to the start.
        index = (index + 1) % len(self)
        return index

    def __getitem__(self, index):
        """Get training/test data from pipeline.

        Args:
            index (int): Index of data.

        Returns:
            dict: Training/test data.
        """
        if self.test_mode:
            return self.prepare_test_img(index)

        # Training mode: skip broken samples by advancing to the next index
        # until one goes through the pipeline successfully.
        while True:
            try:
                data = self.prepare_train_img(index)
                if data is None:
                    raise Exception('prepared train data empty')
                break
            except Exception as e:
                print_log(f'prepare index {index} with error {e}')
                index = self._get_next_index(index)
        return data

    def format_results(self, results, **kwargs):
        """Placeholder to format result to dataset-specific output."""
        pass

    def evaluate(self, results, metric=None, logger=None, **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]
        """
        raise NotImplementedError
| 638 | 0 | 54 |
458e196363715df31a63162c01d08a23e0f42486 | 3,221 | py | Python | dvc/objects/diff.py | annmary-roy/dvc | eefede92b204084ad418ed300ac4eb480d696421 | [
"Apache-2.0"
] | null | null | null | dvc/objects/diff.py | annmary-roy/dvc | eefede92b204084ad418ed300ac4eb480d696421 | [
"Apache-2.0"
] | 66 | 2021-03-29T09:02:26.000Z | 2022-03-30T13:09:33.000Z | dvc/objects/diff.py | annmary-roy/dvc | eefede92b204084ad418ed300ac4eb480d696421 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional, Tuple
if TYPE_CHECKING:
from .file import HashFile
ADD = "add"
MODIFY = "modify"
DELETE = "delete"
UNCHANGED = "unchanged"
@dataclass
@dataclass
@dataclass
ROOT = ("",)
| 23.683824 | 71 | 0.588327 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional, Tuple
if TYPE_CHECKING:
from .file import HashFile
ADD = "add"
MODIFY = "modify"
DELETE = "delete"
UNCHANGED = "unchanged"
@dataclass
class TreeEntry:
    """One side (old or new) of a tree-diff entry.

    Truthiness and equality are driven entirely by ``obj``: two entries are
    equal only when both carry an object with the same ``hash_info`` under
    the same key (``in_cache`` never participates in equality).
    """

    in_cache: bool
    key: Tuple[str]
    obj: Optional["HashFile"] = field(default=None)

    def __bool__(self):
        return bool(self.obj)

    def __eq__(self, other):
        if not isinstance(other, TreeEntry):
            return False
        same_shape = self.key == other.key and bool(self.obj) == bool(other.obj)
        if not same_shape or not self.obj:
            # Entries without objects never compare equal, even to each other.
            return False
        return self.obj.hash_info == other.obj.hash_info
@dataclass
class Change:
    """Pairing of the old and new TreeEntry for one key, classified by `typ`."""

    old: TreeEntry
    new: TreeEntry

    @property
    def typ(self):
        # Classify by which sides carry an object and whether they differ.
        has_old = bool(self.old)
        has_new = bool(self.new)
        if has_old and not has_new:
            return DELETE
        if has_new and not has_old:
            return ADD
        if has_old and has_new and self.old != self.new:
            return MODIFY
        return UNCHANGED

    def __bool__(self):
        return self.typ != UNCHANGED
@dataclass
class DiffResult:
    """Outcome of a tree diff, bucketed by change type."""

    added: List[Change] = field(default_factory=list, compare=True)
    modified: List[Change] = field(default_factory=list, compare=True)
    deleted: List[Change] = field(default_factory=list, compare=True)
    unchanged: List[Change] = field(default_factory=list, compare=True)

    def __bool__(self):
        # Unchanged entries alone do not make the diff "non-empty".
        return bool(self.added) or bool(self.modified) or bool(self.deleted)
ROOT = ("",)
def diff(
    old: Optional["HashFile"], new: Optional["HashFile"], cache
) -> DiffResult:
    """Bucket the entries of two objects into added/modified/deleted/unchanged.

    ``old``/``new`` may each be None, a plain object, or a Tree; ``cache`` is
    consulted to fill every entry's ``in_cache`` flag. Comparison walks the
    union of both sides' keys; the ROOT key stands for the object itself.
    """
    from .tree import Tree

    if old is None and new is None:
        return DiffResult()

    def _get_keys(obj):
        # ROOT represents the object itself; Trees additionally contribute
        # the key of every child entry.
        if not obj:
            return []
        return [ROOT] + (
            [key for key, _ in obj] if isinstance(obj, Tree) else []
        )

    old_keys = set(_get_keys(old))
    new_keys = set(_get_keys(new))

    def _get_obj(obj, key):
        # Resolve a key to the object on one side (None when absent).
        if not obj or key == ROOT:
            return obj
        return obj.get(key)

    def _in_cache(obj, cache):
        from . import check
        from .errors import ObjectFormatError

        if not obj:
            return False

        try:
            check(cache, obj)
            return True
        except (FileNotFoundError, ObjectFormatError):
            # Missing or corrupt in the cache counts as "not cached".
            return False

    ret = DiffResult()

    for key in old_keys | new_keys:
        old_obj = _get_obj(old, key)
        new_obj = _get_obj(new, key)
        change = Change(
            old=TreeEntry(_in_cache(old_obj, cache), key, old_obj),
            new=TreeEntry(_in_cache(new_obj, cache), key, new_obj),
        )

        if change.typ == ADD:
            ret.added.append(change)
        elif change.typ == MODIFY:
            ret.modified.append(change)
        elif change.typ == DELETE:
            ret.deleted.append(change)
        else:
            assert change.typ == UNCHANGED
            # An unchanged non-Tree entry that is absent from the cache still
            # needs transferring, so it is reported as modified.
            if not change.new.in_cache and not isinstance(
                change.new.obj, Tree
            ):
                ret.modified.append(change)
            else:
                ret.unchanged.append(change)
    return ret
| 2,315 | 542 | 89 |
8ebf57f798435311aa471271c473c4df844dc059 | 1,143 | py | Python | setup.py | kngwyu/intrinsic-rewards | c2a8f98c0fd9292dc90f8857fa5ddb763ba8b994 | [
"Apache-2.0"
] | 8 | 2019-09-22T12:13:05.000Z | 2022-03-31T11:52:13.000Z | setup.py | kngwyu/intrinsic-rewards | c2a8f98c0fd9292dc90f8857fa5ddb763ba8b994 | [
"Apache-2.0"
] | 2 | 2019-07-29T08:57:28.000Z | 2019-11-29T10:35:35.000Z | setup.py | kngwyu/intrinsic-rewards | c2a8f98c0fd9292dc90f8857fa5ddb763ba8b994 | [
"Apache-2.0"
] | 1 | 2020-03-24T01:38:27.000Z | 2020-03-24T01:38:27.000Z | import io
import re
from setuptools import find_packages, setup
# Read the package version straight out of the source tree so it is defined
# in exactly one place (int_rew/__init__.py).
with io.open("int_rew/__init__.py", "rt", encoding="utf8") as f:
    version = re.search(r"__version__ = \"(.*?)\"", f.read()).group(1)

setup(
    name="intrinsic_rewards",
    version=version,
    url="https://github.com/kngwyu/intrinsic_rewards",
    project_urls={
        "Code": "https://github.com/kngwyu/intrinsic_rewards",
        "Issue tracker": "https://github.com/kngwyu/intrinsic_rewards/issues",
    },
    author="Yuji Kanagawa",
    author_email="yuji.kngw.80s.revive@gmail.com",
    description="A collection of DRL algorithms with intrinsic rewards",
    packages=find_packages(),
    python_requires=">=3.6",
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
)
| 32.657143 | 78 | 0.63692 | import io
import re
from setuptools import find_packages, setup
# Read the package version straight out of the source tree so it is defined
# in exactly one place (int_rew/__init__.py).
with io.open("int_rew/__init__.py", "rt", encoding="utf8") as f:
    version = re.search(r"__version__ = \"(.*?)\"", f.read()).group(1)

setup(
    name="intrinsic_rewards",
    version=version,
    url="https://github.com/kngwyu/intrinsic_rewards",
    project_urls={
        "Code": "https://github.com/kngwyu/intrinsic_rewards",
        "Issue tracker": "https://github.com/kngwyu/intrinsic_rewards/issues",
    },
    author="Yuji Kanagawa",
    author_email="yuji.kngw.80s.revive@gmail.com",
    description="A collection of DRL algorithms with intrinsic rewards",
    packages=find_packages(),
    python_requires=">=3.6",
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
)
| 0 | 0 | 0 |
f39d50bf7ed0edab4d1e0f39dcd916514d0e71d7 | 1,353 | py | Python | source/location/migrations/0001_initial.py | kssvrk/teletrack | ce8e1ce0fcb333875fd54d1a69a59f265c0e2396 | [
"BSD-3-Clause"
] | null | null | null | source/location/migrations/0001_initial.py | kssvrk/teletrack | ce8e1ce0fcb333875fd54d1a69a59f265c0e2396 | [
"BSD-3-Clause"
] | null | null | null | source/location/migrations/0001_initial.py | kssvrk/teletrack | ce8e1ce0fcb333875fd54d1a69a59f265c0e2396 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-17 14:58
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
| 36.567568 | 120 | 0.604582 | # Generated by Django 3.1.2 on 2020-10-17 14:58
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `location` app: LocationUser and LocationStream
    (a GeoDjango PointField stream of positions per Telegram user)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='LocationUser',
            fields=[
                ('telegram_username', models.CharField(max_length=1000, unique=True)),
                ('locuser_id', models.AutoField(primary_key=True, serialize=False)),
                ('enable', models.BooleanField(default=False)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('text_sent', models.CharField(max_length=1000, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='LocationStream',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('last_update_time', models.DateTimeField()),
                # WGS84 point (srid 4326).
                ('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('locuser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location.locationuser')),
            ],
        ),
    ]
| 0 | 1,163 | 23 |
8c8d29861002ac69dc53ae4d2b8dbdffec7fe001 | 1,178 | py | Python | tests/symbols_test.py | JTSchwartz/chorecore-py | 10acf36ef20db4583f27926b0fc18f0270b22cfc | [
"0BSD"
] | null | null | null | tests/symbols_test.py | JTSchwartz/chorecore-py | 10acf36ef20db4583f27926b0fc18f0270b22cfc | [
"0BSD"
] | null | null | null | tests/symbols_test.py | JTSchwartz/chorecore-py | 10acf36ef20db4583f27926b0fc18f0270b22cfc | [
"0BSD"
] | null | null | null | import chorecore
| 69.294118 | 98 | 0.829372 | import chorecore
def test_fraction_to_symbol():
    """fraction_to_symbol maps both string and float fractions to Fraction members."""
    frac = chorecore.fraction.Fraction
    convert = chorecore.symbols.fraction_to_symbol
    cases = [
        ("1/2", frac.ONE_HALF),
        ("1/8", frac.ONE_EIGHTH),
        ("1/10", frac.ONE_TENTH),
        ("0.5", frac.ONE_HALF),
        ("0.75", frac.THREE_QUARTERS),
        ("0.875", frac.SEVEN_EIGHTHS),
        (1 / 2, frac.ONE_HALF),
        (1 / 8, frac.ONE_EIGHTH),
        (1 / 10, frac.ONE_TENTH),
        (0.5, frac.ONE_HALF),
        (0.75, frac.THREE_QUARTERS),
        (0.875, frac.SEVEN_EIGHTHS),
    ]
    for raw, expected in cases:
        assert convert(raw) == expected
| 1,137 | 0 | 23 |
55ab91521c1b171d35f42ae28297981c244edf1a | 735 | py | Python | Mundo2/Desafio069.py | Marcoakira/Desafios_Python_do_Curso_Guanabara | c49b774148a2232f8f3c21b83e3dc97610480757 | [
"MIT"
] | null | null | null | Mundo2/Desafio069.py | Marcoakira/Desafios_Python_do_Curso_Guanabara | c49b774148a2232f8f3c21b83e3dc97610480757 | [
"MIT"
] | null | null | null | Mundo2/Desafio069.py | Marcoakira/Desafios_Python_do_Curso_Guanabara | c49b774148a2232f8f3c21b83e3dc97610480757 | [
"MIT"
# Desafio069 quantas pessoas tem mais de 18, quantos homens, quantas mulheres com menos de 20.
# Counters start at zero (a plain 0 is clearer than int()).
maiorDeIdade = 0
homens = 0
mulheresMenor = 0
while True:
    idade = int(input('Qual a idade? '))
    sexo = input('M para masculino, e F para feminino ').strip().lower()
    if idade > 18:
        maiorDeIdade += 1
    if sexo == 'm':
        homens += 1
    # Fixed: the exercise asks for women UNDER 20 ("menos de 20"); the old
    # condition used "idade > 20" and counted the wrong group.
    if sexo == 'f' and idade < 20:
        mulheresMenor += 1
    # Normalize the answer so 'N', ' n ' etc. also stop the loop.
    saida = input('Deseja continuar? \033[2;32mS\033[m ou \033[2;32mN\033[m ').strip().lower()
    if saida == 'n':
        break
print(f'Tivemos cadastrados {maiorDeIdade} Maiores de 18 anos.')
print(f'Tivemos cadastrados {homens} homens.')
print(f'Tivemos cadastrados {mulheresMenor} mulheres com menos de 20 anos.') | 28.269231 | 94 | 0.643537 | # Desafio069 quantas pessoas tem mais de 18, quantos homens, quantas mulheres com menos de 20.
# Counters start at zero (a plain 0 is clearer than int()).
maiorDeIdade = 0
homens = 0
mulheresMenor = 0
while True:
    idade = int(input('Qual a idade? '))
    sexo = input('M para masculino, e F para feminino ').strip().lower()
    if idade > 18:
        maiorDeIdade += 1
    if sexo == 'm':
        homens += 1
    # Fixed: the exercise asks for women UNDER 20 ("menos de 20"); the old
    # condition used "idade > 20" and counted the wrong group.
    if sexo == 'f' and idade < 20:
        mulheresMenor += 1
    # Normalize the answer so 'N', ' n ' etc. also stop the loop.
    saida = input('Deseja continuar? \033[2;32mS\033[m ou \033[2;32mN\033[m ').strip().lower()
    if saida == 'n':
        break
print(f'Tivemos cadastrados {maiorDeIdade} Maiores de 18 anos.')
print(f'Tivemos cadastrados {homens} homens.')
print(f'Tivemos cadastrados {mulheresMenor} mulheres com menos de 20 anos.')
faac24cf431579bec22a2a840c0759c39b7237d8 | 2,445 | py | Python | tests/test_hebrew_python.py | matan-h/hebrew-python | 28e6ab2d14fcada9ffe228660f47ef2af244ab2a | [
"BSD-4-Clause"
] | 1 | 2021-11-11T05:17:21.000Z | 2021-11-11T05:17:21.000Z | tests/test_hebrew_python.py | matan-h/hebrew-python | 28e6ab2d14fcada9ffe228660f47ef2af244ab2a | [
"BSD-4-Clause"
] | 1 | 2021-12-27T00:39:20.000Z | 2021-12-27T00:39:20.000Z | tests/test_hebrew_python.py | matan-h/hebrew-python | 28e6ab2d14fcada9ffe228660f47ef2af244ab2a | [
"BSD-4-Clause"
] | null | null | null | import builtins
import sys
import unittest
import hebrew_python.hook as hepy
from io import StringIO
from contextlib import contextmanager
import re
# for debug the test:
true_stdout = sys.stdout
true_stderr = sys.stderr
DEBUG = False
if DEBUG:
from ddebug import dd
dd.add_output_folder(with_errors=False)
try:
import friendly_traceback
except ImportError:
friendly_traceback = None
@contextmanager
if __name__ == '__main__':
unittest.main()
| 27.47191 | 91 | 0.610634 | import builtins
import sys
import unittest
import hebrew_python.hook as hepy
from io import StringIO
from contextlib import contextmanager
import re
# for debug the test:
true_stdout = sys.stdout
true_stderr = sys.stderr
DEBUG = False
if DEBUG:
from ddebug import dd
dd.add_output_folder(with_errors=False)
try:
import friendly_traceback
except ImportError:
friendly_traceback = None
@contextmanager
def Output():
    """Temporarily capture sys.stdout/sys.stderr in StringIO buffers.

    Yields the (stdout_buffer, stderr_buffer) pair. The real streams are
    restored on exit even if the body raises. Read captured text with
    .getvalue() *inside* the with-block: both buffers are closed afterwards.
    """
    saved_streams = (sys.stdout, sys.stderr)
    with StringIO() as out_buf, StringIO() as err_buf:
        sys.stdout, sys.stderr = out_buf, err_buf
        try:
            yield out_buf, err_buf
        finally:
            sys.stdout, sys.stderr = saved_streams
class TestHebrewPython(unittest.TestCase):
    """End-to-end tests for the hebrew_python import hook and Hebrew builtins."""
    # Matches ANSI escape sequences (useful for stripping colored terminal output).
    ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
    def setUp(self):
        # Install the Hebrew builtins/keywords before every test.
        hepy.setup()
        self.assertIn("הראה", hepy.hebrew_builtins)
    def test_keywords_transform(self):
        # Hebrew keywords are transformed to their Python equivalents.
        hook = hepy.create_hook()
        s = "אמת וגם שקר"
        en = hook.transform_source(s)
        self.assertEqual(en, "True and False")
    def test_builtins(self):
        self.assertIn("הראה", hepy.hebrew_builtins)
        with Output() as (std, _):
            hepy.exec_code("הראה('OK')", '<test>', {}, builtins, {})
        # NOTE(review): Output() closes its StringIO buffers on exit, so this
        # getvalue() after the with-block may hit a closed buffer -- confirm.
        self.assertEqual(std.getvalue(), "OK\n")
    def test_errors(self):
        # Trigger a real exception and feed it to the custom excepthook while
        # both standard streams and the rich console output are captured.
        try:
            1 / 0
        except ZeroDivisionError:
            with Output() as (stdout, stderr):
                # hepy.error_hook.excepthook = dd(hepy.error_hook.excepthook,call_type="@")
                with hepy.error_hook.rich.get_console().capture() as capture:
                    hepy.error_hook.excepthook(*sys.exc_info())
                value = capture.get()
            # The friendly banner is only emitted when the optional
            # friendly_traceback dependency imported successfully.
            if friendly_traceback:
                self.assertIn("ידידותי", value)
    def test_basic_file(self):
        # sys.argv = [sys.argv[0], "basic_file.hepy", sys.argv[1:]]
        hepy.create_hook(False, console=False)
        from . import basic_file
        n = basic_file.main()
        heb_min = basic_file.מינימום
        heb_max = basic_file.מקסימום
        self.assertIn(n, range(heb_min, heb_max + 1))
    def test_import_file(self):
        # Importing a .hepy module through the installed hook works transitively.
        hepy.create_hook(False, console=False)
        from . import import_file
        self.assertTrue(import_file.בודק())
if __name__ == '__main__':
unittest.main()
| 1,722 | 254 | 45 |
cac639136f54f0b421674840cca71ea62ce9f26d | 1,176 | py | Python | Q01__/46_LRU_Cache/Solution.py | hsclinical/leetcode | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | [
"Apache-2.0"
] | null | null | null | Q01__/46_LRU_Cache/Solution.py | hsclinical/leetcode | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | [
"Apache-2.0"
] | null | null | null | Q01__/46_LRU_Cache/Solution.py | hsclinical/leetcode | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | [
"Apache-2.0"
] | null | null | null |
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
class LRUCache:
    """Least-recently-used cache with O(1) get/put.

    Reimplemented on top of a single insertion-ordered dict (guaranteed in
    Python >= 3.7): the first key in ``self.dataDict`` is always the least
    recently used one. This replaces the previous parallel-list bookkeeping,
    whose ``in``/``index``/``remove``/``pop(0)`` calls made every operation
    O(n). The public interface (constructor, get, put) is unchanged.
    """

    def __init__(self, capacity: int):
        # Ordered mapping key -> value; iteration order is LRU -> MRU.
        self.dataDict = {}
        self.capacity = capacity

    def get(self, key: int) -> int:
        """Return the value for *key* and mark it most recently used; -1 if absent."""
        if key not in self.dataDict:
            return -1
        # Re-inserting moves the key to the MRU end of the dict.
        value = self.dataDict.pop(key)
        self.dataDict[key] = value
        return value

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*, evicting the least recently used entry when full."""
        if key in self.dataDict:
            # Remove first so the re-insert below refreshes its recency.
            self.dataDict.pop(key)
        elif len(self.dataDict) >= self.capacity:
            # Evict the LRU entry: the first key in insertion order.
            self.dataDict.pop(next(iter(self.dataDict)))
        self.dataDict[key] = value
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value) | 933 | -6 | 108 |
32d3f498b5b97c7424f3051a49ccac8f836f7aca | 4,115 | py | Python | astrom/kmtn-astrometry/kmtn_astrom.py | changsuchoi/cspy | 9fa8f125bed368f636ea19180e742f8304bbc432 | [
"MIT"
] | null | null | null | astrom/kmtn-astrometry/kmtn_astrom.py | changsuchoi/cspy | 9fa8f125bed368f636ea19180e742f8304bbc432 | [
"MIT"
] | null | null | null | astrom/kmtn-astrometry/kmtn_astrom.py | changsuchoi/cspy | 9fa8f125bed368f636ea19180e742f8304bbc432 | [
"MIT"
] | null | null | null | # python code for KMTNET data astrometry (specially CTIO data)
# read kmtnet_astrom.txt first to understand the order and process
# 2015.09.17 Changsu Choi
from astropy.io import ascii
import numpy as np
import os,sys
from astropy.io import fits
import astropy.units as u
import astropy.coordinates as coord
import astropy.units as u
import subprocess
import pp
#os.system('gethead kmtc.20150218.00503*.fits ra dec filter object exptime date-obs > info.txt')
info=ascii.read('info.txt')
addlist=info['col1']
ra=info['col2']
dec=info['col3']
filters=info['col4']
obj=info['col5']
exptime=info['col6']
dateobs=info['col7']
'''
def mefcr :
num=addlist[n][14:-5]
rad=coord.Angle(ra[n],unit=u.hour)
radd=rad.degree
decd=coord.Angle(dec[n],unit=u.deg)
decdd=decd.degree
# python
makemef='python kmtn_makemef.py '+num
resetcrval='python kmtn_resetcrval.py '+ num+'.fits -c '+str(radd)+','+str(decdd)
os.system(makemef)
os.system(resetcrval)
'''
for n in range(len(addlist)):
num=addlist[n][14:-5]
rad=coord.Angle(ra[n],unit=u.hour)
radd=rad.degree
decd=coord.Angle(dec[n],unit=u.deg)
decdd=decd.degree
# python
makemef='python kmtn_makemef.py '+num
resetcrval='python kmtn_resetcrval.py '+ num+'.fits -c '+str(radd)+','+str(decdd)
os.system(makemef)
os.system(resetcrval)
# sextractor
#sexcom= 'sex '+num+'.fits -c kmtnet.sex -CATALOG_NAME '+num+'.cat -HEADER_SUFFIX NONE -DETECT_THRESH 50.0 -ANALYSIS_THRESH 50.0 -SATUR_LEVEL 60000.0 -WEIGHT_TYPE MAP_WEIGHT -WEIGHT_IMAGE weight.fits'
#scampcom='scamp '+num+'.cat -c kmtnet.scamp -ASTREF_CATALOG 2MASS -POSITION_MAXERR 20.0 -CROSSID_RADIUS 5.0 -DISTORT_DEGREES 3 -PROJECTION_TYPE TPV -AHEADER_GLOBAL kmtnet_global_ctio.ahead -CHECKPLOT_TYPE NONE'
## sextractor and scamp
for n in range(len(addlist)):
sexscamp(addlist[n])
## final file making
for files in addlist : set4astrom(files)
'''
## header edition
for n in range(len(addlist)):
num=addlist[n][14:-5]
hdr=fits.getheader(addlist[n])
data=fits.getdata(num+'.fits')
hdr.fromTxtFile('006022.head')
newfile='a'+addlist[n]
fits.writeto(newfile,data,hdr,clobber=True)
'''
| 30.708955 | 380 | 0.72661 | # python code for KMTNET data astrometry (specially CTIO data)
# read kmtnet_astrom.txt first to understand the order and process
# 2015.09.17 Changsu Choi
from astropy.io import ascii
import numpy as np
import os,sys
from astropy.io import fits
import astropy.units as u
import astropy.coordinates as coord
import astropy.units as u
import subprocess
import pp
#os.system('gethead kmtc.20150218.00503*.fits ra dec filter object exptime date-obs > info.txt')
info=ascii.read('info.txt')
addlist=info['col1']
ra=info['col2']
dec=info['col3']
filters=info['col4']
obj=info['col5']
exptime=info['col6']
dateobs=info['col7']
'''
def mefcr :
num=addlist[n][14:-5]
rad=coord.Angle(ra[n],unit=u.hour)
radd=rad.degree
decd=coord.Angle(dec[n],unit=u.deg)
decdd=decd.degree
# python
makemef='python kmtn_makemef.py '+num
resetcrval='python kmtn_resetcrval.py '+ num+'.fits -c '+str(radd)+','+str(decdd)
os.system(makemef)
os.system(resetcrval)
'''
for n in range(len(addlist)):
num=addlist[n][14:-5]
rad=coord.Angle(ra[n],unit=u.hour)
radd=rad.degree
decd=coord.Angle(dec[n],unit=u.deg)
decdd=decd.degree
# python
makemef='python kmtn_makemef.py '+num
resetcrval='python kmtn_resetcrval.py '+ num+'.fits -c '+str(radd)+','+str(decdd)
os.system(makemef)
os.system(resetcrval)
# sextractor
#sexcom= 'sex '+num+'.fits -c kmtnet.sex -CATALOG_NAME '+num+'.cat -HEADER_SUFFIX NONE -DETECT_THRESH 50.0 -ANALYSIS_THRESH 50.0 -SATUR_LEVEL 60000.0 -WEIGHT_TYPE MAP_WEIGHT -WEIGHT_IMAGE weight.fits'
#scampcom='scamp '+num+'.cat -c kmtnet.scamp -ASTREF_CATALOG 2MASS -POSITION_MAXERR 20.0 -CROSSID_RADIUS 5.0 -DISTORT_DEGREES 3 -PROJECTION_TYPE TPV -AHEADER_GLOBAL kmtnet_global_ctio.ahead -CHECKPLOT_TYPE NONE'
def sexscamp(files) :
	"""Run SExtractor then SCAMP on one KMTNet frame via os.system.

	files: raw frame filename, e.g. 'kmtc.20150218.005033.fits'; the run
	number is sliced out of it below. Writes <num>.cat and SCAMP header /
	check-plot files into the working directory.
	"""
	# NOTE(review): 'threshold' is never used; DETECT/ANALYSIS_THRESH are
	# hard-coded to 20.0 in the command string below.
	threshold='30'
	# Strip the 'kmtc.YYYYMMDD.' prefix (14 chars) and the '.fits' suffix.
	num=files[14:-5]
	# Source extraction weighted by weight.fits; catalog is <num>.cat.
	sexcom= 'sex '+num+'.fits -c kmtnet.sex -CATALOG_NAME '+num+'.cat -HEADER_SUFFIX NONE -DETECT_THRESH 20.0 -ANALYSIS_THRESH 20.0 -SATUR_LEVEL 60000.0 -WEIGHT_TYPE MAP_WEIGHT -WEIGHT_IMAGE weight.fits'
	# Astrometric solution against UCAC-3, TPV projection, degree-3 distortion.
	scampcom='scamp '+num+'.cat -c kmtnet.scamp -ASTREF_CATALOG UCAC-3 -POSITION_MAXERR 20.0 -CROSSID_RADIUS 5.0 -DISTORT_DEGREES 3 -PROJECTION_TYPE TPV -AHEADER_GLOBAL kmtnet_global_ctio.ahead -CHECKPLOT_TYPE ASTR_REFSYSMAP,FGROUPS,DISTORTION,ASTR_REFERROR2D,ASTR_REFERROR1D -CHECKPLOT_NAME astr_refsysmap,fgroups,distort,astr_referror2d,astr_referror1d -STABILITY_TYPE INSTRUMENT'
	os.system(sexcom)
	os.system(scampcom)
def set4astrom(files) :
	"""Split the SCAMP .head solution into the four KMTNet chips (kk/mm/nn/tt),
	attach each 51-line header section to its chip image, then co-add with
	SWarp and open the result in ds9.

	NOTE(review): this is Python 2 code ('print files'); the fixed 51-line
	stride assumes a constant SCAMP header layout -- confirm before reuse.
	"""
	from astropy.io import fits
	from astropy.io import ascii
	print files
	# Strip the 'kmtc.YYYYMMDD.' prefix (14 chars) and the '.fits' suffix.
	num=files[14:-5]
	# Read the combined SCAMP header solution covering all four chips.
	f=open(num+'.head','r')
	lines=f.readlines()
	f.close()
	# Chip kk: header lines 0-50 -> new WCS written into a<num>.kk.fits.
	f=open(num+'.kk.head','w')
	for line in lines[0:51] : f.write(line)
	f.close()
	data=fits.getdata(num+'.kk.fits')
	hdr=fits.getheader(num+'.kk.fits')
	hdr.fromTxtFile(num+'.kk.head')
	newfile='a'+num+'.kk.fits'
	fits.writeto(newfile,data,hdr,clobber=True)
	# Chip mm: header lines 51-101.
	f=open(num+'.mm.head','w')
	for line in lines[51:102] : f.write(line)
	f.close()
	data=fits.getdata(num+'.mm.fits')
	hdr=fits.getheader(num+'.mm.fits')
	hdr.fromTxtFile(num+'.mm.head')
	newfile='a'+num+'.mm.fits'
	fits.writeto(newfile,data,hdr,clobber=True)
	# Chip nn: header lines 102-152.
	f=open(num+'.nn.head','w')
	for line in lines[102:153] : f.write(line)
	f.close()
	data=fits.getdata(num+'.nn.fits')
	hdr=fits.getheader(num+'.nn.fits')
	hdr.fromTxtFile(num+'.nn.head')
	newfile='a'+num+'.nn.fits'
	fits.writeto(newfile,data,hdr,clobber=True)
	# Chip tt: remaining header lines.
	f=open(num+'.tt.head','w')
	for line in lines[153:] : f.write(line)
	f.close()
	data=fits.getdata(num+'.tt.fits')
	hdr=fits.getheader(num+'.tt.fits')
	hdr.fromTxtFile(num+'.tt.head')
	newfile='a'+num+'.tt.fits'
	fits.writeto(newfile,data,hdr,clobber=True)
	# Mosaic the four solved chips and inspect the result.
	os.system('swarp -c kmtnet.swarp a'+num+'*.fits -IMAGEOUT_NAME a'+files)
	os.system('ds9 a'+files+' &')
## sextractor and scamp
for n in range(len(addlist)):
sexscamp(addlist[n])
## final file making
for files in addlist : set4astrom(files)
'''
## header edition
for n in range(len(addlist)):
num=addlist[n][14:-5]
hdr=fits.getheader(addlist[n])
data=fits.getdata(num+'.fits')
hdr.fromTxtFile('006022.head')
newfile='a'+addlist[n]
fits.writeto(newfile,data,hdr,clobber=True)
'''
| 1,952 | 0 | 46 |
da23ddba88a387143815bdc7741f07171f29808d | 823 | py | Python | sksurgeryspeech/ui/sksurgeryspeech_command_line.py | UCL/scikit-surgeryspeech | 4a2dcec3df98969a95863dc91fb5caaeb2d4a238 | [
"BSD-3-Clause"
] | null | null | null | sksurgeryspeech/ui/sksurgeryspeech_command_line.py | UCL/scikit-surgeryspeech | 4a2dcec3df98969a95863dc91fb5caaeb2d4a238 | [
"BSD-3-Clause"
] | 23 | 2020-06-15T11:06:17.000Z | 2020-12-01T14:41:44.000Z | sksurgeryspeech/ui/sksurgeryspeech_command_line.py | SciKit-Surgery/scikit-surgeryspeech | 4a2dcec3df98969a95863dc91fb5caaeb2d4a238 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
"""Command line processing"""
import argparse
from sksurgeryspeech import __version__
from sksurgeryspeech.ui import sksurgeryspeech_demo
def main(args=None):
    """Entry point for scikit-surgeryspeech application"""
    parser = argparse.ArgumentParser(description='scikit-surgeryspeech')
    # Fall back to 'unknown' when the package version string is empty/None.
    version_string = __version__
    friendly_version_string = version_string if version_string else 'unknown'
    parser.add_argument(
        "--version",
        action='version',
        version='scikit-surgeryspeech version ' + friendly_version_string)
    parser.add_argument(
        "-c", "--config",
        required=True,
        type=str,
        help="Configuration file")
    # args=None makes argparse read sys.argv[1:]; passing a list eases testing.
    args = parser.parse_args(args)
    demo = sksurgeryspeech_demo.SpeechRecognitionDemo(args.config)
    demo.run_demo()
| 24.939394 | 77 | 0.708384 | # coding=utf-8
"""Command line processing"""
import argparse
from sksurgeryspeech import __version__
from sksurgeryspeech.ui import sksurgeryspeech_demo
def main(args=None):
    """Entry point for scikit-surgeryspeech application"""
    # Fall back to 'unknown' when the package version string is empty/None.
    version_label = __version__ if __version__ else 'unknown'
    parser = argparse.ArgumentParser(description='scikit-surgeryspeech')
    parser.add_argument(
        "--version",
        action='version',
        version='scikit-surgeryspeech version ' + version_label)
    parser.add_argument(
        "-c", "--config",
        required=True,
        type=str,
        help="Configuration file")
    # args=None makes argparse read sys.argv[1:].
    parsed = parser.parse_args(args)
    sksurgeryspeech_demo.SpeechRecognitionDemo(parsed.config).run_demo()
| 0 | 0 | 0 |
b5579edae01fcdc963d77a96b7ec45ad274f61de | 4,862 | py | Python | scripts/pytorch_wrapper.py | JLivingston01/py_research | 928f74287039a933d27c5a5dc3df8db4cb79c152 | [
"MIT"
] | 1 | 2022-02-21T00:47:41.000Z | 2022-02-21T00:47:41.000Z | scripts/pytorch_wrapper.py | JLivingston01/py_research | 928f74287039a933d27c5a5dc3df8db4cb79c152 | [
"MIT"
] | null | null | null | scripts/pytorch_wrapper.py | JLivingston01/py_research | 928f74287039a933d27c5a5dc3df8db4cb79c152 | [
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import TensorDataset,DataLoader
X = np.random.normal(5,2,(10000,30))
Y = X@np.random.normal(1,2,30)
model = NeuralNetwork(
configuration= nn.Sequential(
nn.Linear(30,10),
nn.Sigmoid(),
nn.Linear(10,1)
),
loss_fn = torch.nn.modules.loss.L1Loss(),
# loss_fn=torch.nn.modules.loss.MSELoss(),
optimizer = torch.optim.SGD,
lr = 1e-2,
batch_size = 200,
epochs=100
).to('cpu')
model.fit(X,Y)
pred = model.predict(X)
Y_torch = torch.from_numpy(Y.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
1-sum((Y_torch-pred)**2)/sum((Y_torch-torch.mean(Y_torch))**2)
###
training_data = pd.read_csv("c:/users/jliv/downloads/mnist_train.csv")
testing_data = pd.read_csv("c:/users/jliv/downloads/mnist_test.csv")
cols = ['label']+['col_'+str(i) for i in range(len(training_data.columns)-1)]
training_data.columns = cols
testing_data.columns = cols
training_labels=training_data['label']
testing_labels=testing_data['label']
training_data.drop(['label'],inplace=True,axis=1)
testing_data.drop(['label'],inplace=True,axis=1)
training_data=np.array(training_data).reshape(59999,1,28,28)
testing_data=np.array(testing_data).reshape(9999,1,28,28)
import matplotlib.pyplot as plt
plt.imshow(training_data[0][0])
plt.show()
training_labels=np.array(training_labels)
testing_labels=np.array(testing_labels)
model = NeuralNetwork(
configuration= nn.Sequential(
nn.Conv2d(1, 1, kernel_size=4, stride=2, padding=2),
nn.ReLU(),
nn.AdaptiveAvgPool2d(16),
nn.Flatten(),
nn.Linear(16*16, 10),
nn.Sigmoid()
),
loss_fn = torch.nn.modules.loss.CrossEntropyLoss(),
optimizer = torch.optim.SGD,
lr = 1e-2,
batch_size = 200,
epochs=1
).to('cpu')
model.fit(training_data,training_labels)
pred=np.argmax(model.predict(training_data),axis=1)
Y_torch = torch.from_numpy(training_labels.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
np.mean(np.where(Y_torch==pred,1,0))
pred=np.argmax(model.predict(testing_data),axis=1)
Y_torch = torch.from_numpy(testing_labels.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
np.mean(np.where(Y_torch==pred,1,0))
| 24.31 | 78 | 0.569724 |
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import TensorDataset,DataLoader
class NeuralNetwork(nn.Module):
    """Small scikit-learn-style wrapper (fit/predict) around an arbitrary
    ``nn.Module`` stack.

    Inputs are accepted as numpy arrays and converted to float32 tensors;
    all computation is pinned to 'cpu'.
    """
    def __init__(self,
                 configuration,
                 loss_fn,
                 optimizer,
                 lr,
                 batch_size=200,
                 epochs=500):
        # configuration: an nn.Module (e.g. nn.Sequential) used as the forward stack.
        # loss_fn: an *instantiated* torch loss.
        # optimizer: an optimizer *class* (e.g. torch.optim.SGD); instantiated below.
        super(NeuralNetwork,self).__init__()
        self.stack=configuration
        self.loss_fn=loss_fn
        self.optimizer=optimizer
        self.lr=lr
        self.batch_size=batch_size
        self.epochs=epochs
        self.set_optimizer()
    def set_optimizer(self):
        # Replace the stored optimizer *class* with an instance bound to this
        # module's parameters.
        # NOTE(review): after this call self.optimizer is no longer a class,
        # so calling set_optimizer() a second time would fail.
        optimizer = self.optimizer(self.parameters(),lr=self.lr)
        self.optimizer=optimizer
    def forward(self,x):
        # Forward pass simply delegates to the configured stack.
        logits=self.stack(x)
        return logits
    def fit_one(self,X,Y):
        """Run one epoch of minibatch training over (X, Y) and print the mean loss."""
        X_torch=torch.from_numpy(np.array(X).astype(np.float32))
        Y_torch=torch.from_numpy(np.array(Y).astype(np.float32))
        # NOTE(review): targets are cast to int64 for every loss except MSELoss.
        # That is right for CrossEntropyLoss but looks wrong for e.g. L1Loss
        # regression (float targets) -- confirm the intended condition.
        if type(self.loss_fn).__name__!='MSELoss':
            Y_torch = Y_torch.type(torch.LongTensor)
        train_ds=TensorDataset(X_torch,Y_torch)
        train_dataloader = DataLoader(train_ds,self.batch_size)
        losses = []
        for batch,(xt,yt) in enumerate(train_dataloader):
            X1,y1=xt.to('cpu'),yt.to('cpu')
            pred = self(X1)
            # CrossEntropyLoss wants (N, C) logits vs (N,) class targets;
            # other losses compare tensors of identical shape, hence the reshape.
            if type(self.loss_fn).__name__!='CrossEntropyLoss':
                loss = self.loss_fn(pred.reshape(y1.shape),y1)
            else:
                loss = self.loss_fn(pred,y1)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            losses.append(loss.item())
        print('loss:', np.mean(losses))
    def fit(self,X,Y):
        """Train for self.epochs epochs, logging a banner every 100th epoch."""
        for i in range(self.epochs):
            self.fit_one(X,Y)
            if i%100==0:
                print(f"Epoch {i+1}\n--------")
    def predict(self,X):
        """Return predictions for X as a tensor, without tracking gradients."""
        X_torch=torch.from_numpy(np.array(X).astype(np.float32))
        ds = TensorDataset(X_torch)
        dataloader = DataLoader(ds,batch_size = len(ds))
        # Batch size equals the dataset size, so this loop runs exactly once.
        with torch.no_grad():
            for xt in dataloader:
                X1=xt[0].to('cpu')
                pred = self(X1)
        return pred
X = np.random.normal(5,2,(10000,30))
Y = X@np.random.normal(1,2,30)
model = NeuralNetwork(
configuration= nn.Sequential(
nn.Linear(30,10),
nn.Sigmoid(),
nn.Linear(10,1)
),
loss_fn = torch.nn.modules.loss.L1Loss(),
# loss_fn=torch.nn.modules.loss.MSELoss(),
optimizer = torch.optim.SGD,
lr = 1e-2,
batch_size = 200,
epochs=100
).to('cpu')
model.fit(X,Y)
pred = model.predict(X)
Y_torch = torch.from_numpy(Y.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
1-sum((Y_torch-pred)**2)/sum((Y_torch-torch.mean(Y_torch))**2)
###
training_data = pd.read_csv("c:/users/jliv/downloads/mnist_train.csv")
testing_data = pd.read_csv("c:/users/jliv/downloads/mnist_test.csv")
cols = ['label']+['col_'+str(i) for i in range(len(training_data.columns)-1)]
training_data.columns = cols
testing_data.columns = cols
training_labels=training_data['label']
testing_labels=testing_data['label']
training_data.drop(['label'],inplace=True,axis=1)
testing_data.drop(['label'],inplace=True,axis=1)
training_data=np.array(training_data).reshape(59999,1,28,28)
testing_data=np.array(testing_data).reshape(9999,1,28,28)
import matplotlib.pyplot as plt
plt.imshow(training_data[0][0])
plt.show()
training_labels=np.array(training_labels)
testing_labels=np.array(testing_labels)
model = NeuralNetwork(
configuration= nn.Sequential(
nn.Conv2d(1, 1, kernel_size=4, stride=2, padding=2),
nn.ReLU(),
nn.AdaptiveAvgPool2d(16),
nn.Flatten(),
nn.Linear(16*16, 10),
nn.Sigmoid()
),
loss_fn = torch.nn.modules.loss.CrossEntropyLoss(),
optimizer = torch.optim.SGD,
lr = 1e-2,
batch_size = 200,
epochs=1
).to('cpu')
model.fit(training_data,training_labels)
pred=np.argmax(model.predict(training_data),axis=1)
Y_torch = torch.from_numpy(training_labels.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
np.mean(np.where(Y_torch==pred,1,0))
pred=np.argmax(model.predict(testing_data),axis=1)
Y_torch = torch.from_numpy(testing_labels.astype(np.float32))
Y_torch = Y_torch.reshape(pred.shape)
np.mean(np.where(Y_torch==pred,1,0))
| 2,132 | 10 | 225 |
b56b84899f7a80aac99a7d6d79d83e0d3df834a8 | 958 | py | Python | scraper/storage_spiders/kkfashionvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/kkfashionvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/kkfashionvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='mo-ta']/p[1]",
'price' : "//span[@class='item_price']",
'category' : "//div[@class='tieu-de']/h1/a",
'description' : "//div[@id='my-cls-ajax']/table//tr[2]/td[3]",
'images' : "//div[@id='picture']/img[@id='large_image']/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'kkfashion.vn'
allowed_domains = ['kkfashion.vn']
start_urls = ['http://kkfashion.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow = ['/shop-online+/[a-zA-Z0-9_-]+\.html',''], deny = ['Huong_Dan']), 'parse_item'),
Rule(LinkExtractor(allow = ['/[a-zA-Z0-9-_]+\.html'], deny = ['Huong_Dan']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 35.481481 | 111 | 0.609603 | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='mo-ta']/p[1]",
'price' : "//span[@class='item_price']",
'category' : "//div[@class='tieu-de']/h1/a",
'description' : "//div[@id='my-cls-ajax']/table//tr[2]/td[3]",
'images' : "//div[@id='picture']/img[@id='large_image']/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'kkfashion.vn'
allowed_domains = ['kkfashion.vn']
start_urls = ['http://kkfashion.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow = ['/shop-online+/[a-zA-Z0-9_-]+\.html',''], deny = ['Huong_Dan']), 'parse_item'),
Rule(LinkExtractor(allow = ['/[a-zA-Z0-9-_]+\.html'], deny = ['Huong_Dan']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 0 | 0 | 0 |
209e7d84d4d078f6bb157ff3081913b22bb20177 | 835 | py | Python | Python/benchmarking/struct_deserialization.py | felixbinder/tdw | eb2b00b74b9fcf8ef2dcba1baa62424640c520b1 | [
"BSD-2-Clause"
] | 307 | 2020-05-20T18:08:49.000Z | 2022-03-21T19:55:08.000Z | Python/benchmarking/struct_deserialization.py | felixbinder/tdw | eb2b00b74b9fcf8ef2dcba1baa62424640c520b1 | [
"BSD-2-Clause"
] | 92 | 2020-07-21T18:29:13.000Z | 2022-03-28T07:25:54.000Z | Python/benchmarking/struct_deserialization.py | felixbinder/tdw | eb2b00b74b9fcf8ef2dcba1baa62424640c520b1 | [
"BSD-2-Clause"
] | 53 | 2020-07-14T15:55:17.000Z | 2022-03-20T16:20:01.000Z | from time import time
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
"""
Benchmark the speed of deserializing structs (such as Vector3 and Quaternion).
"""
if __name__ == "__main__":
o_id = 0
cmds = [{"$type": "teleport_object",
"position": {"x": 0, "y": 0, "z": 0},
"id": o_id},
{"$type": "rotate_object_to",
"rotation": {"w": 1, "x": 0, "y": 0, "z": 0},
"id": o_id}]
c = Controller()
c.start()
c.communicate([TDWUtils.create_empty_room(12, 12),
c.get_add_object("rh10", object_id=o_id)])
num_trials = 5000
t0 = time()
for i in range(num_trials):
c.communicate(cmds)
fps = (num_trials / (time() - t0))
print(f"FPS: {round(fps)}")
c.communicate({"$type": "terminate"})
| 26.935484 | 78 | 0.553293 | from time import time
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
"""
Benchmark the speed of deserializing structs (such as Vector3 and Quaternion).
"""
if __name__ == "__main__":
o_id = 0
cmds = [{"$type": "teleport_object",
"position": {"x": 0, "y": 0, "z": 0},
"id": o_id},
{"$type": "rotate_object_to",
"rotation": {"w": 1, "x": 0, "y": 0, "z": 0},
"id": o_id}]
c = Controller()
c.start()
c.communicate([TDWUtils.create_empty_room(12, 12),
c.get_add_object("rh10", object_id=o_id)])
num_trials = 5000
t0 = time()
for i in range(num_trials):
c.communicate(cmds)
fps = (num_trials / (time() - t0))
print(f"FPS: {round(fps)}")
c.communicate({"$type": "terminate"})
| 0 | 0 | 0 |
f356fa4c071df3368258b586764b644585a00714 | 141 | py | Python | exercises/en/exc_01_03.py | kaseyhackspace/eng-course-sample | 0dd3821f34b9111eb4de8f0f24b7516061407caf | [
"MIT"
] | null | null | null | exercises/en/exc_01_03.py | kaseyhackspace/eng-course-sample | 0dd3821f34b9111eb4de8f0f24b7516061407caf | [
"MIT"
] | null | null | null | exercises/en/exc_01_03.py | kaseyhackspace/eng-course-sample | 0dd3821f34b9111eb4de8f0f24b7516061407caf | [
"MIT"
] | null | null | null | import numpy as np
q=__
l=__
x = np.linspace(0,l,__)
M = q/2*(l*x-x**2)
V = q*(1/2-x)
print("Moment")
print(__)
print("Shear")
print(__) | 10.071429 | 23 | 0.588652 | import numpy as np
q=__
l=__
x = np.linspace(0,l,__)
M = q/2*(l*x-x**2)
V = q*(1/2-x)
print("Moment")
print(__)
print("Shear")
print(__) | 0 | 0 | 0 |
fc40c4432ff8ca12685d2abc38ad34faf14a4a57 | 3,654 | py | Python | ybk/frontend/calendar.py | sopnic/ybk | e829aeed0ad867e62b372df357157f9b72bb4b15 | [
"MIT"
] | null | null | null | ybk/frontend/calendar.py | sopnic/ybk | e829aeed0ad867e62b372df357157f9b72bb4b15 | [
"MIT"
] | null | null | null | ybk/frontend/calendar.py | sopnic/ybk | e829aeed0ad867e62b372df357157f9b72bb4b15 | [
"MIT"
] | 1 | 2021-06-23T19:19:03.000Z | 2021-06-23T19:19:03.000Z | from datetime import datetime, timedelta
from collections import defaultdict
from flask import render_template, request
from flask.ext.login import login_required
from ybk.models import Collection
from ybk.settings import get_conf
from .views import frontend
@frontend.route('/calendar/')
@login_required
| 35.134615 | 78 | 0.500274 | from datetime import datetime, timedelta
from collections import defaultdict
from flask import render_template, request
from flask.ext.login import login_required
from ybk.models import Collection
from ybk.settings import get_conf
from .views import frontend
@frontend.route('/calendar/')
@login_required
def calendar():
    """Render an 11-day offering calendar (3 days back from today by default).

    Query args ``starts_at``/``ends_at`` are 'YYYYMMDD' strings; dates are
    treated as Beijing time (UTC+8).
    """
    nav = 'calendar'
    starts_at = request.args.get('starts_at')
    ends_at = request.args.get('ends_at')
    if starts_at:
        starts_at = datetime.strptime(starts_at, '%Y%m%d')
    # "Today" in UTC+8, truncated to midnight.
    today = datetime.utcnow() + timedelta(hours=8)
    today = today.replace(hour=0, minute=0, second=0, microsecond=0)
    if not starts_at:
        starts_at = today - timedelta(days=3)
    # NOTE(review): the ends_at query arg read above is always overwritten here.
    ends_at = starts_at + timedelta(days=10)
    # Table header: one (weekday name, 'month/day') pair per visible day.
    heads = []
    d = starts_at
    while d <= ends_at:
        heads.append(('周' + '一二三四五六日'[d.weekday()],
                      '{}/{}'.format(d.month, d.day)))
        d += timedelta(days=1)
    # Table body.
    exs = []  # exchanges, one table row each
    rowdict = defaultdict(list)  # exchange -> list of per-day cells
    seen = set()
    ddict = {}  # exchange -> next date not yet covered by a cell
    for c in Collection.query({'offers_at': {'$gte': starts_at,
                                             '$lte': ends_at}},
                              sort=[('offers_at', 1)]):
        # Process each (exchange, offer date) pair only once.
        if (c.exchange, c.offers_at) in seen:
            continue
        seen.add((c.exchange, c.offers_at))
        if c.exchange not in exs:
            exs.append(c.exchange)
        d = ddict.get(c.exchange, starts_at)
        while d < c.cashout_at:
            if d >= c.offers_at and d < c.cashout_at:
                # Offer window: one multi-day cell spanning offers_at..cashout_at,
                # clipped at the right edge of the visible range.
                cs = list(Collection.query({'offers_at': c.offers_at,
                                            'exchange': c.exchange}))
                ndays = (c.cashout_at - c.offers_at).days
                if c.offers_at + timedelta(days=ndays) > ends_at:
                    ndays = (ends_at - c.offers_at).days + 1
                rowdict[c.exchange].append({'colspan': ndays,
                                            'exchange': c.exchange,
                                            'count': len(cs),
                                            'cs': cs,
                                            'symbols':
                                            ','.join([c.symbol for c in cs])})
                ddict[c.exchange] = c.cashout_at
                break
            else:
                # Day without an offering for this exchange: empty 1-day cell.
                rowdict[c.exchange].append({'colspan': 1})
                d += timedelta(days=1)
    banks = {}
    details = {}
    for ex in ddict:
        # Pad each row with empty cells up to the 11 visible columns.
        d = ddict[ex]
        while d <= ends_at:
            spans = sum(x['colspan'] for x in rowdict[ex])
            if spans < 11:
                rowdict[ex].append({'colspan': 1})
            d += timedelta(days=1)
        c = get_conf(ex)
        banks[ex] = c['opening']['bank']
        details[ex] = {}
        # Per-collection detail data for every offering cell in this row.
        for cell in rowdict[ex]:
            if 'cs' in cell:
                for c in cell['cs']:
                    details[ex][c.symbol] = {
                        'name': c.name,
                        'price': c.offer_price,
                        'offer_cash': c.offer_cash or 0,
                        'expected_ratio': c.expected_result_cash_ratio or 0,
                        'expected_revenue': c.expected_annual_profit or 0,
                    }
    if not exs:
        exs = ['无申购']
    prev_starts_at = (starts_at - timedelta(days=10)).strftime('%Y%m%d')
    next_starts_at = (starts_at + timedelta(days=10)).strftime('%Y%m%d')
    thisdate = (datetime.utcnow() + timedelta(hours=8))
    thisdate = '{}/{}'.format(thisdate.month, thisdate.day)
    # locals() hands every name defined above to the template.
    return render_template('frontend/calendar.html', **locals())
| 3,380 | 0 | 22 |
d74b5019d83c90d0f1d6c091d0a5982297146bcc | 350 | py | Python | SomeCode/Fibonacci_Sequence/fib_recursive.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | SomeCode/Fibonacci_Sequence/fib_recursive.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | SomeCode/Fibonacci_Sequence/fib_recursive.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null |
for c in range(1, 50):
print(c, ":", fibonacci(c))
def fibonacci(n):
    """Return the n-th term of this Fibonacci variant (1, 2, 3, 5, 8, ...).

    Iterative rewrite: the previous naive double recursion was O(2**n), which
    makes the driver loop below (n up to 49) take effectively forever; this
    version runs in O(n) and returns identical values.

    Raises:
        TypeError: if n is not an int (bools included, as before).
        ValueError: if n < 1.
    """
    if type(n) != int:
        raise TypeError('n must be a positive int')
    if n < 1:
        raise ValueError('n must be a positive int')
    # Invariant: a == term(k), b == term(k+1); after n-1 steps a == term(n).
    a, b = 1, 2
    for _ in range(n - 1):
        a, b = b, a + b
    return a
for c in range(1, 50):
print(c, ":", fibonacci(c))
| 272 | 0 | 22 |
4a8e25813b907a40feaa1559a5f471ab13c22d1f | 3,194 | py | Python | experiments-tosem/rq1.py | testingautomated-usi/DeepHyperion | 698e27cdea7581055de0d5f02d0585053452ac8f | [
"MIT"
] | 5 | 2021-04-28T09:35:44.000Z | 2021-09-10T18:18:08.000Z | experiments-tosem/rq1.py | testingautomated-usi/DeepHyperion | 698e27cdea7581055de0d5f02d0585053452ac8f | [
"MIT"
] | null | null | null | experiments-tosem/rq1.py | testingautomated-usi/DeepHyperion | 698e27cdea7581055de0d5f02d0585053452ac8f | [
"MIT"
] | 2 | 2021-04-26T12:46:44.000Z | 2021-09-16T08:27:53.000Z | #
# This is the code for plotting the figures for RQ1. It is optimized towards plotting exactly those figures
# Use data_analysis.py for explorative data analysis
#
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from plotting_utils import load_data_from_folder, create_custom_palette, \
filter_data_and_plot_as_boxplots, filter_data_by_tag, store_figure_to_paper_folder
import matplotlib.pyplot as plt
if __name__ == "__main__":
main()
| 38.95122 | 114 | 0.716969 | #
# This is the code for plotting the figures for RQ1. It is optimized towards plotting exactly those figures
# Use data_analysis.py for explorative data analysis
#
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from plotting_utils import load_data_from_folder, create_custom_palette, \
filter_data_and_plot_as_boxplots, filter_data_by_tag, store_figure_to_paper_folder
import matplotlib.pyplot as plt
def preprare_the_figure(plot_data):
fontsize = 20
min_fontsize = 16
color_palette = create_custom_palette()
# Create the figure
fig = plt.figure(figsize=(16, 10))
# Set the figure to be a grid with 1 column and 2 rows without space between them
gs = fig.add_gridspec(2, hspace=0)
# Get the axes objects
axs = gs.subplots(sharex=True)
# Plot the top plot
axs[0] = filter_data_and_plot_as_boxplots(axs[0], "Mapped Misbehaviors", plot_data, color_palette)
# Plot the bottom plot
axs[1] = filter_data_and_plot_as_boxplots(axs[1], "Misbehavior Sparseness", plot_data, color_palette)
# Adjust the plots
# Put a legend to the right of the current axis
axs[0].legend(bbox_to_anchor=(1.04,1), loc="upper left",borderaxespad=0,fontsize=fontsize)
# Increase font for y-label and y-ticks
# axs[0].set_ylabel(axs[0].get_ylabel(), fontsize=fontsize)
axs[0].set_ylabel("Mapped Misb.", fontsize=fontsize)
axs[0].tick_params(axis='y', which='major', labelsize=min_fontsize)
# ax.tick_params(axis='both', which='minor', labelsize=8)
#axs[0].legend(fontsize=fontsize)
# Remove the legend from the bottom plot
axs[1].legend([], [], frameon=False)
# Remove the x - label
axs[1].set_xlabel('')
# Increase only the size of x-ticks, but split the combinations in two lines
# labels = [item.get_text() for item in ax.get_xticklabels()]
# labels[1] = 'Testing'
# https://stackoverflow.com/questions/11244514/modify-tick-label-text
axs[1].set_xticklabels([l.get_text().replace("-", "\n") for l in axs[1].get_xticklabels()], fontsize=fontsize)
# Increase label y-label and y-ticks
# axs[1].set_ylabel(axs[1].get_ylabel(), fontsize=fontsize)
axs[1].set_ylabel("Misb. Sparseness", fontsize=fontsize)
axs[1].tick_params(axis='y', which='major', labelsize=min_fontsize)
# Align the y labels: -0.1 moves it a bit to the left, 0.5 move it in the middle of y-axis
axs[0].get_yaxis().set_label_coords(-0.08, 0.5)
axs[1].get_yaxis().set_label_coords(-0.08, 0.5)
return fig
def main():
    """Generate and store the RQ1 figures for both case studies."""
    def _make_figure(folder, figure_name):
        # Load one study's data, keep only the "rescaled" runs, plot, store.
        study_data = load_data_from_folder(folder)
        study_data = filter_data_by_tag(study_data, ["rescaled"])
        store_figure_to_paper_folder(preprare_the_figure(study_data),
                                     file_name=figure_name)

    _make_figure("./data/mnist", "RQ1-MNIST")
    _make_figure("./data/beamng", "RQ1-BeamNG")


if __name__ == "__main__":
    main()
| 2,649 | 0 | 46 |
682b75ede6c09c1b14fd7011e3d65b73f93372df | 1,556 | py | Python | example/telegrambot.py | Nachtalb/django-telegrambot | 55aa2683e7453d7c65701fecdfcd029e86f34b7d | [
"BSD-3-Clause"
] | 245 | 2016-01-26T16:24:41.000Z | 2022-03-07T08:00:36.000Z | example/telegrambot.py | Nachtalb/django-telegrambot | 55aa2683e7453d7c65701fecdfcd029e86f34b7d | [
"BSD-3-Clause"
] | 49 | 2016-09-17T02:26:50.000Z | 2021-12-20T06:53:19.000Z | example/telegrambot.py | Nachtalb/django-telegrambot | 55aa2683e7453d7c65701fecdfcd029e86f34b7d | [
"BSD-3-Clause"
] | 97 | 2016-04-17T22:05:19.000Z | 2022-03-24T11:04:48.000Z | # -*- coding: utf-8 -*-
# Example code for telegrambot.py module
from telegram.ext import CommandHandler, MessageHandler, Filters
from django_telegrambot.apps import DjangoTelegramBot
import logging
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
| 31.12 | 91 | 0.726864 | # -*- coding: utf-8 -*-
# Example code for telegrambot.py module
from telegram.ext import CommandHandler, MessageHandler, Filters
from django_telegrambot.apps import DjangoTelegramBot
import logging
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
    """Reply to the /start command with a greeting."""
    chat = update.message.chat_id
    bot.sendMessage(chat, text='Hi!')
def help(bot, update):
    """Reply to the /help command.  (Name shadows the builtin; kept for API.)"""
    chat = update.message.chat_id
    bot.sendMessage(chat, text='Help!')
def echo(bot, update):
    """Echo the incoming text message back to its sender."""
    incoming = update.message
    bot.sendMessage(incoming.chat_id, text=incoming.text)
def error(bot, update, error):
    """Log an error raised while processing *update*."""
    # ``Logger.warn`` is a deprecated alias of ``warning``; also pass the
    # arguments lazily instead of interpolating eagerly with ``%`` — the
    # emitted message is identical.
    logger.warning('Update "%s" caused error "%s"', update, error)
def main():
    """Register this bot's handlers on the django_telegrambot dispatcher."""
    logger.info("Loading handlers for telegram bot")

    # Default dispatcher (this is related to the first bot in settings.TELEGRAM_BOT_TOKENS)
    dp = DjangoTelegramBot.dispatcher
    # To get Dispatcher related to a specific bot
    # dp = DjangoTelegramBot.getDispatcher('BOT_n_token')     #get by bot token
    # dp = DjangoTelegramBot.getDispatcher('BOT_n_username')  #get by bot username

    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", help))

    # on noncommand i.e message - echo the message on Telegram
    dp.add_handler(MessageHandler([Filters.text], echo))

    # log all errors.  Registered exactly once: the original additionally
    # called the deprecated ``addErrorHandler`` alias with the same handler,
    # which logged every error twice.
    dp.add_error_handler(error)
| 1,036 | 0 | 114 |
0332766a058a8145c7c4f524085c8c21f3bf51b0 | 1,558 | py | Python | core/face_alignment.py | zhaipro/yry | 81587179b40fafeb6c24ef58489c59061f7368ce | [
"Apache-2.0"
] | null | null | null | core/face_alignment.py | zhaipro/yry | 81587179b40fafeb6c24ef58489c59061f7368ce | [
"Apache-2.0"
] | null | null | null | core/face_alignment.py | zhaipro/yry | 81587179b40fafeb6c24ef58489c59061f7368ce | [
"Apache-2.0"
] | null | null | null | import os
import dlib
from pathlib import Path
# Directory containing this module; the dlib model file is expected next to it.
root_path = Path(__file__).parent
# Path to the bundled dlib 68-point landmark predictor model.
landmarks_model_path = os.path.join(root_path, 'shape_predictor_68_face_landmarks.dat')
# Shared, module-level detector instance built at import time.
landmarks_detector = LandmarksDetector(landmarks_model_path)
| 43.277778 | 130 | 0.595635 | import os
import dlib
from pathlib import Path
root_path = Path(__file__).parent
class LandmarksDetector:
    """Detect faces in an image and yield their 68-point dlib landmarks."""

    def __init__(self, predictor_model_path):
        """
        :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file
        """
        # HOG-based frontal detector; dlib.cnn_face_detection_model_v1 could
        # also be used here.
        self.detector = dlib.get_frontal_face_detector()
        self.shape_predictor = dlib.shape_predictor(predictor_model_path)
        # Detections scoring below this value are skipped.
        self.threshold = 0

    def get_landmarks(self, img):
        """Yield a list of (x, y) landmark tuples for each accepted face.

        ``img`` may be an image path (loaded via dlib) or an already-loaded
        RGB image array.
        """
        if isinstance(img, str):
            img = dlib.load_rgb_image(img)
        # Second argument (1) upsamples the image once so smaller faces can
        # be detected; third argument (0) is the detector score threshold.
        # The sub-detector indexes returned third are unused here.
        dets, scores, _idx = self.detector.run(img, 1, 0)
        for i, detection in enumerate(dets):
            try:
                if scores[i] < self.threshold:
                    continue
                landmarks = [(point.x, point.y)
                             for point in self.shape_predictor(img, detection).parts()]
                yield landmarks
            except Exception as exc:
                # Best-effort per face: skip this detection but report what
                # went wrong instead of discarding the exception silently.
                print("Exception in get_landmarks(): {}".format(exc))
# Path to the bundled dlib 68-point landmark predictor model.
landmarks_model_path = os.path.join(root_path, 'shape_predictor_68_face_landmarks.dat')
# Shared, module-level detector instance built at import time.
landmarks_detector = LandmarksDetector(landmarks_model_path)
| 889 | 403 | 24 |
6154f32c6c3c7577ec90b8c0271ec0cf22620436 | 4,210 | py | Python | app/paper/paperSigma/CONUS_map.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/paper/paperSigma/CONUS_map.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/paper/paperSigma/CONUS_map.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
# Plot CONUS maps of ubRMSE and the sigma estimates for the temporal and
# spatial test sets, and save the figures under paperSigma/.
import os
import scipy.stats  # fix: ``scipy.stats.pearsonr`` is called below but was never imported (NameError)
import rnnSMAP
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import imp
imp.reload(rnnSMAP)
rnnSMAP.reload()
figTitleLst = ['Temporal Test', 'Spatial Test']
figNameLst = ['temporal', 'spatial']
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
for iFig in range(0, 2):
    # iFig = 0
    figTitle = figTitleLst[iFig]
    if iFig == 0:
        testName = 'CONUSv2f1'
        yr = [2017]
    if iFig == 1:
        testName = 'CONUSv2f2'
        yr = [2015]
    trainName = 'CONUSv2f1'
    out = trainName+'_y15_Forcing_dr60'
    rootDB = rnnSMAP.kPath['DB_L3_NA']
    rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
    caseStrLst = ['sigmaMC', 'sigmaX', 'sigma']
    nCase = len(caseStrLst)
    saveFolder = os.path.join(rnnSMAP.kPath['dirResult'], 'paperSigma')
    #################################################
    # test
    predField = 'LSTM'
    targetField = 'SMAP'
    ds = rnnSMAP.classDB.DatasetPost(
        rootDB=rootDB, subsetName=testName, yrLst=yr)
    ds.readData(var='SMAP_AM', field='SMAP')
    ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
    statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
    statSigma = ds.statCalSigma(field='LSTM')
    statConf = ds.statCalConf(predField='LSTM', targetField='SMAP')
    statNorm = rnnSMAP.classPost.statNorm(
        statSigma=statSigma, dataPred=ds.LSTM, dataTarget=ds.SMAP)
    #################################################
    # plot figure
    fig = plt.figure(figsize=[12, 3])
    gs = gridspec.GridSpec(
        1, 3, width_ratios=[1, 1, 0.5], height_ratios=[1])
    dataErr = getattr(statErr, 'ubRMSE')
    dataSigma = getattr(statSigma, 'sigma')
    cRange = [0, 0.06]
    cRange2 = [0, 0.03]
    # plot map RMSE
    ax = fig.add_subplot(gs[0, 0])
    grid = ds.data2grid(data=dataErr)
    titleStr = 'ubRMSE of '+figTitle
    rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                            cRange=cRange, title=titleStr)
    # plot map sigma
    ax = fig.add_subplot(gs[0, 1])
    grid = ds.data2grid(data=dataSigma)
    titleStr = r'$\sigma_{comb}$'+' of '+figTitle
    rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                            cRange=cRange, title=titleStr)
    fig.show()
    # plot map sigma vs RMSE
    ax = fig.add_subplot(gs[0, 2])
    ax.set_aspect('equal', 'box')
    y = dataErr
    x = dataSigma
    rnnSMAP.funPost.plotVS(
        x, y, ax=ax, xlabel=r'$\sigma_{comb}$', ylabel='ubRMSE')
    fig.tight_layout()
    fig.show()
    saveFile = os.path.join(saveFolder, 'map_'+figNameLst[iFig])
    fig.savefig(saveFile, dpi=100)
    fig.savefig(saveFile+'.eps')
    print(scipy.stats.pearsonr(x, y))
    #################################################
    # plot sigmaX vs sigmaMC
    plotSigma = 1
    if plotSigma == 1:
        fig = plt.figure(figsize=[12, 3])
        gs = gridspec.GridSpec(
            1, 3, width_ratios=[1, 1, 0.5], height_ratios=[1])
        dataSigmaX = getattr(statSigma, 'sigmaX')
        dataSigmaMC = getattr(statSigma, 'sigmaMC')
        # plot map RMSE
        ax = fig.add_subplot(gs[0, 0])
        grid = ds.data2grid(data=dataSigmaX)
        titleStr = r'$\sigma_{x}$ '+figTitle
        rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                                cRange=cRange, title=titleStr)
        # plot map sigma
        ax = fig.add_subplot(gs[0, 1])
        grid = ds.data2grid(data=dataSigmaMC)
        titleStr = r'$\sigma_{MC}$'+' of '+figTitle
        rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                                cRange=cRange2, title=titleStr)
        # plot map sigma vs RMSE
        ax = fig.add_subplot(gs[0, 2])
        ax.set_aspect('equal', 'box')
        y = dataSigmaMC
        x = dataSigmaX
        rnnSMAP.funPost.plotVS(
            x, y, ax=ax, xlabel=r'$\sigma_{x}$', ylabel=r'$\sigma_{MC}$')
        fig.tight_layout()
        fig.show()
        saveFile = os.path.join(saveFolder, 'map_'+figNameLst[iFig]+'_sigma')
        fig.savefig(saveFile, dpi=100)
        fig.savefig(saveFile+'.eps')
# Plot CONUS maps of ubRMSE and the sigma estimates for the temporal and
# spatial test sets, and save the figures under paperSigma/.
import os
import scipy.stats  # fix: ``scipy.stats.pearsonr`` is called below but was never imported (NameError)
import rnnSMAP
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import imp
imp.reload(rnnSMAP)
rnnSMAP.reload()
figTitleLst = ['Temporal Test', 'Spatial Test']
figNameLst = ['temporal', 'spatial']
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
for iFig in range(0, 2):
    # iFig = 0
    figTitle = figTitleLst[iFig]
    if iFig == 0:
        testName = 'CONUSv2f1'
        yr = [2017]
    if iFig == 1:
        testName = 'CONUSv2f2'
        yr = [2015]
    trainName = 'CONUSv2f1'
    out = trainName+'_y15_Forcing_dr60'
    rootDB = rnnSMAP.kPath['DB_L3_NA']
    rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
    caseStrLst = ['sigmaMC', 'sigmaX', 'sigma']
    nCase = len(caseStrLst)
    saveFolder = os.path.join(rnnSMAP.kPath['dirResult'], 'paperSigma')
    #################################################
    # test
    predField = 'LSTM'
    targetField = 'SMAP'
    ds = rnnSMAP.classDB.DatasetPost(
        rootDB=rootDB, subsetName=testName, yrLst=yr)
    ds.readData(var='SMAP_AM', field='SMAP')
    ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
    statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
    statSigma = ds.statCalSigma(field='LSTM')
    statConf = ds.statCalConf(predField='LSTM', targetField='SMAP')
    statNorm = rnnSMAP.classPost.statNorm(
        statSigma=statSigma, dataPred=ds.LSTM, dataTarget=ds.SMAP)
    #################################################
    # plot figure
    fig = plt.figure(figsize=[12, 3])
    gs = gridspec.GridSpec(
        1, 3, width_ratios=[1, 1, 0.5], height_ratios=[1])
    dataErr = getattr(statErr, 'ubRMSE')
    dataSigma = getattr(statSigma, 'sigma')
    cRange = [0, 0.06]
    cRange2 = [0, 0.03]
    # plot map RMSE
    ax = fig.add_subplot(gs[0, 0])
    grid = ds.data2grid(data=dataErr)
    titleStr = 'ubRMSE of '+figTitle
    rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                            cRange=cRange, title=titleStr)
    # plot map sigma
    ax = fig.add_subplot(gs[0, 1])
    grid = ds.data2grid(data=dataSigma)
    titleStr = r'$\sigma_{comb}$'+' of '+figTitle
    rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                            cRange=cRange, title=titleStr)
    fig.show()
    # plot map sigma vs RMSE
    ax = fig.add_subplot(gs[0, 2])
    ax.set_aspect('equal', 'box')
    y = dataErr
    x = dataSigma
    rnnSMAP.funPost.plotVS(
        x, y, ax=ax, xlabel=r'$\sigma_{comb}$', ylabel='ubRMSE')
    fig.tight_layout()
    fig.show()
    saveFile = os.path.join(saveFolder, 'map_'+figNameLst[iFig])
    fig.savefig(saveFile, dpi=100)
    fig.savefig(saveFile+'.eps')
    print(scipy.stats.pearsonr(x, y))
    #################################################
    # plot sigmaX vs sigmaMC
    plotSigma = 1
    if plotSigma == 1:
        fig = plt.figure(figsize=[12, 3])
        gs = gridspec.GridSpec(
            1, 3, width_ratios=[1, 1, 0.5], height_ratios=[1])
        dataSigmaX = getattr(statSigma, 'sigmaX')
        dataSigmaMC = getattr(statSigma, 'sigmaMC')
        # plot map RMSE
        ax = fig.add_subplot(gs[0, 0])
        grid = ds.data2grid(data=dataSigmaX)
        titleStr = r'$\sigma_{x}$ '+figTitle
        rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                                cRange=cRange, title=titleStr)
        # plot map sigma
        ax = fig.add_subplot(gs[0, 1])
        grid = ds.data2grid(data=dataSigmaMC)
        titleStr = r'$\sigma_{MC}$'+' of '+figTitle
        rnnSMAP.funPost.plotMap(grid, crd=ds.crdGrid, ax=ax,
                                cRange=cRange2, title=titleStr)
        # plot map sigma vs RMSE
        ax = fig.add_subplot(gs[0, 2])
        ax.set_aspect('equal', 'box')
        y = dataSigmaMC
        x = dataSigmaX
        rnnSMAP.funPost.plotVS(
            x, y, ax=ax, xlabel=r'$\sigma_{x}$', ylabel=r'$\sigma_{MC}$')
        fig.tight_layout()
        fig.show()
        saveFile = os.path.join(saveFolder, 'map_'+figNameLst[iFig]+'_sigma')
        fig.savefig(saveFile, dpi=100)
        fig.savefig(saveFile+'.eps')
| 0 | 0 | 0 |
e3e55584798ccf7f1581a3f462a9c39c61dee078 | 15,483 | py | Python | src/crosswalks/make_crosswalk.py | jujiang526/mta | d8f01cb6ab1cfd74109937c7ac4d2a8ae8be6084 | [
"Apache-2.0"
] | 14 | 2020-06-12T00:22:29.000Z | 2022-03-20T07:46:28.000Z | src/crosswalks/make_crosswalk.py | jujiang526/mta | d8f01cb6ab1cfd74109937c7ac4d2a8ae8be6084 | [
"Apache-2.0"
] | 4 | 2020-08-07T18:20:24.000Z | 2022-02-08T15:43:55.000Z | src/crosswalks/make_crosswalk.py | jujiang526/mta | d8f01cb6ab1cfd74109937c7ac4d2a8ae8be6084 | [
"Apache-2.0"
] | 3 | 2020-07-03T15:41:22.000Z | 2021-09-26T19:24:45.000Z | import pandas as pd
import geopandas as gpd
import re
import textdistance
import numpy as np
import math
if __name__ == "__main__":
main() | 59.779923 | 182 | 0.613835 | import pandas as pd
import geopandas as gpd
import re
import textdistance
import numpy as np
import math
def make_ordinal(s):
    """Title-case each name in *s* and turn every number into an ordinal.

    E.g. '42 ST-GRD CNTRL' -> '42nd St-Grd Cntrl'.  Names without digits are
    only title-cased.  Returns a new list.

    Fixes two defects of the previous implementation: only the first number
    in a name was converted, and the plain-text ``re.sub`` on that number's
    digits could corrupt other numbers containing the same digit string
    (e.g. '10 Ave 102 St' became '10th Ave 10th2 St').
    """
    def ordinal(n):
        # Classic suffix trick: picks st/nd/rd/th, with the 11-13 exception.
        suffix = "tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4]
        return "%d%s" % (n, suffix)

    # Replace each standalone run of digits with its ordinal form.
    return [re.sub(r'\d+', lambda m: ordinal(int(m.group(0))), x.title())
            for x in s]
def main():
    """Build the MTA station master crosswalk.

    Loads the raw elevator/escalator list, subway station GeoJSON, the
    turnstile remote-booth workbook and the GTFS feed, normalises station
    names and line strings, fuzzy-matches the sources against each other
    (textdistance Jaccard with a Jaro-Winkler fallback), patches the known
    failures with manual overrides, and writes the merged result to
    ../../data/crosswalk/Master_crosswalk.csv.
    """
    # Raw inputs; paths are relative to this script's working directory.
    elevator_list = pd.read_csv('../../data/raw/EE_master_list.csv')
    stations = gpd.read_file('../../data/raw/subway_stations.geojson')
    turnstile_remotes = pd.read_excel('../../data/raw/Remote-Booth-Station.xls')
    gtfs = pd.read_csv('../../data/raw/google_transit/stops.txt')
    turnstile_remotes['Line Name'] = turnstile_remotes['Line Name'].astype(str)
    # location_type == 1 keeps parent stations only (GTFS convention).
    gtfs = gtfs[gtfs.location_type == 1]
    gtfs_routes = pd.read_csv('../../data/raw/google_transit/routes.txt')
    gtfs_trips = pd.read_csv('../../data/raw/google_transit/trips.txt')
    gtfs_stop_times = pd.read_csv('../../data/raw/google_transit/stop_times.txt')
    ## Getting lines for each GTFS Stop ID
    gtfs_stop_times = gtfs_stop_times[gtfs_stop_times.trip_id.str.contains('Weekday')]
    gtfs_lines = gtfs_stop_times.merge(gtfs_trips,on="trip_id")
    gtfs_lines = gtfs_lines.merge(gtfs_routes,on='route_id')
    # Strip the N/S (direction) suffix so both platforms map to one stop id.
    gtfs_lines['stop_id'] = [re.sub('N$|S$','',x) for x in gtfs_lines.stop_id]
    gtfs_lines['lines'] = gtfs_lines[['stop_id','route_short_name']].groupby(['stop_id'])['route_short_name'].transform(lambda x:
                                                                                          ','.join(x.unique()))
    gtfs_lines = gtfs_lines[['stop_id','lines']]
    gtfs_lines = gtfs_lines.drop_duplicates()
    gtfs = gtfs.merge(gtfs_lines[['stop_id','lines']],how='left',on='stop_id')
    gtfs = gtfs[~gtfs.lines.isnull()]
    ## Standardization
    stations = pd.DataFrame(stations.drop('geometry',axis=1))
    # Standardizing names
    stations['name_ord'] = stations.name
    turnstile_remotes['name_ord'] = make_ordinal(turnstile_remotes.Station)
    elevator_list['name_ord'] = make_ordinal(elevator_list.station_name)
    gtfs['name_ord'] = make_ordinal(gtfs.stop_name)
    # Standardizing lines
    stations["clean_lines"] = [re.sub('-','',re.sub('-\d+ Express','',x)) for x in stations.line]
    turnstile_remotes['clean_lines'] = [re.sub('-','',re.sub(r'(\w)(?!$)',r'\1-',str(x))) for x in turnstile_remotes['Line Name']]
    elevator_list['clean_lines'] = [re.sub('-','',re.sub('/', '-',re.sub('(/METRO-NORTH)|(/LIRR)','', x))) for x in
                                    elevator_list.subway_lines]
    gtfs['clean_lines'] = [re.sub('-','',re.sub(',','-',re.sub(',((\d)|(\w))X','',x))) for x in gtfs.lines]
    # Dropping unnecessary columns
    stations = stations[['name','name_ord','clean_lines','line']]
    elevator_list = elevator_list[['equipment_id','station_name','name_ord','clean_lines','subway_lines']]
    turnstile_remotes = turnstile_remotes[['Remote','Station','name_ord','clean_lines','Line Name']]
    gtfs = gtfs[['stop_id','stop_name','stop_lat','stop_lon','name_ord','clean_lines','lines']]
    ###### Text Matching
    # Elevator list -> stations: candidates share at least one line letter,
    # then names are compared (Jaccard first, Jaro-Winkler fallback).
    elevator_list.reset_index(drop=True,inplace=True)
    elevator_list['station_match'] = ''
    elevator_list['station_lines'] = ''
    for i,row in elevator_list.iterrows():
        ## station matching lines
        st_line_matches = [y if len(textdistance.lcsstr(row.clean_lines,y)) > 0 else None for y in stations.clean_lines]
        st_line_matches = [x for x in st_line_matches if x is not None]
        st_subset = stations[stations.clean_lines.isin(st_line_matches)]
        ## Fails to find the right match for just 59th St
        if row.station_name == '59 St':
            continue
        ## elevator
        if st_subset.shape[0] > 0:
            st_dist = [textdistance.jaccard(row.name_ord,y) for y in st_subset.name_ord]
            st_match = st_subset.iloc[np.argmax(st_dist),]
            st_score = max(st_dist)
            if st_score > 0.75:
                elevator_list.iloc[i,][['station_match','station_lines']] = st_match[['name_ord','line']]
            else:
                st_dist = [textdistance.jaro_winkler(row.name_ord,y) for y in st_subset.name_ord]
                st_match = st_subset.iloc[np.argmax(st_dist),]
                st_score = max(st_dist)
                elevator_list.iloc[i,][['station_match','station_lines']] = st_match[['name_ord','line']]
    ## Manual overrides
    elevator_list.loc[(elevator_list.station_name == '57 St - 7 Av')&(elevator_list.station_match == ''),
                      ['clean_lines','station_match','station_lines']] = ['NQRW','57th St','N-Q-R-W']
    elevator_list.loc[(elevator_list.station_name == '59 St')&(elevator_list.station_match == ''),
                      ['clean_lines','station_match','station_lines']] = ['456','Lexington Ave - 59th St','4-5-6-6 Express']
    elevator_list.loc[(elevator_list.station_name == '68 St / Hunter College')&(elevator_list.station_match == ''),
                      ['clean_lines','station_match','station_lines']] = ['46','68th St - Hunter College','4-6-6 Express']
    elevator_list.loc[(elevator_list.station_name == '86 St')&(elevator_list.station_match == ''),
                      ['clean_lines','station_match','station_lines']] = ['456','86th St','4-5-6-6 Express']
    elevator_list.loc[(elevator_list.station_name == 'Bedford Park Blvd/Grand Concourse Line')&(elevator_list.station_match == ''),
                      ['clean_lines','station_match','station_lines']] = ['BD','Bedford Park Blvd','B-D']
    elevator_list.loc[(elevator_list.station_name == 'Chambers St')&(elevator_list.station_match == ''),
                      ['clean_lines','station_match','station_lines']] = ['JZ','Chambers St','J-Z']
    # Collapse per-equipment rows to one row per matched station.
    el_station_merge = elevator_list.copy()
    el_station_merge['equipments'] = el_station_merge.groupby(['station_match','station_lines'])['equipment_id'].transform(lambda x :
                                                                                    ','.join(x.unique()))
    el_station_merge.drop(['equipment_id','name_ord'],axis=1,inplace=True)
    el_station_merge = el_station_merge.drop_duplicates()
    crosswalk = stations.merge(el_station_merge,how='left',left_on=['name','line'],right_on=['station_match','station_lines'])
    crosswalk.rename(columns={'clean_lines_x':'clean_lines','station_name':'el_station_name','subway_lines':'el_lines'},inplace=True)
    crosswalk.drop(['station_match','station_lines','clean_lines_y'],axis=1,inplace=True)
    crosswalk.fillna('',inplace=True)
    ## Matching GTFS
    crosswalk.reset_index(drop=True,inplace=True)
    crosswalk['gtfs_station_name'] = ''
    crosswalk['gtfs_lines'] = ''
    for i,row in crosswalk.iterrows():
        ## gtfs matching lines
        gtfs_line_matches = [y if len(textdistance.lcsstr(row.clean_lines,y)) > 0 else None for y in gtfs.clean_lines]
        gtfs_line_matches = [x for x in gtfs_line_matches if x is not None]
        gtfs_subset = gtfs[gtfs.clean_lines.isin(gtfs_line_matches)]
        ###### distances
        ## exceptions where it fails
        if((row.name_ord == '46th St') | (row.name_ord == '57th St')):
            continue
        if gtfs_subset.shape[0] > 0:
            gtfs_dist = [textdistance.jaccard(row.name_ord,y) for y in gtfs_subset.name_ord]
            gtfs_match = gtfs_subset.iloc[np.argmax(gtfs_dist),]
            gtfs_score = max(gtfs_dist)
            if gtfs_score > 0.88:
                crosswalk.iloc[i,][['gtfs_station_name','gtfs_lines']] = gtfs_match[['stop_name','lines']]
            else:
                gtfs_dist = [textdistance.jaro_winkler(row.name_ord,y) for y in gtfs_subset.name_ord]
                gtfs_match = gtfs_subset.iloc[np.argmax(gtfs_dist),]
                gtfs_score = max(gtfs_dist)
                if gtfs_score > 0.74:
                    crosswalk.iloc[i,][['gtfs_station_name','gtfs_lines']] = gtfs_match[['stop_name','lines']]
    ## Manual overrides
    crosswalk.loc[(crosswalk.name_ord == 'Lexington Ave - 59th St')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['59 St','4,5,5X,6,6X']
    crosswalk.loc[(crosswalk.name_ord == 'Long Island City - Court Sq')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['Court Sq - 23 St','G']
    crosswalk.loc[(crosswalk.name_ord == '46th St')&(crosswalk.clean_lines=='EMR')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['46 St','E,M,R']
    crosswalk.loc[(crosswalk.name_ord == '46th St')&(crosswalk.clean_lines=='7')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['46 St - Bliss St','7']
    crosswalk.loc[(crosswalk.name_ord == 'Gravesend - 86th St')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['86 St','N,W,Q']
    crosswalk.loc[(crosswalk.name_ord == 'Lower East Side - 2nd Ave')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['2 Av','F,FX']
    crosswalk.loc[(crosswalk.name_ord == '57th St')&(crosswalk.clean_lines=='F')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['57 St','F,FX,M']
    crosswalk.loc[(crosswalk.name_ord == '57th St')&(crosswalk.clean_lines=='NQRW')&(crosswalk.gtfs_station_name == ''),
                  ['gtfs_station_name','gtfs_lines']] = ['57 St - 7 Av','N,W,Q,R']
    ##### Turnstile
    # Stations where the fuzzy match is known to pick the wrong remote; they
    # are skipped here and filled in from ts_override below.
    stations_w_issues = ['36th Ave','111th St','168th St','104th St','7th Ave','28th St','39th Ave','81st St','30th Ave',
                         'Broadway Junction','49th St', '57th St', '80th St','96th St','176th St']
    crosswalk.reset_index(drop=True,inplace=True)
    crosswalk['turnstile_station_name'] = ''
    crosswalk['turnstile_lines'] = ''
    for i,row in crosswalk.iterrows():
        ## turnstile matching lines
        ts_line_matches = [y if len(textdistance.lcsstr(row.clean_lines,y)) > 0 else None for y in turnstile_remotes.clean_lines]
        ts_line_matches = [x for x in ts_line_matches if x is not None]
        ts_subset = turnstile_remotes[turnstile_remotes.clean_lines.isin(ts_line_matches)]
        ##### distances
        if (row.name_ord in stations_w_issues):
            continue
        # turnstile
        if ts_subset.shape[0] > 0:
            ts_dist = [textdistance.jaccard(row.name_ord,y) for y in ts_subset.name_ord]
            ts_match = ts_subset.iloc[np.argmax(ts_dist),]
            ts_score = max(ts_dist)
            if ts_score > 0.88:
                crosswalk.iloc[i,][['turnstile_station_name','turnstile_lines']] = ts_match[['Station','Line Name']]
            else:
                ts_dist = [textdistance.jaro_winkler(row.name_ord,y) for y in ts_subset.name_ord]
                ts_match = ts_subset.iloc[np.argmax(ts_dist),]
                ts_score = max(ts_dist)
                if ts_score > 0.81:
                    crosswalk.iloc[i,][['turnstile_station_name','turnstile_lines']] = ts_match[['Station','Line Name']]
    missing_vals = crosswalk[crosswalk.turnstile_station_name == ''][['name','clean_lines']]
    missing_vals.reset_index(drop=True,inplace=True)
    ## manual overrides
    # NOTE: this list is positional -- it must stay in the same order (and
    # length) as the ``missing_vals`` rows it is concatenated with below.
    ts_override = [['MAIN ST','7'],['138 ST-3 AVE','6'],['42 ST-GRD CNTRL','4567S'],['96 ST','6'],['61 ST/WOODSIDE','7'],['96 ST','BC'],
                   ['168 ST-BROADWAY','1AC'],['UNION TPK-KEW G','EF'],['WASHINGTON-36 A','NQ'],['42 ST-GRD CNTRL','4567S'],['GREENWOOD-111','A'],
                   ['OXFORD-104 ST','A'],['7 AV-PARK SLOPE','FG'],['7 AVE','BQ'],['FLATBUSH AVE','25'],['28 ST-BROADWAY','NR'],['COURT SQ','EMG'],
                   ['VAN ALSTON-21ST','G'],['BEEBE-39 AVE','NQ'],['96 ST','123'],['110 ST-CPN','23'],['81 ST-MUSEUM','BC'],['110 ST-CATHEDRL','1'],['176 ST','4'],
                   ['168 ST-BROADWAY','1AC'],['111 ST','7'],['LEFFERTS BLVD','A'],['28 ST','1'],['28 ST','6'],['42 ST-GRD CNTRL','4567S'],['FOREST PARKWAY','J'],
                   ['111 ST','J'],['MYRTLE AVE','LM'],['ROCKAWAY PKY','L'],['EAST 105 ST','L'],['BROADWAY-ENY','ACJLZ'],['ELDERTS LANE','JZ'],['MYRTLE AVE','LM'],
                   ['VAN WYCK BLVD','EF'],['HOYT ST-ASTORIA','NQ'],['DITMARS BL-31 S','NQ'],['148 ST-LENOX','3'],['242 ST','1'],['E TREMONT AVE','25'],['DYRE AVE','5'],
                   ['BROADWAY-ENY','ACJLZ'],['149 ST-3 AVE','25'],['GRAND-30 AVE','NQ'],['NEW UTRECHT AVE','ND'],['86 ST','N'],['22 AVE-BAY PKY','F'],
                   ['7 AVE-53 ST','BDE'],['57 ST','F'],['49 ST-7 AVE','NQR'],['57 ST-7 AVE','NQR'],['57 ST-7 AVE','NQR'],['2 AVE','F'],['BOROUGH HALL/CT','2345R'],['BROADWAY-ENY','ACJLZ'],
                   ['BROOKLYN BRIDGE','456JZ'],['METROPOLITAN AV','M'],['ROOSEVELT AVE','EFMR7'],['E 177 ST-PARKCH','6'],['HUDSON-80 ST','A'],['STILLWELL AVE','DFNQ'],['34 ST-HUDSON YD','7'],
                   ['72 ST-2 AVE','Q'],['86 ST-2 AVE','Q'],['96 ST-2 AVE','Q']]
    turnstile_override = pd.DataFrame(ts_override)
    turnstile_override.rename(columns={0:'turnstile_station_name',1:'turnstile_lines'},inplace=True)
    turnstile_override = pd.concat([missing_vals,turnstile_override],axis=1)
    for i,row in crosswalk.iterrows():
        if (row.turnstile_station_name == ''):
            ts_match = turnstile_override[(turnstile_override.name == row.name_ord)&
                                          (turnstile_override.clean_lines == row.clean_lines)][['turnstile_station_name','turnstile_lines']]
            crosswalk.iloc[i,][['turnstile_station_name','turnstile_lines']] = ts_match.values[0]
    # Final assembly: attach GTFS coordinates/ids and turnstile unit lists.
    crosswalk.drop('name_ord',axis=1,inplace=True)
    crosswalk.rename(columns={'name':'station_name','line':'station_lines'},inplace=True)
    crosswalk = crosswalk.merge(gtfs.drop('name_ord',axis=1),how='left',left_on=['gtfs_station_name','gtfs_lines'],right_on=['stop_name','lines'])
    crosswalk.drop(['stop_name','clean_lines_y','lines'],axis=1,inplace=True)
    crosswalk.rename(columns={'stop_id':'gtfs_stop_id','stop_lat':'lat','stop_lon':'lon','clean_lines_x':'clean_lines'},inplace=True)
    turnstile_remotes['turnstile_units'] = turnstile_remotes.groupby(['Station','Line Name'])['Remote'].transform(lambda x : ','.join(x.unique()))
    turnstile_merge = turnstile_remotes.drop(['Remote','name_ord','clean_lines'],axis=1).drop_duplicates()
    crosswalk = crosswalk.merge(turnstile_merge,how='left',left_on=['turnstile_station_name','turnstile_lines'],right_on=['Station','Line Name']).drop(['Station','Line Name'],axis=1)
    ## adding missing units
    crosswalk.loc[(crosswalk.station_name == '34th St - Hudson Yards')&(crosswalk.clean_lines == '7'),['turnstile_units']] = ['R072']
    crosswalk.loc[(crosswalk.station_name == '72nd St')&(crosswalk.clean_lines == 'Q'),['turnstile_units']] = ['R570']
    crosswalk.loc[(crosswalk.station_name == '86th St')&(crosswalk.clean_lines == 'Q'),['turnstile_units']] = ['R571']
    crosswalk.loc[(crosswalk.station_name == '96th St')&(crosswalk.clean_lines == 'Q'),['turnstile_units']] = ['R572']
    crosswalk.to_csv('../../data/crosswalk/Master_crosswalk.csv',index=False)
if __name__ == "__main__":
main() | 15,293 | 0 | 46 |
0ba651c21a66e26b88a2cb551112680207e27de1 | 91 | py | Python | setup.py | wtnb75/tarjinja | f6ac83d8bde0f476274002ba01d7709568f9a529 | [
"MIT"
] | 3 | 2021-06-05T22:55:25.000Z | 2022-01-23T11:48:21.000Z | setup.py | wtnb75/tarjinja | f6ac83d8bde0f476274002ba01d7709568f9a529 | [
"MIT"
] | 6 | 2020-11-29T16:52:58.000Z | 2020-12-01T21:51:58.000Z | setup.py | wtnb75/tarjinja | f6ac83d8bde0f476274002ba01d7709568f9a529 | [
"MIT"
from setuptools import setup

# Read the pinned requirements, closing the file deterministically (the
# previous inline ``open(...)`` leaked the file handle).
with open("requirements.txt") as req_file:
    requirements = req_file.readlines()

setup(install_requires=requirements)
from setuptools import setup

# Read the pinned requirements, closing the file deterministically (the
# previous inline ``open(...)`` leaked the file handle).
with open("requirements.txt") as req_file:
    requirements = req_file.readlines()

setup(install_requires=requirements)
| 0 | 0 | 0 |
db88f9801fb7811f0a3219bfa14db8d66420f6dc | 35,685 | py | Python | sip-4.19.21/configure.py | Clynie/StockSpider-1 | 4d3bec01f581c4c8dd45af668ce8aab4c98685ba | [
"MIT"
] | null | null | null | sip-4.19.21/configure.py | Clynie/StockSpider-1 | 4d3bec01f581c4c8dd45af668ce8aab4c98685ba | [
"MIT"
] | null | null | null | sip-4.19.21/configure.py | Clynie/StockSpider-1 | 4d3bec01f581c4c8dd45af668ce8aab4c98685ba | [
"MIT"
] | null | null | null | # This script handles the SIP configuration and generates the Makefiles.
#
# Copyright (c) 2019 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import keyword
import sys
import os
import glob
import optparse
from distutils import sysconfig
try:
    from importlib import invalidate_caches
except ImportError:
    # Pythons without importlib.invalidate_caches(): fall back to a no-op.
    invalidate_caches = lambda: None
import siputils
# Initialise the globals.
# Version encoded as 0xMMmmpp (matches the 4.19.21 string below).
sip_version = 0x041315
sip_version_str = "4.19.21"
# Python version with the release-level/serial byte shifted away, and the
# platform identifier string.
py_version = sys.hexversion >> 8
py_platform = sys.platform
# Host-Python / installation directories.  Initialised empty here;
# presumably filled in by the configuration steps later in this script
# (TODO confirm: assignments happen outside this excerpt).
plat_py_site_dir = None
plat_py_inc_dir = None
plat_py_venv_inc_dir = None
plat_py_conf_inc_dir = None
plat_py_lib_dir = None
plat_sip_dir = None
plat_bin_dir = None
platform_specs = []
sip_bin_dir = ''
sip_inc_dir = ''
sip_root_dir = ''
sip_module_dir = ''
sip_module_dest_dir = ''
sip_sip_dir = ''
pyi_dir = ''
sysroot = ''
# Directory containing this configure script.
src_dir = os.path.dirname(os.path.abspath(__file__))
sip_module_name = None
build_platform = None
# Constants.
DEFAULT_MACOSX_ARCH = 'i386 ppc'
MACOSX_SDK_DIRS = ('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs', '/Developer/SDKs')
# The names of build macros extracted from the platform specific configuration
# files.
build_macro_names = [
"DEFINES", "CONFIG",
"CC",
"CFLAGS",
"CFLAGS_RELEASE", "CFLAGS_DEBUG",
"CFLAGS_CONSOLE", "CFLAGS_SHLIB", "CFLAGS_APP", "CFLAGS_THREAD",
"CFLAGS_MT", "CFLAGS_MT_DBG", "CFLAGS_MT_DLL", "CFLAGS_MT_DLLDBG",
"CFLAGS_EXCEPTIONS_ON", "CFLAGS_EXCEPTIONS_OFF",
"CFLAGS_RTTI_ON", "CFLAGS_RTTI_OFF",
"CFLAGS_STL_ON", "CFLAGS_STL_OFF",
"CFLAGS_WARN_ON", "CFLAGS_WARN_OFF",
"CHK_DIR_EXISTS", "COPY",
"CXX",
"CXXFLAGS",
"CXXFLAGS_RELEASE", "CXXFLAGS_DEBUG",
"CXXFLAGS_CONSOLE", "CXXFLAGS_SHLIB", "CXXFLAGS_APP", "CXXFLAGS_THREAD",
"CXXFLAGS_MT", "CXXFLAGS_MT_DBG", "CXXFLAGS_MT_DLL", "CXXFLAGS_MT_DLLDBG",
"CXXFLAGS_EXCEPTIONS_ON", "CXXFLAGS_EXCEPTIONS_OFF",
"CXXFLAGS_RTTI_ON", "CXXFLAGS_RTTI_OFF",
"CXXFLAGS_STL_ON", "CXXFLAGS_STL_OFF",
"CXXFLAGS_WARN_ON", "CXXFLAGS_WARN_OFF",
"DEL_FILE",
"EXTENSION_SHLIB", "EXTENSION_PLUGIN",
"INCDIR", "INCDIR_X11", "INCDIR_OPENGL",
"LIBS_CORE", "LIBS_GUI", "LIBS_NETWORK", "LIBS_OPENGL", "LIBS_WEBKIT",
"LINK", "LINK_SHLIB", "AIX_SHLIB", "LINK_SHLIB_CMD",
"LFLAGS", "LFLAGS_CONSOLE", "LFLAGS_CONSOLE_DLL", "LFLAGS_DEBUG",
"LFLAGS_DLL",
"LFLAGS_PLUGIN", "LFLAGS_RELEASE", "LFLAGS_SHLIB", "LFLAGS_SONAME",
"LFLAGS_THREAD", "LFLAGS_WINDOWS", "LFLAGS_WINDOWS_DLL", "LFLAGS_OPENGL",
"LIBDIR", "LIBDIR_X11", "LIBDIR_OPENGL",
"LIBS", "LIBS_CONSOLE", "LIBS_RT",
"LIBS_RTMT", "LIBS_THREAD", "LIBS_WINDOWS", "LIBS_X11",
"MAKEFILE_GENERATOR",
"MKDIR",
"RPATH", "LFLAGS_RPATH",
"AR", "RANLIB", "LIB", "STRIP"
]
def show_platforms():
"""Display the different platform/compilers.
"""
sys.stdout.write("""
The following platform/compiler configurations are supported:
""")
platform_specs.sort()
sys.stdout.write(siputils.format(", ".join(platform_specs), leftmargin=2))
sys.stdout.write("\n\n")
def show_macros():
"""Display the different build macros.
"""
sys.stdout.write("""
The following options may be used to adjust the compiler configuration:
""")
build_macro_names.sort()
sys.stdout.write(siputils.format(", ".join(build_macro_names), leftmargin=2))
sys.stdout.write("\n\n")
def set_build_platform():
""" Initialise the build platform. """
global build_platform
# Set the platform specific default specification.
platdefaults = {
"aix": "aix-xlc",
"bsd": "bsdi-g++",
"cygwin": "cygwin-g++",
"darwin": "macx-g++",
"dgux": "dgux-g++",
"freebsd": "freebsd-g++",
"gnu": "hurd-g++",
"hp-ux": "hpux-acc",
"irix": "irix-cc",
"linux": "linux-g++",
"lynxos": "lynxos-g++",
"netbsd": "netbsd-g++",
"openbsd": "openbsd-g++",
"openunix": "unixware-cc",
"osf1": "tru64-cxx",
"qnx": "qnx-g++",
"reliantunix": "reliant-cds",
"sco_sv": "sco-cc",
"sinix": "reliant-cds",
"sunos5": "solaris-cc",
"ultrix": "ultrix-g++",
"unix_sv": "unixware-g++",
"unixware": "unixware-cc",
"haiku1": "haiku-g++"
}
build_platform = "none"
if py_platform == "win32":
if py_version >= 0x030500:
build_platform = "win32-msvc2015"
elif py_version >= 0x030300:
build_platform = "win32-msvc2010"
elif py_version >= 0x020600:
build_platform = "win32-msvc2008"
elif py_version >= 0x020400:
build_platform = "win32-msvc.net"
else:
build_platform = "win32-msvc"
else:
for pd in list(platdefaults.keys()):
if py_platform[:len(pd)] == pd:
build_platform = platdefaults[pd]
break
def inform_user():
""" Tell the user the option values that are going to be used. """
if not opts.no_tools:
siputils.inform("The SIP code generator will be installed in %s." % sip_bin_dir)
siputils.inform("The sip.h header file will be installed in %s." % sip_inc_dir)
if not opts.no_module:
siputils.inform("The %s module will be installed in %s." % (sip_module_name, sip_module_dest_dir))
if opts.pyi:
siputils.inform("The sip.pyi stub file will be installed in %s." % pyi_dir)
if opts.static:
siputils.inform("The %s module will be built as a static library." % sip_module_name)
siputils.inform("The default directory to install .sip files in is %s." % sip_sip_dir)
if opts.use_qmake is None:
siputils.inform("The platform/compiler configuration is %s." % build_platform)
if opts.arch:
siputils.inform("MacOS/X binaries will be created for %s." % (", ".join(opts.arch.split())))
if opts.universal:
siputils.inform("MacOS/X universal binaries will be created using %s." % opts.universal)
if opts.deployment_target:
siputils.inform("MacOS/X deployment target is %s." % opts.deployment_target)
def set_platform_directories():
""" Initialise the global variables relating to platform-specific
directories.
"""
global plat_py_site_dir, plat_py_inc_dir, plat_py_venv_inc_dir
global plat_py_conf_inc_dir, plat_bin_dir, plat_py_lib_dir, plat_sip_dir
# We trust distutils for some stuff.
plat_py_site_dir = sysconfig.get_python_lib(plat_specific=1)
plat_py_inc_dir = sysconfig.get_python_inc()
plat_py_venv_inc_dir = sysconfig.get_python_inc(prefix=sys.prefix)
plat_py_conf_inc_dir = os.path.dirname(sysconfig.get_config_h_filename())
if sys.platform == "win32":
bin_dir = sys.exec_prefix
try:
# Python v3.3 and later.
base_prefix = sys.base_prefix
if sys.exec_prefix != sys.base_exec_prefix:
bin_dir += '\\Scripts'
except AttributeError:
try:
# virtualenv for Python v2.
base_prefix = sys.real_prefix
bin_dir += '\\Scripts'
except AttributeError:
# We can't detect the base prefix in Python v3 prior to v3.3.
base_prefix = sys.prefix
plat_py_lib_dir = base_prefix + "\\libs"
plat_bin_dir = bin_dir
plat_sip_dir = sys.prefix + "\\sip"
else:
lib_dir = sysconfig.get_python_lib(plat_specific=1, standard_lib=1)
plat_py_lib_dir = lib_dir + "/config"
plat_bin_dir = sys.exec_prefix + "/bin"
plat_sip_dir = sys.prefix + "/share/sip"
def create_config(module, template, macros):
"""Create the SIP configuration module so that it can be imported by build
scripts.
module is the module file name.
template is the template file name.
macros is the dictionary of build macros.
"""
siputils.inform("Creating %s..." % module)
content = {
"sip_config_args": sys.argv[1:],
"sip_version": sip_version,
"sip_version_str": sip_version_str,
"platform": build_platform,
"sip_bin": os.path.join(sip_bin_dir, "sip"),
"sip_inc_dir": sip_inc_dir,
"sip_root_dir": sip_root_dir,
"sip_module_dir": sip_module_dir,
"default_bin_dir": plat_bin_dir,
"default_mod_dir": plat_py_site_dir,
"default_sip_dir": sip_sip_dir,
"py_version": py_version,
"py_inc_dir": plat_py_inc_dir,
"py_conf_inc_dir": plat_py_conf_inc_dir,
"py_lib_dir": plat_py_lib_dir,
"universal": opts.universal,
"arch": opts.arch,
"deployment_target": opts.deployment_target,
"qt_framework": 0
}
siputils.create_config_module(module, template, content, macros)
def create_makefiles(macros):
"""Create the Makefiles.
macros is the dictionary of platform specific build macros.
"""
# Bootstrap. Make sure we get the right one.
sys.path.insert(0, os.path.curdir)
invalidate_caches()
import sipconfig
cfg = sipconfig.Configuration()
cfg.set_build_macros(macros)
all_installs = []
top_installs = []
gen_installs = []
subdirs = []
if not opts.no_tools:
subdirs.append('sipgen')
top_installs.append(
(["sipconfig.py", os.path.join(src_dir, "sipdistutils.py")],
cfg.sip_root_dir))
gen_installs.append(
(os.path.join(src_dir, "siplib", "sip.h"), cfg.sip_inc_dir))
if not opts.no_module:
subdirs.append('siplib')
all_installs.extend(top_installs)
all_installs.extend(gen_installs)
# The command to run to generate the dist-info directory.
mk_distinfo = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'mk_distinfo.py')
distinfo_dir = os.path.join(cfg.sip_module_dir,
'%s-%s.dist-info' % (sip_module_name.replace('.', '_'),
sip_version_str))
if opts.use_qmake:
run_mk_distinfo = '%s %s \\\"$(INSTALL_ROOT)\\\" %s installed.txt' % (
sys.executable, mk_distinfo, distinfo_dir)
sipconfig.inform("Creating top level .pro file...")
pro = open("sip.pro", "w")
pro.write("TEMPLATE = subdirs\n")
pro.write("SUBDIRS = %s\n" % " ".join(subdirs))
if top_installs:
# There will only be one element.
files, path = top_installs[0]
pro.write("\n")
pro.write("build_system.files = %s\n" % " ".join(files))
pro.write("build_system.path = %s\n" % quote(path))
pro.write("INSTALLS += build_system\n")
if opts.distinfo:
pro.write("\n")
pro.write("distinfo.extra = %s\n" % run_mk_distinfo)
pro.write("distinfo.path = %s\n" % quote(cfg.sip_module_dir))
pro.write("INSTALLS += distinfo\n")
pro.close()
else:
run_mk_distinfo = '%s %s "$(DESTDIR)" %s installed.txt' % (
sys.executable, mk_distinfo, distinfo_dir)
sipconfig.inform("Creating top level Makefile...")
# Note that mk_distinfo.py won't exist if we are building from the
# repository.
if opts.distinfo and os.path.isfile(mk_distinfo):
top_installs.append((run_mk_distinfo, None))
sipconfig.ParentMakefile(
configuration=cfg,
subdirs=subdirs,
installs=top_installs
).generate()
if opts.use_qmake:
sipconfig.inform("Creating sip code generator .pro file...")
pro = open(os.path.join("sipgen", "sipgen.pro"), "w")
pro.write("TEMPLATE = app\n")
pro.write("TARGET = sip\n")
pro.write("CONFIG -= qt app_bundle\n")
pro.write("CONFIG += warn_on exceptions_off console %s\n" % (
("debug" if opts.debug else "release")))
pro.write("\n")
pro.write("# Work around QTBUG-39300.\n")
pro.write("CONFIG -= android_install\n")
pro.write("\n")
pro.write("target.path = %s\n" % os.path.dirname(cfg.sip_bin))
pro.write("INSTALLS += target\n")
c_sources = get_sources("sipgen", "*.c")
pro.write("\n")
pro.write("SOURCES = %s\n" % " ".join(
[qmake_quote(s) for s in c_sources]))
headers = get_sources("sipgen", "*.h")
pro.write("\n")
pro.write("HEADERS = %s\n" % " ".join(
[qmake_quote(h) for h in headers]))
if gen_installs:
# There will only be one element.
files, path = gen_installs[0]
pro.write("\n")
pro.write("sip_h.files = %s\n" % " ".join(files))
pro.write("sip_h.path = %s\n" % quote(path))
pro.write("INSTALLS += sip_h\n")
pro.close()
else:
sipconfig.inform("Creating sip code generator Makefile...")
sipconfig.ProgramMakefile(
configuration=cfg,
build_file=os.path.join(src_dir, "sipgen", "sipgen.sbf"),
dir="sipgen",
install_dir=os.path.dirname(cfg.sip_bin),
installs=gen_installs,
console=1,
warnings=1,
universal=opts.universal,
arch=opts.arch,
deployment_target=opts.deployment_target
).generate()
# The implied code generator installs.
if not opts.no_tools:
sip_dir, sip_exe = os.path.split(cfg.sip_bin)
if sys.platform == 'win32':
sip_exe += '.exe'
all_installs.append((sip_exe, sip_dir))
# The module installs.
module_installs=[]
if opts.pyi:
module_installs.append((os.path.join(src_dir, 'sip.pyi'), pyi_dir))
all_installs.extend(module_installs)
if not opts.no_module:
if sys.platform == 'win32':
mod = 'sip.lib' if opts.static else 'sip.pyd'
else:
mod = 'libsip.a' if opts.static else 'sip.so'
all_installs.append((mod, sip_module_dest_dir))
if opts.use_qmake:
sipconfig.inform("Creating sip module .pro file...")
pro = open(os.path.join("siplib", "siplib.pro"), "w")
pro.write("TEMPLATE = lib\n")
pro.write("TARGET = sip\n")
pro.write("CONFIG -= qt\n")
pro.write("CONFIG += warn_on exceptions_off %s %s\n" % (
("staticlib" if opts.static else "plugin plugin_bundle"),
("debug" if opts.debug else "release")))
pro.write("\n")
pro.write("# Work around QTBUG-39300.\n")
pro.write("CONFIG -= android_install\n")
pro.write("\n")
pro.write("INCLUDEPATH += %s\n" % cfg.py_inc_dir)
if cfg.py_conf_inc_dir != cfg.py_inc_dir:
pro.write("INCLUDEPATH += %s\n" % cfg.py_conf_inc_dir)
if sip_module_name != 'sip':
pro.write("\n")
pro.write('DEFINES += SIP_MODULE_NAME=%s\n' % sip_module_name)
base_name = sip_module_name.split('.')[-1]
if base_name != 'sip':
pro.write('DEFINES += SIP_MODULE_BASENAME=%s\n' % base_name)
if not opts.static:
# These only need to be correct for Windows.
debug_suffix = "_d" if opts.debug else ""
link_lib_dir = quote("-L" + cfg.py_lib_dir)
pro.write("""
win32 {
PY_MODULE = sip%s.pyd
PY_MODULE_SRC = $(DESTDIR_TARGET)
LIBS += %s
} else {
PY_MODULE = sip.so
macx {
PY_MODULE_SRC = $(TARGET).plugin/Contents/MacOS/$(TARGET)
QMAKE_LFLAGS += "-undefined dynamic_lookup"
} else {
PY_MODULE_SRC = $(TARGET)
}
}
QMAKE_POST_LINK = $(COPY_FILE) $$PY_MODULE_SRC $$PY_MODULE
target.CONFIG = no_check_exist
target.files = $$PY_MODULE
""" % (debug_suffix, link_lib_dir))
pro.write("\n")
pro.write("target.path = %s\n" % sip_module_dest_dir)
pro.write("INSTALLS += target\n")
if opts.pyi:
pro.write("\n")
pro.write("sip_pyi.files = sip.pyi\n")
pro.write("sip_pyi.path = %s\n" % pyi_dir)
pro.write("INSTALLS += sip_pyi\n")
c_sources = get_sources("siplib", "*.c")
cpp_sources = get_sources("siplib", "*.cpp")
pro.write("\n")
pro.write("SOURCES = %s\n" % " ".join(
[qmake_quote(s) for s in c_sources + cpp_sources]))
headers = get_sources("siplib", "*.h")
pro.write("\n")
pro.write("HEADERS = %s\n" % " ".join(
[qmake_quote(h) for h in headers]))
pro.close()
else:
sipconfig.inform("Creating sip module Makefile...")
build_dir = os.getcwd()
makefile = sipconfig.ModuleMakefile(
configuration=cfg,
build_file=os.path.join(src_dir, "siplib", "siplib.sbf"),
dir="siplib",
install_dir=sip_module_dest_dir,
installs=module_installs,
console=1,
warnings=1,
static=opts.static,
debug=opts.debug,
universal=opts.universal,
arch=opts.arch,
deployment_target=opts.deployment_target
)
if sip_module_name != 'sip':
makefile.DEFINES.append('SIP_MODULE_NAME=%s' % sip_module_name)
base_name = sip_module_name.split('.')[-1]
if base_name != 'sip':
makefile.DEFINES.append('SIP_MODULE_BASENAME=%s' % base_name)
if src_dir != build_dir:
src_siplib_dir = os.path.join(src_dir, "siplib")
makefile.extra_include_dirs.append(src_siplib_dir)
makefile.extra_source_dirs.append(src_siplib_dir)
makefile.generate()
# Create the file containing all installed files.
if opts.distinfo:
installed = open('installed.txt', 'w')
for sources, dst in all_installs:
if not isinstance(sources, (list, tuple)):
sources = [sources]
for src in sources:
installed.write(
os.path.join(dst, os.path.basename(src)) + '\n')
installed.close()
def get_sources(sources_dir, ext):
""" Get the quoted files with the specified extension from a directory. """
return [quote(f) for f in glob.glob(os.path.join(src_dir, sources_dir, ext))]
def quote(path):
""" Return a path that is quoted if necessary. """
if ' ' in path:
path = '"' + path + '"'
return path
def qmake_quote(path):
""" Return a path quoted for qmake if it contains spaces. path is the
path.
"""
if ' ' in path:
path = '$$quote(%s)' % path
return path
# Look out for recursive definitions.
_extrapolating = []
def _get_configuration_value(config, name, default=None):
""" Get a configuration value while extrapolating. """
value = config.get(name)
if value is None:
if default is None:
siputils.error("Configuration file references non-existent name '%s'." % name)
return default
parts = value.split('%(', 1)
while len(parts) == 2:
prefix, tail = parts
parts = tail.split(')', 1)
if len(parts) != 2:
siputils.error("Configuration file contains unterminated extrapolated name '%s'." % tail)
xtra_name, suffix = parts
if xtra_name in _extrapolating:
siputils.error("Configuration file contains a recursive reference to '%s'." % xtra_name)
_extrapolating.append(xtra_name)
xtra_value = _get_configuration_value(config, xtra_name)
_extrapolating.pop()
value = prefix + xtra_value + suffix
parts = value.split('%(', 1)
return value
def update_from_configuration_file(config_file):
""" Update a number of globals from values read from a configuration file.
"""
siputils.inform("Reading configuration from %s..." % config_file)
config = {}
# Read the file into the dict.
cfg = open(config_file)
line_nr = 0
for l in cfg:
line_nr += 1
# Strip comments and blank lines.
l = l.split('#')[0].strip()
if l == '':
continue
parts = l.split('=', 1)
if len(parts) == 2:
name = parts[0].strip()
value = parts[1].strip()
else:
name = value = ''
if name == '' or value == '':
siputils.error("%s:%d: Invalid line." % (config_file, line_nr))
config[name] = value
last_name = name
cfg.close()
# Enforce the presets.
version = siputils.version_to_string(py_version).split('.')
config['py_major'] = version[0]
config['py_minor'] = version[1]
config['sysroot'] = sysroot
# Override the relevant values.
global py_platform, plat_py_conf_inc_dir, plat_py_inc_dir, plat_py_lib_dir
global sip_bin_dir, sip_inc_dir, sip_module_dir, sip_sip_dir
py_platform = _get_configuration_value(config, 'py_platform', py_platform)
plat_py_inc_dir = _get_configuration_value(config, 'py_inc_dir',
plat_py_inc_dir)
plat_py_lib_dir = _get_configuration_value(config, 'py_pylib_dir',
plat_py_lib_dir)
# The pyconfig.h directory defaults to the Python.h directory.
plat_py_conf_inc_dir = _get_configuration_value(config, 'py_conf_inc_dir',
plat_py_inc_dir)
sip_bin_dir = _get_configuration_value(config, 'sip_bin_dir', sip_bin_dir)
sip_module_dir = _get_configuration_value(config, 'sip_module_dir',
sip_module_dir)
# Note that this defaults to any 'py_inc_dir' specified in the
# configuration file.
sip_inc_dir = _get_configuration_value(config, 'sip_inc_dir',
plat_py_inc_dir)
# Note that this is only used when creating sipconfig.py.
sip_sip_dir = _get_configuration_value(config, 'sip_sip_dir', sip_sip_dir)
def create_optparser(sdk_dir):
"""Create the parser for the command line.
"""
p = optparse.OptionParser(usage="python %prog [opts] [macro=value] "
"[macro+=value]", version=sip_version_str)
# Note: we don't use %default to be compatible with Python 2.3.
p.add_option("-k", "--static", action="store_true", default=False,
dest="static", help="build the SIP module as a static library")
p.add_option("-p", "--platform", action="store", type="string",
metavar="PLATFORM", dest="platform", help="the platform/compiler "
"configuration [default: %s]" % build_platform)
p.add_option("-u", "--debug", action="store_true", default=False,
help="build with debugging symbols")
p.add_option("--sip-module", action="store", default="sip", type="string",
metavar="NAME", dest="sip_module", help="the package.module name "
"of the sip module [default: sip]")
p.add_option("--configuration", dest='config_file', type='string',
action='callback', callback=store_abspath_file, metavar="FILE",
help="FILE contains the target configuration")
p.add_option("--target-py-version", dest='target_py_version',
type='string', action='callback', callback=store_version,
metavar="VERSION",
help="the major.minor version of the target Python [default: "
"%s]" % siputils.version_to_string(py_version, parts=2))
p.add_option("--sysroot", dest='sysroot', type='string', action='callback',
callback=store_abspath_dir, metavar="DIR",
help="DIR is the target system root directory")
p.add_option("--no-module", action="store_true", default=False,
dest="no_module", help="disable the installation of the sip "
"module [default: enabled]")
p.add_option("--no-tools", action="store_true", default=False,
dest="no_tools", help="disable the building of the code generator "
"and the installation of the build system [default: enabled]")
p.add_option("--use-qmake", action="store_true", default=False,
dest="use_qmake", help="generate qmake .pro files instead of "
"Makefiles")
if sys.platform == 'darwin':
# Get the latest SDK to use as the default.
sdks = glob.glob(sdk_dir + '/MacOSX*.sdk')
if len(sdks) > 0:
sdks.sort()
_, default_sdk = os.path.split(sdks[-1])
else:
default_sdk = 'MacOSX10.4u.sdk'
g = optparse.OptionGroup(p, title="MacOS X Configuration")
g.add_option("--arch", action="append", default=[], dest="arch",
choices=["i386", "x86_64", "ppc"],
help="build for architecture ARCH")
g.add_option("--deployment-target", action="store", default='',
metavar="VERSION", dest="deployment_target",
help="set the value of the MACOSX_DEPLOYMENT_TARGET "
"environment variable in generated Makefiles")
g.add_option("-n", "--universal", action="store_true", default=False,
dest="universal",
help="build the SIP code generator and module as universal "
"binaries")
g.add_option("-s", "--sdk", action="store", default=default_sdk,
type="string", metavar="SDK", dest="sdk",
help="the name of the SDK used when building universal "
"binaries [default: %s]" % default_sdk)
p.add_option_group(g)
# Querying.
g = optparse.OptionGroup(p, title="Query")
g.add_option("--show-platforms", action="store_true", default=False,
dest="show_platforms", help="show the list of supported "
"platform/compiler configurations")
g.add_option("--show-build-macros", action="store_true", default=False,
dest="show_build_macros", help="show the list of supported build "
"macros")
p.add_option_group(g)
# Installation.
g = optparse.OptionGroup(p, title="Installation")
g.add_option("-b", "--bindir", action="callback", type="string",
metavar="DIR", dest="sipbindir", callback=store_abspath,
help="where the SIP code generator will be installed [default: "
"%s]" % plat_bin_dir)
g.add_option("-d", "--destdir", action="callback", type="string",
metavar="DIR", dest="destdir", callback=store_abspath,
help="where the SIP module will be installed [default: "
"%s]" % plat_py_site_dir)
g.add_option("-e", "--incdir", action="callback", type="string",
metavar="DIR", dest="sipincdir", callback=store_abspath,
help="where the SIP header file will be installed [default: "
"%s]" % plat_py_venv_inc_dir)
g.add_option("-v", "--sipdir", action="callback", type="string",
metavar="DIR", dest="sipsipdir", callback=store_abspath,
help="where .sip files are normally installed [default: "
"%s]" % plat_sip_dir)
g.add_option("--no-dist-info", action="store_false", default=True,
dest="distinfo",
help="do not install the dist-info directory")
g.add_option("--no-stubs", "--no-pyi", action="store_false", default=True,
dest="pyi",
help="do not install the sip.pyi stub file")
g.add_option("--stubsdir", "--pyidir", action="callback", type="string",
metavar="DIR", dest="pyidir", callback=store_abspath,
help="where the sip.pyi stub file will be installed [default: "
"%s]" % plat_py_site_dir)
p.add_option_group(g)
return p
def main(argv):
"""Create the configuration module module.
argv is the list of command line arguments.
"""
siputils.inform("This is SIP %s for Python %s on %s." % (sip_version_str, sys.version.split()[0], sys.platform))
global py_version, build_platform
if py_version < 0x020300:
siputils.error("This version of SIP requires Python v2.3 or later.")
# Basic initialisation.
set_platform_directories()
set_build_platform()
# Build up the list of valid specs.
for s in os.listdir(os.path.join(src_dir, "specs")):
platform_specs.append(s)
# Determine the directory containing the default OS/X SDK.
if sys.platform == 'darwin':
for sdk_dir in MACOSX_SDK_DIRS:
if os.path.isdir(sdk_dir):
break
else:
sdk_dir = MACOSX_SDK_DIRS[0]
else:
sdk_dir = ''
# Parse the command line.
global opts
p = create_optparser(sdk_dir)
opts, args = p.parse_args()
# Override defaults that affect subsequent configuration.
if opts.target_py_version is not None:
py_version = opts.target_py_version
if opts.sysroot is not None:
global sysroot
sysroot = opts.sysroot
# Make sure MacOS specific options get initialised.
if sys.platform != 'darwin':
opts.universal = ''
opts.arch = []
opts.sdk = ''
opts.deployment_target = ''
# Handle the query options.
if opts.show_platforms or opts.show_build_macros:
if opts.show_platforms:
show_platforms()
if opts.show_build_macros:
show_macros()
sys.exit()
# Convert the list 'arch' option to a string. Multiple architectures
# imply a universal binary.
if len(opts.arch) > 1:
opts.universal = True
opts.arch = ' '.join(opts.arch)
# Convert the boolean 'universal' option to a string.
if opts.universal:
if '/' in opts.sdk:
opts.universal = os.path.abspath(opts.sdk)
else:
opts.universal = sdk_dir + '/' + opts.sdk
if not os.path.isdir(opts.universal):
siputils.error("Unable to find the SDK directory %s. Use the --sdk flag to specify the name of the SDK or its full path." % opts.universal)
if opts.arch == '':
opts.arch = DEFAULT_MACOSX_ARCH
else:
opts.universal = ''
# No sip module also implies no stubs.
if opts.no_module:
opts.pyi = False
# Apply the overrides from any configuration file.
global plat_bin_dir, plat_py_conf_inc_dir, plat_py_inc_dir
global plat_py_lib_dir, plat_py_site_dir, plat_sip_dir
global sip_bin_dir, sip_inc_dir, sip_root_dir, sip_module_dir, sip_sip_dir
global sip_module_dest_dir, sip_module_name, pyi_dir
# Set defaults.
sip_bin_dir = plat_bin_dir
sip_inc_dir = plat_py_venv_inc_dir
sip_root_dir = plat_py_site_dir
sip_sip_dir = plat_sip_dir
if opts.config_file is not None:
update_from_configuration_file(opts.config_file)
elif sysroot != '':
plat_bin_dir = apply_sysroot(plat_bin_dir)
plat_py_conf_inc_dir = apply_sysroot(plat_py_conf_inc_dir)
plat_py_inc_dir = apply_sysroot(plat_py_inc_dir)
plat_py_lib_dir = apply_sysroot(plat_py_lib_dir)
plat_py_site_dir = apply_sysroot(plat_py_site_dir)
plat_sip_dir = apply_sysroot(plat_sip_dir)
sip_bin_dir = apply_sysroot(sip_bin_dir)
sip_inc_dir = apply_sysroot(sip_inc_dir)
sip_root_dir = apply_sysroot(sip_root_dir)
sip_sip_dir = apply_sysroot(sip_sip_dir)
# Fix the name of the sip module.
if opts.destdir is not None:
sip_root_dir = opts.destdir
# The module directory might have been set in a configuration file.
if not sip_module_dir:
sip_module_dir = sip_root_dir
sip_module_name = opts.sip_module
module_path = sip_module_name.split(".")
# Check the module name is valid.
for m in module_path:
# Python v2 doesn't have isidentifier() but we don't bother to provide
# an alternative.
try:
if keyword.iskeyword(m) or not m.isidentifier():
siputils.error(
"'%s' is an invalid Python module name." % sip_module_name)
except AttributeError:
pass
if len(module_path) > 1:
del module_path[-1]
module_path.insert(0, sip_module_dir)
sip_module_dest_dir = os.path.join(*module_path)
else:
sip_module_dest_dir = sip_module_dir
# Override from the command line.
if opts.platform is not None:
build_platform = opts.platform
if opts.sipbindir is not None:
sip_bin_dir = opts.sipbindir
if opts.sipincdir is not None:
sip_inc_dir = opts.sipincdir
if opts.sipsipdir is not None:
sip_sip_dir = opts.sipsipdir
if opts.pyidir is not None:
pyi_dir = opts.pyidir
else:
pyi_dir = sip_module_dest_dir
# Get the platform specific macros for building.
macros = siputils.parse_build_macros(
os.path.join(src_dir, "specs", build_platform), build_macro_names,
args)
if macros is None:
siputils.error("Unsupported macro name specified. Use the --show-build-macros flag to see a list of supported macros.")
sys.exit(2)
# Tell the user what's been found.
inform_user()
# Install the configuration module.
create_config("sipconfig.py", os.path.join(src_dir, "siputils.py"),
macros)
# Create the Makefiles.
create_makefiles(macros)
###############################################################################
# The script starts here.
###############################################################################
if __name__ == "__main__":
try:
main(sys.argv)
except SystemExit:
raise
except:
sys.stderr.write(
"""An internal error occured. Please report all the output from the program,
including the following traceback, to support@riverbankcomputing.com.
""")
raise
| 34.148325 | 151 | 0.612414 | # This script handles the SIP configuration and generates the Makefiles.
#
# Copyright (c) 2019 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import keyword
import sys
import os
import glob
import optparse
from distutils import sysconfig
try:
from importlib import invalidate_caches
except ImportError:
invalidate_caches = lambda: None
import siputils
# Initialise the globals.
sip_version = 0x041315
sip_version_str = "4.19.21"
py_version = sys.hexversion >> 8
py_platform = sys.platform
plat_py_site_dir = None
plat_py_inc_dir = None
plat_py_venv_inc_dir = None
plat_py_conf_inc_dir = None
plat_py_lib_dir = None
plat_sip_dir = None
plat_bin_dir = None
platform_specs = []
sip_bin_dir = ''
sip_inc_dir = ''
sip_root_dir = ''
sip_module_dir = ''
sip_module_dest_dir = ''
sip_sip_dir = ''
pyi_dir = ''
sysroot = ''
src_dir = os.path.dirname(os.path.abspath(__file__))
sip_module_name = None
build_platform = None
# Constants.
DEFAULT_MACOSX_ARCH = 'i386 ppc'
MACOSX_SDK_DIRS = ('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs', '/Developer/SDKs')
# The names of build macros extracted from the platform specific configuration
# files.
build_macro_names = [
"DEFINES", "CONFIG",
"CC",
"CFLAGS",
"CFLAGS_RELEASE", "CFLAGS_DEBUG",
"CFLAGS_CONSOLE", "CFLAGS_SHLIB", "CFLAGS_APP", "CFLAGS_THREAD",
"CFLAGS_MT", "CFLAGS_MT_DBG", "CFLAGS_MT_DLL", "CFLAGS_MT_DLLDBG",
"CFLAGS_EXCEPTIONS_ON", "CFLAGS_EXCEPTIONS_OFF",
"CFLAGS_RTTI_ON", "CFLAGS_RTTI_OFF",
"CFLAGS_STL_ON", "CFLAGS_STL_OFF",
"CFLAGS_WARN_ON", "CFLAGS_WARN_OFF",
"CHK_DIR_EXISTS", "COPY",
"CXX",
"CXXFLAGS",
"CXXFLAGS_RELEASE", "CXXFLAGS_DEBUG",
"CXXFLAGS_CONSOLE", "CXXFLAGS_SHLIB", "CXXFLAGS_APP", "CXXFLAGS_THREAD",
"CXXFLAGS_MT", "CXXFLAGS_MT_DBG", "CXXFLAGS_MT_DLL", "CXXFLAGS_MT_DLLDBG",
"CXXFLAGS_EXCEPTIONS_ON", "CXXFLAGS_EXCEPTIONS_OFF",
"CXXFLAGS_RTTI_ON", "CXXFLAGS_RTTI_OFF",
"CXXFLAGS_STL_ON", "CXXFLAGS_STL_OFF",
"CXXFLAGS_WARN_ON", "CXXFLAGS_WARN_OFF",
"DEL_FILE",
"EXTENSION_SHLIB", "EXTENSION_PLUGIN",
"INCDIR", "INCDIR_X11", "INCDIR_OPENGL",
"LIBS_CORE", "LIBS_GUI", "LIBS_NETWORK", "LIBS_OPENGL", "LIBS_WEBKIT",
"LINK", "LINK_SHLIB", "AIX_SHLIB", "LINK_SHLIB_CMD",
"LFLAGS", "LFLAGS_CONSOLE", "LFLAGS_CONSOLE_DLL", "LFLAGS_DEBUG",
"LFLAGS_DLL",
"LFLAGS_PLUGIN", "LFLAGS_RELEASE", "LFLAGS_SHLIB", "LFLAGS_SONAME",
"LFLAGS_THREAD", "LFLAGS_WINDOWS", "LFLAGS_WINDOWS_DLL", "LFLAGS_OPENGL",
"LIBDIR", "LIBDIR_X11", "LIBDIR_OPENGL",
"LIBS", "LIBS_CONSOLE", "LIBS_RT",
"LIBS_RTMT", "LIBS_THREAD", "LIBS_WINDOWS", "LIBS_X11",
"MAKEFILE_GENERATOR",
"MKDIR",
"RPATH", "LFLAGS_RPATH",
"AR", "RANLIB", "LIB", "STRIP"
]
def show_platforms():
"""Display the different platform/compilers.
"""
sys.stdout.write("""
The following platform/compiler configurations are supported:
""")
platform_specs.sort()
sys.stdout.write(siputils.format(", ".join(platform_specs), leftmargin=2))
sys.stdout.write("\n\n")
def show_macros():
"""Display the different build macros.
"""
sys.stdout.write("""
The following options may be used to adjust the compiler configuration:
""")
build_macro_names.sort()
sys.stdout.write(siputils.format(", ".join(build_macro_names), leftmargin=2))
sys.stdout.write("\n\n")
def set_build_platform():
""" Initialise the build platform. """
global build_platform
# Set the platform specific default specification.
platdefaults = {
"aix": "aix-xlc",
"bsd": "bsdi-g++",
"cygwin": "cygwin-g++",
"darwin": "macx-g++",
"dgux": "dgux-g++",
"freebsd": "freebsd-g++",
"gnu": "hurd-g++",
"hp-ux": "hpux-acc",
"irix": "irix-cc",
"linux": "linux-g++",
"lynxos": "lynxos-g++",
"netbsd": "netbsd-g++",
"openbsd": "openbsd-g++",
"openunix": "unixware-cc",
"osf1": "tru64-cxx",
"qnx": "qnx-g++",
"reliantunix": "reliant-cds",
"sco_sv": "sco-cc",
"sinix": "reliant-cds",
"sunos5": "solaris-cc",
"ultrix": "ultrix-g++",
"unix_sv": "unixware-g++",
"unixware": "unixware-cc",
"haiku1": "haiku-g++"
}
build_platform = "none"
if py_platform == "win32":
if py_version >= 0x030500:
build_platform = "win32-msvc2015"
elif py_version >= 0x030300:
build_platform = "win32-msvc2010"
elif py_version >= 0x020600:
build_platform = "win32-msvc2008"
elif py_version >= 0x020400:
build_platform = "win32-msvc.net"
else:
build_platform = "win32-msvc"
else:
for pd in list(platdefaults.keys()):
if py_platform[:len(pd)] == pd:
build_platform = platdefaults[pd]
break
def inform_user():
    """ Tell the user the option values that are going to be used.

    Purely informational: every line is printed via siputils.inform() and
    nothing here changes the configuration state.
    """
    if not opts.no_tools:
        siputils.inform("The SIP code generator will be installed in %s." % sip_bin_dir)
    siputils.inform("The sip.h header file will be installed in %s." % sip_inc_dir)
    if not opts.no_module:
        siputils.inform("The %s module will be installed in %s." % (sip_module_name, sip_module_dest_dir))
    if opts.pyi:
        siputils.inform("The sip.pyi stub file will be installed in %s." % pyi_dir)
    if opts.static:
        siputils.inform("The %s module will be built as a static library." % sip_module_name)
    siputils.inform("The default directory to install .sip files in is %s." % sip_sip_dir)
    # Bug fix: --use-qmake is declared with default=False (not None), so the
    # original test 'opts.use_qmake is None' was never true and the platform
    # line was never printed.  Test for falseness instead, i.e. only mention
    # the Makefile platform/compiler specification when qmake is not used.
    if not opts.use_qmake:
        siputils.inform("The platform/compiler configuration is %s." % build_platform)
    if opts.arch:
        siputils.inform("MacOS/X binaries will be created for %s." % (", ".join(opts.arch.split())))
    if opts.universal:
        siputils.inform("MacOS/X universal binaries will be created using %s." % opts.universal)
    if opts.deployment_target:
        siputils.inform("MacOS/X deployment target is %s." % opts.deployment_target)
def set_platform_directories():
    """ Initialise the global variables relating to platform-specific
    directories.

    Sets plat_py_site_dir, plat_py_inc_dir, plat_py_venv_inc_dir,
    plat_py_conf_inc_dir, plat_bin_dir, plat_py_lib_dir and plat_sip_dir
    from the running interpreter via distutils.sysconfig.
    """
    global plat_py_site_dir, plat_py_inc_dir, plat_py_venv_inc_dir
    global plat_py_conf_inc_dir, plat_bin_dir, plat_py_lib_dir, plat_sip_dir
    # We trust distutils for some stuff.
    plat_py_site_dir = sysconfig.get_python_lib(plat_specific=1)
    plat_py_inc_dir = sysconfig.get_python_inc()
    # Headers as seen from a venv prefix (may equal plat_py_inc_dir when not
    # running in a virtual environment).
    plat_py_venv_inc_dir = sysconfig.get_python_inc(prefix=sys.prefix)
    plat_py_conf_inc_dir = os.path.dirname(sysconfig.get_config_h_filename())
    if sys.platform == "win32":
        bin_dir = sys.exec_prefix
        try:
            # Python v3.3 and later.
            base_prefix = sys.base_prefix
            if sys.exec_prefix != sys.base_exec_prefix:
                # A venv: executables live in the Scripts sub-directory.
                bin_dir += '\\Scripts'
        except AttributeError:
            try:
                # virtualenv for Python v2.
                base_prefix = sys.real_prefix
                bin_dir += '\\Scripts'
            except AttributeError:
                # We can't detect the base prefix in Python v3 prior to v3.3.
                base_prefix = sys.prefix
        # The import libraries are always in the base installation.
        plat_py_lib_dir = base_prefix + "\\libs"
        plat_bin_dir = bin_dir
        plat_sip_dir = sys.prefix + "\\sip"
    else:
        lib_dir = sysconfig.get_python_lib(plat_specific=1, standard_lib=1)
        plat_py_lib_dir = lib_dir + "/config"
        plat_bin_dir = sys.exec_prefix + "/bin"
        plat_sip_dir = sys.prefix + "/share/sip"
def create_config(module, template, macros):
    """Create the SIP configuration module so that it can be imported by build
    scripts.

    module is the module file name.
    template is the template file name.
    macros is the dictionary of build macros.
    """
    siputils.inform("Creating %s..." % module)
    # The substitution values written into the generated sipconfig module;
    # they capture the result of this configure run.
    content = {
        "sip_config_args": sys.argv[1:],
        "sip_version": sip_version,
        "sip_version_str": sip_version_str,
        "platform": build_platform,
        "sip_bin": os.path.join(sip_bin_dir, "sip"),
        "sip_inc_dir": sip_inc_dir,
        "sip_root_dir": sip_root_dir,
        "sip_module_dir": sip_module_dir,
        "default_bin_dir": plat_bin_dir,
        "default_mod_dir": plat_py_site_dir,
        "default_sip_dir": sip_sip_dir,
        "py_version": py_version,
        "py_inc_dir": plat_py_inc_dir,
        "py_conf_inc_dir": plat_py_conf_inc_dir,
        "py_lib_dir": plat_py_lib_dir,
        "universal": opts.universal,
        "arch": opts.arch,
        "deployment_target": opts.deployment_target,
        "qt_framework": 0
    }
    siputils.create_config_module(module, template, content, macros)
def create_makefiles(macros):
    """Create the Makefiles.

    macros is the dictionary of platform specific build macros.

    Depending on opts.use_qmake this writes either qmake .pro files or
    sipconfig-generated Makefiles for the top level, the sipgen code
    generator and the siplib module.  It also records every installed file
    so that installed.txt (used for the dist-info RECORD) can be written.
    """
    # Bootstrap. Make sure we get the right one.
    sys.path.insert(0, os.path.curdir)
    invalidate_caches()
    import sipconfig
    cfg = sipconfig.Configuration()
    cfg.set_build_macros(macros)
    # Accumulators: all_installs collects every (source(s), destination)
    # pair; top_installs/gen_installs feed the top-level and sipgen builds.
    all_installs = []
    top_installs = []
    gen_installs = []
    subdirs = []
    if not opts.no_tools:
        subdirs.append('sipgen')
        top_installs.append(
                (["sipconfig.py", os.path.join(src_dir, "sipdistutils.py")],
                cfg.sip_root_dir))
        gen_installs.append(
                (os.path.join(src_dir, "siplib", "sip.h"), cfg.sip_inc_dir))
    if not opts.no_module:
        subdirs.append('siplib')
    all_installs.extend(top_installs)
    all_installs.extend(gen_installs)
    # The command to run to generate the dist-info directory.
    mk_distinfo = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            'mk_distinfo.py')
    distinfo_dir = os.path.join(cfg.sip_module_dir,
            '%s-%s.dist-info' % (sip_module_name.replace('.', '_'),
            sip_version_str))
    # --- Top level build file ---
    if opts.use_qmake:
        # qmake uses $(INSTALL_ROOT) as the staging prefix.
        run_mk_distinfo = '%s %s \\\"$(INSTALL_ROOT)\\\" %s installed.txt' % (
                sys.executable, mk_distinfo, distinfo_dir)
        sipconfig.inform("Creating top level .pro file...")
        pro = open("sip.pro", "w")
        pro.write("TEMPLATE = subdirs\n")
        pro.write("SUBDIRS = %s\n" % " ".join(subdirs))
        if top_installs:
            # There will only be one element.
            files, path = top_installs[0]
            pro.write("\n")
            pro.write("build_system.files = %s\n" % " ".join(files))
            pro.write("build_system.path = %s\n" % quote(path))
            pro.write("INSTALLS += build_system\n")
        if opts.distinfo:
            pro.write("\n")
            pro.write("distinfo.extra = %s\n" % run_mk_distinfo)
            pro.write("distinfo.path = %s\n" % quote(cfg.sip_module_dir))
            pro.write("INSTALLS += distinfo\n")
        pro.close()
    else:
        # Make uses $(DESTDIR) as the staging prefix.
        run_mk_distinfo = '%s %s "$(DESTDIR)" %s installed.txt' % (
                sys.executable, mk_distinfo, distinfo_dir)
        sipconfig.inform("Creating top level Makefile...")
        # Note that mk_distinfo.py won't exist if we are building from the
        # repository.
        if opts.distinfo and os.path.isfile(mk_distinfo):
            top_installs.append((run_mk_distinfo, None))
        sipconfig.ParentMakefile(
            configuration=cfg,
            subdirs=subdirs,
            installs=top_installs
        ).generate()
    # --- Code generator build file ---
    if opts.use_qmake:
        sipconfig.inform("Creating sip code generator .pro file...")
        pro = open(os.path.join("sipgen", "sipgen.pro"), "w")
        pro.write("TEMPLATE = app\n")
        pro.write("TARGET = sip\n")
        pro.write("CONFIG -= qt app_bundle\n")
        pro.write("CONFIG += warn_on exceptions_off console %s\n" % (
                ("debug" if opts.debug else "release")))
        pro.write("\n")
        pro.write("# Work around QTBUG-39300.\n")
        pro.write("CONFIG -= android_install\n")
        pro.write("\n")
        pro.write("target.path = %s\n" % os.path.dirname(cfg.sip_bin))
        pro.write("INSTALLS += target\n")
        c_sources = get_sources("sipgen", "*.c")
        pro.write("\n")
        pro.write("SOURCES = %s\n" % " ".join(
                [qmake_quote(s) for s in c_sources]))
        headers = get_sources("sipgen", "*.h")
        pro.write("\n")
        pro.write("HEADERS = %s\n" % " ".join(
                [qmake_quote(h) for h in headers]))
        if gen_installs:
            # There will only be one element.
            files, path = gen_installs[0]
            pro.write("\n")
            pro.write("sip_h.files = %s\n" % " ".join(files))
            pro.write("sip_h.path = %s\n" % quote(path))
            pro.write("INSTALLS += sip_h\n")
        pro.close()
    else:
        sipconfig.inform("Creating sip code generator Makefile...")
        sipconfig.ProgramMakefile(
            configuration=cfg,
            build_file=os.path.join(src_dir, "sipgen", "sipgen.sbf"),
            dir="sipgen",
            install_dir=os.path.dirname(cfg.sip_bin),
            installs=gen_installs,
            console=1,
            warnings=1,
            universal=opts.universal,
            arch=opts.arch,
            deployment_target=opts.deployment_target
        ).generate()
    # The implied code generator installs.
    if not opts.no_tools:
        sip_dir, sip_exe = os.path.split(cfg.sip_bin)
        if sys.platform == 'win32':
            sip_exe += '.exe'
        all_installs.append((sip_exe, sip_dir))
    # The module installs.
    module_installs=[]
    if opts.pyi:
        module_installs.append((os.path.join(src_dir, 'sip.pyi'), pyi_dir))
    all_installs.extend(module_installs)
    if not opts.no_module:
        # The built artefact name depends on platform and static/shared.
        if sys.platform == 'win32':
            mod = 'sip.lib' if opts.static else 'sip.pyd'
        else:
            mod = 'libsip.a' if opts.static else 'sip.so'
        all_installs.append((mod, sip_module_dest_dir))
    # --- sip module build file ---
    if opts.use_qmake:
        sipconfig.inform("Creating sip module .pro file...")
        pro = open(os.path.join("siplib", "siplib.pro"), "w")
        pro.write("TEMPLATE = lib\n")
        pro.write("TARGET = sip\n")
        pro.write("CONFIG -= qt\n")
        pro.write("CONFIG += warn_on exceptions_off %s %s\n" % (
                ("staticlib" if opts.static else "plugin plugin_bundle"),
                ("debug" if opts.debug else "release")))
        pro.write("\n")
        pro.write("# Work around QTBUG-39300.\n")
        pro.write("CONFIG -= android_install\n")
        pro.write("\n")
        pro.write("INCLUDEPATH += %s\n" % cfg.py_inc_dir)
        if cfg.py_conf_inc_dir != cfg.py_inc_dir:
            pro.write("INCLUDEPATH += %s\n" % cfg.py_conf_inc_dir)
        if sip_module_name != 'sip':
            pro.write("\n")
            pro.write('DEFINES += SIP_MODULE_NAME=%s\n' % sip_module_name)
            base_name = sip_module_name.split('.')[-1]
            if base_name != 'sip':
                pro.write('DEFINES += SIP_MODULE_BASENAME=%s\n' % base_name)
        if not opts.static:
            # These only need to be correct for Windows.
            debug_suffix = "_d" if opts.debug else ""
            link_lib_dir = quote("-L" + cfg.py_lib_dir)
            pro.write("""
win32 {
    PY_MODULE = sip%s.pyd
    PY_MODULE_SRC = $(DESTDIR_TARGET)
    LIBS += %s
} else {
    PY_MODULE = sip.so
    macx {
        PY_MODULE_SRC = $(TARGET).plugin/Contents/MacOS/$(TARGET)
        QMAKE_LFLAGS += "-undefined dynamic_lookup"
    } else {
        PY_MODULE_SRC = $(TARGET)
    }
}
QMAKE_POST_LINK = $(COPY_FILE) $$PY_MODULE_SRC $$PY_MODULE
target.CONFIG = no_check_exist
target.files = $$PY_MODULE
""" % (debug_suffix, link_lib_dir))
            pro.write("\n")
            pro.write("target.path = %s\n" % sip_module_dest_dir)
            pro.write("INSTALLS += target\n")
        if opts.pyi:
            pro.write("\n")
            pro.write("sip_pyi.files = sip.pyi\n")
            pro.write("sip_pyi.path = %s\n" % pyi_dir)
            pro.write("INSTALLS += sip_pyi\n")
        c_sources = get_sources("siplib", "*.c")
        cpp_sources = get_sources("siplib", "*.cpp")
        pro.write("\n")
        pro.write("SOURCES = %s\n" % " ".join(
                [qmake_quote(s) for s in c_sources + cpp_sources]))
        headers = get_sources("siplib", "*.h")
        pro.write("\n")
        pro.write("HEADERS = %s\n" % " ".join(
                [qmake_quote(h) for h in headers]))
        pro.close()
    else:
        sipconfig.inform("Creating sip module Makefile...")
        build_dir = os.getcwd()
        makefile = sipconfig.ModuleMakefile(
            configuration=cfg,
            build_file=os.path.join(src_dir, "siplib", "siplib.sbf"),
            dir="siplib",
            install_dir=sip_module_dest_dir,
            installs=module_installs,
            console=1,
            warnings=1,
            static=opts.static,
            debug=opts.debug,
            universal=opts.universal,
            arch=opts.arch,
            deployment_target=opts.deployment_target
        )
        if sip_module_name != 'sip':
            makefile.DEFINES.append('SIP_MODULE_NAME=%s' % sip_module_name)
            base_name = sip_module_name.split('.')[-1]
            if base_name != 'sip':
                makefile.DEFINES.append('SIP_MODULE_BASENAME=%s' % base_name)
        if src_dir != build_dir:
            # An out-of-tree build needs to see the sources in src_dir.
            src_siplib_dir = os.path.join(src_dir, "siplib")
            makefile.extra_include_dirs.append(src_siplib_dir)
            makefile.extra_source_dirs.append(src_siplib_dir)
        makefile.generate()
    # Create the file containing all installed files.
    if opts.distinfo:
        installed = open('installed.txt', 'w')
        for sources, dst in all_installs:
            if not isinstance(sources, (list, tuple)):
                sources = [sources]
            for src in sources:
                installed.write(
                        os.path.join(dst, os.path.basename(src)) + '\n')
        installed.close()
def get_sources(sources_dir, ext):
    """ Get the quoted files with the specified extension from a directory
    below src_dir.
    """
    pattern = os.path.join(src_dir, sources_dir, ext)
    return [quote(name) for name in glob.glob(pattern)]
def quote(path):
    """ Return *path* surrounded by double quotes when it contains a space,
    otherwise unchanged.
    """
    return '"' + path + '"' if ' ' in path else path
def qmake_quote(path):
    """ Return *path* wrapped in $$quote() for qmake when it contains a
    space, otherwise unchanged.  path is the path.
    """
    if ' ' not in path:
        return path
    return '$$quote(%s)' % path
# Look out for recursive definitions.  This is a stack of the names
# currently being extrapolated by _get_configuration_value().
_extrapolating = []

def _get_configuration_value(config, name, default=None):
    """ Get a configuration value while extrapolating.

    config is the dict of raw configuration values.
    name is the key to look up.
    default is returned when the key is absent; when it is None a missing
    key is a fatal error.

    Any '%(other)' references inside the value are replaced recursively by
    the corresponding configuration values.
    """
    value = config.get(name)
    if value is None:
        if default is None:
            siputils.error("Configuration file references non-existent name '%s'." % name)
        return default
    # Repeatedly expand the leftmost '%(name)' reference until none remain.
    parts = value.split('%(', 1)
    while len(parts) == 2:
        prefix, tail = parts
        parts = tail.split(')', 1)
        if len(parts) != 2:
            siputils.error("Configuration file contains unterminated extrapolated name '%s'." % tail)
        xtra_name, suffix = parts
        # A name already on the stack means a reference cycle.
        if xtra_name in _extrapolating:
            siputils.error("Configuration file contains a recursive reference to '%s'." % xtra_name)
        _extrapolating.append(xtra_name)
        xtra_value = _get_configuration_value(config, xtra_name)
        _extrapolating.pop()
        value = prefix + xtra_value + suffix
        parts = value.split('%(', 1)
    return value
def update_from_configuration_file(config_file):
    """ Update a number of globals from values read from a configuration file.

    config_file is the name of the configuration file.  Each significant
    line must have the form 'name = value'; '#' starts a comment and blank
    lines are ignored.  Values may refer to other values using '%(name)'
    extrapolations (see _get_configuration_value()).
    """
    siputils.inform("Reading configuration from %s..." % config_file)
    config = {}
    # Read the file into the dict.  (Plain open()/close() is kept rather
    # than a 'with' statement for compatibility with very old Pythons.)
    cfg = open(config_file)
    line_nr = 0
    for l in cfg:
        line_nr += 1
        # Strip comments and blank lines.
        l = l.split('#')[0].strip()
        if l == '':
            continue
        parts = l.split('=', 1)
        if len(parts) == 2:
            name = parts[0].strip()
            value = parts[1].strip()
        else:
            name = value = ''
        if name == '' or value == '':
            siputils.error("%s:%d: Invalid line." % (config_file, line_nr))
        config[name] = value
        # (The dead 'last_name = name' assignment has been removed - the
        # local was never read.)
    cfg.close()
    # Enforce the presets.  These names are always available for
    # extrapolation and cannot be overridden by the file.
    version = siputils.version_to_string(py_version).split('.')
    config['py_major'] = version[0]
    config['py_minor'] = version[1]
    config['sysroot'] = sysroot
    # Override the relevant values.
    global py_platform, plat_py_conf_inc_dir, plat_py_inc_dir, plat_py_lib_dir
    global sip_bin_dir, sip_inc_dir, sip_module_dir, sip_sip_dir
    py_platform = _get_configuration_value(config, 'py_platform', py_platform)
    plat_py_inc_dir = _get_configuration_value(config, 'py_inc_dir',
            plat_py_inc_dir)
    plat_py_lib_dir = _get_configuration_value(config, 'py_pylib_dir',
            plat_py_lib_dir)
    # The pyconfig.h directory defaults to the Python.h directory.
    plat_py_conf_inc_dir = _get_configuration_value(config, 'py_conf_inc_dir',
            plat_py_inc_dir)
    sip_bin_dir = _get_configuration_value(config, 'sip_bin_dir', sip_bin_dir)
    sip_module_dir = _get_configuration_value(config, 'sip_module_dir',
            sip_module_dir)
    # Note that this defaults to any 'py_inc_dir' specified in the
    # configuration file.
    sip_inc_dir = _get_configuration_value(config, 'sip_inc_dir',
            plat_py_inc_dir)
    # Note that this is only used when creating sipconfig.py.
    sip_sip_dir = _get_configuration_value(config, 'sip_sip_dir', sip_sip_dir)
def create_optparser(sdk_dir):
    """Create the parser for the command line.

    sdk_dir is the directory containing the MacOS SDKs (used only on
    Darwin to compute the default --sdk value).  Returns the configured
    optparse.OptionParser.
    """
    def store_abspath(option, opt_str, value, parser):
        """Callback storing the absolute form of a path option."""
        setattr(parser.values, option.dest, os.path.abspath(value))
    def store_abspath_dir(option, opt_str, value, parser):
        """Callback storing an absolute path that must be a directory."""
        if not os.path.isdir(value):
            raise optparse.OptionValueError("'%s' is not a directory" % value)
        setattr(parser.values, option.dest, os.path.abspath(value))
    def store_abspath_file(option, opt_str, value, parser):
        """Callback storing an absolute path that must be a file."""
        if not os.path.isfile(value):
            raise optparse.OptionValueError("'%s' is not a file" % value)
        setattr(parser.values, option.dest, os.path.abspath(value))
    def store_version(option, opt_str, value, parser):
        """Callback converting a major.minor string to an encoded version."""
        version = siputils.version_from_string(value)
        if version is None:
            raise optparse.OptionValueError(
                    "'%s' is not a valid version number" % value)
        setattr(parser.values, option.dest, version)
    p = optparse.OptionParser(usage="python %prog [opts] [macro=value] "
            "[macro+=value]", version=sip_version_str)
    # Note: we don't use %default to be compatible with Python 2.3.
    p.add_option("-k", "--static", action="store_true", default=False,
            dest="static", help="build the SIP module as a static library")
    p.add_option("-p", "--platform", action="store", type="string",
            metavar="PLATFORM", dest="platform", help="the platform/compiler "
            "configuration [default: %s]" % build_platform)
    p.add_option("-u", "--debug", action="store_true", default=False,
            help="build with debugging symbols")
    p.add_option("--sip-module", action="store", default="sip", type="string",
            metavar="NAME", dest="sip_module", help="the package.module name "
            "of the sip module [default: sip]")
    p.add_option("--configuration", dest='config_file', type='string',
            action='callback', callback=store_abspath_file, metavar="FILE",
            help="FILE contains the target configuration")
    p.add_option("--target-py-version", dest='target_py_version',
            type='string', action='callback', callback=store_version,
            metavar="VERSION",
            help="the major.minor version of the target Python [default: "
            "%s]" % siputils.version_to_string(py_version, parts=2))
    p.add_option("--sysroot", dest='sysroot', type='string', action='callback',
            callback=store_abspath_dir, metavar="DIR",
            help="DIR is the target system root directory")
    p.add_option("--no-module", action="store_true", default=False,
            dest="no_module", help="disable the installation of the sip "
            "module [default: enabled]")
    p.add_option("--no-tools", action="store_true", default=False,
            dest="no_tools", help="disable the building of the code generator "
            "and the installation of the build system [default: enabled]")
    p.add_option("--use-qmake", action="store_true", default=False,
            dest="use_qmake", help="generate qmake .pro files instead of "
            "Makefiles")
    if sys.platform == 'darwin':
        # Get the latest SDK to use as the default.
        sdks = glob.glob(sdk_dir + '/MacOSX*.sdk')
        if len(sdks) > 0:
            sdks.sort()
            _, default_sdk = os.path.split(sdks[-1])
        else:
            default_sdk = 'MacOSX10.4u.sdk'
        g = optparse.OptionGroup(p, title="MacOS X Configuration")
        g.add_option("--arch", action="append", default=[], dest="arch",
                choices=["i386", "x86_64", "ppc"],
                help="build for architecture ARCH")
        g.add_option("--deployment-target", action="store", default='',
                metavar="VERSION", dest="deployment_target",
                help="set the value of the MACOSX_DEPLOYMENT_TARGET "
                "environment variable in generated Makefiles")
        g.add_option("-n", "--universal", action="store_true", default=False,
                dest="universal",
                help="build the SIP code generator and module as universal "
                "binaries")
        g.add_option("-s", "--sdk", action="store", default=default_sdk,
                type="string", metavar="SDK", dest="sdk",
                help="the name of the SDK used when building universal "
                "binaries [default: %s]" % default_sdk)
        p.add_option_group(g)
    # Querying.
    g = optparse.OptionGroup(p, title="Query")
    g.add_option("--show-platforms", action="store_true", default=False,
            dest="show_platforms", help="show the list of supported "
            "platform/compiler configurations")
    g.add_option("--show-build-macros", action="store_true", default=False,
            dest="show_build_macros", help="show the list of supported build "
            "macros")
    p.add_option_group(g)
    # Installation.
    g = optparse.OptionGroup(p, title="Installation")
    g.add_option("-b", "--bindir", action="callback", type="string",
            metavar="DIR", dest="sipbindir", callback=store_abspath,
            help="where the SIP code generator will be installed [default: "
            "%s]" % plat_bin_dir)
    g.add_option("-d", "--destdir", action="callback", type="string",
            metavar="DIR", dest="destdir", callback=store_abspath,
            help="where the SIP module will be installed [default: "
            "%s]" % plat_py_site_dir)
    g.add_option("-e", "--incdir", action="callback", type="string",
            metavar="DIR", dest="sipincdir", callback=store_abspath,
            help="where the SIP header file will be installed [default: "
            "%s]" % plat_py_venv_inc_dir)
    g.add_option("-v", "--sipdir", action="callback", type="string",
            metavar="DIR", dest="sipsipdir", callback=store_abspath,
            help="where .sip files are normally installed [default: "
            "%s]" % plat_sip_dir)
    g.add_option("--no-dist-info", action="store_false", default=True,
            dest="distinfo",
            help="do not install the dist-info directory")
    g.add_option("--no-stubs", "--no-pyi", action="store_false", default=True,
            dest="pyi",
            help="do not install the sip.pyi stub file")
    g.add_option("--stubsdir", "--pyidir", action="callback", type="string",
            metavar="DIR", dest="pyidir", callback=store_abspath,
            help="where the sip.pyi stub file will be installed [default: "
            "%s]" % plat_py_site_dir)
    p.add_option_group(g)
    return p
def main(argv):
    """Create the configuration module module.

    argv is the list of command line arguments.

    This is the whole configure sequence: detect platform defaults, parse
    the command line, apply any configuration-file/sysroot overrides,
    report the resulting values and finally generate sipconfig.py and the
    build files.
    """
    siputils.inform("This is SIP %s for Python %s on %s." % (sip_version_str, sys.version.split()[0], sys.platform))
    global py_version, build_platform
    if py_version < 0x020300:
        siputils.error("This version of SIP requires Python v2.3 or later.")
    # Basic initialisation.
    set_platform_directories()
    set_build_platform()
    # Build up the list of valid specs.
    for s in os.listdir(os.path.join(src_dir, "specs")):
        platform_specs.append(s)
    # Determine the directory containing the default OS/X SDK.
    if sys.platform == 'darwin':
        # Use the first existing candidate; fall back to the first entry.
        for sdk_dir in MACOSX_SDK_DIRS:
            if os.path.isdir(sdk_dir):
                break
        else:
            sdk_dir = MACOSX_SDK_DIRS[0]
    else:
        sdk_dir = ''
    # Parse the command line.
    global opts
    p = create_optparser(sdk_dir)
    opts, args = p.parse_args()
    # Override defaults that affect subsequent configuration.
    if opts.target_py_version is not None:
        py_version = opts.target_py_version
    if opts.sysroot is not None:
        global sysroot
        sysroot = opts.sysroot
    # Make sure MacOS specific options get initialised.
    if sys.platform != 'darwin':
        opts.universal = ''
        opts.arch = []
        opts.sdk = ''
        opts.deployment_target = ''
    # Handle the query options.
    if opts.show_platforms or opts.show_build_macros:
        if opts.show_platforms:
            show_platforms()
        if opts.show_build_macros:
            show_macros()
        sys.exit()
    # Convert the list 'arch' option to a string. Multiple architectures
    # imply a universal binary.
    if len(opts.arch) > 1:
        opts.universal = True
    opts.arch = ' '.join(opts.arch)
    # Convert the boolean 'universal' option to a string.
    if opts.universal:
        if '/' in opts.sdk:
            # The SDK was given as a full path.
            opts.universal = os.path.abspath(opts.sdk)
        else:
            opts.universal = sdk_dir + '/' + opts.sdk
        if not os.path.isdir(opts.universal):
            siputils.error("Unable to find the SDK directory %s. Use the --sdk flag to specify the name of the SDK or its full path." % opts.universal)
        if opts.arch == '':
            opts.arch = DEFAULT_MACOSX_ARCH
    else:
        opts.universal = ''
    # No sip module also implies no stubs.
    if opts.no_module:
        opts.pyi = False
    # Apply the overrides from any configuration file.
    global plat_bin_dir, plat_py_conf_inc_dir, plat_py_inc_dir
    global plat_py_lib_dir, plat_py_site_dir, plat_sip_dir
    global sip_bin_dir, sip_inc_dir, sip_root_dir, sip_module_dir, sip_sip_dir
    global sip_module_dest_dir, sip_module_name, pyi_dir
    # Set defaults.
    sip_bin_dir = plat_bin_dir
    sip_inc_dir = plat_py_venv_inc_dir
    sip_root_dir = plat_py_site_dir
    sip_sip_dir = plat_sip_dir
    if opts.config_file is not None:
        update_from_configuration_file(opts.config_file)
    elif sysroot != '':
        def apply_sysroot(d):
            """Re-root a directory under sysroot when it is below sys.prefix."""
            if d.startswith(sys.prefix):
                d = sysroot + d[len(sys.prefix):]
            return d
        plat_bin_dir = apply_sysroot(plat_bin_dir)
        plat_py_conf_inc_dir = apply_sysroot(plat_py_conf_inc_dir)
        plat_py_inc_dir = apply_sysroot(plat_py_inc_dir)
        plat_py_lib_dir = apply_sysroot(plat_py_lib_dir)
        plat_py_site_dir = apply_sysroot(plat_py_site_dir)
        plat_sip_dir = apply_sysroot(plat_sip_dir)
        sip_bin_dir = apply_sysroot(sip_bin_dir)
        sip_inc_dir = apply_sysroot(sip_inc_dir)
        sip_root_dir = apply_sysroot(sip_root_dir)
        sip_sip_dir = apply_sysroot(sip_sip_dir)
    # Fix the name of the sip module.
    if opts.destdir is not None:
        sip_root_dir = opts.destdir
    # The module directory might have been set in a configuration file.
    if not sip_module_dir:
        sip_module_dir = sip_root_dir
    sip_module_name = opts.sip_module
    module_path = sip_module_name.split(".")
    # Check the module name is valid.
    for m in module_path:
        # Python v2 doesn't have isidentifier() but we don't bother to provide
        # an alternative.
        try:
            if keyword.iskeyword(m) or not m.isidentifier():
                siputils.error(
                        "'%s' is an invalid Python module name." % sip_module_name)
        except AttributeError:
            pass
    if len(module_path) > 1:
        # A dotted name: the module is installed in its package directory.
        del module_path[-1]
        module_path.insert(0, sip_module_dir)
        sip_module_dest_dir = os.path.join(*module_path)
    else:
        sip_module_dest_dir = sip_module_dir
    # Override from the command line.
    if opts.platform is not None:
        build_platform = opts.platform
    if opts.sipbindir is not None:
        sip_bin_dir = opts.sipbindir
    if opts.sipincdir is not None:
        sip_inc_dir = opts.sipincdir
    if opts.sipsipdir is not None:
        sip_sip_dir = opts.sipsipdir
    if opts.pyidir is not None:
        pyi_dir = opts.pyidir
    else:
        pyi_dir = sip_module_dest_dir
    # Get the platform specific macros for building.
    macros = siputils.parse_build_macros(
            os.path.join(src_dir, "specs", build_platform), build_macro_names,
            args)
    if macros is None:
        siputils.error("Unsupported macro name specified. Use the --show-build-macros flag to see a list of supported macros.")
        sys.exit(2)
    # Tell the user what's been found.
    inform_user()
    # Install the configuration module.
    create_config("sipconfig.py", os.path.join(src_dir, "siputils.py"),
            macros)
    # Create the Makefiles.
    create_makefiles(macros)
###############################################################################
# The script starts here.
###############################################################################
if __name__ == "__main__":
    try:
        main(sys.argv)
    except SystemExit:
        # Deliberate exits (errors already reported, --help, etc.) pass
        # through untouched.
        raise
    except:
        # Anything else is an internal error: ask for a bug report and
        # re-raise so the traceback is printed.
        sys.stderr.write(
"""An internal error occured. Please report all the output from the program,
including the following traceback, to support@riverbankcomputing.com.
""")
        raise
| 916 | 0 | 137 |
ea27b8518a7a50aa30fef48f4dcfe22e4fa2434b | 908 | py | Python | stix2matcher/test/test_object_path_quoting.py | clslgrnc/cti-pattern-matcher | bcd37eeb8c44e012ff1dbe4434c510f343575840 | [
"BSD-3-Clause"
] | 32 | 2017-02-22T14:54:01.000Z | 2022-02-21T09:10:02.000Z | stix2matcher/test/test_object_path_quoting.py | clslgrnc/cti-pattern-matcher | bcd37eeb8c44e012ff1dbe4434c510f343575840 | [
"BSD-3-Clause"
] | 44 | 2016-11-03T21:41:53.000Z | 2022-03-30T15:22:23.000Z | stix2matcher/test/test_object_path_quoting.py | clslgrnc/cti-pattern-matcher | bcd37eeb8c44e012ff1dbe4434c510f343575840 | [
"BSD-3-Clause"
] | 23 | 2016-11-07T19:03:13.000Z | 2021-06-10T10:12:36.000Z | import pytest
from stix2patterns.pattern import ParseException
from stix2matcher.matcher import match
_observations = [
{
"type": "observed-data",
"first_observed": "2004-10-11T21:44:58Z",
"last_observed": "2004-10-11T21:44:58Z",
"number_observed": 1,
"objects": {
"0": {
"type": u"some-type",
"has-hyphen": 1,
"has.dot": 2,
"has-hyphen.dot": 3
}
}
},
]
@pytest.mark.parametrize("pattern", [
"[some-type:'has-hyphen' = 1]",
"[some-type:'has.dot' = 2]",
"[some-type:'has-hyphen.dot' = 3]"
])
@pytest.mark.parametrize("pattern", [
"[some-type:needs-quotes = 1]"
])
| 23.282051 | 49 | 0.562775 | import pytest
from stix2patterns.pattern import ParseException
from stix2matcher.matcher import match
_observations = [
{
"type": "observed-data",
"first_observed": "2004-10-11T21:44:58Z",
"last_observed": "2004-10-11T21:44:58Z",
"number_observed": 1,
"objects": {
"0": {
"type": u"some-type",
"has-hyphen": 1,
"has.dot": 2,
"has-hyphen.dot": 3
}
}
},
]
@pytest.mark.parametrize("pattern", [
    "[some-type:'has-hyphen' = 1]",
    "[some-type:'has.dot' = 2]",
    "[some-type:'has-hyphen.dot' = 3]"
])
def test_quoting(pattern):
    """Quoted property names containing '-' and/or '.' must match."""
    assert match(pattern, _observations)
@pytest.mark.parametrize("pattern", [
    "[some-type:needs-quotes = 1]"
])
def test_quoting_error(pattern):
    """An unquoted property name containing '-' must fail to parse."""
    with pytest.raises(ParseException):
        match(pattern, _observations)
| 135 | 0 | 44 |
6f77ee6c621798575aa325b96f73f4cb76819cbc | 987 | py | Python | GitRangerLiu/0002/store_reldb.py | saurabh896/python-1 | f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7 | [
"MIT"
] | 3,976 | 2015-01-01T15:49:39.000Z | 2022-03-31T03:47:56.000Z | GitRangerLiu/0002/store_reldb.py | dwh65416396/python | 1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a | [
"MIT"
] | 97 | 2015-01-11T02:59:46.000Z | 2022-03-16T14:01:56.000Z | GitRangerLiu/0002/store_reldb.py | dwh65416396/python | 1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a | [
"MIT"
] | 3,533 | 2015-01-01T06:19:30.000Z | 2022-03-28T13:14:54.000Z | #reference: https://www.tutorialspoint.com/python/python_database_access.htm
import MySQLdb as mysqldb
if __name__ == '__main__':
store_reldb()
| 25.973684 | 79 | 0.570415 | #reference: https://www.tutorialspoint.com/python/python_database_access.htm
import MySQLdb as mysqldb
def store_reldb():
    """Create the verify_info table in the show_me_the_code database and
    load one row per verification code read from result.txt.

    On any database error the transaction is rolled back and a message is
    printed; previously the error message was printed unconditionally (even
    on success) and the input file was closed twice.
    """
    db = mysqldb.connect(host = 'localhost', user = 'chris', passwd = '1314', \
        db = 'show_me_the_code')
    cursor = db.cursor()
    #Create a table (drop any previous run's table first)
    cursor.execute('drop table if exists verify_info')
    sql = '''
    create table verify_info (
    id int not null auto_increment primary key,
    verify_code char(20)
    )'''
    cursor.execute(sql)
    #Insert data
    f = open('result.txt', 'rb')
    try:
        for line in f:
            verify_code = line.strip()
            # Parameterized query: avoids SQL injection/quoting problems of
            # the old "%s"-interpolated statement, and fixes the table-name
            # typo ('verifY_info'), which fails on case-sensitive MySQL.
            cursor.execute(
                'insert into verify_info(verify_code) values (%s);',
                (verify_code,))
        db.commit()
    except mysqldb.Error:
        db.rollback()
        print('Error happened when inserting data')
    finally:
        f.close()
        db.close()
# Run the import only when executed as a script, not when imported.
if __name__ == '__main__':
    store_reldb()
| 810 | 0 | 23 |
ab5129ded6bb3b79a9f5e6771ecbe5565cc412bb | 775 | py | Python | examples/simple/routes/get_user.py | nekonoshiri/tiny-router | 3bb808bcc9f9eb368ee390179dfc5e9d48cf8600 | [
"MIT"
] | null | null | null | examples/simple/routes/get_user.py | nekonoshiri/tiny-router | 3bb808bcc9f9eb368ee390179dfc5e9d48cf8600 | [
"MIT"
] | null | null | null | examples/simple/routes/get_user.py | nekonoshiri/tiny-router | 3bb808bcc9f9eb368ee390179dfc5e9d48cf8600 | [
"MIT"
] | null | null | null | import json
from typing import Any, Dict
from ..request import Request
from ..response import Response
from ..router import Router
router = Router()
@router.get("/users/{user_id}")
| 25 | 76 | 0.654194 | import json
from typing import Any, Dict
from ..request import Request
from ..response import Response
from ..router import Router
router = Router()
@router.get("/users/{user_id}")
def get_user(request: Request) -> Response:
    """Return the user identified by the 'user_id' path parameter.

    Responds 400 when the parameter is missing, 404 when no such user
    exists, and 200 with the user serialised as JSON otherwise.
    """
    uid = request.path_parameters.get("user_id")
    if not uid:
        return Response(400, json.dumps({"message": "user_id is required"}))
    try:
        user = load_user(uid)
        return Response(200, json.dumps(user))
    except Exception:
        return Response(404, json.dumps({"message": "User not found"}))
def load_user(user_id: str) -> Dict[str, Any]:
    """Fetch the user record for *user_id*; raises Exception when unknown."""
    # Here load user from database.
    if user_id != "001":
        raise Exception("User not found")
    return {"id": user_id, "name": "Alice"}
| 544 | 0 | 45 |
c32d8594788213d0081bff4ce0cd3bedd23da94d | 350 | py | Python | E033/main.py | alperkonuralp/AlperIlePython | 64e4940648a74306951dbfd97b593cfbcd94b7f6 | [
"Apache-2.0"
] | 1 | 2021-01-30T16:50:40.000Z | 2021-01-30T16:50:40.000Z | E033/main.py | alperkonuralp/AlperIlePython | 64e4940648a74306951dbfd97b593cfbcd94b7f6 | [
"Apache-2.0"
] | null | null | null | E033/main.py | alperkonuralp/AlperIlePython | 64e4940648a74306951dbfd97b593cfbcd94b7f6 | [
"Apache-2.0"
] | null | null | null | import os
filename = os.path.join(os.getcwd(), "data.json")
# f = open(filename)
with open(filename, mode="w") as f:
print("name :", f.name)
print("mode :", f.mode)
print("buffer :", f.buffer)
print("encoding :", f.encoding)
print("closed :", f.closed)
print("errors :", f.errors)
print("closed :", f.closed)
| 23.333333 | 50 | 0.574286 | import os
filename = os.path.join(os.getcwd(), "data.json")
# f = open(filename)
with open(filename, mode="w") as f:
print("name :", f.name)
print("mode :", f.mode)
print("buffer :", f.buffer)
print("encoding :", f.encoding)
print("closed :", f.closed)
print("errors :", f.errors)
print("closed :", f.closed)
| 0 | 0 | 0 |
f0884fef8ad8756af5861bcee7d27f5a89b2c466 | 931 | py | Python | setup.py | jmtapio/not-co-logger | 2556b874511e583e483d84f1b5bbcec4cb0d74ba | [
"MIT"
] | 1 | 2020-04-23T06:17:11.000Z | 2020-04-23T06:17:11.000Z | setup.py | jmtapio/not-co-logger | 2556b874511e583e483d84f1b5bbcec4cb0d74ba | [
"MIT"
] | null | null | null | setup.py | jmtapio/not-co-logger | 2556b874511e583e483d84f1b5bbcec4cb0d74ba | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup
# Package metadata for the notcologger distribution, consumed by
# setuptools/pip when building and installing.
setup(
    name='notcologger',
    version='0.1.2',
    description='Not CO Logger, a cloud logging library.',
    long_description=
    '''This library is aimed at helping produce consistent searchable log
entries to stdout in a cloud/container environment.''',
    keywords='logging',
    url='https://github.com/jmtapio/not-co-logger',
    author='Juha-Matti Tapio',
    author_email='jmtapio@verkkotelakka.net',
    license='MIT',
    packages=['notcologger'],
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries',
        'Topic :: System :: Logging',
    ],
    python_requires='>=3',
    test_suite='tests.test_logger',
    include_package_data=True,
    zip_safe=True)
| 30.032258 | 70 | 0.649839 | #!/usr/bin/env python3
from setuptools import setup
setup(
name='notcologger',
version='0.1.2',
description='Not CO Logger, a cloud logging library.',
long_description=
'''This library is aimed at helping produce consistent searchable log
entries to stdout in a cloud/container environment.''',
keywords='logging',
url='https://github.com/jmtapio/not-co-logger',
author='Juha-Matti Tapio',
author_email='jmtapio@verkkotelakka.net',
license='MIT',
packages=['notcologger'],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Logging',
],
python_requires='>=3',
test_suite='tests.test_logger',
include_package_data=True,
zip_safe=True)
| 0 | 0 | 0 |
846c34c251c0cbeffeac8f6640cbae570766595b | 1,359 | py | Python | src/sensai/tensorflow/tf_mlp.py | schroedk/sensAI | a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7 | [
"MIT"
] | 10 | 2020-02-19T09:16:54.000Z | 2022-02-04T16:19:33.000Z | src/sensai/tensorflow/tf_mlp.py | schroedk/sensAI | a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7 | [
"MIT"
] | 47 | 2020-03-11T16:26:51.000Z | 2022-02-04T15:29:40.000Z | src/sensai/tensorflow/tf_mlp.py | schroedk/sensAI | a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7 | [
"MIT"
] | 5 | 2020-03-12T21:33:22.000Z | 2020-12-21T14:43:04.000Z | from tensorflow import keras
from .tf_base import KerasVectorRegressionModel
from .. import normalisation
| 46.862069 | 123 | 0.727005 | from tensorflow import keras
from .tf_base import KerasVectorRegressionModel
from .. import normalisation
class KerasMultiLayerPerceptronVectorRegressionModel(KerasVectorRegressionModel):
    """
    A Keras-based multi-layer perceptron for vector regression: a stack of
    fully-connected hidden layers followed by a dense output layer.
    """
    def __init__(self, hiddenDims=(5,5), hiddenActivation="sigmoid", outputActivation="sigmoid", loss="mse",
            metrics=("mse",), optimiser="adam", normalisationMode=normalisation.NormalisationMode.MAX_BY_COLUMN, **kwargs):
        """
        :param hiddenDims: sequence with one entry per hidden layer, giving
            that layer's number of units
        :param hiddenActivation: activation function for all hidden layers
        :param outputActivation: activation function for the output layer
        :param loss: loss function passed to the base model
        :param metrics: metrics passed to the base model
        :param optimiser: optimiser passed to the base model
        :param normalisationMode: input normalisation mode for the base model
        :param kwargs: forwarded to the base model's constructor
        """
        super().__init__(normalisationMode, loss, metrics, optimiser, **kwargs)
        self.hiddenDims = hiddenDims
        self.hiddenActivation = hiddenActivation
        self.outputActivation = outputActivation
    def __str__(self):
        """Returns a string with the MLP parameters and the base description."""
        params = dict(hiddenDims=self.hiddenDims, hiddenActivation=self.hiddenActivation,
            outputActivation=self.outputActivation)
        return f"{self.__class__.__name__}{params}={super().__str__()}"
    def _createModel(self, inputDim, outputDim):
        """Builds the Keras model: input -> hidden dense layers -> output."""
        modelInputs = keras.Input(shape=(inputDim,), name='input')
        x = modelInputs
        for i, hiddenDim in enumerate(self.hiddenDims):
            x = keras.layers.Dense(hiddenDim, activation=self.hiddenActivation, name='dense_%d' % i)(x)
        modelOutputs = keras.layers.Dense(outputDim, activation=self.outputActivation, name='predictions')(x)
        return keras.Model(inputs=modelInputs, outputs=modelOutputs)
| 1,086 | 60 | 103 |
06f624c430991a082034d586c733777b98be2ca9 | 1,998 | py | Python | run_scripts/convert_pmg_xsec_db.py | morgenst/PyAnalysisTools | f3b1f89870e7bbae1549c228a56d2c36bbba7af3 | [
"MIT"
] | null | null | null | run_scripts/convert_pmg_xsec_db.py | morgenst/PyAnalysisTools | f3b1f89870e7bbae1549c228a56d2c36bbba7af3 | [
"MIT"
] | null | null | null | run_scripts/convert_pmg_xsec_db.py | morgenst/PyAnalysisTools | f3b1f89870e7bbae1549c228a56d2c36bbba7af3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import sys
from PyAnalysisTools.base import get_default_argparser, default_init
from PyAnalysisTools.base.YAMLHandle import YAMLLoader, YAMLDumper
from PyAnalysisTools.AnalysisTools.XSHandle import Dataset
if __name__ == '__main__':
main(sys.argv[1:])
| 39.176471 | 110 | 0.5996 | #!/usr/bin/env python
from __future__ import print_function
import sys
from PyAnalysisTools.base import get_default_argparser, default_init
from PyAnalysisTools.base.YAMLHandle import YAMLLoader, YAMLDumper
from PyAnalysisTools.AnalysisTools.XSHandle import Dataset
def main(argv):
    """
    Converts a PMG cross-section database file into the dataset format.

    :param argv: the command-line arguments
    :type argv: list[str]
    """
    parser = get_default_argparser(
        description='Converter from PMG xsec DB to dataset format')
    parser.add_argument('input_file', help="PMG input file")
    parser.add_argument('--output_file', '-o', help='output file name')
    parser.add_argument('--dataset_decoration', '-ds',
                        help="dataset decoration file")
    args = default_init(parser)
    decorations = YAMLLoader.read_yaml(args.dataset_decoration)
    converted = {}
    with open(args.input_file, 'r') as db_file:
        for row in db_file:
            columns = row.split()
            # Valid DB rows have either 8 or 9 whitespace-separated columns;
            # anything else (headers, blank lines) is skipped.
            if len(columns) not in (8, 9):
                continue
            ds_id, _, xsec, filter_eff, kfactor = columns[:5]
            ds_id = int(ds_id)
            if ds_id not in decorations:
                continue
            decoration = decorations[ds_id]
            if 'process_name' not in decoration:
                continue
            converted[ds_id] = Dataset(
                is_mc=True,
                cross_section=float(xsec) / 1000.,
                dsid=ds_id,
                kfactor=float(kfactor),
                filtereff=float(filter_eff),
                latex_label=decoration.get('latex_label'),
                process_name=decoration['process_name'])
    YAMLDumper.dump_yaml(converted, args.output_file)
if __name__ == '__main__':
main(sys.argv[1:])
| 1,653 | 0 | 23 |
a2becad7a4561b792574e2f10d402e4cc13be0c4 | 3,702 | py | Python | tests/builder/test_decorator_builder.py | igrek51/nuclear | d610e63ad98143dcc77e77e4ecbdaed9c38c3314 | [
"MIT"
] | 6 | 2020-06-24T20:03:06.000Z | 2021-09-21T10:05:17.000Z | tests/builder/test_decorator_builder.py | igrek51/nuclear | d610e63ad98143dcc77e77e4ecbdaed9c38c3314 | [
"MIT"
] | 2 | 2021-09-19T15:28:02.000Z | 2021-09-21T17:29:38.000Z | tests/builder/test_decorator_builder.py | igrek51/nuclear | d610e63ad98143dcc77e77e4ecbdaed9c38c3314 | [
"MIT"
] | 2 | 2020-06-24T21:21:35.000Z | 2021-08-01T17:24:38.000Z | from nuclear import *
from nuclear.parser.error import CliDefinitionError
from tests.asserts import MockIO, assert_error
from functools import reduce
import base64
cli = CliBuilder()
@cli.add_command('hello')
def say_hello(name: str, decode: bool = False, repeat: int = 1):
"""
Say hello to someone
:param name: Name to say hello to
:param decode: Decode name as base64
"""
if decode:
name = base64.b64decode(name).decode('utf-8')
print(' '.join([f"I'm a {name}!"] * repeat))
@cli.add_command('calculate', 'factorial')
def calculate_factorial(n: int):
"""Calculate factorial"""
result = reduce(lambda x, y: x * y, range(1, n + 1))
print(result)
return result
@cli.add_command('calculate', 'primes')
def calculate_primes(n: int = 100):
"""
List prime numbers using Sieve of Eratosthenes
:param n: maximum number to check
"""
print(sorted(reduce((lambda r, x: r - set(range(x**2, n, x)) if (x in r) else r),
range(2, n), set(range(2, n)))))
| 31.109244 | 86 | 0.619395 | from nuclear import *
from nuclear.parser.error import CliDefinitionError
from tests.asserts import MockIO, assert_error
from functools import reduce
import base64
cli = CliBuilder()
@cli.add_command('hello')
def say_hello(name: str, decode: bool = False, repeat: int = 1):
    """
    Say hello to someone
    :param name: Name to say hello to
    :param decode: Decode name as base64
    """
    # The name may arrive base64-encoded; decode it before greeting.
    target = base64.b64decode(name).decode('utf-8') if decode else name
    greeting = f"I'm a {target}!"
    print(' '.join([greeting] * repeat))
@cli.add_command('calculate', 'factorial')
def calculate_factorial(n: int):
    """Calculate factorial"""
    # The previous reduce() over range(1, n + 1) raised a TypeError for
    # n < 1 (empty sequence); an explicit product correctly yields 0! == 1.
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    print(result)
    return result
@cli.add_command('calculate', 'primes')
def calculate_primes(n: int = 100):
    """
    List prime numbers using Sieve of Eratosthenes
    :param n: maximum number to check
    """
    remaining = set(range(2, n))
    for candidate in range(2, n):
        if candidate in remaining:
            # Discard every multiple of the candidate, starting at its square.
            remaining -= set(range(candidate ** 2, n, candidate))
    print(sorted(remaining))
def test_calling_subcommand():
    # Each MockIO block simulates a command-line invocation and captures
    # everything the command prints.
    with MockIO('hello', 'world') as mockio:
        cli.run()
        assert mockio.output() == "I'm a world!\n"
    with MockIO('calculate', 'factorial', '6') as mockio:
        cli.run()
        assert mockio.output() == "720\n"
    with MockIO('calculate', 'primes', '-n=10') as mockio:
        cli.run()
        assert mockio.output() == "[2, 3, 5, 7]\n"
def test_bool_flag():
    # A bool parameter without a default becomes a positional argument,
    # while defaulted bools become --flags (toggling off the default).
    @cli.add_command('print very stupid flag')
    def print_something_very_stupid(force: bool):
        print(f"argument: {force}")
    @cli.add_command('test flaggy default_false')
    def flaggy_false(force: bool = False):
        print(f"flag: {force}")
    @cli.add_command('test flaggy default_true')
    def flaggy_true(force: bool = True):
        print(f"parameter: {force}")
    with MockIO('print', 'very', 'stupid', 'flag', 'false') as mockio:
        cli.run()
        assert mockio.output() == "argument: False\n"
    with MockIO('test', 'flaggy', 'default_false') as mockio:
        cli.run()
        assert mockio.output() == "flag: False\n"
    with MockIO('test', 'flaggy', 'default_false', '--force') as mockio:
        cli.run()
        assert mockio.output() == "flag: True\n"
    with MockIO('test', 'flaggy', 'default_true') as mockio:
        cli.run()
        assert mockio.output() == "parameter: True\n"
    with MockIO('test', 'flaggy', 'default_true', '--force=false') as mockio:
        cli.run()
        assert mockio.output() == "parameter: False\n"
def test_function_calling_works_after_decorating():
    # The decorator must not wrap the function: direct calls still work.
    assert calculate_factorial(6) == 720
def test_no_subcommand_name_error():
    # Registering a command without a name must raise CliDefinitionError.
    def do_something_evil():
        @cli.add_command()
        def do_nothing(n: int):
            print('nothing')
    assert_error(do_something_evil, error_type=CliDefinitionError)
def test_varargs_with_kwonly_args():
    @cli.add_command('doit')
    def doit(*numbers: int, temperature = 0, force: bool = False):
        print(f"args: {numbers}, temperature: {temperature}, force: {force}")
    with MockIO('doit', '1', '2', '--temperature', '36', '--force') as mockio:
        cli.run()
        assert mockio.output() == "args: (1, 2), temperature: 36, force: True\n"
def test_extract_param_docstring_to_help():
    # Docstring summaries and :param: descriptions are surfaced in --help,
    # but the raw ':param' markup must not leak into the output.
    with MockIO('--help') as mockio:
        cli.run()
        assert 'Say hello to someone' in mockio.output()
        assert ':param' not in mockio.output()
    with MockIO('hello', '--help') as mockio:
        cli.run()
        assert 'Decode name as base64' in mockio.output()
        assert 'NAME - Name to say hello to' in mockio.output()
        assert ':param' not in mockio.output()
| 2,509 | 0 | 138 |
9b1e9c8c1bcdcd96a6e85f61e36b49c932bf7d5a | 1,083 | py | Python | intake/migrations/0064_purgedvisitor.py | cforlando/intake | a5233d5c0f862f28ee265b9b4831405aabeec7e2 | [
"MIT"
] | 51 | 2016-07-20T02:26:57.000Z | 2021-07-07T14:45:06.000Z | intake/migrations/0064_purgedvisitor.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | 1,091 | 2016-04-29T18:07:45.000Z | 2021-04-19T18:39:39.000Z | intake/migrations/0064_purgedvisitor.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | 24 | 2016-06-14T18:10:43.000Z | 2021-11-14T20:26:39.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-22 18:16
from __future__ import unicode_literals
from django.db import migrations, models
| 27.075 | 114 | 0.493998 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-22 18:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('intake', '0063_purgedapplication_purgedstatusupdate'),
    ]
    operations = [
        # Unmanaged model backed by a SQL view in the "purged" schema:
        # Django never creates or alters this table itself.
        migrations.CreateModel(
            name='PurgedVisitor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'purged"."intake_visitor',
                'managed': False,
            },
        ),
        # Create (forward) / drop (reverse) the view, exposing only the
        # listed non-sensitive columns of intake_visitor.
        migrations.RunSQL(
            """CREATE OR REPLACE VIEW purged.intake_visitor AS
                    SELECT %s From intake_visitor;
            """ %
            ', '.join([
                'id',
                'uuid',
                'first_visit',
                'source',
                'referrer',
                'locale',
            ]),
            """DROP VIEW purged.intake_visitor;
            """),
    ]
| 0 | 904 | 23 |
41483716ff9a653ae3d2f1df97b42c5f2ab2706f | 67 | py | Python | files/Factors.py | AjayRajNelapudi/Script-Evaluation-Assistant | 200a3fc11e15f22b812c829d133707393be39c23 | [
"MIT"
] | 1 | 2018-09-22T10:10:42.000Z | 2018-09-22T10:10:42.000Z | files/Factors.py | AjayRajNelapudi/Script-Evaluation-Assistant | 200a3fc11e15f22b812c829d133707393be39c23 | [
"MIT"
] | 1 | 2018-09-16T14:44:47.000Z | 2018-09-16T14:44:47.000Z | files/Factors.py | AjayRajNelapudi/Script-Evaluation-Assistant | 200a3fc11e15f22b812c829d133707393be39c23 | [
"MIT"
] | 3 | 2018-09-16T14:37:55.000Z | 2018-09-30T06:44:49.000Z | n = int(input())
for i in range(0, n+1):
if n % i == 0:
print(i) | 16.75 | 23 | 0.507463 | n = int(input())
# Print every factor of n. Start at 1: i = 0 would raise ZeroDivisionError
# when evaluating n % i.
for i in range(1, n + 1):
    if n % i == 0:
        print(i)
c11ae1d44dda85eca2e9072897484d9f30c150e5 | 70,518 | py | Python | neurallog/network/dataset.py | guimaraes13/NeuralLog | 63cca557257ec67905b79048718db385cc4c3aab | [
"Apache-2.0"
] | 6 | 2021-05-04T12:24:17.000Z | 2021-07-28T03:20:48.000Z | neurallog/network/dataset.py | guimaraes13/NeuralLog | 63cca557257ec67905b79048718db385cc4c3aab | [
"Apache-2.0"
] | null | null | null | neurallog/network/dataset.py | guimaraes13/NeuralLog | 63cca557257ec67905b79048718db385cc4c3aab | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Victor Guimarães
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handles the examples.
"""
import collections
import logging
import sys
from abc import abstractmethod, ABC
from collections import OrderedDict, deque
from functools import partial
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from bert.tokenization.bert_tokenization import FullTokenizer
from neurallog.knowledge.examples import Examples
from neurallog.knowledge.program import NeuralLogProgram, NO_EXAMPLE_SET, \
get_predicate_from_string
from neurallog.language.language import AtomClause, Atom, Predicate, \
get_constant_from_string, get_term_from_string, TermType, Constant, Quote
from neurallog.network import registry
PARTIAL_WORD_PREFIX = "##"
logger = logging.getLogger(__name__)
dataset_classes = dict()
def neural_log_dataset(identifier):
    """
    A decorator for NeuralLog dataset.
    :param identifier: the identifier of the function
    :type identifier: str
    :return: the decorated function
    :rtype: function
    """
    def decorator(clazz):
        # Delegate the registration under `identifier` to the registry.
        return registry(clazz, identifier, dataset_classes)

    return decorator
def get_dataset_class(identifier):
    """
    Returns the class of the dataset based on the `identifier`.
    :param identifier: the identifier
    :type identifier: str
    :return: the dataset class
    :rtype: function
    """
    if identifier in dataset_classes:
        return dataset_classes[identifier]
    # Unknown identifiers fall back to the default implementation.
    return DefaultDataset
# IMPROVE: create a parameter to specify whether or not to print prediction
# that are not in the dataset
# noinspection PyTypeChecker,DuplicatedCode
def print_neural_log_predictions(model, neural_program, neural_dataset, dataset,
                                 writer=sys.stdout, dataset_name=None,
                                 print_batch_header=False):
    """
    Prints the predictions of `model` to `writer`.
    :param model: the model
    :type model: NeuralLogNetwork
    :param neural_dataset: the NeuralLog dataset
    :type neural_dataset: NeuralLogDataset
    :param neural_program: the neural program
    :type neural_program: NeuralLogProgram
    :param dataset: the dataset
    :type dataset: tf.data.Dataset
    :param writer: the writer. Default is to print to the standard output
    :type writer: Any
    :param dataset_name: the name of the dataset
    :type dataset_name: str
    :param print_batch_header: if `True`, prints a commented line before each
    batch
    :type print_batch_header: bool
    """
    count = 0
    batches = None
    empty_entry = None
    # `fix` adjusts how many input features are read per predicate; it stays
    # 0 here (the word/char variant below uses -1).
    fix = 0
    if isinstance(neural_dataset, SequenceDataset):
        if print_batch_header and dataset_name is not None:
            batches = list(neural_program.mega_examples[dataset_name].keys())
        # Inputs equal to this index mark empty/padding positions.
        empty_entry = neural_dataset.empty_word_index
    for features, _ in dataset:
        if print_batch_header:
            batch = batches[count] if batches is not None else count
            print("%% Batch:", batch, file=writer, sep="\t")
            count += 1
        y_scores = model.predict(features)
        if len(model.predicates) == 1:
            # Single target predicate: wrap the predictions (and, for
            # low-arity predicates, the features) so the loops below treat
            # single- and multi-output models uniformly.
            # if not isinstance(neural_dataset, WordCharDataset):
            y_scores = [y_scores]
            if model.predicates[0][0].arity < 3:
                features = [features]
        for i in range(len(model.predicates)):
            predicate, inverted = model.predicates[i]
            if isinstance(neural_dataset, WordCharDataset):
                # Drop the last term (the character input) from the
                # predicate used for printing.
                predicate = Predicate(predicate.name, predicate.arity - 1)
            if inverted:
                # Inverted predicates are not printed.
                continue
            row_scores = y_scores[i]
            if len(row_scores.shape) == 3:
                # NOTE(review): presumably (batch, 1, outputs) — squeeze the
                # middle axis; confirm against the model's output shape.
                row_scores = np.squeeze(row_scores, axis=1)
            for j in range(len(row_scores)):
                y_score = row_scores[j]
                x = []
                subjects = []
                stop = False
                offset = sum(model.input_sizes[:i])
                for k in range(model.input_sizes[i] + fix):
                    x_k = features[offset + k][j].numpy()
                    if x_k.dtype == np.float32:
                        # One-hot encoded input: an all-zero vector or the
                        # empty index means there is no subject here.
                        if np.max(x_k) == 0:
                            stop = True
                            break
                        arg_max = np.argmax(x_k)
                        if arg_max == empty_entry:
                            stop = True
                            break
                    else:
                        # Index-encoded input.
                        arg_max = x_k[0]
                        if arg_max < 0 or arg_max == empty_entry:
                            stop = True
                            break
                    subjects.append(neural_program.get_constant_by_index(
                        predicate, k, arg_max))
                    x.append(x_k)
                # NOTE(review): offset is recomputed at the top of each j
                # iteration, so this increment is never read afterwards.
                offset += model.input_sizes[i]
                if stop:
                    continue
                if predicate.arity == 1:
                    clause = AtomClause(Atom(predicate, subjects[0],
                                             weight=float(y_score)))
                    print(clause, file=writer)
                else:
                    clauses = []
                    for index in range(len(y_score)):
                        object_term = neural_program.get_constant_by_index(
                            predicate, -1, index)
                        prediction = Atom(predicate, *subjects, object_term,
                                          weight=float(y_score[index]))
                        # Only print predictions that appear in the example
                        # set, when a dataset name is given.
                        if dataset_name is not None and \
                                not neural_dataset.has_example_key(
                                    prediction.simple_key()):
                            continue
                        clauses.append(AtomClause(prediction))
                    if len(clauses) > 0:
                        # Header line with the queried atom, then the
                        # predictions sorted by decreasing weight.
                        clause = AtomClause(Atom(predicate, *subjects, "X"))
                        print("%%", clause, file=writer, sep=" ")
                        for clause in sorted(
                                clauses,
                                key=lambda c: c.atom.weight,
                                reverse=True):
                            print(clause, file=writer)
                        print(file=writer)
    # print(file=writer)
# noinspection DuplicatedCode
def _print_word_char_predictions(model, neural_program, neural_dataset, dataset,
                                 writer=sys.stdout, dataset_name=None,
                                 print_batch_header=False):
    """
    Prints the predictions of `model` to `writer`.
    This is a near-duplicate of `print_neural_log_predictions` specialised
    for `WordCharDataset`: the last input feature is a character sequence
    that is decoded back into a quoted string subject.
    :param model: the model
    :type model: NeuralLogNetwork
    :param neural_dataset: the NeuralLog dataset
    :type neural_dataset: WordCharDataset
    :param neural_program: the neural program
    :type neural_program: NeuralLogProgram
    :param dataset: the dataset
    :type dataset: tf.data.Dataset
    :param writer: the writer. Default is to print to the standard output
    :type writer: Any
    :param dataset_name: the name of the dataset
    :type dataset_name: str
    :param print_batch_header: if `True`, prints a commented line before each
    batch
    :type print_batch_header: bool
    """
    count = 0
    batches = None
    # Skip the last input feature in the subject loop: it is the character
    # input, handled separately below.
    fix = -1
    if print_batch_header and dataset_name is not None:
        batches = list(neural_program.mega_examples[dataset_name].keys())
    # Inputs equal to this index mark empty/padding positions.
    empty_entry = neural_dataset.empty_word_index
    for features, _ in dataset:
        if print_batch_header:
            batch = batches[count] if batches is not None else count
            print("%% Batch:", batch, file=writer, sep="\t")
            count += 1
        y_scores = model.predict(features)
        if len(model.predicates) == 1:
            # Single target predicate: wrap predictions/features so the
            # loops below treat single- and multi-output models uniformly.
            # if not isinstance(neural_dataset, WordCharDataset):
            y_scores = [y_scores]
            if model.predicates[0][0].arity < 3:
                features = [features]
        for i in range(len(model.predicates)):
            predicate, inverted = model.predicates[i]
            # Drop the last term (the character input) from the predicate
            # used for printing.
            predicate = Predicate(predicate.name, predicate.arity - 1)
            row_scores = y_scores[i]
            if len(row_scores.shape) == 3:
                row_scores = np.squeeze(row_scores, axis=1)
            for j in range(len(row_scores)):
                y_score = row_scores[j]
                x = []
                subjects = []
                stop = False
                offset = sum(model.input_sizes[:i])
                for k in range(model.input_sizes[i] + fix):
                    x_k = features[offset + k][j].numpy()
                    if x_k.dtype == np.float32:
                        # One-hot encoded input: all-zero or the empty
                        # index means there is no subject here.
                        if np.max(x_k) == 0:
                            stop = True
                            break
                        arg_max = np.argmax(x_k)
                        if arg_max == empty_entry:
                            stop = True
                            break
                    else:
                        # Index-encoded input.
                        arg_max = x_k[0]
                        if arg_max < 0 or arg_max == empty_entry:
                            stop = True
                            break
                    subjects.append(neural_program.get_constant_by_index(
                        predicate, k, arg_max))
                    x.append(x_k)
                offset += model.input_sizes[i]
                if stop:
                    continue
                # Decode the character feature into a quoted string and use
                # it as the last subject term.
                last_feature = features[offset - 1][j].numpy()
                subject_string = "\""
                for k in last_feature:
                    if k == neural_dataset.empty_char_index:
                        break
                    subject_string += neural_program.get_constant_by_index(
                        neural_dataset.character_predicate,
                        neural_dataset.character_predicate_index,
                        k
                    ).value
                subject_string += "\""
                subjects[-1] = get_term_from_string(subject_string)
                if predicate.arity == 1:
                    clause = AtomClause(Atom(predicate, subjects[0],
                                             weight=float(y_score)))
                    print(clause, file=writer)
                else:
                    clauses = []
                    for index in range(len(y_score)):
                        object_term = neural_program.get_constant_by_index(
                            predicate, -1, index)
                        prediction = Atom(predicate, *subjects, object_term,
                                          weight=float(y_score[index]))
                        # Only print predictions present in the example set,
                        # when a dataset name is given.
                        if dataset_name is not None and \
                                not neural_dataset.has_example_key(
                                    prediction.simple_key()):
                            continue
                        clauses.append(AtomClause(prediction))
                    if len(clauses) > 0:
                        # Header with the queried atom, then predictions
                        # sorted by decreasing weight.
                        clause = AtomClause(Atom(predicate, *subjects, "X"))
                        print("%%", clause, file=writer, sep=" ")
                        for clause in sorted(
                                clauses,
                                key=lambda c: c.atom.weight,
                                reverse=True):
                            print(clause, file=writer)
                        print(file=writer)
    # print(file=writer)
def get_predicate_indices(predicate, inverted):
    """
    Gets the indices of the predicate's input and output.
    :param predicate: the predicate
    :type predicate: Predicate
    :param inverted: if the predicate is inverted
    :type inverted: bool
    :return: the input and output indices
    :rtype: (list[int], int)
    """
    arity = predicate.arity
    if arity == 2 and inverted:
        # Inverted binary predicate: the second term is the input and the
        # first term is the output.
        return [1], 0
    if arity == 1:
        # Unary predicate: the single term plays both roles.
        return [0], 0
    # Non-inverted n-ary predicate: all terms but the last are inputs and
    # the last term is the output.
    return list(range(arity - 1)), arity - 1
# noinspection DuplicatedCode
def viterbi(potentials, transition_matrix, initial_distribution=None):
    """
    Computes the best path given based on the Viterbi algorithm.
    :param potentials: the emission of the neural network
    :type potentials: np.ndarray
    :param transition_matrix: the transition matrix
    :type transition_matrix: np.ndarray
    :param initial_distribution: the probabilities of the first state,
    it assumes an uniform probability, if `None`.
    :type initial_distribution: np.ndarray
    :return: the best path
    :rtype: np.ndarray
    """
    length, number_of_tags = potentials.shape
    if initial_distribution is None:
        # Unnormalized uniform prior over the tags of the first state.
        initial_distribution = np.ones(number_of_tags)
    scores = np.zeros((length, number_of_tags), dtype=np.float64)
    back_pointers = np.zeros((length, number_of_tags), dtype=np.int32)
    scores[0] = potentials[0] * initial_distribution
    transposed = transition_matrix.transpose()
    for step in range(1, length):
        # candidates[j, k]: probability of reaching tag j from previous
        # tag k; keep the best predecessor per tag.
        candidates = scores[step - 1] * transposed
        back_pointers[step] = np.argmax(candidates, axis=1)
        scores[step] = np.max(candidates, axis=1) * potentials[step]
    # Follow the back pointers from the best final tag.
    path = np.zeros(length, dtype=np.int32)
    path[-1] = np.argmax(scores[-1])
    for step in reversed(range(1, length)):
        path[step - 1] = back_pointers[step, path[step]]
    return path
# noinspection DuplicatedCode
def log_viterbi(potentials, transition_matrix, initial_distribution=None):
    """
    Computes the best path given based on the Viterbi algorithm.
    This version uses sum instead of multiplication and assumes that both
    `transition_matrix` and `emission` are the log of the probabilities.
    :param potentials: the emission of the neural network
    :type potentials: np.ndarray
    :param transition_matrix: the transition matrix
    :type transition_matrix: np.ndarray
    :param initial_distribution: the probabilities of the first state,
    it assumes an uniform probability, if `None`.
    :type initial_distribution: np.ndarray
    :return: the best path
    :rtype: np.ndarray
    """
    length, number_of_tags = potentials.shape
    if initial_distribution is None:
        # Same (unnormalized) prior as the non-log version.
        initial_distribution = np.ones(number_of_tags)
    scores = np.zeros((length, number_of_tags), dtype=np.float64)
    back_pointers = np.zeros((length, number_of_tags), dtype=np.int32)
    scores[0] = potentials[0] + initial_distribution
    transposed = transition_matrix.transpose()
    for step in range(1, length):
        # In log space the products become sums; keep the best predecessor
        # per tag.
        candidates = scores[step - 1] + transposed
        back_pointers[step] = np.argmax(candidates, axis=1)
        scores[step] = np.max(candidates, axis=1) + potentials[step]
    # Follow the back pointers from the best final tag.
    path = np.zeros(length, dtype=np.int32)
    path[-1] = np.argmax(scores[-1])
    for step in reversed(range(1, length)):
        path[step - 1] = back_pointers[step, path[step]]
    return path
class NeuralLogDataset(ABC):
    """
    Represents a NeuralLog dataset to train a NeuralLog network.
    """
    program: NeuralLogProgram
    "The NeuralLog program"
    def __init__(self, program, inverse_relations=True):
        """
        Creates a NeuralLogNetwork.
        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        self.program = program
        self.inverse_relations = inverse_relations
        # Lazily populated by subclasses with (predicate, inverted) tuples.
        self._target_predicates = None
    @property
    def target_predicates(self):
        """
        Gets the target predicates.
        :return: the target predicates
        :rtype: List[Tuple[Predicate, bool]]
        """
        return self._target_predicates
    @target_predicates.setter
    def target_predicates(self, value):
        """
        Sets the target predicates.
        :param value: the target predicates
        :type value: List[Tuple[Predicate, bool]]
        """
        self._target_predicates = value
    @abstractmethod
    def has_example_key(self, key):
        """
        Checks if the dataset contains the example key.
        :param key: the example key
        :type key: Any
        :return: if the dataset contains the atom example
        :rtype: bool
        """
        pass
    @abstractmethod
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """
        Gets the data set for the example set.
        :param example_set: the name of the example set
        :type example_set: str
        :param batch_size: the batch size
        :type batch_size: int
        :param shuffle: if `True`, shuffles the dataset.
        :type shuffle: bool
        :return: the dataset
        :rtype: tf.data.Dataset
        """
        pass
    @abstractmethod
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`. The labels are generated as sparse tensors.
        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        pass
    def get_target_predicates(self):
        """
        Gets a list of tuples containing the target predicates and whether it
        is inverted or not.
        :return: the list of target predicates
        :rtype: list[tuple[Predicate, bool]]
        """
        return self._target_predicates
    @abstractmethod
    def print_predictions(self, model, program, dataset, writer=sys.stdout,
                          dataset_name=None, print_batch_header=False):
        """
        Prints the predictions of `model` to `writer`.
        :param model: the model
        :type model: NeuralLogNetwork
        :param program: the neural program
        :type program: NeuralLogProgram
        :param dataset: the dataset
        :type dataset: tf.data.Dataset
        :param writer: the writer. Default is to write to the standard output
        :type writer: Any
        :param dataset_name: the name of the dataset
        :type dataset_name: str
        :param print_batch_header: if `True`, prints a commented line before
        each batch
        :type print_batch_header: bool
        """
        pass
@neural_log_dataset("default_dataset")
class DefaultDataset(NeuralLogDataset):
"""
The default NeuralLog dataset.
"""
    def __init__(self, program, inverse_relations=True):
        """
        Creates a DefaultDataset.
        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        super(DefaultDataset, self).__init__(program, inverse_relations)
        # Both helpers are defined further down in this class; presumably
        # they derive the targets and example keys from the program's
        # examples — confirm against their definitions.
        self._target_predicates = self._compute_target_predicates()
        self.example_keys = self._load_example_keys()
# noinspection PyMissingOrEmptyDocstring
# noinspection PyUnusedLocal,DuplicatedCode
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.
        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        # `count` walks the flat `features` tuple, which holds one index
        # tensor per input position of each target predicate.
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                # One-hot the constant index over the vocabulary of this
                # predicate term.
                feature = tf.one_hot(
                    features[count],
                    self.program.get_constant_size(predicate, index))
                dense_features.append(feature)
                count += 1
        labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
        # A single feature is returned bare; multiple features as a tuple.
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        # all_dense_features = tuple(all_dense_features)
        return dense_features, labels
    __call__ = call
# noinspection PyMissingOrEmptyDocstring
    def are_features_empty(self, features):
        """
        Checks if the features are empty.
        :param features: the features
        :type features: List[List[int]] or Tuple[List[int]]
        :return: `True`, if the features are empty
        :rtype: bool
        """
        size = len(features[0])
        if not size:
            # No examples at all.
            return True
        if size > 1:
            # More than one example: not empty.
            return False
        # Exactly one example: it is empty only if every input position
        # holds the out-of-vocabulary index of its predicate term.
        index = 0
        for i in range(len(self._target_predicates)):
            in_indices, out_index = \
                get_predicate_indices(*self._target_predicates[i])
            for j in range(len(in_indices)):
                # _get_out_of_vocabulary_index is defined elsewhere in this
                # class; presumably returns the OOV index for the given
                # predicate term — confirm against its definition.
                empty_value = self._get_out_of_vocabulary_index(
                    self._target_predicates[i][0], in_indices[j])
                if empty_value != features[index][0]:
                    return False
                index += 1
        return True
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.
        The labels are always a sparse tensor.
        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor]) or
            (list[tuple[tf.SparseTensor]], tuple[tf.SparseTensor])
        """
        # Missing example sets are treated as empty.
        examples = self.program.examples.get(example_set, OrderedDict())
        return self._build(examples)
    def ground_atom(self, example):
        """
        Grounds the example by replacing the value of the variables for each
        possible value found in the program.
        :param example: the example
        :type example: Atom
        :return: the grounded atoms
        :rtype: collections.Iterable[Atom]
        """
        if example.is_grounded():
            # Already ground: nothing to expand.
            return example,
        current_atoms = deque([example])
        predicate = example.predicate
        term_types: Tuple[TermType] = self.program.predicates[predicate]
        # Expand one term position at a time; variable positions multiply
        # the current atoms by their possible substitutions (a cartesian
        # product over all variable positions).
        for i in range(example.arity()):
            if example.terms[i].is_constant():
                continue
            next_atoms = deque()
            for atom in current_atoms:
                if term_types[i].number:
                    # Numeric positions are grounded with the value 0.0.
                    terms = list(atom.terms)
                    terms[i] = 0.0
                    next_atoms.append(
                        Atom(predicate, *terms, weight=example.weight))
                else:
                    # Replace the variable by every iterable constant known
                    # for this predicate position.
                    possible_terms = \
                        self.program.iterable_constants_per_term[(predicate, i)]
                    for constant in possible_terms.values():
                        terms = list(atom.terms)
                        terms[i] = constant
                        next_atoms.append(
                            Atom(predicate, *terms, weight=example.weight))
            current_atoms = next_atoms
        return current_atoms
    def _build(self, examples):
        """
        Builds the features and labels to train the neural network from
        the given examples.
        The labels are always sparse tensors.
        :param examples: the set of examples
        :type examples: Examples
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        # Phase 1: group the grounded facts by their input term(s), so each
        # distinct input tuple becomes one row of the dataset.
        output_by_term = OrderedDict()
        input_terms = []
        for predicate, inverted in self._target_predicates:
            facts = examples.get(predicate, dict())
            facts = facts.values()
            for example in facts:
                for fact in self.ground_atom(example):
                    if predicate.arity < 3:
                        # Binary/unary: a single input term; for an inverted
                        # binary predicate the last term is the input.
                        input_term = (fact.terms[-1 if inverted else 0],)
                    else:
                        input_term = tuple(fact.terms[0:predicate.arity - 1])
                    if input_term not in output_by_term:
                        output = dict()
                        output_by_term[input_term] = output
                        input_terms.append(input_term)
                    else:
                        output = output_by_term[input_term]
                    if predicate.arity == 1:
                        # Unary: the label is simply the fact's weight.
                        output[(predicate, inverted)] = fact.weight
                    else:
                        output_term = fact.terms[0 if inverted else -1]
                        # noinspection PyTypeChecker
                        output.setdefault((predicate, inverted), []).append(
                            (output_term, fact.weight))
        # Phase 2: for each target predicate, turn the grouped rows into
        # dense index features and a sparse label tensor.
        all_features = []
        all_labels = []
        for predicate, inverted in self._target_predicates:
            features = [[] for _ in range(max(1, predicate.arity - 1))]
            label_values = []
            label_indices = []
            in_indices, out_index = get_predicate_indices(predicate, inverted)
            for i in range(len(input_terms)):
                outputs = output_by_term[input_terms[i]].get(
                    (predicate, inverted), None)
                constant_index = 0
                for input_index in in_indices:
                    index = None
                    if outputs is not None:
                        index = self.program.get_index_of_constant(
                            predicate, input_index,
                            input_terms[i][constant_index])
                    if index is None:
                        # Rows with no output for this predicate (or unknown
                        # constants) get the out-of-vocabulary index.
                        index = self._get_out_of_vocabulary_index(
                            predicate, input_index)
                    features[constant_index].append(index)
                    constant_index += 1
                if outputs is not None:
                    if predicate.arity == 1:
                        label_indices.append([i, 0])
                        label_values.append(outputs)
                    else:
                        # noinspection PyTypeChecker
                        for output_term, output_value in outputs:
                            output_term_index = \
                                self.program.get_index_of_constant(
                                    predicate, out_index, output_term)
                            label_indices.append([i, output_term_index])
                            label_values.append(output_value)
            all_features += features
            if predicate.arity == 1:
                dense_shape = [len(input_terms), 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    len(input_terms),
                    self.program.get_constant_size(predicate, out_index)]
                empty_index = [[0, 0]]
            if len(label_values) == 0:
                # No label at all: a single zero-weight entry keeps the
                # sparse tensor well-formed.
                sparse_tensor = tf.SparseTensor(indices=empty_index,
                                                values=[0.0],
                                                dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(indices=label_indices,
                                                values=label_values,
                                                dense_shape=dense_shape)
                # tf.sparse ops require canonical (row-major) ordering.
                sparse_tensor = tf.sparse.reorder(sparse_tensor)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)
def _get_out_of_vocabulary_index(self, predicate, term_index):
"""
Returns the index of the entity to replace the not found entity.
:param predicate: the predicate
:type predicate: Predicate
:param term_index: the index of the term
:type term_index: int
:return: the index of entity to replace the not found one
:rtype: int
"""
return -1
# noinspection PyMissingOrEmptyDocstring
class AbstractSequenceDataset(DefaultDataset, ABC):
    """
    Represents an Abstract Sequence Dataset.
    Mega examples are built in batches; `build` is a generator that yields
    one (features, labels) pair per mega example.
    """
    # NOTE(review): this class reads `self.expand_one_hot` and subclasses
    # pass extra constructor arguments through `super().__init__`, but no
    # `__init__` is visible here -- it appears to be missing from this copy
    # of the source; confirm against the original file.
    # noinspection PyUnusedLocal,DuplicatedCode
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.
        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                if self.expand_one_hot:
                    # Expand each index into a one-hot vector over the
                    # constants of the (predicate, term) pair.
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    # Keep raw indices, as a column vector.
                    feature = tf.reshape(features[count], [-1, 1])
                dense_features.append(feature)
                count += 1
        # A single feature is unwrapped from the tuple.
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        return dense_features, labels
    __call__ = call
    # noinspection PyMissingOrEmptyDocstring
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and labels to train the neural network based
        on the `example_set`.
        Unlike the base class, this is a generator: it yields one
        (features, labels) pair per mega example, in sorted key order,
        with the labels already densified.
        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels, one pair per mega example
        :rtype: generator
        """
        mega_examples = self.program.mega_examples.get(
            example_set, OrderedDict())
        for _, examples in sorted(mega_examples.items(), key=lambda x: x[0]):
            features, labels = self._build(examples)
            labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
            yield features, labels
    @abstractmethod
    def _build(self, examples):
        """
        Builds the features and labels to train the neural network from
        the given mega example.
        The labels are always sparse tensors.
        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        pass
@neural_log_dataset("sequence_dataset")
class SequenceDataset(AbstractSequenceDataset):
    """
    A sequence dataset: builds the mega examples as sequences, replacing
    unknown entities by a configurable out-of-vocabulary entity.
    """

    def __init__(self, program, empty_word_index, inverse_relations=True,
                 oov_word="<OOV>", expand_one_hot=True):
        """
        Creates a SequenceDataset.
        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param empty_word_index: the index of an entity that is not found
        in any example, used to represent an empty entry
        :type empty_word_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param oov_word: the value to replace out of the vocabulary
        entities
        :type oov_word: str
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        """
        super(SequenceDataset, self).__init__(
            program, empty_word_index, inverse_relations, expand_one_hot)
        # The out-of-vocabulary entity, stored as a language constant.
        self.oov_word = get_constant_from_string(oov_word)

    # noinspection DuplicatedCode
    def _get_out_of_vocabulary_index(self, predicate, term_index):
        """
        Returns the index of the out-of-vocabulary entity, used to
        replace entities that are not found in the vocabulary.
        :param predicate: the predicate
        :type predicate: Predicate
        :param term_index: the index of the term
        :type term_index: int
        :return: the index of the entity to replace the not found one
        :rtype: int
        """
        return self.program.get_index_of_constant(
            predicate, term_index, self.oov_word)
# noinspection PyUnusedLocal
@neural_log_dataset("language_dataset")
class LanguageDataset(AbstractSequenceDataset):
    """
    Class to process mega examples as phrases.
    """
    # noinspection PyMissingOrEmptyDocstring
    # NOTE(review): `call` is not defined in this class body and there is no
    # module-level `call`; class-body name resolution does not consult base
    # classes, so this assignment presumably referred to a `call` method that
    # is missing from this copy of the source -- confirm against the
    # original file.
    __call__ = call
# noinspection PyMissingOrEmptyDocstring
# noinspection DuplicatedCode
# noinspection DuplicatedCode
# noinspection PyMissingOrEmptyDocstring,DuplicatedCode
@neural_log_dataset("word_char_dataset")
class WordCharDataset(SequenceDataset):
    """
    Class to represent a Word and Char dataset.
    This class considers ternary predicates as being composed by a word,
    a sequence of characters (represented as a string) and a class.
    The word and the class will be treated as usual. The sequence of
    characters will be transformed into a vector of index of the character
    entity in a given predicate. The vector will have the size of the
    largest sequence in the batch.
    """
    def __init__(self, program, empty_word_index, empty_char_index,
                 character_predicate, character_predicate_index=0,
                 inverse_relations=True, oov_word="<OOV>", oov_char="<OOV>",
                 expand_one_hot=True, char_pad=0):
        """
        Creates a word char dataset.
        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param empty_word_index: the index of an entity that is not found in any
        example, to represent an empty entry
        :type empty_word_index: int
        :param empty_char_index: the character index used to pad character
        sequences
        :type empty_char_index: int
        :param character_predicate: the predicate to get the index of the
        characters
        :type character_predicate: str
        :param character_predicate_index: the index of the term in the character
        predicate, to get the index of the characters
        :type character_predicate_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param oov_word: the value to replace out of the vocabulary words
        :type oov_word: str
        :param oov_char: the value to replace out of the vocabulary chars
        :type oov_char: str
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        :param char_pad: the number of empty elements to append at the end of
        the char sequence
        :type char_pad: int
        """
        super(WordCharDataset, self).__init__(
            program, empty_word_index, inverse_relations,
            oov_word, expand_one_hot)
        self.empty_char_index = empty_char_index
        self.character_predicate = \
            get_predicate_from_string(character_predicate)
        self.character_predicate_index = character_predicate_index
        self.oov_char = get_constant_from_string(oov_char)
        # NOTE(review): `_get_out_of_vocabulary_index` (inherited from
        # SequenceDataset) resolves the index of `self.oov_word`, not
        # `self.oov_char`; the two only coincide when both parameters use
        # the same value (as the defaults do). An override may be missing
        # from this copy of the source -- confirm against the original file.
        self._ooc_char_index = self._get_out_of_vocabulary_index(
            get_predicate_from_string(character_predicate),
            character_predicate_index
        )
        # Negative paddings are clamped to zero.
        self.char_pad = max(char_pad, 0)
    # noinspection PyMissingOrEmptyDocstring
    # noinspection PyMissingOrEmptyDocstring
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.
        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                if self.expand_one_hot:
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    # Unlike the base class, raw indices are kept as-is
                    # (no reshape), since char features are already 2-D.
                    feature = tf.constant(features[count])
                dense_features.append(feature)
                count += 1
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        # A single label is unwrapped from the tuple.
        if len(labels) == 1:
            labels = labels[0]
        return dense_features, labels
    __call__ = call
    # noinspection DuplicatedCode
    def _build(self, examples):
        """
        Builds the features and labels to train the neural network based on
        the given mega example.
        The labels are always sparse tensors.
        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        all_features = []  # type: List[int] or List[List[int]]
        all_label_indices = []
        all_label_values = []
        row_index = 0
        max_lengths = []
        # Phase 1: collect, per target predicate, the word-index features,
        # the character-index sequences and the (row, class) label entries.
        for predicate, inverted in self._target_predicates:
            input_indices, output_index = get_predicate_indices(
                predicate, inverted)
            output_index -= 1
            # The examples are keyed by the predicate without the char term.
            real_predicate = Predicate(predicate.name, predicate.arity - 1)
            feature = [[] for _ in range(max(1, predicate.arity - 1))]
            label_indices = []
            label_values = []
            facts = examples.get(real_predicate, [])
            max_length = -1
            for example in facts:
                for fact in self.ground_atom(example):
                    input_terms = tuple(fact.terms[0:predicate.arity - 2])
                    count = 0
                    for input_index, in_term in zip(input_indices, input_terms):
                        # Words are matched case-insensitively.
                        in_term = get_term_from_string(str(in_term).lower())
                        input_value = self.program.get_index_of_constant(
                            real_predicate, input_index, in_term)
                        if input_value is None:
                            input_value = self._get_out_of_vocabulary_index(
                                real_predicate, input_index)
                        feature[count].append([input_value])
                        count += 1
                    if predicate.arity > 2:
                        # The last input term is spelled out as a sequence
                        # of character indices.
                        char_features = []
                        last_term = input_terms[-1].value
                        max_length = max(len(last_term), max_length)
                        for char in last_term:
                            input_value = self.program.get_index_of_constant(
                                self.character_predicate,
                                self.character_predicate_index,
                                get_constant_from_string(char)
                            )
                            if input_value is None:
                                input_value = self._ooc_char_index
                            char_features.append(input_value)
                        feature[-1].append(char_features)
                    output_term = fact.terms[output_index]
                    output_value = self.program.get_index_of_constant(
                        real_predicate, output_index, output_term)
                    label_indices.append([row_index, output_value])
                    label_values.append(fact.weight)
                    row_index += 1
            max_lengths.append(max_length + self.char_pad)
            all_label_indices.append(label_indices)
            all_label_values.append(label_values)
            all_features += feature
        # Phase 2: pad every feature column to `row_index` rows (with the
        # empty word/char indices) and build one sparse label per predicate.
        all_labels = []
        examples_offset = 0
        features_offset = 0
        for i in range(len(self._target_predicates)):
            # Features
            arity = self._target_predicates[i][0].arity
            number_of_features = max(arity - 1, 1)
            length = len(all_features[features_offset])
            if arity > 2:
                # The last column holds char sequences; handled separately.
                number_of_features -= 1
            for j in range(number_of_features):
                all_features[features_offset + j] = \
                    ([self.empty_word_index] * examples_offset) + \
                    all_features[features_offset + j]
                all_features[features_offset + j] += \
                    [self.empty_word_index] * (
                            row_index - examples_offset - length)
            if arity > 2:
                j = number_of_features
                adjusted_features = []
                # Right-pad each char sequence to the batch maximum.
                for current in all_features[features_offset + j]:
                    # noinspection PyTypeChecker
                    adjusted_features.append(
                        current +
                        ([self.empty_char_index] *
                         (max_lengths[i] - len(current))))
                all_features[features_offset + j] = \
                    ([[self.empty_char_index] * max_lengths[i]] *
                     examples_offset) + adjusted_features
                all_features[features_offset + j] += \
                    [[self.empty_char_index] * max_lengths[i]] * (
                            row_index - examples_offset - length)
                number_of_features += 1
            examples_offset += length
            features_offset += number_of_features
            # Labels
            predicate, index = self._target_predicates[i]
            real_predicate = Predicate(predicate.name, predicate.arity - 1)
            _, output_index = get_predicate_indices(predicate, index)
            output_index -= 1
            if predicate.arity == 1:
                dense_shape = [row_index, 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    row_index,
                    self.program.get_constant_size(
                        real_predicate, output_index)]
                empty_index = [[0, 0]]
            if len(all_label_values[i]) == 0:
                # No label: a single zero entry keeps the tensor well-formed.
                sparse_tensor = tf.SparseTensor(
                    indices=empty_index, values=[0.0], dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(
                    indices=all_label_indices[i], values=all_label_values[i],
                    dense_shape=dense_shape)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)
# noinspection PyMissingOrEmptyDocstring
| 40.181197 | 80 | 0.57011 | # Copyright 2021 Victor Guimarães
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handles the examples.
"""
import collections
import logging
import sys
from abc import abstractmethod, ABC
from collections import OrderedDict, deque
from functools import partial
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from bert.tokenization.bert_tokenization import FullTokenizer
from neurallog.knowledge.examples import Examples
from neurallog.knowledge.program import NeuralLogProgram, NO_EXAMPLE_SET, \
get_predicate_from_string
from neurallog.language.language import AtomClause, Atom, Predicate, \
get_constant_from_string, get_term_from_string, TermType, Constant, Quote
from neurallog.network import registry
# Prefix used by BERT word-piece tokenizers to mark word continuations.
PARTIAL_WORD_PREFIX = "##"
logger = logging.getLogger(__name__)
# Registry of dataset classes keyed by identifier; populated by the
# `neural_log_dataset` decorator and queried by `get_dataset_class`.
dataset_classes = dict()
def neural_log_dataset(identifier):
    """
    A decorator for NeuralLog dataset classes.
    Registers the decorated class under `identifier` in `dataset_classes`.
    :param identifier: the identifier of the function
    :type identifier: str
    :return: the decorated function
    :rtype: function
    """
    def decorator(clazz):
        # Delegates to the shared registry helper.
        return registry(clazz, identifier, dataset_classes)
    return decorator
def get_dataset_class(identifier):
    """
    Returns the dataset class registered under `identifier`, falling back
    to `DefaultDataset` when the identifier is unknown.
    :param identifier: the identifier
    :type identifier: str
    :return: the dataset class
    :rtype: function
    """
    if identifier in dataset_classes:
        return dataset_classes[identifier]
    return DefaultDataset
# IMPROVE: create a parameter to specify whether or not to print predictions
# that are not in the dataset
# noinspection PyTypeChecker,DuplicatedCode
def print_neural_log_predictions(model, neural_program, neural_dataset, dataset,
                                 writer=sys.stdout, dataset_name=None,
                                 print_batch_header=False):
    """
    Prints the predictions of `model` to `writer`.
    For each batch, it decodes the input features back into constants,
    skips rows whose inputs are empty or out of vocabulary, and prints the
    predicted atoms (sorted by weight, for non-unary predicates).
    :param model: the model
    :type model: NeuralLogNetwork
    :param neural_dataset: the NeuralLog dataset
    :type neural_dataset: NeuralLogDataset
    :param neural_program: the neural program
    :type neural_program: NeuralLogProgram
    :param dataset: the dataset
    :type dataset: tf.data.Dataset
    :param writer: the writer. Default is to print to the standard output
    :type writer: Any
    :param dataset_name: the name of the dataset
    :type dataset_name: str
    :param print_batch_header: if `True`, prints a commented line before each
    batch
    :type print_batch_header: bool
    """
    count = 0
    batches = None
    empty_entry = None
    # `fix` adjusts how many input features are read per predicate; it is 0
    # here and -1 in the word-char variant of this function.
    fix = 0
    if isinstance(neural_dataset, SequenceDataset):
        if print_batch_header and dataset_name is not None:
            batches = list(neural_program.mega_examples[dataset_name].keys())
        # Index marking an empty (padding) entry, to be skipped below.
        empty_entry = neural_dataset.empty_word_index
    for features, _ in dataset:
        if print_batch_header:
            batch = batches[count] if batches is not None else count
            print("%% Batch:", batch, file=writer, sep="\t")
            count += 1
        y_scores = model.predict(features)
        if len(model.predicates) == 1:
            # A single predicate: normalize outputs (and inputs, for low
            # arities) to lists so the loops below are uniform.
            # if not isinstance(neural_dataset, WordCharDataset):
            y_scores = [y_scores]
            if model.predicates[0][0].arity < 3:
                features = [features]
        for i in range(len(model.predicates)):
            predicate, inverted = model.predicates[i]
            if isinstance(neural_dataset, WordCharDataset):
                # The char term is dropped from the printed predicate.
                predicate = Predicate(predicate.name, predicate.arity - 1)
            if inverted:
                continue
            row_scores = y_scores[i]
            if len(row_scores.shape) == 3:
                row_scores = np.squeeze(row_scores, axis=1)
            for j in range(len(row_scores)):
                y_score = row_scores[j]
                x = []
                subjects = []
                stop = False
                offset = sum(model.input_sizes[:i])
                # Decode each input feature back to its constant; bail out
                # of the row on empty/out-of-vocabulary entries.
                for k in range(model.input_sizes[i] + fix):
                    x_k = features[offset + k][j].numpy()
                    if x_k.dtype == np.float32:
                        # One-hot inputs: an all-zero vector is empty.
                        if np.max(x_k) == 0:
                            stop = True
                            break
                        arg_max = np.argmax(x_k)
                        if arg_max == empty_entry:
                            stop = True
                            break
                    else:
                        # Index inputs: negative or empty index is skipped.
                        arg_max = x_k[0]
                        if arg_max < 0 or arg_max == empty_entry:
                            stop = True
                            break
                    subjects.append(neural_program.get_constant_by_index(
                        predicate, k, arg_max))
                    x.append(x_k)
                offset += model.input_sizes[i]
                if stop:
                    continue
                if predicate.arity == 1:
                    clause = AtomClause(Atom(predicate, subjects[0],
                                             weight=float(y_score)))
                    print(clause, file=writer)
                else:
                    clauses = []
                    for index in range(len(y_score)):
                        object_term = neural_program.get_constant_by_index(
                            predicate, -1, index)
                        prediction = Atom(predicate, *subjects, object_term,
                                          weight=float(y_score[index]))
                        # Only predictions present in the dataset are kept,
                        # when a dataset name is given.
                        if dataset_name is not None and \
                                not neural_dataset.has_example_key(
                                    prediction.simple_key()):
                            continue
                        clauses.append(AtomClause(prediction))
                    if len(clauses) > 0:
                        clause = AtomClause(Atom(predicate, *subjects, "X"))
                        print("%%", clause, file=writer, sep=" ")
                        # Highest-weight predictions first.
                        for clause in sorted(
                                clauses,
                                key=lambda c: c.atom.weight,
                                reverse=True):
                            print(clause, file=writer)
                        print(file=writer)
    # print(file=writer)
# noinspection DuplicatedCode
def _print_word_char_predictions(model, neural_program, neural_dataset, dataset,
writer=sys.stdout, dataset_name=None,
print_batch_header=False):
"""
Prints the predictions of `model` to `writer`.
:param model: the model
:type model: NeuralLogNetwork
:param neural_dataset: the NeuralLog dataset
:type neural_dataset: WordCharDataset
:param neural_program: the neural program
:type neural_program: NeuralLogProgram
:param dataset: the dataset
:type dataset: tf.data.Dataset
:param writer: the writer. Default is to print to the standard output
:type writer: Any
:param dataset_name: the name of the dataset
:type dataset_name: str
:param print_batch_header: if `True`, prints a commented line before each
batch
:type print_batch_header: bool
"""
count = 0
batches = None
fix = -1
if print_batch_header and dataset_name is not None:
batches = list(neural_program.mega_examples[dataset_name].keys())
empty_entry = neural_dataset.empty_word_index
for features, _ in dataset:
if print_batch_header:
batch = batches[count] if batches is not None else count
print("%% Batch:", batch, file=writer, sep="\t")
count += 1
y_scores = model.predict(features)
if len(model.predicates) == 1:
# if not isinstance(neural_dataset, WordCharDataset):
y_scores = [y_scores]
if model.predicates[0][0].arity < 3:
features = [features]
for i in range(len(model.predicates)):
predicate, inverted = model.predicates[i]
predicate = Predicate(predicate.name, predicate.arity - 1)
row_scores = y_scores[i]
if len(row_scores.shape) == 3:
row_scores = np.squeeze(row_scores, axis=1)
for j in range(len(row_scores)):
y_score = row_scores[j]
x = []
subjects = []
stop = False
offset = sum(model.input_sizes[:i])
for k in range(model.input_sizes[i] + fix):
x_k = features[offset + k][j].numpy()
if x_k.dtype == np.float32:
if np.max(x_k) == 0:
stop = True
break
arg_max = np.argmax(x_k)
if arg_max == empty_entry:
stop = True
break
else:
arg_max = x_k[0]
if arg_max < 0 or arg_max == empty_entry:
stop = True
break
subjects.append(neural_program.get_constant_by_index(
predicate, k, arg_max))
x.append(x_k)
offset += model.input_sizes[i]
if stop:
continue
last_feature = features[offset - 1][j].numpy()
subject_string = "\""
for k in last_feature:
if k == neural_dataset.empty_char_index:
break
subject_string += neural_program.get_constant_by_index(
neural_dataset.character_predicate,
neural_dataset.character_predicate_index,
k
).value
subject_string += "\""
subjects[-1] = get_term_from_string(subject_string)
if predicate.arity == 1:
clause = AtomClause(Atom(predicate, subjects[0],
weight=float(y_score)))
print(clause, file=writer)
else:
clauses = []
for index in range(len(y_score)):
object_term = neural_program.get_constant_by_index(
predicate, -1, index)
prediction = Atom(predicate, *subjects, object_term,
weight=float(y_score[index]))
if dataset_name is not None and \
not neural_dataset.has_example_key(
prediction.simple_key()):
continue
clauses.append(AtomClause(prediction))
if len(clauses) > 0:
clause = AtomClause(Atom(predicate, *subjects, "X"))
print("%%", clause, file=writer, sep=" ")
for clause in sorted(
clauses,
key=lambda c: c.atom.weight,
reverse=True):
print(clause, file=writer)
print(file=writer)
# print(file=writer)
def get_predicate_indices(predicate, inverted):
    """
    Gets the indices of the predicate's input term(s) and output term.
    For a unary predicate, term 0 is both input and output. For a binary
    predicate, the direction depends on `inverted`. For higher arities,
    every term but the last is an input and the last term is the output
    (`inverted` is ignored).
    :param predicate: the predicate
    :type predicate: Predicate
    :param inverted: if the predicate is inverted
    :type inverted: bool
    :return: the input and output indices
    :rtype: (list[int], int)
    """
    if predicate.arity == 1:
        input_index = [0]
        output_index = 0
    elif predicate.arity == 2:
        if inverted:
            input_index = [1]
            output_index = 0
        else:
            input_index = [0]
            output_index = 1
    else:
        # `list(range(...))` replaces the redundant identity comprehension
        # `[x for x in range(...)]`.
        input_index = list(range(predicate.arity - 1))
        output_index = predicate.arity - 1
    return input_index, output_index
# noinspection DuplicatedCode
def viterbi(potentials, transition_matrix, initial_distribution=None):
    """
    Runs the Viterbi algorithm to compute the most likely state path.
    :param potentials: the emission scores of the neural network, with
    shape (sequence_length, number_of_tags)
    :type potentials: np.ndarray
    :param transition_matrix: the transition matrix
    :type transition_matrix: np.ndarray
    :param initial_distribution: the probabilities of the first state;
    if `None`, every state starts with the same weight
    :type initial_distribution: np.ndarray
    :return: the best path
    :rtype: np.ndarray
    """
    length, num_tags = potentials.shape
    trellis = np.zeros((length, num_tags), dtype=np.float64)
    backpointers = np.zeros((length, num_tags), dtype=np.int32)
    path = np.zeros(length, dtype=np.int32)
    if initial_distribution is None:
        # A constant prior: a common scale does not change the arg-max.
        initial_distribution = np.ones(num_tags, dtype=np.float64)
    trellis[0, :] = potentials[0, :] * initial_distribution
    transition_t = transition_matrix.transpose()
    # Forward pass: keep, per state, the best score and its predecessor.
    for step in range(1, length):
        scores = trellis[step - 1, :] * transition_t
        backpointers[step, :] = np.argmax(scores, axis=1)
        trellis[step, :] = np.max(scores, axis=1) * potentials[step, :]
    # Backward pass: follow the backpointers from the best final state.
    path[-1] = np.argmax(trellis[-1, :])
    for step in range(length - 1, 0, -1):
        path[step - 1] = backpointers[step, path[step]]
    return path
# noinspection DuplicatedCode
def log_viterbi(potentials, transition_matrix, initial_distribution=None):
    """
    Computes the best path based on the Viterbi algorithm, in log space.
    This version uses sums instead of multiplications and assumes that both
    `transition_matrix` and `potentials` hold log-scores, which avoids the
    numeric underflow of the probability-space version.
    :param potentials: the emission log-scores of the neural network, with
    shape (sequence_length, number_of_tags)
    :type potentials: np.ndarray
    :param transition_matrix: the log transition matrix
    :type transition_matrix: np.ndarray
    :param initial_distribution: the log-probabilities of the first state;
    it assumes a uniform distribution, if `None`.
    :type initial_distribution: np.ndarray
    :return: the best path
    :rtype: np.ndarray
    """
    sequence_length, number_of_tags = potentials.shape
    state_probabilities = np.zeros((sequence_length, number_of_tags),
                                   dtype=np.float64)
    best_paths = np.zeros((sequence_length, number_of_tags), dtype=np.int32)
    best_path = np.zeros(sequence_length, dtype=np.int32)
    if initial_distribution is None:
        # In log space the neutral (uniform) prior is log(1) = 0.0 per
        # state, not 1.0. The former constant offset of 1.0 did not change
        # the arg-max (the returned path is identical), but it was
        # semantically wrong; 0.0 keeps the trellis values interpretable
        # as log-scores.
        initial_distribution = np.zeros(number_of_tags)
    state_probabilities[0, :] = potentials[0, :] + initial_distribution
    transpose = transition_matrix.transpose()
    # Forward pass: keep, per state, the best log-score and its predecessor.
    for i in range(1, sequence_length):
        prev_p = state_probabilities[i - 1, :] + transpose
        best_previous_nodes = np.argmax(prev_p, axis=1)
        state_probabilities[i] = np.max(prev_p, axis=1)
        state_probabilities[i] += potentials[i, :]
        best_paths[i, :] = best_previous_nodes
    # Backward pass: follow the backpointers from the best final state.
    best_path[-1] = np.argmax(state_probabilities[-1, :])
    for i in reversed(range(1, sequence_length)):
        best_path[i - 1] = best_paths[i, best_path[i]]
    return best_path
class NeuralLogDataset(ABC):
    """
    Represents a NeuralLog dataset to train a NeuralLog network.
    """

    program: NeuralLogProgram
    "The NeuralLog program"

    def __init__(self, program, inverse_relations=True):
        """
        Creates a NeuralLog dataset.
        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        """
        self.program = program
        self.inverse_relations = inverse_relations
        # Lazily filled by subclasses with (predicate, inverted) pairs.
        self._target_predicates = None

    @property
    def target_predicates(self):
        """
        The target predicates, as (predicate, inverted) pairs.
        :return: the target predicates
        :rtype: List[Tuple[Predicate, bool]]
        """
        return self._target_predicates

    @target_predicates.setter
    def target_predicates(self, value):
        """
        Sets the target predicates.
        :param value: the target predicates
        :type value: List[Tuple[Predicate, bool]]
        """
        self._target_predicates = value

    @abstractmethod
    def has_example_key(self, key):
        """
        Checks whether the dataset contains the example key.
        :param key: the example key
        :type key: Any
        :return: if the dataset contains the atom example
        :rtype: bool
        """
        pass

    @abstractmethod
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """
        Gets the data set for the example set.
        :param example_set: the name of the example set
        :type example_set: str
        :param batch_size: the batch size
        :type batch_size: int
        :param shuffle: if `True`, shuffles the dataset.
        :type shuffle: bool
        :return: the dataset
        :rtype: tf.data.Dataset
        """
        pass

    @abstractmethod
    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and labels to train the neural network based
        on the `example_set`.
        :param example_set: the name of the set of examples
        :type example_set: str
        :return: the features and labels
        :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
        """
        pass

    def get_target_predicates(self):
        """
        Gets a list of tuples containing the target predicates and whether
        each one is inverted or not.
        :return: the list of target predicates
        :rtype: list[tuple[Predicate, bool]]
        """
        return self._target_predicates

    @abstractmethod
    def print_predictions(self, model, program, dataset, writer=sys.stdout,
                          dataset_name=None, print_batch_header=False):
        """
        Prints the predictions of `model` to `writer`.
        :param model: the model
        :type model: NeuralLogNetwork
        :param program: the neural program
        :type program: NeuralLogProgram
        :param dataset: the dataset
        :type dataset: tf.data.Dataset
        :param writer: the writer. Default is to write to the standard output
        :type writer: Any
        :param dataset_name: the name of the dataset
        :type dataset_name: str
        :param print_batch_header: if `True`, prints a commented line before
        each batch
        :type print_batch_header: bool
        """
        pass
@neural_log_dataset("default_dataset")
class DefaultDataset(NeuralLogDataset):
"""
The default NeuralLog dataset.
"""
def __init__(self, program, inverse_relations=True):
"""
Creates a DefaultDataset.
:param program: the NeuralLog program
:type program: NeuralLogProgram
:param inverse_relations: whether the dataset must consider the
inverse relations
:type inverse_relations: bool
"""
super(DefaultDataset, self).__init__(program, inverse_relations)
self._target_predicates = self._compute_target_predicates()
self.example_keys = self._load_example_keys()
def _load_example_keys(self):
example_keys = set()
for example_set in self.program.examples.values():
for examples_by_predicate in example_set.values():
for keys in examples_by_predicate.keys():
example_keys.add(keys)
return example_keys
    def _compute_target_predicates(self):
        """
        Computes the list of target predicates: every predicate that has
        examples, paired with an `inverted` flag; binary predicates also
        get an inverted entry when `inverse_relations` is enabled.
        :return: the target predicates, as (predicate, inverted) pairs
        :rtype: list[tuple[Predicate, bool]]
        """
        target_predicates = []
        # Guards against duplicates across example sets.
        predicates = set()
        for example_set in self.program.examples.values():
            for predicate in example_set:
                if predicate in predicates:
                    continue
                predicates.add(predicate)
                target_predicates.append((predicate, False))
                # Only binary relations have a meaningful inverse.
                if self.inverse_relations and predicate.arity == 2:
                    target_predicates.append((predicate, True))
        return target_predicates
    # noinspection PyMissingOrEmptyDocstring
    def has_example_key(self, key):
        """
        Checks if the dataset contains the example key.
        :param key: the example key
        :type key: Any
        :return: `True`, if the dataset contains the atom example
        :rtype: bool
        """
        return key in self.example_keys
# noinspection PyUnusedLocal,DuplicatedCode
def call(self, features, labels, *args, **kwargs):
"""
Used to transform the features and examples from the sparse
representation to dense in order to train the network.
:param features: A dense index tensor of the features
:type features: tuple[tf.SparseTensor]
:param labels: A tuple sparse tensor of labels
:type labels: tuple[tf.SparseTensor]
:param args: additional arguments
:type args: list
:param kwargs: additional arguments
:type kwargs: dict
:return: the features and label tensors
:rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
"""
dense_features = []
count = 0
for i in range(len(self._target_predicates)):
predicate, inverted = self._target_predicates[i]
indices, _ = get_predicate_indices(predicate, inverted)
for index in indices:
feature = tf.one_hot(
features[count],
self.program.get_constant_size(predicate, index))
dense_features.append(feature)
count += 1
labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
if len(dense_features) > 1:
dense_features = tuple(dense_features)
else:
dense_features = dense_features[0]
# all_dense_features = tuple(all_dense_features)
return dense_features, labels
__call__ = call
# noinspection PyMissingOrEmptyDocstring
def get_dataset(self, example_set=NO_EXAMPLE_SET,
                batch_size=1, shuffle=False):
    """
    Builds the `tf.data.Dataset` for the given `example_set`.

    Returns `None` when the set has no features, or when its only row
    consists entirely of out-of-vocabulary inputs (see
    `are_features_empty`).

    :param example_set: the name of the set of examples
    :type example_set: str
    :param batch_size: the batch size
    :type batch_size: int
    :param shuffle: if `True`, shuffles the dataset before batching
    :type shuffle: bool
    :return: the dataset, or `None` if it would be empty
    :rtype: tf.data.Dataset or None
    """
    features, labels = self.build(example_set=example_set)
    # noinspection PyTypeChecker
    if not features:
        return None
    if self.are_features_empty(features):
        return None
    dataset_size = len(features[0])
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    if shuffle:
        dataset = dataset.shuffle(dataset_size)
    dataset = dataset.batch(batch_size)
    # `self` is callable (see `call`): densifies sparse features/labels.
    dataset = dataset.map(self)
    logger.debug("Dataset %s created with %d example(s)", example_set,
                 dataset_size)
    return dataset
def are_features_empty(self, features):
    """
    Checks if the features are empty.

    :param features: the features
    :type features: List[List[int]] or Tuple[List[int]]
    :return: `True`, if the features are empty
    :rtype: bool
    """
    number_of_rows = len(features[0])
    if number_of_rows == 0:
        return True
    if number_of_rows > 1:
        return False
    # A single row counts as empty only when every input column holds
    # the out-of-vocabulary index of its predicate term.
    feature_index = 0
    for predicate, inverted in self._target_predicates:
        in_indices, _ = get_predicate_indices(predicate, inverted)
        for in_index in in_indices:
            oov_value = self._get_out_of_vocabulary_index(
                predicate, in_index)
            if features[feature_index][0] != oov_value:
                return False
            feature_index += 1
    return True
def build(self, example_set=NO_EXAMPLE_SET):
    """
    Builds the features and label to train the neural network based on
    the `example_set`.

    The labels are always a sparse tensor; the features are dense index
    tensors (one hot expansion happens later, in `call`).

    :param example_set: the name of the set of examples
    :type example_set: str
    :return: the features and labels
    :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor]) or
    (list[tuple[tf.SparseTensor]], tuple[tf.SparseTensor])
    """
    examples = self.program.examples.get(example_set, OrderedDict())
    return self._build(examples)
def ground_atom(self, example):
    """
    Grounds the example by replacing the value of the variables for each
    possible value found in the program.

    :param example: the example
    :type example: Atom
    :return: the grounded atoms
    :rtype: collections.Iterable[Atom]
    """
    if example.is_grounded():
        # Already ground: return a 1-tuple with the atom itself.
        return example,
    current_atoms = deque([example])
    predicate = example.predicate
    term_types: Tuple[TermType] = self.program.predicates[predicate]
    # Expand variables position by position; the result is the
    # cartesian product over all variable positions.
    for i in range(example.arity()):
        if example.terms[i].is_constant():
            continue
        next_atoms = deque()
        for atom in current_atoms:
            if term_types[i].number:
                # Numeric variable: grounded with the placeholder 0.0.
                terms = list(atom.terms)
                terms[i] = 0.0
                next_atoms.append(
                    Atom(predicate, *terms, weight=example.weight))
            else:
                # Entity variable: one atom per possible constant of
                # this predicate term.
                possible_terms = \
                    self.program.iterable_constants_per_term[(predicate, i)]
                for constant in possible_terms.values():
                    terms = list(atom.terms)
                    terms[i] = constant
                    next_atoms.append(
                        Atom(predicate, *terms, weight=example.weight))
        current_atoms = next_atoms
    return current_atoms
def _build(self, examples):
    """
    Builds the features and label to train the neural network based on
    the `example_set`.

    The labels are always a sparse tensor.

    :param examples: the set of examples
    :type examples: Examples
    :return: the features and labels
    :rtype: (tuple[tf.SparseTensor], tuple[tf.SparseTensor])
    """
    # First pass: group, by input term(s), the observed outputs of each
    # target predicate, preserving the order inputs first appear in.
    output_by_term = OrderedDict()
    input_terms = []
    for predicate, inverted in self._target_predicates:
        facts = examples.get(predicate, dict())
        facts = facts.values()
        for example in facts:
            for fact in self.ground_atom(example):
                if predicate.arity < 3:
                    # Inverted relations swap subject and object.
                    input_term = (fact.terms[-1 if inverted else 0],)
                else:
                    input_term = tuple(fact.terms[0:predicate.arity - 1])
                if input_term not in output_by_term:
                    output = dict()
                    output_by_term[input_term] = output
                    input_terms.append(input_term)
                else:
                    output = output_by_term[input_term]
                if predicate.arity == 1:
                    output[(predicate, inverted)] = fact.weight
                else:
                    output_term = fact.terms[0 if inverted else -1]
                    # noinspection PyTypeChecker
                    output.setdefault((predicate, inverted), []).append(
                        (output_term, fact.weight))
    # Second pass: for each target predicate, build one index column per
    # input term and a sparse label tensor over all collected inputs.
    all_features = []
    all_labels = []
    for predicate, inverted in self._target_predicates:
        features = [[] for _ in range(max(1, predicate.arity - 1))]
        label_values = []
        label_indices = []
        in_indices, out_index = get_predicate_indices(predicate, inverted)
        for i in range(len(input_terms)):
            outputs = output_by_term[input_terms[i]].get(
                (predicate, inverted), None)
            constant_index = 0
            for input_index in in_indices:
                index = None
                if outputs is not None:
                    index = self.program.get_index_of_constant(
                        predicate, input_index,
                        input_terms[i][constant_index])
                if index is None:
                    # Input unknown (or irrelevant) for this predicate:
                    # fall back to the out-of-vocabulary index.
                    index = self._get_out_of_vocabulary_index(
                        predicate, input_index)
                features[constant_index].append(index)
                constant_index += 1
            if outputs is not None:
                if predicate.arity == 1:
                    label_indices.append([i, 0])
                    label_values.append(outputs)
                else:
                    # noinspection PyTypeChecker
                    for output_term, output_value in outputs:
                        output_term_index = \
                            self.program.get_index_of_constant(
                                predicate, out_index, output_term)
                        label_indices.append([i, output_term_index])
                        label_values.append(output_value)
        all_features += features
        if predicate.arity == 1:
            dense_shape = [len(input_terms), 1]
            empty_index = [[0, 0]]
        else:
            dense_shape = [
                len(input_terms),
                self.program.get_constant_size(predicate, out_index)]
            empty_index = [[0, 0]]
        if len(label_values) == 0:
            # No labels for this predicate: emit a single zero entry so
            # the sparse tensor is still well-formed.
            sparse_tensor = tf.SparseTensor(indices=empty_index,
                                            values=[0.0],
                                            dense_shape=dense_shape)
        else:
            sparse_tensor = tf.SparseTensor(indices=label_indices,
                                            values=label_values,
                                            dense_shape=dense_shape)
            # Indices were appended per predicate; canonical row-major
            # order is required by `tf.sparse.to_dense`.
            sparse_tensor = tf.sparse.reorder(sparse_tensor)
        all_labels.append(sparse_tensor)
    return tuple(all_features), tuple(all_labels)
def _get_out_of_vocabulary_index(self, predicate, term_index):
"""
Returns the index of the entity to replace the not found entity.
:param predicate: the predicate
:type predicate: Predicate
:param term_index: the index of the term
:type term_index: int
:return: the index of entity to replace the not found one
:rtype: int
"""
return -1
# noinspection PyMissingOrEmptyDocstring
def print_predictions(self, model, program, dataset, writer=sys.stdout,
                      dataset_name=None, print_batch_header=False):
    """
    Prints the model's predictions for `dataset` to `writer`.

    Thin wrapper delegating to `print_neural_log_predictions`.
    """
    return print_neural_log_predictions(
        model, program, self, dataset, writer=writer,
        dataset_name=dataset_name, print_batch_header=print_batch_header)
class AbstractSequenceDataset(DefaultDataset, ABC):
    """
    Represents an Abstract Sequence Dataset.

    Mega examples are streamed through `tf.data.Dataset.from_generator`,
    one (features, labels) pair per mega example; subclasses implement
    `_build` to turn a mega example into tensors.
    """

    def __init__(self, program, pad_index,
                 inverse_relations=False, expand_one_hot=True):
        """
        Creates an abstract sequence dataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param pad_index: the index used to pad shorter feature sequences
        :type pad_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        """
        super().__init__(program, inverse_relations=inverse_relations)
        self.pad_index = pad_index
        self.expand_one_hot = expand_one_hot
        self.example_keys = self._load_example_keys()
        self._output_types = None
        self._output_shapes = None
        self._compute_output_format()

    def _load_example_keys(self):
        """Collects the simple keys of every mega example atom."""
        example_keys = set()
        for mega_examples in self.program.mega_examples.values():
            for example_set in mega_examples.values():
                for examples_by_predicate in example_set.values():
                    for example in examples_by_predicate:
                        example_keys.add(example.simple_key())
        return example_keys

    def _compute_target_predicates(self):
        """Computes the (predicate, inverted) targets of the mega examples."""
        target_predicates = []
        predicates = set()
        for mega_examples in self.program.mega_examples.values():
            for example_set in mega_examples.values():
                for predicate in example_set:
                    if predicate in predicates:
                        continue
                    predicates.add(predicate)
                    target_predicates.append((predicate, False))
                    if self.inverse_relations and predicate.arity > 1:
                        target_predicates.append((predicate, True))
        return target_predicates

    def _compute_output_format(self):
        """
        Precomputes the output types/shapes required by
        `tf.data.Dataset.from_generator`.
        """
        length = 0
        output_types = []
        output_shapes = []
        for predicate, inverted in self._target_predicates:
            length += max(predicate.arity - 1, 1)
            _, index = get_predicate_indices(predicate, inverted)
            size = self.program.get_constant_size(predicate, index)
            output_types.append(tf.float32)
            output_shapes.append((None, size))
        # Features: one int32 row of indices per input column; labels:
        # one float32 matrix per target predicate.
        self._output_types = (tf.int32,) + (tuple(output_types),)
        # noinspection PyTypeChecker
        self._output_shapes = ((length, None),) + (tuple(output_shapes),)

    # noinspection PyUnusedLocal,DuplicatedCode
    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.

        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                if self.expand_one_hot:
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    # Keep raw indices, reshaped to a column vector.
                    feature = tf.reshape(features[count], [-1, 1])
                dense_features.append(feature)
                count += 1
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        return dense_features, labels

    __call__ = call

    # noinspection PyMissingOrEmptyDocstring
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """
        Builds the dataset from a generator over the mega examples.

        Note: each yielded mega example already forms a batch; no
        explicit `batch()` call is made here.
        """
        # noinspection PyTypeChecker
        dataset = tf.data.Dataset.from_generator(
            partial(self.build, example_set),
            output_types=self._output_types,
            output_shapes=self._output_shapes
        )
        dataset_size = len(self.program.mega_examples.get(example_set, []))
        if shuffle:
            dataset = dataset.shuffle(dataset_size)
        dataset = dataset.map(self)
        logger.debug("Dataset %s created with %d example(s)", example_set,
                     dataset_size)
        return dataset

    def build(self, example_set=NO_EXAMPLE_SET):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor, densified before yielding.

        :param example_set: the name of the set of examples
        :type example_set: str
        :return: a generator of (features, labels), one pair per mega
        example, in deterministic (sorted-key) order
        :rtype: collections.Iterable[(tuple, tuple[tf.Tensor])]
        """
        mega_examples = self.program.mega_examples.get(
            example_set, OrderedDict())
        for _, examples in sorted(mega_examples.items(), key=lambda x: x[0]):
            features, labels = self._build(examples)
            labels = tuple(map(lambda x: tf.sparse.to_dense(x), labels))
            yield features, labels

    @abstractmethod
    def _build(self, examples):
        """
        Builds the features and label to train the neural network
        based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        pass
@neural_log_dataset("sequence_dataset")
class SequenceDataset(AbstractSequenceDataset):
    """
    The sequence dataset.
    """

    def __init__(self, program, empty_word_index, inverse_relations=True,
                 oov_word="<OOV>", expand_one_hot=True):
        """
        Creates a SequenceDataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param empty_word_index: the index of an entity that is not found in any
        example, to represent an empty entry
        :type empty_word_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param oov_word: the value to replace out of the vocabulary
        entities
        :type oov_word: str
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        """
        super(SequenceDataset, self).__init__(
            program, empty_word_index, inverse_relations, expand_one_hot)
        self.oov_word = get_constant_from_string(oov_word)

    # noinspection DuplicatedCode
    def _build(self, examples):
        """
        Builds the features and labels of a single mega example.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        all_features = []
        all_label_indices = []
        all_label_values = []
        total_of_rows = 0
        # First pass: per target predicate, collect input index columns
        # and (row, output index) label entries; rows accumulate across
        # predicates.
        for predicate, inverted in self._target_predicates:
            input_indices, output_index = get_predicate_indices(
                predicate, inverted)
            feature = [[] for _ in range(max(1, predicate.arity - 1))]
            label_indices = []
            label_values = []
            facts = examples.get(predicate, [])
            for example in facts:
                for fact in self.ground_atom(example):
                    if predicate.arity < 3:
                        input_terms = (fact.terms[-1 if inverted else 0],)
                    else:
                        input_terms = tuple(fact.terms[0:predicate.arity - 1])
                    count = 0
                    for input_index, input_term in zip(input_indices,
                                                       input_terms):
                        input_value = self.program.get_index_of_constant(
                            predicate, input_index, input_term)
                        if input_value is None:
                            # Unknown constant: use the OOV index.
                            input_value = self._get_out_of_vocabulary_index(
                                predicate, input_index)
                        feature[count].append(input_value)
                        count += 1
                    if predicate.arity == 1:
                        label_indices.append([total_of_rows, 0])
                    else:
                        output_term = fact.terms[output_index]
                        output_value = self.program.get_index_of_constant(
                            predicate, output_index, output_term)
                        label_indices.append([total_of_rows, output_value])
                    label_values.append(fact.weight)
                    total_of_rows += 1
            all_label_indices.append(label_indices)
            all_label_values.append(label_values)
            all_features += feature
        all_labels = []
        examples_offset = 0
        features_offset = 0
        # Second pass: pad every feature column to `total_of_rows` rows,
        # placing each predicate's values at its own row offset, and
        # build one sparse label tensor per predicate.
        for i in range(len(self._target_predicates)):
            # Features
            number_of_features = max(self._target_predicates[i][0].arity - 1, 1)
            length = len(all_features[features_offset])
            for j in range(number_of_features):
                all_features[features_offset + j] = \
                    ([self.pad_index] * examples_offset) + \
                    all_features[features_offset + j]
                all_features[features_offset + j] += \
                    [self.pad_index] * (
                            total_of_rows - examples_offset - length)
            examples_offset += length
            features_offset += number_of_features
            # Labels
            predicate, index = self._target_predicates[i]
            _, output_index = get_predicate_indices(predicate, index)
            if predicate.arity == 1:
                dense_shape = [total_of_rows, 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    total_of_rows,
                    self.program.get_constant_size(predicate, output_index)]
                empty_index = [[0, 0]]
            if len(all_label_values[i]) == 0:
                # No labels for this predicate: single zero entry keeps
                # the sparse tensor well-formed.
                sparse_tensor = tf.SparseTensor(
                    indices=empty_index, values=[0.0], dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(
                    indices=all_label_indices[i], values=all_label_values[i],
                    dense_shape=dense_shape)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)

    def _get_out_of_vocabulary_index(self, predicate, term_index):
        """
        Returns the index of the entity to replace the not found entity.

        :param predicate: the predicate
        :type predicate: Predicate
        :param term_index: the index of the term
        :type term_index: int
        :return: the index of entity to replace the not found one
        :rtype: int
        """
        return self.program.get_index_of_constant(predicate, term_index,
                                                  self.oov_word)
def _atom_processor(predicate, feature, label, weight):
    """Formats a prediction as a weighted atom clause string."""
    atom = Atom(predicate, feature, label, weight=weight)
    return str(AtomClause(atom))
# noinspection PyUnusedLocal
def _conll_processor(predicate, feature, label, weight):
return f"{feature.value}\t{label.value}"
@neural_log_dataset("language_dataset")
class LanguageDataset(AbstractSequenceDataset):
    """
    Class to process mega examples as phrases.
    """

    def __init__(self,
                 program,
                 inverse_relations,
                 vocabulary_file,
                 initial_token="[CLS]",
                 final_token="[SEP]",
                 pad_token="[PAD]",
                 sub_token_label="X",
                 maximum_sentence_length=None,
                 do_lower_case=False,
                 pad_to_maximum_length=False,
                 string_processor=None
                 # expand_one_hot=False,
                 # language_predicates=None,
                 ):
        """
        Creates a LanguageDataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param vocabulary_file: the path of the WordPiece vocabulary file
        :type vocabulary_file: str
        :param initial_token: the token prepended to every sentence
        :type initial_token: str
        :param final_token: the token appended to every sentence
        :type final_token: str
        :param pad_token: the token used to pad sentences
        :type pad_token: str
        :param sub_token_label: the label assigned to word-piece
        continuation tokens
        :type sub_token_label: str
        :param maximum_sentence_length: the maximum number of tokens in a
        sentence; `None` means unlimited
        :type maximum_sentence_length: int or None
        :param do_lower_case: whether the tokenizer lowercases the input
        :type do_lower_case: bool
        :param pad_to_maximum_length: if `True` (and a maximum length is
        set), pads every sentence to `maximum_sentence_length`
        :type pad_to_maximum_length: bool
        :param string_processor: the name of the output formatter;
        `conll` for CoNLL-style lines, anything else for atom clauses
        :type string_processor: str or None
        """
        self.tokenizer = FullTokenizer(
            vocabulary_file, do_lower_case=do_lower_case)
        self.maximum_sentence_length = maximum_sentence_length
        self.initial_token = Constant(initial_token)
        self.initial_token_index = self.tokenizer.vocab[initial_token]
        self.final_token = Constant(final_token)
        self.final_token_index = self.tokenizer.vocab[final_token]
        self.skip_tokens = [initial_token, final_token]
        self.pad_token = Constant(pad_token)
        self.pad_index = self.tokenizer.vocab[pad_token]
        self.sub_token_label = Constant(sub_token_label)
        # Padding to a fixed length only makes sense when a maximum
        # length exists.
        self.pad_to_maximum_length = \
            pad_to_maximum_length and maximum_sentence_length is not None
        super(LanguageDataset, self).__init__(
            program, self.pad_index, inverse_relations, expand_one_hot=False)
        if string_processor is not None and string_processor.lower() == 'conll':
            self._string_processor = _conll_processor
        else:
            self._string_processor = _atom_processor

    # noinspection PyMissingOrEmptyDocstring
    def call(self, features, labels, *args, **kwargs):
        """
        Transforms the features into dense tensors (one hot or raw
        indices, depending on `expand_one_hot`).
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                if self.expand_one_hot:
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    feature = features[count]
                dense_features.append(feature)
                count += 1
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        return dense_features, labels

    __call__ = call

    def _compute_output_format(self):
        """
        Precomputes the generator output types/shapes; the sequence axis
        is fixed to `maximum_sentence_length` when padding to it.
        """
        batch_size = None
        if self.pad_to_maximum_length:
            batch_size = self.maximum_sentence_length
        length = 0
        output_types = []
        output_shapes = []
        for predicate, inverted in self._target_predicates:
            length += max(predicate.arity - 1, 1)
            _, index = get_predicate_indices(predicate, inverted)
            size = self.program.get_constant_size(predicate, index)
            output_types.append(tf.float32)
            output_shapes.append((batch_size, size))
        self._output_types = (tf.int32,) + (tuple(output_types),)
        # noinspection PyTypeChecker
        self._output_shapes = ((length, batch_size),) + (tuple(output_shapes),)

    # noinspection PyMissingOrEmptyDocstring
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """Builds the dataset and groups `batch_size` sentences per batch."""
        dataset = super().get_dataset(
            example_set, batch_size=batch_size, shuffle=shuffle)
        return dataset.batch(batch_size)

    def _reached_maximum_length(self, current_length):
        """
        Checks whether `current_length` leaves no room for more tokens,
        reserving one slot for the final token.
        """
        if self.maximum_sentence_length is None:
            return False
        return current_length > self.maximum_sentence_length - 2

    # noinspection DuplicatedCode
    def _build(self, examples):
        """
        Builds the features and labels of a single mega example.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        all_features, all_label_indices, all_label_values, total_of_rows = \
            self._preprocess_examples(examples)
        all_labels = []
        examples_offset = 0
        features_offset = 0
        for i in range(len(self._target_predicates)):
            # Features: pad each predicate's columns to span all rows.
            number_of_features = max(
                self._target_predicates[i][0].arity - 1, 1)
            length = len(all_features[features_offset])
            for j in range(number_of_features):
                all_features[features_offset + j] = \
                    ([self.pad_index] * examples_offset) + \
                    all_features[features_offset + j]
                all_features[features_offset + j] += \
                    [self.pad_index] * (
                            total_of_rows - examples_offset - length)
            examples_offset += length
            features_offset += number_of_features
            # Labels
            predicate, index = self._target_predicates[i]
            _, output_index = get_predicate_indices(predicate, index)
            if predicate.arity == 1:
                dense_shape = [total_of_rows, 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    total_of_rows,
                    self.program.get_constant_size(predicate, output_index)]
                empty_index = [[0, 0]]
            if len(all_label_values[i]) == 0:
                # No labels for this predicate: a single zero entry
                # keeps the sparse tensor well-formed.
                sparse_tensor = tf.SparseTensor(
                    indices=empty_index, values=[0.0],
                    dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(
                    indices=all_label_indices[i],
                    values=all_label_values[i],
                    dense_shape=dense_shape)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)

    # noinspection DuplicatedCode
    def _preprocess_examples(self, examples):
        """
        Tokenizes the examples into sub-word features and labels,
        surrounding each predicate's sequence with the initial/final
        tokens and optionally padding to the maximum length.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features, label indices, label values and the total
        number of rows
        :rtype: (list, list, list, int)
        """
        all_features = []
        all_label_indices = []
        all_label_values = []
        total_of_rows = 0
        for predicate, inverted in self._target_predicates:
            input_indices, output_index = get_predicate_indices(
                predicate, inverted)
            feature = [[] for _ in range(max(1, predicate.arity - 1))]
            label_indices = []
            label_values = []
            facts = examples.get(predicate, [])
            # Adds the initial token
            for i in range(len(input_indices)):
                feature[i].append(self.initial_token_index)
            if predicate.arity == 1:
                label_indices.append([total_of_rows, 0])
            else:
                index = self.program.get_index_of_constant(
                    predicate, -1, self.initial_token)
                label_indices.append([total_of_rows, index])
            label_values.append(1.0)
            total_of_rows += 1
            reached_maximum_length = False
            for example in facts:
                for fact in self.ground_atom(example):
                    if predicate.arity < 3:
                        input_terms = (fact.terms[-1 if inverted else 0],)
                    else:
                        input_terms = tuple(
                            fact.terms[0:predicate.arity - 1])
                    count = 0
                    input_values = []
                    # Fix: `maximum_sentence_length` may be `None`
                    # (unlimited); the unguarded subtraction used to
                    # raise a TypeError in that case. Slicing with
                    # `None` keeps every sub-token.
                    if self.maximum_sentence_length is None:
                        maximum_rows = None
                    else:
                        maximum_rows = \
                            self.maximum_sentence_length - total_of_rows - 1
                    for input_index, input_term in zip(input_indices,
                                                       input_terms):
                        input_values = self.tokenizer.tokenize(input_term.value)
                        if input_values is None:
                            input_values = [self._get_out_of_vocabulary_index(
                                predicate, input_index)]
                        else:
                            input_values = self.tokenizer.convert_tokens_to_ids(
                                input_values)
                        feature[count].extend(input_values[:maximum_rows])
                        count += 1
                    if predicate.arity == 1:
                        label_indices.append([total_of_rows, 0])
                    else:
                        output_term = fact.terms[output_index]
                        output_value = self.program.get_index_of_constant(
                            predicate, output_index, output_term)
                        label_indices.append([total_of_rows, output_value])
                    label_values.append(fact.weight)
                    total_of_rows += 1
                    if self._reached_maximum_length(total_of_rows):
                        reached_maximum_length = True
                        break
                    sub_token_index = self.program.get_index_of_constant(
                        predicate, -1, self.sub_token_label)
                    # Adds the sub_token labels
                    for _ in range(len(input_values) - 1):
                        if predicate.arity == 1:
                            label_indices.append([total_of_rows, 0])
                        else:
                            label_indices.append(
                                [total_of_rows, sub_token_index])
                        label_values.append(1.0)
                        total_of_rows += 1
                        if self._reached_maximum_length(total_of_rows):
                            reached_maximum_length = True
                            break
                if reached_maximum_length:
                    break
            # Adds the final token
            for i in range(len(input_indices)):
                feature[i].append(self.final_token_index)
            if predicate.arity == 1:
                label_indices.append([total_of_rows, 0])
            else:
                index = self.program.get_index_of_constant(
                    predicate, -1, self.final_token)
                label_indices.append([total_of_rows, index])
            label_values.append(1.0)
            total_of_rows += 1
            # Adds pads
            if self.pad_to_maximum_length:
                index = self.program.get_index_of_constant(
                    predicate, -1, self.pad_token)
                for _ in range(self.maximum_sentence_length - total_of_rows):
                    for i in range(len(input_indices)):
                        feature[i].append(self.pad_index)
                    if predicate.arity == 1:
                        label_indices.append([total_of_rows, 0])
                    else:
                        label_indices.append([total_of_rows, index])
                    label_values.append(1.0)
                    total_of_rows += 1
            all_label_indices.append(label_indices)
            all_label_values.append(label_values)
            all_features += feature
        return all_features, all_label_indices, all_label_values, total_of_rows

    # noinspection PyMissingOrEmptyDocstring,DuplicatedCode
    def print_predictions(self, model, program, dataset,
                          writer=sys.stdout, dataset_name=None,
                          print_batch_header=False):
        """
        Prints the predicted label of each whole word, merging word-piece
        continuation tokens back into their word.
        """
        prefix_length = len(PARTIAL_WORD_PREFIX)
        count = 0
        batches = None
        if print_batch_header and dataset_name is not None:
            batches = list(program.mega_examples[dataset_name].keys())
        for features, _ in dataset:
            if print_batch_header:
                batch = batches[count] if batches is not None else count
                print("%% Batch:", batch, file=writer, sep="\t")
                count += 1
            y_scores = model.predict(features)
            if len(model.predicates) == 1:
                # if not isinstance(neural_dataset, WordCharDataset):
                y_scores = [y_scores]
                if model.predicates[0][0].arity < 3:
                    features = [features]
            # i iterates over the predicates
            for i in range(len(model.predicates)):
                predicate, inverted = model.predicates[i]
                if inverted:
                    continue
                row_scores = y_scores[i]
                # j iterates over the mega examples
                for j in range(len(row_scores)):
                    y_score = row_scores[j]
                    offset = sum(model.input_sizes[:i])
                    x_k = features[offset][j].numpy()
                    subjects = self.tokenizer.convert_ids_to_tokens(x_k)
                    processed_token = None
                    label_index = None
                    for index, sub in enumerate(subjects):
                        if sub == self.pad_token.value:
                            break
                        if sub.startswith(PARTIAL_WORD_PREFIX):
                            # Word-piece continuation: append to the
                            # current word.
                            processed_token += sub[prefix_length:]
                        else:
                            if processed_token is not None:
                                max_label = y_score[label_index].argmax()
                                obj = program.get_constant_by_index(
                                    predicate, -1, max_label)
                                weight = float(y_score[label_index][max_label])
                                feature = Quote(f'"{processed_token}"')
                                string_format = self._string_processor(
                                    predicate, feature, obj, weight)
                                print(string_format, file=writer)
                            if sub in self.skip_tokens:
                                continue
                            processed_token = sub
                            label_index = index
                    print(file=writer)
@neural_log_dataset("word_char_dataset")
class WordCharDataset(SequenceDataset):
    """
    Class to represent a Word and Char dataset.

    This class considers ternary predicates as being composed by an word,
    a sequence of characters (represented as a string) and a class.

    The word and the class will be treated as usual. The sequence of
    characters will be transformed into a vector of index of the character
    entity in a given predicate. The vector will have the size of the
    largest sequence in the batch.
    """

    def __init__(self, program, empty_word_index, empty_char_index,
                 character_predicate, character_predicate_index=0,
                 inverse_relations=True, oov_word="<OOV>", oov_char="<OOV>",
                 expand_one_hot=True, char_pad=0):
        """
        Creates a word char dataset.

        :param program: the NeuralLog program
        :type program: NeuralLogProgram
        :param empty_word_index: the index of an entity that is not found in any
        example, to represent an empty entry
        :type empty_word_index: int
        :param empty_char_index: the index of a character that is not found
        in any example, to pad the character sequences
        :type empty_char_index: int
        :param character_predicate: the predicate to get the index of the
        characters
        :type character_predicate: str
        :param character_predicate_index: the index of the term in the character
        predicate, to get the index of the characters
        :type character_predicate_index: int
        :param inverse_relations: whether the dataset must consider the
        inverse relations
        :type inverse_relations: bool
        :param oov_word: the value to replace out of the vocabulary words
        :type oov_word: str
        :param oov_char: the value to replace out of the vocabulary chars
        :type oov_char: str
        :param expand_one_hot: if `True`, expands the indices of the input
        into one hot tensors
        :type expand_one_hot: bool
        :param char_pad: the number of empty elements to append at the end of
        the char sequence
        :type char_pad: int
        """
        super(WordCharDataset, self).__init__(
            program, empty_word_index, inverse_relations,
            oov_word, expand_one_hot)
        self.empty_char_index = empty_char_index
        self.character_predicate = \
            get_predicate_from_string(character_predicate)
        self.character_predicate_index = character_predicate_index
        self.oov_char = get_constant_from_string(oov_char)
        # Resolved once, reused for every out-of-vocabulary character.
        self._ooc_char_index = self._get_out_of_vocabulary_index(
            get_predicate_from_string(character_predicate),
            character_predicate_index
        )
        self.char_pad = max(char_pad, 0)

    # noinspection PyMissingOrEmptyDocstring
    def has_example_key(self, key):
        # This dataset accepts every key.
        return True

    def _compute_target_predicates(self):
        """
        Computes the target predicates, widening each example predicate by
        one extra term to hold the character sequence.
        """
        target_predicates = []
        predicates = set()
        for mega_examples in self.program.mega_examples.values():
            for example_set in mega_examples.values():
                for predicate in example_set:
                    if predicate in predicates:
                        continue
                    predicates.add(predicate)
                    predicate = Predicate(predicate.name, predicate.arity + 1)
                    self.program.logic_predicates.add(predicate)
                    target_predicates.append((predicate, False))
        return target_predicates

    def _compute_output_format(self):
        """
        Precomputes the generator output types/shapes: column vectors of
        word indices plus a variable-width char-index matrix.
        """
        length = 0
        label_types = []
        label_shapes = []
        feature_shapes = []
        for predicate, inverted in self._target_predicates:
            length += max(predicate.arity - 1, 1)
            _, index = get_predicate_indices(predicate, inverted)
            size = self.program.get_constant_size(predicate, index)
            label_types.append(tf.float32)
            label_shapes.append((None, size))
        if length == 1:
            feature_shapes.append((None, 1))
        else:
            # The last feature is the char matrix; the others are word
            # index column vectors.
            feature_shapes += [(None, 1)] * (length - 1)
            feature_shapes.append((None, None))
        feature_types = (tf.int32, tf.int32)
        self._output_types = (feature_types, tuple(label_types))
        # noinspection PyTypeChecker
        self._output_shapes = (tuple(feature_shapes), tuple(label_shapes))

    # noinspection PyMissingOrEmptyDocstring
    def get_dataset(self, example_set=NO_EXAMPLE_SET,
                    batch_size=1, shuffle=False):
        """
        Builds the dataset from a generator over the mega examples.

        Note: unlike the parent classes, the dataset is NOT mapped
        through `self` here.
        """
        # noinspection PyTypeChecker
        dataset = tf.data.Dataset.from_generator(
            partial(self.build, example_set),
            output_types=self._output_types,
            output_shapes=self._output_shapes
        )
        dataset_size = len(self.program.mega_examples.get(example_set, []))
        if shuffle:
            dataset = dataset.shuffle(dataset_size)
        logger.debug("Dataset %s created with %d example(s)", example_set,
                     dataset_size)
        return dataset

    def call(self, features, labels, *args, **kwargs):
        """
        Used to transform the features and examples from the sparse
        representation to dense in order to train the network.

        :param features: A dense index tensor of the features
        :type features: tuple[tf.SparseTensor]
        :param labels: A tuple sparse tensor of labels
        :type labels: tuple[tf.SparseTensor]
        :param args: additional arguments
        :type args: list
        :param kwargs: additional arguments
        :type kwargs: dict
        :return: the features and label tensors
        :rtype: (tf.Tensor or tuple[tf.Tensor], tuple[tf.Tensor])
        """
        dense_features = []
        count = 0
        for i in range(len(self._target_predicates)):
            predicate, inverted = self._target_predicates[i]
            indices, _ = get_predicate_indices(predicate, inverted)
            for index in indices:
                if self.expand_one_hot:
                    feature = tf.one_hot(
                        features[count],
                        self.program.get_constant_size(predicate, index))
                else:
                    feature = tf.constant(features[count])
                dense_features.append(feature)
                count += 1
        if len(dense_features) > 1:
            dense_features = tuple(dense_features)
        else:
            dense_features = dense_features[0]
        if len(labels) == 1:
            labels = labels[0]
        return dense_features, labels

    __call__ = call

    # noinspection DuplicatedCode
    def _build(self, examples):
        """
        Builds the features and label to train the neural network based on
        the `example_set`.

        The labels are always a sparse tensor.

        :param examples: the set of examples
        :type examples: dict[Predicate, List[Atom]]
        :return: the features and labels
        :rtype: (tuple[list[int]], tuple[tf.SparseTensor])
        """
        all_features = []  # type: List[int] or List[List[int]]
        all_label_indices = []
        all_label_values = []
        row_index = 0
        max_lengths = []
        for predicate, inverted in self._target_predicates:
            input_indices, output_index = get_predicate_indices(
                predicate, inverted)
            # The widened predicate has one extra (char) term; the real
            # example predicate has `arity - 1` terms.
            output_index -= 1
            real_predicate = Predicate(predicate.name, predicate.arity - 1)
            feature = [[] for _ in range(max(1, predicate.arity - 1))]
            label_indices = []
            label_values = []
            facts = examples.get(real_predicate, [])
            max_length = -1
            for example in facts:
                for fact in self.ground_atom(example):
                    input_terms = tuple(fact.terms[0:predicate.arity - 2])
                    count = 0
                    for input_index, in_term in zip(input_indices, input_terms):
                        # Words are looked up lowercased.
                        in_term = get_term_from_string(str(in_term).lower())
                        input_value = self.program.get_index_of_constant(
                            real_predicate, input_index, in_term)
                        if input_value is None:
                            input_value = self._get_out_of_vocabulary_index(
                                real_predicate, input_index)
                        feature[count].append([input_value])
                        count += 1
                    if predicate.arity > 2:
                        # Builds the character index sequence from the
                        # last input term's string value.
                        char_features = []
                        last_term = input_terms[-1].value
                        max_length = max(len(last_term), max_length)
                        for char in last_term:
                            input_value = self.program.get_index_of_constant(
                                self.character_predicate,
                                self.character_predicate_index,
                                get_constant_from_string(char)
                            )
                            if input_value is None:
                                input_value = self._ooc_char_index
                            char_features.append(input_value)
                        feature[-1].append(char_features)
                    output_term = fact.terms[output_index]
                    output_value = self.program.get_index_of_constant(
                        real_predicate, output_index, output_term)
                    label_indices.append([row_index, output_value])
                    label_values.append(fact.weight)
                    row_index += 1
            max_lengths.append(max_length + self.char_pad)
            all_label_indices.append(label_indices)
            all_label_values.append(label_values)
            all_features += feature
        all_labels = []
        examples_offset = 0
        features_offset = 0
        for i in range(len(self._target_predicates)):
            # Features: pad word columns with the empty word index and
            # char rows with the empty char index, so all columns span
            # `row_index` rows.
            arity = self._target_predicates[i][0].arity
            number_of_features = max(arity - 1, 1)
            length = len(all_features[features_offset])
            if arity > 2:
                number_of_features -= 1
            for j in range(number_of_features):
                # NOTE(review): `self.empty_word_index` is not assigned in
                # this class's `__init__` (the superclass stores that
                # argument as `pad_index`) — confirm an ancestor sets it,
                # otherwise this raises AttributeError at runtime.
                all_features[features_offset + j] = \
                    ([self.empty_word_index] * examples_offset) + \
                    all_features[features_offset + j]
                all_features[features_offset + j] += \
                    [self.empty_word_index] * (
                            row_index - examples_offset - length)
            if arity > 2:
                j = number_of_features
                adjusted_features = []
                # Right-pads every char sequence to this predicate's
                # maximum length.
                for current in all_features[features_offset + j]:
                    # noinspection PyTypeChecker
                    adjusted_features.append(
                        current +
                        ([self.empty_char_index] *
                         (max_lengths[i] - len(current))))
                all_features[features_offset + j] = \
                    ([[self.empty_char_index] * max_lengths[i]] *
                     examples_offset) + adjusted_features
                all_features[features_offset + j] += \
                    [[self.empty_char_index] * max_lengths[i]] * (
                            row_index - examples_offset - length)
                number_of_features += 1
            examples_offset += length
            features_offset += number_of_features
            # Labels
            predicate, index = self._target_predicates[i]
            real_predicate = Predicate(predicate.name, predicate.arity - 1)
            _, output_index = get_predicate_indices(predicate, index)
            output_index -= 1
            if predicate.arity == 1:
                dense_shape = [row_index, 1]
                empty_index = [[0, 0]]
            else:
                dense_shape = [
                    row_index,
                    self.program.get_constant_size(
                        real_predicate, output_index)]
                empty_index = [[0, 0]]
            if len(all_label_values[i]) == 0:
                # No labels for this predicate: a single zero entry keeps
                # the sparse tensor well-formed.
                sparse_tensor = tf.SparseTensor(
                    indices=empty_index, values=[0.0], dense_shape=dense_shape)
            else:
                sparse_tensor = tf.SparseTensor(
                    indices=all_label_indices[i], values=all_label_values[i],
                    dense_shape=dense_shape)
            all_labels.append(sparse_tensor)
        return tuple(all_features), tuple(all_labels)

    # noinspection PyMissingOrEmptyDocstring
    def print_predictions(self, model, program, dataset, writer=sys.stdout,
                          dataset_name=None, print_batch_header=False):
        """Delegates to the word/char-specific prediction printer."""
        return _print_word_char_predictions(
            model, program, self, dataset, writer=writer,
            dataset_name=dataset_name, print_batch_header=print_batch_header)
| 23,995 | 0 | 680 |
ebce24889550988f1b0cf018468119ba1adbce7c | 8,754 | py | Python | hat/vector_control/migrations/0001_initial.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | hat/vector_control/migrations/0001_initial.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | hat/vector_control/migrations/0001_initial.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | # Generated by Django 2.0 on 2018-12-07 11:05
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.citext
from django.db import migrations, models
import django.db.models.deletion
import uuid
| 37.570815 | 113 | 0.421407 | # Generated by Django 2.0 on 2018-12-07 11:05
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.citext
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``vector_control`` app.
    Creates the Catch, GpsImport, GpsWaypoint, Site and Target tables and
    links each Catch to its Site and, optionally, to the recording user.
    """
    # First migration of this app.
    initial = True
    # Only external dependency: the (swappable) user model.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
    operations = [
        # Catch: one collection event, with per-sex specimen counts and
        # trap/target elevation metadata; linked to Site/user further below.
        migrations.CreateModel(
            name="Catch",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("operation", models.TextField(null=True)),
                ("setup_date", models.DateTimeField(null=True)),
                ("collect_date", models.DateTimeField(null=True)),
                ("in_out", models.TextField(null=True)),
                ("male_count", models.IntegerField(default=0, null=True)),
                ("female_count", models.IntegerField(default=0, null=True)),
                ("unknown_count", models.IntegerField(default=0, null=True)),
                ("remarks", models.TextField(default="")),
                (
                    "distance_to_targets",
                    models.DecimalField(decimal_places=3, max_digits=10, null=True),
                ),
                ("near_intervention", models.CharField(max_length=100)),
                ("elev_change", models.IntegerField(null=True)),
                ("trap_elev", models.IntegerField(null=True)),
                ("target_elev", models.IntegerField(null=True)),
                ("elev_diff", models.IntegerField(null=True)),
                # Callable default: a fresh uuid4 is generated per row.
                ("uuid", models.TextField(default=uuid.uuid4, unique=True)),
                (
                    "source",
                    models.TextField(
                        choices=[("excel", "Excel"), ("API", "API")],
                        default="excel",
                        null=True,
                    ),
                ),
            ],
        ),
        # GpsImport: one uploaded GPS file, with uploader and timestamps.
        migrations.CreateModel(
            name="GpsImport",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("filename", models.TextField()),
                ("file_date_time", models.DateTimeField(null=True)),
                ("creator", models.TextField(blank=True, null=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # GpsWaypoint: a single named point from a GpsImport, with
        # coordinates, elevation and free-form (case-insensitive) tags.
        migrations.CreateModel(
            name="GpsWaypoint",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.TextField()),
                ("date_time", models.DateTimeField()),
                (
                    "latitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "longitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "elevation",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                (
                    "tags",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=django.contrib.postgres.fields.citext.CITextField(blank=True, max_length=255),
                        blank=True,
                        null=True,
                        size=20,
                    ),
                ),
                ("ignore", models.BooleanField(default=False)),
                (
                    "gps_import",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="vector_control.GpsImport",
                    ),
                ),
            ],
        ),
        # Site: a surveyed location with coordinates, habitat description
        # and first-survey metadata.
        migrations.CreateModel(
            name="Site",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=50, null=True)),
                ("zone", models.TextField(null=True)),
                (
                    "latitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "longitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "altitude",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                (
                    "accuracy",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                ("habitat", models.CharField(max_length=255, null=True)),
                ("description", models.CharField(max_length=255, null=True)),
                ("first_survey", models.CharField(max_length=255, null=True)),
                ("first_survey_date", models.DateTimeField(null=True)),
                ("count", models.IntegerField()),
                ("total", models.IntegerField()),
                # Callable default: a fresh uuid4 is generated per row.
                ("uuid", models.TextField(default=uuid.uuid4, unique=True)),
                (
                    "source",
                    models.TextField(
                        choices=[("excel", "Excel"), ("API", "API")],
                        default="excel",
                        null=True,
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Target: a named deployment target with GPS position and river info.
        migrations.CreateModel(
            name="Target",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.TextField(null=True)),
                ("deployment", models.IntegerField(null=True)),
                ("full_name", models.TextField(null=True)),
                ("gps", models.CharField(max_length=100)),
                (
                    "latitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "longitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "altitude",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                ("date_time", models.DateTimeField(null=True)),
                ("river", models.TextField(null=True)),
            ],
        ),
        # Wire each Catch to its Site (required; cascades on delete) ...
        migrations.AddField(
            model_name="catch",
            name="site",
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="vector_control.Site"),
        ),
        # ... and, optionally, to the user who recorded it.
        migrations.AddField(
            model_name="catch",
            name="user",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.DO_NOTHING,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
| 0 | 8,481 | 23 |
0c7f8da16f61bced21ebb02151a449d6604c473a | 3,447 | py | Python | kaivy/windows/virtual_window_title_bar.py | team-kaivy/kaivy | e27b53e8e9eedc48abc99151f3adbb76f0a9b331 | [
"MIT"
] | null | null | null | kaivy/windows/virtual_window_title_bar.py | team-kaivy/kaivy | e27b53e8e9eedc48abc99151f3adbb76f0a9b331 | [
"MIT"
] | null | null | null | kaivy/windows/virtual_window_title_bar.py | team-kaivy/kaivy | e27b53e8e9eedc48abc99151f3adbb76f0a9b331 | [
"MIT"
] | null | null | null | ########################################################################################################################
# #
# This file is part of kAIvy #
# #
# Copyright (c) 2019-2021 by the kAIvy team and contributors #
# #
########################################################################################################################
"""
Implements a window title bar view
"""
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.behaviors import ButtonBehavior
from kivy.graphics import Color, Rectangle, InstructionGroup
import time
from kivymd.uix.button import MDIconButton
class VirtualWindowTitleBar(ButtonBehavior, FloatLayout):
    """
    Displays the title bar of a window: a centered title label plus a
    maximize/restore button (see ``update_title`` / ``handle_resize_click``).
    """
    TITLE_BAR_MIN_WIDTH = 320  # Default title bar width
    TITLE_BAR_HEIGHT = 32  # Default title bar height
def set_text(self, text):
"""
Sets a new title text
:param text:
:return:
"""
self.text = text
self.title_label.text = self.text
def update_title(self, _=None, __=None):
"""
Updates the title bar's bounding
"""
self.paint_group.clear()
self.paint_group.add(Color(0.5, 0.5, 0.7, 1.0))
self.paint_group.add(Rectangle(pos=self.pos, size=self.size))
self.button.pos_hint = {'right': 1.0, 'center_y': 0.5}
self.button.bind(on_release=self.handle_resize_click)
self.do_layout()
def get_required_space(self):
"""
Returns the minimum required space of the view
:return: Width, Height tuple
"""
return (self.TITLE_BAR_MIN_WIDTH, self.TITLE_BAR_HEIGHT)
    def handle_resize_click(self, btn):
        """
        Triggered when the resize (maximize/restore) button was pressed.
        :param btn: the button that fired the event (unused)
        """
        self.window.handle_maximize_restore()
def handle_release(self, evt):
"""
Called on click on title bar
:param evt: The event source
"""
cur_time = time.time()
if cur_time - self.last_tap < self.double_tap_time:
self.window.handle_maximize_restore()
self.last_tap = cur_time | 39.62069 | 136 | 0.495213 | ########################################################################################################################
# #
# This file is part of kAIvy #
# #
# Copyright (c) 2019-2021 by the kAIvy team and contributors #
# #
########################################################################################################################
"""
Implements a window title bar view
"""
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.behaviors import ButtonBehavior
from kivy.graphics import Color, Rectangle, InstructionGroup
import time
from kivymd.uix.button import MDIconButton
class VirtualWindowTitleBar(ButtonBehavior, FloatLayout):
"""
Display's the title bar of a window
"""
TITLE_BAR_MIN_WIDTH = 320 # Default title bar width
TITLE_BAR_HEIGHT = 32 # Default title bar height
def __init__(self, window):
super().__init__(size_hint=(1.0, None), size=(40, self.TITLE_BAR_HEIGHT))
self.window = window
self.paint_group = InstructionGroup()
self.canvas.add(self.paint_group)
self.bind(size=self.update_title)
self.bind(pos=self.update_title)
self.text = "title"
self.title_label = Label(text=self.text, size_hint=(1.0, 1.0))
self.title_label.pos_hint = {'center': (0.5, 0.5)}
self.add_widget(self.title_label)
self.bind(on_release=self.handle_release)
self.last_tap = 0
self.double_tap_time = 0.5
self.button = MDIconButton(icon='arrow-expand', size_hint=(None, None))
self.add_widget(self.button)
self.update_title()
def set_text(self, text):
"""
Sets a new title text
:param text:
:return:
"""
self.text = text
self.title_label.text = self.text
def update_title(self, _=None, __=None):
"""
Updates the title bar's bounding
"""
self.paint_group.clear()
self.paint_group.add(Color(0.5, 0.5, 0.7, 1.0))
self.paint_group.add(Rectangle(pos=self.pos, size=self.size))
self.button.pos_hint = {'right': 1.0, 'center_y': 0.5}
self.button.bind(on_release=self.handle_resize_click)
self.do_layout()
def get_required_space(self):
"""
Returns the minimum required space of the view
:return: Width, Height tuple
"""
return (self.TITLE_BAR_MIN_WIDTH, self.TITLE_BAR_HEIGHT)
def handle_resize_click(self, btn):
"""
Triggered when the resize button was pressed
"""
self.window.handle_maximize_restore()
def handle_release(self, evt):
"""
Called on click on title bar
:param evt: The event source
"""
cur_time = time.time()
if cur_time - self.last_tap < self.double_tap_time:
self.window.handle_maximize_restore()
self.last_tap = cur_time | 744 | 0 | 27 |
b0334bea56715d8ba34f24b296effc7b67be149a | 15,462 | py | Python | poet/build/builder.py | sdispater/poet | 5a07ee95e546ab6460bde43bf59837120e17dfa5 | [
"MIT"
] | 367 | 2017-04-01T15:10:04.000Z | 2021-12-23T18:26:03.000Z | poet/build/builder.py | sdispater/poet | 5a07ee95e546ab6460bde43bf59837120e17dfa5 | [
"MIT"
] | 22 | 2017-04-13T15:39:02.000Z | 2017-10-05T14:55:26.000Z | poet/build/builder.py | sdispater/poet | 5a07ee95e546ab6460bde43bf59837120e17dfa5 | [
"MIT"
] | 16 | 2017-04-14T08:19:48.000Z | 2019-07-21T13:34:12.000Z | # -*- coding: utf-8 -*-
import os
import re
import warnings
from setuptools.dist import Distribution
from setuptools.extension import Extension
from pip.commands.wheel import WheelCommand
from pip.status_codes import SUCCESS
from semantic_version import Spec, Version
from .._compat import Path, PY2, encode
from ..utils.helpers import template
class Builder(object):
    """
    Tool to transform a poet file to a setup() instruction.
    It also creates the MANIFEST.in file if necessary.
    """
    AUTHOR_REGEX = re.compile('(?u)^(?P<name>[- .,\w\d\'’"()]+) <(?P<email>.+?)>$')
    PYTHON_VERSIONS = {
        2: ['2.6', '2.7'],
        3: ['3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
    }
    def build(self, poet, **options):
        """
        Builds a package from a Poet instance
        :param poet: The poet to build.
        :type poet: poet.poet.Poet
        """
        setup_kwargs = self._setup(poet, **options)
        setup = os.path.join(poet.base_dir, 'setup.py')
        manifest = os.path.join(poet.base_dir, 'MANIFEST.in')
        self._write_setup(setup_kwargs, setup)
        readme = None
        if poet.has_markdown_readme():
            readme = os.path.join(poet.base_dir, 'README.rst')
            if os.path.exists(readme):
                readme = None
            else:
                self._write_readme(readme, setup_kwargs['long_description'])
                self._manifest.append('include README.rst')
        self._write_manifest(manifest)
        # The generated setup.py/MANIFEST.in (and the converted README, if
        # any) are temporary: always remove them, even when sdist fails.
        # (The previous ``except Exception: raise`` clause was a no-op and
        # has been removed.)
        try:
            dist = Distribution(setup_kwargs)
            dist.run_command('sdist')
        finally:
            os.unlink(setup)
            os.unlink(manifest)
            if readme:
                os.unlink(readme)
        # Building wheel if necessary
        if not options.get('no_wheels'):
            command = WheelCommand()
            command_args = [
                '--no-index',
                '--no-deps',
                '-q',
                '--wheel-dir', 'dist',
                'dist/{}'.format(poet.archive)
            ]
            if options.get('universal', True):
                command_args.append('--build-option=--universal')
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=UserWarning)
                status = command.main(command_args)
            if status != SUCCESS:
                raise Exception('An error occurred while executing command.')
    def _setup(self, poet, **options):
        """
        Builds the setup kwargs based on the Poet instance
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        setup_kwargs = {
            'name': poet.name,
            'version': poet.normalized_version,
            'description': poet.description,
            'long_description': poet.readme,
            'include_package_data': True,
            'script_name': 'setup.py'
        }
        setup_kwargs.update(self._author(poet))
        setup_kwargs['url'] = self._url(poet)
        setup_kwargs['license'] = poet.license
        setup_kwargs['keywords'] = self._keywords(poet)
        setup_kwargs['classifiers'] = self._classifiers(poet)
        setup_kwargs['entry_points'] = self._entry_points(poet)
        setup_kwargs['install_requires'] = self._install_requires(poet)
        setup_kwargs['tests_require'] = self._tests_require(poet)
        setup_kwargs['extras_require'] = self._extras_require(poet)
        setup_kwargs.update(self._packages(poet))
        # Extensions
        setup_kwargs.update(self._ext_modules(poet))
        return setup_kwargs
    def _author(self, poet):
        """
        Build the author information from a Poet instance.
        Transforms an author in the form "name <email>" into
        a proper dictionary.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        m = self.AUTHOR_REGEX.match(poet.authors[0])
        name = m.group('name')
        email = m.group('email')
        if PY2:
            name = encode(name)
            email = encode(email)
        return {
            'author': name,
            'author_email': email
        }
    def _classifiers(self, poet):
        """
        Builds the classifiers list from the
        specified Python versions.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: list
        """
        classifiers = ['Programming Language :: Python']
        compatible_versions = {}
        for python in poet.python_versions:
            constraint = Spec(python)
            for major in [2, 3]:
                available_versions = self.PYTHON_VERSIONS[major]
                for version in available_versions:
                    if Version.coerce(version) in constraint:
                        if major not in compatible_versions:
                            compatible_versions[major] = []
                        compatible_versions[major].append(version)
        for major in sorted(list(compatible_versions.keys())):
            versions = compatible_versions[major]
            classifier_template = 'Programming Language :: Python :: {}'
            classifiers.append(classifier_template.format(major))
            for version in versions:
                classifiers.append(classifier_template.format(version))
        return classifiers
    def _entry_points(self, poet):
        """
        Builds the entry points dictionary.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        entry_points = {
            'console_scripts': []
        }
        # Generic entry points.
        # Bug fix: the loop previously rebound the name ``entry_points`` to
        # each category's script mapping, clobbering the accumulator and
        # raising a KeyError below as soon as a generic entry point existed.
        for category, scripts in poet.entry_points.items():
            entry_points.setdefault(category, [])
            for name, script in scripts.items():
                entry_points[category].append('{} = {}'.format(name, script))
        # Console scripts entry points
        for name, script in poet.scripts.items():
            entry_points['console_scripts'].append('{} = {}'.format(name, script))
        return entry_points
    def _install_requires(self, poet):
        """
        Builds the (non-optional) dependencies list.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: list
        """
        requires = []
        dependencies = poet.dependencies
        for dependency in dependencies:
            if dependency.optional:
                continue
            requires.append(dependency.normalized_name)
        return requires
    def _tests_require(self, poet):
        """
        Builds the (non-optional) dev dependencies list.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: list
        """
        requires = []
        dependencies = poet.dev_dependencies
        for dependency in dependencies:
            if dependency.optional:
                continue
            requires.append(dependency.normalized_name)
        return requires
    def _extras_require(self, poet):
        """
        Builds the extras dictionary from
        the configured features.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        if not poet.features:
            return {}
        extras = {}
        for feature_name, featured_packages in poet.features.items():
            extras[feature_name] = []
            for package in featured_packages:
                for dep in poet.dependencies:
                    if dep.name == package:
                        extras[feature_name].append(dep.normalized_name)
        return extras
    def _packages(self, poet):
        """
        Builds the packages and modules list
        based on the include and exclude sections.
        It will also register files that need to be put
        in the MANIFEST.in file.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        includes = poet.include
        packages = []
        modules = []
        package_dirs = {}
        crawled = []
        excluded = []
        root = Path(poet.base_dir)
        for exclude in poet.exclude + poet.ignore:
            if not exclude:
                continue
            if exclude.startswith('/'):
                exclude = exclude[1:]
            for exc in root.glob(exclude):
                if exc.suffix == '.py':
                    exc = exc.relative_to(root)
                    excluded.append('.'.join(exc.with_suffix('').parts))
        if not isinstance(includes, list):
            includes = [includes]
        for include in includes:
            if isinstance(include, dict):
                settings = self._find_packages_from(
                    root,
                    include['from'],
                    include['include'],
                    include.get('as', ''),
                    excluded=excluded,
                    crawled=crawled
                )
            else:
                settings = self._find_packages_from(
                    root,
                    '',
                    include,
                    excluded=excluded,
                    crawled=crawled
                )
            packages += settings['packages']
            modules += settings['modules']
            package_dirs.update(settings.get('package_dirs', {}))
        packages = [p for p in packages if p not in excluded]
        modules = [m for m in modules if m not in excluded]
        settings = {
            'packages': packages,
            'py_modules': modules
        }
        package_dir = {}
        for package_name, directory in package_dirs.items():
            package_dir[package_name] = directory.as_posix()
        if package_dir:
            settings['package_dir'] = package_dir
        return settings
    def _ext_modules(self, poet):
        """
        Builds the extension modules.
        Transforms the extensions section:
        [extensions]
        "my.module" = "my/module.c"
        to a proper extension:
        Extension('my.module', 'my/module.c')
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        extensions = []
        for module, source in poet.extensions.items():
            if not isinstance(source, list):
                source = [source]
            extensions.append(Extension(module, source))
        return {
            'ext_modules': extensions
        }
| 29.677543 | 98 | 0.525805 | # -*- coding: utf-8 -*-
import os
import re
import warnings
from setuptools.dist import Distribution
from setuptools.extension import Extension
from pip.commands.wheel import WheelCommand
from pip.status_codes import SUCCESS
from semantic_version import Spec, Version
from .._compat import Path, PY2, encode
from ..utils.helpers import template
class Builder(object):
    """
    Tool to transform a poet file to a setup() instruction.
    It also creates the MANIFEST.in file if necessary.
    """
    AUTHOR_REGEX = re.compile('(?u)^(?P<name>[- .,\w\d\'’"()]+) <(?P<email>.+?)>$')
    PYTHON_VERSIONS = {
        2: ['2.6', '2.7'],
        3: ['3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6']
    }
    def __init__(self):
        """Initializes the builder with an empty MANIFEST.in line buffer."""
        self._manifest = []
    def build(self, poet, **options):
        """
        Builds a package from a Poet instance
        :param poet: The poet to build.
        :type poet: poet.poet.Poet
        """
        setup_kwargs = self._setup(poet, **options)
        setup = os.path.join(poet.base_dir, 'setup.py')
        manifest = os.path.join(poet.base_dir, 'MANIFEST.in')
        self._write_setup(setup_kwargs, setup)
        readme = None
        if poet.has_markdown_readme():
            readme = os.path.join(poet.base_dir, 'README.rst')
            if os.path.exists(readme):
                readme = None
            else:
                self._write_readme(readme, setup_kwargs['long_description'])
                self._manifest.append('include README.rst')
        self._write_manifest(manifest)
        # The generated setup.py/MANIFEST.in (and the converted README, if
        # any) are temporary: always remove them, even when sdist fails.
        # (The previous ``except Exception: raise`` clause was a no-op and
        # has been removed.)
        try:
            dist = Distribution(setup_kwargs)
            dist.run_command('sdist')
        finally:
            os.unlink(setup)
            os.unlink(manifest)
            if readme:
                os.unlink(readme)
        # Building wheel if necessary
        if not options.get('no_wheels'):
            command = WheelCommand()
            command_args = [
                '--no-index',
                '--no-deps',
                '-q',
                '--wheel-dir', 'dist',
                'dist/{}'.format(poet.archive)
            ]
            if options.get('universal', True):
                command_args.append('--build-option=--universal')
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=UserWarning)
                status = command.main(command_args)
            if status != SUCCESS:
                raise Exception('An error occurred while executing command.')
    def _setup(self, poet, **options):
        """
        Builds the setup kwargs based on the Poet instance
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        setup_kwargs = {
            'name': poet.name,
            'version': poet.normalized_version,
            'description': poet.description,
            'long_description': poet.readme,
            'include_package_data': True,
            'script_name': 'setup.py'
        }
        setup_kwargs.update(self._author(poet))
        setup_kwargs['url'] = self._url(poet)
        setup_kwargs['license'] = poet.license
        setup_kwargs['keywords'] = self._keywords(poet)
        setup_kwargs['classifiers'] = self._classifiers(poet)
        setup_kwargs['entry_points'] = self._entry_points(poet)
        setup_kwargs['install_requires'] = self._install_requires(poet)
        setup_kwargs['tests_require'] = self._tests_require(poet)
        setup_kwargs['extras_require'] = self._extras_require(poet)
        setup_kwargs.update(self._packages(poet))
        # Extensions
        setup_kwargs.update(self._ext_modules(poet))
        return setup_kwargs
    def _author(self, poet):
        """
        Build the author information from a Poet instance.
        Transforms an author in the form "name <email>" into
        a proper dictionary.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        m = self.AUTHOR_REGEX.match(poet.authors[0])
        name = m.group('name')
        email = m.group('email')
        if PY2:
            name = encode(name)
            email = encode(email)
        return {
            'author': name,
            'author_email': email
        }
    def _url(self, poet):
        """Returns the project URL: the homepage, falling back to the repository."""
        return poet.homepage or poet.repository
    def _keywords(self, poet):
        """Returns the keywords joined into a single space-separated string."""
        return ' '.join(poet.keywords or [])
    def _classifiers(self, poet):
        """
        Builds the classifiers list from the
        specified Python versions.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: list
        """
        classifiers = ['Programming Language :: Python']
        compatible_versions = {}
        for python in poet.python_versions:
            constraint = Spec(python)
            for major in [2, 3]:
                available_versions = self.PYTHON_VERSIONS[major]
                for version in available_versions:
                    if Version.coerce(version) in constraint:
                        if major not in compatible_versions:
                            compatible_versions[major] = []
                        compatible_versions[major].append(version)
        for major in sorted(list(compatible_versions.keys())):
            versions = compatible_versions[major]
            classifier_template = 'Programming Language :: Python :: {}'
            classifiers.append(classifier_template.format(major))
            for version in versions:
                classifiers.append(classifier_template.format(version))
        return classifiers
    def _entry_points(self, poet):
        """
        Builds the entry points dictionary.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        entry_points = {
            'console_scripts': []
        }
        # Generic entry points.
        # Bug fix: the loop previously rebound the name ``entry_points`` to
        # each category's script mapping, clobbering the accumulator and
        # raising a KeyError below as soon as a generic entry point existed.
        for category, scripts in poet.entry_points.items():
            entry_points.setdefault(category, [])
            for name, script in scripts.items():
                entry_points[category].append('{} = {}'.format(name, script))
        # Console scripts entry points
        for name, script in poet.scripts.items():
            entry_points['console_scripts'].append('{} = {}'.format(name, script))
        return entry_points
    def _install_requires(self, poet):
        """
        Builds the (non-optional) dependencies list.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: list
        """
        requires = []
        dependencies = poet.dependencies
        for dependency in dependencies:
            if dependency.optional:
                continue
            requires.append(dependency.normalized_name)
        return requires
    def _tests_require(self, poet):
        """
        Builds the (non-optional) dev dependencies list.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: list
        """
        requires = []
        dependencies = poet.dev_dependencies
        for dependency in dependencies:
            if dependency.optional:
                continue
            requires.append(dependency.normalized_name)
        return requires
    def _extras_require(self, poet):
        """
        Builds the extras dictionary from
        the configured features.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        if not poet.features:
            return {}
        extras = {}
        for feature_name, featured_packages in poet.features.items():
            extras[feature_name] = []
            for package in featured_packages:
                for dep in poet.dependencies:
                    if dep.name == package:
                        extras[feature_name].append(dep.normalized_name)
        return extras
    def _packages(self, poet):
        """
        Builds the packages and modules list
        based on the include and exclude sections.
        It will also register files that need to be put
        in the MANIFEST.in file.
        :param poet: The Poet instance for which to build.
        :type poet: poet.poet.Poet
        :rtype: dict
        """
        includes = poet.include
        packages = []
        modules = []
        package_dirs = {}
        crawled = []
        excluded = []
        root = Path(poet.base_dir)
        for exclude in poet.exclude + poet.ignore:
            if not exclude:
                continue
            if exclude.startswith('/'):
                exclude = exclude[1:]
            for exc in root.glob(exclude):
                if exc.suffix == '.py':
                    exc = exc.relative_to(root)
                    excluded.append('.'.join(exc.with_suffix('').parts))
        if not isinstance(includes, list):
            includes = [includes]
        for include in includes:
            if isinstance(include, dict):
                settings = self._find_packages_from(
                    root,
                    include['from'],
                    include['include'],
                    include.get('as', ''),
                    excluded=excluded,
                    crawled=crawled
                )
            else:
                settings = self._find_packages_from(
                    root,
                    '',
                    include,
                    excluded=excluded,
                    crawled=crawled
                )
            packages += settings['packages']
            modules += settings['modules']
            package_dirs.update(settings.get('package_dirs', {}))
        packages = [p for p in packages if p not in excluded]
        modules = [m for m in modules if m not in excluded]
        settings = {
            'packages': packages,
            'py_modules': modules
        }
        package_dir = {}
        for package_name, directory in package_dirs.items():
            package_dir[package_name] = directory.as_posix()
        if package_dir:
            settings['package_dir'] = package_dir
        return settings
    def _find_packages_from(self, root, base_dir, includes,
                            package_name=None, excluded=None, crawled=None):
        """
        Crawls ``base_dir`` (relative to ``root``) for the given include
        patterns, collecting packages, stand-alone modules and non-Python
        data files (the latter are registered in ``self._manifest``).
        :param root: the project root directory (a Path)
        :param base_dir: the directory, relative to ``root``, to crawl
        :param includes: glob pattern(s) selecting what to include
        :param package_name: optional name the crawled directory is mapped to
        :param excluded: dotted module names to exclude from the results
        :param crawled: paths already visited (shared across calls)
        :rtype: dict
        """
        package_dirs = {}
        packages = []
        modules = []
        if package_name is not None:
            package_dirs[package_name] = Path(base_dir)
        if excluded is None:
            excluded = []
        if crawled is None:
            crawled = []
        if not isinstance(includes, list):
            includes = [includes]
        if not isinstance(base_dir, Path):
            base_dir = Path(base_dir)
        base_path = root / base_dir
        for include in includes:
            dirs = []
            others = []
            for element in base_path.glob(include):
                if element.is_dir():
                    dirs.append(element.relative_to(base_path))
                else:
                    others.append(element.relative_to(base_path))
            m = re.match('^([^./]+)/\*\*/\*(\..+)?$', include)
            if m:
                # {dir}/**/* will not take the root directory
                # So we add it
                dirs.insert(0, Path(m.group(1)))
            for dir in dirs:
                if dir in crawled:
                    continue
                package = '.'.join(dir.parts)
                # We have a package
                real_dir = base_path / dir
                if (real_dir / '__init__.py').exists():
                    children = [
                        c.relative_to(base_path) for c in real_dir.glob('*.py')
                    ]
                    # NOTE(review): ``'.'.join(c.parts)`` keeps the ``.py``
                    # suffix while ``excluded`` entries are suffix-stripped,
                    # so this membership test looks like it can never match —
                    # verify whether ``c.with_suffix('')`` was intended.
                    filtered_children = [c for c in children if '.'.join(c.parts) not in excluded]
                    if children == filtered_children:
                        # If none of the children are excluded
                        # We have a full package
                        packages.append(package)
                    else:
                        modules += ['.'.join(c.parts) for c in filtered_children]
                    crawled += [base_path / child for child in children]
                    crawled.append(real_dir)
            for element in others:
                if base_path / element in crawled or element.suffix == '.pyc':
                    continue
                if element.suffix == '.py' and element.name != '__init__.py':
                    modules.append('.'.join(element.with_suffix('').parts))
                elif element.suffix not in ['.py', '.pyc'] and '__pycache__' not in element.parts:
                    # Non Python file, add them to data
                    self._manifest.append('include {}\n'.format(element.as_posix()))
                elif element.name == '__init__.py':
                    dir = element.parent
                    real_dir = base_path / dir
                    children = [
                        c.relative_to(base_path)
                        for c in real_dir.glob('*.py')
                        if c.name != '__init__.py'
                    ]
                    if not children and dir not in crawled:
                        # We actually have a package
                        packages.append('.'.join(dir.parts))
                    crawled.append(base_path / dir)
                crawled.append(base_path / element)
        packages = [p for p in packages if p not in excluded]
        modules = [m for m in modules if m not in excluded]
        settings = {
            'packages': packages,
            'modules': modules
        }
        if package_dirs:
            settings['package_dirs'] = package_dirs
        return settings
    def _write_setup(self, setup, dest):
        """Renders the setup.py template with the given kwargs into ``dest``.
        Scalar values are ``repr()``-ed so they can be emitted verbatim into
        the generated Python source; lists/dicts/None are passed through.
        """
        parameters = setup.copy()
        for key in parameters.keys():
            value = parameters[key]
            if value is not None and not isinstance(value, (list, dict)):
                parameters[key] = repr(value)
        setup_template = template('setup.py')
        with open(dest, 'w') as f:
            f.write(setup_template.render(**parameters))
    def _write_manifest(self, manifest):
        """Writes the collected MANIFEST.in lines to ``manifest``."""
        with open(manifest, 'w') as f:
            f.writelines(self._manifest)
    def _write_readme(self, readme, content):
        """Writes the converted README content to ``readme``."""
        with open(readme, 'w') as f:
            f.write(content)
| 4,264 | 0 | 189 |
8aa35a9d3b950a629adc8f543bb25b86ccfe5177 | 24,262 | py | Python | lingvo/core/beam_search_helper.py | shadowridgedev/lingvo | 7a55484a3bc424f06ad8a921fb7e623ade73b0bf | [
"Apache-2.0"
] | null | null | null | lingvo/core/beam_search_helper.py | shadowridgedev/lingvo | 7a55484a3bc424f06ad8a921fb7e623ade73b0bf | [
"Apache-2.0"
] | null | null | null | lingvo/core/beam_search_helper.py | shadowridgedev/lingvo | 7a55484a3bc424f06ad8a921fb7e623ade73b0bf | [
"Apache-2.0"
] | 1 | 2021-12-22T00:26:59.000Z | 2021-12-22T00:26:59.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper class for implementing a beam search decoder.
Individual models just need to provide a few callback functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core.ops import py_x_ops
# TODO(yonghui):
# 1) Change the tensor shape [max_decoder_time_steps, batch_size *
# num_hyps_per_beam] to [max_decoder_time_steps, num_hyps_per_beam,
# batch_size] to avoid confusing and mis-interpretation of the results.
# Defines a namedtuple to store the results of BeamSearchDecode. It contains
# the following entries:
# done_hyps: A string Tensor of shape
# [max_decoder_time_steps, batch_size * num_hyps_per_beam] which can be
# either an empty string, or a serialized Hypothesis proto. The non-empty
# hyps in done_hyps are terminated hypotheses. The 'h'-th hyp for sample
# 'b' at time step 't' can be found at done_hyps[t, batch_size * h + b].
# topk_hyps: A string Tensor of shape [batch_size, num_hyps_per_beam].
# topk_hyps[b, h] is the h-th hypothesis for the sample 'b' in the
# batch, which can either be an empty string or a serialized Hypothesis
# proto.
# topk_ids: Int32 Tensor of shape [batch_size * num_hyps_per_beam,
# target_seq_len] which contains the IDs of the targets in each of the
# hypotheses in the beam for the samples in the batch. For sample
# 'b' in the batch, the h-th hypothesis for this sample can be found at
# position [b * num_hyps_per_beam + h, :].
# topk_lens: Int32 Tensor of shape [batch_size * num_hyps_per_beam] which
# indicates the length (>=0) of each of the hypotheses.
# topk_scores: Float32 Tensor of shape [batch_size * num_hyps_per_beam]
# containing the scores (negative log probabilities) of each of the
# hypotheses in the beam.
# topk_decoded: A string Tensor of shape [batch_size * num_hyps_per_beam] which
# contains the decoded target strings in each of the hypotheses in the
# beam for the samples in the batch. The 'h'-th hyp for sample 'b' can
# be found at topk_decoded[b * num_hyps_per_beam + h]
# Container for everything BeamSearchHelper.BeamSearchDecode returns; the
# per-field layout is documented in the comment block above.
BeamSearchDecodeOutput = collections.namedtuple(
    'BeamSearchDecodeOutput',
    [
        'done_hyps', 'topk_hyps', 'topk_ids', 'topk_lens', 'topk_scores',
        'topk_decoded', 'other_states'
    ],
)
# Make the last attribute default to None.
# (Lets callers construct the tuple without supplying 'other_states'.)
BeamSearchDecodeOutput.__new__.__defaults__ = (None,)
class BeamSearchHelper(base_layer.BaseLayer):
  """Helper class for performing beam search.

  The user of this helper class needs to implement three callbacks.

  This callback is called once only at the beginning of beam search:

  .. code-block:: none

      def InitBeamSearchState(theta, encoder_outputs, num_hyps_per_beam):
        Args:
          theta: A NestedMap object containing weights' values of this
            layer and its children layers.
          encoder_outputs: A NestedMap computed by encoder.
          num_hyps_per_beam: An int, number hyps to keep for source sentence.
        Returns:
          initial_results: a `.NestedMap` of initial results. It should
            contain the following tensors at the minimum.
            .log_probs: The initial log probs for each of the tokens in
              the target vocab, of shape [num_hyps_per_beam * src_batch,
              vocab_size]. src_batch "b" and hyp_per_beam "h" is
              represented at index (h * src_batch + b).
            .atten_probs: The initial attention probs, of shape
              [num_hyps_per_beam * src_batch, src_len]. src_batch "b"
              and hyp_per_beam "h" is represented at index
              (h * src_batch + b).
          states: a `.NestedMap` of tensors representing states that the
            client would like to keep track of for each hyp.

  This callback is called once every decoding time step before
  beam_search_step is called:

  .. code-block:: none

      def PreBeamSearchStepCallback(theta,
                                    encoder_outputs,
                                    step_ids,
                                    in_states,
                                    num_hyps_per_beam):
        Args:
          theta: A NestedMap object containing weights' values of this
            layer and its children layers.
          encoder_outputs: A NestedMap computed by encoder.
          step_ids: A tensor of shape [num_hyps_per_beam * src_batch, 1].
          in_states: A `.NestedMap` of tensors representing states that the
            clients would like to keep track of for each of the active hyps.
        Returns:
          results: A `.NestedMap` of beam search results. It should contain
            the 'atten_probs' and 'log_probs' tensors at the minimal.
            Optionally it may contain 'is_last_chunk' if it is decoding a
            neural transducer model.
            .atten_probs: The updated attention probs, of shape
              [num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
              hyp_per_beam "h" is represented at index (h * src_batch + b).
            .log_probs: Log prob for each of the tokens in the target vocab.
              This is of shape [num_hyps_per_beam * src_batch, vocab_size].
              src_batch "b" and hyp_per_beam "h" is represented at index
              (h * src_batch + b).
            .is_last_chunk: Whether or not each of the hyp is at the end of
              a chunk. If non-empty, it is of shape
              [num_hyps_per_beam * src_batch, 1].
          out_states: A `.NestedMap`. The updated states. This 'out_states'
            should be of the exact same structure as 'in_states'

  This callback is called once every decoding time step after
  beam_search_step is called:

  .. code-block:: none

      def PostBeamSearchStepCallback(theta,
                                     encoder_outputs,
                                     new_step_ids,
                                     other_states):
        Args:
          theta: A NestedMap object containing weights' values of this
            layer and its children layers.
          encoder_outputs: A NestedMap computed by encoder.
          new_step_ids: Token ids for the next beam search step.
          other_states: A `.NestedMap`.
        Returns:
          final_states, A `.NestedMap`.
  """

  # NOTE(review): this copy of the class was missing Params() and __init__
  # (leaving self._model_uses_eoc_id forever unset), had @classmethod and
  # @base_layer.initializer erroneously attached to the instance method
  # _BeamSearchStep, and called undefined LoopContinue/LoopBody inside
  # BeamSearchDecode. All three defects are repaired below.

  @classmethod
  def Params(cls):
    """Returns the configurable Params for BeamSearchHelper."""
    p = super(BeamSearchHelper, cls).Params()
    p.Define('num_hyps_per_beam', 8,
             'Num of hyps to keep per beam during decoding.')
    p.Define(
        'target_seq_length_ratio', 1.0,
        'Ratio of the average target sequence length over the average '
        'source sequence length.')
    p.Define('length_normalization', 0.0,
             'Beam search length normalization ratio.')
    p.Define('coverage_penalty', 0.0, 'Beam search coverage penalty.')
    p.Define(
        'valid_eos_max_logit_delta', 5.0,
        'During beam search, allow </s> to terminate a hyp only if its '
        'logit is no more than than this value away from the logit of the '
        'best candidate.')
    p.Define(
        'beam_size', 3.0,
        'The maximum difference between best hyp and the worst in a beam.'
        ' This allows to prune our search when none of the active hyp is'
        ' close enough to the current best.')
    p.Define('target_sos_id', 1, 'Id of the start of sentence token.')
    p.Define('target_eos_id', 2, 'Id of the end of sentence token.')
    p.Define(
        'target_eoc_id', -1,
        'Id of the end of chunk token. Used by neural transducer only.'
        ' Set this id to a non-negative value only for NT.')
    p.Define(
        'target_seq_len', 0, 'Maximum allowed target seq length. Note '
        'that decoding terminates if an end of sentence token '
        'is not emitted after target_seq_len decode steps.')
    p.Define(
        'merge_paths', False, 'If true, hyps which are identical when '
        'epsilons are removed will be combined into a single hyp. The '
        'probability for that combined hyp will be the sum of the '
        'probabilities of the component hyps. This can only be applied '
        'for epsilon-emitting models (RNN-T and NT).')
    p.Define(
        'allow_empty_terminated_hyp', True, 'Whether it is okay to consider a '
        'hyp that consists only of epsilons as terminated. By default this '
        'is true, as an utterance may consist of silence. It should be set '
        'to false when EMBR training epsilon-emitting models (e.g., RNN-T), '
        'which are prone to emit all-epsilon hyps even in the presence of '
        'speech. Note that a hyp that terminates in EOS is not considered '
        'empty, so this flag has no effect for non-epsilon-emitting models.')
    p.Define(
        'ensure_full_beam', False, 'If True, we will not terminate the search '
        'until both of these conditions are satisfied: we have found '
        'num_hyps_per_beam terminated hyps AND no active hyps have a score '
        'within beam_size of the best terminated hyp. If False, only the '
        'second condition must be satisfied. Note that in either case, we can '
        'also terminate if we have run for target_seq_len steps. Generally '
        'this should be False unless beam search is being run as part of '
        'minimum word error rate training.')
    p.Define(
        'force_eos_in_last_step', False,
        'For all active hyps that are still on the beam after target_seq_len '
        'steps, return partial hyps with EOS set as the last token.')
    p.Define(
        'batch_major_state', True, 'If True, we use batch as the major '
        'dimension of the hyp states. Otherwise, timing becomes the major '
        'dimension, and the gathers are performed along the second-to-major '
        'dimension.')
    p.name = 'beam_search'
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(BeamSearchHelper, self).__init__(params)
    p = self.params
    # Only epsilon-emitting models (RNN-T / NT) define a non-negative
    # end-of-chunk id; they are the only ones feeding is_last_chunk below.
    self._model_uses_eoc_id = p.target_eoc_id >= 0

  def _BeamSearchStep(self, theta, encoder_outputs, cur_step, step_ids,
                      core_bs_states, other_states, num_hyps_per_beam,
                      pre_beam_search_step_callback,
                      post_beam_search_step_callback):
    """Extend beam search hyps for one step.

    | num_beams = Number of source sequences to be decoded.
    | num_hyps_per_beam = Number of hyps to keep per source sequence.
    | num_hyps = num_beams * num_hyps_per_beam
    | src_seq_len = Number of time steps in the source sequence.
    | src_batch = Number of examples in the source sequence.
    | tgt_seq_len = Maximum allowed time steps in the target sequence.
    | tgt_batch = num_hyps_per_beam * src_batch

    Args:
      theta: A `.NestedMap` object containing weights' values of the decoder
        layer and its children layers.
      encoder_outputs: A `.NestedMap` containing encoder outputs to be passed
        to the callbacks.
      cur_step: A scalar int tensor, the current time step, 0-based.
      step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
        current search step.
      core_bs_states: A tuple of core beam search states. This list is
        maintained by this helper class.
      other_states: A `.NestedMap` of other beam search states. This
        `.NestedMap` is managed and updated by the client. It is expected
        that each of its member tensors are of rank >= 1. t[i, ...] is the
        state of the i-th hyp at the beginning of this search step.
      num_hyps_per_beam: Num of hyps to keep per beam.
      pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
        See class header comments for more details.
      post_beam_search_step_callback: The `PostBeamSearchStepCallback`
        callback. See class header comments for more details.

    Returns:
      A tuple of following elements for the next beam search step,
      (next step, all_done, step_ids, core_bs_states, other_states)
    """
    p = self.params
    # Ask the model for this step's log probs / attention given the previous
    # step's ids and states.
    bs_results, other_states = pre_beam_search_step_callback(
        theta, encoder_outputs, step_ids, other_states, num_hyps_per_beam)
    (best_scores, cumulative_scores, in_scores, in_hyps, in_prev_hyps,
     in_done_hyps, in_atten_probs) = core_bs_states
    (out_best_scores, out_cumulative_scores, out_scores, out_hyps,
     out_prev_hyps, out_done_hyps, out_atten_probs,
     all_done) = py_x_ops.beam_search_step(
         bs_results.log_probs,
         bs_results.atten_probs,
         best_scores,
         cumulative_scores,
         in_scores,
         in_hyps,
         in_prev_hyps,
         in_done_hyps,
         in_atten_probs,
         bs_results.is_last_chunk if self._model_uses_eoc_id else [],
         cur_step,
         eoc_id=p.target_eoc_id,
         eos_id=p.target_eos_id,
         beam_size=p.beam_size,
         num_hyps_per_beam=num_hyps_per_beam,
         valid_eos_max_logit_delta=p.valid_eos_max_logit_delta,
         merge_paths=p.merge_paths,
         allow_empty_terminated_hyp=p.allow_empty_terminated_hyp,
         ensure_full_beam=p.ensure_full_beam,
         force_eos_in_last_step=p.force_eos_in_last_step)

    # The token ids selected for each hyp at this step become the inputs of
    # the next step.
    new_step_ids = tf.reshape(out_hyps[cur_step, :], tf.shape(step_ids))
    new_step_ids.set_shape(step_ids.get_shape())
    old_hyp_ids = tf.reshape(
        tf.slice(out_prev_hyps, begin=[cur_step, 0], size=[1, -1]), [-1])
    new_bs_states = (out_best_scores, out_cumulative_scores, out_scores,
                     out_hyps, out_prev_hyps, out_done_hyps, out_atten_probs)

    def ReOrderHyps(x_in):
      """Reorders x_in based on prev hyp ids."""
      if (isinstance(x_in, tf.Tensor) and x_in.shape.ndims and
          x_in.shape.ndims > 0):
        if x_in.shape.ndims > 2 and not p.batch_major_state:
          # Time-major state: hyps live on axis 1.
          x_out = tf.gather(x_in, old_hyp_ids, axis=1)
        else:
          x_out = tf.gather(x_in, old_hyp_ids)
        x_out.set_shape(x_in.get_shape())
        return x_out
      else:
        return x_in

    # Client states must follow the surviving hyps' ancestry.
    new_other_states = other_states.Transform(ReOrderHyps)
    final_other_states = post_beam_search_step_callback(
        theta, encoder_outputs, new_step_ids, new_other_states)
    return (cur_step + 1, all_done, new_step_ids, new_bs_states,
            final_other_states)

  def BeamSearchDecode(self,
                       theta,
                       encoder_outputs,
                       num_hyps_per_beam_override=0,
                       init_beam_search_state=None,
                       pre_beam_search_step_callback=None,
                       post_beam_search_step_callback=None,
                       max_steps=None):
    """Performs beam-search based decoding.

    Args:
      theta: A NestedMap object containing weights' values of the decoder
        layer and its children layers.
      encoder_outputs: A NestedMap containing encoder outputs to be passed
        to the callbacks.
      num_hyps_per_beam_override: If set to a value <= 0, this parameter is
        ignored. If set to a value > 0, then this value will be used to
        override `p.num_hyps_per_beam`.
      init_beam_search_state: The `InitBeamSearchState` callback. Please
        refer to the class header comments for more details.
      pre_beam_search_step_callback: The `PreBeamSearchStepCallback`
        callback. Please refer to the class header comments for more details.
      post_beam_search_step_callback: The `PostBeamSearchStepCallback`
        callback. Please refer to the class header comments for more details.
      max_steps: maximum beam search steps. If None, use
        self.params.target_seq_len.

    Returns:
      A `BeamSearchDecodeOutput`.
    """
    p = self.params
    num_hyps_per_beam = p.num_hyps_per_beam
    if num_hyps_per_beam_override > 0:
      num_hyps_per_beam = num_hyps_per_beam_override
    if max_steps is None:
      max_steps = p.target_seq_len

    initial_results, other_states = init_beam_search_state(
        theta, encoder_outputs, num_hyps_per_beam)

    num_hyps = tf.shape(initial_results.log_probs)[0]
    num_beams = num_hyps // num_hyps_per_beam
    # All hyps start from the start-of-sentence token.
    step_ids = tf.fill([num_hyps, 1],
                       tf.constant(p.target_sos_id, dtype=tf.int32))
    min_score = -1e36
    best_scores = (tf.zeros(shape=[num_beams], dtype=p.dtype) + min_score)
    cumulative_scores = tf.zeros(shape=[num_hyps], dtype=p.dtype)
    in_scores = tf.zeros([max_steps, num_hyps], dtype=p.dtype)
    in_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
    in_prev_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
    in_done_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.string)
    bs_atten_probs = tf.zeros(
        [max_steps, num_hyps,
         tf.shape(initial_results.atten_probs)[1]],
        dtype=p.dtype)
    cur_step = tf.constant(0, dtype=tf.int32)
    all_done = tf.constant(False, dtype=tf.bool)
    core_bs_states = (best_scores, cumulative_scores, in_scores, in_hyps,
                      in_prev_hyps, in_done_hyps, bs_atten_probs)

    def LoopContinue(cur_step, all_done, unused_step_ids,
                     unused_core_bs_states, unused_other_states_list):
      """Continue while steps remain and some hyp is still active."""
      return tf.logical_and(cur_step < max_steps, tf.logical_not(all_done))

    def LoopBody(cur_step, unused_all_done, step_ids, core_bs_states,
                 other_states_list):
      """Runs a single beam search step over the flattened loop state."""
      (cur_step, all_done, new_step_ids, new_bs_states,
       new_other_states) = self._BeamSearchStep(
           theta, encoder_outputs, cur_step, step_ids, core_bs_states,
           other_states.Pack(other_states_list), num_hyps_per_beam,
           pre_beam_search_step_callback, post_beam_search_step_callback)
      return (cur_step, all_done, new_step_ids, new_bs_states,
              new_other_states.Flatten())

    flat_other_states = other_states.Flatten()
    _, _, _, final_bs_states, flat_final_other_states = tf.while_loop(
        LoopContinue,
        LoopBody,
        loop_vars=(cur_step, all_done, step_ids, core_bs_states,
                   flat_other_states),
        parallel_iterations=10,
        back_prop=False,
        swap_memory=False,
        shape_invariants=(tf.TensorShape(cur_step.get_shape()),
                          tf.TensorShape(all_done.get_shape()),
                          tf.TensorShape(step_ids.get_shape()),
                          _GetShapes(core_bs_states),
                          _GetShapes(flat_other_states, none_shapes=True)))
    # [target_seq_len, num_beams * num_hyps_per_beam].
    final_done_hyps = final_bs_states[5]
    final_other_states = other_states.Pack(flat_final_other_states)

    # TODO(rpang): avoid inspecting 'encoder_outputs'.
    source_paddings = encoder_outputs.padding
    if isinstance(source_paddings, py_utils.NestedMap):
      source_seq_lengths = tf.to_int32(
          tf.reduce_sum(1.0 - tf.transpose(source_paddings.Flatten()[0]), 1))
    else:
      source_seq_lengths = tf.to_int32(
          tf.reduce_sum(1.0 - tf.transpose(source_paddings), 1))

    # [num_beams, num_hyps_per_beam].
    topk_hyps = py_x_ops.top_k_terminated_hyps(
        final_done_hyps,
        source_seq_lengths,
        k=num_hyps_per_beam,
        num_hyps_per_beam=num_hyps_per_beam,
        length_normalization=p.length_normalization,
        coverage_penalty=p.coverage_penalty,
        target_seq_length_ratio=p.target_seq_length_ratio,
        eoc_id=p.target_eoc_id,
        merge_paths=p.merge_paths)
    # [num_beams * num_hyps_per_beam, ...].
    max_seq_length = 0 if isinstance(max_steps, tf.Tensor) else max_steps
    topk_ids, topk_lens, topk_scores = py_x_ops.unpack_hyp(
        tf.reshape(topk_hyps, [-1]), max_seq_length=max_seq_length)
    # [num_beams, num_hyps_per_beam].
    topk_scores = tf.reshape(topk_scores, tf.shape(topk_hyps))

    return BeamSearchDecodeOutput(final_done_hyps, topk_hyps, topk_ids,
                                  topk_lens, topk_scores, None,
                                  final_other_states)
def _GetShapes(tensors, none_shapes=False):
  """Util for getting nested structure of shapes from structure of tensors.

  Args:
    tensors: Structure of Tensors to get shapes for.
    none_shapes: Returns None shapes if true.

  Returns:
    The same structure as tensors but of corresponding `TensorShape` objects.
  """

  def _ToShape(t):
    # Non-Tensor leaves carry no static shape information.
    shape = t.get_shape() if isinstance(t, tf.Tensor) else None
    if not none_shapes:
      return tf.TensorShape(shape)
    # Keep the rank but drop every dimension size; fall back to a fully
    # unknown shape when there is no shape to start from.
    if shape:
      return tf.TensorShape([None] * len(shape))
    return tf.TensorShape(None)

  nest = tf.contrib.framework.nest
  shapes = [_ToShape(t) for t in nest.flatten(tensors)]
  return type(tensors)(nest.pack_sequence_as(tensors, shapes))
def MergeBeamSearchOutputs(max_hyps_per_beam, beam_search_outputs):
  """Merges beam search hyps from multiple decoders.

  Args:
    max_hyps_per_beam: the number of top hyps in the merged results. Must be
      less than or equal to total number of input hyps.
    beam_search_outputs: a list of BeamSearchDecodeOutput objects. Must share
      the same source_batch and max sequence length.

  Returns:
    A BeamSearchDecodeOutput object containing max_hyps_per_beam hypotheses
    per beam.
  """
  source_batch = tf.shape(beam_search_outputs[0].topk_hyps)[0]
  value_dict = {}
  for output in beam_search_outputs:
    # Runtime check that every output shares the same source batch size.
    hyps_per_beam = py_utils.with_dependencies([
        py_utils.assert_equal(source_batch,
                              tf.shape(output.topk_hyps)[0]),
    ],
                                               tf.shape(output.topk_hyps)[1])
    # FIX: was `.iteritems()`, which is Python 2 only and raises
    # AttributeError on Python 3; `.items()` is equivalent on both.
    for k, v in output._asdict().items():
      if v is None:
        continue
      if k == 'done_hyps':
        # done_hyps is time-major; transpose so hyps are on axis 0.
        v = tf.transpose(v)
      if k not in value_dict:
        value_dict[k] = []
      value_dict[k].append(tf.reshape(v, [source_batch, hyps_per_beam, -1]))

  # Concatenate the tensors along the 'num_hyps_per_beam' dimension.
  concatenated = {}
  for k, values in value_dict.items():
    if len(values) != len(beam_search_outputs):
      raise ValueError(
          'Incomplete values for %s: %s' % (k, beam_search_outputs))
    concatenated[k] = tf.concat(values, axis=1)

  # Hyps with zero length are invalid; push their scores to -1e6 so they are
  # never selected by top_k.
  scores = concatenated['topk_scores']
  scores = tf.where(
      tf.equal(concatenated['topk_lens'], 0), tf.fill(tf.shape(scores), -1e6),
      scores)
  scores = tf.squeeze(scores, -1)

  # Select top max_hyps_per_beam indices per beam.
  _, top_indices = tf.nn.top_k(scores, max_hyps_per_beam)
  batch_ids = tf.tile(
      tf.expand_dims(tf.range(source_batch), -1), [1, max_hyps_per_beam])
  # [source_batch, max_hyps_per_beam, 2]
  gather_indices = tf.stack([batch_ids, top_indices], axis=-1)

  # Gather the merged top hyps according to 'gather_indices'.
  top = beam_search_outputs[0]._asdict()
  total_hyps = source_batch * max_hyps_per_beam
  for k, v in concatenated.items():
    v = tf.gather_nd(v, gather_indices)
    # Restore each field to the layout documented on BeamSearchDecodeOutput.
    if k == 'done_hyps':
      v = tf.transpose(tf.reshape(v, [total_hyps, -1]))
    elif k == 'topk_hyps':
      v = tf.reshape(v, [source_batch, max_hyps_per_beam])
    elif k == 'topk_ids':
      v = tf.reshape(v, [total_hyps, -1])
    elif k in ('topk_lens', 'topk_scores', 'topk_decoded'):
      v = tf.reshape(v, [total_hyps])
    else:
      raise ValueError('Unexpected field: %s' % k)
    top[k] = v
  return BeamSearchDecodeOutput(**top)
| 44.435897 | 80 | 0.667958 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper class for implementing a beam search decoder.
Individual models just need to provide a few callback functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core.ops import py_x_ops
# TODO(yonghui):
# 1) Change the tensor shape [max_decoder_time_steps, batch_size *
# num_hyps_per_beam] to [max_decoder_time_steps, num_hyps_per_beam,
# batch_size] to avoid confusing and mis-interpretation of the results.
# Defines a namedtuple to store the results of BeamSearchDecode. It contains
# the following entries:
# done_hyps: A string Tensor of shape
# [max_decoder_time_steps, batch_size * num_hyps_per_beam] which can be
# either an empty string, or a serialized Hypothesis proto. The non-empty
# hyps in done_hyps are terminated hypotheses. The 'h'-th hyp for sample
# 'b' at time step 't' can be found at done_hyps[t, batch_size * h + b].
# topk_hyps: A string Tensor of shape [batch_size, num_hyps_per_beam].
# topk_hyps[b, h] is the h-th hypothesis for the sample 'b' in the
# batch, which can either be an empty string or a serialized Hypothesis
# proto.
# topk_ids: Int32 Tensor of shape [batch_size * num_hyps_per_beam,
# target_seq_len] which contains the IDs of the targets in each of the
# hypotheses in the beam for the samples in the batch. For sample
# 'b' in the batch, the h-th hypothesis for this sample can be found at
# position [b * num_hyps_per_beam + h, :].
# topk_lens: Int32 Tensor of shape [batch_size * num_hyps_per_beam] which
# indicates the length (>=0) of each of the hypotheses.
# topk_scores: Float32 Tensor of shape [batch_size * num_hyps_per_beam]
# containing the scores (negative log probabilities) of each of the
# hypotheses in the beam.
# topk_decoded: A string Tensor of shape [batch_size * num_hyps_per_beam] which
# contains the decoded target strings in each of the hypotheses in the
# beam for the samples in the batch. The 'h'-th hyp for sample 'b' can
# be found at topk_decoded[b * num_hyps_per_beam + h]
# Container for everything BeamSearchHelper.BeamSearchDecode returns; the
# per-field layout is documented in the comment block above.
BeamSearchDecodeOutput = collections.namedtuple(
    'BeamSearchDecodeOutput',
    [
        'done_hyps', 'topk_hyps', 'topk_ids', 'topk_lens', 'topk_scores',
        'topk_decoded', 'other_states'
    ],
)
# Make the last attribute default to None.
# (Lets callers construct the tuple without supplying 'other_states'.)
BeamSearchDecodeOutput.__new__.__defaults__ = (None,)
class BeamSearchHelper(base_layer.BaseLayer):
"""Helper class for performing beam search.
The user of this helper class needs to implement three callbacks.
This callback is called once only at the beginning of beam search:
.. code-block:: none
def InitBeamSearchState(theta, encoder_outputs, num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
encoder_outputs: A NestedMap computed by encoder.
num_hyps_per_beam: An int, number hyps to keep for source sentence.
Returns:
initial_results: a `.NestedMap` of initial results. It should contain
the following tensors at the minimum.
.log_probs: The initial log probs for each of the tokens in
the target vocab, of shape [num_hyps_per_beam * src_batch,
vocab_size]. src_batch "b" and hyp_per_beam "h" is
represented at index (h * src_batch + b).
.atten_probs: The initial attention probs, of shape [
num_hyps_per_beam * src_batch, src_len]. src_batch "b"
and hyp_per_beam "h" is represented at index
(h * src_batch + b).
states: a `.NestedMap` of tensors representing states that the client
would like to keep track of for each hyp.
This callback is called once every decoding time step before beam_search_step
is called:
.. code-block:: none
def PreBeamSearchStepCallback(theta,
encoder_outputs,
step_ids,
in_states,
num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
encoder_outputs: A NestedMap computed by encoder.
step_ids: A tensor of shape [num_hyps_per_beam * src_batch, 1].
in_states: A `.NestedMap` of tensors representing states that the
clients would like to keep track of for each of the active hyps.
Returns:
results: A `.NestedMap` of beam search results. It should contain
the 'atten_probs' and 'log_probs' tensors at the minimal.
Optionally it may contain 'is_last_chunk' if it is decoding a
neural transducer model.
.atten_probs: The updated attention probs, of shape
[num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
hyp_per_beam "h" is represented at index (h * src_batch + b).
.log_probs: Log prob for each of the tokens in the target vocab.
This is of shape [num_hyps_per_beam * src_batch, vocab_size].
src_batch "b" and hyp_per_beam "h" is represented at index
(h * src_batch + b).
.is_last_chunk: Whether or not each of the hyp is at the end of a
chunk. If non-empty, it is of shape
[num_hyps_per_beam * src_batch, 1].
out_states: A `.NestedMap`. The updated states. This 'out_states'
should be of the exact same structure as 'in_states'
This callback is called once every decoding time step after beam_search_step
is called:
.. code-block:: none
def PostBeamSearchStepCallback(theta,
encoder_outputs,
new_step_ids,
other_states):
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
encoder_outputs: A NestedMap computed by encoder.
new_step_ids: Token ids for the next beam search step.
other_states: A `.NestedMap`.
Returns:
final_states, A `.NestedMap`.
"""
@classmethod
def Params(cls):
  """Returns the configurable Params for BeamSearchHelper."""
  p = super(BeamSearchHelper, cls).Params()
  p.Define('num_hyps_per_beam', 8,
           'Num of hyps to keep per beam during decoding.')
  p.Define(
      'target_seq_length_ratio', 1.0,
      'Ratio of the average target sequence length over the average '
      'source sequence length.')
  p.Define('length_normalization', 0.0,
           'Beam search length normalization ratio.')
  p.Define('coverage_penalty', 0.0, 'Beam search coverage penalty.')
  p.Define(
      'valid_eos_max_logit_delta', 5.0,
      'During beam search, allow </s> to terminate a hyp only if its '
      'logit is no more than than this value away from the logit of the '
      'best candidate.')
  p.Define(
      'beam_size', 3.0,
      'The maximum difference between best hyp and the worst in a beam.'
      ' This allows to prune our search when none of the active hyp is'
      ' close enough to the current best.')
  p.Define('target_sos_id', 1, 'Id of the start of sentence token.')
  p.Define('target_eos_id', 2, 'Id of the end of sentence token.')
  p.Define(
      'target_eoc_id', -1,
      'Id of the end of chunk token. Used by neural transducer only.'
      ' Set this id to a non-negative value only for NT.')
  p.Define(
      'target_seq_len', 0, 'Maximum allowed target seq length. Note '
      'that decoding terminates if an end of sentence token '
      'is not emitted after target_seq_len decode steps.')
  p.Define(
      'merge_paths', False, 'If true, hyps which are identical when '
      'epsilons are removed will be combined into a single hyp. The '
      'probability for that combined hyp will be the sum of the '
      'probabilities of the component hyps. This can only be applied '
      'for epsilon-emitting models (RNN-T and NT).')
  p.Define(
      'allow_empty_terminated_hyp', True, 'Whether it is okay to consider a '
      'hyp that consists only of epsilons as terminated. By default this '
      'is true, as an utterance may consist of silence. It should be set '
      'to false when EMBR training epsilon-emitting models (e.g., RNN-T), '
      'which are prone to emit all-epsilon hyps even in the presence of '
      'speech. Note that a hyp that terminates in EOS is not considered '
      'empty, so this flag has no effect for non-epsilon-emitting models.')
  p.Define(
      'ensure_full_beam', False, 'If True, we will not terminate the search '
      'until both of these conditions are satisfied: we have found '
      'num_hyps_per_beam terminated hyps AND no active hyps have a score '
      'within beam_size of the best terminated hyp. If False, only the '
      'second condition must be satisfied. Note that in either case, we can '
      'also terminate if we have run for target_seq_len steps. Generally '
      'this should be False unless beam search is being run as part of '
      'minimum word error rate training.')
  p.Define(
      'force_eos_in_last_step', False,
      'For all active hyps that are still on the beam after target_seq_len '
      'steps, return partial hyps with EOS set as the last token.')
  p.Define(
      'batch_major_state', True, 'If True, we use batch as the major '
      'dimension of the hyp states. Otherwise, timing becomes the major '
      'dimension, and the gathers are performed along the second-to-major '
      'dimension.')
  p.name = 'beam_search'
  return p
@base_layer.initializer
def __init__(self, params):
  """Constructs the beam search helper from `params`."""
  super(BeamSearchHelper, self).__init__(params)
  # Only epsilon-emitting models (RNN-T / NT) set a non-negative
  # end-of-chunk id; they alone feed is_last_chunk into beam_search_step.
  self._model_uses_eoc_id = self.params.target_eoc_id >= 0
def _BeamSearchStep(self, theta, encoder_outputs, cur_step, step_ids,
                    core_bs_states, other_states, num_hyps_per_beam,
                    pre_beam_search_step_callback,
                    post_beam_search_step_callback):
  """Extend beam search hyps for one step.

  | num_beams = Number of source sequences to be decoded.
  | num_hyps_per_beam = Number of hyps to keep per source sequence.
  | num_hyps = num_beams * num_hyps_per_beam
  | src_seq_len = Number of time steps in the source sequence.
  | src_batch = Number of examples in the source sequence.
  | tgt_seq_len = Maximum allowed time steps in the target sequence.
  | tgt_batch = num_hyps_per_beam * src_batch

  Args:
    theta: A `.NestedMap` object containing weights' values of the decoder
      layer and its children layers.
    encoder_outputs: A `.NestedMap` containing encoder outputs to be passed
      to the callbacks.
    cur_step: A scalar int tensor, the current time step, 0-based.
    step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
      current search step.
    core_bs_states: A tuple of core beam search states. This list is
      maintained by this helper class.
    other_states: A `.NestedMap` of other beam search states. This
      `.NestedMap` is managed and updated by the client. It is expected
      that each of its member tensors are of rank >= 1. t[i, ...] is the
      state of the i-th hyp at the beginning of this search step.
    num_hyps_per_beam: Num of hyps to keep per beam.
    pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
      See class header comments for more details.
    post_beam_search_step_callback: The `PostBeamSearchStepCallback`
      callback. See class header comments for more details.

  Returns:
    A tuple of following elements for the next beam search step,
    (next step, all_done, step_ids, core_bs_states, other_states)
  """
  p = self.params
  # Ask the model for this step's log probs / attention, given the previous
  # step's ids and the client-managed states.
  bs_results, other_states = pre_beam_search_step_callback(
      theta, encoder_outputs, step_ids, other_states, num_hyps_per_beam)
  (best_scores, cumulative_scores, in_scores, in_hyps, in_prev_hyps,
   in_done_hyps, in_atten_probs) = core_bs_states
  # Core pruning/extension is delegated to the fused beam_search_step op.
  (out_best_scores, out_cumulative_scores, out_scores, out_hyps,
   out_prev_hyps, out_done_hyps, out_atten_probs,
   all_done) = py_x_ops.beam_search_step(
       bs_results.log_probs,
       bs_results.atten_probs,
       best_scores,
       cumulative_scores,
       in_scores,
       in_hyps,
       in_prev_hyps,
       in_done_hyps,
       in_atten_probs,
       # is_last_chunk is only meaningful for epsilon-emitting models
       # (target_eoc_id >= 0); see __init__.
       bs_results.is_last_chunk if self._model_uses_eoc_id else [],
       cur_step,
       eoc_id=p.target_eoc_id,
       eos_id=p.target_eos_id,
       beam_size=p.beam_size,
       num_hyps_per_beam=num_hyps_per_beam,
       valid_eos_max_logit_delta=p.valid_eos_max_logit_delta,
       merge_paths=p.merge_paths,
       allow_empty_terminated_hyp=p.allow_empty_terminated_hyp,
       ensure_full_beam=p.ensure_full_beam,
       force_eos_in_last_step=p.force_eos_in_last_step)
  # The ids chosen at this step are the inputs of the next step.
  new_step_ids = tf.reshape(out_hyps[cur_step, :], tf.shape(step_ids))
  new_step_ids.set_shape(step_ids.get_shape())
  # Parent-hyp indices for this step, used to re-align client state below.
  old_hyp_ids = tf.reshape(
      tf.slice(out_prev_hyps, begin=[cur_step, 0], size=[1, -1]), [-1])
  new_bs_states = (out_best_scores, out_cumulative_scores, out_scores,
                   out_hyps, out_prev_hyps, out_done_hyps, out_atten_probs)

  def ReOrderHyps(x_in):
    """Reorders x_in based on prev hyp ids."""
    if (isinstance(x_in, tf.Tensor) and x_in.shape.ndims and
        x_in.shape.ndims > 0):
      if x_in.shape.ndims > 2 and not p.batch_major_state:
        # Time-major layout: the hyp dimension is axis 1.
        x_out = tf.gather(x_in, old_hyp_ids, axis=1)
      else:
        x_out = tf.gather(x_in, old_hyp_ids)
      x_out.set_shape(x_in.get_shape())
      return x_out
    else:
      return x_in

  # Client states must follow the surviving hyps' ancestry.
  new_other_states = other_states.Transform(ReOrderHyps)
  final_other_states = post_beam_search_step_callback(
      theta, encoder_outputs, new_step_ids, new_other_states)
  return (cur_step + 1, all_done, new_step_ids, new_bs_states,
          final_other_states)
def BeamSearchDecode(self,
theta,
encoder_outputs,
num_hyps_per_beam_override=0,
init_beam_search_state=None,
pre_beam_search_step_callback=None,
post_beam_search_step_callback=None,
max_steps=None):
"""Performs beam-search based decoding.
Args:
theta: A NestedMap object containing weights' values of the decoder
layer and its children layers.
encoder_outputs: A NestedMap containing encoder outputs to be passed
to the callbacks.
num_hyps_per_beam_override: If set to a value <= 0, this parameter is
ignored. If set to a value > 0, then this value will be used to
override `p.num_hyps_per_beam`.
init_beam_search_state: The `InitBeamSearchState` callback. Please refer
to the class header comments for more details.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
max_steps: maximum beam search steps. If None,
use self.params.target_seq_len.
Returns:
A `BeamSearchDecodeOutput`.
"""
p = self.params
num_hyps_per_beam = p.num_hyps_per_beam
if num_hyps_per_beam_override > 0:
num_hyps_per_beam = num_hyps_per_beam_override
if max_steps is None:
max_steps = p.target_seq_len
initial_results, other_states = init_beam_search_state(
theta, encoder_outputs, num_hyps_per_beam)
num_hyps = tf.shape(initial_results.log_probs)[0]
num_beams = num_hyps // num_hyps_per_beam
step_ids = tf.fill([num_hyps, 1],
tf.constant(p.target_sos_id, dtype=tf.int32))
min_score = -1e36
best_scores = (tf.zeros(shape=[num_beams], dtype=p.dtype) + min_score)
cumulative_scores = tf.zeros(shape=[num_hyps], dtype=p.dtype)
in_scores = tf.zeros([max_steps, num_hyps], dtype=p.dtype)
in_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_prev_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_done_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.string)
bs_atten_probs = tf.zeros(
[max_steps, num_hyps,
tf.shape(initial_results.atten_probs)[1]],
dtype=p.dtype)
cur_step = tf.constant(0, dtype=tf.int32)
all_done = tf.constant(False, dtype=tf.bool)
core_bs_states = (best_scores, cumulative_scores, in_scores, in_hyps,
in_prev_hyps, in_done_hyps, bs_atten_probs)
def LoopContinue(cur_step, all_done, unused_step_ids, unused_core_bs_states,
unused_other_states_list):
return tf.logical_and(cur_step < max_steps, tf.logical_not(all_done))
def LoopBody(cur_step, unused_all_done, step_ids, core_bs_states,
other_states_list):
(cur_step, all_done, new_step_ids, new_bs_states,
new_other_states) = self._BeamSearchStep(
theta, encoder_outputs, cur_step, step_ids, core_bs_states,
other_states.Pack(other_states_list), num_hyps_per_beam,
pre_beam_search_step_callback, post_beam_search_step_callback)
return (cur_step, all_done, new_step_ids, new_bs_states,
new_other_states.Flatten())
flat_other_states = other_states.Flatten()
_, _, _, final_bs_states, flat_final_other_states = tf.while_loop(
LoopContinue,
LoopBody,
loop_vars=(cur_step, all_done, step_ids, core_bs_states,
flat_other_states),
parallel_iterations=10,
back_prop=False,
swap_memory=False,
shape_invariants=(tf.TensorShape(cur_step.get_shape()),
tf.TensorShape(all_done.get_shape()),
tf.TensorShape(step_ids.get_shape()),
_GetShapes(core_bs_states),
_GetShapes(flat_other_states, none_shapes=True)))
# [target_seq_len, num_beams * num_hyps_per_beam].
final_done_hyps = final_bs_states[5]
final_other_states = other_states.Pack(flat_final_other_states)
# TODO(rpang): avoid inspecting 'encoder_outputs'.
source_paddings = encoder_outputs.padding
if isinstance(source_paddings, py_utils.NestedMap):
source_seq_lengths = tf.to_int32(
tf.reduce_sum(1.0 - tf.transpose(source_paddings.Flatten()[0]), 1))
else:
source_seq_lengths = tf.to_int32(
tf.reduce_sum(1.0 - tf.transpose(source_paddings), 1))
# [num_beams, num_hyps_per_beam].
topk_hyps = py_x_ops.top_k_terminated_hyps(
final_done_hyps,
source_seq_lengths,
k=num_hyps_per_beam,
num_hyps_per_beam=num_hyps_per_beam,
length_normalization=p.length_normalization,
coverage_penalty=p.coverage_penalty,
target_seq_length_ratio=p.target_seq_length_ratio,
eoc_id=p.target_eoc_id,
merge_paths=p.merge_paths)
# [num_beams * num_hyps_per_beam, ...].
max_seq_length = 0 if isinstance(max_steps, tf.Tensor) else max_steps
topk_ids, topk_lens, topk_scores = py_x_ops.unpack_hyp(
tf.reshape(topk_hyps, [-1]), max_seq_length=max_seq_length)
# [num_beams, num_hyps_per_beam].
topk_scores = tf.reshape(topk_scores, tf.shape(topk_hyps))
return BeamSearchDecodeOutput(final_done_hyps, topk_hyps, topk_ids,
topk_lens, topk_scores, None,
final_other_states)
def _GetShapes(tensors, none_shapes=False):
  """Util for getting nested structure of shapes from structure of tensors.

  Args:
    tensors: Structure of Tensors to get shapes for.
    none_shapes: Returns None shapes if true.

  Returns:
    The same structure as tensors but of corresponding `TensorShape` objects.
  """
  def _shape_of(leaf):
    # Non-Tensor leaves have no static shape to report.
    static_shape = leaf.get_shape() if isinstance(leaf, tf.Tensor) else None
    if not none_shapes:
      return tf.TensorShape(static_shape)
    # Erase every known dimension size, keeping only the rank; a leaf with
    # no usable static shape becomes a fully unknown TensorShape.
    if static_shape:
      return tf.TensorShape([None] * len(static_shape))
    return tf.TensorShape(None)

  flat_leaves = tf.contrib.framework.nest.flatten(tensors)
  flat_shapes = [_shape_of(leaf) for leaf in flat_leaves]
  return type(tensors)(
      tf.contrib.framework.nest.pack_sequence_as(tensors, flat_shapes))
def MergeBeamSearchOutputs(max_hyps_per_beam, beam_search_outputs):
  """Merges beam search hyps from multiple decoders.

  Args:
    max_hyps_per_beam: the number of top hyps in the merged results. Must be
      less than or equal to total number of input hyps.
    beam_search_outputs: a list of BeamSearchDecodeOutput objects. Must share
      the same source_batch and max sequence length.

  Returns:
    A BeamSearchDecodeOutput object containing max_hyps_per_beam hypotheses
    per beam.

  Raises:
    ValueError: if a field is present in some but not all outputs, or an
      unexpected field name is encountered while regrouping.
  """
  source_batch = tf.shape(beam_search_outputs[0].topk_hyps)[0]
  value_dict = {}
  for output in beam_search_outputs:
    # Assert all outputs share the same source batch before reading this
    # output's hyps-per-beam count.
    hyps_per_beam = py_utils.with_dependencies([
        py_utils.assert_equal(source_batch,
                              tf.shape(output.topk_hyps)[0]),
    ],
                                               tf.shape(output.topk_hyps)[1])
    # BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() behaves the same (and also works under Python 2).
    for k, v in output._asdict().items():
      if v is None:
        continue
      if k == 'done_hyps':
        v = tf.transpose(v)
      if k not in value_dict:
        value_dict[k] = []
      value_dict[k].append(tf.reshape(v, [source_batch, hyps_per_beam, -1]))

  # Concatenate the tensors along the 'num_hyps_per_beam' dimension.
  concatenated = {}
  for k, values in value_dict.items():
    if len(values) != len(beam_search_outputs):
      raise ValueError(
          'Incomplete values for %s: %s' % (k, beam_search_outputs))
    concatenated[k] = tf.concat(values, axis=1)

  scores = concatenated['topk_scores']
  # Push zero-length (empty / padding) hyps to a very low score so they can
  # never be selected by top_k below.
  scores = tf.where(
      tf.equal(concatenated['topk_lens'], 0), tf.fill(tf.shape(scores), -1e6),
      scores)
  scores = tf.squeeze(scores, -1)

  # Select top max_hyps_per_beam indices per beam.
  _, top_indices = tf.nn.top_k(scores, max_hyps_per_beam)
  batch_ids = tf.tile(
      tf.expand_dims(tf.range(source_batch), -1), [1, max_hyps_per_beam])
  # [source_batch, max_hyps_per_beam, 2]
  gather_indices = tf.stack([batch_ids, top_indices], axis=-1)

  # Gather the merged top hyps according to 'gather_indices' and restore each
  # field to the layout BeamSearchDecodeOutput expects.
  top = beam_search_outputs[0]._asdict()
  total_hyps = source_batch * max_hyps_per_beam
  for k, v in concatenated.items():
    v = tf.gather_nd(v, gather_indices)
    if k == 'done_hyps':
      v = tf.transpose(tf.reshape(v, [total_hyps, -1]))
    elif k == 'topk_hyps':
      v = tf.reshape(v, [source_batch, max_hyps_per_beam])
    elif k == 'topk_ids':
      v = tf.reshape(v, [total_hyps, -1])
    elif k in ('topk_lens', 'topk_scores', 'topk_decoded'):
      v = tf.reshape(v, [total_hyps])
    else:
      raise ValueError('Unexpected field: %s' % k)
    top[k] = v
  return BeamSearchDecodeOutput(**top)
| 4,294 | 0 | 102 |
a783b5038e9d34e2c321cad981dfaf4376994938 | 5,021 | py | Python | vortex/VortexPayloadProtocol.py | Synerty/vortexpy | fcf4c4dccbdf300b04bb962276230a48434cba17 | [
"MIT"
] | 1 | 2018-01-04T10:55:49.000Z | 2018-01-04T10:55:49.000Z | vortex/VortexPayloadProtocol.py | Synerty/vortexpy | fcf4c4dccbdf300b04bb962276230a48434cba17 | [
"MIT"
] | null | null | null | vortex/VortexPayloadProtocol.py | Synerty/vortexpy | fcf4c4dccbdf300b04bb962276230a48434cba17 | [
"MIT"
] | null | null | null | """
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
import logging
from abc import ABCMeta, abstractmethod
from collections import deque
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import ConnectionDone, ConnectionLost
from twisted.internet.protocol import Protocol, connectionDone
from vortex.DeferUtil import nonConcurrentMethod
from vortex.PayloadEnvelope import PayloadEnvelope
from vortex.PayloadIO import PayloadIO
logger = logging.getLogger(name=__name__)
| 32.185897 | 89 | 0.614021 | """
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
import logging
from abc import ABCMeta, abstractmethod
from collections import deque
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import ConnectionDone, ConnectionLost
from twisted.internet.protocol import Protocol, connectionDone
from vortex.DeferUtil import nonConcurrentMethod
from vortex.PayloadEnvelope import PayloadEnvelope
from vortex.PayloadIO import PayloadIO
logger = logging.getLogger(name=__name__)
class VortexPayloadProtocol(Protocol, metaclass=ABCMeta):
def __init__(self, logger):
self._data = b""
self._serverVortexUuid = None
self._serverVortexName = None
self._logger = logger
self._processVortexMsgsInProgress = False
self._vortexMsgsQueue = deque()
@abstractmethod
def _beat(self):
"""
EG :
if self._vortexClient:
self._vortexClient._beat()
"""
@abstractmethod
def _nameAndUuidReceived(self, name, uuid):
"""
EG :
if self._vortexClient:
self._vortexClient._setNameAndUuid(name=self._serverVortexName,
uuid=self._serverVortexUuid)
"""
@abstractmethod
def _createResponseSenderCallable(self):
"""
EG :
def sendResponse(vortexMsgs: Union[VortexMsgList, bytes]):
return self._vortexClient.sendVortexMsg(vortexMsgs=vortexMsgs)
"""
def dataReceived(self, bytesIn):
if bytesIn.startswith(b"<"):
raise Exception("Not Logged In")
self._data += bytesIn
self._beat()
self._processData()
def connectionLost(self, reason=connectionDone):
reasonFailure = reason
self._processData()
if reasonFailure.check(ConnectionDone):
self._logger.info("Connection closed by other end (it may be shutting down)")
elif isinstance(reasonFailure.value, ConnectionLost):
self._logger.info("Connection to other end lost (We may be shutting down)")
else:
self._logger.error("Closed with error")
try:
self._logger.exception(reason.getErrorMessage())
self._logger.exception(reason.getTraceback())
except:
self._logger.exception(reasonFailure.value)
def _processData(self):
if not self._data:
return
def getNextChunkIter():
try:
while True:
yield self._data.index(b'.')
except ValueError:
# There is no '.' in it, wait for more data.
return
for nextChunk in getNextChunkIter():
vortexMsg = self._data[:nextChunk]
self._data = self._data[nextChunk + 1:]
# If we get two heartbeats in a row, this will be false
if len(vortexMsg):
self._vortexMsgsQueue.append(vortexMsg)
if self._vortexMsgsQueue and not self._processVortexMsgs.running:
reactor.callLater(0, self._processVortexMsgs)
@inlineCallbacks
@nonConcurrentMethod
def _processVortexMsgs(self):
while self._vortexMsgsQueue:
vortexMsg = self._vortexMsgsQueue.popleft()
if b"." in vortexMsg:
raise Exception("Something went wrong, there is a '.' in the msg")
try:
payloadEnvelope = yield PayloadEnvelope().fromVortexMsgDefer(vortexMsg)
if payloadEnvelope.isEmpty():
self._processServerInfoPayload(payloadEnvelope)
else:
self._deliverPayload(payloadEnvelope)
except Exception as e:
print(vortexMsg)
print(e)
self._logger.exception(e)
raise
def _processServerInfoPayload(self, payload):
""" Process Server Info Payload
The first payload a server sends to the client contains information about it's
self.
"""
if PayloadEnvelope.vortexUuidKey in payload.filt:
self._serverVortexUuid = payload.filt[PayloadEnvelope.vortexUuidKey]
if PayloadEnvelope.vortexNameKey in payload.filt:
self._serverVortexName = payload.filt[PayloadEnvelope.vortexNameKey]
self._nameAndUuidReceived(name=self._serverVortexName,
uuid=self._serverVortexUuid)
def _deliverPayload(self, payload):
PayloadIO().process(payload,
vortexUuid=self._serverVortexUuid,
vortexName=self._serverVortexName,
httpSession=None,
sendResponse=self._createResponseSenderCallable()
)
| 2,801 | 1,539 | 23 |
2fa250f6887152c9f5dfbcb9850a379e8af9a21f | 4,568 | py | Python | matsdp/apt/apt_plot.py | dianwdw/matsdp | b5b822036d2ae1dab00f02a39fe7ba4a51384017 | [
"BSD-3-Clause"
] | 2 | 2019-11-12T08:35:45.000Z | 2022-02-20T14:26:54.000Z | matsdp/apt/apt_plot.py | dianwdw/matsdp | b5b822036d2ae1dab00f02a39fe7ba4a51384017 | [
"BSD-3-Clause"
] | null | null | null | matsdp/apt/apt_plot.py | dianwdw/matsdp | b5b822036d2ae1dab00f02a39fe7ba4a51384017 | [
"BSD-3-Clause"
] | 1 | 2021-12-13T13:27:04.000Z | 2021-12-13T13:27:04.000Z | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use("Agg")
| 44.784314 | 167 | 0.598074 | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use("Agg")
def plot_proxigram_csv(proxigram_csv_file_path, sysname, visible_elmt_list, interplation_on = False, fig_width=6, fig_height=5, fig_dpi = 600, fig_format = 'png'):
    """Plot element concentration profiles from an APT proxigram .csv file.

    Draws one stacked subplot per requested element (concentration vs.
    distance from the interface), optionally with a fitted sigmoid curve,
    saves the figure under the configured output directory and appends the
    call to the log file.

    Args:
        proxigram_csv_file_path: path to the proxigram .csv export.
        sysname: system name used in the figure title and the output file name.
        visible_elmt_list: element names (must appear in the csv) to plot.
        interplation_on: if True, fit funcs.sigmoid through each profile and
            draw the fitted curve dashed.
        fig_width, fig_height: figure size in inches.
        fig_dpi: resolution of the saved figure.
        fig_format: image format extension (e.g. 'png').

    Returns:
        0 on success.
    """
    # Removed dead code from the original: unused `args_dict = locals()`,
    # unused random draws (which also perturbed the global numpy RNG state),
    # unused xmin/xmax, and a duplicate `import numpy as np`.
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    import scipy.optimize as opt
    from .. import funcs
    from . import apt_read
    from .. import default_params
    defaults_dict = default_params.default_params()
    logfile = defaults_dict['logfile']
    output_dir = os.getcwd() + '/' + defaults_dict['output_dir_name']
    funcs.mkdir(output_dir)
    font_size = 15
    label_size = 15
    line_width = 0.5
    fig_size = (fig_width, fig_height)
    data_set, elmtname_list = apt_read.read_proxigram_csv(os.path.abspath(proxigram_csv_file_path))
    plt.rcParams['figure.figsize'] = fig_size
    # First csv column is the distance from the interface.
    distance = np.array(data_set[:, 0], dtype = np.float32)
    fig = plt.figure('concentration' + str(sysname))
    # Invisible full-figure axes; it only carries the shared x/y labels.
    ax = fig.add_subplot(111)
    plt.subplots_adjust(bottom = 0.10, left = 0.12, top = 0.94, right = 0.99, wspace=0.0, hspace=0.0)
    plt.xticks([])
    plt.yticks([])
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax1 = np.array([None] * len(visible_elmt_list))
    for i_plot in range(0, len(visible_elmt_list)):
        elmt_indx = elmtname_list.index(visible_elmt_list[i_plot])
        # One stacked subplot per requested element (N rows, 1 column).
        ax1[i_plot] = fig.add_subplot(str(len(visible_elmt_list)) + '1' + str(i_plot + 1))
        # Concentration columns are interleaved with their error columns —
        # TODO confirm against apt_read.read_proxigram_csv.
        concentration = np.array(data_set[:, 2 * elmt_indx + 1], dtype = np.float32)
        handle_list = []
        label_list = []
        p1, = plt.plot(distance, concentration, color='black', marker='o', mfc='none', linestyle = '')
        if interplation_on:
            # Fit a sigmoid through the measured profile and draw it dashed.
            (a_, b_, c_, d_), _ = opt.curve_fit(funcs.sigmoid, distance, concentration)
            xdense = np.linspace(min(distance), max(distance), num = 100, endpoint = True)
            y_fit = funcs.sigmoid(xdense, a_, b_, c_, d_)
            p1_fit, = plt.plot(xdense, y_fit, '--', color='black', linewidth=line_width)
            handle_list.append((p1, p1_fit))
            label_list.append(elmtname_list[elmt_indx])
        else:
            handle_list.append(p1)
            label_list.append(elmtname_list[elmt_indx])
        ax1[i_plot].legend(handles = handle_list, labels = label_list, loc = 'best', fontsize = font_size)
        MarginFraction = 0.09
        # Dashed vertical line marking the interface (distance = 0).
        plt.vlines(x = 0,
                   ymin = min(concentration) - (max(concentration) - min(concentration)) * MarginFraction,
                   ymax = max(concentration) + (max(concentration) - min(concentration)) * MarginFraction,
                   linestyle='--',
                   linewidth=0.5)
        plt.ylim(min(concentration) - (max(concentration) - min(concentration)) * MarginFraction, max(concentration) + (max(concentration) - min(concentration)) * MarginFraction)
        if i_plot < len(visible_elmt_list) - 1:
            # Only the bottom subplot keeps its x tick labels.
            plt.xticks([])
    plt.sca(ax)
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    ## plt.text(xlim[1]*0.03, ylim[1]*1.02, '$\gamma$ phase', fontsize = font_size)
    ## plt.text(xlim[1]*0.8, ylim[1]*1.02, '$\gamma^{\prime}$ phase', fontsize = font_size)
    plt.xlabel('distance from the interface ($nm$)', size = label_size, labelpad=font_size*1.4)
    plt.ylabel('concentration ($at.\%$)', size = label_size, labelpad=font_size*2.3)
    fig_file = output_dir + '/' + 'apt_concentration_profile_' + str(sysname) + '.' + fig_format
    plt.savefig(fig_file, dpi = fig_dpi)
    plt.close()
    funcs.write_log(
        logfile,
        'apt_plot.plot_proxigram_csv(' + '\n' +
        '    proxigram_csv_file_path=' + 'r\'' + str(proxigram_csv_file_path) + '\'' + ',\n' +
        '    sysname=' + '\'' + str(sysname) + '\'' + ',\n' +
        '    visible_elmt_list=' + str(visible_elmt_list) + ',\n' +
        '    interplation_on=' + str(interplation_on) + ',\n' +
        '    fig_width=' + str(fig_width) + ',\n' +
        '    fig_height=' + str(fig_height) + ',\n' +
        '    fig_dpi=' + str(fig_dpi) + ',\n' +
        '    fig_format=' + '\'' + str(fig_format) + '\'' + ')\n' +
        '###############################\n')
    return 0
| 4,474 | 0 | 25 |
7cca1a7ee26e5b0c58ebf6109568a30fa8e46997 | 37,125 | py | Python | src/tac/core/wavenet_vocoder/models/wavenet.py | stefantaubert/Tacotron-2 | 710a3b39b48147307fa8eef2c9f635562f48d49a | [
"MIT"
] | null | null | null | src/tac/core/wavenet_vocoder/models/wavenet.py | stefantaubert/Tacotron-2 | 710a3b39b48147307fa8eef2c9f635562f48d49a | [
"MIT"
] | null | null | null | src/tac/core/wavenet_vocoder/models/wavenet.py | stefantaubert/Tacotron-2 | 710a3b39b48147307fa8eef2c9f635562f48d49a | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from src.etc import audio
from src.tac.core.wavenet_vocoder import util
from src.tac.core.wavenet_vocoder.models.gaussian import sample_from_gaussian
from src.tac.core.wavenet_vocoder.models.mixture import \
sample_from_discretized_mix_logistic
from src.tac.core.wavenet_vocoder.models.modules import (
Conv1D1x1, ConvTranspose1D, ConvTranspose2D,
DiscretizedMixtureLogisticLoss, Embedding,
GaussianMaximumLikelihoodEstimation, LeakyReluActivation,
MaskedCrossEntropyLoss, MaskedMeanSquaredError, NearestNeighborUpsample,
ReluActivation, ResidualConv1DGLU, ResizeConvolution, SubPixelConvolution,
WeightNorm)
from src.tac.core.wavenet_vocoder.util import *
from src.tac.infolog import log
def _expand_global_features(batch_size, time_length, global_features, data_format='BCT'):
  """Expand global conditioning features to all time steps

  Args:
    batch_size: int
    time_length: int
    global_features: Tensor of shape [batch_size, channels] or [batch_size, channels, 1]
    data_format: string, 'BCT' to get output of shape [batch_size, channels, time_length]
      or 'BTC' to get output of shape [batch_size, time_length, channels]

  Returns:
    None or Tensor of shape [batch_size, channels, time_length] or [batch_size, time_length, channels]
  """
  if data_format not in ('BCT', 'BTC'):
    raise ValueError('{} is an unknow data format, accepted formats are "BCT" and "BTC"'.format(data_format))

  if global_features is None:
    return None

  # Normalize the input to [batch_size, channels, 1] whether it arrived with
  # or without a trailing singleton dimension.
  singleton = tf.reshape(
      global_features,
      [tf.shape(global_features)[0], tf.shape(global_features)[1], 1])

  # Broadcast along time: [batch_size, channels, 1] -> [batch_size, channels, time_length].
  expanded = tf.tile(singleton, [1, 1, time_length])

  if data_format == 'BCT':
    return expanded
  # 'BTC': swap channel and time axes.
  return tf.transpose(expanded, [0, 2, 1])
def receptive_field_size(total_layers, num_cycles, kernel_size, dilation=lambda x: 2**x):
  """Compute receptive field size.

  Args:
    total_layers; int
    num_cycles: int
    kernel_size: int
    dilation: callable, function used to compute dilation factor.
      use "lambda x: 1" to disable dilated convolutions.

  Returns:
    int: receptive field size in sample.
  """
  assert total_layers % num_cycles == 0
  layers_per_cycle = total_layers // num_cycles
  # Each layer with dilation d widens the receptive field by (kernel_size - 1) * d;
  # the dilation pattern restarts at the beginning of every cycle.
  dilation_sum = sum(dilation(layer % layers_per_cycle) for layer in range(total_layers))
  return (kernel_size - 1) * dilation_sum + 1
def maybe_Normalize_weights(layer, weight_normalization=True, init=False, init_scale=1.):
  """Maybe Wraps layer with Weight Normalization wrapper.

  Args;
    layer: tf layers instance, the layer candidate for normalization
    weight_normalization: Boolean, determines whether to normalize the layer
    init: Boolean, determines if the current run is the data dependent initialization run
    init_scale: Float, Initialisation scale of the data dependent initialization. Usually 1.
  """
  # Pass the layer through untouched when normalization is disabled.
  if not weight_normalization:
    return layer
  return WeightNorm(layer, init, init_scale)
class WaveNet():
"""Tacotron-2 Wavenet Vocoder model.
"""
    def initialize(self, y, c, g, input_lengths, x=None, synthesis_length=None, test_inputs=None, split_infos=None):
        '''Initialize wavenet graph for train, eval and test cases.

        Builds one graph replica per GPU. The mode is inferred from the
        arguments: training when `x` (teacher-forcing inputs) is given,
        evaluation when only `y` (targets) is given, synthesis otherwise.

        Args:
            y: target waveform samples (train/eval).
            c: local conditioning features (e.g. mel spectrograms), or None.
            g: global conditioning (e.g. speaker ids), or None.
            input_lengths: per-example lengths of x/y.
            x: teacher-forcing input waveform (training only).
            synthesis_length: output length when synthesizing without c.
            test_inputs: optional ground-truth inputs for debug synthesis.
            split_infos: unused here — TODO confirm whether callers rely on it.
        '''
        hparams = self._hparams
        # Mode flags: training needs x; evaluation needs y but no x.
        self.is_training = x is not None
        self.is_evaluating = not self.is_training and y is not None
        #Set all convolutions to corresponding mode
        self.set_mode(self.is_training)

        # Input splitting across towers happens on CPU when using several
        # GPUs (or when explicitly requested).
        split_device = '/cpu:0' if self._hparams.wavenet_num_gpus > 1 or self._hparams.split_on_cpu else '/gpu:0'
        with tf.device(split_device):
            hp = self._hparams
            lout_int = [tf.int32] * hp.wavenet_num_gpus
            lout_float = [tf.float32] * hp.wavenet_num_gpus

            # Split each input along the batch dimension, one slice per GPU;
            # absent inputs are replicated as None placeholders.
            tower_input_lengths = tf.split(input_lengths, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if input_lengths is not None else [input_lengths] * hp.wavenet_num_gpus
            tower_y = tf.split(y, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if y is not None else [y] * hp.wavenet_num_gpus
            tower_x = tf.split(x, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if x is not None else [x] * hp.wavenet_num_gpus
            tower_c = tf.split(c, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if self.local_conditioning_enabled() else [None] * hp.wavenet_num_gpus
            tower_g = tf.split(g, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if self.global_conditioning_enabled() else [None] * hp.wavenet_num_gpus
            tower_test_inputs = tf.split(test_inputs, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if test_inputs is not None else [test_inputs] * hp.wavenet_num_gpus

        # Per-tower collections, filled below; indexed by GPU.
        self.tower_y_hat_q = []
        self.tower_y_hat_train = []
        self.tower_y = []
        self.tower_input_lengths = []
        self.tower_means = []
        self.tower_log_scales = []
        self.tower_y_hat_log = []
        self.tower_y_log = []
        self.tower_c = []
        self.tower_y_eval = []
        self.tower_eval_length = []
        self.tower_y_hat = []
        self.tower_y_target = []
        self.tower_eval_c = []
        self.tower_mask = []
        self.tower_upsampled_local_features = []
        self.tower_eval_upsampled_local_features = []
        self.tower_synth_upsampled_local_features = []

        log('Initializing Wavenet model.  Dimensions (? = dynamic shape): ')
        log('  Train mode:                {}'.format(self.is_training))
        log('  Eval mode:                 {}'.format(self.is_evaluating))
        log('  Synthesis mode:            {}'.format(not (self.is_training or self.is_evaluating)))

        #1. Declare GPU devices
        gpus = ['/gpu:{}'.format(i) for i in range(hp.wavenet_num_gpus)]
        for i in range(hp.wavenet_num_gpus):
            with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
                with tf.variable_scope('inference') as scope:
                    log('  device:                    {}'.format(i))
                    #Training
                    if self.is_training:
                        batch_size = tf.shape(x)[0]
                        #[batch_size, time_length, 1]
                        self.tower_mask.append(self.get_mask(tower_input_lengths[i], maxlen=tf.shape(tower_x[i])[-1])) #To be used in loss computation
                        #[batch_size, channels, time_length]
                        y_hat_train = self.step(tower_x[i], tower_c[i], tower_g[i], softmax=False) #softmax is automatically computed inside softmax_cross_entropy if needed

                        if is_mulaw_quantize(hparams.input_type):
                            #[batch_size, time_length, channels]
                            self.tower_y_hat_q.append(tf.transpose(y_hat_train, [0, 2, 1]))

                        self.tower_y_hat_train.append(y_hat_train)
                        self.tower_y.append(tower_y[i])
                        self.tower_input_lengths.append(tower_input_lengths[i])

                        #Add mean and scale stats if using Guassian distribution output (there would be too many logistics if using MoL)
                        if self._hparams.out_channels == 2:
                            self.tower_means.append(y_hat_train[:, 0, :])
                            self.tower_log_scales.append(y_hat_train[:, 1, :])
                        else:
                            self.tower_means.append(None)

                        #Graph extension for log saving
                        #[batch_size, time_length]
                        shape_control = (batch_size, tf.shape(tower_x[i])[-1], 1)
                        with tf.control_dependencies([tf.assert_equal(tf.shape(tower_y[i]), shape_control)]):
                            y_log = tf.squeeze(tower_y[i], [-1])
                            if is_mulaw_quantize(hparams.input_type):
                                self.tower_y[i] = y_log

                        y_hat_log = tf.cond(tf.equal(tf.rank(y_hat_train), 4),
                            lambda: tf.squeeze(y_hat_train, [-1]),
                            lambda: y_hat_train)
                        y_hat_log = tf.reshape(y_hat_log, [batch_size, hparams.out_channels, -1])

                        if is_mulaw_quantize(hparams.input_type):
                            #[batch_size, time_length]
                            y_hat_log = tf.argmax(tf.nn.softmax(y_hat_log, axis=1), 1)

                            y_hat_log = util.inv_mulaw_quantize(y_hat_log, hparams.quantize_channels)
                            y_log = util.inv_mulaw_quantize(y_log, hparams.quantize_channels)

                        else:
                            #[batch_size, time_length]
                            if hparams.out_channels == 2:
                                y_hat_log = sample_from_gaussian(
                                    y_hat_log, log_scale_min_gauss=hparams.log_scale_min_gauss)
                            else:
                                y_hat_log = sample_from_discretized_mix_logistic(
                                    y_hat_log, log_scale_min=hparams.log_scale_min)

                            if is_mulaw(hparams.input_type):
                                y_hat_log = util.inv_mulaw(y_hat_log, hparams.quantize_channels)
                                y_log = util.inv_mulaw(y_log, hparams.quantize_channels)

                        self.tower_y_hat_log.append(y_hat_log)
                        self.tower_y_log.append(y_log)
                        self.tower_c.append(tower_c[i])
                        self.tower_upsampled_local_features.append(self.upsampled_local_features)

                        log('  inputs:                    {}'.format(tower_x[i].shape))
                        if self.local_conditioning_enabled():
                            log('  local_condition:           {}'.format(tower_c[i].shape))
                        if self.has_speaker_embedding():
                            log('  global_condition:          {}'.format(tower_g[i].shape))
                        log('  targets:                   {}'.format(y_log.shape))
                        log('  outputs:                   {}'.format(y_hat_log.shape))

                    #evaluating
                    elif self.is_evaluating:
                        #[time_length, ]
                        # Evaluation uses only the first example of this tower's batch.
                        idx = 0
                        length = tower_input_lengths[i][idx]
                        y_target = tf.reshape(tower_y[i][idx], [-1])[:length]
                        # Teacher-forced eval unless natural (free-running) eval is requested.
                        test_inputs = tf.reshape(y_target, [1, -1, 1]) if not hparams.wavenet_natural_eval else None

                        if tower_c[i] is not None:
                            tower_c[i] = tf.expand_dims(tower_c[i][idx, :, :length], axis=0)
                            with tf.control_dependencies([tf.assert_equal(tf.rank(tower_c[i]), 3)]):
                                tower_c[i] = tf.identity(tower_c[i], name='eval_assert_c_rank_op')

                        if tower_g[i] is not None:
                            tower_g[i] = tf.expand_dims(tower_g[i][idx], axis=0)

                        batch_size = tf.shape(tower_c[i])[0]

                        #Start silence frame
                        if is_mulaw_quantize(hparams.input_type):
                            initial_value = mulaw_quantize(0, hparams.quantize_channels)
                        elif is_mulaw(hparams.input_type):
                            initial_value = mulaw(0.0, hparams.quantize_channels)
                        else:
                            initial_value = 0.0

                        #[channels, ]
                        if is_mulaw_quantize(hparams.input_type):
                            initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
                            initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
                        else:
                            initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value

                        #Fast eval
                        y_hat = self.incremental(initial_input, c=tower_c[i], g=tower_g[i], time_length=length, test_inputs=test_inputs,
                            softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)

                        #Save targets and length for eval loss computation
                        # NOTE(review): reads the unsplit `y` rather than tower_y[i] —
                        # confirm this is intended for multi-GPU evaluation.
                        if is_mulaw_quantize(hparams.input_type):
                            self.tower_y_eval.append(tf.reshape(y[idx], [1, -1])[:, :length])
                        else:
                            self.tower_y_eval.append(tf.expand_dims(y[idx], axis=0)[:, :length, :])
                        self.tower_eval_length.append(length)

                        if is_mulaw_quantize(hparams.input_type):
                            y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [-1])
                            y_hat = inv_mulaw_quantize(y_hat, hparams.quantize_channels)
                            y_target = inv_mulaw_quantize(y_target, hparams.quantize_channels)
                        elif is_mulaw(hparams.input_type):
                            y_hat = inv_mulaw(tf.reshape(y_hat, [-1]), hparams.quantize_channels)
                            y_target = inv_mulaw(y_target, hparams.quantize_channels)
                        else:
                            y_hat = tf.reshape(y_hat, [-1])

                        self.tower_y_hat.append(y_hat)
                        self.tower_y_target.append(y_target)
                        self.tower_eval_c.append(tower_c[i][idx])
                        self.tower_eval_upsampled_local_features.append(self.upsampled_local_features[idx])

                        if self.local_conditioning_enabled():
                            log('  local_condition:           {}'.format(tower_c[i].shape))
                        if self.has_speaker_embedding():
                            log('  global_condition:          {}'.format(tower_g[i].shape))
                        log('  targets:                   {}'.format(y_target.shape))
                        log('  outputs:                   {}'.format(y_hat.shape))

                    #synthesizing
                    else:
                        batch_size = tf.shape(tower_c[i])[0]
                        if c is None:
                            # Unconditional synthesis: caller must fix the output length.
                            assert synthesis_length is not None
                        else:
                            #[batch_size, local_condition_time, local_condition_dimension(num_mels)]
                            message = ('Expected 3 dimension shape [batch_size(1), time_length, {}] for local condition features but found {}'.format(
                                hparams.cin_channels, tower_c[i].shape))
                            with tf.control_dependencies([tf.assert_equal(tf.rank(tower_c[i]), 3, message=message)]):
                                tower_c[i] = tf.identity(tower_c[i], name='synthesis_assert_c_rank_op')

                            Tc = tf.shape(tower_c[i])[1]
                            upsample_factor = audio.get_hop_size(self._hparams)

                            #Overwrite length with respect to local condition features
                            synthesis_length = Tc * upsample_factor

                            #[batch_size, local_condition_dimension, local_condition_time]
                            #time_length will be corrected using the upsample network
                            tower_c[i] = tf.transpose(tower_c[i], [0, 2, 1])

                        if tower_g[i] is not None:
                            assert tower_g[i].shape == (batch_size, 1)

                        #Start silence frame
                        if is_mulaw_quantize(hparams.input_type):
                            initial_value = mulaw_quantize(0, hparams.quantize_channels)
                        elif is_mulaw(hparams.input_type):
                            initial_value = mulaw(0.0, hparams.quantize_channels)
                        else:
                            initial_value = 0.0

                        if is_mulaw_quantize(hparams.input_type):
                            assert initial_value >= 0 and initial_value < hparams.quantize_channels
                            initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
                            initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
                        else:
                            initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value

                        y_hat = self.incremental(initial_input, c=tower_c[i], g=tower_g[i], time_length=synthesis_length, test_inputs=tower_test_inputs[i],
                            softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)

                        if is_mulaw_quantize(hparams.input_type):
                            y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [batch_size, -1])
                            y_hat = util.inv_mulaw_quantize(y_hat, hparams.quantize_channels)
                        elif is_mulaw(hparams.input_type):
                            y_hat = util.inv_mulaw(tf.reshape(y_hat, [batch_size, -1]), hparams.quantize_channels)
                        else:
                            y_hat = tf.reshape(y_hat, [batch_size, -1])

                        self.tower_y_hat.append(y_hat)
                        self.tower_synth_upsampled_local_features.append(self.upsampled_local_features)

                        if self.local_conditioning_enabled():
                            log('  local_condition:           {}'.format(tower_c[i].shape))
                        if self.has_speaker_embedding():
                            log('  global_condition:          {}'.format(tower_g[i].shape))
                        log('  outputs:                   {}'.format(y_hat.shape))

        self.variables = tf.trainable_variables()
        log('  Receptive Field:           ({} samples / {:.1f} ms)'.format(self.receptive_field, self.receptive_field / hparams.sample_rate * 1000.))

        #1_000_000 is causing syntax problems for some people?! Python please :)
        log('  WaveNet Parameters:        {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.variables]) / 1000000))

        self.ema = tf.train.ExponentialMovingAverage(decay=hparams.wavenet_ema_decay)
    def add_loss(self):
        '''Adds loss computation to the graph. Supposes that initialize function has already been called.

        Computes one loss per GPU tower (cross entropy for mu-law-quantized
        inputs, Gaussian MLE for 2-channel output, discretized mixture of
        logistics otherwise) and averages them into `self.loss` (training)
        or `self.eval_loss` (evaluation).
        '''
        self.tower_loss = []
        total_loss = 0
        gpus = ['/gpu:{}'.format(i) for i in range(self._hparams.wavenet_num_gpus)]
        for i in range(self._hparams.wavenet_num_gpus):
            with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
                with tf.variable_scope('loss') as scope:
                    if self.is_training:
                        # Predictions are shifted by one step relative to targets
                        # (predict sample t+1 from samples up to t).
                        if is_mulaw_quantize(self._hparams.input_type):
                            tower_loss = MaskedCrossEntropyLoss(self.tower_y_hat_q[i][:, :-1, :], self.tower_y[i][:, 1:], mask=self.tower_mask[i])
                        else:
                            if self._hparams.out_channels == 2:
                                tower_loss = GaussianMaximumLikelihoodEstimation(self.tower_y_hat_train[i][:, :, :-1], self.tower_y[i][:, 1:, :],
                                    hparams=self._hparams, mask=self.tower_mask[i])
                            else:
                                tower_loss = DiscretizedMixtureLogisticLoss(self.tower_y_hat_train[i][:, :, :-1], self.tower_y[i][:, 1:, :],
                                    hparams=self._hparams, mask=self.tower_mask[i])

                    elif self.is_evaluating:
                        # NOTE(review): initialize() appends eval predictions to
                        # self.tower_y_hat, not tower_y_hat_eval — confirm where
                        # tower_y_hat_eval is assigned before relying on eval loss.
                        if is_mulaw_quantize(self._hparams.input_type):
                            tower_loss = MaskedCrossEntropyLoss(self.tower_y_hat_eval[i], self.tower_y_eval[i], lengths=[self.tower_eval_length[i]])
                        else:
                            if self._hparams.out_channels == 2:
                                tower_loss = GaussianMaximumLikelihoodEstimation(self.tower_y_hat_eval[i], self.tower_y_eval[i],
                                    hparams=self._hparams, lengths=[self.tower_eval_length[i]])
                            else:
                                tower_loss = DiscretizedMixtureLogisticLoss(self.tower_y_hat_eval[i], self.tower_y_eval[i],
                                    hparams=self._hparams, lengths=[self.tower_eval_length[i]])

                    else:
                        raise RuntimeError('Model not in train/eval mode but computing loss: Where did this go wrong?')

                    #Compute final loss
                    self.tower_loss.append(tower_loss)
                    total_loss += tower_loss

        # Average the per-tower losses into a single scalar.
        if self.is_training:
            self.loss = total_loss / self._hparams.wavenet_num_gpus
        else:
            self.eval_loss = total_loss / self._hparams.wavenet_num_gpus
def add_optimizer(self, global_step):
    '''Adds optimizer to the graph. Supposes that initialize function has already been called.

    Builds the training op in four stages: (1) learning-rate schedule +
    Adam on `grad_device`, (2) per-GPU-tower gradient computation against
    `self.tower_loss[i]`, (3) cross-tower gradient averaging and optional
    clipping, (4) `apply_gradients` followed by an exponential moving
    average update of all trainable variables. The op to run for one
    training step is `self.optimize` (the EMA update, which depends on the
    Adam apply op).

    Args:
        global_step: scalar step counter Variable; consumed by the lr
            schedules and incremented by `apply_gradients`.
    '''
    hp = self._hparams
    tower_gradients = []

    # 1. Declare GPU devices
    gpus = ['/gpu:{}'.format(i) for i in range(hp.wavenet_num_gpus)]
    # NOTE(review): this checks hp.tacotron_num_gpus, not wavenet_num_gpus --
    # looks copied from the Tacotron optimizer; confirm intent before changing.
    grad_device = '/cpu:0' if hp.tacotron_num_gpus > 1 else gpus[0]

    with tf.device(grad_device):
        with tf.variable_scope('optimizer'):
            # Create lr schedule
            if hp.wavenet_lr_schedule == 'noam':
                learning_rate = self._noam_learning_rate_decay(hp.wavenet_learning_rate,
                    global_step,
                    warmup_steps=hp.wavenet_warmup)
            else:
                assert hp.wavenet_lr_schedule == 'exponential'
                learning_rate = self._exponential_learning_rate_decay(hp.wavenet_learning_rate,
                    global_step,
                    hp.wavenet_decay_rate,
                    hp.wavenet_decay_steps)

            # Adam optimization
            self.learning_rate = learning_rate
            optimizer = tf.train.AdamOptimizer(learning_rate, hp.wavenet_adam_beta1,
                hp.wavenet_adam_beta2, hp.wavenet_adam_epsilon)

    # 2. Compute Gradient
    for i in range(hp.wavenet_num_gpus):
        # Device placement: parameters on the cpu "parameter server",
        # gradient computation on gpu i.
        with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
            with tf.variable_scope('optimizer') as scope:
                gradients = optimizer.compute_gradients(self.tower_loss[i])
                tower_gradients.append(gradients)

    # 3. Average Gradient
    with tf.device(grad_device):
        avg_grads = []
        variables = []
        for grad_and_vars in zip(*tower_gradients):
            # each_grads_vars = ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
            if grad_and_vars[0][0] is not None:
                grads = []
                for g, _ in grad_and_vars:
                    expanded_g = tf.expand_dims(g, 0)
                    # Append on a "tower" dimension which we will average over below.
                    grads.append(expanded_g)

                # Average over the 'tower' dimension.
                grad = tf.concat(axis=0, values=grads)
                grad = tf.reduce_mean(grad, 0)
            else:
                # No tower produced a gradient for this variable; keep None.
                grad = grad_and_vars[0][0]

            # Variables are shared across towers, so the first tower's is enough.
            v = grad_and_vars[0][1]
            avg_grads.append(grad)
            variables.append(v)

        self.gradients = avg_grads
        # Gradients clipping
        if hp.wavenet_clip_gradients:
            # Clip each gradient by a [min, max] range of values and its norm by [0, max_norm_value]
            clipped_grads = []
            for g in avg_grads:
                if g is not None:
                    clipped_g = tf.clip_by_norm(g, hp.wavenet_gradient_max_norm)
                    clipped_g = tf.clip_by_value(clipped_g, -hp.wavenet_gradient_max_value, hp.wavenet_gradient_max_value)
                    clipped_grads.append(clipped_g)
                else:
                    clipped_grads.append(g)
        else:
            clipped_grads = avg_grads

        # Run pending UPDATE_OPS (e.g. moving statistics) before the weight update.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            adam_optimize = optimizer.apply_gradients(zip(clipped_grads, variables),
                global_step=global_step)

        # Add exponential moving average
        # https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
        # Use adam optimization process as a dependency
        with tf.control_dependencies([adam_optimize]):
            # Create the shadow variables and add ops to maintain moving averages
            # Also updates moving averages after each update step
            # This is the optimize call instead of traditional adam_optimize one.
            assert set(self.variables) == set(variables)  # Verify all trainable variables are being averaged
            self.optimize = self.ema.apply(variables)
#Sanity check functions
def step(self, x, c=None, g=None, softmax=False):
    """Forward step (teacher-forced, whole-sequence training pass).

    Args:
        x: Tensor of shape [batch_size, channels, time_length], One-hot encoded audio signal.
        c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features.
        g: Tensor of shape [batch_size, gin_channels, 1] or Ids of shape [batch_size, 1],
            Global conditioning features.
            Note: set hparams.use_speaker_embedding to False to disable embedding layer and
            use external One-hot encoded features.
        softmax: Boolean, Whether to apply softmax.
    Returns:
        a Tensor of shape [batch_size, out_channels, time_length]
    """
    #[batch_size, channels, time_length] -> [batch_size, time_length, channels]
    batch_size = tf.shape(x)[0]
    time_length = tf.shape(x)[-1]

    if g is not None:
        if self.embed_speakers is not None:
            #[batch_size, 1] ==> [batch_size, 1, gin_channels]
            g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
            #[batch_size, gin_channels, 1]
            with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
                g = tf.transpose(g, [0, 2, 1])

    #Expand global conditioning features to all time steps
    g_bct = _expand_global_features(batch_size, time_length, g, data_format='BCT')

    if c is not None:
        # Pick the dummy axis expected by the configured upsampling network.
        if self._hparams.upsample_type == '2D':
            #[batch_size, 1, cin_channels, time_length]
            expand_dim = 1
        elif self._hparams.upsample_type == '1D':
            #[batch_size, cin_channels, 1, time_length]
            expand_dim = 2
        else:
            assert self._hparams.upsample_type in ('Resize', 'SubPixel', 'NearestNeighbor')
            #[batch_size, cin_channels, time_length, 1]
            expand_dim = 3

        c = tf.expand_dims(c, axis=expand_dim)

        # Upsample mel features to one conditioning vector per audio sample.
        for transposed_conv in self.upsample_conv:
            c = transposed_conv(c)

        #[batch_size, cin_channels, time_length]
        c = tf.squeeze(c, [expand_dim])

        # Runtime guard: upsampled conditioning must align with the audio length.
        with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], tf.shape(x)[-1])]):
            c = tf.identity(c, name='control_c_and_x_shape')

        self.upsampled_local_features = c

    #Feed data to network
    x = self.first_conv(x)
    skips = None
    for conv in self.residual_layers:
        x, h = conv(x, c=c, g=g_bct)
        if skips is None:
            skips = h
        else:
            # Accumulate skip connections; legacy mode rescales to keep variance stable.
            skips = skips + h
            if self._hparams.legacy:
                skips = skips * np.sqrt(0.5)
    x = skips

    for conv in self.last_conv_layers:
        x = conv(x)

    return tf.nn.softmax(x, axis=1) if softmax else x
def incremental(self, initial_input, c=None, g=None,
        time_length=100, test_inputs=None,
        softmax=True, quantize=True, log_scale_min=-7.0, log_scale_min_gauss=-7.0):
    """Incremental forward step (autoregressive sample-by-sample generation).

    Inputs of shape [batch_size, channels, time_length] are reshaped to [batch_size, time_length, channels]
    Input of each time step is of shape [batch_size, 1, channels]

    Args:
        initial_input: Tensor of shape [batch_size, channels, 1], initial recurrence input.
        c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features
        g: Tensor of shape [batch_size, gin_channels, time_length] or [batch_size, gin_channels, 1]
            global conditioning features
        time_length: int, number of timesteps to generate
        test_inputs: Tensor, teacher forcing inputs (debug)
        softmax: Boolean, whether to apply softmax activation
        quantize: Whether to quantize softmax output before feeding to
            next time step input
        log_scale_min: float, log scale minimum value (mixture of logistics output).
        log_scale_min_gauss: float, log scale minimum value (Gaussian output).
    Returns:
        Tensor of shape [batch_size, channels, time_length] or [batch_size, channels, 1]
        Generated one_hot encoded samples
    """
    batch_size = tf.shape(initial_input)[0]

    #Note: should reshape to [batch_size, time_length, channels]
    #not [batch_size, channels, time_length]
    if test_inputs is not None:
        if self.scalar_input:
            # NOTE(review): `tf.shape(...)[1] == 1` compares a graph Tensor to a
            # Python int at graph-build time; under TF1 this does not evaluate the
            # tensor's value -- confirm this branch behaves as intended.
            if tf.shape(test_inputs)[1] == 1:
                test_inputs = tf.transpose(test_inputs, [0, 2, 1])
        else:
            test_inputs = tf.cast(test_inputs, tf.int32)
            test_inputs = tf.one_hot(indices=test_inputs, depth=self._hparams.quantize_channels, dtype=tf.float32)
            test_inputs = tf.squeeze(test_inputs, [2])

            # NOTE(review): same Tensor-vs-int comparison caveat as above.
            if tf.shape(test_inputs)[1] == self._hparams.out_channels:
                test_inputs = tf.transpose(test_inputs, [0, 2, 1])

        batch_size = tf.shape(test_inputs)[0]
        if time_length is None:
            time_length = tf.shape(test_inputs)[1]
        else:
            time_length = tf.maximum(time_length, tf.shape(test_inputs)[1])

    #Global conditioning
    if g is not None:
        if self.embed_speakers is not None:
            g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
            #[batch_size, channels, 1]
            with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
                g = tf.transpose(g, [0, 2, 1])
    # Stored on self so the loop body can read it each generated step.
    self.g_btc = _expand_global_features(batch_size, time_length, g, data_format='BTC')

    #Local conditioning
    if c is not None:
        # Dummy axis location depends on the upsampling network type.
        if self._hparams.upsample_type == '2D':
            #[batch_size, 1, cin_channels, time_length]
            expand_dim = 1
        elif self._hparams.upsample_type == '1D':
            #[batch_size, cin_channels, 1, time_length]
            expand_dim = 2
        else:
            assert self._hparams.upsample_type in ('Resize', 'SubPixel', 'NearestNeighbor')
            #[batch_size, cin_channels, time_length, 1]
            expand_dim = 3

        c = tf.expand_dims(c, axis=expand_dim)

        for upsample_conv in self.upsample_conv:
            c = upsample_conv(c)

        #[batch_size, channels, time_length]
        c = tf.squeeze(c, [expand_dim])

        with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], time_length)]):
            self.c = tf.transpose(c, [0, 2, 1])

        self.upsampled_local_features = c

    #Initialize loop variables
    if initial_input.shape[1] == self._hparams.out_channels:
        initial_input = tf.transpose(initial_input, [0, 2, 1])

    initial_time = tf.constant(0, dtype=tf.int32)
    # if test_inputs is not None:
    # 	initial_input = tf.expand_dims(test_inputs[:, 0, :], axis=1)
    initial_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
    initial_loss_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)

    #Only use convolutions queues for Residual Blocks main convolutions (only ones with kernel size 3 and dilations, all others are 1x1)
    initial_queues = [tf.zeros((batch_size, res_conv.layer.kw + (res_conv.layer.kw - 1) * (res_conv.layer.dilation_rate[0] - 1), self._hparams.residual_channels),
        name='convolution_queue_{}'.format(i+1)) for i, res_conv in enumerate(self.residual_layers)]

    # NOTE(review): `condition` and `body` (the while_loop predicate and the
    # per-sample generation step) are not defined anywhere in this excerpt --
    # they appear to have been stripped from the file; restore them from the
    # upstream implementation or this call raises NameError.
    res = tf.while_loop(
        condition,
        body,
        loop_vars=[
            initial_time, initial_outputs_ta, initial_input, initial_loss_outputs_ta, initial_queues
        ],
        parallel_iterations=32,
        swap_memory=self._hparams.wavenet_swap_with_cpu)

    outputs_ta = res[1]
    #[time_length, batch_size, channels]
    outputs = outputs_ta.stack()

    #Save eval prediction for eval loss computation
    eval_outputs = res[3].stack()
    self.tower_y_hat_eval = []
    if is_mulaw_quantize(self._hparams.input_type):
        self.tower_y_hat_eval.append(tf.transpose(eval_outputs, [1, 0, 2]))
    else:
        self.tower_y_hat_eval.append(tf.transpose(eval_outputs, [1, 2, 0]))

    #[batch_size, channels, time_length]
    return tf.transpose(outputs, [1, 2, 0])
| 40.005388 | 168 | 0.713185 | import numpy as np
import tensorflow as tf
from src.etc import audio
from src.tac.core.wavenet_vocoder import util
from src.tac.core.wavenet_vocoder.models.gaussian import sample_from_gaussian
from src.tac.core.wavenet_vocoder.models.mixture import \
sample_from_discretized_mix_logistic
from src.tac.core.wavenet_vocoder.models.modules import (
Conv1D1x1, ConvTranspose1D, ConvTranspose2D,
DiscretizedMixtureLogisticLoss, Embedding,
GaussianMaximumLikelihoodEstimation, LeakyReluActivation,
MaskedCrossEntropyLoss, MaskedMeanSquaredError, NearestNeighborUpsample,
ReluActivation, ResidualConv1DGLU, ResizeConvolution, SubPixelConvolution,
WeightNorm)
from src.tac.core.wavenet_vocoder.util import *
from src.tac.infolog import log
def _expand_global_features(batch_size, time_length, global_features, data_format='BCT'):
    """Expand global conditioning features to all time steps.

    Args:
        batch_size: int or scalar Tensor (unused; batch size is read from
            `global_features` itself, kept for interface compatibility).
        time_length: int or scalar Tensor, number of time steps to tile to.
        global_features: Tensor of shape [batch_size, channels] or
            [batch_size, channels, 1], or None.
    	data_format: string, 'BCT' to get output of shape [batch_size, channels, time_length]
    		or 'BTC' to get output of shape [batch_size, time_length, channels]

    Returns:
        None if `global_features` is None, else a Tensor of shape
        [batch_size, channels, time_length] ('BCT') or
        [batch_size, time_length, channels] ('BTC').

    Raises:
        ValueError: if `data_format` is neither 'BCT' nor 'BTC'.
    """
    accepted_formats = ('BCT', 'BTC')
    # Fixed typo in the error message ("unknow" -> "unknown") and the
    # `not (x in y)` anti-idiom; behavior otherwise unchanged.
    if data_format not in accepted_formats:
        raise ValueError('{} is an unknown data format, accepted formats are "BCT" and "BTC"'.format(data_format))

    if global_features is None:
        return None

    # [batch_size, channels] or [batch_size, channels, 1] ==> [batch_size, channels, 1]
    g = tf.reshape(global_features, [tf.shape(global_features)[0], tf.shape(global_features)[1], 1])

    # [batch_size, channels, 1] ==> [batch_size, channels, time_length]
    g = tf.tile(g, [1, 1, time_length])

    if data_format == 'BCT':
        return g
    # [batch_size, channels, time_length] ==> [batch_size, time_length, channels]
    return tf.transpose(g, [0, 2, 1])
def receptive_field_size(total_layers, num_cycles, kernel_size, dilation=lambda x: 2**x):
    """Compute receptive field size.

    Args:
        total_layers: int, total number of convolution layers; must be a
            multiple of `num_cycles`.
        num_cycles: int, number of dilation cycles (stacks).
        kernel_size: int, convolution kernel size.
        dilation: callable mapping a layer's index within its cycle to its
            dilation factor. Use "lambda x: 1" to disable dilated convolutions.

    Returns:
        int: receptive field size in samples.
    """
    assert total_layers % num_cycles == 0
    layers_per_cycle = total_layers // num_cycles
    # Each layer widens the field by (kernel_size - 1) * its dilation.
    dilation_sum = sum(dilation(layer % layers_per_cycle) for layer in range(total_layers))
    return (kernel_size - 1) * dilation_sum + 1
def maybe_Normalize_weights(layer, weight_normalization=True, init=False, init_scale=1.):
    """Wrap `layer` with Weight Normalization when enabled, else return it untouched.

    Args:
        layer: tf layers instance, the layer candidate for normalization.
        weight_normalization: Boolean, determines whether to normalize the layer.
        init: Boolean, determines if the current run is the data dependent
            initialization run.
        init_scale: Float, initialization scale of the data dependent
            initialization. Usually 1.

    Returns:
        The `WeightNorm`-wrapped layer, or `layer` itself when normalization
        is disabled.
    """
    if not weight_normalization:
        return layer
    return WeightNorm(layer, init, init_scale)
class WaveNet():
"""Tacotron-2 Wavenet Vocoder model.
"""
def __init__(self, hparams, init):
    """Build all WaveNet sub-layers (no graph tensors yet; see `initialize`).

    Args:
        hparams: hyper-parameter object (layers, stacks, channel sizes,
            upsampling configuration, ...).
        init: Boolean, True when this run is the data-dependent weight
            normalization initialization pass.
    """
    #Get hparams
    self._hparams = hparams

    if self.local_conditioning_enabled():
        # Local conditioning features are mel spectrograms.
        assert hparams.num_mels == hparams.cin_channels

    #Initialize model architecture
    assert hparams.layers % hparams.stacks == 0
    layers_per_stack = hparams.layers // hparams.stacks

    self.scalar_input = is_scalar_input(hparams.input_type)

    #first (embedding) convolution
    with tf.variable_scope('input_convolution'):
        # NOTE(review): both branches construct the exact same layer; the
        # scalar-input case presumably once differed -- confirm before merging.
        if self.scalar_input:
            self.first_conv = Conv1D1x1(hparams.residual_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                weight_normalization_init=init,
                weight_normalization_init_scale=hparams.wavenet_init_scale,
                name='input_convolution')
        else:
            self.first_conv = Conv1D1x1(hparams.residual_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                weight_normalization_init=init,
                weight_normalization_init_scale=hparams.wavenet_init_scale,
                name='input_convolution')

    #Residual Blocks
    self.residual_layers = []
    for layer in range(hparams.layers):
        # Dilation doubles within a stack: 1, 2, 4, ... then restarts.
        self.residual_layers.append(ResidualConv1DGLU(
            hparams.residual_channels, hparams.gate_channels,
            kernel_size=hparams.kernel_size,
            skip_out_channels=hparams.skip_out_channels,
            use_bias=hparams.use_bias,
            dilation_rate=2**(layer % layers_per_stack),
            dropout=hparams.wavenet_dropout,
            cin_channels=hparams.cin_channels,
            gin_channels=hparams.gin_channels,
            weight_normalization=hparams.wavenet_weight_normalization,
            init=init,
            init_scale=hparams.wavenet_init_scale,
            residual_legacy=hparams.residual_legacy,
            name='ResidualConv1DGLU_{}'.format(layer)))

    #Final (skip) convolutions
    with tf.variable_scope('skip_convolutions'):
        self.last_conv_layers = [
            ReluActivation(name='final_conv_relu1'),
            Conv1D1x1(hparams.skip_out_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                weight_normalization_init=init,
                weight_normalization_init_scale=hparams.wavenet_init_scale,
                name='final_convolution_1'),
            ReluActivation(name='final_conv_relu2'),
            Conv1D1x1(hparams.out_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                weight_normalization_init=init,
                weight_normalization_init_scale=hparams.wavenet_init_scale,
                name='final_convolution_2'),]

    #Global conditionning embedding
    if hparams.gin_channels > 0 and hparams.use_speaker_embedding:
        assert hparams.n_speakers is not None
        self.embed_speakers = Embedding(
            hparams.n_speakers, hparams.gin_channels, std=0.1, name='gc_embedding')
        self.embedding_table = self.embed_speakers.embedding_table
    else:
        self.embed_speakers = None

    # Every layer that must be switched between train/eval mode (see set_mode).
    self.all_convs = [self.first_conv] + self.residual_layers + self.last_conv_layers

    #Upsample conv net
    if self.local_conditioning_enabled():
        self.upsample_conv = []
        if hparams.upsample_type == 'NearestNeighbor':
            #Nearest neighbor upsampling (non-learnable)
            self.upsample_conv.append(NearestNeighborUpsample(strides=(1, audio.get_hop_size(hparams))))
        else:
            #Learnable upsampling layers
            for i, s in enumerate(hparams.upsample_scales):
                with tf.variable_scope('local_conditioning_upsampling_{}'.format(i+1)):
                    if hparams.upsample_type == '2D':
                        convt = ConvTranspose2D(1, (hparams.freq_axis_kernel_size, s),
                            padding='same', strides=(1, s), NN_init=hparams.NN_init, NN_scaler=hparams.NN_scaler,
                            up_layers=len(hparams.upsample_scales), name='ConvTranspose2D_layer_{}'.format(i))
                    elif hparams.upsample_type == '1D':
                        convt = ConvTranspose1D(hparams.cin_channels, (s, ),
                            padding='same', strides=(s, ), NN_init=hparams.NN_init, NN_scaler=hparams.NN_scaler,
                            up_layers=len(hparams.upsample_scales), name='ConvTranspose1D_layer_{}'.format(i))
                    elif hparams.upsample_type == 'Resize':
                        convt = ResizeConvolution(1, (hparams.freq_axis_kernel_size, s),
                            padding='same', strides=(1, s), NN_init=hparams.NN_init, NN_scaler=hparams.NN_scaler,
                            up_layers=len(hparams.upsample_scales), name='ResizeConvolution_layer_{}'.format(i))
                    else:
                        assert hparams.upsample_type == 'SubPixel'
                        convt = SubPixelConvolution(1, (hparams.freq_axis_kernel_size, 3),
                            padding='same', strides=(1, s), NN_init=hparams.NN_init, NN_scaler=hparams.NN_scaler,
                            up_layers=len(hparams.upsample_scales), name='SubPixelConvolution_layer_{}'.format(i))

                    self.upsample_conv.append(maybe_Normalize_weights(convt,
                        hparams.wavenet_weight_normalization, init, hparams.wavenet_init_scale))

                    if hparams.upsample_activation == 'LeakyRelu':
                        self.upsample_conv.append(LeakyReluActivation(alpha=hparams.leaky_alpha,
                            name='upsample_leaky_relu_{}'.format(i+1)))
                    elif hparams.upsample_activation == 'Relu':
                        self.upsample_conv.append(ReluActivation(name='upsample_relu_{}'.format(i+1)))
                    else:
                        assert hparams.upsample_activation == None

        self.all_convs += self.upsample_conv

    # Number of past samples each prediction can see.
    self.receptive_field = receptive_field_size(hparams.layers,
        hparams.stacks, hparams.kernel_size)
def set_mode(self, is_training):
    """Propagate the training/inference flag to every layer in `all_convs`.

    Layers without a ``set_mode`` method (e.g. plain activation wrappers)
    are skipped silently.
    """
    for layer in self.all_convs:
        try:
            layer.set_mode(is_training)
        except AttributeError:
            # Layer has no mode to set; nothing to do.
            pass
def initialize(self, y, c, g, input_lengths, x=None, synthesis_length=None, test_inputs=None, split_infos=None):
    '''Initialize wavenet graph for train, eval and test cases.

    Mode is inferred from the arguments: training when `x` is given,
    evaluation when only `y` is given, synthesis otherwise. Inputs are
    split across GPU towers; each tower builds either a teacher-forced
    `step` (training) or an autoregressive `incremental` pass (eval/synth).

    Args:
        y: target waveform batch (None in synthesis mode).
        c: local conditioning features (mel spectrograms) or None.
        g: global conditioning (speaker) ids/features or None.
        input_lengths: per-example lengths of `y`/`x`.
        x: teacher-forcing input waveforms (training mode only).
        synthesis_length: number of samples to generate when `c` is None.
        test_inputs: optional teacher-forcing inputs for debugging synthesis.
        split_infos: unused here -- NOTE(review): confirm whether callers rely on it.
    '''
    hparams = self._hparams
    self.is_training = x is not None
    self.is_evaluating = not self.is_training and y is not None
    #Set all convolutions to corresponding mode
    self.set_mode(self.is_training)

    split_device = '/cpu:0' if self._hparams.wavenet_num_gpus > 1 or self._hparams.split_on_cpu else '/gpu:0'
    with tf.device(split_device):
        hp = self._hparams
        # NOTE(review): lout_int / lout_float are never used in this method.
        lout_int = [tf.int32] * hp.wavenet_num_gpus
        lout_float = [tf.float32] * hp.wavenet_num_gpus

        # Split every input across towers; replicate None placeholders.
        tower_input_lengths = tf.split(input_lengths, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if input_lengths is not None else [input_lengths] * hp.wavenet_num_gpus

        tower_y = tf.split(y, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if y is not None else [y] * hp.wavenet_num_gpus
        tower_x = tf.split(x, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if x is not None else [x] * hp.wavenet_num_gpus
        tower_c = tf.split(c, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if self.local_conditioning_enabled() else [None] * hp.wavenet_num_gpus
        tower_g = tf.split(g, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if self.global_conditioning_enabled() else [None] * hp.wavenet_num_gpus
        tower_test_inputs = tf.split(test_inputs, num_or_size_splits=hp.wavenet_num_gpus, axis=0) if test_inputs is not None else [test_inputs] * hp.wavenet_num_gpus

    # Per-tower collections consumed by add_loss / training & eval feeders.
    self.tower_y_hat_q = []
    self.tower_y_hat_train = []
    self.tower_y = []
    self.tower_input_lengths = []
    self.tower_means = []
    self.tower_log_scales = []
    self.tower_y_hat_log = []
    self.tower_y_log = []
    self.tower_c = []
    self.tower_y_eval = []
    self.tower_eval_length = []
    self.tower_y_hat = []
    self.tower_y_target = []
    self.tower_eval_c = []
    self.tower_mask = []
    self.tower_upsampled_local_features = []
    self.tower_eval_upsampled_local_features = []
    self.tower_synth_upsampled_local_features = []

    log('Initializing Wavenet model.  Dimensions (? = dynamic shape): ')
    log('  Train mode:                {}'.format(self.is_training))
    log('  Eval mode:                 {}'.format(self.is_evaluating))
    log('  Synthesis mode:            {}'.format(not (self.is_training or self.is_evaluating)))

    #1. Declare GPU devices
    gpus = ['/gpu:{}'.format(i) for i in range(hp.wavenet_num_gpus)]
    for i in range(hp.wavenet_num_gpus):
        with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
            with tf.variable_scope('inference') as scope:
                log('  device:                    {}'.format(i))
                #Training
                if self.is_training:
                    batch_size = tf.shape(x)[0]
                    #[batch_size, time_length, 1]
                    self.tower_mask.append(self.get_mask(tower_input_lengths[i], maxlen=tf.shape(tower_x[i])[-1])) #To be used in loss computation
                    #[batch_size, channels, time_length]
                    y_hat_train = self.step(tower_x[i], tower_c[i], tower_g[i], softmax=False) #softmax is automatically computed inside softmax_cross_entropy if needed

                    if is_mulaw_quantize(hparams.input_type):
                        #[batch_size, time_length, channels]
                        self.tower_y_hat_q.append(tf.transpose(y_hat_train, [0, 2, 1]))

                    self.tower_y_hat_train.append(y_hat_train)
                    self.tower_y.append(tower_y[i])
                    self.tower_input_lengths.append(tower_input_lengths[i])

                    #Add mean and scale stats if using Guassian distribution output (there would be too many logistics if using MoL)
                    if self._hparams.out_channels == 2:
                        self.tower_means.append(y_hat_train[:, 0, :])
                        self.tower_log_scales.append(y_hat_train[:, 1, :])
                    else:
                        self.tower_means.append(None)

                    #Graph extension for log saving
                    #[batch_size, time_length]
                    shape_control = (batch_size, tf.shape(tower_x[i])[-1], 1)
                    with tf.control_dependencies([tf.assert_equal(tf.shape(tower_y[i]), shape_control)]):
                        y_log = tf.squeeze(tower_y[i], [-1])
                        if is_mulaw_quantize(hparams.input_type):
                            self.tower_y[i] = y_log

                    y_hat_log = tf.cond(tf.equal(tf.rank(y_hat_train), 4),
                        lambda: tf.squeeze(y_hat_train, [-1]),
                        lambda: y_hat_train)
                    y_hat_log = tf.reshape(y_hat_log, [batch_size, hparams.out_channels, -1])

                    if is_mulaw_quantize(hparams.input_type):
                        #[batch_size, time_length]
                        y_hat_log = tf.argmax(tf.nn.softmax(y_hat_log, axis=1), 1)

                        y_hat_log = util.inv_mulaw_quantize(y_hat_log, hparams.quantize_channels)
                        y_log = util.inv_mulaw_quantize(y_log, hparams.quantize_channels)
                    else:
                        #[batch_size, time_length]
                        if hparams.out_channels == 2:
                            y_hat_log = sample_from_gaussian(
                                y_hat_log, log_scale_min_gauss=hparams.log_scale_min_gauss)
                        else:
                            y_hat_log = sample_from_discretized_mix_logistic(
                                y_hat_log, log_scale_min=hparams.log_scale_min)

                        if is_mulaw(hparams.input_type):
                            y_hat_log = util.inv_mulaw(y_hat_log, hparams.quantize_channels)
                            y_log = util.inv_mulaw(y_log, hparams.quantize_channels)

                    self.tower_y_hat_log.append(y_hat_log)
                    self.tower_y_log.append(y_log)
                    self.tower_c.append(tower_c[i])
                    self.tower_upsampled_local_features.append(self.upsampled_local_features)

                    log('  inputs:                    {}'.format(tower_x[i].shape))
                    if self.local_conditioning_enabled():
                        log('  local_condition:           {}'.format(tower_c[i].shape))
                    if self.has_speaker_embedding():
                        log('  global_condition:          {}'.format(tower_g[i].shape))
                    log('  targets:                   {}'.format(y_log.shape))
                    log('  outputs:                   {}'.format(y_hat_log.shape))

                #evaluating
                elif self.is_evaluating:
                    #[time_length, ]
                    # Evaluation generates a single example (index 0 of the tower batch).
                    idx = 0
                    length = tower_input_lengths[i][idx]
                    y_target = tf.reshape(tower_y[i][idx], [-1])[:length]
                    test_inputs = tf.reshape(y_target, [1, -1, 1]) if not hparams.wavenet_natural_eval else None

                    if tower_c[i] is not None:
                        tower_c[i] = tf.expand_dims(tower_c[i][idx, :, :length], axis=0)
                        with tf.control_dependencies([tf.assert_equal(tf.rank(tower_c[i]), 3)]):
                            tower_c[i] = tf.identity(tower_c[i], name='eval_assert_c_rank_op')

                    if tower_g[i] is not None:
                        tower_g[i] = tf.expand_dims(tower_g[i][idx], axis=0)

                    batch_size = tf.shape(tower_c[i])[0]

                    #Start silence frame
                    if is_mulaw_quantize(hparams.input_type):
                        initial_value = mulaw_quantize(0, hparams.quantize_channels)
                    elif is_mulaw(hparams.input_type):
                        initial_value = mulaw(0.0, hparams.quantize_channels)
                    else:
                        initial_value = 0.0

                    #[channels, ]
                    if is_mulaw_quantize(hparams.input_type):
                        initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
                        initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
                    else:
                        initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value

                    #Fast eval
                    y_hat = self.incremental(initial_input, c=tower_c[i], g=tower_g[i], time_length=length, test_inputs=test_inputs,
                        softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)

                    #Save targets and length for eval loss computation
                    if is_mulaw_quantize(hparams.input_type):
                        self.tower_y_eval.append(tf.reshape(y[idx], [1, -1])[:, :length])
                    else:
                        self.tower_y_eval.append(tf.expand_dims(y[idx], axis=0)[:, :length, :])
                    self.tower_eval_length.append(length)

                    # Decode predictions/targets back to the waveform domain for logging.
                    if is_mulaw_quantize(hparams.input_type):
                        y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [-1])
                        y_hat = inv_mulaw_quantize(y_hat, hparams.quantize_channels)
                        y_target = inv_mulaw_quantize(y_target, hparams.quantize_channels)
                    elif is_mulaw(hparams.input_type):
                        y_hat = inv_mulaw(tf.reshape(y_hat, [-1]), hparams.quantize_channels)
                        y_target = inv_mulaw(y_target, hparams.quantize_channels)
                    else:
                        y_hat = tf.reshape(y_hat, [-1])

                    self.tower_y_hat.append(y_hat)
                    self.tower_y_target.append(y_target)
                    self.tower_eval_c.append(tower_c[i][idx])
                    self.tower_eval_upsampled_local_features.append(self.upsampled_local_features[idx])

                    if self.local_conditioning_enabled():
                        log('  local_condition:           {}'.format(tower_c[i].shape))
                    if self.has_speaker_embedding():
                        log('  global_condition:          {}'.format(tower_g[i].shape))
                    log('  targets:                   {}'.format(y_target.shape))
                    log('  outputs:                   {}'.format(y_hat.shape))

                #synthesizing
                else:
                    batch_size = tf.shape(tower_c[i])[0]
                    if c is None:
                        assert synthesis_length is not None
                    else:
                        #[batch_size, local_condition_time, local_condition_dimension(num_mels)]
                        message = ('Expected 3 dimension shape [batch_size(1), time_length, {}] for local condition features but found {}'.format(
                            hparams.cin_channels, tower_c[i].shape))
                        with tf.control_dependencies([tf.assert_equal(tf.rank(tower_c[i]), 3, message=message)]):
                            tower_c[i] = tf.identity(tower_c[i], name='synthesis_assert_c_rank_op')

                        Tc = tf.shape(tower_c[i])[1]
                        upsample_factor = audio.get_hop_size(self._hparams)

                        #Overwrite length with respect to local condition features
                        synthesis_length = Tc * upsample_factor

                        #[batch_size, local_condition_dimension, local_condition_time]
                        #time_length will be corrected using the upsample network
                        tower_c[i] = tf.transpose(tower_c[i], [0, 2, 1])

                    if tower_g[i] is not None:
                        assert tower_g[i].shape == (batch_size, 1)

                    #Start silence frame
                    if is_mulaw_quantize(hparams.input_type):
                        initial_value = mulaw_quantize(0, hparams.quantize_channels)
                    elif is_mulaw(hparams.input_type):
                        initial_value = mulaw(0.0, hparams.quantize_channels)
                    else:
                        initial_value = 0.0

                    if is_mulaw_quantize(hparams.input_type):
                        assert initial_value >= 0 and initial_value < hparams.quantize_channels
                        initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
                        initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
                    else:
                        initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value

                    y_hat = self.incremental(initial_input, c=tower_c[i], g=tower_g[i], time_length=synthesis_length, test_inputs=tower_test_inputs[i],
                        softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)

                    # Decode generated distribution parameters/classes back to waveform values.
                    if is_mulaw_quantize(hparams.input_type):
                        y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [batch_size, -1])
                        y_hat = util.inv_mulaw_quantize(y_hat, hparams.quantize_channels)
                    elif is_mulaw(hparams.input_type):
                        y_hat = util.inv_mulaw(tf.reshape(y_hat, [batch_size, -1]), hparams.quantize_channels)
                    else:
                        y_hat = tf.reshape(y_hat, [batch_size, -1])

                    self.tower_y_hat.append(y_hat)
                    self.tower_synth_upsampled_local_features.append(self.upsampled_local_features)

                    if self.local_conditioning_enabled():
                        log('  local_condition:           {}'.format(tower_c[i].shape))
                    if self.has_speaker_embedding():
                        log('  global_condition:          {}'.format(tower_g[i].shape))
                    log('  outputs:                   {}'.format(y_hat.shape))

    self.variables = tf.trainable_variables()
    log('  Receptive Field:           ({} samples / {:.1f} ms)'.format(self.receptive_field, self.receptive_field / hparams.sample_rate * 1000.))

    #1_000_000 is causing syntax problems for some people?! Python please :)
    log('  WaveNet Parameters:        {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.variables]) / 1000000))

    self.ema = tf.train.ExponentialMovingAverage(decay=hparams.wavenet_ema_decay)
def add_loss(self):
    '''Adds loss computation to the graph. Supposes that initialize function has already been called.

    Selects the loss matching the output distribution: masked cross
    entropy for mu-law quantized (categorical) outputs, Gaussian MLE when
    out_channels == 2, otherwise discretized mixture of logistics. Tower
    losses are stored in `self.tower_loss` and averaged into `self.loss`
    (training) or `self.eval_loss` (evaluation).
    '''
    self.tower_loss = []
    total_loss = 0
    gpus = ['/gpu:{}'.format(i) for i in range(self._hparams.wavenet_num_gpus)]

    for i in range(self._hparams.wavenet_num_gpus):
        with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
            with tf.variable_scope('loss') as scope:
                if self.is_training:
                    if is_mulaw_quantize(self._hparams.input_type):
                        # Predictions/targets are shifted by one step (predict t+1 from <= t).
                        tower_loss = MaskedCrossEntropyLoss(self.tower_y_hat_q[i][:, :-1, :], self.tower_y[i][:, 1:], mask=self.tower_mask[i])
                    else:
                        if self._hparams.out_channels == 2:
                            tower_loss = GaussianMaximumLikelihoodEstimation(self.tower_y_hat_train[i][:, :, :-1], self.tower_y[i][:, 1:, :],
                                hparams=self._hparams, mask=self.tower_mask[i])
                        else:
                            tower_loss = DiscretizedMixtureLogisticLoss(self.tower_y_hat_train[i][:, :, :-1], self.tower_y[i][:, 1:, :],
                                hparams=self._hparams, mask=self.tower_mask[i])

                elif self.is_evaluating:
                    if is_mulaw_quantize(self._hparams.input_type):
                        tower_loss = MaskedCrossEntropyLoss(self.tower_y_hat_eval[i], self.tower_y_eval[i], lengths=[self.tower_eval_length[i]])
                    else:
                        if self._hparams.out_channels == 2:
                            tower_loss = GaussianMaximumLikelihoodEstimation(self.tower_y_hat_eval[i], self.tower_y_eval[i],
                                hparams=self._hparams, lengths=[self.tower_eval_length[i]])
                        else:
                            tower_loss = DiscretizedMixtureLogisticLoss(self.tower_y_hat_eval[i], self.tower_y_eval[i],
                                hparams=self._hparams, lengths=[self.tower_eval_length[i]])

                else:
                    raise RuntimeError('Model not in train/eval mode but computing loss: Where did this go wrong?')

                #Compute final loss
                self.tower_loss.append(tower_loss)
                total_loss += tower_loss

    if self.is_training:
        self.loss = total_loss / self._hparams.wavenet_num_gpus
    else:
        self.eval_loss = total_loss / self._hparams.wavenet_num_gpus
def add_optimizer(self, global_step):
    '''Adds optimizer to the graph. Supposes that initialize function has already been called.

    Stages: (1) lr schedule + Adam on `grad_device`; (2) per-tower gradient
    computation; (3) cross-tower averaging and optional clipping; (4)
    `apply_gradients` chained into an EMA update of all trainable
    variables. Run `self.optimize` to perform one training step.

    Args:
        global_step: scalar step counter Variable used by the lr schedules
            and incremented by `apply_gradients`.
    '''
    hp = self._hparams
    tower_gradients = []

    # 1. Declare GPU devices
    gpus = ['/gpu:{}'.format(i) for i in range(hp.wavenet_num_gpus)]
    # NOTE(review): uses hp.tacotron_num_gpus rather than wavenet_num_gpus --
    # possibly a copy/paste from the Tacotron optimizer; confirm.
    grad_device = '/cpu:0' if hp.tacotron_num_gpus > 1 else gpus[0]

    with tf.device(grad_device):
        with tf.variable_scope('optimizer'):
            #Create lr schedule
            if hp.wavenet_lr_schedule == 'noam':
                learning_rate = self._noam_learning_rate_decay(hp.wavenet_learning_rate,
                    global_step,
                    warmup_steps=hp.wavenet_warmup)
            else:
                assert hp.wavenet_lr_schedule == 'exponential'
                learning_rate = self._exponential_learning_rate_decay(hp.wavenet_learning_rate,
                    global_step,
                    hp.wavenet_decay_rate,
                    hp.wavenet_decay_steps)

            #Adam optimization
            self.learning_rate = learning_rate
            optimizer = tf.train.AdamOptimizer(learning_rate, hp.wavenet_adam_beta1,
                hp.wavenet_adam_beta2, hp.wavenet_adam_epsilon)

    # 2. Compute Gradient
    for i in range(hp.wavenet_num_gpus):
        #Device placemenet: parameters on cpu, gradients on gpu i.
        with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/cpu:0', worker_device=gpus[i])):
            with tf.variable_scope('optimizer') as scope:
                gradients = optimizer.compute_gradients(self.tower_loss[i])
                tower_gradients.append(gradients)

    # 3. Average Gradient
    with tf.device(grad_device):
        avg_grads = []
        variables = []
        for grad_and_vars in zip(*tower_gradients):
            # each_grads_vars = ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
            if grad_and_vars[0][0] is not None:
                grads = []
                for g, _ in grad_and_vars:
                    expanded_g = tf.expand_dims(g, 0)
                    #Append on a "tower" dimension which we will average over below.
                    grads.append(expanded_g)

                #Average over the 'tower' dimension.
                grad = tf.concat(axis=0, values=grads)
                grad = tf.reduce_mean(grad, 0)
            else:
                # No gradient produced for this variable; propagate None.
                grad = grad_and_vars[0][0]

            # Variables are shared across towers; the first tower's reference suffices.
            v = grad_and_vars[0][1]
            avg_grads.append(grad)
            variables.append(v)

        self.gradients = avg_grads
        #Gradients clipping
        if hp.wavenet_clip_gradients:
            #Clip each gradient by a [min, max] range of values and its norm by [0, max_norm_value]
            clipped_grads = []
            for g in avg_grads:
                if g is not None:
                    clipped_g = tf.clip_by_norm(g, hp.wavenet_gradient_max_norm)
                    clipped_g = tf.clip_by_value(clipped_g, -hp.wavenet_gradient_max_value, hp.wavenet_gradient_max_value)
                    clipped_grads.append(clipped_g)
                else:
                    clipped_grads.append(g)
        else:
            clipped_grads = avg_grads

        # Ensure pending UPDATE_OPS run before the weight update.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            adam_optimize = optimizer.apply_gradients(zip(clipped_grads, variables),
                global_step=global_step)

        #Add exponential moving average
        #https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
        #Use adam optimization process as a dependency
        with tf.control_dependencies([adam_optimize]):
            #Create the shadow variables and add ops to maintain moving averages
            #Also updates moving averages after each update step
            #This is the optimize call instead of traditional adam_optimize one.
            assert set(self.variables) == set(variables)  #Verify all trainable variables are being averaged
            self.optimize = self.ema.apply(variables)
def _noam_learning_rate_decay(self, init_lr, global_step, warmup_steps=4000.0):
# Noam scheme from tensor2tensor:
step = tf.cast(global_step + 1, dtype=tf.float32)
return tf.maximum(init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5, step**-0.5), 1e-4)
def _exponential_learning_rate_decay(self, init_lr, global_step,
decay_rate=0.5,
decay_steps=300000):
#Compute natural exponential decay
lr = tf.train.exponential_decay(init_lr,
global_step,
decay_steps,
decay_rate,
name='wavenet_lr_exponential_decay')
return lr
def get_mask(self, input_lengths, maxlen=None):
expand = not is_mulaw_quantize(self._hparams.input_type)
mask = sequence_mask(input_lengths, max_len=maxlen, expand=expand)
if is_mulaw_quantize(self._hparams.input_type):
return mask[:, 1:]
return mask[:, 1:, :]
#Sanity check functions
def has_speaker_embedding(self):
return self.embed_speakers is not None
def local_conditioning_enabled(self):
return self._hparams.cin_channels > 0
def global_conditioning_enabled(self):
return self._hparams.gin_channels > 0
    def step(self, x, c=None, g=None, softmax=False):
        """Forward step
        Args:
            x: Tensor of shape [batch_size, channels, time_length], One-hot encoded audio signal.
            c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features.
            g: Tensor of shape [batch_size, gin_channels, 1] or Ids of shape [batch_size, 1],
                Global conditioning features.
                Note: set hparams.use_speaker_embedding to False to disable embedding layer and
                use external One-hot encoded features.
            softmax: Boolean, Whether to apply softmax.
        Returns:
            a Tensor of shape [batch_size, out_channels, time_length]
        """
        #[batch_size, channels, time_length] -> [batch_size, time_length, channels]
        batch_size = tf.shape(x)[0]
        time_length = tf.shape(x)[-1]
        if g is not None:
            if self.embed_speakers is not None:
                #[batch_size, 1] ==> [batch_size, 1, gin_channels]
                g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
                #[batch_size, gin_channels, 1]
                with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
                    g = tf.transpose(g, [0, 2, 1])
        #Expand global conditioning features to all time steps
        # NOTE(review): when g is None, _expand_global_features presumably passes
        # None through so the residual blocks skip global conditioning -- verify.
        g_bct = _expand_global_features(batch_size, time_length, g, data_format='BCT')
        if c is not None:
            # Axis to add before upsampling depends on whether the upsampling
            # stack is built from 2D, 1D or resize-style convolutions.
            if self._hparams.upsample_type == '2D':
                #[batch_size, 1, cin_channels, time_length]
                expand_dim = 1
            elif self._hparams.upsample_type == '1D':
                #[batch_size, cin_channels, 1, time_length]
                expand_dim = 2
            else:
                assert self._hparams.upsample_type in ('Resize', 'SubPixel', 'NearestNeighbor')
                #[batch_size, cin_channels, time_length, 1]
                expand_dim = 3
            c = tf.expand_dims(c, axis=expand_dim)
            for transposed_conv in self.upsample_conv:
                c = transposed_conv(c)
            #[batch_size, cin_channels, time_length]
            c = tf.squeeze(c, [expand_dim])
            # Runtime check: upsampled conditioning must align with the audio length.
            with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], tf.shape(x)[-1])]):
                c = tf.identity(c, name='control_c_and_x_shape')
            self.upsampled_local_features = c
        #Feed data to network
        x = self.first_conv(x)
        skips = None
        for conv in self.residual_layers:
            x, h = conv(x, c=c, g=g_bct)
            if skips is None:
                skips = h
            else:
                skips = skips + h
                if self._hparams.legacy:
                    #Legacy mode rescales accumulated skip connections to keep variance stable.
                    skips = skips * np.sqrt(0.5)
        x = skips
        for conv in self.last_conv_layers:
            x = conv(x)
        #Return probabilities when softmax is requested, raw logits otherwise.
        return tf.nn.softmax(x, axis=1) if softmax else x
    def incremental(self, initial_input, c=None, g=None,
                    time_length=100, test_inputs=None,
                    softmax=True, quantize=True, log_scale_min=-7.0, log_scale_min_gauss=-7.0):
        """Incremental forward step (autoregressive sampling via tf.while_loop)
        Inputs of shape [batch_size, channels, time_length] are reshaped to [batch_size, time_length, channels]
        Input of each time step is of shape [batch_size, 1, channels]
        Args:
            initial_input: Tensor of shape [batch_size, channels, 1], initial recurrence input.
            c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features
            g: Tensor of shape [batch_size, gin_channels, time_length] or [batch_size, gin_channels, 1]
                global conditioning features
            time_length: int, number of timesteps to generate
            test_inputs: Tensor, teacher forcing inputs (debug)
            softmax: Boolean, whether to apply softmax activation
            quantize: Whether to quantize softmax output before feeding to
                next time step input
            log_scale_min: float, log scale minimum value.
        Returns:
            Tensor of shape [batch_size, channels, time_length] or [batch_size, channels, 1]
            Generated one_hot encoded samples
        """
        batch_size = tf.shape(initial_input)[0]
        #Note: should reshape to [batch_size, time_length, channels]
        #not [batch_size, channels, time_length]
        if test_inputs is not None:
            # Normalize teacher-forcing inputs to [batch_size, time_length, channels].
            if self.scalar_input:
                if tf.shape(test_inputs)[1] == 1:
                    test_inputs = tf.transpose(test_inputs, [0, 2, 1])
            else:
                test_inputs = tf.cast(test_inputs, tf.int32)
                test_inputs = tf.one_hot(indices=test_inputs, depth=self._hparams.quantize_channels, dtype=tf.float32)
                test_inputs = tf.squeeze(test_inputs, [2])
                if tf.shape(test_inputs)[1] == self._hparams.out_channels:
                    test_inputs = tf.transpose(test_inputs, [0, 2, 1])
            batch_size = tf.shape(test_inputs)[0]
            if time_length is None:
                time_length = tf.shape(test_inputs)[1]
            else:
                time_length = tf.maximum(time_length, tf.shape(test_inputs)[1])
        #Global conditioning
        if g is not None:
            if self.embed_speakers is not None:
                g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
                #[batch_size, channels, 1]
                with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
                    g = tf.transpose(g, [0, 2, 1])
        # NOTE(review): when g (or c below) is None, self.g_btc / self.c must have
        # been initialized elsewhere (e.g. in __init__), otherwise body() would
        # raise AttributeError -- verify.
        self.g_btc = _expand_global_features(batch_size, time_length, g, data_format='BTC')
        #Local conditioning
        if c is not None:
            if self._hparams.upsample_type == '2D':
                #[batch_size, 1, cin_channels, time_length]
                expand_dim = 1
            elif self._hparams.upsample_type == '1D':
                #[batch_size, cin_channels, 1, time_length]
                expand_dim = 2
            else:
                assert self._hparams.upsample_type in ('Resize', 'SubPixel', 'NearestNeighbor')
                #[batch_size, cin_channels, time_length, 1]
                expand_dim = 3
            c = tf.expand_dims(c, axis=expand_dim)
            for upsample_conv in self.upsample_conv:
                c = upsample_conv(c)
            #[batch_size, channels, time_length]
            c = tf.squeeze(c, [expand_dim])
            with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], time_length)]):
                self.c = tf.transpose(c, [0, 2, 1])
            self.upsampled_local_features = c
        #Initialize loop variables
        if initial_input.shape[1] == self._hparams.out_channels:
            initial_input = tf.transpose(initial_input, [0, 2, 1])
        initial_time = tf.constant(0, dtype=tf.int32)
        # if test_inputs is not None:
        #     initial_input = tf.expand_dims(test_inputs[:, 0, :], axis=1)
        initial_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        initial_loss_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        #Only use convolutions queues for Residual Blocks main convolutions (only ones with kernel size 3 and dilations, all others are 1x1)
        initial_queues = [tf.zeros((batch_size, res_conv.layer.kw + (res_conv.layer.kw - 1) * (res_conv.layer.dilation_rate[0] - 1), self._hparams.residual_channels),
            name='convolution_queue_{}'.format(i+1)) for i, res_conv in enumerate(self.residual_layers)]
        def condition(time, unused_outputs_ta, unused_current_input, unused_loss_outputs_ta, unused_queues):
            # Loop while there are still timesteps to generate.
            return tf.less(time, time_length)
        def body(time, outputs_ta, current_input, loss_outputs_ta, queues):
            #conditioning features for single time step
            ct = None if self.c is None else tf.expand_dims(self.c[:, time, :], axis=1)
            gt = None if self.g_btc is None else tf.expand_dims(self.g_btc[:, time, :], axis=1)
            x = self.first_conv.incremental_step(current_input)
            skips = None
            new_queues = []
            for conv, queue in zip(self.residual_layers, queues):
                x, h, new_queue = conv.incremental_step(x, c=ct, g=gt, queue=queue)
                if self._hparams.legacy:
                    skips = h if skips is None else (skips + h) * np.sqrt(0.5)
                else:
                    skips = h if skips is None else (skips + h)
                new_queues.append(new_queue)
            x = skips
            for conv in self.last_conv_layers:
                try:
                    x = conv.incremental_step(x)
                except AttributeError: #When calling Relu activation
                    x = conv(x)
            #Save x for eval loss computation
            loss_outputs_ta = loss_outputs_ta.write(time, tf.squeeze(x, [1])) #squeeze time_length dimension (=1)
            #Generate next input by sampling
            if self.scalar_input:
                # Continuous output: sample from Gaussian (out_channels == 2)
                # or a discretized mixture of logistics otherwise.
                if self._hparams.out_channels == 2:
                    x = sample_from_gaussian(
                        tf.reshape(x, [batch_size, -1, 1]),
                        log_scale_min_gauss=log_scale_min_gauss)
                else:
                    x = sample_from_discretized_mix_logistic(
                        tf.reshape(x, [batch_size, -1, 1]), log_scale_min=log_scale_min)
                next_input = tf.expand_dims(x, axis=-1) #Expand on the channels dimension
            else:
                x = tf.nn.softmax(tf.reshape(x, [batch_size, -1]), axis=1) if softmax \
                    else tf.reshape(x, [batch_size, -1])
                if quantize:
                    #[batch_size, 1]
                    sample = tf.multinomial(x, 1) #Pick a sample using x as probability (one for each batch)
                    #[batch_size, 1, quantize_channels] (time dimension extended by default)
                    x = tf.one_hot(sample, depth=self._hparams.quantize_channels)
                next_input = x
            if len(x.shape) == 3:
                x = tf.squeeze(x, [1])
            outputs_ta = outputs_ta.write(time, x)
            #Override input with ground truth (teacher forcing)
            if test_inputs is not None:
                next_input = tf.expand_dims(test_inputs[:, time, :], axis=1)
            # tf.Print logs generation progress as a graph side effect.
            time = tf.Print(time + 1, [time+1, time_length])
            #output = x (maybe next input)
            # if test_inputs is not None:
            #     #override next_input with ground truth
            #     next_input = tf.expand_dims(test_inputs[:, time, :], axis=1)
            return (time, outputs_ta, next_input, loss_outputs_ta, new_queues)
        res = tf.while_loop(
            condition,
            body,
            loop_vars=[
                initial_time, initial_outputs_ta, initial_input, initial_loss_outputs_ta, initial_queues
            ],
            parallel_iterations=32,
            swap_memory=self._hparams.wavenet_swap_with_cpu)
        outputs_ta = res[1]
        #[time_length, batch_size, channels]
        outputs = outputs_ta.stack()
        #Save eval prediction for eval loss computation
        eval_outputs = res[3].stack()
        self.tower_y_hat_eval = []
        if is_mulaw_quantize(self._hparams.input_type):
            self.tower_y_hat_eval.append(tf.transpose(eval_outputs, [1, 0, 2]))
        else:
            self.tower_y_hat_eval.append(tf.transpose(eval_outputs, [1, 2, 0]))
        #[batch_size, channels, time_length]
        return tf.transpose(outputs, [1, 2, 0])
def clear_queue(self):
self.first_conv.clear_queue()
for f in self.conv_layers:
f.clear_queue()
for f in self.last_conv_layers:
try:
f.clear_queue()
except AttributeError:
pass
| 8,819 | 0 | 264 |
d4cd7ebf8fa512c108f7df03d7c875dde967fc1e | 2,022 | py | Python | Recipe_Core_App/models/custom_user.py | ziibii88/Recipe_API_Project | 486da89a1c71554930dbb5a535a9a5c27d26667b | [
"MIT"
] | 2 | 2020-05-12T03:36:33.000Z | 2020-11-24T08:22:16.000Z | Recipe_Core_App/models/custom_user.py | ziibii88/Recipe_API_Project | 486da89a1c71554930dbb5a535a9a5c27d26667b | [
"MIT"
] | 10 | 2021-03-30T13:20:42.000Z | 2022-03-12T00:29:05.000Z | Recipe_Core_App/models/custom_user.py | ziibii88/Recipe_API_Project | 486da89a1c71554930dbb5a535a9a5c27d26667b | [
"MIT"
] | null | null | null | """Custom User Model"""
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.db import models
class CustomUserManager(BaseUserManager):
    """Custom User Manager overridden from BaseUserManager for CustomUser.

    Identifies accounts by a unique email address instead of a username.
    NOTE(review): no public create_user/create_superuser wrappers appear in
    this copy; Django's createsuperuser command expects them -- verify.
    """
    def _create_user(self, email, password=None, **extra_fields):
        """Creates and returns a new user using an email address"""
        if not email: # check for an empty email
            raise ValueError("User must set an email address")
        else: # normalizes the provided email
            email = self.normalize_email(email)
        # create user
        user = self.model(email=email, **extra_fields)
        user.set_password(password) # hashes/encrypts password
        user.save(using=self._db) # safe for multiple databases
        return user
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """Custom User model that supports using email instead of username"""
    # Unique login identifier.
    # NOTE(review): null=True on a unique EmailField (and on the boolean
    # flags below) is unusual for Django models -- confirm intent.
    email = models.EmailField(max_length=255, unique=True, blank=False,
                              null=True)
    # Optional display name.
    name = models.CharField(max_length=255, blank=True, null=True)
    is_staff = models.BooleanField('Staff status', default=False, null=True)
    is_active = models.BooleanField('Active', default=True, null=True)
    date_joined = models.DateTimeField(auto_now_add=True, null=True)
    objects = CustomUserManager() # uses the custom manager
    USERNAME_FIELD = 'email' # overrides username to email field
| 42.125 | 76 | 0.696835 | """Custom User Model"""
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.db import models
class CustomUserManager(BaseUserManager):
    """Custom User Manager overridden from BaseUserManager for CustomUser"""
    def _create_user(self, email, password=None, **extra_fields):
        """Creates and returns a new user using an email address"""
        if not email: # check for an empty email
            raise ValueError("User must set an email address")
        else: # normalizes the provided email
            email = self.normalize_email(email)
        # create user
        user = self.model(email=email, **extra_fields)
        user.set_password(password) # hashes/encrypts password
        user.save(using=self._db) # safe for multiple databases
        return user
    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular (non-staff, non-superuser) account."""
        # set defaults
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password=None, **extra_fields):
        """Create and save an account with staff and superuser privileges."""
        # set defaults
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self._create_user(email, password, **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """Custom User model that supports using email instead of username"""
    # Unique login identifier.
    # NOTE(review): null=True on a unique EmailField (and on the boolean
    # flags below) is unusual for Django models -- confirm intent.
    email = models.EmailField(max_length=255, unique=True, blank=False,
                              null=True)
    # Optional display name.
    name = models.CharField(max_length=255, blank=True, null=True)
    is_staff = models.BooleanField('Staff status', default=False, null=True)
    is_active = models.BooleanField('Active', default=True, null=True)
    date_joined = models.DateTimeField(auto_now_add=True, null=True)
    objects = CustomUserManager() # uses the custom manager
    USERNAME_FIELD = 'email' # overrides username to email field
| 471 | 0 | 54 |
0ec5f780bd00d1bb32c388da1e6b8f06ae969ace | 30,621 | py | Python | train_valid.py | qbhan/pathembed | c21823529840593bf606e10696f5879e5adb51b2 | [
"MIT"
] | 1 | 2021-10-13T05:01:22.000Z | 2021-10-13T05:01:22.000Z | train_valid.py | qbhan/pathembed | c21823529840593bf606e10696f5879e5adb51b2 | [
"MIT"
] | null | null | null | train_valid.py | qbhan/pathembed | c21823529840593bf606e10696f5879e5adb51b2 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.utils
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
# for mixed precision
import torch.nn.utils as torch_utils
from torch.cuda.amp import autocast
from torch.cuda.amp import GradScaler
import matplotlib.pyplot as plt
import os
import numpy as np
import argparse
import csv
import random
from tqdm import tqdm
from utils import *
from kpcn import *
from kpal import *
from multiscale import *
from decomp import *
from path import *
from losses import *
from dataset import MSDenoiseDataset, init_data
# from test_cython import *
# L = 9 # number of convolutional layers
# n_kernels = 100 # number of kernels in each layer
# kernel_size = 5 # size of kernel (square)
# # input_channels = dataset[0]['X_diff'].shape[-1]
# hidden_channels = 100
permutation = [0, 3, 1, 2]
eps = 0.00316
parser = argparse.ArgumentParser(description='Train the model')
'''
Needed parameters
1. Data & Model specifications
device : which device will the data & model should be loaded
mode : which kind of model should it train
input_channel : input channel
hidden_channel : hidden channel
num_layer : number of layers / depth of models
'''
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--mode', default='kpcn')
parser.add_argument('--num_layers', default=9, type=int)
parser.add_argument('--input_channels', default=34, type=int)
parser.add_argument('--hidden_channels', default=100, type=int)
parser.add_argument('--kernel_size', default=5, type=int)
'''
2. Preprocessing specifications
eps
'''
parser.add_argument('--eps', default=0.00316, type=float)
'''
3. Training Specification
val : should it perform validation
early_stopping : should it perform early stopping
trainset : dataset for training
valset : dataset for validation
lr : learning rate
epoch : epoch
criterion : which loss function should it use
'''
parser.set_defaults(do_feature_dropout=False)
parser.add_argument('--do_feature_dropout', dest='do_feature_dropout', action='store_true')
parser.set_defaults(do_finetune=False)
parser.add_argument('--do_finetune', dest='do_finetune', action='store_true')
parser.add_argument('--use_llpm_buf', default=False, type=bool)
parser.set_defaults(do_val=False)
parser.add_argument('--do_val', dest='do_val', action='store_true')
parser.set_defaults(do_early_stopping=False)
parser.add_argument('--do_early_stopping', dest='do_early_stopping', action='store_true')
parser.add_argument('--data_dir')
parser.add_argument('--batch_size', default=8, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--manif_w', default=0.1, type=float)
parser.add_argument('--loss', default='L1')
save_dir = 'kpcn_manif_valid_fix'
writer = SummaryWriter('kpcn/'+save_dir)
if __name__ == '__main__':
main() | 45.297337 | 269 | 0.616048 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.utils
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
# for mixed precision
import torch.nn.utils as torch_utils
from torch.cuda.amp import autocast
from torch.cuda.amp import GradScaler
import matplotlib.pyplot as plt
import os
import numpy as np
import argparse
import csv
import random
from tqdm import tqdm
from utils import *
from kpcn import *
from kpal import *
from multiscale import *
from decomp import *
from path import *
from losses import *
from dataset import MSDenoiseDataset, init_data
# from test_cython import *
# L = 9 # number of convolutional layers
# n_kernels = 100 # number of kernels in each layer
# kernel_size = 5 # size of kernel (square)
# # input_channels = dataset[0]['X_diff'].shape[-1]
# hidden_channels = 100
permutation = [0, 3, 1, 2]
eps = 0.00316
parser = argparse.ArgumentParser(description='Train the model')
'''
Needed parameters
1. Data & Model specifications
device : which device will the data & model should be loaded
mode : which kind of model should it train
input_channel : input channel
hidden_channel : hidden channel
num_layer : number of layers / depth of models
'''
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--mode', default='kpcn')
parser.add_argument('--num_layers', default=9, type=int)
parser.add_argument('--input_channels', default=34, type=int)
parser.add_argument('--hidden_channels', default=100, type=int)
parser.add_argument('--kernel_size', default=5, type=int)
'''
2. Preprocessing specifications
eps
'''
parser.add_argument('--eps', default=0.00316, type=float)
'''
3. Training Specification
val : should it perform validation
early_stopping : should it perform early stopping
trainset : dataset for training
valset : dataset for validation
lr : learning rate
epoch : epoch
criterion : which loss function should it use
'''
parser.set_defaults(do_feature_dropout=False)
parser.add_argument('--do_feature_dropout', dest='do_feature_dropout', action='store_true')
parser.set_defaults(do_finetune=False)
parser.add_argument('--do_finetune', dest='do_finetune', action='store_true')
parser.add_argument('--use_llpm_buf', default=False, type=bool)
parser.set_defaults(do_val=False)
parser.add_argument('--do_val', dest='do_val', action='store_true')
parser.set_defaults(do_early_stopping=False)
parser.add_argument('--do_early_stopping', dest='do_early_stopping', action='store_true')
parser.add_argument('--data_dir')
parser.add_argument('--batch_size', default=8, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--manif_w', default=0.1, type=float)
parser.add_argument('--loss', default='L1')
save_dir = 'kpcn_manif_valid_fix'
writer = SummaryWriter('kpcn/'+save_dir)
def validation(models, dataloader, eps, criterion, device, epoch, use_llpm_buf, mode='kpcn'):
    """Run one validation pass of the KPCN denoiser.

    Evaluates diffuse/specular branches (optionally with the LLPM path-embedding
    networks), reconstructs the final radiance, logs sample image grids to
    TensorBoard, and returns per-batch-averaged losses:
    (diffuse, specular, final, relative-L2, path-diffuse, path-specular).

    NOTE(review): the divisors 4*len(dataloader) appear to assume a validation
    batch size of 4 -- confirm against the caller's DataLoader.
    """
    pass  # NOTE(review): stray no-op left over from editing; harmless.
    lossDiff = 0
    lossSpec = 0
    lossFinal = 0
    relL2Final = 0
    lossDiffPath = 0
    lossSpecPath = 0
    relL2 = RelativeMSE()
    # path_criterion = GlobalRelativeSimilarityLoss()
    path_criterion = FeatureMSE()
    # for batch_idx, data in enumerate(dataloader):
    batch_idx = 0
    if use_llpm_buf:
        diffPathNet, specPathNet = models['path_diffuse'].eval(), models['path_specular'].eval()
    diffuseNet, specularNet = models['diffuse'].eval(), models['specular'].eval()
    preDiffuseNet, preSpecularNet = models['preDiffuse'].eval(), models['preSpecular'].eval()
    with torch.no_grad():
        for batch in tqdm(dataloader, leave=False, ncols=70):
            # print(data.keys())
            # assert 'paths' in data
            # print('WORKING WITH PATH')
            # print(batch['kpcn_diffuse_in'].shape)
            if use_llpm_buf:
                # Run the pre-trained denoisers to get a per-pixel error map that
                # modulates the path-embedding features below.
                X_diff = batch['kpcn_diffuse_in'][:, :-1].to(device)
                Y_diff = batch['target_diffuse'].to(device)
                X_spec = batch['kpcn_specular_in'][:, :-1].to(device)
                Y_spec = batch['target_specular'].to(device)
                outputDiff = preDiffuseNet(X_diff)
                outputSpec = preSpecularNet(X_spec)
                Y_diff = torch.norm(crop_like(Y_diff, outputDiff), dim=1, keepdim=True)
                Y_spec = torch.norm(crop_like(Y_spec, outputSpec), dim=1, keepdim=True)
                outputDiff = torch.norm(outputDiff, dim=1, keepdim=True)
                outputSpec = torch.norm(outputSpec, dim=1, keepdim=True)
                # print(outputDiff.shape, outputSpec.shape)
                error_diff = torch.abs(outputDiff - Y_diff)
                error_spec = torch.abs(outputSpec - Y_spec)
                # print(error_diff.shape, error_spec.shape)
                # Pad the error maps back to the uncropped input size.
                # NOTE(review): 18 looks tied to the networks' receptive-field crop -- confirm.
                padding = (18, 18, 18, 18)
                error_diff = F.pad(error_diff, padding, "constant", 0)
                error_spec = F.pad(error_spec, padding, "constant", 0)
                # print(error_diff.shape, error_spec.shape)
                error_scale = 1.0
                paths = batch['paths'].to(device)
                p_buffer_diffuse, p_buffer_specular = diffPathNet(paths), specPathNet(paths)
                '''Feature Disentanglement'''
                #TODO
                _, _, c, _, _ = p_buffer_diffuse.shape
                assert c >= 2
                # Variance
                p_var_diffuse = p_buffer_diffuse.var(1).mean(1, keepdims=True)
                p_var_diffuse /= p_buffer_diffuse.shape[1]
                p_var_specular = p_buffer_specular.var(1).mean(1, keepdims=True)
                p_var_specular /= p_buffer_specular.shape[1]
                p_buffer_diffuse_in = p_buffer_diffuse.mean(1) * (error_scale * error_diff.expand(-1, 3, -1, -1))
                p_buffer_specular_in = p_buffer_specular.mean(1) * (error_scale * error_spec.expand(-1, 3, -1, -1))
                # make new batch: append path-embedding mean and variance as extra input channels
                batch = {
                    'target_total': batch['target_total'].to(device),
                    'target_diffuse': batch['target_diffuse'].to(device),
                    'target_specular': batch['target_specular'].to(device),
                    'kpcn_diffuse_in': torch.cat([batch['kpcn_diffuse_in'].to(device), p_buffer_diffuse_in, p_var_diffuse], 1),
                    'kpcn_specular_in': torch.cat([batch['kpcn_specular_in'].to(device), p_buffer_specular_in, p_var_specular], 1),
                    'kpcn_diffuse_buffer': batch['kpcn_diffuse_buffer'].to(device),
                    'kpcn_specular_buffer': batch['kpcn_specular_buffer'].to(device),
                    'kpcn_albedo': batch['kpcn_albedo'].to(device),
                }
            X_diff = batch['kpcn_diffuse_in'].to(device)
            Y_diff = batch['target_diffuse'].to(device)
            outputDiff = diffuseNet(X_diff)
            Y_diff = crop_like(Y_diff, outputDiff)
            lossDiff += criterion(outputDiff, Y_diff).item()
            X_spec = batch['kpcn_specular_in'].to(device)
            Y_spec = batch['target_specular'].to(device)
            outputSpec = specularNet(X_spec)
            Y_spec = crop_like(Y_spec, outputSpec)
            lossSpec += criterion(outputSpec, Y_spec).item()
            # calculate final ground truth error
            # Recombine: diffuse * albedo + exp(specular) - 1 (specular is log-encoded).
            albedo = batch['kpcn_albedo'].to(device)
            albedo = crop_like(albedo, outputDiff)
            outputFinal = outputDiff * (albedo + eps) + torch.exp(outputSpec) - 1.0
            Y_final = batch['target_total'].to(device)
            Y_final = crop_like(Y_final, outputFinal)
            lossFinal += criterion(outputFinal, Y_final).item()
            relL2Final += relL2(outputFinal, Y_final).item()
            if use_llpm_buf:
                # print(p_buffer_diffuse.shape, error_diff.shape)
                _, s, c, _, _ = p_buffer_diffuse.shape
                error_diff = error_diff.unsqueeze(1).expand(-1, s, 3, -1, -1)
                error_spec = error_spec.unsqueeze(1).expand(-1, s, 3, -1, -1)
                p_buffer_diffuse = p_buffer_diffuse * error_diff
                p_buffer_specular = p_buffer_specular * error_spec
                p_buffer_diffuse = crop_like(p_buffer_diffuse, outputDiff)
                loss_manif_diffuse = path_criterion(p_buffer_diffuse, Y_diff)
                p_buffer_specular = crop_like(p_buffer_specular, outputSpec)
                loss_manif_specular = path_criterion(p_buffer_specular, Y_spec)
                # NOTE(review): accumulated as tensors (no .item()) unlike the
                # other losses -- they stay on device; verify intended.
                lossDiffPath += loss_manif_diffuse
                lossSpecPath += loss_manif_specular
                # lossDiff += 0.1 * loss_manif_diffuse
                # lossSpec += 0.1 * loss_manif_specular
            # visualize one fixed batch (index 20) per epoch in TensorBoard
            if batch_idx == 20:
                inputFinal = batch['kpcn_diffuse_buffer'] * (batch['kpcn_albedo'] + eps) + torch.exp(batch['kpcn_specular_buffer']) - 1.0
                inputGrid = torchvision.utils.make_grid(inputFinal)
                writer.add_image('noisy patches e{}'.format(epoch+1), inputGrid)
                writer.add_image('noisy patches e{}'.format(str(epoch+1)+'_'+str(batch_idx)), inputGrid)
                outputGrid = torchvision.utils.make_grid(outputFinal)
                writer.add_image('denoised patches e{}'.format(str(epoch+1)+'_'+str(batch_idx)), outputGrid)
                # writer.add_image('denoised patches e{}'.format(epoch+1), outputGrid)
                cleanGrid = torchvision.utils.make_grid(Y_final)
                # writer.add_image('clean patches e{}'.format(epoch+1), cleanGrid)
                writer.add_image('clean patches e{}'.format(str(epoch+1)+'_'+str(batch_idx)), cleanGrid)
            batch_idx += 1
    return lossDiff/(4*len(dataloader)), lossSpec/(4*len(dataloader)), lossFinal/(4*len(dataloader)), relL2Final/(4*len(dataloader)), lossDiffPath/(4*len(dataloader)), lossSpecPath/(4*len(dataloader))
def train(mode,
device,
trainset,
validset,
eps,
L,
input_channels,
hidden_channels,
kernel_size,
epochs,
learning_rate,
loss,
do_early_stopping,
do_finetune,
use_llpm_buf,
manif_w
):
dataloader = DataLoader(trainset, batch_size=8, num_workers=1, pin_memory=False)
print(len(dataloader))
if validset is not None:
validDataloader = DataLoader(validset, batch_size=4, num_workers=1, pin_memory=False)
# instantiate networks
print(L, input_channels, hidden_channels, kernel_size, mode)
print(mode)
if mode == 'kpcn':
diffuseNet = KPCN(L, input_channels, hidden_channels, kernel_size).to(device)
specularNet = KPCN(L, input_channels, hidden_channels, kernel_size).to(device)
preDiffuseNet = KPCN(L, 34, hidden_channels, kernel_size).to(device)
preSpecularNet = KPCN(L, 34, hidden_channels, kernel_size).to(device)
# Path module
if use_llpm_buf:
diffPathNet = PathNet(trainset.pnet_in_size).to(device)
optimizerDiffPath = optim.Adam(diffPathNet.parameters(), lr=learning_rate, betas=(0.9, 0.99))
specPathNet = PathNet(trainset.pnet_in_size).to(device)
optimizerSpecPath = optim.Adam(specPathNet.parameters(), lr=learning_rate, betas=(0.9, 0.99))
path_criterion = GlobalRelativeSimilarityLoss()
checkpointDiffPath = torch.load('trained_model/kpcn_manif_w1/path_diff_e8.pt')
diffPathNet.load_state_dict(checkpointDiffPath['model_state_dict'])
optimizerDiffPath.load_state_dict(checkpointDiffPath['optimizer_state_dict'])
diffPathNet.train()
checkpointSpecPath = torch.load('trained_model/kpcn_manif_w1/path_spec_e8.pt')
specPathNet.load_state_dict(checkpointSpecPath['model_state_dict'])
optimizerSpecPath.load_state_dict(checkpointSpecPath['optimizer_state_dict'])
specPathNet.train()
# else
print(diffuseNet, "CUDA:", next(diffuseNet.parameters()).is_cuda)
print('# Parameter for diffuseNet : {}'.format(sum([p.numel() for p in diffuseNet.parameters()])))
print(specularNet, "CUDA:", next(specularNet.parameters()).is_cuda)
print('# Parameter for specularNet : {}'.format(sum([p.numel() for p in diffuseNet.parameters()])))
# print(summary(diffuseNet, input_size=(3, 128, 128)))
if loss == 'L1':
criterion = nn.L1Loss()
elif loss =='SMAPE':
criterion = SMAPE()
else:
print('Loss Not Supported')
return
print('LEARNING RATE : {}'.format(learning_rate))
optimizerDiff = optim.Adam(diffuseNet.parameters(), lr=learning_rate, betas=(0.9, 0.99))
optimizerSpec = optim.Adam(specularNet.parameters(), lr=learning_rate, betas=(0.9, 0.99))
# optimizerP = optim.Adam(specularNet.parameters(), lr=1e-4, betas=(0.9, 0.99))
# checkpointDiff = torch.load('trained_model/kpcn_manif_feat/diff_e13.pt')
# diffuseNet.load_state_dict(checkpointDiff['model_state_dict'])
# optimizerDiff.load_state_dict(checkpointDiff['optimizer_state_dict'])
diffuseNet.train()
# checkpointSpec = torch.load('trained_model/kpcn_manif_feat/spec_e13.pt')
# specularNet.load_state_dict(checkpointSpec['model_state_dict'])
# optimizerSpec.load_state_dict(checkpointSpec['optimizer_state_dict'])
specularNet.train()
ckptPreDiff = torch.load('trained_model/kpcn/diff_e8.pt')
ckptPreSpec = torch.load('trained_model/kpcn/spec_e8.pt')
preDiffuseNet.load_state_dict(ckptPreDiff['model_state_dict'])
preSpecularNet.load_state_dict(ckptPreSpec['model_state_dict'])
preDiffuseNet.eval()
preSpecularNet.eval()
last_epoch = 0
# last_epoch = checkpointDiff['epoch'] + 1
print(last_epoch)
# scaler = GradScaler()
accuLossDiff = 0
accuLossSpec = 0
accuLossPathDiff = 0
accuLossPathSpec = 0
accuLossFinal = 0
lDiff = []
lSpec = []
lFinal = []
valLDiff = []
valLSpec = []
valLFinal = []
# writer = SummaryWriter('runs/'+mode+'_2')
total_epoch = 0
# epoch = checkpointDiff['epoch']
# if last_epoch == 0:
# epoch = 0
# print('Check Initialization')
# models = {'diffuse': diffuseNet, 'specular': specularNet, 'preDiffuse': preDiffuseNet, 'preSpecular': preSpecularNet}
# if use_llpm_buf:
# models['path_diffuse'] = diffPathNet
# models['path_specular'] = specPathNet
# # models.append({'path_diffuse': diffPathNet, 'path_specular': specPathNet})
# initLossDiff, initLossSpec, initLossFinal, relL2LossFinal, pathDiffLoss, pathSpecLoss = validation(models, validDataloader, eps, criterion, device, -1, use_llpm_buf,mode)
# print("initLossDiff: {}".format(initLossDiff))
# print("initLossSpec: {}".format(initLossSpec))
# print("initLossFinal: {}".format(initLossFinal))
# print("relL2LossFinal: {}".format(relL2LossFinal))
# print("pathDiffLoss: {}".format(pathDiffLoss))
# print("pathSpecLoss: {}".format(pathSpecLoss))
# writer.add_scalar('Valid total relL2 loss', relL2LossFinal if relL2LossFinal != float('inf') else 0, (epoch + 1) * len(validDataloader))
# writer.add_scalar('Valid total loss', initLossFinal if initLossFinal != float('inf') else 0, (epoch + 1) * len(validDataloader))
# writer.add_scalar('Valid diffuse loss', initLossDiff if initLossDiff != float('inf') else 0, (epoch + 1) * len(validDataloader))
# writer.add_scalar('Valid specular loss', initLossSpec if initLossSpec != float('inf') else 0, (epoch + 1) * len(validDataloader))
# writer.add_scalar('Valid path diffuse loss', pathDiffLoss if pathDiffLoss != float('inf') else 0, (epoch + 1) * len(validDataloader))
# writer.add_scalar('Valid path specular loss', pathSpecLoss if pathSpecLoss != float('inf') else 0, (epoch + 1) * len(validDataloader))
import time
start = time.time()
print('START')
for epoch in range(last_epoch, epochs):
print('EPOCH {}'.format(epoch+1))
diffuseNet.train()
specularNet.train()
i_batch = -1
for batch in tqdm(dataloader, leave=False, ncols=70):
i_batch += 1
with torch.no_grad():
X_diff = batch['kpcn_diffuse_in'][:, :-1].to(device)
Y_diff = batch['target_diffuse'].to(device)
X_spec = batch['kpcn_specular_in'][:, :-1].to(device)
Y_spec = batch['target_specular'].to(device)
outputDiff = preDiffuseNet(X_diff)
outputSpec = preSpecularNet(X_spec)
Y_diff = torch.norm(crop_like(Y_diff, outputDiff), dim=1, keepdim=True)
Y_spec = torch.norm(crop_like(Y_spec, outputSpec), dim=1, keepdim=True)
outputDiff = torch.norm(outputDiff, dim=1, keepdim=True)
outputSpec = torch.norm(outputSpec, dim=1, keepdim=True)
# print(outputDiff.shape, outputSpec.shape)
error_diff = torch.abs(outputDiff - Y_diff)
error_spec = torch.abs(outputSpec - Y_spec)
# print(error_diff.shape, error_spec.shape)
padding = (18, 18, 18, 18)
error_diff = F.pad(error_diff, padding, "constant", 0)
error_spec = F.pad(error_spec, padding, "constant", 0)
# print(error_diff.shape, error_spec.shape)
error_scale = 1.0
# zero the parameter gradients
optimizerDiff.zero_grad()
optimizerSpec.zero_grad()
if use_llpm_buf:
with torch.no_grad():
optimizerDiffPath.zero_grad()
optimizerSpecPath.zero_grad()
# if use_llpm_buf:
paths = batch['paths'].to(device)
diffPathNet.train()
specPathNet.train()
p_buffer_diffuse, p_buffer_specular = diffPathNet(paths), specPathNet(paths)
'''Feature Disentanglement'''
#TODO
_, _, c, _, _ = p_buffer_diffuse.shape
assert c >= 2
# Variance
p_var_diffuse = p_buffer_diffuse.var(1).mean(1, keepdims=True)
p_var_diffuse /= p_buffer_diffuse.shape[1]
p_var_specular = p_buffer_specular.var(1).mean(1, keepdims=True)
p_var_specular /= p_buffer_specular.shape[1]
p_buffer_diffuse_in = p_buffer_diffuse.mean(1) * (error_diff.expand(-1, 3, -1, -1))
p_buffer_specular_in = p_buffer_specular.mean(1) * (error_spec.expand(-1, 3, -1, -1))
# print(torch.max(error_diff), torch.min(error_spec))
# print(error_diff.expand(-1, 3, -1, -1).shape)
# make new batch
batch = {
'target_total': batch['target_total'].to(device),
'target_diffuse': batch['target_diffuse'].to(device),
'target_specular': batch['target_specular'].to(device),
'kpcn_diffuse_in': torch.cat([batch['kpcn_diffuse_in'].to(device), p_buffer_diffuse_in, p_var_diffuse], 1),
'kpcn_specular_in': torch.cat([batch['kpcn_specular_in'].to(device), p_buffer_specular_in, p_var_specular], 1),
'kpcn_diffuse_buffer': batch['kpcn_diffuse_buffer'].to(device),
'kpcn_specular_buffer': batch['kpcn_specular_buffer'].to(device),
'kpcn_albedo': batch['kpcn_albedo'].to(device),
}
# get the inputs
X_diff = batch['kpcn_diffuse_in'].to(device)
Y_diff = batch['target_diffuse'].to(device)
outputDiff = diffuseNet(X_diff)
Y_diff = crop_like(Y_diff, outputDiff)
# get the inputs
X_spec = batch['kpcn_specular_in'].to(device)
Y_spec = batch['target_specular'].to(device)
# forward + backward + optimize
outputSpec = specularNet(X_spec)
Y_spec = crop_like(Y_spec, outputSpec)
lossDiff = criterion(outputDiff, Y_diff)
lossSpec = criterion(outputSpec, Y_spec)
# loss
if use_llpm_buf:
_, s, c, _, _ = p_buffer_diffuse.shape
error_diff = error_diff.unsqueeze(1).expand(-1, s, 3, -1, -1)
error_spec = error_spec.unsqueeze(1).expand(-1, s, 3, -1, -1)
p_buffer_diffuse = p_buffer_diffuse * error_diff
p_buffer_specular = p_buffer_specular * error_spec
p_buffer_diffuse = crop_like(p_buffer_diffuse, outputDiff)
loss_manif_diffuse = path_criterion(p_buffer_diffuse, Y_diff)
p_buffer_specular = crop_like(p_buffer_specular, outputSpec)
loss_manif_specular = path_criterion(p_buffer_specular, Y_spec)
# lossDiff += manif_w * loss_manif_diffuse
# lossSpec += manif_w * loss_manif_specular
accuLossPathDiff += loss_manif_diffuse
accuLossPathSpec += loss_manif_specular
if not do_finetune:
lossDiff.backward()
lossSpec.backward()
# scaler.scale(lossDiff).backward()
# scaler.scale(lossSpec).backward()
optimizerDiff.step()
optimizerSpec.step()
# if use_llpm_buf:
# optimizerDiffPath.step()
# optimizerSpecPath.step()
# calculate final ground truth error
# if not do_finetune:
with torch.no_grad():
albedo = batch['kpcn_albedo'].to(device)
albedo = crop_like(albedo, outputDiff)
outputFinal = outputDiff * (albedo + eps) + torch.exp(outputSpec) - 1.0
Y_final = batch['target_total'].to(device)
Y_final = crop_like(Y_final, outputFinal)
lossFinal = criterion(outputFinal, Y_final)
else:
albedo = batch['kpcn_albedo'].to(device)
albedo = crop_like(albedo, outputDiff)
outputFinal = outputDiff * (albedo + eps) + torch.exp(outputSpec) - 1.0
Y_final = batch['target_total'].to(device)
Y_final = crop_like(Y_final, outputFinal)
lossFinal = criterion(outputFinal, Y_final)
lossFinal.backward()
optimizerDiff.step()
optimizerSpec.step()
if use_llpm_buf:
optimizerDiffPath.step()
optimizerSpecPath.step()
# if do_finetune:
# # print('FINETUNING')
# lossFinal.backward()
# optimizerDiff.step()
# optimizerSpec.step()
accuLossFinal += lossFinal.item()
accuLossDiff += lossDiff.item()
accuLossSpec += lossSpec.item()
writer.add_scalar('lossFinal', lossFinal if lossFinal != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('lossDiffuse', lossDiff if lossDiff != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('lossSpec', lossSpec if lossSpec != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
accuLossDiff, accuLossSpec, accuLossFinal, accuLossPathDiff, accuLossPathSpec = accuLossDiff/(8*len(dataloader)), accuLossSpec/(8*len(dataloader)), accuLossFinal/(8*len(dataloader)), accuLossPathDiff/(8*len(dataloader)), accuLossPathSpec/(8*len(dataloader))
writer.add_scalar('Train total loss', accuLossFinal if accuLossFinal != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train diffuse loss', accuLossDiff if accuLossDiff != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train specular loss', accuLossSpec if accuLossSpec != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train path diffuse loss', accuLossPathDiff if accuLossPathDiff != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train path specular loss', accuLossPathSpec if accuLossPathSpec != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
if not os.path.exists('trained_model/' + save_dir):
os.makedirs('trained_model/' + save_dir)
print('MAKE DIR {}'.format('trained_model/'+save_dir))
torch.save({
'epoch': epoch,
'model_state_dict': diffuseNet.state_dict(),
'optimizer_state_dict': optimizerDiff.state_dict(),
}, 'trained_model/'+ save_dir + '/diff_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': specularNet.state_dict(),
'optimizer_state_dict': optimizerSpec.state_dict(),
}, 'trained_model/'+ save_dir + '/spec_e{}.pt'.format(epoch+1))
if use_llpm_buf:
torch.save({
'epoch': epoch,
'model_state_dict': diffPathNet.state_dict(),
'optimizer_state_dict': optimizerDiffPath.state_dict(),
}, 'trained_model/'+ save_dir + '/path_diff_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': specPathNet.state_dict(),
'optimizer_state_dict': optimizerSpecPath.state_dict(),
}, 'trained_model/'+ save_dir + '/path_spec_e{}.pt'.format(epoch+1))
# print('VALIDATION WORKING!')
models = {'diffuse': diffuseNet, 'specular': specularNet, 'preDiffuse': preDiffuseNet, 'preSpecular': preSpecularNet}
if use_llpm_buf:
models['path_diffuse'] = diffPathNet
models['path_specular'] = specPathNet
validLossDiff, validLossSpec, validLossFinal, relL2LossFinal, pathDiffLoss, pathSpecLoss = validation(models, validDataloader, eps, criterion, device, epoch, use_llpm_buf,mode)
writer.add_scalar('Valid total relL2 loss', relL2LossFinal if relL2LossFinal != float('inf') else 1e+35, (epoch + 1) * len(dataloader))
writer.add_scalar('Valid total loss', validLossFinal if accuLossFinal != float('inf') else 1e+35, (epoch + 1) * len(dataloader))
writer.add_scalar('Valid diffuse loss', validLossDiff if accuLossDiff != float('inf') else 1e+35, (epoch + 1) * len(dataloader))
writer.add_scalar('Valid specular loss', validLossSpec if accuLossSpec != float('inf') else 1e+35, (epoch + 1) * len(dataloader))
writer.add_scalar('Valid path diffuse loss', pathDiffLoss if pathDiffLoss != float('inf') else 0, (epoch + 1) * len(dataloader))
writer.add_scalar('Valid path specular loss', pathSpecLoss if pathSpecLoss != float('inf') else 0, (epoch + 1) * len(dataloader))
print("Epoch {}".format(epoch + 1))
print("LossDiff: {}".format(accuLossDiff))
print("LossSpec: {}".format(accuLossSpec))
print("LossFinal: {}".format(accuLossFinal))
print("pathDiffLoss: {}".format(pathDiffLoss))
print("pathSpecLoss: {}".format(pathSpecLoss))
print("ValidrelL2LossDiff: {}".format(relL2LossFinal))
print("ValidLossDiff: {}".format(validLossDiff))
print("ValidLossSpec: {}".format(validLossSpec))
print("ValidLossFinal: {}".format(validLossFinal))
lDiff.append(accuLossDiff)
lSpec.append(accuLossSpec)
lFinal.append(accuLossFinal)
valLDiff.append(validLossDiff)
valLSpec.append(validLossSpec)
valLFinal.append(validLossFinal)
# if not os.path.exists('trained_model/' + save_dir):
# os.makedirs('trained_model/' + save_dir)
# print('MAKE DIR {}'.format('trained_model/'+save_dir))
# # torch.save(diffuseNet.state_dict(), 'trained_model/'+ save_dir + '/diff_e{}.pt'.format(epoch+1))
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': diffuseNet.state_dict(),
# 'optimizer_state_dict': optimizerDiff.state_dict(),
# }, 'trained_model/'+ save_dir + '/diff_e{}.pt'.format(epoch+1))
# # torch.save(specularNet.state_dict(), 'trained_model/' + save_dir + '/spec_e{}.pt'.format(epoch+1))
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': specularNet.state_dict(),
# 'optimizer_state_dict': optimizerSpec.state_dict(),
# }, 'trained_model/'+ save_dir + '/spec_e{}.pt'.format(epoch+1))
print('SAVED {}/diff_e{}, {}/spec_e{}'.format(save_dir, epoch+1, save_dir, epoch+1))
total_epoch += 1
if do_early_stopping and len(valLFinal) > 10 and valLFinal[-1] >= valLFinal[-2]:
print('EARLY STOPPING!')
break
accuLossDiff = 0
accuLossSpec = 0
accuLossFinal = 0
accuLossPathDiff = 0
accuLossPathSpec = 0
writer.close()
print('Finished training in mode, {} with epoch {}'.format(mode, total_epoch))
print('Took', time.time() - start, 'seconds.')
return diffuseNet, specularNet, lDiff, lSpec, lFinal
def main():
args = parser.parse_args()
print(args)
dataset, dataloader = init_data(args)
print(len(dataset['train']), len(dataloader['train']))
# trainset, validset = dataloader['train'], dataloader['val']
trainset, validset = dataset['train'], dataset['val']
print(trainset, validset)
input_channels = dataset['train'].dncnn_in_size
train(
args.mode,
args.device,
trainset,
validset,
eps,
args.num_layers,
input_channels,
args.hidden_channels,
args.kernel_size,
args.epochs,
args.lr,
args.loss,
args.do_early_stopping,
args.do_finetune,
args.use_llpm_buf,
args.manif_w
)
if __name__ == '__main__':
main() | 27,537 | 0 | 69 |
d35989135b0bef0d1ac8815fe7e35507a50d08b4 | 132 | py | Python | examples/tenant_tutorial/tenant_tutorial/urls_public.py | pvandegeer/django-tenant-schemas | 20c72782cee51a33fd5c56a0af7b2c653c1b6770 | [
"MIT"
] | 1,101 | 2015-01-01T23:36:37.000Z | 2022-03-29T18:11:25.000Z | examples/tenant_tutorial/tenant_tutorial/urls_public.py | pvandegeer/django-tenant-schemas | 20c72782cee51a33fd5c56a0af7b2c653c1b6770 | [
"MIT"
] | 429 | 2015-01-01T23:38:43.000Z | 2022-03-28T08:43:54.000Z | examples/tenant_tutorial/tenant_tutorial/urls_public.py | pvandegeer/django-tenant-schemas | 20c72782cee51a33fd5c56a0af7b2c653c1b6770 | [
"MIT"
] | 429 | 2015-01-01T23:29:17.000Z | 2022-03-14T20:18:55.000Z | from django.conf.urls import url
from tenant_tutorial.views import HomeView
urlpatterns = [
url(r'^$', HomeView.as_view()),
]
| 16.5 | 42 | 0.719697 | from django.conf.urls import url
from tenant_tutorial.views import HomeView
urlpatterns = [
url(r'^$', HomeView.as_view()),
]
| 0 | 0 | 0 |
ae3a0ae1a47c4b69b34bdf7d2eaeae6277915332 | 643 | py | Python | people/views.py | JimInCO/bishopric_tools | 6d7ddee52eb1f5884b051d9cb2eab9c241663423 | [
"MIT"
] | null | null | null | people/views.py | JimInCO/bishopric_tools | 6d7ddee52eb1f5884b051d9cb2eab9c241663423 | [
"MIT"
] | 2 | 2020-03-09T04:49:55.000Z | 2020-03-10T04:08:16.000Z | people/views.py | JimInCO/bishopric_tools | 6d7ddee52eb1f5884b051d9cb2eab9c241663423 | [
"MIT"
] | null | null | null | from django.views.generic import ListView, CreateView, DetailView
from events.models import Talk
from . import forms
from . import models
| 23.814815 | 81 | 0.709176 | from django.views.generic import ListView, CreateView, DetailView
from events.models import Talk
from . import forms
from . import models
class MemberDetail(DetailView):
model = models.Member
slug_field = "pk"
slug_url_kwarg = "pk"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["talks"] = Talk.objects.filter(speaker=self.object).order_by("-date")
return ctx
class MemberList(ListView):
model = models.Member
queryset = model.objects.filter(active=True)
class MemberAddView(CreateView):
model = models.Member
form_class = forms.MemberAddForm
| 166 | 266 | 69 |
cc1e1ca11436570f3c50cedfc283e510b9340d39 | 5,702 | py | Python | cogs/funs.py | SpyRisk/EveDj | 2f0bd8dd54f5da8e3c072961dbf9f34352e7a5d5 | [
"MIT"
] | null | null | null | cogs/funs.py | SpyRisk/EveDj | 2f0bd8dd54f5da8e3c072961dbf9f34352e7a5d5 | [
"MIT"
] | null | null | null | cogs/funs.py | SpyRisk/EveDj | 2f0bd8dd54f5da8e3c072961dbf9f34352e7a5d5 | [
"MIT"
] | null | null | null | from discord.ext import commands
from random import choice, shuffle
import aiohttp
import asyncio
import discord
import urllib.request, json
import random
import requests
class Funs:
"""Commandes funs."""
@commands.command()
async def avatar(self, ctx, user : discord.Member):
"""Récuperer l'avatar de ..."""
embed = discord.Embed(title="Avatar de : " + user.name, url=user.avatar_url, description="[Voir en plus grand]({})".format(user.avatar_url))
embed.set_thumbnail(url=user.avatar_url)
await ctx.send(embed=embed)
@commands.command(pass_context=True)
async def poke(self, ctx, user : discord.Member):
"""Poke quelqu'un"""
await ctx.send(":clap: Hey {0} tu t'es fait poker par {1} !".format(user.mention, ctx.message.author.name))
await ctx.message.delete()
@commands.command()
async def btcprice(self, ctx):
"""Le prix du BTC"""
loading = await ctx.send("_réfléchis..._")
try:
with urllib.request.urlopen("http://api.coindesk.com/v1/bpi/currentprice/EUR.json") as url:
data = json.loads(url.read().decode())
btc = data['bpi']['EUR']['rate']
btc = btc.split(".")
except:
btc = 1
if btc == 1:
await ctx.send("Impossible d'accèder à l'API coindesk.com, veuillez réessayer ultérieurment !")
else:
await loading.edit(content="Un bitcoin est égal à : " + btc[0] + " €")
@commands.command()
async def joke(self, ctx):
"""Print a random joke in a json file"""
with open('texts/jokes.json') as js:
jk = json.load(js)
clef = str(random.randint(1,13))
joke = jk["{}".format(clef)]
embed = discord.Embed(title="Blague _{}_ : ".format(clef), description=joke['content'], colour=0x03C9A9)
embed.set_footer(text="Par " + joke['author'])
embed.set_thumbnail(url='https://outout.tech/tuxbot/blobjoy.png')
await ctx.send(embed=embed)
@commands.command()
async def ethylotest(self, ctx):
"""Ethylotest simulator 2018"""
results_poulet = ["Désolé mais mon ethylotest est sous Windows Vista, merci de patienter...", "_(ethylotest)_ ``Une erreur est survenue. Windows cherche une solution à se problème...``", "Mais j'l'ai foutu où ce p*** d'ethylotest de m*** bordel fait ch*** tab***", "C'est pas possible z'avez cassé l'ethylotest !"]
results_client = ["D'accord, il n'y a pas de problème à cela je suis complètement clean", "Bien sur si c'est votre devoir !", "Suce bi** !", "J'ai l'air d'être bourré ?", "_laissez moi prendre un bonbon à la menthe..._"]
result_p = random.choice(results_poulet)
result_c = random.choice(results_client)
await ctx.send(":oncoming_police_car: Bonjour bonjour, controle d'alcoolémie !")
await asyncio.sleep(0.5)
await ctx.send(":man: " + result_c)
await asyncio.sleep(1)
await ctx.send(":police_car: " + result_p)
@commands.command()
async def coin(self, ctx):
"""Coin flip simulator 2025"""
starts_msg = ["Je lance la pièce !", "C'est parti !", "C'est une pièce d'un cent faut pas la perdre", "C'est une pièce d'un euro faut pas la perdre", "Je lance !"]
results_coin = ["{0} pile", "{0} face", "{1} Heu c'est quoi pile c'est quoi face enfaite ?", "{1} Oh shit, je crois que je l'ai perdue", "{1} Et bim je te vol ta pièce !", "{0} Oh une erreur d'impression il n'y a ni pile ni face !"]
start = random.choice(starts_msg)
result = random.choice(results_coin)
await ctx.send(start)
await asyncio.sleep(0.6)
await ctx.send(result.format(":moneybag: Et la pièce retombe sur ...", ":robot:"))
@commands.command()
async def pokemon(self, ctx):
"""Random pokemon fight"""
with open('texts/pokemons.json') as js:
jk = json.load(js)
poke1 = jk[random.randint(1, 150)]
poke2 = jk[random.randint(1, 150)]
try:
if poke1['MaxHP'] > poke2['MaxHP']:
winer = poke1
else:
winer = poke2
except:
winer = poke1
await ctx.send(":flag_white: **Le combat commence !**")
await asyncio.sleep(1)
await ctx.send(":loudspeaker: Les concurants sont {} contre {} ! Bonne chance à eux !".format(poke1["Name"], poke2["Name"]))
await asyncio.sleep(0.5)
await ctx.send(":boom: {} commence et utilise {}".format(poke1["Name"], poke1["Fast Attack(s)"][0]["Name"]))
await asyncio.sleep(1)
await ctx.send(":dash: {} réplique avec {}".format(poke2["Name"], poke2["Fast Attack(s)"][0]["Name"]))
await asyncio.sleep(1.2)
await ctx.send("_le combat continue de se dérouler..._")
await asyncio.sleep(1.5)
await ctx.send(":trophy: Le gagnant est **{}** !".format(winer["Name"]))
@commands.command()
async def randomcat(self, ctx):
"""Display a random cat"""
r = requests.get('http://random.cat/meow.php')
cat = str(r.json()['file'])
embed = discord.Embed(title="Meow", description="[Voir le chat plus grand]({})".format(cat), colour=0x03C9A9)
embed.set_thumbnail(url=cat)
embed.set_author(name="Random.cat", url='https://random.cat/', icon_url='http://outout.tech/tuxbot/nyancat2.gif')
await ctx.send(embed=embed)
| 42.87218 | 323 | 0.590495 | from discord.ext import commands
from random import choice, shuffle
import aiohttp
import asyncio
import discord
import urllib.request, json
import random
import requests
class Funs:
"""Commandes funs."""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def avatar(self, ctx, user : discord.Member):
"""Récuperer l'avatar de ..."""
embed = discord.Embed(title="Avatar de : " + user.name, url=user.avatar_url, description="[Voir en plus grand]({})".format(user.avatar_url))
embed.set_thumbnail(url=user.avatar_url)
await ctx.send(embed=embed)
@commands.command(pass_context=True)
async def poke(self, ctx, user : discord.Member):
"""Poke quelqu'un"""
await ctx.send(":clap: Hey {0} tu t'es fait poker par {1} !".format(user.mention, ctx.message.author.name))
await ctx.message.delete()
@commands.command()
async def btcprice(self, ctx):
"""Le prix du BTC"""
loading = await ctx.send("_réfléchis..._")
try:
with urllib.request.urlopen("http://api.coindesk.com/v1/bpi/currentprice/EUR.json") as url:
data = json.loads(url.read().decode())
btc = data['bpi']['EUR']['rate']
btc = btc.split(".")
except:
btc = 1
if btc == 1:
await ctx.send("Impossible d'accèder à l'API coindesk.com, veuillez réessayer ultérieurment !")
else:
await loading.edit(content="Un bitcoin est égal à : " + btc[0] + " €")
@commands.command()
async def joke(self, ctx):
"""Print a random joke in a json file"""
with open('texts/jokes.json') as js:
jk = json.load(js)
clef = str(random.randint(1,13))
joke = jk["{}".format(clef)]
embed = discord.Embed(title="Blague _{}_ : ".format(clef), description=joke['content'], colour=0x03C9A9)
embed.set_footer(text="Par " + joke['author'])
embed.set_thumbnail(url='https://outout.tech/tuxbot/blobjoy.png')
await ctx.send(embed=embed)
@commands.command()
async def ethylotest(self, ctx):
"""Ethylotest simulator 2018"""
results_poulet = ["Désolé mais mon ethylotest est sous Windows Vista, merci de patienter...", "_(ethylotest)_ ``Une erreur est survenue. Windows cherche une solution à se problème...``", "Mais j'l'ai foutu où ce p*** d'ethylotest de m*** bordel fait ch*** tab***", "C'est pas possible z'avez cassé l'ethylotest !"]
results_client = ["D'accord, il n'y a pas de problème à cela je suis complètement clean", "Bien sur si c'est votre devoir !", "Suce bi** !", "J'ai l'air d'être bourré ?", "_laissez moi prendre un bonbon à la menthe..._"]
result_p = random.choice(results_poulet)
result_c = random.choice(results_client)
await ctx.send(":oncoming_police_car: Bonjour bonjour, controle d'alcoolémie !")
await asyncio.sleep(0.5)
await ctx.send(":man: " + result_c)
await asyncio.sleep(1)
await ctx.send(":police_car: " + result_p)
@commands.command()
async def coin(self, ctx):
"""Coin flip simulator 2025"""
starts_msg = ["Je lance la pièce !", "C'est parti !", "C'est une pièce d'un cent faut pas la perdre", "C'est une pièce d'un euro faut pas la perdre", "Je lance !"]
results_coin = ["{0} pile", "{0} face", "{1} Heu c'est quoi pile c'est quoi face enfaite ?", "{1} Oh shit, je crois que je l'ai perdue", "{1} Et bim je te vol ta pièce !", "{0} Oh une erreur d'impression il n'y a ni pile ni face !"]
start = random.choice(starts_msg)
result = random.choice(results_coin)
await ctx.send(start)
await asyncio.sleep(0.6)
await ctx.send(result.format(":moneybag: Et la pièce retombe sur ...", ":robot:"))
@commands.command()
async def pokemon(self, ctx):
"""Random pokemon fight"""
with open('texts/pokemons.json') as js:
jk = json.load(js)
poke1 = jk[random.randint(1, 150)]
poke2 = jk[random.randint(1, 150)]
try:
if poke1['MaxHP'] > poke2['MaxHP']:
winer = poke1
else:
winer = poke2
except:
winer = poke1
await ctx.send(":flag_white: **Le combat commence !**")
await asyncio.sleep(1)
await ctx.send(":loudspeaker: Les concurants sont {} contre {} ! Bonne chance à eux !".format(poke1["Name"], poke2["Name"]))
await asyncio.sleep(0.5)
await ctx.send(":boom: {} commence et utilise {}".format(poke1["Name"], poke1["Fast Attack(s)"][0]["Name"]))
await asyncio.sleep(1)
await ctx.send(":dash: {} réplique avec {}".format(poke2["Name"], poke2["Fast Attack(s)"][0]["Name"]))
await asyncio.sleep(1.2)
await ctx.send("_le combat continue de se dérouler..._")
await asyncio.sleep(1.5)
await ctx.send(":trophy: Le gagnant est **{}** !".format(winer["Name"]))
@commands.command()
async def randomcat(self, ctx):
"""Display a random cat"""
r = requests.get('http://random.cat/meow.php')
cat = str(r.json()['file'])
embed = discord.Embed(title="Meow", description="[Voir le chat plus grand]({})".format(cat), colour=0x03C9A9)
embed.set_thumbnail(url=cat)
embed.set_author(name="Random.cat", url='https://random.cat/', icon_url='http://outout.tech/tuxbot/nyancat2.gif')
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Funs(bot)) | 49 | 0 | 53 |
5f6ab16294339aea5e6325296ab3b6d7a03e0b9f | 230 | py | Python | tests/models.py | Apkawa/django-modeltranslation-rosetta | 568354ceee201f891e1f9f6d1f5987dbdfa8f84a | [
"MIT"
] | null | null | null | tests/models.py | Apkawa/django-modeltranslation-rosetta | 568354ceee201f891e1f9f6d1f5987dbdfa8f84a | [
"MIT"
] | 14 | 2020-01-06T16:18:37.000Z | 2022-01-20T19:40:56.000Z | tests/models.py | Apkawa/django-modeltranslation-rosetta | 568354ceee201f891e1f9f6d1f5987dbdfa8f84a | [
"MIT"
] | null | null | null | from django.db import models
| 23 | 66 | 0.717391 | from django.db import models
class Article(models.Model):
title = models.CharField(verbose_name='Title', max_length=100)
body = models.TextField(verbose_name='Body')
def __unicode__(self):
return self.title
| 27 | 150 | 23 |
7f2c3b138b5ad1b508898a36bd80800d2e2f4cad | 4,909 | py | Python | ctwingest/marker_table.py | henrygong/ctwingest | 1eafea4e4faad62de2473cb77857d24c3d9c6e77 | [
"MIT"
] | null | null | null | ctwingest/marker_table.py | henrygong/ctwingest | 1eafea4e4faad62de2473cb77857d24c3d9c6e77 | [
"MIT"
] | null | null | null | ctwingest/marker_table.py | henrygong/ctwingest | 1eafea4e4faad62de2473cb77857d24c3d9c6e77 | [
"MIT"
] | null | null | null | """
Create a Marker table from an AnnData Object.
"""
from statsmodels.stats.proportion import proportions_ztest
from scipy.stats import ttest_ind
import pandas as pd
import numpy as np
from ctwingest.scanpyapi import proportion_expressed_cluster, centroids, get_expression, std_gt_0_genes
def scale_centroids(centers, max=2.5, min=-2.5):
"""avg.exp.scaled"""
scaled = centers.subtract(centers.mean(axis=1), axis=0)
scaled = scaled.divide(centers.std(axis=1), axis=0)
scaled = scaled.where(scaled <= max, other=max)
scaled = scaled.where(scaled >= min, other=min)
return scaled
def run_pipe(ad, cluster_solution_name="louvain", use_raw=True):
"""Returns a markers table from an anndata object. Looks for anndata.raw to
make metrics directly from counts. If .raw is not there then proceeds with whatever is in anndata.expression_matrix.
Metrics are t-statistic a proportions z-statistic, their pvalues and log2fc."""
# Grab the expression matrix and get ready for processing.
expression_matrix = get_expression(ad, use_raw=use_raw)
expression_matrix = expression_matrix.transpose()
expression_matrix = expression_matrix.dropna(axis='columns', how='all')
# A cluster solution is a mapping from cell->cluster.name
cluster_solution = ad.obs[cluster_solution_name]
cluster_solution = cluster_solution.dropna()
clusters = cluster_solution.unique()
print("Calculating centroids and proportions of %d samples and %d genes with %d clusters" % (
expression_matrix.shape[0], expression_matrix.shape[1], len(clusters)
))
proportions = proportion_expressed_cluster(ad, cluster_solution, use_raw=use_raw)
centroid_df = centroids(ad, cs_name=cluster_solution_name, use_raw=use_raw)
# Filter to genes that have some standard deviation across thier means
# Weak filtering intended to prevent downstream errors.
marker_genes = std_gt_0_genes(centroid_df)
centroid_df = centroid_df.loc[marker_genes]
scaled_centroid_df = scale_centroids(centroid_df)
print(
"Removing %d genes because standard deviation across means is 0"
% (expression_matrix.shape[1] - len(marker_genes))
)
print(scaled_centroid_df.head())
expression_matrix = expression_matrix[marker_genes]
# Current implementation builds one dataframe for each cluster and then concats them together.
dfs = []
for cluster_name in clusters:
print("Calculating Cluster ", cluster_name)
df = pd.DataFrame(
index=expression_matrix.columns,
#columns=["tstat", "pct.exp", "zstat", "log2fc", "zpval", "tpval", "cluster"]
#columns=["gene", "avg.exp.scaled", "pct.exp", "t-statistic", "p-value", "cluster"]
columns=["gene", "avg.exp.scaled", "pct.exp", "u-statistic", "p-value", "cluster"]
)
df['cluster'] = cluster_name
cell_names = cluster_solution.index[(cluster_solution == cluster_name).tolist()]
other_cell_names = cluster_solution.index[(cluster_solution != cluster_name).tolist()]
#pseudocount = .1
#df['log2fc'] = np.log2(expression_matrix.loc[cell_names].mean() + pseudocount) - np.log2(
# expression_matrix.loc[other_cell_names].mean() + pseudocount)
# set up for proportions z test
# expressed_in_cluster = (expression_matrix.loc[cell_names] > 0).sum()
# expressed_out_cluster = (expression_matrix.loc[other_cell_names] > 0).sum()
#out_size = len(other_cell_names)
#cluster_size = len(cell_names)
#ztest_df = pd.DataFrame([expressed_in_cluster, expressed_out_cluster])
#ztest = lambda x: proportions_ztest(
# count=[x[0], x[1]],
# nobs=[cluster_size, out_size],
# alternative='larger'
#)
#zstat_zpval = ztest_df.apply(ztest, axis='index')
#zstat = zstat_zpval.apply(lambda x: x[0])
#zpval = zstat_zpval.apply(lambda x: x[1])
from scipy.stats import mannwhitneyu
#test = lambda x: ttest_ind(x[cell_names], x[other_cell_names])
test = lambda x: mannwhitneyu(x[cell_names], x[other_cell_names])
stat_pval = expression_matrix.apply(test, axis="index")
stat = stat_pval.apply(lambda x: x[0])
pval = stat_pval.apply(lambda x: x[1])
rownames = df.index.tolist()
df["u-statistic"] = stat
df['p-value'] = pval
#df["zstat"] = zstat
#df["zpval"] = zpval
df['gene'] = rownames
df['pct.exp'] = proportions.loc[rownames, str(cluster_name)]
df['avg.exp'] = centroid_df.loc[rownames, str(cluster_name)]
df['avg.exp.scaled'] = scaled_centroid_df.loc[rownames, str(cluster_name)]
dfs.append(df)
markers_table = pd.concat(dfs, axis=0)
return markers_table
DEFAULT_LEGEND_METRICS = pd.Series(["avg.exp", "avg.exp.scaled", "pct.exp"])
| 43.061404 | 120 | 0.681605 | """
Create a Marker table from an AnnData Object.
"""
from statsmodels.stats.proportion import proportions_ztest
from scipy.stats import ttest_ind
import pandas as pd
import numpy as np
from ctwingest.scanpyapi import proportion_expressed_cluster, centroids, get_expression, std_gt_0_genes
def scale_centroids(centers, max=2.5, min=-2.5):
"""avg.exp.scaled"""
scaled = centers.subtract(centers.mean(axis=1), axis=0)
scaled = scaled.divide(centers.std(axis=1), axis=0)
scaled = scaled.where(scaled <= max, other=max)
scaled = scaled.where(scaled >= min, other=min)
return scaled
def run_pipe(ad, cluster_solution_name="louvain", use_raw=True):
"""Returns a markers table from an anndata object. Looks for anndata.raw to
make metrics directly from counts. If .raw is not there then proceeds with whatever is in anndata.expression_matrix.
Metrics are t-statistic a proportions z-statistic, their pvalues and log2fc."""
# Grab the expression matrix and get ready for processing.
expression_matrix = get_expression(ad, use_raw=use_raw)
expression_matrix = expression_matrix.transpose()
expression_matrix = expression_matrix.dropna(axis='columns', how='all')
# A cluster solution is a mapping from cell->cluster.name
cluster_solution = ad.obs[cluster_solution_name]
cluster_solution = cluster_solution.dropna()
clusters = cluster_solution.unique()
print("Calculating centroids and proportions of %d samples and %d genes with %d clusters" % (
expression_matrix.shape[0], expression_matrix.shape[1], len(clusters)
))
proportions = proportion_expressed_cluster(ad, cluster_solution, use_raw=use_raw)
centroid_df = centroids(ad, cs_name=cluster_solution_name, use_raw=use_raw)
# Filter to genes that have some standard deviation across thier means
# Weak filtering intended to prevent downstream errors.
marker_genes = std_gt_0_genes(centroid_df)
centroid_df = centroid_df.loc[marker_genes]
scaled_centroid_df = scale_centroids(centroid_df)
print(
"Removing %d genes because standard deviation across means is 0"
% (expression_matrix.shape[1] - len(marker_genes))
)
print(scaled_centroid_df.head())
expression_matrix = expression_matrix[marker_genes]
# Current implementation builds one dataframe for each cluster and then concats them together.
dfs = []
for cluster_name in clusters:
print("Calculating Cluster ", cluster_name)
df = pd.DataFrame(
index=expression_matrix.columns,
#columns=["tstat", "pct.exp", "zstat", "log2fc", "zpval", "tpval", "cluster"]
#columns=["gene", "avg.exp.scaled", "pct.exp", "t-statistic", "p-value", "cluster"]
columns=["gene", "avg.exp.scaled", "pct.exp", "u-statistic", "p-value", "cluster"]
)
df['cluster'] = cluster_name
cell_names = cluster_solution.index[(cluster_solution == cluster_name).tolist()]
other_cell_names = cluster_solution.index[(cluster_solution != cluster_name).tolist()]
#pseudocount = .1
#df['log2fc'] = np.log2(expression_matrix.loc[cell_names].mean() + pseudocount) - np.log2(
# expression_matrix.loc[other_cell_names].mean() + pseudocount)
# set up for proportions z test
# expressed_in_cluster = (expression_matrix.loc[cell_names] > 0).sum()
# expressed_out_cluster = (expression_matrix.loc[other_cell_names] > 0).sum()
#out_size = len(other_cell_names)
#cluster_size = len(cell_names)
#ztest_df = pd.DataFrame([expressed_in_cluster, expressed_out_cluster])
#ztest = lambda x: proportions_ztest(
# count=[x[0], x[1]],
# nobs=[cluster_size, out_size],
# alternative='larger'
#)
#zstat_zpval = ztest_df.apply(ztest, axis='index')
#zstat = zstat_zpval.apply(lambda x: x[0])
#zpval = zstat_zpval.apply(lambda x: x[1])
from scipy.stats import mannwhitneyu
#test = lambda x: ttest_ind(x[cell_names], x[other_cell_names])
test = lambda x: mannwhitneyu(x[cell_names], x[other_cell_names])
stat_pval = expression_matrix.apply(test, axis="index")
stat = stat_pval.apply(lambda x: x[0])
pval = stat_pval.apply(lambda x: x[1])
rownames = df.index.tolist()
df["u-statistic"] = stat
df['p-value'] = pval
#df["zstat"] = zstat
#df["zpval"] = zpval
df['gene'] = rownames
df['pct.exp'] = proportions.loc[rownames, str(cluster_name)]
df['avg.exp'] = centroid_df.loc[rownames, str(cluster_name)]
df['avg.exp.scaled'] = scaled_centroid_df.loc[rownames, str(cluster_name)]
dfs.append(df)
markers_table = pd.concat(dfs, axis=0)
return markers_table
DEFAULT_LEGEND_METRICS = pd.Series(["avg.exp", "avg.exp.scaled", "pct.exp"])
| 0 | 0 | 0 |
c31e4d77fa90c2a95e1a0a532f837cf00973ea85 | 5,235 | py | Python | Agent/dqn.py | dacozai/QuantumDeepAdvantage | 6ca2388141906e2782d29b60740db27925ee557f | [
"Apache-2.0"
] | 3 | 2019-11-20T07:51:28.000Z | 2021-02-02T08:34:52.000Z | Agent/dqn.py | dacozai/QuantumDeepAdvantage | 6ca2388141906e2782d29b60740db27925ee557f | [
"Apache-2.0"
] | null | null | null | Agent/dqn.py | dacozai/QuantumDeepAdvantage | 6ca2388141906e2782d29b60740db27925ee557f | [
"Apache-2.0"
] | 1 | 2019-12-31T04:01:18.000Z | 2019-12-31T04:01:18.000Z | #################################################################
# Copyright (C) #
# 2019 Qiskit Team #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#################################################################
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, optimizers
from typing import Dict, Tuple, Sequence, List
import copy
from Agent.network.nets import *
class dqn:
"""
Deep Q Network
Action Space: {x1, x2, y1, y2, z1, z2, h1, h2, c12, c21}
Attribute
self.num_qubits:
self.input_dim:
Methods
parse_action: convert 0 to 9 to specific gate and its argument
"""
# convert 1 * 2^n array into 2 * 2^n array
| 32.515528 | 112 | 0.640306 | #################################################################
# Copyright (C) #
# 2019 Qiskit Team #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#################################################################
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, optimizers
from typing import Dict, Tuple, Sequence, List
import copy
from Agent.network.nets import *
class dqn:
"""
Deep Q Network
Action Space: {x1, x2, y1, y2, z1, z2, h1, h2, c12, c21}
Attribute
self.num_qubits:
self.input_dim:
Methods
parse_action: convert 0 to 9 to specific gate and its argument
"""
def __init__(self, num_qubits=2, num_action=12, gamma=0.99, alpha=10e-2, epsilon=0.01):
self.num_qubits = num_qubits
self.input_sz = 2 ** self.num_qubits
self.input_dim = ( 1, self.input_sz )
self.num_action = num_action
self.init = False
self.gamma = gamma
self.alpha = alpha
self.epsilon = epsilon
self.net_instance = vanila_neural_net(self.input_sz, self.num_action, self.input_dim, self.alpha)
self.q_network = self.net_instance.init_model()
self.total_reward = 0
self.win_times = 0
def parse_action(self, action_num):
if action_num == 0 or action_num == 1:
return ["X", action_num]
elif action_num == 2 or action_num == 3:
return ["Y", action_num%self.num_qubits]
elif action_num == 4 or action_num == 5:
return ["Z", action_num%self.num_qubits]
elif action_num == 6 or action_num == 7:
return ["H", action_num%self.num_qubits]
elif action_num == 8 or action_num == 9:
return ["T", action_num%self.num_qubits]
# It can be better!!! (Only good in 2 qubits)
return [ "CX", [action_num%self.num_qubits, 1-(action_num%self.num_qubits)] ]
def find_max_val_indx(self, q_values):
init_flag = False
indx_list = []
max_val:float = None
for indx in range(self.num_action):
if not init_flag:
max_val = q_values[indx]
indx_list.append(indx)
init_flag = True
else:
if max_val < q_values[indx]:
max_val = q_values[indx]
indx_list = [indx]
elif max_val == q_values[indx]:
indx_list.append(indx)
return np.random.choice(indx_list)
def get_action(self, state):
self.prev_state = copy.deepcopy(state.reshape(1, self.input_sz))
favor_action = None
if np.random.uniform(0, 1) < self.epsilon:
favor_action = np.random.choice(range(self.num_action))
else:
q_values = self.q_network.predict(self.prev_state)[0]
favor_action = self.find_max_val_indx(q_values)
self.prev_action = favor_action
return self.parse_action(favor_action)
def learn_from_transition(self, next_state, reward, terminate):
if not self.init:
self.init = True
return
state = self.prev_state
n_state = copy.deepcopy(next_state.reshape(self.input_dim))
action = self.prev_action
q_table = self.q_network.predict(state)
q_values = 0
if not terminate:
q_values = np.max(q_table[0])
# print("q_values is ",q_values)
else:
self.init = False
self.prev_action = None
self.prev_state = None
q_table[0][action] = reward + self.gamma * q_values
self.q_network.fit(state, q_table, batch_size=1, verbose=0)
def reset(self):
self.init = False
self.q_network = self.net_instance.init_model()
# self.q_network.save_weights(filepath +'train_' + str(ag_times) + '.h5')
class drqn(dqn):
    """Deep *recurrent* Q-network variant of ``dqn`` backed by an LSTM model.

    The complex state vector is split into (real, imag) pairs before being
    fed to the network; see ``complexToReal``.
    """
    def __init__(self, num_qubits, gamma=0.9, alpha=10e-2):
        super().__init__(num_qubits=num_qubits, gamma=gamma, alpha=alpha)
        # Replace the dense Q-network built by the parent with an LSTM one.
        self.net_instance = lstm(self.input_sz, self.num_action, self.input_dim, self.alpha)
        self.q_network = self.net_instance.init_model()
    # convert 1 * 2^n array into 2 * 2^n array
    def complexToReal(self, complexArray):
        """Split a complex 1-D array into [[re, im], ...] with a leading batch axis."""
        return np.array([[[complexArray.real[indx], complexArray.imag[indx]] for indx in range(len(complexArray))]])
    def get_action(self, state):
        """Epsilon-greedy action on the real-valued encoding of *state*."""
        self.prev_state = copy.deepcopy(self.complexToReal(state))
        favor_action = None
        if np.random.uniform(0, 1) < self.epsilon:
            favor_action = np.random.choice(range(self.num_action))
        else:
            q_values = self.q_network.predict(self.prev_state)[0]
            favor_action = self.find_max_val_indx(q_values)
        self.prev_action = favor_action
        return self.parse_action(favor_action)
    def learn_from_transition(self, next_state, reward, terminate):
        """Q-learning update; mirrors dqn but encodes states via complexToReal."""
        if not self.init:
            self.init = True
            return
        state = self.prev_state
        # NOTE(review): n_state is unused below, same as in dqn -- verify intent.
        n_state = copy.deepcopy(self.complexToReal(next_state))
        action = self.prev_action
        q_table = self.q_network.predict(state)
        q_values = 0
        if not terminate:
            q_values = np.max(q_table[0])
        else:
            self.init = False
            self.prev_action = None
            self.prev_state = None
        q_table[0][action] = reward + self.gamma * q_values
        self.q_network.fit(state, q_table, batch_size=1, verbose=0)
| 4,088 | -5 | 278 |
3fa893ae3b5d7fc4a34a49eff4fe696c69c68d19 | 12,818 | py | Python | tests/networks_test.py | micbia/SegU-Net | 69c3e3596d32d93b62d3636317e1dbf531f5862e | [
"MIT"
] | 9 | 2020-05-13T22:45:43.000Z | 2022-02-10T10:13:21.000Z | tests/networks_test.py | micbia/SegU-Net | 69c3e3596d32d93b62d3636317e1dbf531f5862e | [
"MIT"
] | null | null | null | tests/networks_test.py | micbia/SegU-Net | 69c3e3596d32d93b62d3636317e1dbf531f5862e | [
"MIT"
] | null | null | null | import numpy as np, time
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dropout, concatenate
from keras.layers.convolutional import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D, MaxPooling3D
from keras.layers.merge import concatenate
from keras.utils import plot_model
| 58 | 181 | 0.599001 | import numpy as np, time
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dropout, concatenate
from keras.layers.convolutional import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D, MaxPooling3D
from keras.layers.merge import concatenate
from keras.utils import plot_model
def Unet(img_shape, params, path='./'):
    """Build a 2D or 3D U-Net as an (uncompiled) Keras Model.

    Dimensionality is inferred from len(img_shape): 3 means a 2D input
    (H, W, C), 4 means a 3D input (D, H, W, C).  Depth is inferred from
    img_shape[0]: 64 gives 3 encoder levels, 128 gives 4.

    Args:
        img_shape: input shape, channels last.
        params: dict with keys 'activation', 'kernel_size', 'coarse_dim'
            (filter count at the bottom level; halved at each level up)
            and 'dropout'.
        path: directory where 'model_visualization.png' is written.

    Returns:
        keras Model mapping the image to a sigmoid map of the same
        spatial shape and channel count.
    """
    # print message at runtime
    if(img_shape[0] == 64 and np.size(img_shape) == 3):
        print('Create 2D U-Net network with 3 levels...\n')
    elif(img_shape[0] == 128 and np.size(img_shape) == 3):
        print('Create 2D U-Net network with 4 levels...\n')
    elif(img_shape[0] == 64 and np.size(img_shape) == 4):
        print('Create 3D U-Net network with 3 levels...\n')
    elif(img_shape[0] == 128 and np.size(img_shape) == 4):
        print('Create 3D U-Net network with 4 levels...\n')
    else:
        print('???')
    def Conv2D_Layers(prev_layer, kernel_size, nr_filts, layer_name):
        """Two (Conv2D -> BatchNorm -> Activation) blocks with nr_filts filters."""
        # first layer
        a = Conv2D(filters=nr_filts, kernel_size=kernel_size, padding='same',
                   kernel_initializer="he_normal", name='%s_C1' %layer_name)(prev_layer)
        a = BatchNormalization(name='%s_BN1' %layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A1' %layer_name)(a)
        # second layer
        a = Conv2D(filters=nr_filts, kernel_size=kernel_size, padding='same',
                   kernel_initializer="he_normal", name='%s_C2' %layer_name)(a)
        a = BatchNormalization(name='%s_BN2' %layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A2' %layer_name)(a)
        return a
    def Conv3D_Layers(prev_layer, kernel_size, nr_filts, layer_name):
        """Two (Conv3D -> BatchNorm -> Activation) blocks with nr_filts filters."""
        # first layer
        a = Conv3D(filters=nr_filts, kernel_size=kernel_size, padding='same',
                   kernel_initializer="he_normal", name='%s_C1' %layer_name)(prev_layer)
        a = BatchNormalization(name='%s_BN1' %layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A1' %layer_name)(a)
        # second layer
        a = Conv3D(filters=nr_filts, kernel_size=kernel_size, padding='same',
                   kernel_initializer="he_normal", name='%s_C2' %layer_name)(a)
        a = BatchNormalization(name='%s_BN2' %layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A2' %layer_name)(a)
        return a
    img_input = Input(shape=img_shape, name='Image')
    # U-Net Encoder - upper level
    if(np.size(img_shape) == 3):
        # 2-D network
        e1c = Conv2D_Layers(prev_layer=img_input, nr_filts=int(params['coarse_dim']/16),
                            kernel_size=params['kernel_size'], layer_name='E1')
        e1 = MaxPooling2D(pool_size=(2, 2), name='E1_P')(e1c)
        e1 = Dropout(params['dropout']*0.5, name='E1_D2')(e1)
    elif(np.size(img_shape) == 4):
        # 3-D network
        e1c = Conv3D_Layers(prev_layer=img_input, nr_filts=int(params['coarse_dim']/16),
                            kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='E1')
        e1 = MaxPooling3D(pool_size=(2, 2, 2), name='E1_P')(e1c)
        e1 = Dropout(params['dropout']*0.5, name='E1_D2')(e1)
    # U-Net Encoder - second level
    if(np.size(img_shape) == 3):
        # 2-D network
        e2c = Conv2D_Layers(prev_layer=e1, nr_filts=int(params['coarse_dim']/8),
                            kernel_size=params['kernel_size'], layer_name='E2')
        e2 = MaxPooling2D(pool_size=(2, 2), name='E2_P')(e2c)
        e2 = Dropout(params['dropout'], name='E2_D2')(e2)
    elif(np.size(img_shape) == 4):
        # 3-D network
        e2c = Conv3D_Layers(prev_layer=e1, nr_filts=int(params['coarse_dim']/8),
                            kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='E2')
        e2 = MaxPooling3D(pool_size=(2, 2, 2), name='E2_P')(e2c)
        e2 = Dropout(params['dropout'], name='E2_D2')(e2)
    # U-Net Encoder - third level
    if(np.size(img_shape) == 3):
        # 2-D network
        e3c = Conv2D_Layers(prev_layer=e2, nr_filts=int(params['coarse_dim']/4),
                            kernel_size=params['kernel_size'], layer_name='E3')
        e3 = MaxPooling2D(pool_size=(2, 2), name='E3_P')(e3c)
        e3 = Dropout(params['dropout'], name='E3_D2')(e3)
    elif(np.size(img_shape) == 4):
        # 3-D network
        e3c = Conv3D_Layers(prev_layer=e2, nr_filts=int(params['coarse_dim']/4),
                            kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='E3')
        e3 = MaxPooling3D(pool_size=(2, 2, 2), name='E3_P')(e3c)
        e3 = Dropout(params['dropout'], name='E3_D2')(e3)
    if(img_shape[0] >= 64 and img_shape[0] < 128):
        # 3-level variant: bottom sits directly below level 3.
        # U-Net Encoder - bottom level
        if(np.size(img_shape) == 3):
            # 2-D network
            b = Conv2D_Layers(prev_layer=e3, nr_filts=int(params['coarse_dim']/2), kernel_size=(params['kernel_size'], params['kernel_size']), layer_name='B')
            d3 = Conv2DTranspose(filters=int(params['coarse_dim']/4), kernel_size=(params['kernel_size'], params['kernel_size']),
                                 strides=(2, 2), padding='same', name='D3_DC')(b)
        elif(np.size(img_shape) == 4):
            # 3-D network
            b = Conv3D_Layers(prev_layer=e3, nr_filts=int(params['coarse_dim']/2), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='B')
            d3 = Conv3DTranspose(filters=int(params['coarse_dim']/4), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']),
                                 strides=(2, 2, 2), padding='same', name='D3_DC')(b)
    elif(img_shape[0] >= 128):
        # 4-level variant: one extra encoder/decoder level.
        if(np.size(img_shape) == 3):
            # 2-D network
            # U-Net Encoder - fourth level
            e4c = Conv2D_Layers(prev_layer=e3, nr_filts=int(params['coarse_dim']/2),
                                kernel_size=params['kernel_size'], layer_name='E4')
            e4 = MaxPooling2D(pool_size=(2, 2), name='E4_P')(e4c)
            e4 = Dropout(params['dropout'], name='E4_D2')(e4)
            # U-Net Encoder - bottom level
            b = Conv2D_Layers(prev_layer=e4, nr_filts=params['coarse_dim'], kernel_size=params['kernel_size'], layer_name='B')
            # U-Net Decoder - fourth level
            d4 = Conv2DTranspose(filters=int(params['coarse_dim']/2), kernel_size=params['kernel_size'],
                                 strides=(2, 2), padding='same', name='D4_DC')(b)
            d4 = concatenate([d4, e4c], name='merge_layer_E4_A2')
            d4 = Dropout(params['dropout'], name='D4_D1')(d4)
            d4 = Conv2D_Layers(prev_layer=d4, nr_filts=int(params['coarse_dim']/2),
                               kernel_size=(params['kernel_size'], params['kernel_size']), layer_name='D4')
            # U-Net Decoder - third level
            d3 = Conv2DTranspose(filters=int(params['coarse_dim']/4), kernel_size=params['kernel_size'],
                                 strides=(2, 2), padding='same', name='D3_DC')(d4)
        elif(np.size(img_shape) == 4):
            # 3-D network
            # U-Net Encoder - fourth level
            e4c = Conv3D_Layers(prev_layer=e3, nr_filts=int(params['coarse_dim']/2),
                                kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='E4')
            e4 = MaxPooling3D(pool_size=(2, 2, 2), name='E4_P')(e4c)
            e4 = Dropout(params['dropout'], name='E4_D2')(e4)
            # U-Net Encoder - bottom level
            b = Conv3D_Layers(prev_layer=e4, nr_filts=params['coarse_dim'], kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='B')
            # U-Net Decoder - fourth level
            d4 = Conv3DTranspose(filters=int(params['coarse_dim']/2), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']),
                                 strides=(2, 2, 2), padding='same', name='D4_DC')(b)
            d4 = concatenate([d4, e4c], name='merge_layer_E4_A2')
            d4 = Dropout(params['dropout'], name='D4_D1')(d4)
            d4 = Conv3D_Layers(prev_layer=d4, nr_filts=int(params['coarse_dim']/2),
                               kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='D4')
            # U-Net Decoder - third level
            d3 = Conv3DTranspose(filters=int(params['coarse_dim']/4), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']),
                                 strides=(2, 2, 2), padding='same', name='D3_DC')(d4)
    else:
        print('ERROR: input data have wrong dimension')
    # U-Net Decoder - third level (continue)
    if(np.size(img_shape) == 3):
        # 2-D network
        d3 = concatenate([d3, e3c], name='merge_layer_E3_A2')
        d3 = Dropout(params['dropout'], name='D3_D1')(d3)
        d3 = Conv2D_Layers(prev_layer=d3, nr_filts=int(params['coarse_dim']/2),
                           kernel_size=(params['kernel_size'], params['kernel_size']), layer_name='D3')
    elif(np.size(img_shape) == 4):
        # 3-D network
        d3 = concatenate([d3, e3c], name='merge_layer_E3_A2')
        d3 = Dropout(params['dropout'], name='D3_D1')(d3)
        d3 = Conv3D_Layers(prev_layer=d3, nr_filts=int(params['coarse_dim']/2),
                           kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='D3')
    # U-Net Decoder - second level
    if(np.size(img_shape) == 3):
        # 2-D network
        d2 = Conv2DTranspose(filters=int(params['coarse_dim']/8), kernel_size=params['kernel_size'],
                             strides=(2, 2), padding='same', name='D2_DC')(d3)
        d2 = concatenate([d2, e2c], name='merge_layer_E2_A2')
        d2 = Dropout(params['dropout'], name='D2_D1')(d2)
        d2 = Conv2D_Layers(prev_layer=d2, nr_filts=int(params['coarse_dim']/4),
                           kernel_size=(params['kernel_size'], params['kernel_size']), layer_name='D2')
    elif(np.size(img_shape) == 4):
        # 3-D network
        d2 = Conv3DTranspose(filters=int(params['coarse_dim']/8), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']),
                             strides=(2, 2, 2), padding='same', name='D2_DC')(d3)
        d2 = concatenate([d2, e2c], name='merge_layer_E2_A2')
        d2 = Dropout(params['dropout'], name='D2_D1')(d2)
        d2 = Conv3D_Layers(prev_layer=d2, nr_filts=int(params['coarse_dim']/4),
                           kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='D2')
    # U-Net Decoder - upper level
    if(np.size(img_shape) == 3):
        d1 = Conv2DTranspose(filters=int(params['coarse_dim']/16), kernel_size=params['kernel_size'],
                             strides=(2, 2), padding='same', name='D1_DC')(d2)
        d1 = concatenate([d1, e1c], name='merge_layer_E1_A2')
        d1 = Dropout(params['dropout'], name='D1_D1')(d1)
        d1 = Conv2D_Layers(prev_layer=d1, nr_filts=int(params['coarse_dim']/16),
                           kernel_size=(params['kernel_size'], params['kernel_size']), layer_name='D1')
    elif(np.size(img_shape) == 4):
        d1 = Conv3DTranspose(filters=int(params['coarse_dim']/16), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']),
                             strides=(2, 2, 2), padding='same', name='D1_DC')(d2)
        d1 = concatenate([d1, e1c], name='merge_layer_E1_A2')
        d1 = Dropout(params['dropout'], name='D1_D1')(d1)
        d1 = Conv3D_Layers(prev_layer=d1, nr_filts=int(params['coarse_dim']/16),
                           kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']), layer_name='D1')
    # Outro Layer
    if(np.size(img_shape) == 3):
        output_image = Conv2D(filters=int(img_shape[-1]), kernel_size=params['kernel_size'],
                              strides=(1, 1), padding='same', name='out_C')(d1)
    elif(np.size(img_shape) == 4):
        output_image = Conv3D(filters=int(img_shape[-1]), kernel_size=(params['kernel_size'], params['kernel_size'], params['kernel_size']),
                              strides=(1, 1, 1), padding='same', name='out_C')(d1)
    output_image = Activation("sigmoid", name='sigmoid')(output_image)
    model = Model(inputs=[img_input], outputs=[output_image], name='Unet')
    plot_model(model, to_file=path+'model_visualization.png', show_shapes=True, show_layer_names=True)
    return model
ed7a1c4be9927b9f24bf9212440d3f9502a6b548 | 746 | py | Python | account/admin.py | PrestonMonteWest/comp-mart | e7699f3c4ac1d77c447cd9300fb9912f4840d2a9 | [
"Apache-2.0"
] | 1 | 2018-04-09T02:06:30.000Z | 2018-04-09T02:06:30.000Z | account/admin.py | PrestonMonteWest/compmart | e7699f3c4ac1d77c447cd9300fb9912f4840d2a9 | [
"Apache-2.0"
] | 1 | 2018-07-04T21:08:58.000Z | 2018-07-09T02:34:50.000Z | account/admin.py | PrestonMonteWest/compmart | e7699f3c4ac1d77c447cd9300fb9912f4840d2a9 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from . import models
admin.site.register(models.Address, AddressAdmin)
admin.site.register(models.CreditCard, CreditCardAdmin)
| 39.263158 | 83 | 0.698391 | from django.contrib import admin
from . import models
class AddressAdmin(admin.ModelAdmin):
    """Admin configuration for Address records."""
    # Editable fields on the change form.
    fields = ('user', 'street', 'city', 'state', 'zip_code')
    # Show the user as a raw-id widget instead of a select box.
    raw_id_fields = ('user',)
    # Columns in the change list and the searchable columns.
    list_display = ('user', 'street', 'city', 'state', 'zip_code')
    search_fields = ('street', 'city', 'state', 'zip_code')
class CreditCardAdmin(admin.ModelAdmin):
    """Admin configuration for CreditCard records."""
    # Editable fields on the change form.
    fields = ('user', 'card_number', 'card_type', 'holder_name', 'expiration_date')
    # Change-list columns and searchable columns.
    list_display = ('user', 'card_type', 'holder_name', 'expiration_date')
    search_fields = ('card_type', 'holder_name')
    # Drill-down navigation and sidebar filter by expiry.
    date_hierarchy = 'expiration_date'
    list_filter = ('expiration_date',)
admin.site.register(models.CreditCard, CreditCardAdmin)
| 0 | 539 | 46 |
71a22cab3a5f46c536fa8e2e24f0611fcff253c2 | 1,091 | py | Python | PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/base_instance.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | [
"BSD-2-Clause"
] | null | null | null | PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/base_instance.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | [
"BSD-2-Clause"
] | null | null | null | PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/base_instance.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | [
"BSD-2-Clause"
] | null | null | null | '''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_base_instance'
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLuint)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.POINTER(_cs.void),_cs.GLsizei,_cs.GLuint)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.POINTER(_cs.void),_cs.GLsizei,_cs.GLint,_cs.GLuint)
def glInitBaseInstanceARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| 45.458333 | 119 | 0.810266 | '''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_base_instance'
def _f(function):
    """Wrap *function* as a GL entry point of the GL_ARB_base_instance extension."""
    wrapped = _p.createFunction(function, _p.GL, 'GL_ARB_base_instance', False)
    return wrapped
# Stub body; _f/_p.createFunction install the real C entry point at runtime.
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLuint)
def glDrawArraysInstancedBaseInstance( mode,first,count,instancecount,baseinstance ):pass
# Stub body; _f/_p.createFunction install the real C entry point at runtime.
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.POINTER(_cs.void),_cs.GLsizei,_cs.GLuint)
def glDrawElementsInstancedBaseInstance( mode,count,type,indices,instancecount,baseinstance ):pass
# Stub body; _f/_p.createFunction install the real C entry point at runtime.
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.POINTER(_cs.void),_cs.GLsizei,_cs.GLint,_cs.GLuint)
def glDrawElementsInstancedBaseVertexBaseInstance( mode,count,type,indices,instancecount,basevertex,baseinstance ):pass
def glInitBaseInstanceARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension(EXTENSION_NAME)
    return available
| 316 | 0 | 88 |
517e237ee7721179f23df9b8e1d8c509af07f2d3 | 4,046 | py | Python | kits.py | gajanlee/UCAS-kits | 75c985a8ea1e69bfe401c9b0555a7d526d4cc71e | [
"MIT"
] | null | null | null | kits.py | gajanlee/UCAS-kits | 75c985a8ea1e69bfe401c9b0555a7d526d4cc71e | [
"MIT"
] | null | null | null | kits.py | gajanlee/UCAS-kits | 75c985a8ea1e69bfe401c9b0555a7d526d4cc71e | [
"MIT"
] | null | null | null | import configparser
import requests
import time
import numpy as np
from PIL import Image
from io import BytesIO
import configparser
from code import CodeRecognizer
from utils import *
if __name__ == "__main__":
cf = configparser.ConfigParser()
cf.read("info.conf")
while process_orders(cf):
time.sleep(2) | 32.629032 | 171 | 0.561789 | import configparser
import requests
import time
import numpy as np
from PIL import Image
from io import BytesIO
import configparser
from code import CodeRecognizer
from utils import *
class UCAS:
    """Base class holding the credentials and HTTP session for a UCAS site."""
    def __init__(self, user):
        """Remember *user* (a mapping of credentials) and open a fresh session."""
        self.__session = requests.Session()
        self.__user = user
        # self.login()
    def login(self):
        """Authenticate against the site; meant to be overridden by subclasses."""
        pass
    @property
    def user(self):
        """The credential mapping given at construction time."""
        return self.__user
    @property
    def session(self):
        """The underlying requests.Session carrying cookies between calls."""
        return self.__session
class PaymentUCAS(UCAS):
    """Client for the UCAS shuttle-bus booking pages on payment.ucas.ac.cn.

    Handles login (including captcha solving via CodeRecognizer), seat
    queries and ticket booking for one order dict, which is expected to
    carry at least the keys "date" and "routecode".
    """
    def __init__(self, user, order):
        self.host_url = "http://payment.ucas.ac.cn"
        self.base_url = "http://payment.ucas.ac.cn/NetWorkUI/"
        self.order = order
        super(PaymentUCAS, self).__init__(user)
    def login(self):
        """Log in; return True when the response shows the logout link, else None."""
        if self.request("login", "post", data={
            'nickName': self.user.get("username"),
            'password': self.user.get("password"),
            'checkCode': self.checkCode(),
            'logintype': 'PLATFORM',
        }).text.find("退出") != -1:
            return True
        # }).text.find("退出") != -1:
        #return True
    def routes_vali(self):
        """
        Get Nominated Route's Information, check the remaining seats.
        Returns True when the query succeeded and free seats remain;
        otherwise logs the failure and returns None.
        """
        msg = self.request("querySeat", "post", data={
            "bookingdate": self.order["date"],
            "factorycode": "R001",
            "routecode": self.order["routecode"],
        }).json()
        # NOTE(review): this reads returndata before checking returncode;
        # a failed query without that key would raise KeyError -- verify.
        remainseat = int(msg["returndata"]["freeseat"])
        if msg["returncode"] != "SUCCESS" or not remainseat:
            log("查询余票失败,剩余座位:%s" % (remainseat))
        else:
            log("剩余票:%s" % (remainseat))
            return True
    def bookTicket(self):
        """Book one ticket; on success log the payment URL and return True."""
        msg = self.request("bookTicket", "post", data={
            "routecode": self.order["routecode"],
            "payAmt": "6.00",
            "bookingdate": self.order["date"],
            "payProjectId": "4",
            "tel": self.user.get("telnum"),
            "factorycode": "R001",
        }).json()
        if msg["returncode"] == "SUCCESS":
            log("预订成功,请打开这个网址完成支付: {baseurl}{bookprefix}{orderno}".format(baseurl=self.base_url, bookprefix="showUserSelectPayType25", orderno=msg["payOrderTrade"]["id"]))
            return True
        else:
            log("预订失败,请重试")
    def checkCode(self):
        """Fetch the captcha image and OCR it into a 4-character string."""
        img = Image.open(BytesIO(self.request("checkCode", "get").content))
        # binary and gray
        img = img.point(lambda p: 255 if p > 127 else 0).convert("1")
        # Cut the image brims with width 1
        w, h = img.size
        img = img.crop((1, 1, w-1, h-1))
        # divide checkcode image into 4 pieces.
        w, h = img.size
        # NOTE(review): w/4 is a float on Python 3; confirm the four slices
        # line up with the captcha characters as intended.
        imgs = [img.crop((w/4*i, 0, w/4*(i+1), h)) for i in range(4)]
        return "".join([CodeRecognizer(img) for img in imgs])
    def request(self, step="login", type="get", data=None):
        """Issue a GET/POST to the endpoint registered for *step*.

        ``type`` names the Session method ("get"/"post"); it shadows the
        builtin but is kept for interface compatibility.
        """
        opts = {
            "login": "fontuserLogin",
            "checkCode": "authImage?temp=0.10349747758414296",
            "queryBusDate": "queryBusByDate",
            "querySeat": "queryRemainingSeats",
            "bookTicket": "reservedBusCreateOrder",
        }
        return getattr(self.session, type)(self.base_url + opts[step], data=data)
def process_orders(cf):
    """Attempt to book every configured order once.

    Walks all ``[order*]`` sections of *cf*, logs in and tries to book each.
    A section is removed once it is finished: booked successfully, rejected,
    or out of seats.  Only orders that still need a retry remain.

    Args:
        cf: a configparser.ConfigParser with a [user] section and zero or
            more [order*] sections.

    Returns:
        True if unfinished orders remain (caller should retry), else False.

    Raises:
        Exception: when login fails, i.e. the credentials are wrong.
    """
    user = cf["user"]
    order_keys = [order for order in cf.sections() if order.startswith("order")]
    for order in order_keys:
        log("正在处理订单:%s" % (",".join(["%s: %s" % (k, v) for k, v in cf[order].items()])))
        payment = PaymentUCAS(user, cf[order])
        if not payment.login():
            raise Exception("用户信息错误!")
        elif not payment.routes_vali():
            cf.remove_section(order)
            log("移除了此订单")
        elif not payment.bookTicket():
            cf.remove_section(order)
            log("移除了此订单")
        else:
            # Booking succeeded: drop the order so the retry loop in
            # __main__ cannot book (and pay for) it a second time.
            cf.remove_section(order)
    # Bug fix: this used to return ``not remaining`` which made the caller's
    # ``while process_orders(cf)`` loop spin forever after all orders were
    # done, and stop early while orders were still pending.
    return bool([order for order in cf.sections() if order.startswith("order")])
if __name__ == "__main__":
    # Load credentials ([user]) and orders ([order*]) from info.conf.
    cf = configparser.ConfigParser()
    cf.read("info.conf")
    # Re-run process_orders every 2 seconds while it returns a truthy value.
    while process_orders(cf):
        time.sleep(2)
a353593d14c77c62907e98f74374cf5b3f4d0eb0 | 635 | py | Python | SUBS/009_SUBS.py | domenicosolazzo/Rosalind | 288fa79aa715de6632413ddaa0e2527a1d7ee265 | [
"MIT"
] | null | null | null | SUBS/009_SUBS.py | domenicosolazzo/Rosalind | 288fa79aa715de6632413ddaa0e2527a1d7ee265 | [
"MIT"
] | null | null | null | SUBS/009_SUBS.py | domenicosolazzo/Rosalind | 288fa79aa715de6632413ddaa0e2527a1d7ee265 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
A solution to a ROSALIND bioinformatics problem.
Problem Title: Finding a Motif in DNA
Rosalind ID: SUBS
Rosalind #: 009
URL: http://rosalind.info/problems/subs/
'''
if __name__ == "__main__":
main()
| 24.423077 | 57 | 0.620472 | #!/usr/bin/env python
'''
A solution to a ROSALIND bioinformatics problem.
Problem Title: Finding a Motif in DNA
Rosalind ID: SUBS
Rosalind #: 009
URL: http://rosalind.info/problems/subs/
'''
def main():
    """Print and save every 1-based position where motif t occurs in s.

    Reads the DNA string s and the motif t from data/rosalind_subs.txt and
    writes the space-separated match positions to output/009_SUBS.txt.
    (Python 2 code: note the print statement below.)
    """
    with open('data/rosalind_subs.txt') as input_data:
        s,t = input_data.readlines()
        s = s.rstrip()
        t = t.rstrip()
    locations = []
    # Naive scan: compare t against every length-len(t) window of s.
    for i in range(0, len(s)-len(t)+1):
        if s[i:i+len(t)] == t:
            locations.append(str(i+1))
    print ' '.join(locations)
    with open('output/009_SUBS.txt', 'w') as output_data:
        output_data.write(' '.join(locations))
if __name__ == "__main__":
    main()
| 382 | 0 | 22 |
d1bd912e0d30e4645b27be8b0a5b658a2ec85d3b | 32 | py | Python | library/python/dd/data_reader/__init__.py | darwinbeing/deepdriving-tensorflow | 036a83871f3515b2c041bc3cd5e845f6d8f7b3b7 | [
"MIT"
] | 1 | 2018-12-13T14:00:03.000Z | 2018-12-13T14:00:03.000Z | library/python/dd/data_reader/__init__.py | darwinbeing/deepdriving-tensorflow | 036a83871f3515b2c041bc3cd5e845f6d8f7b3b7 | [
"MIT"
] | null | null | null | library/python/dd/data_reader/__init__.py | darwinbeing/deepdriving-tensorflow | 036a83871f3515b2c041bc3cd5e845f6d8f7b3b7 | [
"MIT"
] | null | null | null | from .wrapper import CDataReader | 32 | 32 | 0.875 | from .wrapper import CDataReader | 0 | 0 | 0 |
dde944103b320bbb0108dbd91b7db350f5d0fa72 | 1,339 | py | Python | website/baseapp/models/order.py | raviarrow88/Django-ecommerce | 9fd0340e4d2f1ff024092ba574702cd9dbfd2162 | [
"MIT"
] | null | null | null | website/baseapp/models/order.py | raviarrow88/Django-ecommerce | 9fd0340e4d2f1ff024092ba574702cd9dbfd2162 | [
"MIT"
] | 9 | 2020-06-06T01:49:00.000Z | 2022-03-12T00:30:35.000Z | website/baseapp/models/order.py | raviarrow88/Django-ecommerce | 9fd0340e4d2f1ff024092ba574702cd9dbfd2162 | [
"MIT"
] | null | null | null | from django.db import models
from .timestamp import TimeStamp
# from .user import UserProfile
from customer.models import UserProfile
from .item import Item
| 30.431818 | 87 | 0.678865 | from django.db import models
from .timestamp import TimeStamp
# from .user import UserProfile
from customer.models import UserProfile
from .item import Item
class Order(TimeStamp):
user =models.ForeignKey(UserProfile,on_delete=models.SET_NULL,null=True,blank=True)
completed = models.BooleanField(default=False)
delivery_charge = models.IntegerField(default=40,blank=False)
transaction_id = models.CharField(max_length=255,null=True)
def __str__(self):
return str(self.id)
@property
def get_total_order_price(self):
order_items = self.orderitem_set.all()
return sum([item.total_value for item in order_items])
@property
def get_no_items(self):
order_items = self.orderitem_set.all()
return sum([item.quantity for item in order_items ])
@property
def get_cart_total(self):
order_items = self.orderitem_set.all()
total = sum([item.total_value for item in order_items])
if total < 500:
return total+self.delivery_charge
else:
return total
@property
def get_delivery_fee(self):
order_items = self.orderitem_set.all()
total = sum([item.total_value for item in order_items])
if total > 500:
return 0
else:
return self.delivery_charge
| 696 | 462 | 23 |
f2f6448df1215a4699a0b8cfe080518be476866d | 2,243 | py | Python | tests/utils.py | codefever/dremel.py | 42d44a843e1ba5cefd32c14490a124f85f4c1c4e | [
"MIT"
] | 2 | 2021-04-09T12:22:50.000Z | 2022-03-27T14:04:31.000Z | tests/utils.py | codefever/dremel.py | 42d44a843e1ba5cefd32c14490a124f85f4c1c4e | [
"MIT"
] | null | null | null | tests/utils.py | codefever/dremel.py | 42d44a843e1ba5cefd32c14490a124f85f4c1c4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import random
from google.protobuf import text_format
from google.protobuf.descriptor import Descriptor, FieldDescriptor
from .document_pb2 import Document
from dremel.consts import *
from dremel.simple import create_simple_storage
| 30.310811 | 93 | 0.598306 | #!/usr/bin/env python
import os
import random
from google.protobuf import text_format
from google.protobuf.descriptor import Descriptor, FieldDescriptor
from .document_pb2 import Document
from dremel.consts import *
from dremel.simple import create_simple_storage
def read_docs():
sample_dir = os.path.join(os.path.dirname(__file__), 'samples')
files = os.listdir(sample_dir)
for f in files:
with open(os.path.join(sample_dir, f)) as fd:
doc = Document()
text_format.Merge(fd.read(), doc)
yield doc
def trim_doc(doc, fields):
    """Strip *doc* down to the dotted field paths listed in *fields*.

    Builds the set of every path prefix (rooted at ROOT), then recursively
    clears any set field of the protobuf message whose path is not in that
    set.  Mutates and returns *doc*.
    """
    paths = {ROOT}
    for f in fields:
        p = ROOT
        for seg in f.split('.'):
            p += '.' + seg
            paths.add(p)
    def _trim(msg, root):
        # Walk only the fields that are actually set on this message.
        for f,v in msg.ListFields():
            p = f'{root}.{f.name}'
            if p not in paths:
                msg.ClearField(f.name)
                continue
            # Recurse into (possibly repeated) sub-messages that are kept.
            if f.type in (FieldDescriptor.TYPE_GROUP, FieldDescriptor.TYPE_MESSAGE):
                if f.label == FieldDescriptor.LABEL_REPEATED:
                    for e in v: _trim(e, p)
                else:
                    _trim(v, p)
        return msg
    return _trim(doc, ROOT)
def _random_string(n):
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:/+_(*^%$#@!~<>?)'
return ''.join([random.choice(chars) for _ in range(n)])
def create_random_doc():
    """Build a Document proto with randomized contents for testing.

    Links appear with probability 0.3; the repeated `name` and `language`
    groups are grown geometrically (each iteration continues with a fixed
    probability), so repeated-field depth varies between calls.
    """
    doc = Document()
    doc.doc_id = random.randint(0, 999999)

    # links
    if random.random() < 0.3:
        # Random counts of forward/backward link ids in disjoint ranges.
        fcnt, bcnt = random.randint(0, 10), random.randint(0, 7)
        doc.links.forward.extend([random.randint(1000, 4000) for _ in range(fcnt)])
        doc.links.backward.extend([random.randint(5000, 7000) for _ in range(bcnt)])

    # name
    while random.random() < 0.777:
        name = doc.name.add()
        while random.random() < 0.5:
            language = name.language.add()
            language.code = _random_string(6)
            if random.random() < 0.5:
                language.country = _random_string(9)
        if random.random() < 0.8:
            name.url = _random_string(12)
    return doc
def create_test_storage():
    """Build a simple in-memory storage populated with the sample documents."""
    documents = read_docs()
    return create_simple_storage(Document.DESCRIPTOR, documents)
| 1,860 | 0 | 115 |
f957582cf4088d5cebfe722f507924fba380cb9d | 427 | py | Python | simple-examples/07_other-examples/dict.py | wiltonpaulo/python-fullcourse | 5befe60221a2e6f8a567a11e2f449245c11b3447 | [
"MIT"
] | null | null | null | simple-examples/07_other-examples/dict.py | wiltonpaulo/python-fullcourse | 5befe60221a2e6f8a567a11e2f449245c11b3447 | [
"MIT"
] | null | null | null | simple-examples/07_other-examples/dict.py | wiltonpaulo/python-fullcourse | 5befe60221a2e6f8a567a11e2f449245c11b3447 | [
"MIT"
] | null | null | null | print("Way to transform two lists into one dict")
purchases = ["rice", "beans", "pasta"]
prices = ["2.00", "3.80", "4.90"]
new_list = {}
# for x in range(len(purchases)):
# new_list[purchases[x]] = prices[x]
# for id, item in enumerate(compras):
# new_list[purchases[id]] = prices[id]
new_list = {item: prices[purchases.index(item)] for item in purchases}
# new_list = dict(zip(purchases, prices))
print(new_list)
print("Way to transform two lists into one dict")

purchases = ["rice", "beans", "pasta"]
prices = ["2.00", "3.80", "4.90"]

# zip walks both lists in lockstep; dict() pairs each item with its price,
# which is equivalent to the index-based comprehension for unique items.
new_list = dict(zip(purchases, prices))

print(new_list)
| 0 | 0 | 0 |
cccd40a89b5d3ec8ef38be6a8a9d8358e4b4a7ee | 4,907 | py | Python | Modules/visualization.py | EVA4-RS-Group/Phase2 | 7c551e3894979cc425dd51baeddbfa5a51b7878d | [
"Apache-2.0"
] | null | null | null | Modules/visualization.py | EVA4-RS-Group/Phase2 | 7c551e3894979cc425dd51baeddbfa5a51b7878d | [
"Apache-2.0"
] | null | null | null | Modules/visualization.py | EVA4-RS-Group/Phase2 | 7c551e3894979cc425dd51baeddbfa5a51b7878d | [
"Apache-2.0"
] | 2 | 2020-08-26T02:33:33.000Z | 2021-03-16T10:51:40.000Z | '''Plotting Utility.
Grad-CAM implementation in Pytorch
Reference:
[1] xyz
[2] xyz
'''
import matplotlib.pyplot as plt
import numpy as np
import torch
def denormalize(tensor, mean, std):
"""Denormalize the image for given mean and standard deviation.
Args:
tensor: Image tensor
mean: Dataset mean
std: Dataset standard deviation
Returns:
tensor
Raises:
No Exception
"""
if not tensor.ndimension() == 4:
raise TypeError('tensor should be 4D')
mean = torch.FloatTensor(mean).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)
std = torch.FloatTensor(std).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)
return tensor.mul(std).add(mean)
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.5404, 0.5918, 0.6219])
std = np.array([0.2771, 0.2576, 0.2998])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title[:4])
plt.pause(0.001) # pause a bit so that plots are updated
| 32.282895 | 113 | 0.592011 | '''Plotting Utility.
Grad-CAM implementation in Pytorch
Reference:
[1] xyz
[2] xyz
'''
import matplotlib.pyplot as plt
import numpy as np
import torch
def denormalize(tensor, mean, std):
    """Undo per-channel normalization on a batch of images.

    Args:
        tensor: 4D image batch of shape (N, C, H, W).
        mean: per-channel means used during normalization (3 values).
        std: per-channel standard deviations used during normalization.

    Returns:
        tensor * std + mean, broadcast per channel.

    Raises:
        TypeError: if the input is not 4-dimensional.
    """
    if tensor.ndimension() != 4:
        raise TypeError('tensor should be 4D')
    device = tensor.device
    # Reshape the channel statistics to (1, 3, 1, 1) so they broadcast
    # across batch and spatial dimensions.
    mean_t = torch.FloatTensor(mean).view(1, 3, 1, 1).expand_as(tensor).to(device)
    std_t = torch.FloatTensor(std).view(1, 3, 1, 1).expand_as(tensor).to(device)
    return tensor.mul(std_t).add(mean_t)
def visualize_model(model, data, device, save_as="visualize.jpg"):
    """Show (and save) predictions for the first 5 validation images.

    Args:
        model: trained torch model; its train/eval mode is restored on exit.
        data: object exposing ``dataloaders`` (with a 'val' split) and
            ``class_names``.
        device: torch device to run inference on.
        save_as: path the figure is written to once all 5 images are drawn.
            NOTE(review): if the val set yields fewer than 5 images, the
            figure is shown but never saved — confirm that is intended.
    """
    dataloaders, class_names = data.dataloaders, data.class_names
    was_training = model.training
    model.eval()
    images_so_far = 0
    figure = plt.figure(figsize=(15, 10))
    num_images=5
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            # Undo dataset normalization so images display with true colors.
            inputs = denormalize(inputs,mean=(0.5404, 0.5918, 0.6219),std=(0.2771, 0.2576, 0.2998)).cpu().numpy()
            for j in range(inputs.shape[0]):
                images_so_far += 1
                img = inputs[j]
                # CHW -> HWC for matplotlib, clipped to the displayable range.
                npimg = np.clip(np.transpose(img,(1,2,0)), 0, 1)
                ax = figure.add_subplot(1, 5, images_so_far, xticks=[], yticks=[])
                ax.imshow(npimg, cmap='gray')
                ax.set_title('predicted:\n{}'.format(class_names[preds[j]]),fontsize=14)
                if images_so_far == num_images:
                    # Restore training mode and save once the grid is full.
                    model.train(mode=was_training)
                    figure.savefig(save_as)
                    return
    model.train(mode=was_training)
    figure.tight_layout()
    plt.show()
def visualize_face_recog_model(model, data, device, save_as="visualize.jpg"):
    """Show (and save) a 5x7 grid of face-recognition predictions.

    Same structure as ``visualize_model`` but with a 35-image grid and
    ImageNet normalization statistics.

    Args:
        model: trained torch model; its train/eval mode is restored on exit.
        data: object exposing ``dataloaders`` (with a 'val' split) and
            ``class_names``.
        device: torch device to run inference on.
        save_as: path the figure is written to once the grid is full.
    """
    dataloaders, class_names = data.dataloaders, data.class_names
    was_training = model.training
    model.eval()
    images_so_far = 0
    figure = plt.figure(figsize=(15, 12))
    num_images=35
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            # Undo ImageNet normalization so images display with true colors.
            inputs = denormalize(inputs,mean=(0.485, 0.456, 0.406),std=(0.229, 0.224, 0.225)).cpu().numpy()
            for j in range(inputs.shape[0]):
                images_so_far += 1
                img = inputs[j]
                # CHW -> HWC for matplotlib, clipped to the displayable range.
                npimg = np.clip(np.transpose(img,(1,2,0)), 0, 1)
                ax = figure.add_subplot(5, 7, images_so_far, xticks=[], yticks=[])
                ax.imshow(npimg, cmap='gray')
                ax.set_title('{}'.format(class_names[preds[j]]),fontsize=12)
                if images_so_far == num_images:
                    # Restore training mode and save once the grid is full.
                    model.train(mode=was_training)
                    figure.savefig(save_as)
                    return
    model.train(mode=was_training)
    figure.tight_layout()
    #plt.title("Predicted Label",fontsize=16)
    figure.suptitle("Predicted Label",fontsize=16)
    figure.subplots_adjust(top=0.88)
    plt.show()
def imshow(inp, title=None):
    """Imshow for Tensor: denormalize a single CHW image and display it."""
    # CHW -> HWC for matplotlib.
    inp = inp.numpy().transpose((1, 2, 0))
    # Dataset channel statistics; the normalization is undone below.
    mean = np.array([0.5404, 0.5918, 0.6219])
    std = np.array([0.2771, 0.2576, 0.2998])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        # Only the first four characters of the title are shown.
        plt.title(title[:4])
    plt.pause(0.001)  # pause a bit so that plots are updated
def imshow_save(inp, save_as="sample.jpg",title=None):
    """Like ``imshow`` but also writes the figure to *save_as*."""
    # CHW -> HWC for matplotlib.
    inp = inp.numpy().transpose((1, 2, 0))
    # Dataset channel statistics; the normalization is undone below.
    mean = np.array([0.5404, 0.5918, 0.6219])
    std = np.array([0.2771, 0.2576, 0.2998])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    figure = plt.figure()
    plt.imshow(inp)
    if title is not None:
        # Only the first four characters of the title are shown.
        plt.title(title[:4])
    plt.pause(0.001)  # pause a bit so that plots are updated
    figure.savefig(save_as)
def PlotGraph(plotData,save_as):
    """Plot train/test loss and accuracy curves and save the figure.

    Args:
        plotData: mapping with 'trainLoss', 'valLoss', 'trainAccu' and
            'valAccu' sequences (one value per epoch).
        save_as: path the combined two-panel figure is written to.
    """
    fig, (axs1,axs2) = plt.subplots(2, 1,figsize=(15,10))
    # Top panel: loss curves.
    axs1.plot(plotData['trainLoss'], label = " Train")
    axs1.plot(plotData['valLoss'], label = " Test")
    axs1.set_title("Loss", fontsize=16)
    # Bottom panel: accuracy curves.
    axs2.plot(plotData['trainAccu'], label = " Train")
    axs2.plot(plotData['valAccu'], label = " Test")
    axs2.set_title("Accuracy", fontsize=16)
    axs1.legend(fontsize=14)
    axs2.legend(fontsize=14)
    axs1.tick_params(axis='both', which='major', labelsize=12)
    axs2.tick_params(axis='both', which='major', labelsize=12)
    plt.show()
    fig.savefig(save_as)
0c35895d716286e0c3bf45a9d0405b8ccf2b63fd | 1,935 | py | Python | deploy_java/DockerJava/javaTopo_image.py | vmeta42/neodata | 00a42ab8d1adbb14f09bc4cc04720b0067241985 | [
"Apache-2.0"
] | null | null | null | deploy_java/DockerJava/javaTopo_image.py | vmeta42/neodata | 00a42ab8d1adbb14f09bc4cc04720b0067241985 | [
"Apache-2.0"
] | null | null | null | deploy_java/DockerJava/javaTopo_image.py | vmeta42/neodata | 00a42ab8d1adbb14f09bc4cc04720b0067241985 | [
"Apache-2.0"
] | 1 | 2022-03-16T08:31:28.000Z | 2022-03-16T08:31:28.000Z | # coding:utf-8
import os
import subprocess
if __name__ == "__main__":
j = JavaImage(codeDir=os.path.abspath(os.curdir), shell_file="javapack.sh",
imageTag="harbor.dev.21vianet.com/cmdb/cmdb_javatopo:latest")
j.begin()
# print()
| 33.947368 | 115 | 0.61137 | # coding:utf-8
import os
import subprocess
class JavaImage():
    """Build pipeline helper: mvn-package a Java project, docker build, push.

    Each step stores its CompletedProcess in ``self.out`` so ``begin`` can
    inspect the previous step's return code.
    """
    def __init__(self, *args, **kwargs):
        # Required keyword arguments: codeDir, shell_file, imageTag.
        self.codeDir = kwargs["codeDir"]
        self.mvnScript = kwargs["shell_file"]
        self.dockerImage = kwargs["imageTag"]

    def mvn_command(self, capture_output=True):
        # Run the packaging shell script with the code dir as its argument.
        self.out = subprocess.run([os.path.join(self.codeDir, self.mvnScript), self.codeDir],
                                  capture_output=capture_output)

    def DockerBuild(self, capture_output=True):
        # os.chdir(self.codeDir)
        # Build the image from the Dockerfile in the current working directory.
        command = "/usr/bin/docker build -t %s -f Dockerfile . " % self.dockerImage
        print(command)
        # self.out = subprocess.run(["docker","version"], capture_output=capture_output)
        self.out = subprocess.run(command, shell=True, encoding="utf-8", capture_output=capture_output, timeout=30)

    def DockerPush(self, capture_output=True):
        # Push the built image to the registry embedded in its tag.
        command = "/usr/bin/docker push %s " % self.dockerImage
        print(command)
        self.out = subprocess.run(command, shell=True, encoding="utf-8", capture_output=capture_output)

    def begin(self):
        """Run package -> build -> push, printing each step's outcome.

        NOTE(review): later steps still run when an earlier one failed —
        confirm whether aborting on failure would be preferable.
        """
        self.mvn_command(False)
        if self.out.returncode == 0:
            print("jar mvn success")
        else:
            print("jar mvn fail")
            print(self.out.stderr.__str__())
        self.DockerBuild(False)
        if self.out.returncode == 0:
            print("docker build success")
        else:
            print("docker build fail")
            print(self.out.stderr.__str__())
        self.DockerPush(False)
        if self.out.returncode == 0:
            print("docker push success")
        else:
            print("docker push fail")
            print(self.out.stderr.__str__())
if __name__ == "__main__":
j = JavaImage(codeDir=os.path.abspath(os.curdir), shell_file="javapack.sh",
imageTag="harbor.dev.21vianet.com/cmdb/cmdb_javatopo:latest")
j.begin()
# print()
| 1,520 | -3 | 157 |
0472b3c394edd47e2e182c61417a754346641796 | 979 | py | Python | uploads/core/forms.py | lindsay777/ITRI_hospital_UI | ee82b44e7e0edd28580a4fbb37c277ff85da8192 | [
"MIT"
] | null | null | null | uploads/core/forms.py | lindsay777/ITRI_hospital_UI | ee82b44e7e0edd28580a4fbb37c277ff85da8192 | [
"MIT"
] | 1 | 2018-08-12T08:30:54.000Z | 2018-08-12T08:30:54.000Z | uploads/core/forms.py | lindsay777/ITRI_hospital_UI | ee82b44e7e0edd28580a4fbb37c277ff85da8192 | [
"MIT"
] | null | null | null | from django import forms
from uploads.core.models import Document
#from uploads.core.models import File
# 創造一個依照model的form,會繼承欄位description document
# class FileForm(forms.ModelForm):
# class Meta:
# model = File
# fields = ('filename',)
# file = forms.FileField()
# pid = forms.CharField(max_length=20)
# name = forms.CharField(max_length=20)
# sex = forms.CharField()
# age = forms.IntegerField()
# mp = forms.IntegerField()
# scanType = forms.CharField(max_length=10)
# fracture = forms.IntegerField()
# tscore = forms.CharField()
# zscore = forms.CharField()
# region = forms.CharField()
# lva = forms.CharField()
# apspine = forms.CharField()
# dualfemur = forms.CharField()
# combination = forms.CharField() | 29.666667 | 47 | 0.657814 | from django import forms
from uploads.core.models import Document
#from uploads.core.models import File
# Create a form based on the model; it inherits the description and document fields
class DocumentForm(forms.ModelForm):
    """ModelForm for uploading a Document with its description text."""
    class Meta:
        model = Document
        fields = ('description', 'document', )
class nameForm(forms.Form):
    """Plain form holding a single text input used to rename an upload."""
    rename=forms.CharField()
# class FileForm(forms.ModelForm):
# class Meta:
# model = File
# fields = ('filename',)
# file = forms.FileField()
# pid = forms.CharField(max_length=20)
# name = forms.CharField(max_length=20)
# sex = forms.CharField()
# age = forms.IntegerField()
# mp = forms.IntegerField()
# scanType = forms.CharField(max_length=10)
# fracture = forms.IntegerField()
# tscore = forms.CharField()
# zscore = forms.CharField()
# region = forms.CharField()
# lva = forms.CharField()
# apspine = forms.CharField()
# dualfemur = forms.CharField()
# combination = forms.CharField() | 0 | 138 | 45 |
cd9cb66122591a5591685af112fa2e4503ad0c79 | 5,119 | py | Python | gene_finding/genes_ppi_enrichr.py | ddhostallero/cxplain | c17a119faa384ffd2ca01529d470df2bd4b16813 | [
"MIT"
] | null | null | null | gene_finding/genes_ppi_enrichr.py | ddhostallero/cxplain | c17a119faa384ffd2ca01529d470df2bd4b16813 | [
"MIT"
] | null | null | null | gene_finding/genes_ppi_enrichr.py | ddhostallero/cxplain | c17a119faa384ffd2ca01529d470df2bd4b16813 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pathway_file = '../../drp-data/pathways/9606.enrichr_pathway.edge'
pathway = pd.read_csv(pathway_file, sep='\t', header=None)
print("pathways:", pathway[0].nunique())
print("pathway genes:", pathway[1].nunique())
gsc_filtered = '../../KnowEng_GSC/GSC_10mod/drawr_filtered/DraWR_GSC_Enrichr_STRINGExp.xlsx'
ppi_file = '../../drp-data/pathways/9606.STRING_experimental.edge'
ppi = pd.read_csv(ppi_file, sep='\t', header=None)
print("PPI original edges:", len(ppi))
ppi['norm_score'] = ppi[2]/ppi[2].max()
ppi = ppi.loc[ppi['norm_score'] > 0.5]
print("PPI filtered edges:", len(ppi))
nodes = list(set(ppi[0]).union(set(ppi[1])))
print("PPI nodes:", len(nodes) )
folder = 'CX_ens10'
mean_attribution_file = 'results/CX_ens10/all_attributions.csv'
feature_attr = pd.read_csv(mean_attribution_file, index_col=0)
top_genes_file = 'results/CX_ens10/top_genes_mean_aggregation_info.xlsx'
writer_a = pd.ExcelWriter('results/%s/one_hop.xlsx'%folder, engine='xlsxwriter')
drugs = [
'bleomycin',
'cisplatin',
'cyclophosphamide',
'docetaxel',
'doxorubicin',
'etoposide',
'gemcitabine',
'irinotecan',
'oxaliplatin',
'paclitaxel',
'pemetrexed',
'tamoxifen',
'temozolomide',
'vinorelbine']
# use dictionary coz it's faster
# Build the HGNC -> Ensembl id lookup table from the mapping file.
conv_file = '../../drp-data/lists/hgnc2ensembl.txt'
conv_table = {}
# Use a context manager so the handle is closed deterministically (the
# original left the file open for the remainder of the script).
with open(conv_file, 'r') as f:
    for line in f:
        fields = line.strip().split(',')
        # Skip rows with no Ensembl id in the second column.
        if fields[1] != "":
            conv_table[fields[0]] = fields[1]
# print(conv_table)
for drug in drugs:
gsc_pathways = pd.read_excel(gsc_filtered, sheet_name=drug, index_col='property_gene_set_id')
pathway_genes = pathway.loc[pathway[0].isin(gsc_pathways.index)][1].unique()
top_features = pd.read_excel(top_genes_file, sheet_name=drug, index_col='ensembl')
one_hop_from_top_feats_left = ppi.loc[ppi[0].isin(top_features.index)][1]
one_hop_from_top_feats_right = ppi.loc[ppi[1].isin(top_features.index)][0]
one_hop_from_top_feats = set(one_hop_from_top_feats_left).union(set(one_hop_from_top_feats_right))
one_hop_from_pathway_left = ppi.loc[ppi[0].isin(pathway_genes)][1]
one_hop_from_pathway_right = ppi.loc[ppi[1].isin(pathway_genes)][0]
one_hop_from_pathway = set(one_hop_from_pathway_left).union(set(one_hop_from_pathway_right))
one_hop = one_hop_from_top_feats.union(one_hop_from_pathway)
nodes_of_interest = set(top_features.index).union(set(pathway_genes)).union(one_hop)
features = feature_attr[drug].sort_values(ascending=False).index
ranks = pd.Series(range(1, len(features) + 1), index=features)
paths = list(gsc_pathways.index)
cols = ['hgnc', 'is_feature', 'attribution', 'rank',
'is_top_feat', 'is_1H_from_pathway',
'is_1H_from_top_feat'] + paths
df = pd.DataFrame(columns=cols)
print(drug)
print('nodes of interest:', len(nodes_of_interest))
for node in nodes_of_interest:
info = {"hgnc": node}
if node in conv_table:
info['hgnc'] = conv_table[node]
if node in features:
info['attribution'] = feature_attr.loc[node][drug]
info['rank'] = ranks[node]
info['is_feature'] = 1
else:
info['attribution'] = np.nan
info['rank'] = np.nan
info['is_feature'] = 0
info['is_1H_from_pathway'] = 1*(node in one_hop_from_pathway)
info['is_1H_from_top_feat'] = 1*(node in one_hop_from_top_feats)
info['is_top_feat'] = 1*(node in top_features.index)
for path in paths:
info[path] = 1*(node in (pathway.loc[pathway[0] == path][1].unique()))
df.loc[node] = info
df['score'] = 0.5*(df['is_1H_from_pathway'] + df['is_1H_from_top_feat']) + df['is_top_feat'] + 1*(df[paths].sum(axis=1) > 0)
# df['score'] = df['is_1H_from_top_feat']*0.5*(df['is_1H_from_top_feat']==0) + df['is_1H_from_top_feat'] \
# + 1*(df[paths].sum(axis=1) > 0) + (df[paths].sum(axis=1) == 0)*0.5*df['is_1H_from_pathway']
df = df.sort_values(['score', 'rank'],ascending=[False, True])
# df = df.sort_values('rank')
df.to_excel(writer_a, sheet_name=drug)
desc = {
'hgnc':'HGNC gene name',
'is_feature': 'gene is used as a feature by the model',
'attribution': 'attribution value for the feature/gene',
'rank': 'ranking of the attribution value for the feature/gene',
'is_top_feat': '1 if the feature/gene is in the top features found by kneedle method',
'is_1H_from_pathway': '1 if the gene is an immediate neighbor of a member of any of our pathways-of-interest',
'is_1H_from_top_feat': '1 if the gene is an immediate neighbor of a top feature/gene',
'score': 'arbitrary scoring for sorting (0.5*(is_1H_from_pathway+is_1H_from_top_feat) + is_top_feat + is_a_pathway_member)',
'other columns': '1 if the gene is a member of the specific pathway'
}
# df = pd.Series(df)
df = pd.DataFrame(index=desc.keys())
df['description'] = desc.values()
df.to_excel(writer_a, sheet_name='legend')
writer_a.save()
| 35.548611 | 128 | 0.676109 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pathway_file = '../../drp-data/pathways/9606.enrichr_pathway.edge'
pathway = pd.read_csv(pathway_file, sep='\t', header=None)
print("pathways:", pathway[0].nunique())
print("pathway genes:", pathway[1].nunique())
gsc_filtered = '../../KnowEng_GSC/GSC_10mod/drawr_filtered/DraWR_GSC_Enrichr_STRINGExp.xlsx'
ppi_file = '../../drp-data/pathways/9606.STRING_experimental.edge'
ppi = pd.read_csv(ppi_file, sep='\t', header=None)
print("PPI original edges:", len(ppi))
ppi['norm_score'] = ppi[2]/ppi[2].max()
ppi = ppi.loc[ppi['norm_score'] > 0.5]
print("PPI filtered edges:", len(ppi))
nodes = list(set(ppi[0]).union(set(ppi[1])))
print("PPI nodes:", len(nodes) )
folder = 'CX_ens10'
mean_attribution_file = 'results/CX_ens10/all_attributions.csv'
feature_attr = pd.read_csv(mean_attribution_file, index_col=0)
top_genes_file = 'results/CX_ens10/top_genes_mean_aggregation_info.xlsx'
writer_a = pd.ExcelWriter('results/%s/one_hop.xlsx'%folder, engine='xlsxwriter')
drugs = [
'bleomycin',
'cisplatin',
'cyclophosphamide',
'docetaxel',
'doxorubicin',
'etoposide',
'gemcitabine',
'irinotecan',
'oxaliplatin',
'paclitaxel',
'pemetrexed',
'tamoxifen',
'temozolomide',
'vinorelbine']
# use dictionary coz it's faster
# Build the HGNC -> Ensembl id lookup table from the mapping file.
conv_file = '../../drp-data/lists/hgnc2ensembl.txt'
conv_table = {}
# Use a context manager so the handle is closed deterministically (the
# original left the file open for the remainder of the script).
with open(conv_file, 'r') as f:
    for line in f:
        fields = line.strip().split(',')
        # Skip rows with no Ensembl id in the second column.
        if fields[1] != "":
            conv_table[fields[0]] = fields[1]
# print(conv_table)
for drug in drugs:
gsc_pathways = pd.read_excel(gsc_filtered, sheet_name=drug, index_col='property_gene_set_id')
pathway_genes = pathway.loc[pathway[0].isin(gsc_pathways.index)][1].unique()
top_features = pd.read_excel(top_genes_file, sheet_name=drug, index_col='ensembl')
one_hop_from_top_feats_left = ppi.loc[ppi[0].isin(top_features.index)][1]
one_hop_from_top_feats_right = ppi.loc[ppi[1].isin(top_features.index)][0]
one_hop_from_top_feats = set(one_hop_from_top_feats_left).union(set(one_hop_from_top_feats_right))
one_hop_from_pathway_left = ppi.loc[ppi[0].isin(pathway_genes)][1]
one_hop_from_pathway_right = ppi.loc[ppi[1].isin(pathway_genes)][0]
one_hop_from_pathway = set(one_hop_from_pathway_left).union(set(one_hop_from_pathway_right))
one_hop = one_hop_from_top_feats.union(one_hop_from_pathway)
nodes_of_interest = set(top_features.index).union(set(pathway_genes)).union(one_hop)
features = feature_attr[drug].sort_values(ascending=False).index
ranks = pd.Series(range(1, len(features) + 1), index=features)
paths = list(gsc_pathways.index)
cols = ['hgnc', 'is_feature', 'attribution', 'rank',
'is_top_feat', 'is_1H_from_pathway',
'is_1H_from_top_feat'] + paths
df = pd.DataFrame(columns=cols)
print(drug)
print('nodes of interest:', len(nodes_of_interest))
for node in nodes_of_interest:
info = {"hgnc": node}
if node in conv_table:
info['hgnc'] = conv_table[node]
if node in features:
info['attribution'] = feature_attr.loc[node][drug]
info['rank'] = ranks[node]
info['is_feature'] = 1
else:
info['attribution'] = np.nan
info['rank'] = np.nan
info['is_feature'] = 0
info['is_1H_from_pathway'] = 1*(node in one_hop_from_pathway)
info['is_1H_from_top_feat'] = 1*(node in one_hop_from_top_feats)
info['is_top_feat'] = 1*(node in top_features.index)
for path in paths:
info[path] = 1*(node in (pathway.loc[pathway[0] == path][1].unique()))
df.loc[node] = info
df['score'] = 0.5*(df['is_1H_from_pathway'] + df['is_1H_from_top_feat']) + df['is_top_feat'] + 1*(df[paths].sum(axis=1) > 0)
# df['score'] = df['is_1H_from_top_feat']*0.5*(df['is_1H_from_top_feat']==0) + df['is_1H_from_top_feat'] \
# + 1*(df[paths].sum(axis=1) > 0) + (df[paths].sum(axis=1) == 0)*0.5*df['is_1H_from_pathway']
df = df.sort_values(['score', 'rank'],ascending=[False, True])
# df = df.sort_values('rank')
df.to_excel(writer_a, sheet_name=drug)
desc = {
'hgnc':'HGNC gene name',
'is_feature': 'gene is used as a feature by the model',
'attribution': 'attribution value for the feature/gene',
'rank': 'ranking of the attribution value for the feature/gene',
'is_top_feat': '1 if the feature/gene is in the top features found by kneedle method',
'is_1H_from_pathway': '1 if the gene is an immediate neighbor of a member of any of our pathways-of-interest',
'is_1H_from_top_feat': '1 if the gene is an immediate neighbor of a top feature/gene',
'score': 'arbitrary scoring for sorting (0.5*(is_1H_from_pathway+is_1H_from_top_feat) + is_top_feat + is_a_pathway_member)',
'other columns': '1 if the gene is a member of the specific pathway'
}
# df = pd.Series(df)
df = pd.DataFrame(index=desc.keys())
df['description'] = desc.values()
df.to_excel(writer_a, sheet_name='legend')
writer_a.save()
| 0 | 0 | 0 |
d39c4f37b45ab8280f00f4481b2167d271367281 | 215 | py | Python | libDep.py | TheSinOfSloth/Remote_switchOff | 7ae27a712a5702536bc51257374c7649375d6d92 | [
"MIT"
] | 1 | 2020-04-06T14:13:57.000Z | 2020-04-06T14:13:57.000Z | libDep.py | 1hef001/Automatic_switchOff | 7ae27a712a5702536bc51257374c7649375d6d92 | [
"MIT"
] | null | null | null | libDep.py | 1hef001/Automatic_switchOff | 7ae27a712a5702536bc51257374c7649375d6d92 | [
"MIT"
] | null | null | null | import os
import platform
if(platform.system() == 'Windows'):
FILENAME = 'terminateFile.bat'
elif(platform.system() == 'Linux'):
FILENAME = 'idle.sh'
TIME = 900
PATH = os.getcwd()
# print(platform.system()) | 21.5 | 35 | 0.669767 | import os
import platform
if(platform.system() == 'Windows'):
FILENAME = 'terminateFile.bat'
elif(platform.system() == 'Linux'):
FILENAME = 'idle.sh'
TIME = 900
PATH = os.getcwd()
# print(platform.system()) | 0 | 0 | 0 |
0b8766629c87c39c0ef02b967e9144a3b7d6946f | 1,050 | py | Python | deeptrack/benchmarks/test_fluorescence.py | HarshithBachimanchi/DeepTrack-2.0 | 5983f5224b75aef4ce3932662bd15723f13841a0 | [
"MIT"
] | 1 | 2022-03-18T17:25:18.000Z | 2022-03-18T17:25:18.000Z | deeptrack/benchmarks/test_fluorescence.py | HarshithBachimanchi/DeepTrack-2.0 | 5983f5224b75aef4ce3932662bd15723f13841a0 | [
"MIT"
] | null | null | null | deeptrack/benchmarks/test_fluorescence.py | HarshithBachimanchi/DeepTrack-2.0 | 5983f5224b75aef4ce3932662bd15723f13841a0 | [
"MIT"
] | null | null | null | import sys
import numpy as np
import itertools
import deeptrack as dt
import pytest
u = dt.units
@pytest.mark.parametrize(
"size,gpu",
[
*itertools.product(
(64, 256, 512),
[True, False],
)
],
)
| 21.875 | 81 | 0.625714 | import sys
import numpy as np
import itertools
import deeptrack as dt
import pytest
u = dt.units
def create_pipeline(output_region=(0, 0, 128, 128), num_particles=1):
    """Build a fluorescence pipeline imaging randomly positioned spheres.

    Spheres have radius 2e-6, refractive index 1.45 and z=10; positions are
    drawn from a normal distribution scaled by the output-region extent.
    """
    imaging_optics = dt.Fluorescence(output_region=output_region)
    sphere = dt.Sphere(
        radius=2e-6,
        refractive_index=1.45,
        z=10,
        position=lambda: output_region[2:] * np.random.randn(2),
    )
    # `^` replicates the scatterer num_particles times before imaging.
    return imaging_optics(sphere ^ num_particles)
# Benchmark every combination of image size and CPU/GPU backend.
@pytest.mark.parametrize(
    "size,gpu",
    [
        *itertools.product(
            (64, 256, 512),
            [True, False],
        )
    ],
)
def test_simulate_mie(size, gpu, benchmark):
    """Benchmark fluorescence simulation (pytest-benchmark fixture)."""
    # Group results by image size so CPU/GPU timings are compared directly.
    benchmark.group = f"fluorescence_{size}_px_image"
    benchmark.name = f"test_fluorescence_{'gpu' if gpu else 'cpu'}"
    if gpu:
        dt.config.enable_gpu()
    else:
        dt.config.disable_gpu()
    pipeline = create_pipeline(output_region=(0, 0, size, size), num_particles=1)
    # One cold run for performance
    pipeline.update()()
    benchmark(
        lambda: pipeline.update()(),
    )
| 753 | 0 | 45 |
5d71ad984bf074d92f237d6404d33add30a5bd3f | 523 | py | Python | scripts/euca2ools.py | nii-clouds/Literate-Computing-Basics | 9bc03edafd503c2215f0e9028389e3ce31a418ec | [
"CC-BY-4.0"
] | 13 | 2016-07-24T07:03:50.000Z | 2021-07-26T07:50:56.000Z | scripts/euca2ools.py | nii-clouds/Literate-Computing-Basics | 9bc03edafd503c2215f0e9028389e3ce31a418ec | [
"CC-BY-4.0"
] | 1 | 2016-07-14T03:06:01.000Z | 2016-07-14T03:06:01.000Z | scripts/euca2ools.py | nii-clouds/Literate-Computing-Basics | 9bc03edafd503c2215f0e9028389e3ce31a418ec | [
"CC-BY-4.0"
] | 5 | 2017-01-10T16:15:01.000Z | 2019-08-29T06:21:12.000Z | # helper functions for Euca2ools
import subprocess
import os
import time
| 29.055556 | 60 | 0.544933 | # helper functions for Euca2ools
import subprocess
import os
import time
def run_euca2ools(envfile, cmd):
    """Run *cmd* with environment variables sourced from a eucarc-style file.

    Args:
        envfile: path (``~`` is expanded) to a shell script whose
            ``export NAME=value`` lines are parsed; a surrounding double
            quote pair on the value is stripped.
        cmd: argument list passed to subprocess.

    Returns:
        The command's stdout decoded as text and split into lines.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    env = os.environ.copy()
    # Close the env file deterministically (the original leaked the handle).
    with open(os.path.expanduser(envfile), 'r') as f:
        for line in f:
            line = line.strip()
            if not line.startswith('export '):
                continue
            # partition (unlike split) tolerates '=' inside the value.
            name, _, value = line[len('export '):].partition('=')
            if value.startswith('"'):
                value = value[1:-1]
            env[name.strip()] = value
    # universal_newlines=True decodes stdout to str so split('\n') works on
    # Python 3 (check_output otherwise returns bytes and split would raise).
    return subprocess.check_output(cmd, env=env, universal_newlines=True).split('\n')
| 426 | 0 | 23 |
eb75ac8fdab6ae6405b604f70c5f5ada8b3bf5a7 | 2,186 | py | Python | Psc2/songs/claudius_irae.py | psc-g/Psc2 | 6676fc67263c9268ff65784d583cb838cfd42c28 | [
"Apache-2.0"
] | 24 | 2018-10-09T22:36:43.000Z | 2021-10-15T16:34:42.000Z | Psc2/songs/claudius_irae.py | psc-g/Psc2 | 6676fc67263c9268ff65784d583cb838cfd42c28 | [
"Apache-2.0"
] | 2 | 2018-12-07T20:01:13.000Z | 2018-12-11T15:19:23.000Z | Psc2/songs/claudius_irae.py | psc-g/Psc2 | 6676fc67263c9268ff65784d583cb838cfd42c28 | [
"Apache-2.0"
] | 5 | 2018-12-07T15:37:57.000Z | 2020-03-28T11:59:40.000Z | """ClaudiusIrae song logic."""
import OSC
from Psc2.songs import song
from Psc2.modes import bass_doubler
from Psc2.modes import looper
class ClaudiusIrae(song.Song):
"""This defines the logic for ClaudiusIrae.
For most of the song it is in bass-doubling mode, except for the solo section
where the bass is automated.
"""
def __init__(self, client):
"""Initialize the ClaudiusIrae Song.
Args:
client: OSCClient, used to send messages for playback.
"""
self.client = client
self.eighth_note_duration = 0.5
self.avg_velocity = 60
self.modes = {
'doubler': bass_doubler.BassDoubler(client, highest_bass_note=54),
'solo': looper.Looper(client,
[[(45, 5, 5), (52, 3, 3), (50, 5, 5), (57, 3, 3),
(52, 5, 5), (59, 3, 3), (61, 5, 5), (57, 3, 3),
(50, 5, 5), (57, 3, 3), (53, 5, 5), (57, 3, 3),
(56, 5, 5), (52, 3, 3), (49, 5, 5), (52, 3, 3)
]],
eigths_per_tap=4)
}
self.current_mode = 'doubler'
self.modes_to_process = ['doubler'] # Add 'solo' to auto-detect solo sect.
self.mode_detected = None
def process_program(self, program):
"""Process program hits (footpedal)."""
if self.current_mode == 'solo':
self.modes['solo'].increment_loop()
elif program == 0: # Tap to set tempo.
self.modes['solo'].set_tempo()
else: # Start bass for solo.
msg = OSC.OSCMessage()
msg.setAddress('/allnotesoff')
self.client.send(msg)
self.modes_to_process = []
self.current_mode = 'solo'
self.modes['solo'].start_looper_thread()
| 34.15625 | 79 | 0.588747 | """ClaudiusIrae song logic."""
import OSC
from Psc2.songs import song
from Psc2.modes import bass_doubler
from Psc2.modes import looper
class ClaudiusIrae(song.Song):
  """This defines the logic for ClaudiusIrae.

  For most of the song it is in bass-doubling mode, except for the solo section
  where the bass is automated.
  """

  def __init__(self, client):
    """Initialize the ClaudiusIrae Song.

    Args:
      client: OSCClient, used to send messages for playback.
    """
    self.client = client
    self.eighth_note_duration = 0.5
    self.avg_velocity = 60
    # Two playback modes: live bass doubling, and a looped bass line for the
    # solo. The loop is a list of triples (pitch plus two more values —
    # presumably velocities/durations; confirm against looper.Looper).
    self.modes = {
        'doubler': bass_doubler.BassDoubler(client, highest_bass_note=54),
        'solo': looper.Looper(client,
                              [[(45, 5, 5), (52, 3, 3), (50, 5, 5), (57, 3, 3),
                                (52, 5, 5), (59, 3, 3), (61, 5, 5), (57, 3, 3),
                                (50, 5, 5), (57, 3, 3), (53, 5, 5), (57, 3, 3),
                                (56, 5, 5), (52, 3, 3), (49, 5, 5), (52, 3, 3)
                               ]],
                              eigths_per_tap=4)
    }
    self.current_mode = 'doubler'
    self.modes_to_process = ['doubler']  # Add 'solo' to auto-detect solo sect.
    self.mode_detected = None

  def process_note(self, pitch, velocity, time):
    """Forward a note-on to all active modes; detect the end of the solo."""
    # When the looper has stopped playing, fall back to bass doubling.
    if self.current_mode == 'solo' and not self.modes['solo'].playing:
      self.current_mode = 'post solo'
      self.modes_to_process = ['doubler']
    for mode in self.modes_to_process:
      self.modes[mode].process_note(pitch, velocity)

  def process_note_off(self, pitch, velocity, time):
    """Forward a note-off to all active modes."""
    for mode in self.modes_to_process:
      self.modes[mode].process_note_off(pitch, velocity)

  def process_program(self, program):
    """Process program hits (footpedal)."""
    if self.current_mode == 'solo':
      self.modes['solo'].increment_loop()
    elif program == 0:  # Tap to set tempo.
      self.modes['solo'].set_tempo()
    else:  # Start bass for solo.
      # Silence everything, then hand playback over to the automated looper.
      msg = OSC.OSCMessage()
      msg.setAddress('/allnotesoff')
      self.client.send(msg)
      self.modes_to_process = []
      self.current_mode = 'solo'
      self.modes['solo'].start_looper_thread()
| 393 | 0 | 50 |
a3642725c5e6f2e8169b1cd241aec6bc6bae5734 | 3,035 | py | Python | exercises/demo.py | Zaph-x/DistributedExercisesAAU | 40ca2716e252aca46f840405a89fac3ffd37f122 | [
"MIT"
] | null | null | null | exercises/demo.py | Zaph-x/DistributedExercisesAAU | 40ca2716e252aca46f840405a89fac3ffd37f122 | [
"MIT"
] | null | null | null | exercises/demo.py | Zaph-x/DistributedExercisesAAU | 40ca2716e252aca46f840405a89fac3ffd37f122 | [
"MIT"
] | null | null | null | import random
from emulators.Device import Device
from emulators.Medium import Medium
from emulators.MessageStub import MessageStub
# We extend the MessageStub here for the message-types we wish to communicate
# the constructor-function takes the source and destination as arguments. These are used for "routing" but also
# for pretty-printing. Here we also take the specific flag of "is_ping"
# remember to implement the __str__ method such that the debug of the framework works!
# This class extends on the basic Device class. We will implement the protocol in the run method
# The constructor must have exactly this form.
# this method implements the actual algorithm
# for pretty-printing and debugging, implement this function
| 43.357143 | 115 | 0.639868 | import random
from emulators.Device import Device
from emulators.Medium import Medium
from emulators.MessageStub import MessageStub
# We extend the MessageStub here for the message-types we wish to communicate
class PingMessage(MessageStub):
    """Message for the ping-pong demo, carrying a ping/pong flag."""

    def __init__(self, sender: int, destination: int, is_ping: bool):
        # MessageStub's constructor must run first: it records source and
        # destination, used both for routing and for pretty-printing.
        super().__init__(sender, destination)
        self.is_ping = is_ping

    def __str__(self):
        # The framework's debug output relies on this representation.
        return f'{self.source} -> {self.destination} : ping? {self.is_ping}'
# This class extends on the basic Device class. We will implement the protocol in the run method
# This class extends on the basic Device class. We will implement the protocol
# in the run method.
class PingPong(Device):
    """Device that repeatedly exchanges ping/pong state with random peers."""

    # The constructor must have exactly this form.
    def __init__(self, index: int, number_of_devices: int, medium: Medium):
        # forward the constructor arguments to the super-constructor
        super().__init__(index, number_of_devices, medium)
        # Devices with an EVEN index start in the "ping" state (the original
        # comment said "uneven", contradicting the code below).
        self._is_ping = (index % 2) == 0
        self._rec_ping = 0
        self._rec_pong = 0

    # this method implements the actual algorithm
    def run(self):
        # repeat the protocol 10 times and then stop
        for _repetition in range(0, 10):
            # Send our current state to one random device. Valid indices are
            # 0 .. number_of_devices()-1, so use randrange; the original
            # randint(0, n) upper bound was inclusive and could address a
            # non-existent device.
            destination = random.randrange(self.number_of_devices())
            message = PingMessage(self.index(), destination, self._is_ping)
            # we send the message via a "medium"
            self.medium().send(message)
            # drain incoming messages; the medium returns None when empty
            while True:
                ingoing = self.medium().receive()
                if ingoing is None:
                    break
                # keep some statistics
                if ingoing.is_ping:
                    self._rec_ping += 1
                else:
                    self._rec_pong += 1
                # ignore messages matching our own state; otherwise flip
                if self._is_ping != ingoing.is_ping:
                    # we were ping and got pong, or were pong and got ping
                    self._is_ping = ingoing.is_ping
            # this call is only used for synchronous networks
            self.medium().wait_for_next_round()

    # for pretty-printing and debugging, implement this function
    def print_result(self):
        print(f'\tDevice {self.index()} got pings: {self._rec_ping} and pongs: {self._rec_pong}')
| 2,083 | 12 | 174 |
e41a1c4a736dca7523e7626d2638116f83876bd8 | 19,910 | py | Python | bike/refactor/test_renameMethod.py | debiancn/bicyclerepair | dd054e802d6d8ad80baeccee0396da68144f2a26 | [
"ICU"
] | 2 | 2020-05-29T06:31:53.000Z | 2020-12-19T21:49:25.000Z | bike/refactor/test_renameMethod.py | debiancn/bicyclerepair | dd054e802d6d8ad80baeccee0396da68144f2a26 | [
"ICU"
] | null | null | null | bike/refactor/test_renameMethod.py | debiancn/bicyclerepair | dd054e802d6d8ad80baeccee0396da68144f2a26 | [
"ICU"
] | null | null | null | #!/usr/bin/env python
import setpath
import unittest
from rename import rename
import compiler
from bike import testdata
from bike.testutils import*
from bike.transformer.save import save
# Generic tests. These tests are designed to be run in the context of a ui
# and in a package hierarchy structure
# tests that cover stuff not renamed automatically
# (I.e. are renamed after user manually expresses desire to do so)
# template method
MethodTestdata = trimLines("""
class TheClass:
def theMethod(self):
pass
def differentMethod(self):
pass
class DifferentClass:
def theMethod(self):
pass
""")
if __name__ == "__main__":
unittest.main()
| 28.002813 | 140 | 0.547212 | #!/usr/bin/env python
import setpath
import unittest
from rename import rename
import compiler
from bike import testdata
from bike.testutils import*
from bike.transformer.save import save
class RenameMethodTests:
def test_renamesTheMethod(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
pass
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
pass
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_doesntRenameMethodOfSameNameOnOtherClasses(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
pass
class b:
def theMethod(self):
pass
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
pass
class b:
def theMethod(self):
pass
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_doesntRenameOtherMethodsOfSameClass(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
a=b
def aMethod(self):
pass
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
a=b
def aMethod(self):
pass
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodWhenClassNestedInFunction(self):
srcBefore=trimLines("""
def theFunction():
class TheClass:
def theMethod(self):
pass
""")
srcAfter=trimLines("""
def theFunction():
class TheClass:
def newName(self):
pass
""")
src = self.rename(srcBefore,3,12,"newName")
self.assertEqual(srcAfter,src)
def test_doesntBarfOnInheritanceHierarchies(self):
srcBefore=trimLines("""
from b.bah import DifferentClass
class TheClass(foo.bah):
def theMethod(self):
pass
""")
src = self.rename(srcBefore,2,8,"newName")
def test_renamesMethodWhenMethodCallFromOtherMethodInSameClass(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
pass
def anotherMethod(self):
self.theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
pass
def anotherMethod(self):
self.newName()
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_doesntBarfOnNestedClasses(self):
srcBefore=trimLines("""
class TheClass:
class AnotherClass:
pass
def theMethod(self):
pass
""")
src = self.rename(srcBefore,4,8,"newName")
def test_renamesMethodWhenBaseClassesArentInAST(self):
srcBefore=trimLines("""
class TheClass(notInAst):
def theMethod(self):
pass
""")
srcAfter=trimLines("""
class TheClass(notInAst):
def newName(self):
pass
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodInRelatedClasses(self):
srcBefore=trimLines("""
class root:
def theMethod(self):
pass
class a(root):
def theMethod(self):
pass
class b(root):
pass
class TheClass(b):
def theMethod(self):
pass
""")
srcAfter=trimLines("""
class root:
def newName(self):
pass
class a(root):
def newName(self):
pass
class b(root):
pass
class TheClass(b):
def newName(self):
pass
""")
src = self.rename(srcBefore,13,8,"newName")
self.assertEqual(srcAfter,src)
def test_renameMethodDoesntBarfOnNoneAsDefaultArgToMethod(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self, root, flist, stack=None):
pass
""")
src = self.rename(srcBefore,2,8,"newName")
class RenameMethodTests_ImportsClass:
def test_renamesMethodOnDerivedClassInstance(self):
srcBefore = trimLines("""
from b.bah import TheClass as BaseClass
class DerivedClass(BaseClass):
pass
class DerivedDerivedClass(DerivedClass):
def theMethod(self):
print 'hello'
""")
srcAfter = trimLines("""
from b.bah import TheClass as BaseClass
class DerivedClass(BaseClass):
pass
class DerivedDerivedClass(DerivedClass):
def newName(self):
print 'hello'
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
class RenameMethodReferenceTests:
# Generic tests. These tests are designed to be run in the context of a ui
# and in a package hierarchy structure
def test_doesntBarfWhenConfrontedWithComplexReturnTypes(self):
src = trimLines("""
import a
class TheClass:
def theMethod(self):
pass
def bah():
return a[35]
b = bah()
b.theMethod()
""")
self.rename(src,3,8,"newName")
def test_doesntbarfWhenCallMadeOnInstanceReturnedFromFnCall(self):
srcBefore=trimLines("""
from foo import e
class TheClass:
def theMethod(self):
pass
ast = e().f(src)
""")
self.rename(srcBefore,3,8,"newName")
def test_doesntStackOverflowOnRecursiveFunctions(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
pass
def foo(a):
return foo(a)
""")
self.rename(srcBefore,2,8,"newName")
def test_renamesMethodReferenceOfInstanceCreatedInParentScopeAfterFunction(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
pass
a = TheClass()
def foo():
a.theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
pass
a = TheClass()
def foo():
a.newName()
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodReferenceOfInstanceObtainedByCallingFunction(self):
srcBefore=trimLines("""
class TheClass:
def theMethod():
pass
def foo():
b = TheClass()
return b
a = foo()
a.theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName():
pass
def foo():
b = TheClass()
return b
a = foo()
a.newName()
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodReferenceOfInstanceCreatedInAnotherFunction(self):
srcBefore=trimLines("""
class TheClass:
def theMethod():
pass
def bah():
return TheClass()
def foo():
a = bah()
a.theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName():
pass
def bah():
return TheClass()
def foo():
a = bah()
a.newName()
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodReferenceOfInstanceCreatedInSubsequentFunction(self):
srcBefore = trimLines("""
class TheClass:
def theMethod():
pass
class NotTheClass:
def theMethod():
pass
def foo():
a = bah()
a.theMethod()
def bah():
return TheClass()
""")
srcAfter=trimLines("""
class TheClass:
def newName():
pass
class NotTheClass:
def theMethod():
pass
def foo():
a = bah()
a.newName()
def bah():
return TheClass()
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodReferenceOnInstanceThatIsAnAttributeOfSelf(self):
srcBefore = trimLines("""
class TheClass:
def theMethod(self):
pass
class AnotherClass:
def __init__(self):
self.a = TheClass()
def anotherFn(self):
self.a.theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
pass
class AnotherClass:
def __init__(self):
self.a = TheClass()
def anotherFn(self):
self.a.newName()
""")
src = self.rename(srcBefore,2,8,"newName")
self.assertEqual(srcAfter,src)
def test_doesntBarfOnGetattrThatItCantDeduceTypeOf(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self):
pass
a = TheClass
a.b.bah = 3
""")
self.rename(srcBefore,2,8,"newName")
class RenameMethodReferenceTests_ImportsClass:
def test_renamesReferenceOfClassImportedAsAnotherName(self):
srcBefore=trimLines("""
from b.bah import TheClass as MyTheClass
def foo():
a = MyTheClass()
a.theMethod()
""")
srcAfter=trimLines("""
from b.bah import TheClass as MyTheClass
def foo():
a = MyTheClass()
a.newName()
""")
src = self.renameMethod(srcBefore,2,8, "newName")
self.assertEqual(srcAfter,src)
def test_renamesReferenceWhenObjectCreationAndReferenceInModuleScope(self):
srcBefore=trimLines("""
from b.bah import TheClass
a = TheClass()
a.theMethod()
""")
srcAfter=trimLines("""
from b.bah import TheClass
a = TheClass()
a.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
def test_renamesReferenceWhenObjectCreatedInSameFunctionAsReference(self):
srcBefore=trimLines("""
import b.bah
def foo():
a = b.bah.TheClass()
a.theMethod()
""")
srcAfter=trimLines("""
import b.bah
def foo():
a = b.bah.TheClass()
a.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
def test_doesntrenameDifferentMethodReferenceWhenObjectCreatedInSameScope(self):
srcBefore=trimLines("""
import b.bah.TheClass
def foo():
a = b.bah.TheClass()
a.theMethod()
""")
src = self.renameMethod(srcBefore, 4,8, "newName")
self.assertEqual(srcBefore,src)
def test_doesntrenameMethodReferenceWhenDifferentObjectCreatedInSameScope(self):
srcBefore=trimLines("""
import b.bah.TheClass
def foo():
a = b.bah.TheClass()
a.theMethod()
""")
src = self.renameMethod(srcBefore, 8,8,"newName")
self.assertEqual(srcBefore,src)
def test_renamesReferenceOfImportedClass(self):
srcBefore=trimLines("""
import b.bah
def foo():
a = b.bah.TheClass()
a.theMethod()
""")
srcAfter=trimLines("""
import b.bah
def foo():
a = b.bah.TheClass()
a.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
def test_doesntRenameReferenceOfDifferentImportedClass(self):
srcBefore=trimLines("""
from b.bah import DifferentClass
def foo():
a = b.bah.TheClass()
a.theMethod()
""")
src = self.renameMethod(srcBefore, 8,8,
"newName")
self.assertEqual(srcBefore,src)
def test_renamesReferenceOfClassImportedWithFromClause(self):
srcBefore=trimLines("""
from b.bah import TheClass
def foo():
a = TheClass()
a.theMethod()
""")
srcAfter=trimLines("""
from b.bah import TheClass
def foo():
a = TheClass()
a.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
def test_doesntrenameReferenceOfClassImportedWithDifferentAsClause(self):
srcBefore = trimLines("""
from b.bah import TheClass as MyClass
def foo():
a = TheClass()
a.theMethod()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcBefore,src)
def test_renamesReferenceOfClassImportedWithFromFooImportStar(self):
srcBefore=trimLines("""
from b.bah import *
a = TheClass()
a.theMethod()
""")
srcAfter=trimLines("""
from b.bah import *
a = TheClass()
a.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
def test_renamesMethodReferenceOfInstanceCreatedInParentScope(self):
srcBefore=trimLines("""
from b.bah import TheClass
a = TheClass()
def foo():
a.theMethod()
""")
srcAfter=trimLines("""
from b.bah import TheClass
a = TheClass()
def foo():
a.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
def test_doesntRenameMethodWhenObjectCreatedInChildScopeToMethodReference(self):
srcBefore = trimLines("""
from b.bah import TheClass
a = AnotherClass()
def foo():
a = TheClass()
a.theMethod()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcBefore,src)
def test_renamesReferenceOnDerivedClassInstance(self):
srcBefore=trimLines("""
import b
class DerivedClass(b.bah.TheClass):
pass
class DerivedDerivedClass(DerivedClass):
pass
theInstance = DerivedDerivedClass()
theInstance.theMethod()
""")
srcAfter=trimLines("""
import b
class DerivedClass(b.bah.TheClass):
pass
class DerivedDerivedClass(DerivedClass):
pass
theInstance = DerivedDerivedClass()
theInstance.newName()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
# tests that cover stuff not renamed automatically
# (I.e. are renamed after user manually expresses desire to do so)
class RenameMethodAfterPromptTests:
def test_renamesReferenceWhenMethodCallDoneOnInstanceCreation(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self): pass
TheClass().theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName(self): pass
TheClass().newName()
""")
src = self.renameMethod(srcBefore,2,8, "newName")
self.assertEqual(srcAfter,src)
def test_renamesReferenceInMiddleOfBiggerCompoundCall(self):
srcBefore = trimLines("""
class TheClass:
def theMethod(self): return AnotherClass()
TheClass().theMethod().anotherMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName(self): return AnotherClass()
TheClass().newName().anotherMethod()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
class TestRenameMethodWithSingleModule(BRMTestCase, RenameMethodTests, RenameMethodReferenceTests):
# template method
def rename(self, src, line, col, newname):
try:
createPackageStructure(src, "pass")
rename(pkgstructureFile1,line,col,newname)
save()
return file(pkgstructureFile1).read()
finally:
removePackageStructure()
class TestRenameMethodWithDirectoryStructure(RenameMethodTests, RenameMethodReferenceTests, BRMTestCase):
def rename(self, src, line, col, newname):
try:
createPackageStructure("pass",src)
rename(pkgstructureFile2,line,col,newname)
save()
return file(pkgstructureFile2).read()
finally:
removePackageStructure()
class TestRenameMethodReferenceWithDirectoryStructure(BRMTestCase, RenameMethodTests_ImportsClass, RenameMethodReferenceTests_ImportsClass):
def renameMethod(self, src, line, col, newname):
try:
createPackageStructure(src,MethodTestdata)
rename(pkgstructureFile2,line,col,newname)
save()
return file(pkgstructureFile1).read()
finally:
removePackageStructure()
class TestRenameMethodStuffCorrectlyAfterPromptReturnsTrue(BRMTestCase,
RenameMethodAfterPromptTests):
def callback(self, filename, line, colbegin, colend):
return 1
def renameMethod(self, src, line, col, newname):
createPackageStructure(src, MethodTestdata)
rename(pkgstructureFile1,line,col,newname,self.callback)
save()
return file(pkgstructureFile1).read()
class TestDoesntRenameMethodIfPromptReturnsFalse(BRMTestCase):
def callback(self, filename, line, colbegin, colend):
return 0
def renameMethod(self, src, line, col, newname):
createPackageStructure(src, MethodTestdata)
rename(pkgstructureFile1,line,col,newname,self.callback)
save()
return file(pkgstructureFile1).read()
def test_doesntRenameMethodIfPromptReturnsFalse(self):
srcBefore = trimLines("""
class TheClass:
def theMethod(self):
pass
b = TheClass()
b.theMethod()
a = someFunction()
a.theMethod()
""")
srcAfter=trimLines("""
class TheClass:
def newName(self):
pass
b = TheClass()
b.newName()
a = someFunction()
a.theMethod()
""")
src = self.renameMethod(srcBefore, 2,8, "newName")
self.assertEqual(srcAfter,src)
MethodTestdata = trimLines("""
class TheClass:
def theMethod(self):
pass
def differentMethod(self):
pass
class DifferentClass:
def theMethod(self):
pass
""")
if __name__ == "__main__":
unittest.main()
| 17,269 | 528 | 1,399 |
d37465f5ca29b0c5a0c74f20169fdda80f35c186 | 718 | py | Python | care/users/api/serializers/lsg.py | CoronaSafeUP/care | 22a658bdefa0e9af3727416a5c31ff0aaa5af873 | [
"MIT"
] | null | null | null | care/users/api/serializers/lsg.py | CoronaSafeUP/care | 22a658bdefa0e9af3727416a5c31ff0aaa5af873 | [
"MIT"
] | null | null | null | care/users/api/serializers/lsg.py | CoronaSafeUP/care | 22a658bdefa0e9af3727416a5c31ff0aaa5af873 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from care.users.models import District, LocalBody, State, Ward, Block
| 20.514286 | 69 | 0.68663 | from rest_framework import serializers
from care.users.models import District, LocalBody, State, Ward, Block
class StateSerializer(serializers.ModelSerializer):
class Meta:
model = State
fields = "__all__"
class DistrictSerializer(serializers.ModelSerializer):
class Meta:
model = District
fields = "__all__"
class LocalBodySerializer(serializers.ModelSerializer):
class Meta:
model = LocalBody
fields = "__all__"
class WardSerializer(serializers.ModelSerializer):
class Meta:
model = Ward
fields = "__all__"
class BlockSerializer(serializers.ModelSerializer):
class Meta:
model = Block
fields = "__all__"
| 0 | 487 | 115 |
fc39730a027f1990317acfd6696c41b7fecb8c57 | 2,196 | py | Python | Combined Data/duplicate_addresses.py | alfredholmes/UK-Company-Data | 69f87980309fbd9ef3434770a8a87cd4b600e6cb | [
"MIT"
] | 1 | 2022-01-27T22:29:19.000Z | 2022-01-27T22:29:19.000Z | Combined Data/duplicate_addresses.py | alfredholmes/UK-Company-Data | 69f87980309fbd9ef3434770a8a87cd4b600e6cb | [
"MIT"
] | null | null | null | Combined Data/duplicate_addresses.py | alfredholmes/UK-Company-Data | 69f87980309fbd9ef3434770a8a87cd4b600e6cb | [
"MIT"
] | null | null | null | import ijson, csv, json, datetime
import sys
sys.path.append('../lib')
from accounts.company import Company
if __name__ == '__main__':
main() | 29.28 | 249 | 0.638434 | import ijson, csv, json, datetime
import sys
sys.path.append('../lib')
from accounts.company import Company
def main():
enterprises = {}
print('loading enterprises')
with open('combined_data.json', 'r') as f:
for i, c in enumerate(ijson.items(f, 'item')):
if i % 10000 == 0:
print(i)
address = frozenset(c['address'].values())
if address in enterprises:
enterprises[address].append(Company(c['company_number'], c['birth_date'], c['address'], c['status'], c['sic_codes'], c['accounts']))
else:
enterprises[address] = [Company(c['company_number'], c['birth_date'], c['address'], c['status'], c['sic_codes'], c['accounts'])]
print('writing enterprises')
with open('enterprises.json', 'w') as f:
f.write('[\n')
asset_dates = [datetime.datetime(2012, 1, 1), datetime.datetime(2013, 1, 1), datetime.datetime(2014, 1, 1), datetime.datetime(2015, 1, 1), datetime.datetime(2016, 1, 1)]
i= 0
for address, enterprise in enterprises.items():
if i % 10000 == 0:
print(i)
i += 1
dead = True
birth_date = None
death_date = None
sic_codes = set()
assets = {d: 0 for d in asset_dates}
for company in enterprise:
for s in company.sic_codes.values():
sic_codes = sic_codes.union(set(s))
if company.death_date is None:
dead = False
else:
if dead and (death_date is None or (company.death_date - death_date).days > 0):
death_date = company.death_date
if birth_date is None or (company.birth_date - birth_date).days < 0:
birth_date = company.birth_date
for date in asset_dates:
assets[date] += company.asset_at_date('assets', date)
enterprise_data = {'address': {d.strftime('%Y-%m-%d'): v for d, v in company.addresses.items()}, 'birth_date': birth_date.strftime('%Y-%m-%d'), 'assets': {d.strftime('%Y-%m-%d'): v for d, v in assets.items()}, 'sic_codes': [x for x in sic_codes]}
if death_date is not None:
enterprise_data['death_date'] = death_date.strftime('%Y-%m-%d')
f.write(' ')
f.write(json.dumps(enterprise_data))
if i != len(enterprises):
f.write(',\n')
else:
f.write('\n')
f.write(']\n')
if __name__ == '__main__':
main() | 2,024 | 0 | 23 |
3a6a9407591ebbd3493d14eb399531f2d9d4dcaa | 5,549 | py | Python | dataLoader/batch.py | JRC1995/SocialMediaNER | 236b22ded48f64516ebf0577c3b9d9d907db84e0 | [
"MIT"
] | null | null | null | dataLoader/batch.py | JRC1995/SocialMediaNER | 236b22ded48f64516ebf0577c3b9d9d907db84e0 | [
"MIT"
] | null | null | null | dataLoader/batch.py | JRC1995/SocialMediaNER | 236b22ded48f64516ebf0577c3b9d9d907db84e0 | [
"MIT"
] | null | null | null | import numpy as np
import random
import re
import copy
| 37.493243 | 260 | 0.609299 | import numpy as np
import random
import re
import copy
def batcher(sample_tuples,
pad_types,
batch_size=32, bucket_size_factor=5,
sort_by_idx=0, sort=True, shuffle=True,
SEED=None):
# sample_tuples = List of different types of lists of samples
# Example: [train_data, train_labels]
# PAD types corresponding to different sequences:
# Example: [5000, 3] # where 5000 could be the pad id of the train data sequences, and 3 could be the id of the pad -corresponding labels for a sequence labelling task (eg. 'O')
# .....................(Though the batch masks should be used ideally to nullify any effect of PAD. So it doesn't matter too much, but can be useful in certain cases)
# bucket_size_factor controls "bucketing". We don't want sequences of too different lengths to be batched together (can cause issues if pads not handled well, or can be inefficient -- short sequences will get over padded need more compute even for sorting)
# To make sure that similar sized sequences can batched together one can sort first and then batch.
# But sorting can result in less diverse batch. If there are few sequences with the same sequences they will be always in the same batch. Not ideal if we want more "chaos" in the training (could result in some bias in batch updates).
# bucketing provides a middle way. We first do sorting, and then create buckets out of the sorted samples. The samples within the buckets are shuffled and batched.
# bucket size = bucket_size_factor * batch_size
# You can disable "bucketing" by setting sort=False which results in the "bucketing" turning into just more random shuffling for no reason (unless shuffling too is disabled)
if SEED is not None:
random.seed(SEED)
data1 = sample_tuples[0]
data_len = len(data1)
for i in range(data_len):
# print(len(sorted_sample_tuples[0][i]))
# print(len(sorted_sample_tuples[1][i]))
assert len(sample_tuples[0][i]) == len(sample_tuples[-1][i])
def reorder(samples, idx):
return [samples[i] for i in idx]
def reorder_all(sample_tuples, idx):
return [reorder(samples, idx) for samples in sample_tuples]
if shuffle:
random_idx = [i for i in range(data_len)]
random.shuffle(random_idx)
shuffled_sample_tuples = reorder_all(sample_tuples, random_idx)
else:
shuffled_sample_tuples = sample_tuples
if sort:
data1 = shuffled_sample_tuples[sort_by_idx]
true_seq_lens = [len(sample) for sample in data1]
sorted_idx = np.flip(np.argsort(true_seq_lens), 0)
sorted_sample_tuples = reorder_all(shuffled_sample_tuples, sorted_idx)
else:
sorted_sample_tuples = sample_tuples
#print("AFTER SORT AND SHUFFLE")
for i in range(data_len):
# print(len(sorted_sample_tuples[0][i]))
# print(len(sorted_sample_tuples[1][i]))
assert len(sorted_sample_tuples[0][i]) == len(sorted_sample_tuples[-1][i])
bucket_size = bucket_size_factor*batch_size
c = 0
buckets = []
while c < data_len:
start = c
end = c+bucket_size
if end > data_len:
end = data_len
bucket = [samples[start:end] for samples in sorted_sample_tuples]
buckets.append(bucket)
c = end
if shuffle:
random.shuffle(buckets)
def max_len_in_span(samples, start, end):
if isinstance(samples[0], list):
return max([len(samples[i]) for i in range(start, end)])
else:
return -1
for bucket in buckets:
data1 = bucket[0]
bucket_len = len(data1)
if shuffle:
random_idx = [i for i in range(bucket_len)]
random.shuffle(random_idx)
bucket = reorder_all(bucket, random_idx)
i = 0
while i < bucket_len:
if i+batch_size > bucket_len:
incr = bucket_len-i
else:
incr = batch_size
max_lens = [max_len_in_span(samples, i, i+incr) for samples in bucket]
# print(max_lens)
batch = [[]]*len(bucket)
batch_masks = [[]]*len(bucket)
for j in range(i, i+incr):
sample_type_id = 0
for samples, max_len, PAD in zip(bucket, max_lens, pad_types):
sample = copy.deepcopy(samples[j])
if max_len != -1 and PAD is not None: # -1 means not list type object. No need of padding
sample_len = len(sample)
if type(PAD) != type(sample[0]):
raise ValueError("INVALID PAD TYPE for Sample Type {}: ".format(sample_type_id) +
"PAD data type mismatch")
mask = [1]*sample_len
while len(sample) < max_len:
sample.append(PAD)
mask.append(0)
else:
mask = []
batch[sample_type_id] = batch[sample_type_id] + [sample]
batch_masks[sample_type_id] = batch_masks[sample_type_id] + [mask]
sample_type_id += 1
i += incr
# print(batch[2])
#batch = [np.asarray(batch_samples) for batch_samples in batch]
#batch_masks = [np.asarray(batch_mask) for batch_mask in batch_masks]
yield batch, batch_masks
| 5,470 | 0 | 23 |
81559963f29d21ffc38158ededa6ad16e94ccb86 | 96 | py | Python | yamldirs/__init__.py | datakortet/yamldirs | 529f20a5ff3da46448828fc9c7c80ca417bb6e91 | [
"MIT"
] | 13 | 2017-10-23T12:39:20.000Z | 2022-03-04T16:58:44.000Z | yamldirs/__init__.py | datakortet/yamldirs | 529f20a5ff3da46448828fc9c7c80ca417bb6e91 | [
"MIT"
] | 5 | 2017-10-24T12:20:13.000Z | 2017-10-25T13:31:04.000Z | yamldirs/__init__.py | datakortet/yamldirs | 529f20a5ff3da46448828fc9c7c80ca417bb6e91 | [
"MIT"
] | 3 | 2017-10-24T12:20:39.000Z | 2021-12-22T13:19:44.000Z | # -*- coding: utf-8 -*-
__version__ = '1.1.15'
from .filemaker import create_files # noqa
| 16 | 46 | 0.625 | # -*- coding: utf-8 -*-
__version__ = '1.1.15'
from .filemaker import create_files # noqa
| 0 | 0 | 0 |
da0919706a2516295cd9fc9fb8d46007f77a23bf | 2,468 | py | Python | sscutils/metadata/bedrock/conversion.py | papsebestyen/sscutils | dff8b62ab31c9dfe1494264f9319e287945762bc | [
"MIT"
] | null | null | null | sscutils/metadata/bedrock/conversion.py | papsebestyen/sscutils | dff8b62ab31c9dfe1494264f9319e287945762bc | [
"MIT"
] | 21 | 2021-09-15T15:31:22.000Z | 2022-03-20T17:10:50.000Z | sscutils/metadata/bedrock/conversion.py | papsebestyen/sscutils | dff8b62ab31c9dfe1494264f9319e287945762bc | [
"MIT"
] | 2 | 2021-09-08T14:12:00.000Z | 2021-09-29T10:58:08.000Z | from dataclasses import dataclass
from functools import partial
from typing import Callable, List, Optional
from colassigner.constants import PREFIX_SEP
from ...utils import chainmap
from .artifact_metadata import ArtifactMetadata
from .column import Column
from .feature_types import CompositeFeature, ForeignKey, PrimitiveFeature
from .namespace_metadata import NamespaceMetadata
from .namespaced_id import NamespacedId
@dataclass
| 33.351351 | 74 | 0.673825 | from dataclasses import dataclass
from functools import partial
from typing import Callable, List, Optional
from colassigner.constants import PREFIX_SEP
from ...utils import chainmap
from .artifact_metadata import ArtifactMetadata
from .column import Column
from .feature_types import CompositeFeature, ForeignKey, PrimitiveFeature
from .namespace_metadata import NamespaceMetadata
from .namespaced_id import NamespacedId
@dataclass
class FeatConverter:
ns_meta: NamespaceMetadata
a_meta: ArtifactMetadata
wrapper: Callable = lambda x: x
proc_fk: Optional[Callable] = None
def feats_to_cols(self, feats) -> List[Column]:
return chainmap(self.feat_to_cols, feats)
def feat_to_cols(
self,
feat,
init_prefix=(),
calling_ns_prefix=None,
open_to_fk=True,
) -> List[Column]:
new_open_to_fk = True
fk_to = None
if isinstance(feat, PrimitiveFeature):
name = PREFIX_SEP.join([*init_prefix, feat.name])
return [self.wrapper(Column(name, feat.dtype, feat.nullable))]
if isinstance(feat, CompositeFeature):
sub_id = feat.dtype
subfeats = self._get_atom(sub_id, calling_ns_prefix).features
elif isinstance(feat, ForeignKey):
new_open_to_fk = False
sub_id = feat.table
fk_to = self._get_id(sub_id, calling_ns_prefix)
table_obj = self._get_atom(sub_id, calling_ns_prefix)
subfeats = table_obj.index
new_ns_prefix = (
sub_id.ns_prefix if not sub_id.is_local else calling_ns_prefix
)
new_feat_prefix = (*init_prefix, feat.prefix)
new_fun = partial(
self.feat_to_cols,
init_prefix=new_feat_prefix,
calling_ns_prefix=new_ns_prefix,
open_to_fk=new_open_to_fk,
)
out = chainmap(new_fun, subfeats)
if fk_to is not None and open_to_fk and self.proc_fk:
self.proc_fk(out, fk_to, new_feat_prefix)
return out
def _get_id(self, id_: NamespacedId, calling_namespace=None):
if not id_.is_local:
return id_
if calling_namespace is None:
calling_namespace = self.ns_meta.local_name
return NamespacedId(calling_namespace, id_.obj_id)
def _get_atom(self, id_: NamespacedId, calling_namespace=None):
return self.a_meta.get_atom(self._get_id(id_, calling_namespace))
| 1,766 | 243 | 22 |
fdda3437a4cf366dbb07a6363fb2a6bbb3f104d3 | 637 | py | Python | jinfo/utils/__init__.py | JBwdn/jinfo | b5933edd3ea3d27f4f7c1e0153e16750de0d1726 | [
"MIT"
] | null | null | null | jinfo/utils/__init__.py | JBwdn/jinfo | b5933edd3ea3d27f4f7c1e0153e16750de0d1726 | [
"MIT"
] | 1 | 2020-12-07T14:07:14.000Z | 2020-12-07T14:07:14.000Z | jinfo/utils/__init__.py | JBwdn/jinfo | b5933edd3ea3d27f4f7c1e0153e16750de0d1726 | [
"MIT"
] | null | null | null | from jinfo.utils.one_hot_dna import one_hot_dna
from jinfo.utils.random_DNASeq import random_DNASeq
from jinfo.utils.DNASeq_from_NCBI import DNASeq_from_NCBI
from jinfo.utils.seq_list_to_fasta import seq_list_to_fasta
from jinfo.utils.seq_list_from_fasta import seq_list_from_fasta
from jinfo.utils.seq_from_fasta import seq_from_fasta
from jinfo.utils.alignment_from_fasta import alignment_from_fasta
from jinfo.utils.multialign import multialign
from jinfo.utils.calc_phylo_tree import calc_phylo_tree
from jinfo.utils.percentage_identity import percentage_identity
from jinfo.utils.remove_degenerate_seqs import remove_degenerate_seqs | 57.909091 | 69 | 0.897959 | from jinfo.utils.one_hot_dna import one_hot_dna
from jinfo.utils.random_DNASeq import random_DNASeq
from jinfo.utils.DNASeq_from_NCBI import DNASeq_from_NCBI
from jinfo.utils.seq_list_to_fasta import seq_list_to_fasta
from jinfo.utils.seq_list_from_fasta import seq_list_from_fasta
from jinfo.utils.seq_from_fasta import seq_from_fasta
from jinfo.utils.alignment_from_fasta import alignment_from_fasta
from jinfo.utils.multialign import multialign
from jinfo.utils.calc_phylo_tree import calc_phylo_tree
from jinfo.utils.percentage_identity import percentage_identity
from jinfo.utils.remove_degenerate_seqs import remove_degenerate_seqs | 0 | 0 | 0 |
3be4143b4fa68543582d6616184d19d293e1fabd | 1,464 | py | Python | examples/command/actions.py | artificially-ai/python-design-patterns | f3b192f204e87f2c3164a854f70e5a2e6fd0707b | [
"Apache-2.0"
] | 1 | 2021-06-28T15:21:15.000Z | 2021-06-28T15:21:15.000Z | examples/command/actions.py | artificially-ai/python-design-patterns | f3b192f204e87f2c3164a854f70e5a2e6fd0707b | [
"Apache-2.0"
] | null | null | null | examples/command/actions.py | artificially-ai/python-design-patterns | f3b192f204e87f2c3164a854f70e5a2e6fd0707b | [
"Apache-2.0"
] | null | null | null | from multiprocessing import Pool
from random import randrange
from absl import logging
from patterns.command.action import Action
from patterns.command.callback.handler import Callback
| 31.826087 | 98 | 0.642077 | from multiprocessing import Pool
from random import randrange
from absl import logging
from patterns.command.action import Action
from patterns.command.callback.handler import Callback
class SwitchOnAction(Action):
    """Command that switches the machine on; its only effect is a log entry (demo action)."""
    def perform(self):
        logging.info('Switching on the machine.')
class SwitchOffAction(Action):
    """Command that switches the machine off; its only effect is a log entry (demo action)."""
    def perform(self):
        logging.info('Switching off the machine.')
class SendMessageAction(Action):
    """Command that dispatches a message through a one-worker process pool and
    reports the outcome via the supplied Callback (on_success / on_error)."""
    def __init__(self, callback: Callback):
        # Receiver of the success/error notifications.
        self.callback = callback
        # Single-worker pool used to run the callback in a separate process.
        # NOTE(review): the pool is never closed/joined; confirm intended lifecycle.
        self.pool: Pool = Pool(1)
    def perform(self):
        """Attempt the send; randomly simulate a failure 4 times out of 5 (demo)."""
        try:
            logging.info('The message will be sent in a different process.')
            # Silly way to simulate an issue before the call is dispatched to the callback.
            random_failure = randrange(0, 5)
            logging.info(f'Random failure state: {random_failure}')
            if random_failure:
                logging.info('Random failure has been triggered.')
                raise SystemError(f'Random failure has been triggered. Failure: {random_failure}')
            # Do something asynchronously. (Success path only runs when random_failure == 0.)
            self.pool.apply(self.callback.on_success, [f'The message was sent successfully. '
                                                       f'Success code: {random_failure}'])
        except SystemError as e:
            # If something doesn't go okay, report an error.
            self.pool.apply(self.callback.on_error, [f'SystemError: {e}'])
| 1,069 | 28 | 177 |
e8186f362f3a5c6e2c0bf8ddc167f936786e9755 | 829 | py | Python | com.ppc.Microservices/intelligence/daylight/location_midnight_microservice.py | peoplepower/botlab | 21cc90c558a17b7ef4a42bca247b437d2f968dc0 | [
"Apache-2.0"
] | 16 | 2017-03-31T04:41:51.000Z | 2020-07-15T07:03:06.000Z | com.ppc.Microservices/intelligence/daylight/location_midnight_microservice.py | peoplepower/botlab | 21cc90c558a17b7ef4a42bca247b437d2f968dc0 | [
"Apache-2.0"
] | 4 | 2018-07-03T05:39:36.000Z | 2018-07-06T02:59:32.000Z | com.ppc.Microservices/intelligence/daylight/location_midnight_microservice.py | peoplepower/botlab | 21cc90c558a17b7ef4a42bca247b437d2f968dc0 | [
"Apache-2.0"
] | 8 | 2017-04-01T21:07:59.000Z | 2019-09-18T15:23:37.000Z | '''
Created on February 25, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
class LocationMidnightMicroservice(Intelligence):
    """
    Broadcast a "midnight_fired" datastream message across the microservices
    framework whenever the MIDNIGHT runtime schedule triggers.
    """

    def schedule_fired(self, botengine, schedule_id):
        """
        Handle a scheduled execution defined by this bot's runtime.json file.

        :param botengine: BotEngine environment
        :param schedule_id: Identifier of the runtime schedule that fired
        """
        # Ignore every schedule except the midnight one.
        if schedule_id != "MIDNIGHT":
            return
        self.parent.distribute_datastream_message(botengine, "midnight_fired", None, internal=True, external=False)
| 33.16 | 119 | 0.723764 | '''
Created on February 25, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
class LocationMidnightMicroservice(Intelligence):
"""
Announce midnight throughout the microservices framework
"""
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
if schedule_id == "MIDNIGHT":
self.parent.distribute_datastream_message(botengine, "midnight_fired", None, internal=True, external=False)
| 0 | 0 | 0 |
1cbc07813f54a675480a7ef867561b7d1a5fd88a | 311 | py | Python | 2015/day04/python/part2.py | jmkacz/practice-advent-of-code | c06f474576e91ed0778c8a30a51bad848a602eb6 | [
"MIT"
] | null | null | null | 2015/day04/python/part2.py | jmkacz/practice-advent-of-code | c06f474576e91ed0778c8a30a51bad848a602eb6 | [
"MIT"
] | null | null | null | 2015/day04/python/part2.py | jmkacz/practice-advent-of-code | c06f474576e91ed0778c8a30a51bad848a602eb6 | [
"MIT"
] | null | null | null | import hashlib
from typing import List
| 22.214286 | 46 | 0.572347 | import hashlib
from typing import List
def compute_answer(lines: List[str], prefix: str = "000000") -> int:
    """Return the smallest positive integer suffix whose MD5 hash matches.

    The secret key is ``lines[0]``; candidates are the hex MD5 digests of
    ``key + str(n)`` for n = 1, 2, 3, ... The first n whose digest starts
    with *prefix* is returned.

    Args:
        lines: puzzle input; only the first line (the secret key) is used.
        prefix: hex prefix the digest must start with. Defaults to six
            zeros (Advent of Code 2015, day 4, part 2); part 1 can reuse
            this function with ``prefix="00000"``.

    Returns:
        The smallest matching suffix (>= 1).
    """
    key = lines[0]
    number = 1
    while True:
        # Renamed from `hash` to avoid shadowing the builtin.
        digest = hashlib.md5((key + str(number)).encode()).hexdigest()
        if digest.startswith(prefix):
            return number
        number += 1
| 248 | 0 | 23 |
bc4831896be9b443c435adb7e470a5ed2b0aa1f6 | 9,224 | py | Python | ravenframework/BaseClasses/Assembler.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | ravenframework/BaseClasses/Assembler.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | ravenframework/BaseClasses/Assembler.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 20, 2015
@author: senrs
based on alfoa design
"""
from __future__ import division, print_function, unicode_literals, absolute_import
#External Modules------------------------------------------------------------------------------------
import abc
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ..utils import utils, InputData
from ..BaseClasses import MessageUser
from .. import MessageHandler
#Internal Modules End--------------------------------------------------------------------------------
class Assembler(MessageUser):
  """
    Assembler class is used as base class for all the objects that need, for initialization purposes,
    to get pointers (links) of other objects at the Simulation stage (Simulation.run() method)
  """
  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    super().__init__()
    self.type = self.__class__.__name__ # type
    self.name = self.__class__.__name__ # name
    self.assemblerObjects = {} # {MainClassName(e.g.Distributions):[class(e.g.Models),type(e.g.ROM),objectName]}
    # _requiredAsmbObject layout: [check_flag, name_list, number_list]
    # first entry boolean flag. True if the XML parser must look for objects;
    # where name_list is the tokens required (if check_number is True)
    # and number_list is a list of InputData.Quantity for the number required
    # (-1,-2,-n means optional (max 1 object, 2 objects, no number limit))
    self._requiredAsmbObject = [False, [], []]
    self.assemblerDict = {} # {'class':[['class','type','name',instance]]}}
  def whatDoINeed(self):
    """
      This method is used mainly by the Simulation class at the Step construction stage.
      It is used for inquiring the class, which is implementing the method, about the kind of objects the class needs to
      be initialize.
      @ In, None
      @ Out, needDict, dict, dictionary of objects needed (class:tuple(object type{if None, Simulation does not check the type}, object name))
    """
    # start from the subclass-specific needs, if the subclass declares any
    if '_localWhatDoINeed' in dir(self):
      needDict = self._localWhatDoINeed()
    else:
      needDict = {}
    # merge in the requests collected from the XML via _readAssemblerObjects
    for val in self.assemblerObjects.values():
      for value in val:
        if value[0] not in needDict.keys():
          needDict[value[0]] = []
        needDict[value[0]].append((value[1],value[2]))
    return needDict
  def generateAssembler(self, initDict):
    """
      This method is used mainly by the Simulation class at the Step construction stage.
      It is used for sending to the instanciated class, which is implementing the method, the objects that have been requested through "whatDoINeed" method
      It is an abstract method -> It must be implemented in the derived class!
      @ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases):{specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'})
      @ Out, None
    """
    if '_localGenerateAssembler' in dir(self):
      self._localGenerateAssembler(initDict)
    # resolve each requested (class, type, name) triple into a live instance
    for key, value in self.assemblerObjects.items():
      self.assemblerDict[key] = []
      for entity, etype, name in value:
        self.assemblerDict[key].append([entity, etype, name, initDict[entity][name]])
  def _readAssemblerObjects(self, subXmlNode, found, testObjects):
    """
      This method is used to look for the assemble objects in an subNodes of an xmlNode
      @ In, subXmlNode, ET, the XML node that needs to be inquired
      @ In, found, dict, a dictionary that check if all the tokens (requested) are found
      @ In, testObjects, dict, a dictionary that contains the number of time a token (requested) has been found
      @ Out, returnObject, tuple, tuple(found, testObjects) containing in [0], found -> a dictionary that check if all the tokens (requested) are found ;
                                  [1], testObjects -> a dictionary that contains the number of time a token (requested) has been found
    """
    for subNode in subXmlNode:
      for token in self._requiredAsmbObject[1]:
        if subNode.tag == token:
          found[token] = True
          if 'class' not in subNode.attrib.keys():
            self.raiseAnError(IOError, 'In '+self.type+' Object ' + self.name+ ', block ' + subNode.tag + ' does not have the attribute class!!')
          tag = subNode.tag.strip()
          if tag not in self.assemblerObjects:
            self.assemblerObjects[tag] = []
          # check if already present
          entry = [subNode.attrib['class'],subNode.attrib['type'],subNode.text.strip()]
          if entry not in self.assemblerObjects.get(tag, []):
            self.assemblerObjects[tag].append(entry)
          # count every occurrence, even duplicates, for the quantity check
          testObjects[token] += 1
    returnObject = found, testObjects
    return returnObject
  def _readMoreXML(self, xmlNode):
    """
      Function to read the portion of the xml input that belongs to this specialized class
      and initialize some variables based on the inputs got. This method is used to automatically generate the Assembler 'request'
      based on the input of the daughter class.
      @ In, self, Any, an instance of the class to read into this.
      @ In, xmlNode, xml.etree.ElementTree.Element, XML element node that represents the portion of the input that belongs to this class
      @ Out, None
    """
    self.type = xmlNode.tag
    if 'name' in xmlNode.attrib:
      self.name = xmlNode.attrib['name']
    if 'verbosity' in xmlNode.attrib.keys():
      self.verbosity = xmlNode.attrib['verbosity'].lower()
    #XXX Once InputData checks numbers of subnodes, everything in this
    # if block can be removed
    if self._requiredAsmbObject[0]:
      testObjects = {}
      for token in self._requiredAsmbObject[1]:
        testObjects[token] = 0
      found = dict.fromkeys(testObjects.keys(),False)
      # scan both the node itself and its direct children for assembler requests
      found, testObjects = self._readAssemblerObjects(xmlNode, found, testObjects)
      for subNode in xmlNode:
        found, testObjects = self._readAssemblerObjects(subNode, found, testObjects)
      for i,token in enumerate(self._requiredAsmbObject[1]):
        quantity = self._requiredAsmbObject[2][i]
        if not InputData.checkQuantity(quantity, testObjects[token]):
          self.raiseAnError(IOError, 'the object '+token+' has wrong quantity Expected: '+str(quantity)+' Found: '+str(testObjects[token])+ ' in block '+self.name)
    # prefer the InputData-based _handleInput only when the subclass itself defines it
    if '_handleInput' in dir(self) and self._handleInput.__func__.__qualname__.split(".")[0] == self.__class__.__name__:
      #_handleInput in class and not from superclass
      #print(self, self.getInputSpecification, self.getInputSpecification.__func__.__qualname__, self._handleInput, self._handleInput.__func__.__qualname__)
      paramInput = self.getInputSpecification()()
      paramInput.parseNode(xmlNode)
      self._handleInput(paramInput)
    elif '_localReadMoreXML' in dir(self):
      self._localReadMoreXML(xmlNode)
  def addAssemblerObject(self, name, flag):
    """
      Method to add required assembler objects to the _requiredAsmbObject dictionary.
      @ In, name, string, the node name to search for (e.g. Function, Model)
      @ In, flag, InputData.Quantity, the number of nodes to look for
      @ Out, None
    """
    self._requiredAsmbObject[0] = True
    self._requiredAsmbObject[1].append(name)
    self._requiredAsmbObject[2].append(flag)
  def retrieveObjectFromAssemblerDict(self, objectMainClass, objectName, pop=False):
    """
      Method to retrieve an object from the assembler
      @ In, objectName, str, the object name that needs to be retrieved
      @ In, objectMainClass, str, the object main Class name (e.g. Input, Model, etc.) of the object that needs to be retrieved
      @ In, pop, bool, optional, if found, pop it out (i.e. remove it from the self.assemblerDict?). Default = False
      @ Out, assemblerObject, instance, the instance requested (None if not found)
    """
    assemblerObject = None
    if objectMainClass in self.assemblerDict.keys():
      # linear scan; entries are [class, type, name, instance]
      for assemblerObj in self.assemblerDict[objectMainClass]:
        if objectName == assemblerObj[2]:
          assemblerObject = assemblerObj[3]
          break
      if pop and assemblerObject is not None:
        # assemblerObj still binds the matched entry thanks to the break above
        self.assemblerDict[objectMainClass].remove(assemblerObj)
    if assemblerObject is None:
      self.raiseAnError(IOError, 'Required Object: ', objectName, 'is not found among', objectMainClass)
    return assemblerObject
| 50.681319 | 174 | 0.66598 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 20, 2015
@author: senrs
based on alfoa design
"""
from __future__ import division, print_function, unicode_literals, absolute_import
#External Modules------------------------------------------------------------------------------------
import abc
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ..utils import utils, InputData
from ..BaseClasses import MessageUser
from .. import MessageHandler
#Internal Modules End--------------------------------------------------------------------------------
class Assembler(MessageUser):
"""
Assembler class is used as base class for all the objects that need, for initialization purposes,
to get pointers (links) of other objects at the Simulation stage (Simulation.run() method)
"""
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.type = self.__class__.__name__ # type
self.name = self.__class__.__name__ # name
self.assemblerObjects = {} # {MainClassName(e.g.Distributions):[class(e.g.Models),type(e.g.ROM),objectName]}
# where name_list is the tokens required (if check_number is True)
# and number_list is a list of InputData.Quantity for the number required
self._requiredAsmbObject = [False, [], []]
self.assemblerDict = {} # {'class':[['class','type','name',instance]]}}
# list. first entry boolean flag. True if the XML parser must look for objects;
# second entry tuple.first entry list of object can be retrieved, second entry multiplicity (-1,-2,-n means optional (max 1 object,2 object, no number limit))
def whatDoINeed(self):
"""
This method is used mainly by the Simulation class at the Step construction stage.
It is used for inquiring the class, which is implementing the method, about the kind of objects the class needs to
be initialize.
@ In, None
@ Out, needDict, dict, dictionary of objects needed (class:tuple(object type{if None, Simulation does not check the type}, object name))
"""
if '_localWhatDoINeed' in dir(self):
needDict = self._localWhatDoINeed()
else:
needDict = {}
for val in self.assemblerObjects.values():
for value in val:
if value[0] not in needDict.keys():
needDict[value[0]] = []
needDict[value[0]].append((value[1],value[2]))
return needDict
def generateAssembler(self, initDict):
"""
This method is used mainly by the Simulation class at the Step construction stage.
It is used for sending to the instanciated class, which is implementing the method, the objects that have been requested through "whatDoINeed" method
It is an abstract method -> It must be implemented in the derived class!
@ In, initDict, dict, dictionary ({'mainClassName(e.g., Databases):{specializedObjectName(e.g.,DatabaseForSystemCodeNamedWolf):ObjectInstance}'})
@ Out, None
"""
if '_localGenerateAssembler' in dir(self):
self._localGenerateAssembler(initDict)
for key, value in self.assemblerObjects.items():
self.assemblerDict[key] = []
for entity, etype, name in value:
self.assemblerDict[key].append([entity, etype, name, initDict[entity][name]])
def _readAssemblerObjects(self, subXmlNode, found, testObjects):
"""
This method is used to look for the assemble objects in an subNodes of an xmlNode
@ In, subXmlNode, ET, the XML node that needs to be inquired
@ In, found, dict, a dictionary that check if all the tokens (requested) are found
@ In, testObjects, dict, a dictionary that contains the number of time a token (requested) has been found
@ Out, returnObject, tuple, tuple(found, testObjects) containing in [0], found -> a dictionary that check if all the tokens (requested) are found ;
[1], testObjects -> a dictionary that contains the number of time a token (requested) has been found
"""
for subNode in subXmlNode:
for token in self._requiredAsmbObject[1]:
if subNode.tag == token:
found[token] = True
if 'class' not in subNode.attrib.keys():
self.raiseAnError(IOError, 'In '+self.type+' Object ' + self.name+ ', block ' + subNode.tag + ' does not have the attribute class!!')
tag = subNode.tag.strip()
if tag not in self.assemblerObjects:
self.assemblerObjects[tag] = []
# check if already present
entry = [subNode.attrib['class'],subNode.attrib['type'],subNode.text.strip()]
if entry not in self.assemblerObjects.get(tag, []):
self.assemblerObjects[tag].append(entry)
testObjects[token] += 1
returnObject = found, testObjects
return returnObject
def _readMoreXML(self, xmlNode):
"""
Function to read the portion of the xml input that belongs to this specialized class
and initialize some variables based on the inputs got. This method is used to automatically generate the Assembler 'request'
based on the input of the daughter class.
@ In, self, Any, an instance of the class to read into this.
@ In, xmlNode, xml.etree.ElementTree.Element, XML element node that represents the portion of the input that belongs to this class
@ Out, None
"""
self.type = xmlNode.tag
if 'name' in xmlNode.attrib:
self.name = xmlNode.attrib['name']
if 'verbosity' in xmlNode.attrib.keys():
self.verbosity = xmlNode.attrib['verbosity'].lower()
#XXX Once InputData checks numbers of subnodes, everything in this
# if block can be removed
if self._requiredAsmbObject[0]:
testObjects = {}
for token in self._requiredAsmbObject[1]:
testObjects[token] = 0
found = dict.fromkeys(testObjects.keys(),False)
found, testObjects = self._readAssemblerObjects(xmlNode, found, testObjects)
for subNode in xmlNode:
found, testObjects = self._readAssemblerObjects(subNode, found, testObjects)
for i,token in enumerate(self._requiredAsmbObject[1]):
quantity = self._requiredAsmbObject[2][i]
if not InputData.checkQuantity(quantity, testObjects[token]):
self.raiseAnError(IOError, 'the object '+token+' has wrong quantity Expected: '+str(quantity)+' Found: '+str(testObjects[token])+ ' in block '+self.name)
if '_handleInput' in dir(self) and self._handleInput.__func__.__qualname__.split(".")[0] == self.__class__.__name__:
#_handleInput in class and not from superclass
#print(self, self.getInputSpecification, self.getInputSpecification.__func__.__qualname__, self._handleInput, self._handleInput.__func__.__qualname__)
paramInput = self.getInputSpecification()()
paramInput.parseNode(xmlNode)
self._handleInput(paramInput)
elif '_localReadMoreXML' in dir(self):
self._localReadMoreXML(xmlNode)
def addAssemblerObject(self, name, flag):
"""
Method to add required assembler objects to the _requiredAsmbObject dictionary.
@ In, name, string, the node name to search for (e.g. Function, Model)
@ In, flag, InputData.Quantity, the number of nodes to look for
@ Out, None
"""
self._requiredAsmbObject[0] = True
self._requiredAsmbObject[1].append(name)
self._requiredAsmbObject[2].append(flag)
def retrieveObjectFromAssemblerDict(self, objectMainClass, objectName, pop=False):
"""
Method to retrieve an object from the assembler
@ In, objectName, str, the object name that needs to be retrieved
@ In, objectMainClass, str, the object main Class name (e.g. Input, Model, etc.) of the object that needs to be retrieved
@ In, pop, bool, optional, if found, pop it out (i.e. remove it from the self.assemblerDict?). Default = False
@ Out, assemblerObject, instance, the instance requested (None if not found)
"""
assemblerObject = None
if objectMainClass in self.assemblerDict.keys():
for assemblerObj in self.assemblerDict[objectMainClass]:
if objectName == assemblerObj[2]:
assemblerObject = assemblerObj[3]
break
if pop and assemblerObject is not None:
self.assemblerDict[objectMainClass].remove(assemblerObj)
if assemblerObject is None:
self.raiseAnError(IOError, 'Required Object: ', objectName, 'is not found among', objectMainClass)
return assemblerObject
| 0 | 0 | 0 |
58d602b65cc23b213c43e06ad720d99779ca9444 | 836 | py | Python | meiduo_mall/meiduo_mall/apps/meiduo_admin/serializer/orders.py | 572314705/meiduo_24 | d279719506c536d21141b65eabf92fb3fa2bcff0 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/serializer/orders.py | 572314705/meiduo_24 | d279719506c536d21141b65eabf92fb3fa2bcff0 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/serializer/orders.py | 572314705/meiduo_24 | d279719506c536d21141b65eabf92fb3fa2bcff0 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
class SKUSerializer(serializers.ModelSerializer):
'''SKU'''
class OrderGoodsSerialzier(serializers.ModelSerializer):
"""
订单商品表
"""
sku = SKUSerializer()
class OrderSerializer(serializers.ModelSerializer):
"""
订单表序列化
"""
user = serializers.StringRelatedField(read_only=True)
address = serializers.StringRelatedField(read_only=True)
skus = OrderGoodsSerialzier(many=True) | 25.333333 | 60 | 0.648325 | from rest_framework import serializers
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
class SKUSerializer(serializers.ModelSerializer):
    '''Serializer exposing only a SKU's name and default image.'''
    class Meta:
        model = SKU
        fields = ('name','default_image')
# NOTE(review): class name misspells "Serializer"; renaming would touch external references.
class OrderGoodsSerialzier(serializers.ModelSerializer):
    """
    Serializer for the order-goods table (order line items), nesting the SKU.
    """
    sku = SKUSerializer()
    class Meta:
        model = OrderGoods
        fields = ('count','price','sku')
class OrderSerializer(serializers.ModelSerializer):
    """
    Serializer for the order-info table, with nested line items.
    """
    user = serializers.StringRelatedField(read_only=True)
    address = serializers.StringRelatedField(read_only=True)
    skus = OrderGoodsSerialzier(many=True)
    class Meta:
        # Model the serializer fields are generated from
        model = OrderInfo
        # Generate every model field
        fields = '__all__'
9489bfa545777ad8ca14dfc0924aa4cbbb07cf88 | 5,776 | py | Python | examples/AllInOne/allinone_ownloop.py | irmen/Pyro3 | 5bd531088d9a11ec83556a0429f18df6cb5cd437 | [
"MIT"
] | 3 | 2018-01-13T20:50:41.000Z | 2020-02-24T13:35:08.000Z | examples/AllInOne/allinone_ownloop.py | irmen/Pyro3 | 5bd531088d9a11ec83556a0429f18df6cb5cd437 | [
"MIT"
] | null | null | null | examples/AllInOne/allinone_ownloop.py | irmen/Pyro3 | 5bd531088d9a11ec83556a0429f18df6cb5cd437 | [
"MIT"
] | 6 | 2015-03-21T20:34:05.000Z | 2021-06-08T04:04:33.000Z | #!/usr/bin/env python
#
# This application creates a Name Server, Event Server,
# Pyro server, and clients, and uses a custom event loop to keep them
# all running in parallel.
# The custom loop runs in its own server thread otherwise we
# can't run client invocations, obviously.
# The main loop calls Pyro objects to set some artificial
# properties. Those objects publish those events on a ES channel,
# on which an event listener is subscribed. That listener prints
# the events that it receives.
#
import time
import random
import string
import Pyro.naming
import Pyro.EventService.Server
from Pyro.EventService.Clients import Publisher, Subscriber
from Pyro.errors import *
import Pyro.util
import select
from threading import Thread
####################### EVENT SERVER LISTENER & PUBLISHER #################
################ Multi-purpose monolithic server. #####################
# handles all socket events from NS, ES, Pyro daemon.
############################# MAIN LOOP #############################
if __name__=="__main__":
main()
| 31.911602 | 86 | 0.721434 | #!/usr/bin/env python
#
# This application creates a Name Server, Event Server,
# Pyro server, and clients, and uses a custom event loop to keep them
# all running in parallel.
# The custom loop runs in its own server thread otherwise we
# can't run client invocations, obviously.
# The main loop calls Pyro objects to set some artificial
# properties. Those objects publish those events on a ES channel,
# on which an event listener is subscribed. That listener prints
# the events that it receives.
#
import time
import random
import string
import Pyro.naming
import Pyro.EventService.Server
from Pyro.EventService.Clients import Publisher, Subscriber
from Pyro.errors import *
import Pyro.util
import select
from threading import Thread
####################### EVENT SERVER LISTENER & PUBLISHER #################
class PropertyChangePublisher(Pyro.core.ObjBase, Publisher):
	"""Pyro object that publishes each property change to the Event Service
	on the subject '<name>.<property>'."""
	def __init__(self, name):
		Pyro.core.ObjBase.__init__(self)
		Publisher.__init__(self)
		self.name=name
	def setProperty(self, property, value):
		print self.name,"sets",property,"to",value
		self.publish(self.name+"."+property, value)
class PropertyChangeListener(Subscriber):
	"""Event Service subscriber that prints every event it receives."""
	def __init__(self):
		Subscriber.__init__(self)
	def event(self,event):
		# event.msg, subject, time
		print "Listener got Event: %s=%s"%(event.subject, event.msg)
################ Multi-purpose monolithic server. #####################
# handles all socket events from NS, ES, Pyro daemon.
class Server(Thread):
	"""Daemon thread running one select() loop that multiplexes the Name
	Server, Event Server, Pyro daemon and event-listener sockets."""
	def __init__(self):
		Thread.__init__(self)
		self.setDaemon(1)
		self.ns_starter=None
		self.ns_sockets=[]
		self.es_starter=None
		self.es_sockets=[]
		self.pdaemon=None
		# daemon sockets are dynamic...
		self.listener=None
		self.listener_sockets=[]
	def setNameServerStarter(self, starter):
		# block until the NS is up before caching its sockets
		starter.waitUntilStarted()
		self.ns_starter=starter
		self.ns_sockets=starter.getServerSockets()
	def setEventServerStarter(self, starter):
		# block until the ES is up before caching its sockets
		starter.waitUntilStarted()
		self.es_starter=starter
		self.es_sockets=starter.getServerSockets()
	def setPyroDaemon(self, pdaemon):
		self.pdaemon=pdaemon
	def setEventListener(self, listener):
		self.listener=listener
		self.listener_sockets=listener.getDaemon().getServerSockets()
	def run(self):
		Pyro.core.initServer()
		while 1:
			all_sockets = self.ns_sockets + self.es_sockets + self.listener_sockets
			daemon_sockets=[]
			if self.pdaemon:
				# daemon sockets are dynamic.
				daemon_sockets = self.pdaemon.getServerSockets()
			all_sockets.extend(daemon_sockets)
			################### CUSTOM EVENT LOOP ####################
			if all_sockets:
				ins,outs,exs=select.select(all_sockets,[],[],1)
			else:
				# windows doesn't like empty select. Just wait a while.
				time.sleep(1)
				continue
			##########################################################
			# check for Name Server sockets...
			for ns_sock in self.ns_sockets:
				if ns_sock in ins:
					self.ns_starter.handleRequests(timeout=0)
					break
			# check for Event Server sockets....
			for es_sock in self.es_sockets:
				if es_sock in ins:
					self.es_starter.handleRequests(timeout=0)
					break
			# check for Daemon Server sockets....
			for d_sock in daemon_sockets:
				if d_sock in ins:
					self.pdaemon.handleRequests(timeout=0)
					break
			# check for Event listener sockets...
			for l_sock in self.listener_sockets:
				if l_sock in ins:
					self.listener.getDaemon().handleRequests(timeout=0)
					break
############################# MAIN LOOP #############################
def main():
	"""Start NS, ES, Pyro daemon and listener inside one Server thread,
	then publish random property changes forever from this main thread."""
	if not Pyro.config.PYRO_MULTITHREADED:
		print "Sorry, this example requires multithreading."
		print "Either your Python doesn't support it or it has been disabled in the config."
		return
	Pyro.core.initClient()
	server = Server()
	server.start()
	# We are starting the different servers in a separate thread (here),
	# otherwise the custom server thread cannot handle the concurrent
	# invocations (for instance, the ES needs the NS when it starts...)
	print "STARTING NAME SERVER"
	starter = Pyro.naming.NameServerStarter() # no special identification
	starter.initialize()
	server.setNameServerStarter(starter)
	print "NAME SERVER STARTED ON PORT",starter.daemon.port
	print "STARTING EVENT SERVER"
	starter = Pyro.EventService.Server.EventServiceStarter() # no special identification
	# use port autoselect
	es_port=0
	starter.initialize(port=es_port, norange=(es_port==0))
	server.setEventServerStarter(starter)
	print "EVENT SERVER STARTED ON PORT",starter.daemon.port
	print "CREATING PYRO SERVER OBJECTS AND PYRO DAEMON"
	# use port autoselect
	port=0
	daemon = Pyro.core.Daemon(port=port, norange=(port==0))
	daemon.useNameServer(Pyro.naming.NameServerLocator().getNS())
	daemon.connect(PropertyChangePublisher("publisher1"), "publisher1")
	daemon.connect(PropertyChangePublisher("publisher2"), "publisher2")
	daemon.connect(PropertyChangePublisher("publisher3"), "publisher3")
	server.setPyroDaemon(daemon)
	print "PYRO SERVER ACTIVATED ON PORT",daemon.port
	# subscribe to every 'publisherN.*' subject
	listener = PropertyChangeListener()
	listener.subscribeMatch("^publisher.\\..*$")
	server.setEventListener(listener)
	print "EVENT LISTENER ACTIVATED"
	print "ALL SERVERS WERE STARTED!"
	time.sleep(1)
	p1 = Pyro.core.getProxyForURI("PYRONAME://publisher1")
	p2 = Pyro.core.getProxyForURI("PYRONAME://publisher2")
	p3 = Pyro.core.getProxyForURI("PYRONAME://publisher3")
	try:
		while True:
			print "MAIN LOOP CHANGES PROPERTIES..."
			p1.setProperty(random.choice(string.uppercase), random.randint(0,1000))
			p2.setProperty(random.choice(string.uppercase), random.randint(0,1000))
			p3.setProperty(random.choice(string.uppercase), random.randint(0,1000))
			time.sleep(1)
	except Exception,x:
		print "".join(Pyro.util.getPyroTraceback(x))
if __name__=="__main__":
main()
| 4,338 | 59 | 322 |
fd6536b19aafb8bd6cbe2ae5992f2e0633363eb1 | 511 | py | Python | examples/hello_template.py | orest-d/liquer | 7a5b5a69cf673b4a849dd2da3050ccd75081e454 | [
"MIT"
] | 3 | 2019-12-10T10:22:36.000Z | 2019-12-12T16:36:11.000Z | examples/hello_template.py | orest-d/liquer | 7a5b5a69cf673b4a849dd2da3050ccd75081e454 | [
"MIT"
] | null | null | null | examples/hello_template.py | orest-d/liquer | 7a5b5a69cf673b4a849dd2da3050ccd75081e454 | [
"MIT"
] | 2 | 2019-11-14T16:26:52.000Z | 2021-07-26T04:53:54.000Z | # Make it run from the examples directory
import sys
sys.path.append("..")
from liquer import *
@first_command
@command
# with default delimiters
print (evaluate_template("""
Template example [[]]
- $hello$
- $hello/greet$
- $hello/greet-everybody$
"""))
# with custom delimiters
print (evaluate_template("""
Template example $$$
- [[hello]]
- [[hello/greet]]
- [[hello/greet-everybody]]
""","[[","]]")) | 15.484848 | 41 | 0.655577 | # Make it run from the examples directory
import sys
sys.path.append("..")
from liquer import *
@first_command
def hello():
return "Hello"
@command
def greet(greeting, who="world"):
return f"{greeting}, {who}!"
# with default delimiters
print (evaluate_template("""
Template example [[]]
- $hello$
- $hello/greet$
- $hello/greet-everybody$
"""))
# with custom delimiters
print (evaluate_template("""
Template example $$$
- [[hello]]
- [[hello/greet]]
- [[hello/greet-everybody]]
""","[[","]]")) | 55 | 0 | 44 |
400dd9d970b186e950943a1ba8bfd942a1d184e9 | 710 | py | Python | testbed.py | TuKo/rl | 6ffc25c164f3160e5b747a1896b7953eada7ff46 | [
"BSD-3-Clause"
] | null | null | null | testbed.py | TuKo/rl | 6ffc25c164f3160e5b747a1896b7953eada7ff46 | [
"BSD-3-Clause"
] | null | null | null | testbed.py | TuKo/rl | 6ffc25c164f3160e5b747a1896b7953eada7ff46 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
| 33.809524 | 67 | 0.569014 | import numpy as np
class Testbed(object):
    """Run a collection of agents against an environment for a fixed number
    of steps, recording every action taken and every reward received."""

    def __init__(self, agents, steps=1000):
        """Store the agents and pre-allocate per-agent history arrays."""
        self._agents = agents
        self._steps = steps
        shape = (len(agents), steps)
        self._action_history = np.zeros(shape)
        self._reward_history = np.zeros(shape)

    def run(self, env):
        """Run each agent independently on a freshly restarted *env*."""
        for idx, agent in enumerate(self._agents):
            env.restart()
            agent.reset()
            for step in range(self._steps):
                chosen = agent.get_action()
                payoff = env.reward(chosen)
                agent.update(chosen, payoff)
                self._action_history[idx, step] = chosen
                self._reward_history[idx, step] = payoff
| 613 | 1 | 76 |