from django import forms
from django.core.exceptions import ValidationError
from django.utils import timezone
from .models import ChoiceBet, DateBet
from .util import create_choices
from ledger.models import Account
from profiles.models import ForbiddenUser
class ChoiceBetCreationForm(forms.ModelForm):
class Meta:
model = ChoiceBet
fields = ['name', 'description', 'end_bets_date', 'end_date']
pub_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
end_bets_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
end_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
forbidden = forms.ModelMultipleChoiceField(queryset=ForbiddenUser.objects.all(), required=False)
def __init__(self, *args, **kwargs):
super(ChoiceBetCreationForm, self).__init__(*args, **kwargs)
self.fields['forbidden'].widget.attrs["size"] = ForbiddenUser.objects.all().count()
def clean_pub_date(self):
pub_date = self.cleaned_data.get('pub_date')
if pub_date is None:
return pub_date
if pub_date <= timezone.now().date():
raise ValidationError(
'If you set a publication date, it has to be in the future. If you want the bet to be visible '
'immediately, do not set a publication date.',
code='pub_date_not_in_future')
return pub_date
def clean_end_bets_date(self):
pub_date = self.cleaned_data.get('pub_date')
end_bets_date = self.cleaned_data.get('end_bets_date')
if end_bets_date is None:
return end_bets_date
if pub_date is None:
if end_bets_date <= timezone.now().date():
raise ValidationError('Must give at least 1 day to place bets.', code='end_bets_not_in_future')
elif end_bets_date <= pub_date:
raise ValidationError('Bet placement has to be open after publish date.',
code='end_bets_date_before_pub_date')
return end_bets_date
def clean_end_date(self):
pub_date = self.cleaned_data.get('pub_date')
end_bets_date = self.cleaned_data.get('end_bets_date')
end_date = self.cleaned_data.get('end_date')
if end_date is None:
return end_date
if end_bets_date is not None and end_date < end_bets_date:
raise ValidationError('Bet placement cannot stay open after the bet has ended.',
code='end_date_before_end_bets_date')
if pub_date is not None and end_date <= pub_date:
raise ValidationError('The timespan between the publication date and the end date must be at least one day.',
code='bet_timespan_too_short')
return end_date
def save(self, request):
name = self.cleaned_data['name']
description = self.cleaned_data['description']
pub_date = self.cleaned_data['pub_date']
end_bets_date = self.cleaned_data['end_bets_date']
end_date = self.cleaned_data.get('end_date')
forbidden = self.cleaned_data['forbidden']
account = Account(name=name, type='b')
account.save()
new_bet = ChoiceBet(
owner=request.user.profile,
name=name,
description=description,
end_bets_date=end_bets_date,
end_date=end_date,
account=account
)
choices = create_choices(request, new_bet)
new_bet.save()
for choice in choices:
choice.save()
for forbidden_user in forbidden:
new_bet.forbidden.add(forbidden_user)
if pub_date is not None:
new_bet.pub_date = pub_date
new_bet.save()
return new_bet
class DateBetCreationForm(forms.ModelForm):
class Meta:
model = DateBet
fields = ['name', 'description', 'end_bets_date', 'time_period_start', 'time_period_end']
pub_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
end_bets_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
time_period_start = forms.DateField(widget=forms.SelectDateWidget, required=False)
time_period_end = forms.DateField(widget=forms.SelectDateWidget, required=False)
forbidden = forms.ModelMultipleChoiceField(queryset=ForbiddenUser.objects.all(), required=False)
def clean_pub_date(self):
pub_date = self.cleaned_data.get('pub_date')
if pub_date is None:
return pub_date
if pub_date <= timezone.now().date():
raise ValidationError(
'If you set a publication date, it has to be in the future. If you want the bet to be visible '
'immediately, do not set a publication date.',
code='pub_date_not_in_future')
return pub_date
def clean_end_bets_date(self):
pub_date = self.cleaned_data.get('pub_date')
end_bets_date = self.cleaned_data.get('end_bets_date')
if end_bets_date is None:
return end_bets_date
if pub_date is None:
if end_bets_date <= timezone.now().date():
raise ValidationError('Must give at least 1 day to place bets.', code='end_bets_not_in_future')
elif end_bets_date < pub_date:
raise ValidationError('Bet placement has to be open after publish date.',
code='end_bets_date_before_pub_date')
return end_bets_date
def clean_time_period_start(self):
pub_date = self.cleaned_data.get('pub_date')
time_period_start = self.cleaned_data.get('time_period_start')
if time_period_start is None:
return time_period_start
if pub_date is None:
if time_period_start <= timezone.now().date():
raise ValidationError(
'The period to bet on must be in the future.', code='time_period_start_not_in_future')
elif time_period_start <= pub_date:
raise ValidationError(
'The period to bet on has to start after Publication. Do not set a start date if you want the '
'period to start at publication.',
code='time_period_start_not_greater_pub')
return time_period_start
def clean_time_period_end(self):
pub_date = self.cleaned_data.get('pub_date')
time_period_start = self.cleaned_data.get('time_period_start')
time_period_end = self.cleaned_data.get('time_period_end')
if time_period_end is None:
return time_period_end
if (pub_date is None) and (time_period_start is None):
if time_period_end <= timezone.now().date():
raise ValidationError('The period to bet on must not end in the past', code='period_end_not_in_future')
elif not (time_period_start is None):
if time_period_start >= time_period_end:
raise ValidationError('The period to bet on must end after it has started',
code='period_end_not_greater_period_start')
elif not (pub_date is None):
if time_period_end <= pub_date:
raise ValidationError('The period to bet on must not end before the bet is visible',
code='period_end_not_greater_pub')
return time_period_end
def save(self, user):
name = self.cleaned_data['name']
description = self.cleaned_data['description']
pub_date = self.cleaned_data['pub_date']
end_bets_date = self.cleaned_data['end_bets_date']
time_period_start = self.cleaned_data['time_period_start']
time_period_end = self.cleaned_data['time_period_end']
forbidden = self.cleaned_data['forbidden']
account = Account(name=name, type='b')
account.save()
new_bet = DateBet.objects.create(
owner=user,
name=name,
description=description,
end_bets_date=end_bets_date,
time_period_start=time_period_start,
time_period_end=time_period_end,
account=account
)
for forbidden_user in forbidden:
new_bet.forbidden.add(forbidden_user)
if pub_date is not None:
new_bet.pub_date = pub_date
new_bet.save()
return new_bet
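# Illustrative (hypothetical) view sketch, not part of this module. Note that both
# creation forms define save() to take the request (or user) instead of the usual
# ModelForm commit argument:
#
#   def create_choice_bet(request):
#       form = ChoiceBetCreationForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           bet = form.save(request)
#           return redirect('bets:index')  # hypothetical URL name
#       return render(request, 'bets/create_choice_bet.html', {'form': form})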
|
from django.test import SimpleTestCase
import mock
cursor_wrapper = mock.Mock()
cursor_wrapper.side_effect = RuntimeError("No touching the database!")
@mock.patch("django.db.backends.utils.CursorWrapper", cursor_wrapper)
class SimpleTest(SimpleTestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
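# With CursorWrapper patched above, any ORM query issued from these tests raises
# RuntimeError("No touching the database!"), so accidental database access fails loudly
# instead of slipping through.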
|
import configparser
from pathlib import Path
from typing import TYPE_CHECKING
from radiant_mlhub.cli import mlhub
if TYPE_CHECKING:
from click.testing import CliRunner as CliRunner_Type
class TestCLI:
def test_version(self, cli_runner: "CliRunner_Type") -> None:
result = cli_runner.invoke(mlhub, ['--version'])
assert result.output.rstrip('\n') == 'mlhub, version 0.3.1'
def test_configure(self, isolated_cli_runner: "CliRunner_Type") -> None:
new_home = Path.cwd()
# Monkeypatch the user's home directory to be the temp directory (CWD)
env = {
'HOME': str(new_home),
'USERPROFILE': str(new_home)
}
result = isolated_cli_runner.invoke(mlhub, ['configure'], input='testapikey\n', env=env)
assert result.exit_code == 0, result.output
# Should create a profiles file in the "HOME" directory
profile_path = new_home / '.mlhub' / 'profiles'
assert profile_path.exists()
config = configparser.ConfigParser()
config.read(profile_path)
assert config.get('default', 'api_key') == 'testapikey'
# Should abort if an api key exists and user does not confirm overwrite
result = isolated_cli_runner.invoke(mlhub, ['configure'], input='testapikey\nn\n', env=env)
assert result.exit_code == 1, result.output
def test_configure_user_defined_home(self, isolated_cli_runner: "CliRunner_Type") -> None:
new_home = Path.cwd()
mlhub_home = new_home / 'some-directory' / '.mlhub'
mlhub_home.mkdir(parents=True)
result = isolated_cli_runner.invoke(
mlhub,
['configure'],
input='userdefinedhome\n',
env={'MLHUB_HOME': str(mlhub_home)}
)
assert result.exit_code == 0, result.output
# Should create a profiles file in the "HOME" directory
profile_path = mlhub_home / 'profiles'
assert profile_path.exists()
config = configparser.ConfigParser()
config.read(profile_path)
assert config.get('default', 'api_key') == 'userdefinedhome'
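# For reference, the profiles file checked above is plain INI as written by configparser,
# e.g.:
#
#   [default]
#   api_key = userdefinedhome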
|
# Generated by Django 2.2.10 on 2021-07-23 04:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0011_auto_20210722_1131'),
]
operations = [
migrations.AddField(
model_name='salesuser',
name='Sales_User_include_bnkdetails',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='accessrequest',
name='Request_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 23, 10, 13, 11, 217633), null=True, verbose_name='Date of Request'),
),
migrations.AlterField(
model_name='invoice',
name='Invoice_Date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 23, 10, 13, 11, 217005), null=True, verbose_name='Date of invoice'),
),
]
|
from Mymqtt import Mymqtt
from receive import *
import pygame
# Screen refresh counter
flash = 0
# Background image and the moving car image
car_image_name = "car.png"
pygame.init()
# Create the window and set its title
screen_show = pygame.display.set_mode((1000, 600), 0, 32)
pygame.display.set_caption("Little Case")
# Load and convert the images
car0 = pygame.image.load(car_image_name).convert_alpha()
car1 = pygame.image.load(car_image_name).convert_alpha()
car2 = pygame.image.load(car_image_name).convert_alpha()
car3 = pygame.image.load(car_image_name).convert_alpha()
# Initial car parameters
a = 0.061*ratio
# Required state: current position, current speed, time of last update, acceleration, flag (whether uniform deceleration has started), rotation angle, reverse flag
list_value_car0 = [1*ratio, Coordinate_trans(0*ratio), 0, 0, 0, 1, 90, 0]
list_value_car1 = [1*ratio, Coordinate_trans(0*ratio), 0, 0, 0, 1, 90, 0]
list_value_car2 = [6.1*ratio, Coordinate_trans(0*ratio), 0, 0, 0, 1, 90, 0]
list_value_car3 = [6.1*ratio, Coordinate_trans(0*ratio), 0, 0, 0, 1, 90, 0]
list_value_car0[4] = a
list_value_car1[4] = a
list_value_car2[4] = a
list_value_car3[4] = a
# Throttle gears range from 1 to 10; flag 1 means forward, 2 means reverse, 3 means emergency brake
# x,y,T,flag
# flag: 1 means forward, 2 means reverse, 3 means emergency brake
list_init_car0 = [1*ratio, Coordinate_trans(0*ratio), 0, 1]
list_init_car1 = [1*ratio, Coordinate_trans(0*ratio), 0, 1]
list_init_car2 = [6.1*ratio, Coordinate_trans(0*ratio), 0, 1]
list_init_car3 = [6.1*ratio, Coordinate_trans(0*ratio), 0, 1]
# Initialize the car MQTT clients
client_car0 = Mymqtt("car1")
client_car0.connect()
client_car1 = Mymqtt("car2")
client_car1.connect()
client_car2 = Mymqtt("car3")
client_car2.connect()
client_car3 = Mymqtt("car4")
client_car3.connect()
# The cars could publish their first piece of data here
# Exactly what data should be sent has not been decided yet
# client.push_info(position)
while True:
# Refresh the screen once every 20 loop iterations
if flash == 20:
screen_show.fill((0, 0, 0))
flash = 0
else:
flash = flash + 1
# Fetch data
car0_data = get_data(client_car0, list_value_car0)
if car0_data:
list_init_car0 = car0_data[0]
list_value_car0 = car0_data[1]
car1_data = get_data(client_car1, list_value_car1)
if car1_data:
list_init_car1 = car1_data[0]
list_value_car1 = car1_data[1]
car2_data = get_data(client_car2, list_value_car2)
if car2_data:
list_init_car2 = car2_data[0]
list_value_car2 = car2_data[1]
car3_data = get_data(client_car3, list_value_car3)
if car3_data:
list_init_car3 = car3_data[0]
list_value_car3 = car3_data[1]
# Motion
list_value_car0 = car_straight(car0, screen_show, list_init_car0, list_value_car0)
list_value_car1 = car_straight(car1, screen_show, list_init_car1, list_value_car1)
list_value_car2 = car_straight(car2, screen_show, list_init_car2, list_value_car2)
list_value_car3 = car_straight(car3, screen_show, list_init_car3, list_value_car3)
# Publish data
client_list = [client_car0, client_car1, client_car2, client_car3]
# client_list = [client_car0]
value_list = [list_value_car0, list_value_car1, list_value_car2, list_value_car3]
# value_list = [list_value_car0]
publish_data(client_list, value_list)
|
from PIL import Image
import numpy as np
#-- read pixels to ndarray
im1 = np.array( Image.open('../images/img001.png').convert('RGB') )
#-- pixel operation
im1[:, :, 0] = 0
#im1[:, :, 1] = 0
im1[:, :, 2] = 0
#-- save to png
im3 = Image.fromarray(im1)
im3.save('z201.png')
|
def getPairs(li, x):
myDic = dict()
for i in li:
remVal = x - i
if i in myDic:
print remVal, ',', i
myDic[remVal] = True
li = [110, 175, 317, 50, 1178, 819, 70]
getPairs(li, 427)
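# For this input the only pair summing to 427 is (110, 317), so the call above prints: 110 , 317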
# time complexity is linear: a single pass with O(1) dict lookups
|
"""
Test of constraints value_precede and value_precede_chain in cpmpy
The global constraint
value_precede(s,t, x)
ensures that the value s precedesl the value t in array x
if both s and t are in x.
The global constraint
value_precede_chain(c, x)
ensures that the value c[i-1] precedes the value c[i] in the array x
if both c[i-1] and c[i] are in x.
These constraints are often used for symmetry breaking.
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
import operator
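# A minimal decomposition sketch (an assumption, not the cpmpy_hakank implementation) of
# value_precede(s, t, x) for the usual case s != t: if t occurs at position j, then s must
# occur at some earlier position i < j, which in particular forbids t at position 0.
def value_precede_sketch(s, t, x):
    cons = [x[0] != t]
    for j in range(1, len(x)):
        cons += [(x[j] == t).implies(sum([x[i] == s for i in range(j)]) >= 1)]
    return cons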
#
# Testing value_precede
#
def value_precede_test(n):
# variables
x = intvar(1,n,shape=n,name="x")
# constraints
model = Model([value_precede(4,3,x)])
ortools_wrapper2(model,[x])
#
# Testing value_precede_chain
#
def value_precede_chain_test(n):
# variables
x = intvar(1,n,shape=n,name="x")
cs = list(range(1,n+1))
print("cs:",cs),
# constraints
model = Model([value_precede_chain(cs,x)])
ortools_wrapper2(model,[x])
n = 4
print("value_precede_test")
value_precede_test(n)
print("\nvalue_precede_chain_test")
value_precede_chain_test(n)
|
from onnx_graphsurgeon.logger.logger import G_LOGGER
|
"""High level entry point for processing a sample.
Samples may include multiple lanes, or barcoded subsections of lanes,
processed together.
"""
import copy
import os
import toolz as tz
from bcbio import utils, bam, broad
from bcbio.log import logger
from bcbio.pipeline.merge import merge_bam_files
from bcbio.bam import fastq, callable
from bcbio.bam.trim import trim_adapters
from bcbio.ngsalign import postalign
from bcbio.pipeline.fastq import get_fastq_files
from bcbio.pipeline.alignment import align_to_sort_bam
from bcbio.pipeline import cleanbam
from bcbio.variation import bedutils, recalibrate
from bcbio.variation import multi as vmulti
import bcbio.pipeline.datadict as dd
def prepare_sample(data):
"""Prepare a sample to be run, potentially converting from BAM to
FASTQ and/or downsampling the number of reads for a test run
"""
NUM_DOWNSAMPLE = 10000
logger.debug("Preparing %s" % data["rgnames"]["sample"])
file1, file2 = get_fastq_files(data)
if data.get("test_run", False):
if bam.is_bam(file1):
file1 = bam.downsample(file1, data, NUM_DOWNSAMPLE)
file2 = None
else:
file1, file2 = fastq.downsample(file1, file2, data,
NUM_DOWNSAMPLE, quick=True)
data["files"] = [file1, file2]
return [[data]]
def trim_sample(data):
"""Trim from a sample with the provided trimming method.
Support methods: read_through.
"""
to_trim = [x for x in data["files"] if x is not None]
dirs = data["dirs"]
config = data["config"]
# this block is to maintain legacy configuration files
trim_reads = config["algorithm"].get("trim_reads", False)
if not trim_reads:
logger.info("Skipping trimming of %s." % (", ".join(to_trim)))
return [[data]]
out_dir = os.path.join(dd.get_work_dir(data), "trimmed")
utils.safe_makedir(out_dir)
if trim_reads == "read_through":
logger.info("Trimming low quality ends and read through adapter "
"sequence from %s." % (", ".join(to_trim)))
out_files = trim_adapters(to_trim, out_dir, config)
data["files"] = out_files
return [[data]]
# ## Alignment
def link_bam_file(orig_file, new_dir):
"""Provide symlinks of BAM file and existing indexes.
"""
new_dir = utils.safe_makedir(new_dir)
sym_file = os.path.join(new_dir, os.path.basename(orig_file))
utils.symlink_plus(orig_file, sym_file)
return sym_file
def _add_supplemental_bams(data):
"""Add supplemental files produced by alignment, useful for structural
variant calling.
"""
file_key = "work_bam"
if data.get(file_key):
for supext in ["disc", "sr"]:
base, ext = os.path.splitext(data[file_key])
test_file = "%s-%s%s" % (base, supext, ext)
if os.path.exists(test_file):
sup_key = file_key + "-plus"
if sup_key not in data:
data[sup_key] = {}
data[sup_key][supext] = test_file
return data
def process_alignment(data):
"""Do an alignment of fastq files, preparing a sorted BAM output file.
"""
fastq1, fastq2 = dd.get_input_sequence_files(data)
config = data["config"]
aligner = config["algorithm"].get("aligner", None)
if fastq1 and utils.file_exists_or_remote(fastq1) and aligner:
logger.info("Aligning lane %s with %s aligner" % (data["rgnames"]["lane"], aligner))
data = align_to_sort_bam(fastq1, fastq2, aligner, data)
data = _add_supplemental_bams(data)
elif fastq1 and os.path.exists(fastq1) and fastq1.endswith(".bam"):
sort_method = config["algorithm"].get("bam_sort")
bamclean = config["algorithm"].get("bam_clean")
if bamclean is True or bamclean == "picard":
if sort_method and sort_method != "coordinate":
raise ValueError("Cannot specify `bam_clean: picard` with `bam_sort` other than coordinate: %s"
% sort_method)
out_bam = cleanbam.picard_prep(fastq1, data["rgnames"], data["sam_ref"], data["dirs"],
data)
elif sort_method:
runner = broad.runner_from_config(config)
out_file = os.path.join(data["dirs"]["work"], "{}-sort.bam".format(
os.path.splitext(os.path.basename(fastq1))[0]))
out_bam = runner.run_fn("picard_sort", fastq1, sort_method, out_file)
else:
out_bam = link_bam_file(fastq1, os.path.join(data["dirs"]["work"], "prealign",
data["rgnames"]["sample"]))
bam.check_header(out_bam, data["rgnames"], data["sam_ref"], data["config"])
dedup_bam = postalign.dedup_bam(out_bam, data)
data["work_bam"] = dedup_bam
elif fastq1 and utils.file_exists_or_remote(fastq1) and fastq1.endswith(".cram"):
data["work_bam"] = fastq1
elif fastq1 is None and "vrn_file" in data:
data["config"]["algorithm"]["variantcaller"] = False
data["work_bam"] = None
else:
raise ValueError("Could not process input file: %s" % fastq1)
return [[data]]
def prep_samples(*items):
"""Handle any global preparatory steps for samples with potentially shared data.
Avoids race conditions in postprocess alignment when performing prep tasks
on shared files between multiple similar samples.
Cleans input BED files to avoid issues with overlapping input segments.
"""
out = []
for data in (x[0] for x in items):
data = bedutils.clean_inputs(data)
out.append([data])
return out
def postprocess_alignment(data):
"""Perform post-processing steps required on full BAM files.
Prepares list of callable genome regions allowing subsequent parallelization.
"""
if vmulti.bam_needs_processing(data) and data["work_bam"].endswith(".bam"):
callable_region_bed, nblock_bed, callable_bed = \
callable.block_regions(data["work_bam"], data["sam_ref"], data["config"])
data["regions"] = {"nblock": nblock_bed, "callable": callable_bed}
if (os.path.exists(callable_region_bed) and
not data["config"]["algorithm"].get("variant_regions")):
data["config"]["algorithm"]["variant_regions"] = callable_region_bed
data = bedutils.clean_inputs(data)
data = _recal_no_markduplicates(data)
return [[data]]
def _recal_no_markduplicates(data):
orig_config = copy.deepcopy(data["config"])
data["config"]["algorithm"]["mark_duplicates"] = False
data = recalibrate.prep_recal(data)[0][0]
data["config"] = orig_config
return data
def _merge_out_from_infiles(in_files):
"""Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts.
"""
fname = os.path.commonprefix([os.path.basename(f) for f in in_files])
while fname.endswith(("-", "_", ".")):
fname = fname[:-1]
ext = os.path.splitext(in_files[0])[-1]
dirname = os.path.dirname(in_files[0])
while dirname.endswith(("split", "merge")):
dirname = os.path.dirname(dirname)
return os.path.join(dirname, "%s%s" % (fname, ext))
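# Illustrative example (hypothetical paths): for in_files
# ["/work/split/s1-part1.bam", "/work/split/s1-part2.bam"] the common basename prefix is
# "s1-part", the ".bam" extension comes from the first file, and the trailing "split"
# directory is stripped, giving "/work/s1-part.bam".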
def delayed_bam_merge(data):
"""Perform a merge on previously prepped files, delayed in processing.
Handles merging of associated split read and discordant files if present.
"""
if data.get("combine"):
assert len(data["combine"].keys()) == 1
file_key = data["combine"].keys()[0]
extras = []
for x in data["combine"][file_key].get("extras", []):
if isinstance(x, (list, tuple)):
extras.extend(x)
else:
extras.append(x)
if file_key in data:
extras.append(data[file_key])
in_files = sorted(list(set(extras)))
out_file = tz.get_in(["combine", file_key, "out"], data, _merge_out_from_infiles(in_files))
sup_exts = data.get(file_key + "-plus", {}).keys()
for ext in sup_exts + [""]:
merged_file = None
if os.path.exists(utils.append_stem(out_file, "-" + ext)):
cur_out_file, cur_in_files = out_file, []
if ext:
cur_in_files = list(filter(os.path.exists, (utils.append_stem(f, "-" + ext) for f in in_files)))
cur_out_file = utils.append_stem(out_file, "-" + ext) if len(cur_in_files) > 0 else None
else:
cur_in_files, cur_out_file = in_files, out_file
if cur_out_file:
config = copy.deepcopy(data["config"])
config["algorithm"]["save_diskspace"] = False
if len(cur_in_files) > 0:
merged_file = merge_bam_files(cur_in_files, os.path.dirname(cur_out_file), config,
out_file=cur_out_file)
else:
assert os.path.exists(cur_out_file)
merged_file = cur_out_file
if merged_file:
if ext:
data[file_key + "-plus"][ext] = merged_file
else:
data[file_key] = merged_file
data.pop("region", None)
data.pop("combine", None)
return [[data]]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from mock import MagicMock
from django.test import TestCase
from auth_backend.plugins import utils
from auth_backend.tests.mock_path import * # noqa
class UtilsTestCase(TestCase):
def setUp(self):
self.resource_type = 'resource_type_token'
self.resource_type_name = 'resource_type_name_token'
self.base_info = {
'resource': {
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name
},
'base': 'base_token',
'scope_id': 'base_scope_id_token'
}
self.action_id = 'action_id_token'
self.action_name = 'action_name_token'
self.instance = 'instance_token'
self.scope_id = 'scope_id_token'
self.resource_name = 'resource_name_token'
self.resource_id = 'resource_id_token'
self.instance_object = MagicMock()
self.auth_resource = MagicMock()
self.auth_resource.base_info = MagicMock(return_value=self.base_info)
self.auth_resource.resource_id = MagicMock(return_value=self.resource_id)
self.auth_resource.resource_name = MagicMock(return_value=self.resource_name)
self.action = MagicMock()
self.action.name = self.action_name
self.auth_resource.actions_map = {
self.action_id: self.action
}
def test_build_need_permission__with_none_instance(self):
permission = utils.build_need_permission(auth_resource=self.auth_resource,
action_id=self.action_id,
scope_id=self.scope_id)
expect_permission = {
'base': 'base_token',
'scope_id': self.scope_id,
'action_id': self.action_id,
'action_name': self.action_name,
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name,
'resources': []
}
self.assertEqual(permission, expect_permission)
def test_build_need_permission__with_none_scope_id(self):
permission = utils.build_need_permission(auth_resource=self.auth_resource,
action_id=self.action_id)
expect_permission = {
'base': 'base_token',
'scope_id': 'base_scope_id_token',
'action_id': self.action_id,
'action_name': self.action_name,
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name,
'resources': []
}
self.assertEqual(permission, expect_permission)
def test_build_need_permission__with_instance_object(self):
permission = utils.build_need_permission(auth_resource=self.auth_resource,
action_id=self.action_id,
instance=self.instance_object,
scope_id=self.scope_id)
expect_permission = {
'base': 'base_token',
'scope_id': 'scope_id_token',
'action_id': self.action_id,
'action_name': self.action_name,
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name,
'resources': [
[
{
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name,
'resource_id': self.resource_id,
'resource_name': self.resource_name
}
]
]
}
self.assertEqual(permission, expect_permission)
self.auth_resource.resource_id.assert_called_once_with(self.instance_object)
def test_build_need_permission__with_instance_id(self):
permission = utils.build_need_permission(auth_resource=self.auth_resource,
action_id=self.action_id,
instance=self.instance,
scope_id=self.scope_id)
expect_permission = {
'base': 'base_token',
'scope_id': 'scope_id_token',
'action_id': self.action_id,
'action_name': self.action_name,
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name,
'resources': [
[
{
'resource_type': self.resource_type,
'resource_type_name': self.resource_type_name,
'resource_id': self.instance,
'resource_name': self.resource_name
}
]
]
}
self.assertEqual(permission, expect_permission)
self.auth_resource.resource_id.assert_not_called()
|
import math
from openerp.osv import osv, fields
import openerp.addons.product.product
class res_users(osv.osv):
_inherit = 'res.users'
_columns = {
'ean13' : fields.char('EAN13', size=13, help="BarCode"),
'pos_config' : fields.many2one('pos.config', 'Default Point of Sale', domain=[('state', '=', 'active')]),
}
def _check_ean(self, cr, uid, ids, context=None):
return all(
openerp.addons.product.product.check_ean(user.ean13)
for user in self.browse(cr, uid, ids, context=context)
)
def edit_ean(self, cr, uid, ids, context):
return {
'name': "Edit EAN",
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.ean_wizard',
'target' : 'new',
'view_id': False,
'context':context,
}
_constraints = [
(_check_ean, "Error: Invalid ean code", ['ean13'],),
]
|
import csv
import collections, itertools
import nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews, stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import random
from nltk.stem.snowball import EnglishStemmer
stopset = set(stopwords.words('english'))
lmtzr = EnglishStemmer(True)
#Different frequencies/scoring function between unigram and bigram
def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=150):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return dict([(ngram, True) for ngram in itertools.chain(words, bigrams)])
# Build the feature dictionary, filtering out stopwords
def stopword_filtered_word_feats(words):
return dict([(word, True) for word in words if word not in stopset])
# Build the feature dictionary with stemming applied to each word
def stemming_word_feats(words):
return dict([(lmtzr.stem(word), True) for word in words])
# Build the feature dictionary with a boolean flag marking the presence of each word
def word_feats(words):
return dict([(word, True) for word in words])
# CSV dialect class used to extract the sentences from the file
class PipeDialect(csv.Dialect):
delimiter = "|"
quotechar = None
escapechar = None
doublequote = None
lineterminator = "\r\n"
quoting = csv.QUOTE_NONE
skipinitialspace = False
# Basic binary classifier
def evaluate_classifier(featx):
fneg = "data.neg.txt"
fpos = "data.pos.txt"
f = "data.txt"
fileNeg = open(fneg, "rb")
filePos = open(fpos, "rb")
file = open(f, "rb")
reader = csv.reader(file, PipeDialect())
readerNeg = csv.reader(fileNeg, PipeDialect())
readerPos = csv.reader(filePos, PipeDialect())
sentencesNeg = []
sentencesPos = []
wordsNeg = []
wordsPos = []
for row in readerNeg:
sentencesNeg.append(row[2].lower())
for row in readerPos:
sentencesPos.append(row[2].lower())
tokenizer = RegexpTokenizer(r'\w+')
for i in range(0, len(sentencesNeg)-1):
wordsNeg.append(tokenizer.tokenize(sentencesNeg[i]))
for i in range(0, len(sentencesPos)-1):
wordsPos.append(tokenizer.tokenize(sentencesPos[i]))
words = wordsNeg + wordsPos
print len(set([y for x in words for y in x]))
negfeats = [(featx(wordsNeg[i]), 'neg') for i in range(0, len(wordsNeg)-1)]
posfeats = [(featx(wordsPos[i]), 'pos') for i in range(0, len(wordsPos)-1)]
print len(set([lmtzr.stem(y) for x in words for y in x]))
random.shuffle(negfeats)
random.shuffle(posfeats)
negcutoff = len(negfeats)*3/4
poscutoff = len(posfeats)*3/4
trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
print 'train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats))
classifier = NaiveBayesClassifier.train(trainfeats)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed = classifier.classify(feats)
testsets[observed].add(i)
print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
classifier.show_most_informative_features()
file.close()
filePos.close()
fileNeg.close()
print 'evaluating single word features'
evaluate_classifier(word_feats)
print 'evaluating single word features with no stopword'
evaluate_classifier(stopword_filtered_word_feats)
print 'evaluating single word features with no stopword and stemming'
evaluate_classifier(stemming_word_feats)
|
from python_framework import Enum, EnumItem
@Enum()
class PoolingStatusEnumeration:
NONE = EnumItem()
POOLING = EnumItem()
GOOGLE_SEARCHING = EnumItem()
ERROR_POOLING = EnumItem()
ERROR_DELIVERING = EnumItem()
SUCCESS = EnumItem()
PoolingStatus = PoolingStatusEnumeration()
|
# Copyright 2014 eBay Software Foundation
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock, patch, PropertyMock
from trove.backup.models import Backup
from trove.common.context import TroveContext
from trove.instance.tasks import InstanceTasks
from trove.taskmanager.manager import Manager
from trove.taskmanager import models
from trove.taskmanager import service
from trove.common.exception import TroveError, ReplicationSlaveAttachError
from proboscis.asserts import assert_equal
from trove.tests.unittests import trove_testtools
class TestManager(trove_testtools.TestCase):
def setUp(self):
super(TestManager, self).setUp()
self.manager = Manager()
self.context = TroveContext()
self.mock_slave1 = Mock()
self.mock_slave2 = Mock()
type(self.mock_slave1).id = PropertyMock(return_value='some-inst-id')
type(self.mock_slave2).id = PropertyMock(return_value='inst1')
self.mock_old_master = Mock()
type(self.mock_old_master).slaves = PropertyMock(
return_value=[self.mock_slave1, self.mock_slave2])
self.mock_master = Mock()
type(self.mock_master).slaves = PropertyMock(
return_value=[self.mock_slave1, self.mock_slave2])
def tearDown(self):
super(TestManager, self).tearDown()
self.manager = None
def test_getattr_lookup(self):
self.assertTrue(callable(self.manager.delete_cluster))
self.assertTrue(callable(self.manager.mongodb_add_shard_cluster))
def test_most_current_replica(self):
master = Mock()
master.id = 32
def test_case(txn_list, selected_master):
with patch.object(self.manager, '_get_replica_txns',
return_value=txn_list):
result = self.manager._most_current_replica(master, None)
assert_equal(result, selected_master)
with self.assertRaisesRegexp(TroveError,
'not all replicating from same'):
test_case([['a', '2a99e-32bf', 2], ['b', '2a', 1]], None)
test_case([['a', '2a99e-32bf', 2]], 'a')
test_case([['a', '2a', 1], ['b', '2a', 2]], 'b')
test_case([['a', '2a', 2], ['b', '2a', 1]], 'a')
test_case([['a', '2a', 1], ['b', '2a', 1]], 'a')
test_case([['a', None, 0]], 'a')
test_case([['a', None, 0], ['b', '2a', 1]], 'b')
def test_detach_replica(self):
slave = Mock()
master = Mock()
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[slave, master]):
self.manager.detach_replica(self.context, 'some-inst-id')
slave.detach_replica.assert_called_with(master)
@patch.object(Manager, '_set_task_status')
def test_promote_to_replica_source(self, mock_set_task_status):
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_slave1,
self.mock_old_master,
self.mock_slave2]):
self.manager.promote_to_replica_source(
self.context, 'some-inst-id')
self.mock_slave1.detach_replica.assert_called_with(
self.mock_old_master, for_failover=True)
self.mock_old_master.attach_replica.assert_called_with(
self.mock_slave1)
self.mock_slave1.make_read_only.assert_called_with(False)
self.mock_slave2.detach_replica.assert_called_with(
self.mock_old_master, for_failover=True)
self.mock_slave2.attach_replica.assert_called_with(self.mock_slave1)
self.mock_old_master.demote_replication_master.assert_any_call()
mock_set_task_status.assert_called_with(([self.mock_old_master] +
[self.mock_slave1,
self.mock_slave2]),
InstanceTasks.NONE)
@patch.object(Manager, '_set_task_status')
@patch.object(Manager, '_most_current_replica')
def test_eject_replica_source(self, mock_most_current_replica,
mock_set_task_status):
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_master, self.mock_slave1,
self.mock_slave2]):
self.manager.eject_replica_source(self.context, 'some-inst-id')
mock_most_current_replica.assert_called_with(self.mock_master,
[self.mock_slave1,
self.mock_slave2])
mock_set_task_status.assert_called_with(([self.mock_master] +
[self.mock_slave1,
self.mock_slave2]),
InstanceTasks.NONE)
@patch.object(Manager, '_set_task_status')
def test_exception_TroveError_promote_to_replica_source(self, *args):
self.mock_slave2.detach_replica = Mock(side_effect=TroveError)
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_slave1, self.mock_old_master,
self.mock_slave2]):
self.assertRaises(ReplicationSlaveAttachError,
self.manager.promote_to_replica_source,
self.context, 'some-inst-id')
@patch.object(Manager, '_set_task_status')
@patch.object(Manager, '_most_current_replica')
def test_exception_TroveError_eject_replica_source(
self, mock_most_current_replica, mock_set_tast_status):
self.mock_slave2.detach_replica = Mock(side_effect=TroveError)
mock_most_current_replica.return_value = self.mock_slave1
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_master, self.mock_slave1,
self.mock_slave2]):
self.assertRaises(ReplicationSlaveAttachError,
self.manager.eject_replica_source,
self.context, 'some-inst-id')
@patch.object(Manager, '_set_task_status')
def test_error_promote_to_replica_source(self, *args):
self.mock_slave2.detach_replica = Mock(
side_effect=RuntimeError('Error'))
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_slave1, self.mock_old_master,
self.mock_slave2]):
self.assertRaisesRegexp(RuntimeError, 'Error',
self.manager.promote_to_replica_source,
self.context, 'some-inst-id')
def test_error_demote_replication_master_promote_to_replica_source(self):
self.mock_old_master.demote_replication_master = Mock(
side_effect=RuntimeError('Error'))
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_slave1, self.mock_old_master,
self.mock_slave2]):
self.assertRaises(ReplicationSlaveAttachError,
self.manager.promote_to_replica_source,
self.context, 'some-inst-id')
@patch.object(Manager, '_set_task_status')
@patch.object(Manager, '_most_current_replica')
def test_error_eject_replica_source(self, mock_most_current_replica,
mock_set_tast_status):
self.mock_slave2.detach_replica = Mock(
side_effect=RuntimeError('Error'))
mock_most_current_replica.return_value = self.mock_slave1
with patch.object(models.BuiltInstanceTasks, 'load',
side_effect=[self.mock_master, self.mock_slave1,
self.mock_slave2]):
self.assertRaisesRegexp(RuntimeError, 'Error',
self.manager.eject_replica_source,
self.context, 'some-inst-id')
@patch.object(Backup, 'delete')
def test_create_replication_slave(self, mock_backup_delete):
mock_tasks = Mock()
mock_snapshot = {'dataset': {'snapshot_id': 'test-id'}}
mock_tasks.get_replication_master_snapshot = Mock(
return_value=mock_snapshot)
mock_flavor = Mock()
with patch.object(models.FreshInstanceTasks, 'load',
return_value=mock_tasks):
self.manager.create_instance(self.context, ['id1'], Mock(),
mock_flavor, Mock(), None, None,
'mysql', 'mysql-server', 2,
'temp-backup-id', None,
'some_password', None, Mock(),
'some-master-id', None)
mock_tasks.get_replication_master_snapshot.assert_called_with(
self.context, 'some-master-id', mock_flavor, 'temp-backup-id',
replica_number=1)
mock_backup_delete.assert_called_with(self.context, 'test-id')
@patch.object(models.FreshInstanceTasks, 'load')
@patch.object(Backup, 'delete')
def test_exception_create_replication_slave(self, mock_delete, mock_load):
mock_load.return_value.create_instance = Mock(side_effect=TroveError)
self.assertRaises(TroveError, self.manager.create_instance,
self.context, ['id1', 'id2'], Mock(), Mock(),
Mock(), None, None, 'mysql', 'mysql-server', 2,
'temp-backup-id', None, 'some_password', None,
Mock(), 'some-master-id', None)
def test_AttributeError_create_instance(self):
self.assertRaisesRegexp(
AttributeError, 'Cannot create multiple non-replica instances.',
self.manager.create_instance, self.context, ['id1', 'id2'],
Mock(), Mock(), Mock(), None, None, 'mysql', 'mysql-server', 2,
'temp-backup-id', None, 'some_password', None, Mock(), None, None)
def test_create_instance(self):
mock_tasks = Mock()
mock_flavor = Mock()
mock_override = Mock()
with patch.object(models.FreshInstanceTasks, 'load',
return_value=mock_tasks):
self.manager.create_instance(self.context, 'id1', 'inst1',
mock_flavor, 'mysql-image-id', None,
None, 'mysql', 'mysql-server', 2,
'temp-backup-id', None, 'password',
None, mock_override, None, None)
mock_tasks.create_instance.assert_called_with(mock_flavor,
'mysql-image-id', None,
None, 'mysql',
'mysql-server', 2,
'temp-backup-id', None,
'password', None,
mock_override, None)
mock_tasks.wait_for_instance.assert_called_with(36000, mock_flavor)
def test_create_cluster(self):
mock_tasks = Mock()
with patch.object(models, 'load_cluster_tasks',
return_value=mock_tasks):
self.manager.create_cluster(self.context, 'some-cluster-id')
mock_tasks.create_cluster.assert_called_with(self.context,
'some-cluster-id')
def test_delete_cluster(self):
mock_tasks = Mock()
with patch.object(models, 'load_cluster_tasks',
return_value=mock_tasks):
self.manager.delete_cluster(self.context, 'some-cluster-id')
mock_tasks.delete_cluster.assert_called_with(self.context,
'some-cluster-id')
class TestTaskManagerService(trove_testtools.TestCase):
def test_app_factory(self):
test_service = service.app_factory(Mock())
self.assertIsInstance(test_service, service.TaskService)
|
def read_input():
# for puzzles where each input line is an object
with open('input.txt') as fh:
decks = [[], []]
cur_player = 0
for line in fh.readlines():
if not line.strip():
cur_player = 1
continue
if line.startswith('P'):
continue
decks[cur_player].append(int(line.strip()))
return decks[0], decks[1]
def main():
p1, p2 = read_input()
while len(p1) and len(p2):
a = p1.pop(0)
b = p2.pop(0)
if a > b:
p1.append(a)
p1.append(b)
elif b > a:
p2.append(b)
p2.append(a)
winner = p1
if not len(winner):
winner = p2
print(winner)
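# Score the winning deck: the bottom card is worth 1, the next one 2, and so on,
# so the card at index i is worth len(winner) - i.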
result = 0
for i in range(len(winner)):
result += winner[i]*(len(winner)-i)
print(result)
if __name__ == '__main__':
main()
|
from django.http import request
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.views import LoginView, PasswordResetConfirmView, PasswordResetView
from django.urls import reverse_lazy
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.views.generic.base import TemplateView, View
from accounts.forms import AccountForm, LoginForm, PwdResetConfirmForm, PwdResetForm, RegistrationForm
from django.views.generic import CreateView
from django.contrib.auth import get_user_model, login
from accounts.utils.tokens import account_activation_token
from accounts.tasks import send_confirmation_mail
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.views import LogoutView
User = get_user_model()
# Create your views here.
class CustomLoginView(LoginView):
form_class = LoginForm
template_name = 'login.html'
def post(self, request, *args, **kwargs):
response = super().post(request, *args, **kwargs)
return response
def account_register(request):
if request.user.is_authenticated:
return redirect("/")
if request.method == "POST":
registerForm = RegistrationForm(request.POST)
if registerForm.is_valid():
user = User(
email = registerForm.cleaned_data.get('email'),
first_name = registerForm.cleaned_data.get('first_name'),
last_name = registerForm.cleaned_data.get('last_name')
)
user.set_password(registerForm.cleaned_data["password1"])
user.is_active = False
user.save()
current_site = get_current_site(request)
subject = "Activate your Account"
message = render_to_string(
"email/confirmation_email.html",
{
"user": user,
"domain": current_site.domain,
"uid": urlsafe_base64_encode(force_bytes(user.pk)),
"token": account_activation_token.make_token(user),
},
)
user.email_user(subject=subject, message=message)
return render(request, "email/register_email_confirm.html", {"form": registerForm})
else:
registerForm = RegistrationForm()
return render(request, "register.html", {"form": registerForm})
class CustomLogoutView(LogoutView):
next_page = '/'
def activate(request, uidb64, token):
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.filter(pk=uid, is_active=False).first()
if user is not None and account_activation_token.check_token(user, token):
messages.success(request, 'Your profile is activated')
user.is_active = True
user.save()
CustomLogoutView()
return redirect('accounts:login')
else:
messages.error(request, 'Your session is expired')
return redirect('/')
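# Note: the confirmation email template is expected to build the activation link from the
# "domain", "uid" and "token" context passed in account_register above; the exact URL
# pattern is project-specific, but it must route back to activate() with uidb64 and token
# so the base64-encoded primary key can be decoded and the token verified.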
# Password Reset
class CustomPasswordResetView(PasswordResetView):
email_template_name = 'password/password_reset_email.html'
form_class = PwdResetForm
template_name = 'password/password_reset_form.html'
success_url="password_reset_email_confirm/"
class CustomPasswordResetConfirmView(PasswordResetConfirmView):
template_name = "password/password-reset-confirm.html"
# success_url = "password_reset_complete/"
success_url = '/'
form_class = PwdResetConfirmForm
class CustomResetEmailConfirmView(TemplateView):
template_name = "password/reset_status.html"
class CustomPasswordResetCompleteView(TemplateView):
template_name = "password/reset_status.html"
class AccountView(View):
template_name = 'account_information.html'
http_method_names = ['post', 'get']
def get(self, request):
form = AccountForm(instance=request.user)
return render(request, 'account_information.html', {'form': form})
def post(self, request):
form = AccountForm(request.POST)
if form.is_valid():
user = User.objects.get(pk=request.user.pk)
user.first_name = form.cleaned_data.get('first_name')
user.last_name = form.cleaned_data.get('last_name')
user.email = form.cleaned_data.get('email')
user.save()
messages.success(request, 'Your profile is updated')
return redirect('accounts:account')
else:
messages.error(request, 'Your profile is not updated')
return redirect('accounts:account')
def AddressBook(request):
return render(request, 'address_book.html')
|
import re
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.views.decorators.http import require_GET
import waffle
from statsd import statsd
from rest_framework import viewsets, serializers, mixins, filters, permissions
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import permission_classes, api_view
from rest_framework.authtoken import views as authtoken_views
from kitsune.access.decorators import login_required
from kitsune.sumo.api import CORSMixin
from kitsune.sumo.decorators import json_view
from kitsune.users.helpers import profile_avatar
from kitsune.users.models import Profile, RegistrationProfile
def display_name_or_none(user):
try:
return user.profile.name
except (Profile.DoesNotExist, AttributeError):
return None
@login_required
@require_GET
@json_view
def usernames(request):
"""An API to provide auto-complete data for user names."""
term = request.GET.get('term', '')
query = request.GET.get('query', '')
pre = term or query
if not pre:
return []
if not request.user.is_authenticated():
return []
with statsd.timer('users.api.usernames.search'):
profiles = (
Profile.objects.filter(Q(name__istartswith=pre))
.values_list('user_id', flat=True))
users = (
User.objects.filter(
Q(username__istartswith=pre) | Q(id__in=profiles))
.extra(select={'length': 'Length(username)'})
.order_by('length').select_related('profile'))
if not waffle.switch_is_active('users-dont-limit-by-login'):
last_login = datetime.now() - timedelta(weeks=12)
users = users.filter(last_login__gte=last_login)
return [{'username': u.username,
'display_name': display_name_or_none(u)}
for u in users[:10]]
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def test_auth(request):
return Response({
'username': request.user.username,
'authorized': True,
})
class GetToken(CORSMixin, authtoken_views.ObtainAuthToken):
"""Add CORS headers to the ObtainAuthToken view."""
class OnlySelfEdits(permissions.BasePermission):
"""
Only allow users/profiles to be edited and deleted by themselves.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
# SAFE_METHODS is a list containing all the read-only methods.
if request.method in permissions.SAFE_METHODS:
return True
# If flow gets here, the method will modify something.
request_user = getattr(request, 'user', None)
user = getattr(obj, 'user', None)
# Only the owner can modify things.
return request_user == user
class ProfileSerializer(serializers.ModelSerializer):
username = serializers.WritableField(source='user.username')
display_name = serializers.WritableField(source='name', required=False)
date_joined = serializers.Field(source='user.date_joined')
avatar = serializers.SerializerMethodField('get_avatar_url')
# These are write only fields. It is very important they stay that way!
email = serializers.WritableField(
source='user.email', write_only=True, required=False)
password = serializers.WritableField(
source='user.password', write_only=True)
class Meta:
model = Profile
fields = [
'username',
'display_name',
'date_joined',
'avatar',
'bio',
'website',
'twitter',
'facebook',
'irc_handle',
'timezone',
'country',
'city',
'locale',
# Password and email are here so they can be involved in write
# operations. They are marked as write-only above, so they will not be
# visible.
# TODO: Make email visible if the user has opted in, or is the
# current user.
'email',
'password',
]
def get_avatar_url(self, obj):
return profile_avatar(obj.user)
def restore_object(self, attrs, instance=None):
"""
Override the default behavior to make a user if one doesn't exist.
This user may not be saved here, but will be saved if/when the .save()
method of the serializer is called.
"""
instance = (super(ProfileSerializer, self)
.restore_object(attrs, instance))
if instance.user_id is None:
# The Profile doesn't have a user, so create one. If an email is
# specified, the user will be inactive until the email is
# confirmed. Otherwise the user can be created immediately.
if 'user.email' in attrs:
u = RegistrationProfile.objects.create_inactive_user(
attrs['user.username'],
attrs['user.password'],
attrs['user.email'])
else:
u = User(username=attrs['user.username'])
u.set_password(attrs['user.password'])
instance._nested_forward_relations['user'] = u
return instance
def validate_username(self, attrs, source):
obj = self.object
if obj is None:
# This is a create
if User.objects.filter(username=attrs['user.username']).exists():
raise ValidationError('A user with that username exists')
else:
# This is an update
new_username = attrs.get('user.username', obj.user.username)
if new_username != obj.user.username:
raise ValidationError("Can't change this field.")
if re.match(r'^[\w.-]{4,30}$', attrs['user.username']) is None:
raise ValidationError(
'Usernames may only be letters, numbers, "." and "-".')
return attrs
def validate_display_name(self, attrs, source):
if attrs.get('name') is None:
attrs['name'] = attrs.get('user.username')
return attrs
def validate_email(self, attrs, source):
email = attrs.get('user.email')
if email and User.objects.filter(email=email).exists():
raise ValidationError('A user with that email address '
'already exists.')
return attrs
class ProfileViewSet(CORSMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
model = Profile
serializer_class = ProfileSerializer
paginate_by = 20
# Use usernames instead of ids in urls.
lookup_field = 'user__username'
permission_classes = [
OnlySelfEdits,
]
filter_backends = [
filters.DjangoFilterBackend,
filters.OrderingFilter,
]
filter_fields = []
ordering_fields = []
# Default, if not overwritten
ordering = ('-user__date_joined',)
|
import pymysql
MYSQL_CONFIG = {
'host': 'mysql', # mysql or '127.0.0.1'
'port': 3306,
'user': 'root',
'password': 'mysql520',
'charset': 'utf8',
'use_unicode': True,
'cursorclass': pymysql.cursors.DictCursor,
'connect_timeout': 60,
'maxconnections': 50
}
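# Illustrative (hypothetical) usage sketch: most of these keys map directly onto
# pymysql.connect() arguments, but 'maxconnections' is not a pymysql parameter (it is
# typically consumed by a connection pool such as DBUtils' PooledDB), so pop it off before
# passing the rest through:
#
#   cfg = dict(MYSQL_CONFIG)
#   pool_size = cfg.pop('maxconnections')
#   connection = pymysql.connect(**cfg)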
|
from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination
class StandardResultsSetPagination(PageNumberPagination):
page_size = 2
page_size_query_param = 'page_size'
max_page_size = 20
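# Illustrative (hypothetical) usage sketch; the view and model names below are assumptions,
# not part of this module. A DRF view opts in via pagination_class, and clients may request
# e.g. ?page=2&page_size=10, with page_size capped at max_page_size=20:
#
#   from rest_framework import generics
#
#   class ArticleListView(generics.ListAPIView):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       pagination_class = StandardResultsSetPagination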
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for weighted stats functions.
"""
from __future__ import division
import os
import sys
import platform
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir))
from weightedstats import *
class TestWeightedStats(unittest.TestCase):
def setUp(self):
self.data = [
[7, 1, 2, 4, 10],
[7, 1, 2, 4, 10],
[7, 1, 2, 4, 10, 15],
[1, 2, 4, 7, 10, 15],
[0, 10, 20, 30],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[30, 40, 50, 60, 35],
[2, 0.6, 1.3, 0.3, 0.3, 1.7, 0.7, 1.7, 0.4],
[3.7, 3.3, 3.5, 2.8],
[100, 125, 123, 60, 45, 56, 66],
[2, 2, 2, 2, 2, 2],
[2.3],
[-2, -3, 1, 2, -10],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[5, 4, 3, 2, 1],
[-2, 2, -1, 3, 6],
[-10, 1, 1, -10, -10],
[2, 4],
[2, 2, 4, 4],
[2, 2, 2, 4],
]
self.weights = [
[1, 1/3, 1/3, 1/3, 1],
[1, 1, 1, 1, 1],
[1, 1/3, 1/3, 1/3, 1, 1],
[1/3, 1/3, 1/3, 1, 1, 1],
[30, 191, 9, 0],
[10, 1, 1, 1, 9],
[10, 1, 1, 1, 900],
[1, 3, 5, 4, 2],
[2, 2, 0, 1, 2, 2, 1, 6, 0],
[5, 5, 4, 1],
[30, 56, 144, 24, 55, 43, 67],
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
[12],
[7, 1, 1, 1, 6],
[1, 0, 0, 0, 2],
[10, 1, 1, 1, 9],
[10, 1, 1, 1, 10],
[1, 2, -3, 4, -5],
[0.1, 0.2, 0.3, -0.2, 0.1],
[-1, -1, -1, -1, 1],
[1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
]
self.median_answers = [7.0, 4.0, 8.5,
8.5, 10.0, 2.5,
5.0, 50.0, 1.7,
3.5, 100.0, 2.0,
2.3, -2.0, 5.0,
2.5, 3.0, 2.0,
-1.0, -10.0, 3.0,
3.0, 2.0]
self.mean_answers = [6.444444, 4.800000, 8.583333,
8.583333, 9.086956, 2.909091,
4.949617, 47.333333, 1.275000,
3.453333, 91.782816, 2.000000,
2.300000, -4.625000, 3.666667,
2.909091, 3.000000, -7.000000,
-0.200000, -2.666667, 3.000000,
3.000000, 2.500000]
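# Worked example for the first dataset above: data [7, 1, 2, 4, 10] with weights
# [1, 1/3, 1/3, 1/3, 1]. Sorted by value, the cumulative weights are
# 1 -> 1/3, 2 -> 2/3, 4 -> 1, 7 -> 2, 10 -> 3; half of the total weight (1.5) is first
# reached at the value 7, so the weighted median is 7.0, matching median_answers[0].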
def test_mean(self):
datum = [7, 1, 1, 1, 6]
self.assertTrue(weighted_mean(datum) == mean(datum) == 3.2)
def test_weighted_mean(self):
for datum, weight, answer in zip(self.data, self.weights, self.mean_answers):
self.assertTrue(abs(weighted_mean(datum, weights=weight) - answer) <= 1e-6)
def test_numpy_weighted_mean(self):
for datum, weight, answer in zip(self.data, self.weights, self.mean_answers):
self.assertTrue(abs(numpy_weighted_mean(datum, weights=weight) - answer) <= 1e-6)
def test_median(self):
datum = [4, 3, 2, 1]
self.assertTrue(weighted_median(datum) == numpy_weighted_median(datum) == median(datum) == 2.5)
datum = [7, 1, 1, 1, 6]
self.assertTrue(weighted_median(datum) == numpy_weighted_median(datum) == median(datum) == 1.0)
def test_weighted_median(self):
for datum, weight, answer in zip(self.data, self.weights, self.median_answers):
self.assertTrue(abs(weighted_median(datum, weights=weight) - answer) <= 1e-16)
self.assertTrue(weighted_median([4, 3, 2, 1], weights=[0, 0, 0, 0]) is None)
def test_numpy_weighted_median(self):
for datum, weight, answer in zip(self.data, self.weights, self.median_answers):
self.assertTrue(abs(numpy_weighted_median(datum, weights=weight) - answer) <= 1e-16)
self.assertTrue(numpy_weighted_median([4, 3, 2, 1], weights=[0, 0, 0, 0]) is None)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestWeightedStats)
unittest.TextTestRunner(verbosity=2).run(suite)
|
#!/usr/bin/env python3
import sys
import random
import requests
import populate
if len(sys.argv) < 2:
print("no seed given.abort.")
sys.exit(1)
seed = sys.argv[1]
users,msgs = populate.get_data(seed)
URL = "http://127.0.0.1:80"
# test for users
resp = requests.get(URL+"/users")
assert resp.status_code == 200
assert 'application/json' in resp.headers['content-type']
assert len(resp.json()["users"]) == len(users)
length = random.randint(1,len(users))
resp = requests.get(URL+"/users?limit=" + str(length))
assert resp.status_code == 200
assert 'application/json' in resp.headers['content-type']
assert len(resp.json()["users"]) == length
length = -1
resp = requests.get(URL+"/users?limit=" + str(length))
assert resp.status_code == 500
resp = requests.get(URL+"/users?limit=1' or '1'='1")
assert resp.status_code == 500
# test for messages
resp = requests.get(URL+"/messages")
assert resp.status_code == 200
assert 'application/json' in resp.headers['content-type']
assert len(resp.json()) == len(msgs)
first = resp.json()[0]["name"]
resp = requests.post(URL+"/messages",data = {'name':first})
assert resp.status_code == 200
assert len(resp.json()) == 1
print("TESTING /users SQLI")
resp = requests.post(URL+"/messages",data = {'name':"he' OR '1'='1"})
print("TESTING DONE #1/users SQLI => %d" % resp.status_code)
assert resp.status_code == 200
print("TESTING DONE #2/users SQLI")
a = "G"
b = "O"
c = "D"
print(a + b * 2 + c)
|
import pytest
pytest_plugins = "pytester"
@pytest.fixture
def state():
return "gASVqA4AAAAAAABLAyiKBQAAAIAAigUA6cbnAEpzTpUZShSh2SCKBeZcQ8cASkEkuF1KzTQ8HYoFrsovkQCKBduZrZoASnkaKB+KBY6ACtMAigWJT3rQAIoFacE8kgBKk5nNJUrW0lENSs72zSpKZX2Ackq9/DMCSlTVumGKBRkHO+cAStvjkBBKNMMMF0r6PTR0igVWh6/rAIoFiCzh2QBKaLVtN4oFWa0QyACKBcxjPdgASlZEQ0WKBaHukMMAigVYB7SaAIoF4uV3lwBKUPoYb4oFZfkj0wCKBcu+J5IASkrwiyZKpi6sDIoFOeiD+wCKBejoz5wAigUrhBPsAIoFePk38ACKBSFcSPYAigXvMoyJAIoF06RB5wCKBfjHPIgAigUvM3uSAIoFKhbY/gCKBf6LcfMASo7PwkWKBUgWkYQAigWpbSfwAIoFcI1/jgBK33VkCEqZOq1nSm2W+iFKlenxbooFymj6vwBK9eeFf0r3h3h7igUKRQ7jAIoFDZ+A5wCKBe15D7cASnj1zzxKFA2ocIoFx/nYjACKBeGpANAAigU5TPHyAIoFniBHuACKBfin340ASmjDfyWKBSay0sYAigWYXCSHAErgYtg1igUPUgnMAEr/vyosigU8tWvWAIoFHV0P3wBKeLsbVYoFPd4+0gCKBbcYzssAigVLojjqAEp5p1IUSnMcKTVKqt0PQ4oFqpd84ACKBTYSZYMASrcdyUBK6/vHGEp0ywsiigVabGOtAEota1QvigWIgCr/AIoF25/atACKBSYKtPQAigVlP0+9AIoF/4z/jQCKBYMeOa4ASkOB8W2KBc1DX7UAigUAqZjjAIoF04xKzwCKBVSd6eMAigU7FyKdAIoFyn3kugBKkjtJL0r1uvZ6igV8lCOpAEoX95wqShRIl3GKBfwm840ASqH2PhhKtkaZTkr+tX0XSlYFS2VKuBalXkrs9M1oSjS4yx2KBau9HqUAigWYRLXRAErwhe57SsEyrnWKBegT084AigWFnG/dAEoSZS8KigUqV0y4AEqIcAQ/igXr8XfOAIoFD9QwwwBKfBppREpkAAMGigVE47DuAIoF14tf2ACKBQPnW/4AigWExTvTAEqkoMwISuTlFFdKY1RNeooFRYqipACKBQ/7b/UASvIUMQ2KBU6b/eUASoh5omdKzh+OTooF9qQyxgCKBUpA3KQASlqhehOKBc6nEqYASiOWLSVK0fl/SkrQ0mJ6SimWd2uKBYosk+4AigWiaInDAErgZCUIigXS8oWZAEoHcCgHSkbHnB6KBT6OFZgASgFZdn2KBfBPOJEASilmY3mKBWQmEucAigW+GmiUAEqdqT95igV2uLaRAEqChGYISo3QUQ5KGQ2UaYoFmtH7zQCKBSOi6IgAigXHIpmjAEpdb90mSgTDvGCKBWmRZNgAigVdzc73AEoBzBpxigWHzxm8AEqnRv8JigVvOMb3AIoFfwh52QBKOBtNc0rd0CVIigUmcXzUAEr1d61HSkfYbi1KyfLuWUpzFwx6Sk0CcyVK7Y1MIUqobMlFSiRp9xJKVvl8PUoxfqxySr0FW16KBZSO7q0ASqZ0tVGKBVQvMuUASkuKj1NKbuViVkpMrfw5SmEw3QBKZtYFZkr/pAtRigX7zMH/AEpb2FN7igVUqJ3sAIoFhoterQBKbbf2aEqqWHdeSro9vXhKd3WUbEo2SCw5SuyJzzeKBW3kzagAShgI4kVKzc4dUUquPJAHigXUh7LMAIoFVboL0QBKYe9Pe0q4eIZ5SqbaQjWKBcROWaYASsYcXHNK9SZHbkrK32YjigUaspvKAEphlhpdigUr8ZvhAIoFo7Fi9ACKBXFGFZQAigVHB+3dAEp+sz9ySk0RlAiKBdh40NUAigX7YobaAIoFXa3S3QCKBUo248IAStplc3uKBULgfPUAigXKtnT0AIoF29IrkQBK5ZdXb4oF0nAS/QBKqDM3LEoFLrMoigWtVuLVAErO/xB/igVhFxDdAEraOwV0igVmO4qOAIoFc6/6nQCKBc+Nl+MAigU5ZZnTAEpvjToeigU1TiajAIoFgLgo2QCKBceOpKoASh8+yBdKGITeL4oFUniByACKBZ0RVPwAigU60rbDAEr9L+8bigXU85CrAEqPC+9sigUdH42sAEr9isVLSqwZ9FqKBYsxxskASvPiWhuKBbeEvrsASn32KCVKRKohZIoFPBRH6QBKSolzR4oFuCt59wBKz+9hPkqtvq0higUDWLfvAIoFema5ggCKBTGYx4sASp6NzS2KBdcUgZ4ASk9LPWtK9LMeckonn4k8SsCQoglKTsJxFYoFbqnwjQCKBdL0CbUAigUdJx+dAEpYfJ9MigWtSmakAIoFCJoY9wCKBXDjzJ0ASkJWSlpKI3zXdYoF+b54/ABKvOajSIoFAUMA6ACKBenLJc4AigXtUJqfAIoFFBKTwQBKxUB/WooFpcfozwBKCxVzNUpqPpohSoRF9EiKBZM8++AAShR6aTuKBVorhuQAigVXFWDiAIoFFDYV8gCKBTaWP7IAStLK7GOKBZJzCtEAigXMbJmFAIoF1jqhrgBKiefNXEpGs6sgSqzpo2OKBYgQUNcASpWPODRKZdExbkrVa/QrSvShpCtK16psXUrjaYsHigUwSV7pAIoFX2u9owCKBcFIA+EAigWyLGCIAIoFnUlwuACKBbRiXewAigVvO9/2AIoFPxV6/gBK14qEfEpYZyEXSt43zQ9Ks38BbooFyxQQtgBKiiC8W0q3sQwZSoWBTClK2iBbTEpnoo4bigWKUS2xAEopdrQiigVB9+jlAIoF/seJkQCKBQgC7dcASrhR7kRKgscyF0pNERdxigX3u5GVAIoF82ci6gCKBYL/qv8AigXSf+K+AIoFpZAN9gBKMq58GUqkIixsSgkBj3ZKrSXbRUq2PqFbSpBH7E1KCTgfQooF08mAnACKBZwJl70AigWclYi3AIoF4LC+3QBKG8pHckrtEA4YigWqyr+hAIoFJtcezQCKBYCLrMAAigWCUyrrAIoFFLmfyABKKH2hFUq/vSccigVncruBAIoF3jZB3wCKBZAuIZ8ASsdVSSqKBckGE4cAigXCyuC1AIoFciBqmgBKkKhqbooFcuSwggCKBU9u7PIASjS+4RmKBUrTOfkAigXPDnXZAEoHnDBySoEDjj1K2iNuS0pcI5kkSpomRiFK236KaUqFNN8HigVEqqXGAIoFVYk+mgBKDMXba4oFW2cC1gCKBUUqVsUAigVVEkuoAIoFj8RlwQCKBVVc9r8ASsaKDlhKXQkVU4oFMn61vwCKBd94pN0AigUjLOTcAIoFHReKyQCKBUxDf5cASqMV4GyKBbJmXdMAigW60i22AIoFb0/UrgCKBf98tvsASrlTuyRKWGrFO4oFnKEejACKBR00JtsAigUuH7/OAIoFlUJ7zABKLxORdooFVrRv7wBKb/hiNEofGggKigV0+AnfAEqZK48jigVYfQPjAEqPcppUigWeyfr2AIoF8I39kwBKTiroMUo/gOkLSj9
ud3WKBdVhMb0AigX9eeO7AErLuIAyigXhMdOGAEru9mBOSr0AJBdKRkJbYkpZT2FHShUvu0OKBW6YksUASqpJqy5K1q8cHYoFkmyghQCKBexnWZgASjGc+yiKBRcY/IQAigV4B6W3AIoFQD9UrQCKBRAC9M4ASu3GmnhKHNu9Jkq3TbJZigVRPrOqAEpa2IRJSm+xaUOKBTglh4EASjHpdjRKbMDsIooFSF8i/QBKL/kmLkqbtSV/Ssp3HlCKBVKdfaYASvCFSxiKBZ3oReUAigVSZM/+AEqgvKhpigVQPJOqAIoFMWtq3QCKBfmMyo8ASrbOKSNKMudlL0qRjedeigXEYkPAAEr/2TxDigUe9TvSAEoHpe0sSmPxGRJKxjEwX4oFHJwknABKqoHXcUpzYxsUSmgD32SKBT0FKM0ASmi8XVRKilG2D4oF5hyupABKO7wIWooF4ba/hQBKmAfiAIoFqnMn8ABKQglcOUpgP8NWSndsgh5KIyZRd4oFHlcEywCKBaeU7NUASp70Hw5KRh4zJEo4Fr0eigXMgrqIAIoFMWJnsABKPmq2UEpleisYigUMSQPAAIoFup5FlABKIBfXVkog8b99ShifswuKBb6ApasASrSJ912KBQlYM9EAigV+u1OvAEo97/MfSjktuSNKXWB5AIoFhIrp4wBKfcDDCUootMx6ShSr7GtK2w35XIoF2W/n2wBKju7TOUppu+JOigWj+OznAEprMtQoSmdX635K2K/tOIoFGcXKkwBKR1a2Hkq7gYMGSvlSQg1K/g0HcIoFAHTf2gBKFVUtcIoF6OFt9gBKfv+0MIoFbZo9uwCKBSF8zuEAigUHmhStAEp0xOgQigWvWYXNAEpu0tR+StNW5SqKBevOMcgAigX6ULHkAEqH/5QnigV061SsAIoFKVaLnwCKBYdNHaYAigW5RbTzAIoFyDACpwCKBdmLl78AigVmZb6LAIoFF65k7QCKBTK6JooASpa+Tw5KKIAFZYoFU+IZ5ABKh254d4oFasnu8ABKiOo+cooFJTPG+gCKBamaFKEAigW8RQ21AEr88bcSStsZvgpKZ4bWMYoF6JQ9xQCKBbFK1JYASn4ENTOKBdHua9EAigU9+nbIAIoFUWBnpgCKBQ48i9QAigXH6rq3AErWE7ZxSmvLZiiKBfAVZs8ASgMSBUqKBcVY/9kASp37aDNKiuqGeEr7I7hUShQEkx9K6/Okb0qvjK50igUW/naOAIoFodRX4QBK9Hl4UErRHHwHSiRdlWCKBeWd/ZQAigWR/LChAIoF+kaYxQCKBQAhIfgAigU1EQ3gAIoF7sxg2gCKBfQ0kecAigWZz03WAEqWwHFhSo3ORUmKBaWKs5kASuyG/0hKt+hqBkp3cTVtigXmxT+VAEqsUH5gSqXfiQpK+1qrc0rZ72BkSoU6Y1SKBVWomckASopaYG1K9Sv/KE1wAnSUToeULg==" # noqa E501
|
# -*- coding: utf-8 -*-
# This module contains all the Session classes used by the MIT Core Concept
# Catalog (MC3) Handcar based implementation of the OSID Learning Service.
# Note that it includes the core OsidSession typically found in the osid
# package as well as the learning package sessions for Objective, Activities
# and ObjectiveBanks.
from dlkit.abstract_osid.osid import sessions as abc_osid_sessions
from ..osid.osid_errors import *
COMPARATIVE = 0
PLENARY = 1
ISOLATED = 0
FEDERATED = 1
CREATED = True
UPDATED = True
class OsidSession(abc_osid_sessions.OsidSession):
"""The OsidSession is the top level interface for all OSID sessions.
An OsidSession is created through its corresponding OsidManager. A
new OsidSession should be created for each user of a service and for
each processing thread. A session maintains a single authenticated
user and is not required to ensure thread-protection. A typical OSID
session defines a set of service methods corresponding to some
compliance level as defined by the service and is generally
responsible for the management and retrieval of OsidObjects.
OsidSession defines a set of common methods used throughout all OSID
sessions. An OSID session may optionally support transactions
through the transaction interface.
"""
def get_locale(self):
"""Gets the locale indicating the localization preferences in
effect for this session.
return: (osid.locale.Locale) - the locale
compliance: mandatory - This method must be implemented.
"""
raise Unimplemented()
def is_authenticated(self):
"""Tests if an agent is authenticated to this session.
return: (boolean) - true if valid authentication credentials
exist, false otherwise
compliance: mandatory - This method must be implemented.
"""
return False
def get_authenticated_agent_id(self):
"""Gets the Id of the agent authenticated to this session.
This is the agent for which credentials are used either acquired
natively or via an OsidProxyManager.
return: (osid.id.Id) - the authenticated agent Id
raise: IllegalState - is_authenticated() is false
compliance: mandatory - This method must be implemented.
"""
raise IllegalState()
def get_authenticated_agent(self):
"""Gets the agent authenticated to this session.
This is the agent for which credentials are used either acquired
natively or via an OsidProxyManager.
return: (osid.authentication.Agent) - the authenticated agent
raise: IllegalState - is_authenticated() is false
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
"""
raise IllegalState()
def get_effective_agent_id(self):
"""Gets the Id of the effective agent in use by this session.
If is_authenticated() is true, then the effective agent may be
the same as the agent returned by get_authenticated_agent(). If
is_authenticated() is false, then the effective agent may be a
        default agent used for authorization by an unknown or anonymous
user.
return: (osid.id.Id) - the effective agent
compliance: mandatory - This method must be implemented.
"""
raise Unimplemented()
def get_effective_agent(self):
"""Gets the effective agent in use by this session.
If is_authenticated() is true, then the effective agent may be
the same as the agent returned by get_authenticated_agent(). If
is_authenticated() is false, then the effective agent may be a
        default agent used for authorization by an unknown or anonymous
user.
return: (osid.authentication.Agent) - the effective agent
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
"""
raise Unimplemented()
def get_date(self):
"""Gets the service date which may be the current date or the effective date in which this session exists.
return: (timestamp) - the service date
*compliance: mandatory -- This method must be implemented.*
"""
raise Unimplemented()
date = property(fget=get_date)
def get_clock_rate(self):
"""Gets the rate of the service clock.
return: (decimal) - the clock rate
*compliance: mandatory -- This method must be implemented.*
"""
raise Unimplemented()
clock_rate = property(fget=get_clock_rate)
def get_format_type(self):
"""Gets the ``DisplayText`` format ``Type`` preference in effect for this session.
return: (osid.type.Type) - the effective ``DisplayText`` format
``Type``
*compliance: mandatory -- This method must be implemented.*
"""
raise Unimplemented()
format_type = property(fget=get_format_type)
def supports_transactions(self):
"""Tests for the availability of transactions.
return: (boolean) - true if transaction methods are available,
false otherwise
compliance: mandatory - This method must be implemented.
"""
return False
def start_transaction(self):
"""Starts a new transaction for this sesson.
Transactions are a means for an OSID to provide an all-or-
nothing set of operations within a session and may be used to
coordinate this service from an external transaction manager. A
session supports one transaction at a time. Starting a second
transaction before the previous has been committed or aborted
results in an IllegalState error.
return: (osid.transaction.Transaction) - a new transaction
raise: IllegalState - a transaction is already open
raise: OperationFailed - unable to complete request
raise: Unsupported - transactions not supported
compliance: optional - This method must be implemented if
supports_transactions() is true.
implementation notes: Ideally, a provider that supports
transactions should guarantee atomicity, consistency, isolation
and durability in a 2 phase commit process. This is not always
possible in distributed systems and a transaction provider may
simply allow for a means of processing bulk updates. To
maximize interoperability, providers should honor the one-
transaction-at-a-time rule.
"""
raise Unsupported()
locale = property(get_locale)
authenticated_agent_id = property(get_authenticated_agent_id)
authenticated_agent = property(get_authenticated_agent)
effective_agent_id = property(get_effective_agent_id)
effective_agent = property(get_effective_agent)
|
#!/usr/bin/env python
import sys
import getopt
import re
import struct
import socket
import stat
import os
debug = 0
QEMUCMDTEMPLATE = """#!/bin/bash
set -u
ARCHEND=%(ARCHEND)s
IID=%(IID)i
if [ -e ./firmadyne.config ]; then
source ./firmadyne.config
elif [ -e ../firmadyne.config ]; then
source ../firmadyne.config
elif [ -e ../../firmadyne.config ]; then
source ../../firmadyne.config
else
echo "Error: Could not find 'firmadyne.config'!"
exit 1
fi
IMAGE=`get_fs ${IID}`
KERNEL=`get_kernel ${ARCHEND}`
QEMU=`get_qemu ${ARCHEND}`
QEMU_MACHINE=`get_qemu_machine ${ARCHEND}`
QEMU_ROOTFS=`get_qemu_disk ${ARCHEND}`
WORK_DIR=`get_scratch ${IID}`
%(START_NET)s
function cleanup {
pkill -P $$
%(STOP_NET)s
}
trap cleanup EXIT
echo "Starting firmware emulation... use Ctrl-a + x to exit"
sleep 1s
%(QEMU_ENV_VARS)s ${QEMU} -m 256 -M ${QEMU_MACHINE} -kernel ${KERNEL} \\
%(QEMU_DISK)s -append "root=${QEMU_ROOTFS} console=ttyS0 nandsim.parts=64,64,64,64,64,64,64,64,64,64 rdinit=/firmadyne/preInit.sh rw debug ignore_loglevel print-fatal-signals=1 user_debug=31 firmadyne.syscall=0" \\
-nographic \\
%(QEMU_NETWORK)s | tee ${WORK_DIR}/qemu.final.serial.log
"""
def stripTimestamps(data):
lines = data.split("\n")
#throw out the timestamps
lines = [re.sub(r"^\[[^\]]*\] firmadyne: ", "", l) for l in lines]
return lines
def findMacChanges(data, endianness):
lines = stripTimestamps(data)
candidates = filter(lambda l: l.startswith("ioctl_SIOCSIFHWADDR"), lines)
if debug:
print("Mac Changes %r" % candidates)
result = []
if endianness == "eb":
fmt = ">I"
elif endianness == "el":
fmt = "<I"
for c in candidates:
g = re.match(r"^ioctl_SIOCSIFHWADDR\[[^\]]+\]: dev:([^ ]+) mac:0x([0-9a-f]+) 0x([0-9a-f]+)", c)
if g:
(iface, mac0, mac1) = g.groups()
m0 = struct.pack(fmt, int(mac0, 16))[2:]
m1 = struct.pack(fmt, int(mac1, 16))
mac = "%02x:%02x:%02x:%02x:%02x:%02x" % struct.unpack("BBBBBB", m0+m1)
result.append((iface, mac))
return result
# Get the network interfaces in the router, except 127.0.0.1
def findNonLoInterfaces(data, endianness):
#lines = data.split("\r\n")
lines = stripTimestamps(data)
candidates = filter(lambda l: l.startswith("__inet_insert_ifa"), lines) # logs for the inconfig process
if debug:
print("Candidate ifaces: %r" % candidates)
result = []
if endianness == "eb":
fmt = ">I"
elif endianness == "el":
fmt = "<I"
for c in candidates:
g = re.match(r"^__inet_insert_ifa\[[^\]]+\]: device:([^ ]+) ifa:0x([0-9a-f]+)", c)
if g:
(iface, addr) = g.groups()
addr = socket.inet_ntoa(struct.pack(fmt, int(addr, 16)))
if addr != "127.0.0.1" and addr != "0.0.0.0":
result.append((iface, addr))
return result
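# Illustrative only: a hypothetical log line (after stripTimestamps has removed the
# "[ ... ] firmadyne: " prefix) that the regex above matches:
#   __inet_insert_ifa[PID: 123 (ifconfig)]: device:br0 ifa:0xc0a80001
# With endianness "eb" it yields the tuple ("br0", "192.168.0.1").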
def findIfacesForBridge(data, brif):
#lines = data.split("\r\n")
lines = stripTimestamps(data)
result = []
candidates = filter(lambda l: l.startswith("br_dev_ioctl") or l.startswith("br_add_if"), lines)
for c in candidates:
for p in [r"^br_dev_ioctl\[[^\]]+\]: br:%s dev:(.*)", r"^br_add_if\[[^\]]+\]: br:%s dev:(.*)"]:
pat = p % brif
g = re.match(pat, c)
if g:
iface = g.group(1)
#we only add it if the interface is not the bridge itself
#there are images that call brctl addif br0 br0 (e.g., 5152)
if iface != brif:
result.append(iface.strip())
return result
def findVlanInfoForDev(data, dev):
#lines = data.split("\r\n")
lines = stripTimestamps(data)
results = []
candidates = filter(lambda l: l.startswith("register_vlan_dev"), lines)
for c in candidates:
g = re.match(r"register_vlan_dev\[[^\]]+\]: dev:%s vlan_id:([0-9]+)" % dev, c)
if g:
results.append(int(g.group(1)))
return results
def ifaceNo(dev):
g = re.match(r"[^0-9]+([0-9]+)", dev)
return int(g.group(1)) if g else -1
def qemuArchNetworkConfig(i, arch, n):
if not n:
if arch == "arm":
return "-device virtio-net-device,netdev=net%(I)i -netdev socket,id=net%(I)i,listen=:200%(I)i" % {'I': i}
else:
return "-netdev socket,id=net%(I)i,listen=:200%(I)i -device e1000,netdev=net%(I)i" % {'I': i}
else:
(ip, dev, vlan, mac) = n
# newer kernels use virtio only
if arch == "arm":
return "-device virtio-net-device,netdev=net%(I)i -netdev tap,id=net%(I)i,ifname=${TAPDEV_%(I)i},script=no" % {'I': i}
else:
vlan_id = vlan if vlan else i
mac_str = "" if not mac else ",mac=%s" % mac
return "-netdev tap,id=net%(I)i,ifname=${TAPDEV_%(I)i},script=no -device e1000,netdev=net%(I)i%(MAC)s" % { 'I' : i, 'MAC' : mac_str}
def qemuNetworkConfig(arch, network):
output = []
assigned = []
# Fix Id conflict bug
flag = 0
for k in range(0, 4):
for j, n in enumerate(network):
# need to connect the jth emulated network interface to the corresponding host interface
if k == ifaceNo(n[1]):
output.append(qemuArchNetworkConfig(j, arch, n))
assigned.append(n)
flag = j
break
for i in range(0, 4):
if i != flag:
# otherwise, put placeholder socket connection
if len(output) <= i:
output.append(qemuArchNetworkConfig(i, arch, None))
# find unassigned interfaces
for j, n in enumerate(network):
if n not in assigned:
# guess assignment
print("Warning: Unmatched interface: %s" % (n,))
output[j] = qemuArchNetworkConfig(j, arch, n)
assigned.append(n)
return ' '.join(output)
def buildConfig(brif, iface, vlans, macs):
#there should be only one ip
ip = brif[1]
br = brif[0]
#strip vlanid from interface name (e.g., eth2.2 -> eth2)
dev = iface.split(".")[0]
#check whether there is a different mac set
mac = None
d = dict(macs)
if br in d:
mac = d[br]
elif dev in d:
mac = d[dev]
vlan_id = None
if len(vlans):
vlan_id = vlans[0]
return (ip, dev, vlan_id, mac)
def getIP(ip):
tups = [int(x) for x in ip.split(".")]
if tups[3] != 1:
tups[3] -= 1
else:
tups[3] = 2
return ".".join([str(x) for x in tups])
def startNetwork(network):
template_1 = """
TAPDEV_%(I)i=tap${IID}_%(I)i
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}
echo "Creating TAP device ${TAPDEV_%(I)i}..."
sudo tunctl -t ${TAPDEV_%(I)i} -u ${USER}
"""
template_vlan = """
echo "Initializing VLAN..."
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}.%(VLANID)i
sudo ip link add link ${TAPDEV_%(I)i} name ${HOSTNETDEV_%(I)i} type vlan id %(VLANID)i
sudo ip link set ${TAPDEV_%(I)i} up
"""
template_2 = """
echo "Bringing up TAP device..."
sudo ip link set ${HOSTNETDEV_%(I)i} up
sudo ip addr add %(HOSTIP)s/24 dev ${HOSTNETDEV_%(I)i}
echo "Adding route to %(GUESTIP)s..."
sudo ip route add %(GUESTIP)s via %(GUESTIP)s dev ${HOSTNETDEV_%(I)i}
"""
output = []
for i, (ip, dev, vlan, mac) in enumerate(network):
output.append(template_1 % {'I' : i})
if vlan:
output.append(template_vlan % {'I' : i, 'VLANID' : vlan})
output.append(template_2 % {'I' : i, 'HOSTIP' : getIP(ip), 'GUESTIP': ip})
return '\n'.join(output)
def stopNetwork(network):
template_1 = """
echo "Deleting route..."
sudo ip route flush dev ${HOSTNETDEV_%(I)i}
echo "Bringing down TAP device..."
sudo ip link set ${TAPDEV_%(I)i} down
"""
template_vlan = """
echo "Removing VLAN..."
sudo ip link delete ${HOSTNETDEV_%(I)i}
"""
template_2 = """
echo "Deleting TAP device ${TAPDEV_%(I)i}..."
sudo tunctl -d ${TAPDEV_%(I)i}
"""
output = []
for i, (ip, dev, vlan, mac) in enumerate(network):
output.append(template_1 % {'I' : i})
if vlan:
output.append(template_vlan % {'I' : i})
output.append(template_2 % {'I' : i})
return '\n'.join(output)
def qemuCmd(iid, network, arch, endianness):
if arch == "mips":
qemuEnvVars = ""
qemuDisk = "-drive if=ide,format=raw,file=${IMAGE}"
if endianness != "eb" and endianness != "el":
raise Exception("You didn't specify a valid endianness")
elif arch == "arm":
qemuDisk = "-drive if=none,file=${IMAGE},format=raw,id=rootfs -device virtio-blk-device,drive=rootfs"
if endianness == "el":
qemuEnvVars = "QEMU_AUDIO_DRV=none"
elif endianness == "eb":
raise Exception("armeb currently not supported")
else:
raise Exception("You didn't specify a valid endianness")
else:
raise Exception("Unsupported architecture")
return QEMUCMDTEMPLATE % {'IID': iid,
'ARCHEND' : arch + endianness,
'START_NET' : startNetwork(network),
'STOP_NET' : stopNetwork(network),
'QEMU_DISK' : qemuDisk,
'QEMU_NETWORK' : qemuNetworkConfig(arch, network),
'QEMU_ENV_VARS' : qemuEnvVars}
def process(infile, iid, arch, endianness=None, makeQemuCmd=False, outfile=None):
brifs = []
vlans = []
data = open(infile).read()
network = set()
success = False
#find interfaces with non loopback ip addresses
ifacesWithIps = findNonLoInterfaces(data, endianness)
#find changes of mac addresses for devices
macChanges = findMacChanges(data, endianness)
print("Interfaces: %r" % ifacesWithIps)
deviceHasBridge = False
for iwi in ifacesWithIps:
#find all interfaces that are bridged with that interface
brifs = findIfacesForBridge(data, iwi[0])
if debug:
print("brifs for %s %r" % (iwi[0], brifs))
for dev in brifs:
#find vlan_ids for all interfaces in the bridge
vlans = findVlanInfoForDev(data, dev)
#create a config for each tuple
network.add((buildConfig(iwi, dev, vlans, macChanges)))
deviceHasBridge = True
#if there is no bridge just add the interface
if not brifs and not deviceHasBridge:
vlans = findVlanInfoForDev(data, iwi[0])
network.add((buildConfig(iwi, iwi[0], vlans, macChanges)))
ips = set()
pruned_network = []
for n in network:
if n[0] not in ips:
ips.add(n[0])
pruned_network.append(n)
else:
if debug:
print("duplicate ip address for interface: ", n)
if makeQemuCmd:
qemuCommandLine = qemuCmd(iid, pruned_network, arch, endianness)
if qemuCommandLine:
success = True
if outfile:
with open(outfile, "w") as out:
out.write(qemuCommandLine)
os.chmod(outfile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
else:
print(qemuCommandLine)
return success
def archEnd(value):
tmp = value.lower()
return (tmp[:-2], tmp[-2:])
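# archEnd splits the combined arch+endianness flag, e.g.:
#   archEnd("mipsel") -> ("mips", "el")
#   archEnd("armEB")  -> ("arm", "eb")   # the value is lower-cased first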
def main():
infile = None
makeQemuCmd = False
iid = None
outfile = None
arch = None
endianness = None
(opts, argv) = getopt.getopt(sys.argv[1:], 'f:i:S:a:oqd')
for (k, v) in opts:
if k == '-f':
infile = v
if k == '-d':
global debug
debug += 1
if k == '-q':
makeQemuCmd = True
if k == '-i':
iid = int(v)
if k == '-S':
SCRATCHDIR = v
if k == '-o':
outfile = True
if k == '-a':
(arch, endianness) = archEnd(v)
if not arch or not endianness:
raise Exception("Either arch or endianness not found try mipsel/mipseb/armel/armeb")
if not infile and iid:
infile = "%s/%i/qemu.initial.serial.log" % (SCRATCHDIR, iid)
if outfile and iid:
outfile = """%s/%i/run.sh""" % (SCRATCHDIR, iid)
if debug:
print("processing %i" % iid)
if infile:
process(infile, iid, arch, endianness, makeQemuCmd, outfile)
if __name__ == "__main__":
main()
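# Hypothetical invocation of this script (image id and scratch path are placeholders):
#   python <this_script>.py -i 9050 -S /path/to/scratch -a mipsel -q -o
# With those flags it reads /path/to/scratch/9050/qemu.initial.serial.log and writes
# the generated QEMU run script to /path/to/scratch/9050/run.sh.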
|
#!/usr/bin/env python
from typing import List, Tuple
"""
Difficulty: 4
A group of executives at a large oil company had gone to a conference about ML. After
hearing that it "could learn like a human" they decided to fire all their Geologists
and Geophysicists. They were replaced with a neural network they named Fred. In the
beginning Fred looked great. He was finding oil deposits everywhere. The executives
ordered the building of a lot of new oil platforms following this. After having spent
all their money it became apparent that something was wrong. There was no oil or gas
underneath most of the newly built oil platforms. The executives stopped all current
projects.
This left them with a problem. Not all the pipes to shore were finished, so they are
not sure which of the platforms are actually connected to shore. You are brought on as
a consultant to find the extent of the problem. Being a good consultant you know that
platform 1 is already connected to shore.
"""
def num_connected_platforms(platforms: List[bool], pipes: List[Tuple[int, int]]) -> int:
"""
Args:
        platforms: A list of booleans. Each boolean signifies whether the platform is
currently producing oil.
pipes: A list of tuples, specifying that there is a pipe going from platform a
to platform b. Pipes only go in one direction.
Notes:
        The list is 1-indexed. See the `Examples` section for illustration. Platform 1
is connected to shore.
Returns:
        The number of oil platforms (which produce oil) that are connected to shore.
Examples:
        >>> num_connected_platforms([False, True], [(2, 1)])
        1
        >>> num_connected_platforms([False, True, True, True], [(3, 2), (4, 3)])
        0
        >>> num_connected_platforms([True, False, True], [(3, 2), (2, 1)])
2
"""
raise NotImplementedError()
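# A minimal reference sketch (one possible approach, illustrative only; the helper
# name below is hypothetical): walk the pipes in reverse starting from platform 1
# and count the producing platforms that can reach shore.
def _sketch_num_connected_platforms(platforms: List[bool], pipes: List[Tuple[int, int]]) -> int:
    from collections import defaultdict
    # reverse[b] lists every platform a with a pipe a -> b, so a traversal from 1
    # over `reverse` visits exactly the platforms with a directed path to shore.
    reverse = defaultdict(list)
    for a, b in pipes:
        reverse[b].append(a)
    seen, stack = {1}, [1]
    while stack:
        node = stack.pop()
        for upstream in reverse[node]:
            if upstream not in seen:
                seen.add(upstream)
                stack.append(upstream)
    # e.g. _sketch_num_connected_platforms([True, False, True], [(3, 2), (2, 1)]) == 2
    return sum(1 for p in seen if platforms[p - 1])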
|
from django.contrib import admin
from . models import User
# Register your models here.
admin.site.register(User)
|
n, s = int(raw_input()), raw_input()
print max(s.count('1'), s.count('2'), s.count('3'), s.count('4')), min(s.count('1'), s.count('2'), s.count('3'), s.count('4'))
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protos/controllable.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from protos import broker_pb2 as protos_dot_broker__pb2
from protos import clock_pb2 as protos_dot_clock__pb2
from protos import data_pb2 as protos_dot_data__pb2
from protos import structs_pb2 as protos_dot_structs__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19protos/controllable.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13protos/broker.proto\x1a\x12protos/clock.proto\x1a\x11protos/data.proto\x1a\x14protos/structs.proto\"i\n\x12InitializeResponse\x12\x16\n\x0e\x64\x61ta_frequency\x18\x01 \x01(\t\x12\x10\n\x08shorting\x18\x03 \x01(\x08\x12\x15\n\rasset_classes\x18\x04 \x01(\t\x12\x12\n\nfractional\x18\x05 \x01(\x08\"\xef\x02\n\x0b\x45ngineSetup\x12=\n\x11\x63ommission_models\x18\x01 \x03(\x0b\x32\".EngineSetup.CommissionModelsEntry\x12\x39\n\x0fslippage_models\x18\x02 \x03(\x0b\x32 .EngineSetup.SlippageModelsEntry\x12/\n\ntax_models\x18\x03 \x03(\x0b\x32\x1b.EngineSetup.TaxModelsEntry\x1a>\n\x15\x43ommissionModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x14\n\x05value\x18\x02 \x01(\x0b\x32\x05.Data:\x02\x38\x01\x1a<\n\x13SlippageModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x14\n\x05value\x18\x02 \x01(\x0b\x32\x05.Data:\x02\x38\x01\x1a\x37\n\x0eTaxModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x14\n\x05value\x18\x02 \x01(\x0b\x32\x05.Data:\x02\x38\x01\"\x87\x01\n\x14\x46ractionalTradeSetup\x12\x18\n\x10minimum_purchase\x18\x01 \x01(\x02\x12\x18\n\x10minimal_fraction\x18\x02 \x01(\x05\x12\x13\n\x0border_types\x18\x03 \x03(\t\x12\x13\n\x0b\x61sset_types\x18\x04 \x03(\t\x12\x11\n\tcan_short\x18\x05 \x01(\x08\"I\n\nTradeSetup\x12\x13\n\x0border_types\x18\x01 \x03(\t\x12\x13\n\x0b\x61sset_types\x18\x02 \x03(\t\x12\x11\n\tcan_short\x18\x03 \x01(\x08\"q\n\x0b\x42rokerSetup\x12\x12\n\nfractional\x18\x01 \x01(\x08\x12,\n\rf_trade_setup\x18\x02 \x01(\x0b\x32\x15.FractionalTradeSetup\x12 \n\x0btrade_setup\x18\x03 \x01(\x0b\x32\x0b.TradeSetup\"\x1f\n\x08RunSetup\x12\x13\n\x0bstrategy_id\x18\x01 \x01(\t\"\x96\x02\n\nRunRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\nsession_id\x18\x02 \x01(\t\x12\x0f\n\x07\x63\x61pital\x18\x03 \x01(\x02\x12\x10\n\x08leverage\x18\x04 \x01(\x02\x12)\n\x05start\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03\x65nd\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\"\n\x0c\x65ngine_setup\x18\x07 \x01(\x0b\x32\x0c.EngineSetup\x12\"\n\x0c\x62roker_setup\x18\n \x01(\x0b\x32\x0c.BrokerSetup\x12\x15\n\runiverse_name\x18\x08 \x01(\t\x12\x12\n\nclear_perf\x18\t \x01(\x08\"a\n\x0bStopRequest\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x11\n\tliquidate\x18\x02 \x01(\x08\x12+\n\x07real_ts\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"}\n\rUpdateRequest\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x19\n\x05\x65vent\x18\x02 \x01(\x0e\x32\n.EventType\x12\"\n\x0c\x62roker_state\x18\x03 \x01(\x0b\x32\x0c.BrokerState\"\r\n\x0bUpdateReply\"\xc5\x02\n\x11\x43ontrollableState\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x15\n\rsession_state\x18\x02 \x01(\t\x12\x0f\n\x07\x63\x61pital\x18\x03 \x01(\x02\x12\x14\n\x0cmax_leverage\x18\x0b \x01(\x02\x12\x10\n\x08universe\x18\x04 \x01(\t\x12\x11\n\tlook_back\x18\x05 \x01(\x05\x12\x16\n\x0e\x64\x61ta_frequency\x18\x06 \x01(\t\x12)\n\x05start\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03\x65nd\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\ncheckpoint\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1d\n\x15metrics_tracker_state\x18\n \x01(\x0c\"D\n\x17ParametersUpdateRequest\x12\x0f\n\x07\x63\x61pital\x18\x01 \x01(\x02\x12\x18\n\x10maximum_leverage\x18\x02 
\x01(\x02\x32\xf4\x03\n\x0c\x43ontrollable\x12\x44\n\x10UpdateParameters\x12\x18.ParametersUpdateRequest\x1a\x16.google.protobuf.Empty\x12,\n\nInitialize\x12\t.RunSetup\x1a\x13.InitializeResponse\x12*\n\x03Run\x12\x0b.RunRequest\x1a\x16.google.protobuf.Empty\x12\x32\n\x0b\x43lockUpdate\x12\x0b.ClockEvent\x1a\x16.google.protobuf.Empty\x12\x31\n\rUpdateAccount\x12\x06.Chunk\x1a\x16.google.protobuf.Empty(\x01\x12,\n\x04Stop\x12\x0c.StopRequest\x1a\x16.google.protobuf.Empty\x12\x37\n\x05Watch\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12>\n\x0cStopWatching\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12\x36\n\x04Join\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Emptyb\x06proto3')
_INITIALIZERESPONSE = DESCRIPTOR.message_types_by_name['InitializeResponse']
_ENGINESETUP = DESCRIPTOR.message_types_by_name['EngineSetup']
_ENGINESETUP_COMMISSIONMODELSENTRY = _ENGINESETUP.nested_types_by_name['CommissionModelsEntry']
_ENGINESETUP_SLIPPAGEMODELSENTRY = _ENGINESETUP.nested_types_by_name['SlippageModelsEntry']
_ENGINESETUP_TAXMODELSENTRY = _ENGINESETUP.nested_types_by_name['TaxModelsEntry']
_FRACTIONALTRADESETUP = DESCRIPTOR.message_types_by_name['FractionalTradeSetup']
_TRADESETUP = DESCRIPTOR.message_types_by_name['TradeSetup']
_BROKERSETUP = DESCRIPTOR.message_types_by_name['BrokerSetup']
_RUNSETUP = DESCRIPTOR.message_types_by_name['RunSetup']
_RUNREQUEST = DESCRIPTOR.message_types_by_name['RunRequest']
_STOPREQUEST = DESCRIPTOR.message_types_by_name['StopRequest']
_UPDATEREQUEST = DESCRIPTOR.message_types_by_name['UpdateRequest']
_UPDATEREPLY = DESCRIPTOR.message_types_by_name['UpdateReply']
_CONTROLLABLESTATE = DESCRIPTOR.message_types_by_name['ControllableState']
_PARAMETERSUPDATEREQUEST = DESCRIPTOR.message_types_by_name['ParametersUpdateRequest']
InitializeResponse = _reflection.GeneratedProtocolMessageType('InitializeResponse', (_message.Message,), {
'DESCRIPTOR' : _INITIALIZERESPONSE,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:InitializeResponse)
})
_sym_db.RegisterMessage(InitializeResponse)
EngineSetup = _reflection.GeneratedProtocolMessageType('EngineSetup', (_message.Message,), {
'CommissionModelsEntry' : _reflection.GeneratedProtocolMessageType('CommissionModelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ENGINESETUP_COMMISSIONMODELSENTRY,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:EngineSetup.CommissionModelsEntry)
})
,
'SlippageModelsEntry' : _reflection.GeneratedProtocolMessageType('SlippageModelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ENGINESETUP_SLIPPAGEMODELSENTRY,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:EngineSetup.SlippageModelsEntry)
})
,
'TaxModelsEntry' : _reflection.GeneratedProtocolMessageType('TaxModelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ENGINESETUP_TAXMODELSENTRY,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:EngineSetup.TaxModelsEntry)
})
,
'DESCRIPTOR' : _ENGINESETUP,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:EngineSetup)
})
_sym_db.RegisterMessage(EngineSetup)
_sym_db.RegisterMessage(EngineSetup.CommissionModelsEntry)
_sym_db.RegisterMessage(EngineSetup.SlippageModelsEntry)
_sym_db.RegisterMessage(EngineSetup.TaxModelsEntry)
FractionalTradeSetup = _reflection.GeneratedProtocolMessageType('FractionalTradeSetup', (_message.Message,), {
'DESCRIPTOR' : _FRACTIONALTRADESETUP,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:FractionalTradeSetup)
})
_sym_db.RegisterMessage(FractionalTradeSetup)
TradeSetup = _reflection.GeneratedProtocolMessageType('TradeSetup', (_message.Message,), {
'DESCRIPTOR' : _TRADESETUP,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:TradeSetup)
})
_sym_db.RegisterMessage(TradeSetup)
BrokerSetup = _reflection.GeneratedProtocolMessageType('BrokerSetup', (_message.Message,), {
'DESCRIPTOR' : _BROKERSETUP,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:BrokerSetup)
})
_sym_db.RegisterMessage(BrokerSetup)
RunSetup = _reflection.GeneratedProtocolMessageType('RunSetup', (_message.Message,), {
'DESCRIPTOR' : _RUNSETUP,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:RunSetup)
})
_sym_db.RegisterMessage(RunSetup)
RunRequest = _reflection.GeneratedProtocolMessageType('RunRequest', (_message.Message,), {
'DESCRIPTOR' : _RUNREQUEST,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:RunRequest)
})
_sym_db.RegisterMessage(RunRequest)
StopRequest = _reflection.GeneratedProtocolMessageType('StopRequest', (_message.Message,), {
'DESCRIPTOR' : _STOPREQUEST,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:StopRequest)
})
_sym_db.RegisterMessage(StopRequest)
UpdateRequest = _reflection.GeneratedProtocolMessageType('UpdateRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEREQUEST,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:UpdateRequest)
})
_sym_db.RegisterMessage(UpdateRequest)
UpdateReply = _reflection.GeneratedProtocolMessageType('UpdateReply', (_message.Message,), {
'DESCRIPTOR' : _UPDATEREPLY,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:UpdateReply)
})
_sym_db.RegisterMessage(UpdateReply)
ControllableState = _reflection.GeneratedProtocolMessageType('ControllableState', (_message.Message,), {
'DESCRIPTOR' : _CONTROLLABLESTATE,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:ControllableState)
})
_sym_db.RegisterMessage(ControllableState)
ParametersUpdateRequest = _reflection.GeneratedProtocolMessageType('ParametersUpdateRequest', (_message.Message,), {
'DESCRIPTOR' : _PARAMETERSUPDATEREQUEST,
'__module__' : 'protos.controllable_pb2'
# @@protoc_insertion_point(class_scope:ParametersUpdateRequest)
})
_sym_db.RegisterMessage(ParametersUpdateRequest)
_CONTROLLABLE = DESCRIPTOR.services_by_name['Controllable']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_ENGINESETUP_COMMISSIONMODELSENTRY._options = None
_ENGINESETUP_COMMISSIONMODELSENTRY._serialized_options = b'8\001'
_ENGINESETUP_SLIPPAGEMODELSENTRY._options = None
_ENGINESETUP_SLIPPAGEMODELSENTRY._serialized_options = b'8\001'
_ENGINESETUP_TAXMODELSENTRY._options = None
_ENGINESETUP_TAXMODELSENTRY._serialized_options = b'8\001'
_INITIALIZERESPONSE._serialized_start=173
_INITIALIZERESPONSE._serialized_end=278
_ENGINESETUP._serialized_start=281
_ENGINESETUP._serialized_end=648
_ENGINESETUP_COMMISSIONMODELSENTRY._serialized_start=467
_ENGINESETUP_COMMISSIONMODELSENTRY._serialized_end=529
_ENGINESETUP_SLIPPAGEMODELSENTRY._serialized_start=531
_ENGINESETUP_SLIPPAGEMODELSENTRY._serialized_end=591
_ENGINESETUP_TAXMODELSENTRY._serialized_start=593
_ENGINESETUP_TAXMODELSENTRY._serialized_end=648
_FRACTIONALTRADESETUP._serialized_start=651
_FRACTIONALTRADESETUP._serialized_end=786
_TRADESETUP._serialized_start=788
_TRADESETUP._serialized_end=861
_BROKERSETUP._serialized_start=863
_BROKERSETUP._serialized_end=976
_RUNSETUP._serialized_start=978
_RUNSETUP._serialized_end=1009
_RUNREQUEST._serialized_start=1012
_RUNREQUEST._serialized_end=1290
_STOPREQUEST._serialized_start=1292
_STOPREQUEST._serialized_end=1389
_UPDATEREQUEST._serialized_start=1391
_UPDATEREQUEST._serialized_end=1516
_UPDATEREPLY._serialized_start=1518
_UPDATEREPLY._serialized_end=1531
_CONTROLLABLESTATE._serialized_start=1534
_CONTROLLABLESTATE._serialized_end=1859
_PARAMETERSUPDATEREQUEST._serialized_start=1861
_PARAMETERSUPDATEREQUEST._serialized_end=1929
_CONTROLLABLE._serialized_start=1932
_CONTROLLABLE._serialized_end=2432
# @@protoc_insertion_point(module_scope)
|
from __future__ import print_function, division
import numpy as np
# Header
__author__ = "Maria A. Pena-Guerrero"
__version__ = "1.0"
"""
This script can transform from sky to detector coordinates (backward direction), and from
detector to sky (forward direction).
Keyword arguments:
transf_direction -- Direction of transformation, string: forward or backward
detector -- Which detector are we working with, integer: 491 or 492
filter_input -- Filter being used, string: F140X, CLEAR, or F110W
x_input -- Depending on transform direction, float: either X or V2 centroid
y_input -- Depending on transform direction, float: either Y or V3 centroid
tilt -- Use tilt: True or False
arcsecs -- Units of V2 and V3, either arcsecs (=True) or degrees (=False)
debug -- Print diagnostics to screen: True or False
Output(s):
x_out = transformed X position
y_out = transformed Y position
Example usage:
import coords_transform as ct
x_out, y_out = ct.coords_transf(transf_direction, detector, filter_input, x_input, y_input, tilt, arcsecs, debug)
*** Testing suite of the script at the bottom
"""
# HISTORY
# 1. Sept 2015 - Vr. 1.0: Initial Python translation of IDL code.
###########################################################################################################
def coords_transf(transf_direction, detector, filter_input, x_input, y_input, tilt=False, arcsecs=False, debug=False):
if transf_direction == "forward":
x_out, y_out = ct_forward(transf_direction, detector, filter_input, x_input, y_input, tilt, debug)
elif transf_direction == "backward":
x_out, y_out = ct_backward(transf_direction, detector, filter_input, x_input, y_input, tilt, debug)
if arcsecs:
x_out, y_out = x_out*3600.0, y_out * 3600.0
return x_out, y_out
def set_params(transf_direction, detector, filter_input, tilt, debug):
path2text_files = "../Coords_transforms/files_from_tony/"
gwa_xtil = 0.0
gwa_ytil = 0.0
if not tilt:
# Constants used for the no tilt case
gwa_xtil = 0.343027200
gwa_ytil = 0.197058170
# Read the tilt correction file
tilt_file = path2text_files+"tilt_correction.txt"
AX, AY, rx0, ry0 = np.loadtxt(tilt_file, skiprows=1, unpack=True)
if debug:
print ("(coords_transform - set_params): slopeAX=", AX, " SlopeAY=", AY, " intercept_rx0=", rx0, " intercept_ry0=", ry0)
print (" : gwa_xtil=", gwa_xtil, "gwa_ytil=", gwa_xtil)
delta_theta_x = 0.5 * AX * (gwa_ytil - rx0)
delta_theta_y = 0.5 * AY * (gwa_xtil - ry0)
# Read the detector correction file
detector_gwa_txt = path2text_files+str(detector)+"_GWA.txt"
detidx, detxfor, detyfor, detxback, detyback = np.loadtxt(detector_gwa_txt, skiprows=2, unpack=True)
FitOrder_det = 5
if debug:
print ("(coords_transform - set_params): For detector: ", detector, " and transf_direction: ", transf_direction)
print (" delta_theta_x=", delta_theta_x, " delta_theta_y=",delta_theta_y)
if transf_direction == "backward":
print ("(coords_transform - set_params): lenght_index", len(detidx),
" lenght_xBackwardCoefficients_ote=", len(detxback), " lenght_yBackwardCoefficients_ote=", len(detyback))
#print (" test_index xBackwardCoefficients_ote yBackwardCoefficients_ote ")
#for i in detidx:
# print (" ", i, detxback[i], detyback[i])
elif transf_direction == "forward":
print ("(coords_transform - set_params): lenght_index", len(detidx),
" lenght_xForwardCoefficients_ote=", len(detxfor), " lenght_yForwardCoefficients_ote=", len(detyfor))
#print (" test_index xForwardCoefficients_ote yForwardCoefficients_ote ")
#for i in detidx:
# print (" ", i, detxfor[i], detyfor[i])
# Read the filter correction file
filter_gwa_txt = path2text_files+filter_input+"_GWA_OTE.txt"
filidx, filxfor, filyfor, filxback, filyback = np.loadtxt(filter_gwa_txt, skiprows=2, unpack=True)
if debug:
print ("(coords_transform - set_params): For filter: ", filter_input, " and transf_direction: ", transf_direction)
if transf_direction == "backward":
print ("(coords_transform - set_params): length_index", len(filidx),
" length_xBackwardCoefficients_det=", len(filxback), " lenght_yBackwardCoefficients_det=", len(filyback))
#print (" test_index, xBackwardCoefficients_det yBackwardCoefficients_det ")
#for i in filidx:
# print (" ", i, filxback[i], filyback[i])
elif transf_direction == "forward":
print ("(coords_transform - set_params): lenght_index", len(filidx),
" length_xForwardCoefficients_det=", len(filxfor), " length_yForwardCoefficients_det=", len(filyfor))
#print (" test_index, xForwardCoefficients_det yForwardCoefficients_det ")
#for i in filidx:
# print (" ", i, filxfor[i], filyfor[i])
FitOrder_ote = 5
if transf_direction == "backward":
direction_data = [delta_theta_x, delta_theta_y, detxback, detyback, FitOrder_det, filxback, filyback, FitOrder_ote]
elif transf_direction == "forward":
direction_data = [delta_theta_x, delta_theta_y, detxfor, detyfor, FitOrder_det, filxfor, filyfor, FitOrder_ote]
return direction_data
def ct_backward(transf_direction, detector, filter_input, x_input, y_input, tilt, debug):
""" Perform coordinates transform in the BACKWARD direction (sky to detector) """
direction_data = set_params(transf_direction, detector, filter_input, tilt, debug)
delta_theta_x, delta_theta_y, detxback, detyback, FitOrder_det, filxback, filyback, FitOrder_ote = direction_data
# Coord transformation to go from OTE to GWA
ict = -1
xt, yt = 0.0, 0.0
for i in range(FitOrder_ote+1):
for j in range(FitOrder_ote+1-i):
ict += 1
xt += filxback[ict] * x_input ** i * y_input ** j
yt += filyback[ict] * x_input ** i * y_input ** j
#print ("ict, i, j, xt, yt", ict, i, j, xt, yt)
if debug:
print ("(coords_transform - ct_backward): x_input=", x_input, " y_input=", y_input)
print (" GWA_x=",xt," GWA_y=",yt)
#raw_input("*** press enter to continue... ")
# Now that we have angular position at GWA of xy SCA pixels in xt, yt, do tilt-correction
w = 180.0 * 3600.0 # arcsec in pi radians, so 1 arcsec is pi/w, get pi from cos(-1.0)
w1 = np.arccos(-1.0) # gives value of pi
w2 = w1 / w # 1 arcsec expressed in radians
delta_theta_xrad = delta_theta_x * w2 # now calculated for the general case
delta_theta_yrad = delta_theta_y * w2
if debug:
print ("w, w1, w2: ", w, w1, w2)
print ("(coords_transform - ct_backward): delta_theta_x=", delta_theta_x, " delta_theta_y=", delta_theta_y)
print (" delta_theta_xrad=", delta_theta_xrad, "delta_theta_yrad=", delta_theta_yrad)
# Do backward rotation
# calculate direction cosines of xt, yt, (xgwa, ygwa)
v = np.abs(np.sqrt(1.0 + xt*xt + yt*yt))
x3 = xt / v
y3 = yt / v
z3 = 1.0 / v
# do inverse rotation around the x-axis
x2 = x3
y2 = y3 + delta_theta_xrad*z3
z2 = np.sqrt(1.0 - x2*x2 - y2*y2)
# rotate to mirror reference system with small angle approx. and perform rotation
x1 = x2 - delta_theta_yrad*z2
y1 = y2
z1 = np.sqrt(1.0 - x1*x1 - y1*y1)
# rotate reflected ray back to reference GWA coordinate system (use small angle approx.),
# first with an inverse rotation around the y-axis:
x0 = -1.0*x1 + delta_theta_yrad*np.sqrt(1.0 - x1*x1 - (y1+delta_theta_xrad*z1)*(y1+delta_theta_xrad*z1))
y0 = -1.0*y1 - delta_theta_xrad*z1
z0 = np.sqrt(1.0 - x0*x0 - y0*y0)
xt_corr = x0/z0
yt_corr = y0/z0
if debug:
print ("(coords_transform - ct_backward): Checking tilt rotation")
print (" v=", v)
print (" x0=", x0, " y0=", y0, " z0=", z0)
print (" x1=", x1, " y1=", y1, " z1=", z1)
print (" x2=", x2, " y2=", y2, " z2=", z2)
print (" x3=", x3, " y3=", y3, " z3=", z3)
print (" xt_corr=", xt_corr, " yt_corr", yt_corr)
#raw_input("*** press enter to continue... ")
# coord transform to go from tilt-corrected GWA to detector - 5th order polynomial in backward coefficients:
# detxback, detyback
ict_det = -1
xt_det, yt_det = 0.0, 0.0
for i in range(FitOrder_det+1):
for j in range(FitOrder_det+1-i):
ict_det += 1
xt_det += detxback[ict_det] * xt_corr ** i * yt_corr ** j
yt_det += detyback[ict_det] * xt_corr ** i * yt_corr ** j
#print ("ict, i, j, xt, yt", ict_det, i, j, xt_det, yt_det)
if debug:
print ("(coords_transform - ct_backward): x_input=", x_input, " y_input=", y_input, " OTE_x=", xt_det, "OTE_y=", yt_det)
#raw_input("*** press enter to continue... ")
# Final backward output
x_out, y_out = xt_det, yt_det
return x_out, y_out
def ct_forward(transf_direction, detector, filter_input, x_input, y_input, tilt, debug):
# Perform coordinates transform in the FORWARD direction (detector to sky)
direction_data = set_params(transf_direction, detector, filter_input, tilt, debug)
delta_theta_x, delta_theta_y, detxfor, detyfor, FitOrder_det, filxfor, filyfor, FitOrder_ote = direction_data
    # Coord transformation to go from detector (SCA) to GWA
ict = -1
xt, yt = 0.0, 0.0
for i in range(FitOrder_det+1):
for j in range(FitOrder_det+1-i):
ict += 1
xt += detxfor[ict] * x_input ** i * y_input ** j
yt += detyfor[ict] * x_input ** i * y_input ** j
if debug:
print ("(coords_transform - ct_forward): x_input=", x_input, " y_input=", x_input)
print (" GWA_x=", xt, " GWA_y=", yt)
#raw_input("*** press enter to continue... ")
# Now that we have angular position at GWA of xy SCA pixels in xt, yt, do tilt-correction
w = 180. * 3600. # arcsec in pi radians, so 1 arcsec is pi/w, get pi from cos(-1.0)
w1 = np.arccos(-1.0) # gives value of pi
w2 = w1 / w # 1 arcsec expressed in radians
delta_theta_xrad = delta_theta_x * w2 # now calculated for the general case
delta_theta_yrad = delta_theta_y * w2
if debug:
print ("(coords_transform - ct_forward): delta_theta_x=", delta_theta_x, " delta_theta_y=", delta_theta_y)
print (" delta_theta_xrad=", delta_theta_xrad, "delta_theta_yrad=", delta_theta_yrad)
#raw_input("*** press enter to continue... ")
# Do forward rotation
# calculate direction cosines of xt, yt, (xgwa, ygwa)
v = np.abs(np.sqrt(1.0 + xt*xt + yt*yt))
x0 = xt / v
y0 = yt / v
z0 = 1.0 / v
# rotate to mirror reference system with small angle approx. and perform rotation
x1 = -1 * (x0 - delta_theta_yrad * np.sqrt(1.0 - x0**2 - (y0+delta_theta_xrad*z0)**2))
y1 = -1 * (y0 + delta_theta_xrad * z0)
z1 = np.sqrt(1.0 - x1**2 - y1**2)
# rotate reflected ray back to ref GWA coord system with small angle approx.,
# but first with an inverse rotation around the y-axis
x2 = x1 + delta_theta_yrad *z1
y2 = y1
z2 = np.sqrt(1.0 - x2**2 - y2**2)
# now do an inverse rotation around the x-axis
x3 = x2
    y3 = y2 - delta_theta_xrad*z2   # z2, not the literal 2; mirrors the inverse x-axis rotation in ct_backward
z3 = np.sqrt(1.0 - x3**2 - y3**2)
# compute the cosines from direction cosines
xt_corr = x3 / z3
yt_corr = y3 / z3
if debug:
print ("(coords_transform - ct_forward): Checking tilt rotation")
print (" v=", v)
print (" x0=", x0, " y0=", y0, " z0=", z0)
print (" x1=", x1, " y1=", y1, " z1=", z1)
print (" x2=", x2, " y2=", y2, " z2=", z2)
print (" x3=", x3, " y3=", y3, " z3=", z3)
print (" xt_corr=", xt_corr, " yt_corr", yt_corr)
#raw_input("*** press enter to continue... ")
    # coord transform to go from tilt-corrected GWA to OTE - 5th order polynomial in forward coefficients:
    # filxfor, filyfor
ict_ote = -1
xt_ote, yt_ote = 0.0, 0.0
for i in range(FitOrder_ote+1):
for j in range(FitOrder_ote+1-i):
ict_ote += 1
xt_ote += filxfor[ict_ote] * xt_corr ** i * yt_corr ** j
yt_ote += filyfor[ict_ote] * xt_corr ** i * yt_corr ** j
if debug:
print ("(coords_transform - ct_forward): x_input=", x_input, " y_input=", y_input, " OTE_x=", xt_ote, "OTE_y=", yt_ote)
# Final forward output
x_out, y_out = xt_ote, yt_ote
return x_out, y_out
if __name__ == '__main__':
# Print diagnostic load message
print("(coords_transform): Coordinate transform algorithm Version {} loaded!".format(__version__))
###########################################################################################################
# Test functions
testing = False
if testing:
# Set test script parameters
transf_direction = "forward" # Direction of transformation, string: forward or backward
detector = 491 # Which detector are we working with: 491 or 492
filter_input = "F140X" # Filter being used, string: F140X, CLEAR, or F110W
x_input = 1542.5 # Depending on transform direction, either X or V2 centroid
        y_input = 1542.5             # Depending on transform direction, either Y or V3 centroid
tilt = False # Use tilt: True or False
debug = False # Print diagnostics to screen: True or False
# Run transformation
        x_out, y_out = coords_transf(transf_direction, detector, filter_input, x_input, y_input, tilt=tilt, debug=debug)
print ("Final results: ")
if transf_direction=="forward":
print ("* For direction=", transf_direction, " \n coordinates are=", x_out, y_out, " arcsec")
if transf_direction=="backward":
print ("* For direction=", transf_direction, " \n coordinates are=", x_out, y_out, " pixels")
|
import PIL
import numpy as np
from typing import Tuple
class IDriver:
""" Interface that every GAN driver must meet. """
def __init__(
self,
path: str, # Path to network (local file or URL)
        z_dim: int, # Input (z) vector dimension (e.g. 512 for StyleGAN networks)
c_dim: int = 0 # Number of conditional labels (0 = non-conditional network)
):
self.path = path
self.z_dim = z_dim
self.c_dim = c_dim
def seed_to_z(self, seed: int):
""" Converts seed to vector in the z latent space """
return np.random.RandomState(seed).randn(self.z_dim)
def generate_image(
self,
z: np.ndarray,
label_id: int = None, # Label for conditional networks (ignore on non-conditional)
trunc: float = 1,
translate: Tuple[float, float] = (0, 0), # Ignore if network doesn't support translation
rotate: float = 0, # Ignore if network doesn't support rotation
noise_mode='const', # 'const', 'random', 'none'
        **kwargs # Allow passing additional specific parameters
):
""" Generates image for specified z-vector """
raise NotImplementedError("Should be implemented.")
|
import os
import pandas as pd
from Bio.SeqIO.FastaIO import FastaIterator
from Bio.SeqRecord import SeqRecord
class FastaReader:
"""
Universal class for reading FASTA files with genome or protein
sequence or multi-FASTA with chunks of sequences
Example:
fname = 'NC_001604.fasta'
fr = FastaReader(fname)
kmers_sequence = fr.get_sequence()
ks_df = fr.to_df()
"""
def __init__(self, fasta_file_path: str):
self.fasta_file_path = fasta_file_path
self.fasta_name = os.path.basename(self.fasta_file_path)
@staticmethod
def _fasta_reader(filename: str) -> SeqRecord:
"""
FASTA file reader as iterator
"""
with open(filename) as handle:
for record in FastaIterator(handle):
yield record
@staticmethod
def _normalize(entry: SeqRecord) -> str:
"""
Each of the sequence is normalized into uppercase
format without blank chars at the end
"""
return str(entry.seq).upper().strip()
def get_sequence(self) -> str:
"""
Final genome or protein sequence string after normalization
"""
sequence: str = ""
for entry in self._fasta_reader(self.fasta_file_path):
sequence += f"{self._normalize(entry)} "
return sequence.strip()
def to_df(self) -> pd.DataFrame:
"""
Return pandas DataFrame with k-mers sequence
format what is expected by KMersTransformer
"""
return pd.DataFrame(data=[self.get_sequence()], columns=["sequence"])
|
import numpy as np
from PIL import Image
count = 1
while count <= 10000:
filecount = str(count)
basewidth = 4000
img = Image.open('1.1/'+filecount+'.png')
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
    img = img.resize((basewidth, hsize), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
img.save('1.8/'+filecount+'.png')
count += 1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 2022 02:33:58 AM
@author: ozgur oney
"""
import os
import random
from docx import Document
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Pt
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from datetime import date
window = Tk()
window.title('Workpaper Creator')
# set minimum&maximum window size value - for fixed window
window.minsize(600, 400)
window.maxsize(600, 400)
window.config(bg='#456')
f = ('sans-serif', 13)
btn_font = ('sans-serif', 10)
bgcolor = '#BF5517'
#array keeping unique title values
genvar = StringVar()
genopt = ['Denetçi Yardımcısı',
'BT Denetçi Yardımcısı',
'Yetkili Denetçi Yardımcısı',
'Yetkili BT Denetçi Yardımcısı',
'Denetçi',
'Müfettiş',
'Başdenetçi',
'Başmüfettiş']
genvar.set('BT Denetçi Yardımcısı')
#function of Clear button
def clear_inputs():
baslangictarihi.delete(0, 'end')
bitistarihi.delete(0, 'end')
denetim_elemani_adi.delete(0, 'end')
pathLabel.delete(0,'end')
#change user-input text of date to timestamp object
def date_to_timestamp(d) :
day, month, year = d.split('/')
return date(int(year), int(month), int(day))
#create random date in given interval
def randomDate(start, end):
stime = date_to_timestamp(start)
etime = date_to_timestamp(end)
ptime = stime + random.random() * (etime-stime)
return str(ptime)
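#for example (output varies by run): randomDate('01/01/2022', '14/04/2022')
#could return '2022-02-19', an ISO-formatted date string inside the interval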
#select file contains audit steps
def select_file():
#hide window (optional)
window.withdraw()
#define action
file_path = askopenfilename(title="Open file",
filetypes=[("Word",".docx"),("TXT",".txt"),
("All files",".*")])
#if selected path is valid
if file_path != "":
print ("you chose file with path:", file_path)
else:
print ("you didn't open anything!")
#add selected path to label, so that user can see it
pathLabel.delete(0, END)
pathLabel.insert(0, file_path)
    #keep only the directory part of the selected path (os.path.dirname)
file_path = os.path.dirname(file_path)
#bring back the lights! you can see window after selection
window.deiconify()
return file_path
def generate():
#get file path from label
file_path = pathLabel.get()
with open (file_path, encoding='utf8') as f:
# read lines for auto-numbering
lines = f.readlines()
lines = [line.rstrip() for line in lines]
#for each line in the doc
for line in lines:
#create document
document = Document()
        # Create a character-level style object ("CommentStyle"), then define its parameters
obj_styles = document.styles
obj_charstyle = obj_styles.add_style('CommentStyle', WD_STYLE_TYPE.CHARACTER)
obj_font = obj_charstyle.font
obj_font.size = Pt(12)
obj_font.name = 'Times New Roman'
#add header in bold
baslik = document.add_paragraph()
baslik.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
baslik.add_run('S-100 ÇALIŞMA KAĞIDI', style='CommentStyle').bold=True
#create a random date, add it to document
tarih = document.add_paragraph()
tarih.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
tarih.add_run('Tarih: ', style='CommentStyle').bold=True
tarih.add_run(randomDate(baslangictarihi.get(), bitistarihi.get()), style='CommentStyle')
#add workpaper number in increasing order for each step
ck_num= document.add_paragraph()
ck_num.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
ck_num.add_run('Çalışma Kâğıdı Numarası: ', style='CommentStyle').bold=True
index = [x for x in range(len(lines)) if line in lines[x]]
ck_num.add_run(str(index[0]+1).strip("[]"), style='CommentStyle')
#add audit step
denetim_adimi = document.add_paragraph()
denetim_adimi.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
denetim_adimi.add_run('İlgili Denetim Adımı: ', style='CommentStyle').bold=True
denetim_adimi.add_run(line, style='CommentStyle')
#add auditor name /with his/her title
denetim_elemani = document.add_paragraph()
denetim_elemani.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
denetim_elemani.add_run('Testi Gerçekleştiren Denetim Elemanı: ', style='CommentStyle').bold=True
isim = str(denetim_elemani_adi.get()) + ", " + str(genvar.get()) + ", Denetim Genel Müdürlüğü"
denetim_elemani.add_run(isim, style='CommentStyle')
#sampling
orneklem = document.add_paragraph()
orneklem.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
orneklem.add_run('Örneklem Yöntemi: ', style='CommentStyle').bold=True
#documents viewed/researched
incelenen = document.add_paragraph()
incelenen.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
incelenen.add_run('İncelenen Dokümanlar: ', style='CommentStyle').bold=True
#related (if exists) non-conformity number
bulgu_num = document.add_paragraph()
bulgu_num.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
bulgu_num.add_run('Bulgu Numarası: ', style='CommentStyle').bold=True
#test procedure
test_prod = document.add_paragraph()
test_prod.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT
test_prod.add_run('Ayrıntılı Test Prosedürü: ', style='CommentStyle').bold=True
#detect filename
filename = line.rstrip() + '.docx'
#save document
document.save(filename)
#message informing last user about process
messagebox.showinfo("Sonuc", "Dosya(lar) başarıyla oluşturuldu.")
#void
return None
# frame carrying items in the window
frame = Frame(window, padx=20, pady=20, bg=bgcolor)
frame.pack(expand=True, fill=BOTH)
#labels
Label(frame, text= "Den. Programı: ", font=f,
bg=bgcolor).grid(column=0, row=0, padx=15, pady=15)
pathLabel = Entry(frame,textvariable="")
pathLabel.grid(column=1, row=0, padx=15, pady=15)
Label(frame, text = "Baş. tarihi : ", font=f,
bg=bgcolor).grid(column=0, row=1, padx=15, pady=15)
btn_frame = Frame(frame, bg=bgcolor)
btn_frame.grid(columnspan=2, pady=(50, 0))
Label(frame, text = "Bit. tarihi : ", font=f,
bg=bgcolor).grid(column=0, row=2, padx=15, pady=15)
btn_frame = Frame(frame, bg=bgcolor)
btn_frame.grid(columnspan=2, pady=(50, 0))
Label(frame, text = "Den. Elemanı İsim: ", font=f,
bg=bgcolor).grid(column=0, row=3, padx=15, pady=15)
btn_frame = Frame(frame, bg=bgcolor)
btn_frame.grid(columnspan=2, pady=(50, 0))
Label(frame, text = "Title: ", font=f,
bg=bgcolor).grid(column=0, row=4, padx=15, pady=15)
btn_frame = Frame(frame, bg=bgcolor)
btn_frame.grid(columnspan=2, pady=(50, 0))
#input widgets
fileSelect = Button(
frame,
text='Dosya Seç',
command=select_file,
font=btn_font,
padx=10,
pady=5,
width=3
)
fileSelect.grid(row=0, column=2)
baslangictarihi = Entry(frame, width=20, font=f)
baslangictarihi.grid(row=1, column=1)
bitistarihi = Entry(frame, width=20, font=f)
bitistarihi.grid(row=2, column=1)
denetim_elemani_adi = Entry(frame, width=20, font=f)
denetim_elemani_adi.grid(row=3, column=1)
rutbe = OptionMenu(
frame,
genvar,
*genopt
)
rutbe.grid(row=4, column=1, pady=(5,0))
rutbe.config(width=16, font=f)
#default values
baslangictarihi.insert(0,'01/01/2022')
bitistarihi.insert(0,'14/04/2022')
denetim_elemani_adi.insert(0,'John Doe')
#buttons
submit_btn = Button(
btn_frame,
text='Oluştur',
command=generate,
font=btn_font,
padx=10,
pady=5
)
submit_btn.pack(side=LEFT, expand=True, padx=(15, 0))
clear_btn = Button(
btn_frame,
text='Temizle',
command=clear_inputs,
font=btn_font,
padx=10,
pady=5,
width=7
)
clear_btn.pack(side=LEFT, expand=True, padx=15)
exit_btn = Button(
btn_frame,
text='Çıkış',
command=lambda:window.destroy(),
font=btn_font,
padx=10,
pady=5
)
exit_btn.pack(side=LEFT, expand=True)
# mainloop
window.mainloop()
|
from data import data
from data.data import Dataset
import torch
import fire
import time
import os, sys
import tqdm
from typing import Dict, Tuple
from torch import nn, optim
from torch.nn import functional
from collections import namedtuple
from visdom import Visdom
from face_detection import cig
from face_detection import FasterRcnn
from face_detection.utils import *
# Forward declaration so the "-> EasyTrainer" return annotation on load() below
# resolves at class-definition time; the real implementation follows.
class EasyTrainer(nn.Module): ...
LossTuple = namedtuple("LossTuple", [
'rpn_loc_loss',
'rpn_cls_loss',
'roi_loc_loss',
'roi_cls_loss',
'total_loss'
])
def time_count(f):
def wrapper(*args, **kwargs):
start = time.time()
temp = f(*args, **kwargs)
print("\033[32mcost time\033[0m:", round(time.time() - start, 3), "\033[33msecond(s)\033[0m")
return temp
return wrapper
# A wrapper around FasterRcnn that bundles loss computation and the optimizer step for training
class EasyTrainer(nn.Module):
def __init__(self, faster_rcnn : FasterRcnn):
super().__init__()
self.faster_rcnn : FasterRcnn = faster_rcnn
self.loc_nor_mean : Tuple[float] = faster_rcnn.loc_nor_mean
self.loc_nor_std : Tuple[float] = faster_rcnn.loc_nor_std
        self.optimizer : optim.Optimizer = faster_rcnn.set_optimizer()
self.rpn_sigma : float = cig.rpn_sigma
self.roi_sigma : float = cig.roi_sigma
# create target creater
self.anchorTC : AnchorTargetCreator = AnchorTargetCreator()
self.proposalTC : ProposalTargetCreator = ProposalTargetCreator()
def forward(self, images : torch.Tensor, bboxes : torch.Tensor, labels : torch.Tensor, scale : float) -> LossTuple:
if bboxes.shape[0] != 1:
raise RuntimeError("batch_size must be 1!!!")
_, _, H, W = images.shape
feature_mapping = self.faster_rcnn.extractor(images)
rpn_locs, rpn_scores, rois, _, anchor = self.faster_rcnn.rpn(
x=feature_mapping,
img_size=[H, W],
scale=scale
)
# note that batch size is 1
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# align to get the proposal target value
sample_roi, gt_roi_loc, gt_roi_label = self.proposalTC(
roi=roi,
bbox=safe_to_numpy(bbox),
label=safe_to_numpy(label),
loc_normalize_mean=self.loc_nor_mean,
loc_normalize_std=self.loc_nor_std
)
sample_roi = safe_to_tensor(sample_roi)
gt_roi_loc = safe_to_tensor(gt_roi_loc)
gt_roi_label = safe_to_tensor(gt_roi_label)
# note that we do the forwarding for one data in a batch
        # so all the chosen data in one batch is the first data, whose
# corresponding index is 0
sample_roi_indices = torch.zeros(len(sample_roi))
roi_cls_loc, roi_score = self.faster_rcnn.roi_head(
x=feature_mapping,
rois=sample_roi,
roi_indices=sample_roi_indices
)
"""calculate the RPN loss"""
gt_rpn_loc, gt_rpn_label = self.anchorTC(
bbox=safe_to_numpy(bbox),
anchor=anchor,
img_size=[H, W]
)
gt_rpn_label : torch.Tensor = torch.LongTensor(gt_rpn_label)
gt_rpn_loc : torch.Tensor = safe_to_tensor(gt_rpn_loc)
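        # fast_rcnn_loc_loss (imported via face_detection.utils) is presumably the
        # sigma-weighted smooth-L1 regression loss from the Faster R-CNN paper,
        # evaluated only where the ground-truth label is positive.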
rpn_loc_loss: torch.Tensor = fast_rcnn_loc_loss(
pred_loc=rpn_loc,
gt_loc=gt_rpn_loc,
gt_label=gt_rpn_label.data,
sigma=self.rpn_sigma
)
# remember to ignore the bbox whose tag is -1
rpn_cls_loss : torch.Tensor = functional.cross_entropy(
input=rpn_score,
target=gt_rpn_label.cuda() if cig.use_cuda else gt_rpn_label.cpu(),
ignore_index=-1
)
        # cut the gradient path to reduce GPU cost and drop every entry whose label is -1
mask : torch.Tensor = gt_rpn_label > -1
gt_rpn_label : torch.Tensor = gt_rpn_label[mask]
rpn_score : torch.Tensor = rpn_score[mask]
"""calculate the RoI loss"""
n : int = roi_cls_loc.shape[0]
roi_cls_loc : torch.Tensor = roi_cls_loc.view(n, -1, 4)
roi_loc : torch.Tensor = roi_cls_loc[
torch.arange(0, n),
gt_roi_label
].contiguous()
roi_loc_loss : torch.Tensor = fast_rcnn_loc_loss(
pred_loc=roi_loc,
gt_loc=gt_roi_loc,
gt_label=gt_roi_label,
sigma=self.roi_sigma
)
roi_cls_loss = functional.cross_entropy(
input=roi_score,
target=gt_roi_label
)
        # sum up all the losses
total_loss = rpn_cls_loss + rpn_loc_loss + roi_cls_loss + roi_loc_loss
loss_tuple = LossTuple(
rpn_loc_loss=rpn_loc_loss,
rpn_cls_loss=rpn_cls_loss,
roi_loc_loss=roi_loc_loss,
roi_cls_loss=roi_cls_loss,
total_loss=total_loss
)
return loss_tuple
def train_one_image(self, images : torch.Tensor, bboxes : torch.Tensor, labels : torch.Tensor, scale : float) -> LossTuple:
"""
Args
- images : Actually it is an image, which is shaped as [1, C, H, W]
- bboxes : GT bbox of the items, which is shaped as [1, d, 4]
- labels : class of each bboxes, which is shaped as [1, d]
- scale : ratio between preprocessed image and original image
"""
self.optimizer.zero_grad()
loss_tuple = self.forward(
images=images,
bboxes=bboxes,
labels=labels,
scale=scale
)
loss_tuple.total_loss.backward()
self.optimizer.step()
return loss_tuple
def save(self, save_path : str = None, save_optimizer : bool = True, **kwargs):
save_dict = {
"model" : self.faster_rcnn.state_dict(),
"config" : cig.state_dict(),
"optimizer" : self.optimizer.state_dict() if save_optimizer else None,
"info" : kwargs
}
if save_path is None:
local_time = time.strftime("%m%d%H%M")
save_path = "checkpoints/fasterrcnn_{}".format(local_time)
for value in kwargs.values():
save_path += "_{}".format(value)
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(save_dict, save_path)
def load(self, path : str, load_optimizer : bool = True, load_config : bool = True) -> EasyTrainer:
state_dict = torch.load(path)
self.faster_rcnn.load_state_dict(
state_dict=state_dict["model"] if "model" in state_dict else state_dict
)
if load_optimizer and "optimizer" in state_dict and state_dict["optimizer"] is not None:
self.optimizer.load_state_dict(state_dict["optimizer"])
if load_config and "config" in state_dict and state_dict["config"] is not None:
cig.load_dict(state_dict["config"])
return self
@time_count
def train(**kwargs):
# load the configer
cig.load_dict(**kwargs)
# create model and training wrapper
model = FasterRcnn()
trainer = EasyTrainer(model)
print("\033[32m{}\033[0m".format("complete creating model and trainer"))
if cig.use_cuda:
trainer = trainer.cuda()
if cig.model_path:
trainer.load(
path=cig.model_path,
load_optimizer=True,
load_config=True
)
# create visdom
vis = Visdom()
# for decay of the learning rate
cur_lr = cig.learning_rate
# create loader of dataset
data_set = Dataset()
epoch_iter = tqdm.tqdm(range(cig.epoch), **cig.EPOCH_LOOP_TQDM)
for epoch in epoch_iter:
loader = data_set.get_train_loader()
indices = range(data_set.training_sample_num()) # for progress bar in tqdm
index_iter = tqdm.tqdm(indices, **cig.BATCH_LOOP_TQDM)
epoch_iter.set_description_str("\033[32mEpoch {}\033[0m".format(epoch))
for index, (b_img, b_bbox, b_label, scales) in zip(index_iter, loader):
scale : float = scales[0]
loss_tuple = trainer.train_one_image(
images=b_img,
bboxes=b_bbox,
labels=b_label,
scale=scale
)
post_info = "\033[33m{},{},{},{},{}\033[0m".format(
round(loss_tuple.rpn_cls_loss.item(), 2),
round(loss_tuple.rpn_loc_loss.item(), 2),
round(loss_tuple.roi_cls_loss.item(), 2),
round(loss_tuple.roi_loc_loss.item(), 2),
round(loss_tuple.total_loss.item(), 2)
)
# set prefix and suffix info for tqdm iterator
index_iter.set_description_str("\033[32mEpoch {} complete!\033[0m".format(epoch)
if index == (data_set.training_sample_num() - 1) else "\033[35mtraining...\033[0m")
index_iter.set_postfix_str(post_info)
trainer.save()
if __name__ == "__main__":
fire.Fire(train)
|
from megnet.layers.graph import MEGNetLayer, CrystalGraphLayer, InteractionLayer
from megnet.layers.readout import Set2Set, LinearWithIndex
from keras.layers import deserialize as keras_layer_deserialize
from megnet.losses import mean_squared_error_with_scale
from megnet.activations import softplus2
_CUSTOM_OBJECTS = globals()
|
from __future__ import annotations
import logging
from typing import TextIO
from ..cli import run_with_file_argument
from .task_1 import Vector, fire, get_target_area
logger = logging.getLogger(__name__)
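# With an initial X velocity v and drag, the probe travels at most 1 + 2 + ... + v
# = v * (v + 1) / 2 steps horizontally, so find_min_x_velocity below returns the
# smallest v whose triangular sum reaches the near edge of the target
# (e.g. min_x = 20 -> v = 6, since 15 < 20 <= 21).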
def find_min_x_velocity(min_x: int) -> int:
distance = 1
x_velocity = 1
while distance < min_x:
x_velocity += 1
distance += x_velocity
return x_velocity
def main(input: TextIO) -> str:
target_area = get_target_area(input)
logger.info("Target area: %s", target_area)
min_x_velocity = find_min_x_velocity(target_area.min_x)
logger.info("Min X velocity %d", min_x_velocity)
max_x_velocity = target_area.max_x
logger.info("Max X velocity %d", max_x_velocity)
min_y_velocity = target_area.min_y
logger.info("Min Y velocity %d", min_y_velocity)
    # For a target below y = 0, any initial Y velocity above abs(min_y) overshoots it
    # on the way back down, so bound the search instead of looping forever.
    max_y_velocity = -min_y_velocity
    logger.info("Max Y velocity %d", max_y_velocity)
    y = min_y_velocity
    hits = 0
    while y <= max_y_velocity:
        for x in range(min_x_velocity, max_x_velocity + 1):
            velocity = Vector(x=x, y=y)
            if fire(velocity, target_area) is not None:
                hits += 1
                logger.info("Scored %d hit at x=%d,y=%d", hits, x, y)
        y += 1
    return f"{hits}"
if __name__ == "__main__":
run_with_file_argument(main)
|
import logging
from typing import List
from cryptoxlib.WebsocketMgr import Subscription, CallbacksType, Websocket
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.binance.exceptions import BinanceException
from cryptoxlib.clients.binance.functions import map_ws_pair, extract_ws_symbol
from cryptoxlib.clients.binance.BinanceCommonWebsocket import BinanceCommonWebsocket, BinanceSubscription
from cryptoxlib.clients.binance import enums
from cryptoxlib.clients.binance.types import PairSymbolType
LOG = logging.getLogger(__name__)
class BinanceFuturesWebsocket(BinanceCommonWebsocket):
def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
websocket_uri: str = None, builtin_ping_interval: float = None, periodic_timeout_sec: int = None,
ssl_context = None) -> None:
super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,
sec_key = sec_key,
websocket_uri = websocket_uri,
builtin_ping_interval = builtin_ping_interval,
periodic_timeout_sec = periodic_timeout_sec,
ssl_context = ssl_context)
def is_authenticated(self) -> bool:
for subscription in self.subscriptions:
if subscription.is_authenticated():
return True
return False
async def _process_periodic(self, websocket: Websocket) -> None:
if self.is_authenticated() is True:
LOG.info(f"[{self.id}] Refreshing listen key.")
await self.binance_client.keep_alive_listen_key()
class BinanceUSDSMFuturesWebsocket(BinanceFuturesWebsocket):
WEBSOCKET_URI = "wss://fstream.binance.com/"
BULTIN_PING_INTERVAL_SEC = 30
LISTEN_KEY_REFRESH_INTERVAL_SEC = 1800
def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
ssl_context = None) -> None:
super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,
sec_key = sec_key, websocket_uri = BinanceUSDSMFuturesWebsocket.WEBSOCKET_URI,
builtin_ping_interval = BinanceUSDSMFuturesWebsocket.BULTIN_PING_INTERVAL_SEC,
periodic_timeout_sec = BinanceUSDSMFuturesWebsocket.LISTEN_KEY_REFRESH_INTERVAL_SEC,
ssl_context = ssl_context)
class BinanceUSDSMFuturesTestnetWebsocket(BinanceFuturesWebsocket):
WEBSOCKET_URI = "wss://stream.binancefuture.com/"
def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
ssl_context = None) -> None:
super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,
sec_key = sec_key, websocket_uri = BinanceUSDSMFuturesTestnetWebsocket.WEBSOCKET_URI,
ssl_context = ssl_context)
class BinanceCOINMFuturesWebsocket(BinanceFuturesWebsocket):
WEBSOCKET_URI = "https://dstream.binance.com/"
BULTIN_PING_INTERVAL_SEC = 30
LISTEN_KEY_REFRESH_INTERVAL_SEC = 1800
def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
ssl_context = None) -> None:
super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,
sec_key = sec_key, websocket_uri = BinanceCOINMFuturesWebsocket.WEBSOCKET_URI,
builtin_ping_interval = BinanceCOINMFuturesWebsocket.BULTIN_PING_INTERVAL_SEC,
periodic_timeout_sec = BinanceCOINMFuturesWebsocket.LISTEN_KEY_REFRESH_INTERVAL_SEC,
ssl_context = ssl_context)
class BinanceCOINMFuturesTestnetWebsocket(BinanceFuturesWebsocket):
WEBSOCKET_URI = "wss://dstream.binancefuture.com/"
def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
ssl_context = None) -> None:
super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,
sec_key = sec_key, websocket_uri = BinanceCOINMFuturesTestnetWebsocket.WEBSOCKET_URI,
ssl_context = ssl_context)
class AggregateTradeSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = extract_ws_symbol(symbol)
def get_channel_name(self):
return f"{self.symbol}@aggTrade"
class IndexPriceSubscription(BinanceSubscription):
def __init__(self, pair: Pair, frequency1sec: bool = False, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
self.frequency1sec = frequency1sec
def get_channel_name(self):
return map_ws_pair(self.pair) + "@indexPrice" + ("@1s" if self.frequency1sec else "")
class MarkPriceSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, frequency1sec: bool = False, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = extract_ws_symbol(symbol)
self.frequency1sec = frequency1sec
def get_channel_name(self):
return f"{self.symbol}@markPrice" + ("@1s" if self.frequency1sec else "")
class MarkPriceAllSubscription(BinanceSubscription):
def __init__(self, frequency1sec: bool = False, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.frequency1sec = frequency1sec
def get_channel_name(self):
return "!markPrice@arr" + ("@1s" if self.frequency1sec else "")
class CandlestickSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, interval: enums.Interval, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.interval = interval
self.symbol = extract_ws_symbol(symbol)
def get_channel_name(self):
return f"{self.symbol}@kline_{self.interval.value}"
class ContContractCandlestickSubscription(BinanceSubscription):
def __init__(self, pair: Pair, interval: enums.Interval, contract_type: enums.ContractType, callbacks: CallbacksType = None):
super().__init__(callbacks)
if contract_type not in [enums.ContractType.PERPETUAL, enums.ContractType.CURRENT_QUARTER, enums.ContractType.NEXT_QUARTER]:
raise BinanceException(f"Level [{contract_type.value}] must be one of {enums.ContractType.PERPETUAL.value}, "
f"{enums.ContractType.CURRENT_QUARTER.value} or {enums.ContractType.NEXT_QUARTER.value}.")
self.pair = pair
self.interval = interval
self.contract_type = contract_type
def get_channel_name(self):
return f"{map_ws_pair(self.pair)}_{self.contract_type.value.lower()}@continuousKline_{self.interval.value}"
class IndexPriceCandlestickSubscription(BinanceSubscription):
def __init__(self, pair: Pair, interval: enums.Interval, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
self.interval = interval
def get_channel_name(self):
return f"{map_ws_pair(self.pair)}@indexPriceKline_{self.interval.value}"
class MarkPriceCandlestickSubscription(BinanceSubscription):
def __init__(self, symbol: str, interval: enums.Interval, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = symbol
self.interval = interval
def get_channel_name(self):
return f"{self.symbol}@markPriceKline_{self.interval.value}"
class AllMarketMiniTickersSubscription(BinanceSubscription):
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
def get_channel_name(self):
return "!miniTicker@arr"
class MiniTickerSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = extract_ws_symbol(symbol)
def get_channel_name(self):
return f"{self.symbol}@miniTicker"
class AllMarketTickersSubscription(BinanceSubscription):
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
def get_channel_name(self):
return "!ticker@arr"
class TickerSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = extract_ws_symbol(symbol)
def get_channel_name(self):
return f"{self.symbol}@ticker"
class OrderBookTickerSubscription(BinanceSubscription):
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
def get_channel_name(self):
return "!bookTicker"
class OrderBookSymbolTickerSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = extract_ws_symbol(symbol)
def get_channel_name(self):
return f"{self.symbol}@bookTicker"
class LiquidationOrdersSubscription(BinanceSubscription):
def __init__(self, symbol: PairSymbolType, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.symbol = extract_ws_symbol(symbol)
def get_channel_name(self):
return f"{self.symbol}@forceOrder"
class AllMarketLiquidationOrdersSubscription(BinanceSubscription):
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
def get_channel_name(self):
return "!forceOrder@arr"
class DepthSubscription(BinanceSubscription):
DEFAULT_FREQUENCY = 250
DEFAULT_LEVEL = 0
def __init__(self, symbol: PairSymbolType, level: int = DEFAULT_LEVEL, frequency: int = DEFAULT_FREQUENCY,
callbacks: CallbacksType = None):
super().__init__(callbacks)
if level not in [0, 5, 10, 20]:
raise BinanceException(f"Level [{level}] must be one of 0, 5, 10 or 20.")
if frequency not in [100, 250, 500]:
raise BinanceException(f"Frequency [{frequency}] must be one of 100, 250 or 500.")
self.symbol = extract_ws_symbol(symbol)
self.level = level
self.frequency = frequency
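    # Illustrative channel names (assuming extract_ws_symbol yields the lower-case
    # symbol, e.g. "BTCUSDT" -> "btcusdt"):
    #   DepthSubscription("BTCUSDT")                          -> "btcusdt@depth"
    #   DepthSubscription("BTCUSDT", level=5)                 -> "btcusdt@depth5"
    #   DepthSubscription("BTCUSDT", level=10, frequency=100) -> "btcusdt@depth10@100ms"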
def get_channel_name(self):
if self.level == DepthSubscription.DEFAULT_LEVEL:
level_str = ""
else:
level_str = f"{self.level}"
if self.frequency == DepthSubscription.DEFAULT_FREQUENCY:
frequency_str = ""
else:
frequency_str = f"@{self.frequency}ms"
return f"{self.symbol}@depth{level_str}{frequency_str}"
class BlvtSubscription(BinanceSubscription):
def __init__(self, pair: Pair, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
def get_channel_name(self):
return f"{map_ws_pair(self.pair).upper()}@tokenNav"
class BlvtCandlestickSubscription(BinanceSubscription):
def __init__(self, pair: Pair, interval: enums.Interval, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
self.interval = interval
def get_channel_name(self):
return f"{map_ws_pair(self.pair).upper()}@nav_Kline_{self.interval.value}"
class CompositeIndexSubscription(BinanceSubscription):
def __init__(self, pair: Pair, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.pair = pair
def get_channel_name(self):
return f"{map_ws_pair(self.pair)}@compositeIndex"
class AccountSubscription(BinanceSubscription):
def __init__(self, callbacks: CallbacksType = None):
super().__init__(callbacks)
self.listen_key = None
async def initialize(self, **kwargs):
binance_client = kwargs['binance_client']
listen_key_response = await binance_client.get_listen_key()
self.listen_key = listen_key_response["response"]["listenKey"]
LOG.debug(f'Listen key: {self.listen_key}')
def get_channel_name(self):
return self.listen_key
def is_authenticated(self) -> bool:
return True
|
import torch
import numpy as np
from matplotlib import pyplot as plt
import sys
from policy_network import PolicyNetwork
from value_network import ValueNetwork
from buffer import Buffer
class TRPO(object):
def __init__(self, alpha, input_size, output_size):
self.buffer = Buffer()
self.value_network = ValueNetwork(alpha, input_size=input_size,
output_size=1)
self.policy_network = PolicyNetwork(0.0001, input_size=input_size,
output_size=output_size)
self.old_policy_network = PolicyNetwork(0.0001, input_size=input_size,
output_size=output_size)
# store policy state
self.buffer.store_parameters(self.policy_network.state_dict())
self.avg_rewards = []
def update(self, iter=80):
observations = self.buffer.get_observations()
#actions = self.buffer.get_actions()
rewards = self.buffer.get_rewards()
advantages = self.buffer.get_advantages()
log_probs = self.buffer.get_log_probs()
self.old_policy_network.load_state_dict(self.buffer.old_parameters)
old_pred = self.old_policy_network.forward(observations)
old_action_probabilities = torch.distributions.Categorical(old_pred)
old_action = old_action_probabilities.sample()
old_probs = old_action_probabilities.log_prob(old_action).reshape(-1, 1)
self.buffer.store_parameters(self.policy_network.state_dict())
self.policy_network.optimize(log_probs, old_probs,
advantages)
self.value_network.optimize(observations, rewards, iter=iter)
    def calculate_advantage(self):
        # one-step advantage estimate: r + V(s') - V(s), with no discount applied to V(s')
prev_observation = self.buffer.observation_buffer[-2]
observation = self.buffer.observation_buffer[-1]
v1 = self.value_network(prev_observation)
v2 = self.value_network(observation)
return self.buffer.reward_buffer[-1] + v2 - v1
def act(self, observation):
prediction = self.policy_network.forward(observation)
action_probabilities = torch.distributions.Categorical(prediction)
action = action_probabilities.sample()
log_prob = action_probabilities.log_prob(action)
self.buffer.store_log_prob(log_prob)
return action.item(), log_prob
    def discount_rewards(self, step):
        # For each of the last `step` rewards, add a 0.99-discounted sum of the rewards
        # recorded from that step through the most recent one (the discount exponent is
        # measured from the end of the buffer), turning raw rewards into return-like targets.
for s in reversed(range(1, step+1)):
update = 0
for k in reversed(range(1, s+1)):
update += self.buffer.reward_buffer[-k]*(0.99**k)
self.buffer.reward_buffer[-s] += update
def train(self, env, epochs=1000, steps=4000):
plt.ion()
for epoch in range(epochs):
observation = env.reset()
self.buffer.store_observation(observation)
step = 0
            for step in range(steps):
                step += 1  # make `step` 1-based so discount_rewards receives the number of steps taken
action, log_prob = self.act(observation)
self.buffer.store_action(log_prob)
observation, reward, done, info = env.step(action)
self.buffer.store_reward(reward/200 + observation[0]/2 + (1*observation[1])**2)
#env.render()
self.buffer.store_observation(observation)
advantage = self.calculate_advantage()
self.buffer.store_advantage(advantage)
if done or step == steps-1:
observation = env.reset()
self.discount_rewards(step)
step = 0
self.update(iter=5)
rwrd = self.buffer.get_rewards()
self.avg_rewards.append((torch.sum(rwrd)/rwrd.shape[0]).numpy())
self.buffer.clear_buffer()
print("Average Reward: {}".format(self.avg_rewards[-1]))
plt.title("Reward per Epoch")
plt.xlabel("Epoch")
plt.ylabel("Reward")
plt.plot(self.avg_rewards, label="average reward")
plt.legend(loc="upper left")
plt.draw()
plt.pause(0.0001)
plt.clf()
def main():
import gym
torch.manual_seed(1)
np.random.seed(1)
env = gym.make('MountainCar-v0')
trpo = TRPO(alpha=0.001, input_size=2, output_size=3)
trpo.train(env=env, epochs=200, steps=800)
if __name__ == "__main__":
main()
|
import csv, datetime as dt
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db import *
db = create_engine('sqlite:///data/mke_wibrs_db.db', echo = False)
Session = sessionmaker(bind = db)
session = Session()
def get_data(start, end):
with open('data/wibrs.csv') as data:
read = csv.reader(data, delimiter = ',')
record = [i for i in read]
return record[start:end]
def tr_data(start, end):
record = get_data(start, end)
for i in record:
date_data = i[1]
f_date = '%Y-%m-%d %H:%M:%S'
date = datetime.strptime(date_data, f_date)
update_time = dt.datetime.now()
insert = mke_wibrs_db(i[0], date, i[2], i[9], None, None, None, i[4], i[6],
i[8], i[3], i[12], i[13], i[14], i[15], i[16], i[17],
i[18], i[19], i[20], i[21], update_time)
session.add(insert)
session.commit()
print(i[0], ' successfully entered.')
def exe(start, end):
tr_data(start, end)
if __name__ == '__main__':
exe(1, 650673)
|
# std
from typing import List, Optional
# 3rd party
from sqlalchemy.sql.expression import text
# custom
from test_system.constants import MAX_HASH_GENERATION_TRY_COUNT, TOKEN_PERIOD_OF_VALIDITY_IN_DAYS
from test_system.util import generate_unknown_hash_token
from test_system.models.database import db
from .answers import EvaluableTestAnswer
from .test import Test
class Token(db.Model):
__tablename__ = "token"
token = db.Column(db.String, primary_key=True)
max_usage_count: db.Column = db.Column(db.Integer)
personal_data_test_name = db.Column(db.String, db.ForeignKey("test.name"))
pre_collect_test_names = db.Column(db.ARRAY(db.String)) # references name (column) from test (table)
evaluable_test_name = db.Column(db.String, db.ForeignKey("test.name"))
creation_timestamp: db.Column = db.Column(db.TIMESTAMP) # no default value here, otherwise None can't be inserted
personal_data_test = db.relationship("Test", foreign_keys=[personal_data_test_name])
evaluable_test = db.relationship("Test", foreign_keys=[evaluable_test_name])
@classmethod
def generate_token(cls,
max_usage_count: Optional[int],
personal_data_test_name: str,
pre_collect_test_names: List[str],
evaluable_test_name: str,
expires: bool = True) -> "Token":
token_hash = generate_unknown_hash_token(
lambda test_token: cls.query.filter_by(token=test_token).first() is None, MAX_HASH_GENERATION_TRY_COUNT)
return cls(creation_timestamp=db.func.now() if expires else None,
token=token_hash,
max_usage_count=max_usage_count,
personal_data_test_name=personal_data_test_name,
pre_collect_test_names=pre_collect_test_names,
evaluable_test_name=evaluable_test_name)
@staticmethod
def get_earliest_valid_sql_timestamp():
"""
Returns a SQL timestamp, which will be the earliest time an unexpired token could have been created.
"""
return text(f"NOW() - INTERVAL '1 DAY' * {TOKEN_PERIOD_OF_VALIDITY_IN_DAYS}")
def __init__(self, **kwargs):
if "creation_timestamp" not in kwargs: # do manually to enable None passing
kwargs["creation_timestamp"] = db.func.now()
super().__init__(**kwargs)
def __repr__(self):
return (f"Token '{self.token}' ("
f"personal data test: {self.personal_data_test_name}, "
f"pre collect Tests: {self.pre_collect_test_names}, "
f"evaluable test: {self.evaluable_test_name}, "
f"usages: {self.max_usage_count})")
def was_used_for_answer(self, evaluable_test_answer: EvaluableTestAnswer) -> bool:
"""
        Returns True if this token was already used to evaluate the given evaluable_test_answer.
"""
return evaluable_test_answer.was_evaluated_with_token == self.token
def is_invalid(self) -> bool:
"""
        Returns whether the token is invalid, either because it has expired or because it has no usages left.
"""
return self._is_expired() or self._has_no_usage_left()
def use_for(self, evaluable_test_answer: EvaluableTestAnswer) -> None:
"""
        Should be called when the token is used for the evaluable_test_answer.
"""
evaluable_test_answer.was_evaluated_with_token = self.token
if self.max_usage_count is not None:
self.max_usage_count -= 1
db.session.commit()
def get_pre_collect_tests(self) -> List[Test]:
"""
        Returns the list of Tests that should be used as PRE_COLLECT_TESTs for this token.
"""
possible_pre_collect_tests = [Test.query.filter_by(name=pre_collect_test_name).first()
for pre_collect_test_name in self.pre_collect_test_names]
pre_collect_tests = list(filter(lambda test: test is not None, possible_pre_collect_tests))
return pre_collect_tests
def _is_expired(self) -> bool:
if self.creation_timestamp is None:
return False
unexpired_token = Token.query.filter_by(token=self.token)\
.filter(Token.creation_timestamp >= Token.get_earliest_valid_sql_timestamp()).first()
return unexpired_token != self
def _has_no_usage_left(self) -> bool:
return self.max_usage_count is not None and self.max_usage_count <= 0
|
from meadowgrid.config import MEADOWGRID_INTERPRETER
from meadowgrid.meadowgrid_pb2 import ServerAvailableInterpreter
from meadowgrid.runner import (
Deployment,
EC2AllocHost,
LocalHost,
SshHost,
run_command,
run_function,
)
from test_meadowgrid.test_ec2_alloc import _PRIVATE_KEY_FILENAME
async def test_run_function_local():
result = await run_function(
lambda x: x * 2,
LocalHost(),
Deployment(ServerAvailableInterpreter(interpreter_path=MEADOWGRID_INTERPRETER)),
args=[5],
)
assert result == 10
# these parameters must be changed!
# this must be a linux host with meadowgrid installed as per build_meadowgrid_amis.sh
_REMOTE_HOST = SshHost("localhost", {})
async def manual_test_run_function_remote():
result = await run_function(
lambda x: x * 2,
_REMOTE_HOST,
args=[5],
)
assert result == 10
async def manual_test_run_command_remote():
await run_command(
"pip --version",
_REMOTE_HOST,
)
# right now we don't get the stdout back, so the only way to check this is to look
# at the log file on the remote host
async def manual_test_run_function_allocated_ec2_host():
result = await run_function(
lambda x: x * 2,
EC2AllocHost(1, 1, 15, private_key_filename=_PRIVATE_KEY_FILENAME),
args=[5],
)
assert result == 10
|
from torch.nn.modules import LSTM
import torch.nn as nn
from torch.autograd import Variable
import torch
import pdb
class ChannelLSTM(nn.Module):
def __init__(self,input_size=69505, output_size=5952,hidden_size=52,num_layers=16,batch_first=True,
dropout=0.1):
super(ChannelLSTM, self).__init__()
self.hidden_size=hidden_size
self.num_layers=num_layers
self.lstm=LSTM(input_size=input_size,hidden_size=hidden_size,num_layers=num_layers,
batch_first=batch_first,dropout=dropout)
self.output=nn.Linear(self.hidden_size,output_size)
self.hx=None
self.reset_parameters()
def reset_parameters(self):
self.lstm.reset_parameters()
self.output.reset_parameters()
def init_states_each_channel(self):
# num_layers, channel, dim
h=Variable(torch.Tensor(self.num_layers, 1, self.hidden_size)).cuda().zero_()
s=Variable(torch.Tensor(self.num_layers, 1, self.hidden_size)).cuda().zero_()
return (h,s)
def assign_states_tuple(self, states_tuple):
self.hx=states_tuple
def forward(self, input):
output,statetuple=self.lstm(input,self.hx)
output=output.squeeze(1)
output=self.output(output)
return output, statetuple
#
# class ChannelLSTM2(nn.Module):
# def __init__(self,input_size=69505, output_size=5952,hidden_size=128,num_layers=32,batch_first=True,
# dropout=0.1):
# super(ChannelLSTM2, self).__init__()
# self.hidden_size=hidden_size
# self.num_layers=num_layers
# self.lstm=LSTM(input_size=input_size,hidden_size=hidden_size,num_layers=num_layers,
# batch_first=batch_first,dropout=dropout)
# self.output=nn.Linear(self.hidden_size,output_size)
# self.hx=None
# self.reset_parameters()
#
# def reset_parameters(self):
# self.lstm.reset_parameters()
# self.output.reset_parameters()
#
# def init_states_each_channel(self):
# # num_layers, channel, dim
# h=Variable(torch.Tensor(self.num_layers, 1, self.hidden_size)).cuda().zero_()
# s=Variable(torch.Tensor(self.num_layers, 1, self.hidden_size)).cuda().zero_()
# return (h,s)
#
# def assign_states_tuple(self, states_tuple):
# self.hx=states_tuple
#
# def forward(self, input):
# output,statetuple=self.lstm(input,self.hx)
# output=output.squeeze(1)
# output=self.output(output)
# return output, statetuple
#
#
#
# class Custom_LSTM(nn.Module):
#
# def __init__(self,input_size=69505, output_size=5952,hidden_size=128,num_layers=32):
# super(Custom_LSTM, self).__init__()
#
#
# class LSTM_Unit(nn.Module):
# """
# A single layer unit of LSTM
# """
#
# def __init__(self, x, R, W, h, bs):
# super(LSTM_Unit, self).__init__()
#
# self.x = x
# self.R = R
# self.W = W
# self.h = h
# self.bs = bs
#
# self.W_input = nn.Linear(self.x + self.R * self.W + 2 * self.h, self.h)
# self.W_forget = nn.Linear(self.x + self.R * self.W + 2 * self.h, self.h)
# self.W_output = nn.Linear(self.x + self.R * self.W + 2 * self.h, self.h)
# self.W_state = nn.Linear(self.x + self.R * self.W + 2 * self.h, self.h)
#
# self.old_state = Variable(torch.Tensor(self.bs, self.h).zero_().cuda(),requires_grad=False)
#
# def reset_parameters(self):
# for module in self.children():
# module.reset_parameters()
#
# def forward(self, input_x, previous_time, previous_layer):
# # a hidden unit outputs a hidden output new_hidden.
# # state also changes, but it's hidden inside a hidden unit.
#
# semicolon_input = torch.cat([input_x, previous_time, previous_layer], dim=1)
#
# # 5 equations
# input_gate = torch.sigmoid(self.W_input(semicolon_input))
# forget_gate = torch.sigmoid(self.W_forget(semicolon_input))
# new_state = forget_gate * self.old_state + input_gate * \
# torch.tanh(self.W_state(semicolon_input))
# output_gate = torch.sigmoid(self.W_output(semicolon_input))
# new_hidden = output_gate * torch.tanh(new_state)
# self.old_state = Parameter(new_state.data,requires_grad=False)
#
# return new_hidden
#
#
# def reset_batch_channel(self,list_of_channels):
# raise NotImplementedError()
# #
# # def new_sequence_reset(self):
# # raise DeprecationWarning("We no longer reset sequence together in all batch channels, this function deprecated")
# #
# # self.W_input.weight.detach()
# # self.W_input.bias.detach()
# # self.W_output.weight.detach()
# # self.W_output.bias.detach()
# # self.W_forget.weight.detach()
# # self.W_forget.bias.detach()
# # self.W_state.weight.detach()
# # self.W_state.bias.detach()
# #
# # self.old_state = Parameter(torch.Tensor(self.bs, self.h).zero_().cuda(),requires_grad=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import mock
from oauth2py.client import OauthClient
class TestQQ(unittest.TestCase):
def setUp(self):
self.config = {
'name': 'qq',
'client_id': '1234567',
'client_secret': 'secret_abcefg',
'redirect_uri': 'http://127.0.0.1/oauth/qq/callback',
'scope': ''
}
self.access_token = 'access_token123'
self.access_token_resp = 'access_token={0}&expires_in=7776000&refresh_token=88E4BE14'.format(
self.access_token)
self.user_id_resp = 'callback( {"client_id":"1234567","openid":"abcde"} );'
self.user_info_resp = {
u'ret': '0',
u'msg': '',
u'is_lost': '0',
u'nickname': 'caoyue',
u'gender': '男',
u'province': '',
u'city': '',
u'year': '1234',
u'figureurl': 'http://qzapp.qlogo.cn/qzapp/100330589/46D42C580040C4AA42FD15141CF7DCC7/30',
u'figureurl_1': 'http://qzapp.qlogo.cn/qzapp/100330589/46D42C580040C4AA42FD15141CF7DCC7/50',
u'figureurl_2': 'http://qzapp.qlogo.cn/qzapp/100330589/46D42C580040C4AA42FD15141CF7DCC7/100',
u'figureurl_qq_1': 'http://q.qlogo.cn/qqapp/100330589/46D42C580040C4AA42FD15141CF7DCC7/40',
u'figureurl_qq_2': 'http://q.qlogo.cn/qqapp/100330589/46D42C580040C4AA42FD15141CF7DCC7/100',
u'is_yellow_vip': '0',
u'vip': '0',
u'yellow_vip_level': '0',
u'level': '0',
u'is_yellow_year_vip': '0'
}
self.qq = OauthClient.load('qq')
self.qq.init(self.config)
def test_get_login_url(self):
self.assertEqual(
self.qq.get_login_url(),
'https://graph.qq.com/oauth2.0/authorize?client_id={0}&redirect_uri={1}&response_type=code'.format(
self.config['client_id'], self.config['redirect_uri'])
)
self.assertEqual(
self.qq.get_login_url(state='abc'),
'https://graph.qq.com/oauth2.0/authorize?client_id={0}&redirect_uri={1}&response_type=code&state=abc'.format(
self.config['client_id'], self.config['redirect_uri'])
)
@mock.patch('oauth2py.base.requests.post')
@mock.patch('oauth2py.base.requests.get')
def test_get_user_info(self, mock_get, mock_post):
query = 'code=12345&state=abc'
# query = {
# 'code': '12345',
# 'state':'abc'
# }
mock_get.side_effect = self._mocked_requests_get
mock_post_response = mock.Mock()
mock_post_response.status_code = 200
mock_post_response.content = self.access_token_resp
mock_post_response.json.side_effect = ValueError('Not valid json')
mock_post.return_value = mock_post_response
user = self.qq.get_user_info(query)
self.assertEqual(mock_get.call_count, 2)
self.assertEqual(mock_post.call_count, 1)
# assert state
self.assertEqual(self.qq.state, 'abc')
# assert access token
token = self.qq.get_access_token()
self.assertEqual(
token['access_token'],
self.access_token
)
# assert response uid
self.assertEqual(user['name'], self.user_info_resp['nickname'])
def _mocked_requests_get(self, *args, **kwargs):
class MockResponse:
def __init__(self, json_data, content, status_code):
self.json_data = json_data
self.content = content
self.status_code = status_code
def json(self):
if not self.json_data:
raise ValueError('not valid json')
return self.json_data
        if args[0] == 'https://graph.qq.com/oauth2.0/me':
            return MockResponse(None, self.user_id_resp, 200)
        return MockResponse(self.user_info_resp, '', 200)
|
# Validity period of image verification codes in Redis, in seconds
IMAGE_CODE_REDIS_EXPIRES = 300
# Validity period of SMS verification codes in Redis, in seconds
SMS_CODE_REDIS_EXPIRES = 300
# Qiniu storage domain
# QINIU_DOMIN_PREFIX = "http://oyucyko3w.bkt.clouddn.com/"
QINIU_DOMIN_PREFIX = "http://pbbiphbgi.bkt.clouddn.com/"
# Maximum number of news items shown on the home page
HOME_PAGE_MAX_NEWS = 10
# Maximum number of followed users per page
USER_FOLLOWED_MAX_COUNT = 4
# Maximum number of collected news items per page for a user
USER_COLLECTION_MAX_NEWS = 10
# Maximum number of news items per page for other users
OTHER_NEWS_PAGE_MAX_COUNT = 10
# Maximum number of news items shown in the click ranking
CLICK_RANK_MAX_NEWS = 10
# Maximum number of user records per page in the admin panel
ADMIN_USER_PAGE_MAX_COUNT = 10
# Maximum number of news records per page in the admin panel
ADMIN_NEWS_PAGE_MAX_COUNT = 10
|
# encoding=utf-8
'''
Created on 2015-11-09
@author: lowitty
'''
import logging, os, random, sys
sbcPMlogger = logging.getLogger('server.SBCPM')
from com.ericsson.xn.server.parser.SbcParser import SbcNodeInfo
from xml.etree import ElementTree as ET
import threading, time
from datetime import datetime, timedelta
from threading import Lock
lock = Lock()
class SbcPMHolder():
def __init__(self):
self.pmcounters = {}
self.pmcounters["round"] = 0
mapCounters = {}
mapCounters[0] = [0, 0, 0, 0, 0, 0]
mapCounters[1] = [0, 0, 0, 0, 0, 0]
mapCounters[2] = [0, 0, 0, 0, 0, 0]
mapCounters[3] = [0, 0, 0, 0, 0, 0]
mapCounters[4] = [0, 0, 0, 0, 0, 0]
mapCounters[5] = [0, 0, 0, 0, 0, 0]
mapCounters[6] = [0, 0, 0, 0, 0, 0]
mapCounters[7] = [0, 0, 0, 0, 0, 0]
self.pmcounters["counter"] = mapCounters
def getCounters(self):
return self.pmcounters["counter"]
def getPMCounters(self):
return self.pmcounters
def updatePMCounters(self, mapCounter, iRound=None):
lock.acquire()
self.pmcounters["counter"] = mapCounter
if (iRound is not None):
self.pmcounters["round"] = iRound
lock.release()
class SbcPMWriter(threading.Thread):
def __init__(self, pmHolderInstance):
threading.Thread.__init__(self)
self.stopThread = False
filePath = os.path.dirname(os.path.abspath(__file__))
# /com/ericsson/xn/server/pm
sep = os.path.sep
self.sep = sep
packagePath = sep + 'com' + sep + 'ericsson' + sep + 'xn' + sep + 'server' + sep + 'pm'
self.parPath = filePath.split(packagePath)[0]
self.pmHoler = pmHolderInstance
sbcPMlogger.info('SBCPMGEN started.')
self.updateSBCCounters()
def run(self):
while not self.stopThread:
tNow = datetime.now()
            minute = tNow.minute
            sec = tNow.second
            if (minute + 1) % 5 == 0 and 35 > sec >= 30:
# if(True):
                sbcPMlogger.info(
                    'About 30 seconds before the minute becomes a multiple of 5; simulating a counter update and randomizing the log lines for the next period.')
try:
f = open(self.parPath + self.sep + 'config' + self.sep + 'sbc' + self.sep + 'sbc_log.x', 'r')
lines = f.readlines()
f.close()
lenth = len(lines)
intsRandom = sorted(random.sample(range(0, lenth), random.randint(0, lenth)))
sbcPMlogger.info(str(intsRandom))
newLines = []
tStart = tNow + timedelta(seconds=-270)
for ir in intsRandom:
tStampt = tStart + timedelta(seconds=24 * ir)
newLines.append(
                        '[' + tStampt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(tStampt.microsecond // 1000) + '] ' +
lines[ir].strip() + '\n')
nowFile = self.parPath + self.sep + 'config' + self.sep + 'sbc' + self.sep + 'sbc_log.now'
lock.acquire()
open(nowFile, 'w').close()
f = open(nowFile, 'w')
f.writelines(newLines)
f.flush()
f.close()
lock.release()
nowTime = datetime.now()
t1 = nowTime + timedelta(minutes=1)
t2 = nowTime + timedelta(minutes=6)
msg = t1.strftime('%Y-%m-%d %H:%M') + ' to ' + t2.strftime('%H:%M')
sbcPMlogger.warn(msg + ', logs are: ' + '||'.join([k.strip() for k in newLines]))
self.updateSBCCounters()
except Exception as e:
sbcPMlogger.error('ERROR: ' + str(e))
'''originalMap = self.pmHoler.getPMCounters()
sbcPMlogger.info('Dic of counters: ' + str(originalMap))
counters = {}
r = originalMap['round']
mapCounter = originalMap['counter']
firEle = self.getFirstEle(r)
for k, v in mapCounter.iteritems():
nk = k % 8
counters[k] = [(firEle + r + 1) * (nk + 1) * 1, (firEle + r + 1) * (nk + 1) * 2, (firEle + r + 1) * (nk + 1) * 3, (firEle + r + 1) * (nk + 1) * 4, (firEle + r + 1) * (nk + 1) * 5, (firEle + r + 1) * (nk + 1) * 6]
if(r + 1 > 11):
counters[k] = [0, 0, 0, 0, 0, 0]
r += 1
if(r > 11):
r = 0
self.pmHoler.updatePMCounters(counters, r)
nowTime = datetime.now()
t1 = nowTime + timedelta(minutes = 1)
t2 = nowTime + timedelta(minutes = 6)
msg = t1.strftime('%Y-%m-%d %H:%M') + ' to ' + t2.strftime('%H:%M')
sbcPMlogger.info(msg + ', PM counters are: ' + str(self.pmHoler.getCounters()))'''
deltaSec = 5 - (datetime.now().second % 5)
time.sleep(deltaSec)
else:
time.sleep(5)
def updateSBCCounters(self):
xmlPath = self.parPath + self.sep + 'config' + self.sep + 'sbc' + self.sep + 'sbc_node.xml'
# insNode = SbcNodeInfo(xmlPath)
# node = insNode.getNodeInfoMap()
et = ET.parse(xmlPath)
tNow = datetime.now()
        r = (tNow + timedelta(seconds=30)).minute // 5 - 1
firEle = self.getFirstEle(r)
cMap = {}
channels = et.findall('./channel')
for channel in channels:
k = int(channel.find('./channelId').text)
nk = k % 8
cMap[k] = []
c1 = str((firEle + r + 1) * (nk + 1) * 1)
c2 = str((firEle + r + 1) * (nk + 1) * 2)
c3 = str((firEle + r + 1) * (nk + 1) * 3)
c4 = str((firEle + r + 1) * (nk + 1) * 4)
c5 = str((firEle + r + 1) * (nk + 1) * 5)
c6 = str((firEle + r + 1) * (nk + 1) * 6)
channel.find('./c1').text = c1
channel.find('./c2').text = c2
channel.find('./c3').text = c3
channel.find('./c4').text = c4
channel.find('./c5').text = c5
channel.find('./c6').text = c6
cMap[k].append(c1)
cMap[k].append(c2)
cMap[k].append(c3)
cMap[k].append(c4)
cMap[k].append(c5)
cMap[k].append(c6)
versionTuple = sys.version_info[:2]
version = '.'.join(repr(v) for v in versionTuple)
lock.acquire()
if ('2.7' == version):
et.write(xmlPath, encoding='utf-8', xml_declaration=True, method='xml')
else:
et.write(xmlPath, encoding='utf-8')
lock.release()
t1 = tNow + timedelta(minutes=1)
t2 = tNow + timedelta(minutes=6)
msg = t1.strftime('%Y-%m-%d %H:%M') + ' to ' + t2.strftime('%H:%M')
sbcPMlogger.info(msg + ', Counters: ' + str(cMap))
def stop(self):
self.stopThread = True
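    # getFirstEle(num) returns 0 + 1 + ... + num, i.e. the triangular number
    # num * (num + 1) / 2 (for example getFirstEle(3) == 6); it is used as a base
    # offset when simulating the per-round counter values.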
def getFirstEle(self, num):
sum = 0
for i in range(0, num + 1):
sum += i
return sum
|
# Generated by Django 3.2.5 on 2021-07-29 04:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('src', '0012_courier_paypal_email'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='status',
field=models.CharField(choices=[('in', 'In'), ('out', 'Out')], default='in', max_length=20),
),
]
|
from django import forms
from django.forms.widgets import DateInput
from book.models import Book, Publisher, Author
class PublisherForm(forms.ModelForm):
class Meta:
model = Publisher
fields = ['name']
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ['name']
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = ['title', 'image', 'price', 'author', 'publisher']
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 12:38:44 2017
@author: ahefny, zmarinho
"""
from theano.tensor.shared_randomstreams import RandomStreams
class NoisyModel(object):
    '''
    Decorator of noisy_model: adds Gaussian noise to the state and the observations.
    '''
def __init__(self, obs_noise=0.0, obs_loc=0.0, state_noise=0.0, state_loc=0.0,
state_dim=0, rng=None):
self._srng = RandomStreams(seed=rng.seed())
self.rng = rng
self._obs_loc = obs_loc
self._state_loc = state_loc
self._obs_std = obs_noise
self._state_std = state_noise
self._state_dim = state_dim
        # precomputed Gaussian noise added to the state by _noisy_state
        self._state_noise = self._srng.normal(size=[self._state_dim], std=state_noise, avg=state_loc)
def _noisy_state(self, state):
if self._state_std>0:
state = state + self._state_noise
return state
def _noisy_obs(self, obs):
noise = 0.0
if self._obs_std>0:
noise = self.rng.normal(loc=self._obs_loc, scale=self._obs_std, size=obs.shape)
o = obs + noise
return o
|
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-12-05 16:37:57 +0000 (Mon, 05 Dec 2016)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check the time since last deployment of Blue Talon policies via the Policy Management server REST API
Outputs minutes since last deployment as well as the timestamp returned by the server, and in verbose mode also shows
the user, host and message from the last deployment
Optional thresholds may be applied against the time since last deployment in minutes, defaulting to a lower boundary
(the min:max threshold format can also be used) to raise alerts when fresh policy deployments are done. This enables
triggering warning/critical alerts in a stable environment where policies shouldn't be changing that much.
Tested on Blue Talon 2.12.0
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
from datetime import datetime
import logging
import json
import os
import sys
import traceback
try:
import requests
from requests.auth import HTTPBasicAuth
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, log_option, qquit, support_msg_api, isList, jsonpp
from harisekhon.utils import validate_host, validate_port, validate_user, validate_password
from harisekhon import NagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.3'
class CheckBlueTalonPolicyDeploymentAge(NagiosPlugin):
def __init__(self):
# Python 2.x
super(CheckBlueTalonPolicyDeploymentAge, self).__init__()
# Python 3.x
# super().__init__()
self.software = 'Blue Talon'
self.default_host = 'localhost'
self.default_port = 8111
self.default_user = 'btadminuser'
self.default_password = 'P@ssw0rd'
self.host = self.default_host
self.port = self.default_port
self.user = self.default_user
self.password = None
self.protocol = 'http'
self.api_version = '1.0'
self.msg = '{0} version unknown - no message defined'.format(self.software)
self.ok()
def add_options(self):
self.add_hostoption(name=self.software,
default_host=self.default_host,
default_port=self.default_port)
self.add_useroption(name=self.software, default_user=self.default_user)
self.add_opt('-S', '--ssl', action='store_true', help='Use SSL')
self.add_thresholds()
def process_options(self):
self.host = self.get_opt('host')
self.port = self.get_opt('port')
self.user = self.get_opt('user')
self.password = self.get_opt('password')
validate_host(self.host)
validate_port(self.port)
validate_user(self.user)
validate_password(self.password)
ssl = self.get_opt('ssl')
log_option('ssl', ssl)
if ssl:
self.protocol = 'https'
self.validate_thresholds(simple='lower', optional=True)
def run(self):
log.info('querying %s', self.software)
url = '{protocol}://{host}:{port}/PolicyManagement/{api_version}/deployments'\
.format(host=self.host, port=self.port, api_version=self.api_version, protocol=self.protocol)
log.debug('GET %s', url)
try:
req = requests.get(url, auth=HTTPBasicAuth(self.user, self.password))
except requests.exceptions.RequestException as _:
errhint = ''
if 'BadStatusLine' in str(_.message):
errhint = ' (possibly connecting to an SSL secured port without using --ssl?)'
elif self.protocol == 'https' and 'unknown protocol' in str(_.message):
errhint = ' (possibly connecting to a plain HTTP port with the -S / --ssl switch enabled?)'
qquit('CRITICAL', str(_) + errhint)
log.debug("response: %s %s", req.status_code, req.reason)
log.debug("content:\n%s\n%s\n%s", '='*80, req.content.strip(), '='*80)
if req.status_code == 400 and req.reason == 'Bad Request':
qquit('CRITICAL', '{0}: {1} (possibly new install with no deployments yet?)'\
.format(req.status_code, req.reason))
if req.status_code != 200:
qquit('CRITICAL', '{0}: {1}'.format(req.status_code, req.reason))
try:
json_list = json.loads(req.content)
if log.isEnabledFor(logging.DEBUG):
print(jsonpp(json_list))
print('='*80)
if not isList(json_list):
raise ValueError('returned content is not a list')
if not json_list:
qquit('UNKNOWN', 'no deployments found')
last_deployment = json_list[0]
userid = last_deployment['UserId']
description = last_deployment['Description']
hostname = last_deployment['HostName']
timestamp = last_deployment['timestamp']
last_deploy_datetime = datetime.strptime(timestamp, '%b %d, %Y %H:%M:%S %p')
except (KeyError, ValueError) as _:
qquit('UNKNOWN', 'error parsing output from {software}: {exception}: {error}. {support_msg}'\
.format(software=self.software,
exception=type(_).__name__,
error=_,
support_msg=support_msg_api()))
timedelta = datetime.now() - last_deploy_datetime
mins = int(int(timedelta.total_seconds()) / 60)
self.msg = "{software} last deployment was at '{timestamp}', {mins} mins ago".format(software=self.software,
timestamp=timestamp,
mins=mins)
self.check_thresholds(mins)
if self.verbose:
self.msg += " by user '{userid}', host = '{hostname}', description = '{description}'"\
.format(userid=userid, hostname=hostname, description=description)
self.msg += ' | mins_since_last_deployment={mins}{thresholds}'\
.format(mins=mins, thresholds=self.get_perf_thresholds(boundary='lower'))
if __name__ == '__main__':
CheckBlueTalonPolicyDeploymentAge().main()
|
import logging
import random
import re
from itertools import islice
import numpy as np
from .dbconnect import *
from .singleton import *
from .properties import Properties
p = Properties()
db = DBConnect()
class DataModel(metaclass=Singleton):
'''
DataModel is a dictionary of perImageObjectCounts indexed by (TableNumber,ImageNumber)
'''
def __init__(self):
self.data = {} # {imKey:obCount, ... }
self.groupMaps = {} # { groupName:{imKey:groupKey, }, ... }
# eg: groupMaps['Wells'][(0,4)] ==> (3,'A01')
self.revGroupMaps = {} # { groupName:{groupKey:imKey, }, ... }
# eg: groupMaps['Wells'][(3,'A01')] ==> [(0,1),(0,2),(0,3),(0,4)]
self.groupColNames = {} # {groupName:[col_names,...], ...}
# eg: {'Plate+Well': ['plate','well'], ...}
self.groupColTypes = {} # {groupName:[col_types,...], ...}
self.cumSums = [] # cumSum[i]: sum of objects in images 1..i (inclusive)
self.obCount = 0
self.keylist = []
self.filterkeys = {} # sets of image keys keyed by filter name
self.plate_map = {} # maps well names to (x,y) plate locations
self.rev_plate_map = {} # maps (x,y) plate locations to well names
def __str__(self):
return str(self.obCount)+" objects in "+ \
str(len(self.data))+" images"
def PopulateModel(self, delete_model=False):
if delete_model:
self.DeleteModel()
elif not self.IsEmpty():
# No op if already populated
return
if db is None:
logging.error("Error: No database connection!")
return
if p.check_tables == 'yes':
db.CheckTables()
# Initialize per-image object counts to zero
imKeys = db.GetAllImageKeys()
for key in imKeys:
key = tuple([int(k) for k in key]) # convert keys to to int tuples
self.data[key] = 0
# Compute per-image object counts
res = db.GetPerImageObjectCounts()
for r in res:
key = tuple([int(k) for k in r[:-1]])
self.data[key] = r[-1]
self.obCount += r[-1]
self.keylist = list(self.data.keys())
# Build a cumulative sum array to use for generating random objects quickly
self.cumSums = np.zeros(len(self.data)+1, dtype='int')
for i, imKey in enumerate(self.keylist):
self.cumSums[i+1] = self.cumSums[i]+self.data[imKey]
self.groupMaps, self.groupColNames = db.GetGroupMaps()
self.revGroupMaps, _ = db.GetGroupMaps(reverse=True)
for group in self.groupMaps:
self.groupColTypes[group] = [type(col) for col in list(self.groupMaps[group].items())[0][1]]
def DeleteModel(self):
self.data = {}
self.groupMaps = {}
self.cumSums = []
self.obCount = 0
def _if_empty_populate(self):
        if self.IsEmpty():
self.PopulateModel()
def get_total_object_count(self):
self._if_empty_populate()
return self.obCount
def GetRandomObject(self, N):
'''
Returns a random object key
We expect self.data.keys() to return the keys in the SAME ORDER
every time since we build cumSums from that same ordering. This
need not necessarily be in sorted order.
'''
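        # Illustrative example: with per-image counts [3, 0, 2] the cumulative array
        # is [0, 3, 3, 5]; drawing obIdx = 4 gives imIdx = 3 via searchsorted, so the
        # third image key is chosen and the object index within it is 4 - 3 = 1.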
self._if_empty_populate()
obIdxs = random.sample(list(range(1, self.obCount + 1)), N)
obKeys = []
for obIdx in obIdxs:
imIdx = np.searchsorted(self.cumSums, obIdx, 'left')
# SUBTLETY: images which have zero objects will appear as repeated
# sums in the cumulative array, so we must pick the first index
# of any repeated sum, otherwise we are picking an image with no
# objects
while self.cumSums[imIdx] == self.cumSums[imIdx-1]:
imIdx -= 1
imKey = list(self.data.keys())[imIdx-1]
obIdx = obIdx-self.cumSums[imIdx-1] # object number relative to this image
obKeys.append(db.GetObjectIDAtIndex(imKey, obIdx))
return obKeys
def GetRandomObjects(self, N, imKeys=None, with_replacement=False):
'''
Returns N random objects.
If a list of imKeys is specified, GetRandomObjects will return
objects from only these images.
'''
self._if_empty_populate()
if N > self.obCount:
logging.info(f"{N} is greater than the number of objects. Fetching {self.obCount} objects.")
N = self.obCount
        if imKeys is None:
if p.use_legacy_fetcher:
return self.GetRandomObject(N)
else:
return db.GetRandomObjectsSQL(None, N)
elif imKeys == []:
return []
else:
if p.use_legacy_fetcher:
sums = np.cumsum([self.data[imKey] for imKey in imKeys])
if sums[-1] < 1:
return []
obs = []
if with_replacement:
obIdxs = random.choices(list(range(1, sums[-1] + 1)), k=N)
else:
if N > sums[-1]:
logging.info(f"Number of available objects is less than {N}. Fetching {sums[-1]} objects.")
N = sums[-1]
obIdxs = random.sample(list(range(1, sums[-1]+1)), k=N)
for obIdx in obIdxs:
index = np.searchsorted(sums, obIdx, 'left')
if index != 0:
while sums[index] == sums[index-1]:
index -= 1
obIdx = obIdx-sums[index-1]
obKey = db.GetObjectIDAtIndex(imKeys[index], obIdx)
obs.append(obKey)
else:
obs = db.GetRandomObjectsSQL(imKeys, N)
if with_replacement and 0 < len(obs) < N:
obs = random.choices(obs, k=N)
return obs
def GetAllObjects(self, filter_name=None, gate_name=None, imkeys=[], N=None):
self._if_empty_populate()
if imkeys == []:
imkeys = self.GetAllImageKeys(filter_name=filter_name, gate_name=gate_name)
if p.use_legacy_fetcher:
obs = (x for im in imkeys for x in self.GetObjectsFromImage(im))
if N is None:
return list(obs)
else:
return list(islice(obs, N))
else:
return db.GetAllObjectsSQL(imkeys, N)
def GetObjectsFromImage(self, imKey):
self._if_empty_populate()
if p.use_legacy_fetcher:
obKeys=[]
for i in range(1,self.GetObjectCountFromImage(imKey)+1):
obKeys.append(db.GetObjectIDAtIndex(imKey, i))
else:
obKeys = db.GetAllObjectsSQL([imKey])
return obKeys
def GetAllImageKeys(self, filter_name=None, gate_name=None):
''' Returns all object keys. If a filter is passed in, only the image
keys that fall within the filter will be returned.'''
self._if_empty_populate()
if filter_name is not None:
return db.GetFilteredImages(filter_name)
elif gate_name is not None:
return db.GetGatedImages(gate_name)
else:
return list(self.data.keys())
def GetObjectCountFromImage(self, imKey):
''' Returns the number of objects in the specified image. '''
self._if_empty_populate()
return self.data[imKey]
def GetImageKeysAndObjectCounts(self, filter_name=None):
''' Returns pairs of imageKeys and object counts. '''
self._if_empty_populate()
if filter_name is None:
return list(self.data.items())
else:
return [(imKey, self.data[imKey]) for imKey in db.GetFilteredImages(filter_name)]
def GetGroupColumnNames(self, group, include_table_name=False):
''' Returns the key column names associated with the specified group. '''
self._if_empty_populate()
if include_table_name:
# return a copy of this list so it can't be modified
return list(self.groupColNames[group])
else:
return [col.split('.')[-1] for col in self.groupColNames[group]]
def GetGroupColumnTypes(self, group):
''' Returns the key column types associated with the specified group. '''
self._if_empty_populate()
return list(self.groupColTypes[group]) # return a copy of this list so it can't be modified
def SumToGroup(self, imdata, group):
'''
Takes image data of the form:
imdata = { imKey : np.array(values), ... }
and sums the data into the specified group to return:
groupdata = { groupKey : np.array(values), ... }
'''
self._if_empty_populate()
groupData = {}
nvals = len(list(imdata.values())[0])
for imKey in list(imdata.keys()):
# initialize each entry to [0,0,...]
groupData[self.groupMaps[group][imKey]] = np.zeros(nvals)
for imKey, vals in list(imdata.items()):
# add values to running sum of this group
groupData[self.groupMaps[group][imKey]] += vals
return groupData
def GetImagesInGroupWithWildcards(self, group, groupKey, filter_name=None):
'''
Returns all imKeys in a particular group.
'__ANY__' in the groupKey matches anything.
'''
self._if_empty_populate()
if '__ANY__' in groupKey:
# if there are wildcards in the groupKey then accumulate
# imkeys from all matching groupKeys
def matches(key1, key2):
return all([(a==b or b=='__ANY__') for a,b in zip(key1,key2)])
imkeys = []
for gkey, ikeys in list(self.revGroupMaps[group].items()):
if matches(gkey,groupKey):
imkeys += ikeys
return imkeys
else:
# if there are no wildcards simply lookup the imkeys
return self.GetImagesInGroup(group, groupKey, filter_name)
def GetImagesInGroup(self, group, groupKey, filter_name=None):
''' Returns all imKeys in a particular group. '''
self._if_empty_populate()
try:
imkeys = self.revGroupMaps[group][groupKey]
except KeyError:
return []
# apply filter if supplied
if filter_name is not None:
if filter_name not in list(self.filterkeys.keys()):
self.filterkeys[filter_name] = db.GetFilteredImages(filter_name)
imkeys = set(imkeys).intersection(self.filterkeys[filter_name])
return imkeys
def GetGroupKeysInGroup(self, group):
''' Returns all groupKeys in specified group '''
self._if_empty_populate()
return list(set(self.groupMaps[group].values()))
def IsEmpty(self):
return self.data == {}
def populate_plate_maps(self):
'''Computes plate_maps which maps well names to their corresponding
plate positions, and rev_plate_maps which does the reverse.
eg: plate_maps['A01'] = (0,0)
rev_plate_maps[(0,0)] = 'A01'
'''
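        # Illustrative examples (assuming an 8x12 plate_shape): with well_format 'A01',
        # well 'B03' maps to (row, col) = (1, 2); with well_format '123', well '13'
        # maps to (1, 0). rev_plate_map stores the inverse of each mapping.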
if p.well_format == 'A01':
well_re = r'^[A-Za-z]\d+$'
elif p.well_format == '123':
well_re = r'^\d+$'
else:
raise ValueError('Unknown well format: %s' % repr(p.well_format))
pshape = p.plate_shape
res = db.execute('SELECT DISTINCT %s FROM %s '%(p.well_id, p.image_table))
for r in res:
well = r[0]
# Make sure all well entries match the naming format
if type(well) == str:
well = well.strip()
assert re.match(well_re, well), 'Well "%s" did not match well naming format "%s"'%(r[0], p.well_format)
elif type(well) == int:
if not p.well_format == '123':
'''
import wx
wx.MessageBox('Well "%s" did not match well naming format "%s".\n'
'If your wells are in numerical format then add\n'
'the line "well_format = 123" to your properties'
'file. Trying well_format = 123.'%(r[0], p.well_format), 'Error')
'''
p.well_format = '123'
try:
self.populate_plate_maps()
except:
import wx
wx.MessageBox('Error when trying well_format = 123. Try another well naming format.', 'Error')
return
if p.well_format == 'A01':
if pshape[0] <= 26:
row = 'abcdefghijklmnopqrstuvwxyz'.index(well[0].lower())
col = int(well[1:]) - 1
elif pshape[0] <= 52:
row = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'.index(well[0])
col = int(well[1:]) - 1
else:
                    raise ValueError('Plates with more than 52 rows cannot use well format "A01". Check your properties file.')
self.plate_map[well] = (row, col)
self.rev_plate_map[(row, col)] = well
elif p.well_format == '123':
row = (int(well) - 1) // pshape[1]
col = (int(well) - 1) % pshape[1]
self.plate_map[well] = (row, col)
self.rev_plate_map[(row, col)] = well
def get_well_position_from_name(self, well_name):
'''returns the plate position tuple (row, col) corresponding to
the given well_name.
'''
try:
well_name = well_name.strip()
        except AttributeError:
pass
if self.plate_map == {}:
self.populate_plate_maps()
if well_name in list(self.plate_map.keys()):
return self.plate_map[well_name]
else:
raise KeyError('Well name "%s" could not be mapped to a plate position.' % well_name)
def get_well_name_from_position(self, xxx_todo_changeme):
'''returns the well name (eg: "A01") corresponding to the given
plate position tuple.
'''
(row, col) = xxx_todo_changeme
if self.plate_map == {}:
self.populate_plate_maps()
if (row, col) in list(self.rev_plate_map.keys()):
return self.rev_plate_map[(row, col)]
else:
raise KeyError('Plate position "%s" could not be mapped to a well key.' % str((row,col)))
if __name__ == "__main__":
p = Properties()
p.LoadFile('../properties/2009_02_19_MijungKwon_Centrosomes.properties')
db = DBConnect()
db.connect()
d = DataModel()
d.PopulateModel()
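    # Illustrative sketch only (assumes an A01-style layout with at most 26 rows,
    # which this properties file may or may not use): the plate-map helpers would
    # then behave roughly like
    #   d.get_well_position_from_name('B03')  -> (1, 2)
    #   d.get_well_name_from_position((1, 2)) -> 'B03'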
|
from __future__ import print_function
from constant import *
from models import *
from dataset import *
from utils import f_score
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import f1_score,precision_score,recall_score
import random
import math
import json
import copy
from time import time
predEtt_tag = 36
Ett_tag = 36
argRoles = []
class Accuracy(object):
def __init__(self):
self.correct = 0
self.total = 0
def add(self, is_correct):
self.total += 1
if is_correct:
self.correct += 1
def get(self):
if self.total == 0:
return 0.0
else:
return float(self.correct) / self.total
def clear(self):
self.correct = 0
self.total = 0
TestSet = Dataset("test")
acc_NA = Accuracy()
acc_not_NA = Accuracy()
acc_total = Accuracy()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
get_prob = Discriminator_argProb2()
get_prob.to(device, non_blocking=True)
get_prob.load_state_dict(torch.load("Pmodel_noise.tar", map_location='cpu'))
discriminator = Discriminator_argument()
discriminator.to(device, non_blocking=True)
discriminator.load_state_dict(torch.load("Amodel.tar", map_location='cpu'))
def random_index(rate):
start = 0
index = 0
randnum = random.random()
for index, scope in enumerate(rate):
start += scope
if randnum <= start:
break
return index
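# Rough intuition check (values are illustrative, not from this file): random_index
# treats `rate` as sampling weights, so random_index([0.2, 0.5, 0.3]) returns index 1
# about half of the time and index 2 about 30% of the time.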
def gen_argRole(pred, ettIdx, ettLength):
global argRoles
for j in range(len(pred)):
argRole = np.zeros((SenLen), dtype = np.int64)
for arg_idx in range(len(pred)):
arg_in_sen = list(range(ettIdx[arg_idx], ettIdx[arg_idx] + ettLength[arg_idx]))
if arg_idx != j:
if pred[arg_idx] == 0:
argRole[arg_in_sen] = Ett_tag
else:
argRole[arg_in_sen] = pred[arg_idx]
else:
argRole[arg_in_sen] = predEtt_tag
argRoles.append(argRole)
def gibbs(dataset):
discriminator.eval()
get_prob.eval()
# get_prob2.eval()
preds = []
labels = []
pred_subtypes=[]
golden_subtypes=[]
senLabels = []
ettIdxs = []
ettLengths = []
results = []
# get the pred0
for words, pos1, pos2, loc, loc_mark, subtype, maskL, maskM, maskR, label, ettIdx, ettLength, senLabel, subtype_golden in dataset.batchs_gibbs():
words, pos1, pos2, loc, loc_mark, subtype, maskL, maskM, maskR, label, ettIdx, ettLength, senLabel = words.to(device), pos1.to(device), pos2.to(device), loc.to(device), loc_mark.to(device), subtype.to(device), maskL.to(device), maskM.to(device), maskR.to(device), label.to(device), ettIdx.to(device), ettLength.to(device), senLabel.to(device)
loss, scores, pred = discriminator(words, pos1, pos2, loc, loc_mark, subtype, maskL, maskM, maskR, label)
preds.extend(pred[1].cpu().numpy())
labels.extend(label.cpu().numpy())
pred_subtypes.extend(subtype.cpu().numpy())
golden_subtypes.extend(subtype_golden.cpu().numpy())
senLabels.extend(senLabel.cpu().numpy().tolist())
ettIdxs.extend(ettIdx)
ettLengths.extend(ettLength)
for i in range(len(words)):
state = {}
results.append(state)
preds0 = copy.deepcopy(preds)
# Transfer for N+K times
global argRoles
for trans_time in range(int(1/k_an) - 1):
if trans_time % 10 == 0:
cnt=0
cnt1=0
FN=0
FP=0
for i in range(len(preds)):
if labels[i]==0:
cnt1+=1
if preds[i]!=labels[i]:
cnt+=1
if preds[i]==0 and labels[i]!=0:
FN+=1
if preds[i]!=0 and labels[i]==0:
FP+=1
print("EVAL %s #Wrong %d #NegToPos %d #PosToNeg %d #All %d #Negs %d"%("Test",cnt,FP,FN,len(preds),cnt1))
acc, _, f1 = f_score(preds, labels, pred_subtypes, golden_subtypes)
print ("trans_time:{}, acc:{}, f1:{}".format(trans_time,acc,f1))
# generate the argRoles
L = 0
R = 0
sen_idx = senLabels[0]
argRoles = []
for i in range(len(senLabels)):
if senLabels[i] == sen_idx:
R += 1
else:
sen_idx = senLabels[i]
if L != R:
gen_argRole(preds[L:R], ettIdxs[L:R], ettLengths[L:R])
L = R
R += 1
sen_idx = senLabels[i]
if L<len(preds):
gen_argRole(preds[L:], ettIdxs[L:], ettLengths[L:])
# get prob
probs = []
probs_max = []
probs_argmax = []
words_sum = []
L = 0
for words, pos1, pos2, loc, loc_mark, subtype, maskL, maskM, maskR, label, ettIdx, ettLength, senLabel, subtype_golden in dataset.batchs_gibbs():
argRole = torch.LongTensor(argRoles[L: L+len(words)]).to(device)
L += len(words)
words, pos1, pos2, loc, loc_mark, subtype, maskL, maskM, maskR, label, ettIdx, ettLength, senLabel = words.to(device), pos1.to(device), pos2.to(device), loc.to(device), loc_mark.to(device), subtype.to(device), maskL.to(device), maskM.to(device), maskR.to(device), label.to(device), ettIdx.to(device), ettLength.to(device), senLabel.to(device)
_, prob, _ = get_prob(words, pos1, pos2, loc, loc_mark, subtype, argRole, maskL, maskM, maskR, label)
prob_max, prob_argmax = torch.max(prob, dim = 1)
prob_max = prob_max.detach().cpu().numpy().tolist()
prob_argmax = prob_argmax.detach().cpu().numpy().tolist()
prob = prob.detach().cpu().numpy().tolist()
words_sum.extend(words)
probs.extend(prob)
probs_max.extend(prob_max)
probs_argmax.extend(prob_argmax)
# transfer and sum the states
probs_max_an = []
sen_idx = 0
L = 0
R = 0
for i, prob in enumerate(probs):
if senLabels[i] == sen_idx:
R += 1
else:
sen_idx = senLabels[i]
if L != R:
probMax_sum = 0
probs_max_an = []
for idx in range(L, R):
probMax_sum += probs_max[idx] ** (1/(1 - k_an * trans_time))
for idx in range(L, R):
probs_max_an.append(probs_max[idx] ** (1/(1 - k_an * trans_time)) / probMax_sum)
idx_trans = random_index(probs_max_an) + L
preds[idx_trans] = probs_argmax[idx_trans]
L = R
R += 1
# print the results
sen_idx = 0
L = 0
R = 0
for i in range(len(senLabels)):
if senLabels[i] == sen_idx:
R += 1
else:
sen_idx = senLabels[i]
if L != R:
pred1 = np.zeros((R-L), dtype = np.int64)
print ('---sentence%d---'%sen_idx)
print ('pred0:', preds0[L:R])
print ('label:', labels[L:R])
print ('result:', preds[L:R])
L = R
R += 1
if __name__ == '__main__':
gibbs(TestSet)
|
from flask import Blueprint
bp = Blueprint("Name", __name__)
|
from typing import Any, List, Literal, TypedDict
from .FHIR_base64Binary import FHIR_base64Binary
from .FHIR_Element import FHIR_Element
from .FHIR_string import FHIR_string
from .FHIR_uri import FHIR_uri
# A type of a manufactured item that is used in the provision of healthcare without being substantially changed through that activity. The device may be a medical or non-medical device.
FHIR_Device_UdiCarrier = TypedDict(
"FHIR_Device_UdiCarrier",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# The device identifier (DI) is a mandatory, fixed portion of a UDI that identifies the labeler and the specific version or model of a device.
"deviceIdentifier": FHIR_string,
# Extensions for deviceIdentifier
"_deviceIdentifier": FHIR_Element,
# Organization that is charged with issuing UDIs for devices. For example, the US FDA issuers include :1) GS1: http://hl7.org/fhir/NamingSystem/gs1-di, 2) HIBCC:http://hl7.org/fhir/NamingSystem/hibcc-dI, 3) ICCBBA for blood containers:http://hl7.org/fhir/NamingSystem/iccbba-blood-di, 4) ICCBA for other devices:http://hl7.org/fhir/NamingSystem/iccbba-other-di.
"issuer": FHIR_uri,
# Extensions for issuer
"_issuer": FHIR_Element,
# The identity of the authoritative source for UDI generation within a jurisdiction. All UDIs are globally unique within a single namespace with the appropriate repository uri as the system. For example, UDIs of devices managed in the U.S. by the FDA, the value is http://hl7.org/fhir/NamingSystem/fda-udi.
"jurisdiction": FHIR_uri,
# Extensions for jurisdiction
"_jurisdiction": FHIR_Element,
# The full UDI carrier of the Automatic Identification and Data Capture (AIDC) technology representation of the barcode string as printed on the packaging of the device - e.g., a barcode or RFID. Because of limitations on character sets in XML and the need to round-trip JSON data through XML, AIDC Formats *SHALL* be base64 encoded.
"carrierAIDC": FHIR_base64Binary,
# Extensions for carrierAIDC
"_carrierAIDC": FHIR_Element,
# The full UDI carrier as the human readable form (HRF) representation of the barcode string as printed on the packaging of the device.
"carrierHRF": FHIR_string,
# Extensions for carrierHRF
"_carrierHRF": FHIR_Element,
# A coded entry to indicate how the data was entered.
"entryType": Literal[
"barcode", "rfid", "manual", "card", "self-reported", "unknown"
],
# Extensions for entryType
"_entryType": FHIR_Element,
},
total=False,
)
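# A minimal usage sketch, not part of the FHIR definitions above: at runtime a
# TypedDict is an ordinary dict and total=False allows partial population. All
# values below are invented for illustration.
example_udi_carrier: FHIR_Device_UdiCarrier = {
    "deviceIdentifier": "00844588003288",
    "issuer": "http://hl7.org/fhir/NamingSystem/gs1-di",
    "jurisdiction": "http://hl7.org/fhir/NamingSystem/fda-udi",
    "carrierHRF": "(01)00844588003288(17)141120(10)7654321D",
    "entryType": "barcode",
}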
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
try:
from emoji.unicode_codes import UNICODE_EMOJI
import unidecode
except ImportError:
raise ImportError('Please `pip install emoji unidecode` for the twitter task.')
import parlai.core.build_data as build_data
import os
def replace_emoji(x):
if x in UNICODE_EMOJI.keys():
return ' ' + UNICODE_EMOJI[x].replace(':', '@') + ' '
else:
return x
def split_punctuation(x):
return (
x
.replace('.', ' . ')
.replace('. . .', '...')
.replace(',', ' , ')
.replace(';', ' ; ')
.replace(':', ' : ')
.replace('!', ' ! ')
.replace('?', ' ? ')
.replace('"', ' " ')
.replace('(', ' ( ')
.replace(')', ' ) ')
)
def create_fb_format(data, dpath):
fw1 = open(os.path.join(dpath, 'train.txt'), 'w')
fw2 = open(os.path.join(dpath, 'valid.txt'), 'w')
fw3 = open(os.path.join(dpath, 'test.txt'), 'w')
for i in range(0, len(data) - 1, 2):
fout = fw1
if (i % 500) == 0:
fout = fw2
elif (i % 500) == 2:
fout = fw3
use = True
x = data[i].rstrip(' ').lstrip(' ').replace('\t', ' ')
y = data[i + 1].rstrip(' ').lstrip(' ').replace('\t', ' ')
x = x.replace('|', ' __PIPE__ ')
y = y.replace('|', ' __PIPE__ ')
x = ''.join(list(map(replace_emoji, x)))
y = ''.join(list(map(replace_emoji, y)))
x = split_punctuation(unidecode.unidecode(x))
y = split_punctuation(unidecode.unidecode(y))
x = ' '.join(x.split())
y = ' '.join(y.split())
if len(x) < 1 or len(y) < 1:
use = False
if use:
s = 'text:' + x + '\tlabels:' + y + '\tepisode_done:True'
fout.write('{} \n'.format(s))
fw1.close()
fw2.close()
fw3.close()
def build(opt):
version = 'v1.1'
dpath = os.path.join(opt['datapath'], 'Twitter')
if not build_data.built(dpath, version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
fname1 = "twitter_en_big.txt.gz.partaa"
fname2 = "twitter_en_big.txt.gz.partab"
url = 'https://github.com/Marsan-Ma/chat_corpus/raw/master/'
build_data.download(url + fname1, dpath, fname1)
build_data.download(url + fname2, dpath, fname2)
file1 = os.path.join(dpath, fname1)
file2 = os.path.join(dpath, fname2)
file3 = "twitter_en_big.txt.gz"
outzipfile = os.path.join(dpath, file3)
build_data.cat(file1, file2, outzipfile)
import gzip
with gzip.open(outzipfile, 'r') as f:
file_content = bytes.decode(f.read())
data = file_content.split('\n')[2:]
create_fb_format(data, dpath)
os.remove(outzipfile)
# Mark the data as built.
build_data.mark_done(dpath, version)
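# Typical invocation sketch (the 'datapath' value is an assumption, not from this file):
#   build({'datapath': '/tmp/ParlAI/data'})
# downloads the two archive chunks, concatenates and decompresses them, then writes
# train.txt / valid.txt / test.txt in the format produced by create_fb_format().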
|
# ATM machines allow 4 or 6 digit PIN codes and PIN codes cannot contain anything
# but exactly 4 digits or exactly 6 digits.
# If the function is passed a valid PIN string, return true, else return false.
def validate_pin(pin):
    # A valid PIN is a string of exactly 4 or exactly 6 digits.
    try:
        return len(pin) in (4, 6) and all(c in "0123456789" for c in pin)
    except TypeError:
        return False
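# Equivalent regular-expression sketch (for comparison only; note that \d also
# accepts non-ASCII digit characters):
#   import re
#   def validate_pin_re(pin):
#       return bool(re.fullmatch(r"\d{4}|\d{6}", str(pin)))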
Test.describe("validate_pin")
Test.it("should return False for pins with length other than 4 or 6")
Test.assert_equals(validate_pin("1"),False, "Wrong output for '1'")
Test.assert_equals(validate_pin("12"),False, "Wrong output for '12'")
Test.assert_equals(validate_pin("123"),False, "Wrong output for '123'")
Test.assert_equals(validate_pin("12345"),False, "Wrong output for '12345'")
Test.assert_equals(validate_pin("1234567"),False, "Wrong output for '1234567'")
Test.assert_equals(validate_pin("-1234"),False, "Wrong output for '-1234'")
Test.assert_equals(validate_pin("1.234"),False, "Wrong output for '1.234'")
Test.assert_equals(validate_pin("00000000"),False, "Wrong output for '00000000'")
Test.it("should return False for pins which contain characters other than digits")
Test.assert_equals(validate_pin("a234"),False, "Wrong output for 'a234'")
Test.assert_equals(validate_pin(".234"),False, "Wrong output for '.234'")
Test.assert_equals(validate_pin("-123"),False, "Wrong output for '-123'")
Test.assert_equals(validate_pin("-1.234"),False, "Wrong output for '-1.234'")
Test.it("should return True for valid pins")
Test.assert_equals(validate_pin("1234"),True, "Wrong output for '1234'")
Test.assert_equals(validate_pin("0000"),True, "Wrong output for '0000'")
Test.assert_equals(validate_pin("1111"),True, "Wrong output for '1111'")
Test.assert_equals(validate_pin("123456"),True, "Wrong output for '123456'")
Test.assert_equals(validate_pin("098765"),True, "Wrong output for '098765'")
Test.assert_equals(validate_pin("000000"),True, "Wrong output for '000000'")
Test.assert_equals(validate_pin("123456"),True, "Wrong output for '123456'")
Test.assert_equals(validate_pin("090909"),True, "Wrong output for '090909'")
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import data_processing
data = data_processing.load_data(download=True)
new_data = data_processing.convert2onehot(data)
# prepare training data
new_data = new_data.values.astype(np.float32) # change to numpy array and float32
np.random.shuffle(new_data)
sep = int(0.7*len(new_data))
train_data = new_data[:sep] # training data (70%)
test_data = new_data[sep:] # test data (30%)
# build network
tf_input = tf.placeholder(tf.float32, [None, 25], "input")
tfx = tf_input[:, :21]
tfy = tf_input[:, 21:]
l1 = tf.layers.dense(tfx, 128, tf.nn.relu, name="l1")
l2 = tf.layers.dense(l1, 128, tf.nn.relu, name="l2")
out = tf.layers.dense(l2, 4, name="l3")
prediction = tf.nn.softmax(out, name="pred")
loss = tf.losses.softmax_cross_entropy(onehot_labels=tfy, logits=out)
accuracy = tf.metrics.accuracy( # return (acc, update_op), and create 2 local variables
labels=tf.argmax(tfy, axis=1), predictions=tf.argmax(out, axis=1),)[1]
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = opt.minimize(loss)
sess = tf.Session()
sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
# training
plt.ion()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
accuracies, steps = [], []
for t in range(4000):
# training
batch_index = np.random.randint(len(train_data), size=32)
sess.run(train_op, {tf_input: train_data[batch_index]})
if t % 50 == 0:
# testing
acc_, pred_, loss_ = sess.run([accuracy, prediction, loss], {tf_input: test_data})
accuracies.append(acc_)
steps.append(t)
print("Step: %i" % t,"| Accurate: %.2f" % acc_,"| Loss: %.2f" % loss_,)
# visualize testing
ax1.cla()
for c in range(4):
bp = ax1.bar(c+0.1, height=sum((np.argmax(pred_, axis=1) == c)), width=0.2, color='red')
bt = ax1.bar(c-0.1, height=sum((np.argmax(test_data[:, 21:], axis=1) == c)), width=0.2, color='blue')
        ax1.set_xticks(range(4))
        ax1.set_xticklabels(["accepted", "good", "unaccepted", "very good"])
ax1.legend(handles=[bp, bt], labels=["prediction", "target"])
ax1.set_ylim((0, 400))
ax2.cla()
ax2.plot(steps, accuracies, label="accuracy")
ax2.set_ylim(ymax=1)
ax2.set_ylabel("accuracy")
plt.pause(0.01)
plt.ioff()
plt.show()
|
def ack(m, n):
"""Computes the Ackermann function A(m,n).
Args:
m, n: non-negative integers.
"""
if m == 0:
return n + 1
if n == 0:
return ack(m - 1, 1)
return ack(m - 1, ack(m, n - 1))
print(ack(3, 4))
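# A rough variant for illustration (not part of the original): the same recurrence
# with memoization via functools.lru_cache, which avoids recomputing repeated
# subproblems; Python's recursion limit still bounds how far m and n can grow.
from functools import lru_cache
@lru_cache(maxsize=None)
def ack_memo(m, n):
    if m == 0:
        return n + 1
    if n == 0:
        return ack_memo(m - 1, 1)
    return ack_memo(m - 1, ack_memo(m, n - 1))
print(ack_memo(3, 4))  # 125, matching ack(3, 4)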
|
# -*- coding: utf-8 -*-
'''Generic test method module
'''
from __future__ import with_statement, division, absolute_import, print_function
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class ANTest(object):
'''Generic test method class
'''
__doc__ = ''
def __init__(self, *args, **kwargs):
self._is_audio = None
self._isnt_audio = None
def get_doc(self):
'''Returns method documentation (info)
'''
return self.__doc__
def is_audio(self):
'''Returns analyze result of audio-test
'''
if self._is_audio is not None:
return self._is_audio
if self._isnt_audio is not None:
return not self._isnt_audio
def isnt_audio(self):
'''Returns analyze result of non-audio-test
'''
is_audio = self.is_audio()
if is_audio is not None:
return not is_audio
def set_is_audio(self):
'''Sets analyze result of audio-test
'''
self._is_audio = True
def set_isnt_audio(self):
'''Sets analyze result of non-audio-test
'''
self._isnt_audio = True
@abstractmethod
def analyze(self, samples_store):
'''Analyzes samples
'''
raise NotImplementedError('Abstract method not overridden!')
# vim: ts=4:sw=4:et:fdm=indent:ff=unix
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym.spaces import Box, Discrete
def count_vars(module):
return sum(p.numel() for p in module.parameters() if p.requires_grad)
def linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):
steps_left = decay_period + warmup_steps - step
bonus = (1.0 - epsilon) * steps_left / decay_period
bonus = np.clip(bonus, 0.0, 1.0 - epsilon)
return epsilon + bonus
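# Illustrative check of the schedule (numbers chosen for the example): with
# decay_period=100, warmup_steps=10 and a final epsilon of 0.1, the value stays at
# 1.0 through the warmup and then decays linearly, e.g.
#   linearly_decaying_epsilon(100, 60, 10, 0.1) == 0.1 + 0.9 * 50 / 100 == 0.55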
class MLP(nn.Module):
def __init__(
self,
layers,
activation=torch.tanh,
output_activation=None,
output_scale=1,
output_squeeze=False,
):
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activation
self.output_scale = output_scale
self.output_squeeze = output_squeeze
for i, layer in enumerate(layers[1:]):
self.layers.append(nn.Linear(layers[i], layer))
nn.init.kaiming_normal_(
self.layers[i].weight, mode="fan_in", nonlinearity="relu"
)
nn.init.zeros_(self.layers[i].bias)
def forward(self, inputs):
x = inputs
for layer in self.layers[:-1]:
x = self.activation(layer(x))
if self.output_activation is None:
x = self.layers[-1](x) * self.output_scale
else:
x = self.output_activation(self.layers[-1](x)) * self.output_scale
return x.squeeze() if self.output_squeeze else x
class CategoricalDQNetwork(nn.Module):
def __init__(
self,
in_features,
action_space,
num_atoms=50,
Vmin=-100,
Vmax=100,
hidden_sizes=(400, 300),
activation=torch.relu,
output_activation=None,
):
super(CategoricalDQNetwork, self).__init__()
self.action_dim = action_space.n
self.num_atoms = num_atoms
        # Register the value-atom support as a buffer so it follows .to(device)/.cuda().
        self.register_buffer("supports", torch.linspace(Vmin, Vmax, num_atoms))
self.q = MLP(
layers=[in_features] + list(hidden_sizes) + [self.action_dim * num_atoms],
activation=activation,
output_activation=output_activation,
)
def forward(self, x, log=False):
q = self.q(x).view(-1, self.action_dim, self.num_atoms)
if log:
return F.log_softmax(q, dim=-1)
else:
return F.softmax(q, dim=-1)
def policy(self, x):
q_dist = self.forward(x) # (bsz, action_dim, num_atoms)
q_vals = (q_dist * self.supports.expand_as(q_dist)).sum(-1) # (bsz, action_dim)
action = torch.argmax(q_vals, dim=1, keepdim=True) # (bsz, 1)
return action
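# Minimal usage sketch (the observation size, batch size and action count below are
# assumptions for illustration, not values from this file):
if __name__ == "__main__":
    net = CategoricalDQNetwork(in_features=10, action_space=Discrete(4))
    obs = torch.randn(32, 10)
    dist = net(obs)            # (32, 4, 50): softmax over the 50 value atoms
    greedy = net.policy(obs)   # (32, 1): action with the largest expected value
    print(dist.shape, greedy.shape)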
|
# coding: utf-8
from __future__ import print_function
from PySide2.QtWidgets import (
QDialog,
QHBoxLayout,
QPushButton,
QVBoxLayout,
QPlainTextEdit
)
from ProfileInspector.src.util import doc_file
class CaptureStdoutDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
# self.setWhatsThis(doc_file('launcher_capture_output'))
self.setWindowTitle('Profile Inspector - Capture stdout')
self.setMinimumSize(600, 300)
self.setStyleSheet('''
QPlainTextEdit {
background-color: rgb(33, 33, 33);
color: white;
font-family: Courier
}''')
self.output = QPlainTextEdit()
self.output.setReadOnly(True)
self.clear_text_btn = QPushButton('Clear Text')
self.clear_text_btn.setToolTip('Clear output text')
self.force_quit_btn = QPushButton('Force Quit')
self.force_quit_btn.setToolTip('Force quit process')
_buttons_layout = QHBoxLayout()
_buttons_layout.addWidget(self.clear_text_btn)
_buttons_layout.addWidget(self.force_quit_btn)
self.clear_text_btn.clicked.connect(self.clear_text)
_layout = QVBoxLayout()
_layout.addWidget(self.output)
_layout.addLayout(_buttons_layout)
# _layout.addWidget(self.clear_text_btn)
# _layout.addWidget(self.force_quit_btn)
self.setLayout(_layout)
def clear_text(self):
self.output.clear()
|
from rolepermissions.roles import AbstractUserRole
class Developer(AbstractUserRole):
available_permissions = {
# Tags
'access_server': True,
'access_testing': True,
'access_sources': True,
'access_admin': False,
# Templates
'create_server_template': False,
'read_server_template': False,
'update_server_template': False,
'delete_server_template': False,
# Profiles
'create_server_profile': True,
'read_server_profile': True,
'update_server_profile': True,
'delete_server_profile': True,
# Parameters
'create_server_parameter': True,
'read_server_parameter': True,
'update_server_parameter': True,
'delete_server_parameter': True,
# Keywords
'create_keyword': True,
'read_keyword': True,
'update_keyword': True,
'delete_keyword': True,
# Test Cases
'create_test_case': True,
'read_test_case': True,
'update_test_case': True,
'delete_test_case': True,
# Test suites
'create_test_suite': True,
'read_test_suite': True,
'update_test_suite': True,
'delete_test_suite': True,
# Imported Script
'create_imported_script': True,
'read_imported_script': True,
'update_imported_script': True,
'delete_imported_script': True,
# Collection
'create_collection': True,
'read_collection': True,
'update_collection': True,
'delete_collection': True,
# Scripts
'run_scripts': True,
# Sources - Robot
'create_robot': True,
'read_robot': True,
'update_robot': True,
'delete_robot': True,
# Sources - Libraries
'create_libraries': True,
'read_libraries': True,
'update_libraries': True,
'delete_libraries': True,
# Sources - Product
'create_product': False,
'read_product': False,
'update_product': False,
'delete_product': False,
# Phases
'create_phases': False,
'read_phases': False,
'update_phases': False,
'delete_phases': False,
# Commands
'create_commands': True,
'read_commands': True,
'update_commands': True,
'delete_commands': True,
# Users
'create_users': False,
'read_users': False,
'update_users': False,
'delete_users': False,
# Arguments
'create_argument': True,
'read_argument': True,
'update_argument': True,
'delete_argument': True,
}
class Tester(AbstractUserRole):
available_permissions = {
# Tags
'access_server': False,
'access_testing': True,
'access_sources': False,
'access_admin': False,
# Test Case
'read_test_case': True,
# Test Suite
'read_test_suite': True,
# Collection
'read_collection': True,
# Imported Script
'read_imported_script': True,
# Scripts
'run_scripts': True,
}
class Auditor(AbstractUserRole):
available_permissions = {
# Tags
'access_server': True,
'access_testing': True,
'access_sources': True,
'read_server_template': True,
# Profiles
'create_server_profile': True,
'read_server_profile': True,
'update_server_profile': True,
'delete_server_profile': True,
# Parameters
'create_server_parameter': True,
'read_server_parameter': True,
'update_server_parameter': True,
'delete_server_parameter': True,
# Keywords
'read_keyword': True,
# Test Case
'read_test_case': True,
# Test Suite
'read_test_suite': True,
# Collection
'read_collection': True,
# Imported Script
'read_imported_script': True,
# Sources - Robot
'read_robot': True,
# Sources - Libraries
'read_libraries': True,
# Sources - Product
'read_product': True,
# Commands
'read_commands': True,
# Phases
'read_phases': True,
}
class Owner(AbstractUserRole):
available_permissions = {
# Tags
'access_server': True,
'access_testing': True,
'access_sources': True,
'access_admin': False,
# Profiles
'create_server_profile': True,
'read_server_profile': True,
'update_server_profile': True,
'delete_server_profile': True,
# Parameters
'create_server_parameter': True,
'read_server_parameter': True,
'update_server_parameter': True,
'delete_server_parameter': True,
# Keywords
'create_keyword': True,
'read_keyword': True,
'update_keyword': True,
'delete_keyword': True,
# Test Cases
'create_test_case': True,
'read_test_case': True,
'update_test_case': True,
'delete_test_case': True,
# Test suites
'create_test_suite': True,
'read_test_suite': True,
'update_test_suite': True,
'delete_test_suite': True,
# Imported Script
'create_imported_script': True,
'read_imported_script': True,
'update_imported_script': True,
'delete_imported_script': True,
# Collection
'create_collection': True,
'read_collection': True,
'update_collection': True,
'delete_collection': True,
# Scripts
'run_scripts': True,
# Sources - Robot
'create_robot': True,
'read_robot': True,
'update_robot': True,
'delete_robot': True,
# Sources - Libraries
'create_libraries': True,
'read_libraries': True,
'update_libraries': True,
'delete_libraries': True,
# Commands
'create_commands': True,
'read_commands': True,
'update_commands': True,
'delete_commands': True,
}
|
#!/usr/bin/env python
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the copy database utility which ensures a database
is exactly the same between two servers.
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import multiprocessing
import os
import re
import sys
import time
from mysql.utilities.exception import FormatError, UtilError
from mysql.utilities.command import dbcopy
from mysql.utilities.common.ip_parser import parse_connection
from mysql.utilities.common.messages import (PARSE_ERR_DB_PAIR,
PARSE_ERR_DB_PAIR_EXT)
from mysql.utilities.common.my_print_defaults import MyDefaultsReader
from mysql.utilities.common.options import (add_skip_options, add_verbosity,
check_verbosity, check_rpl_options,
check_skip_options, add_engines,
add_all, check_all, add_locking,
add_regexp, add_rpl_mode,
add_rpl_user, add_ssl_options,
get_ssl_dict, setup_common_options,
add_character_set_option,
check_password_security)
from mysql.utilities.common.sql_transform import (is_quoted_with_backticks,
remove_backtick_quoting)
from mysql.utilities.common.tools import (check_connector_python,
print_elapsed_time)
# Constants
NAME = "MySQL Utilities - mysqldbcopy "
DESCRIPTION = "mysqldbcopy - copy databases from one server to another"
USAGE = "%prog --source=user:pass@host:port:socket " \
"--destination=user:pass@host:port:socket orig_db:new_db"
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Needed for freeze support to avoid RuntimeError when running as a Windows
# executable, otherwise ignored.
multiprocessing.freeze_support()
# Setup the command parser
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, True, False)
# Setup utility-specific options:
# Connection information for the source server
parser.add_option("--source", action="store", dest="source",
type="string", default="root@localhost:3306",
help="connection information for source server in the "
"form: <user>[:<password>]@<host>[:<port>][:<socket>]"
" or <login-path>[:<port>][:<socket>]"
" or <config-path>[<[group]>].")
# Connection information for the destination server
parser.add_option("--destination", action="store", dest="destination",
type="string",
help="connection information for destination server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>]"
" or <config-path>[<[group]>].")
# Add character set option
add_character_set_option(parser)
# Overwrite mode
parser.add_option("-d", "--drop-first", action="store_true", default=False,
help="drop the new database or object if it exists",
dest="do_drop")
# Add the exclude database option
parser.add_option("-x", "--exclude", action="append", dest="exclude",
type="string", default=None, help="exclude one or more "
"objects from the operation using either a specific "
"name (e.g. db1.t1), a LIKE pattern (e.g. db1.t% or "
"db%.%) or a REGEXP search pattern. To use a REGEXP "
"search pattern for all exclusions, you must also "
"specify the --regexp option. Repeat the --exclude "
"option for multiple exclusions.")
# Add the all database options
add_all(parser, "databases")
# Add the skip common options
add_skip_options(parser)
# Add verbosity and quiet (silent) mode
add_verbosity(parser, True)
# Add engine options
add_engines(parser)
# Add locking options
add_locking(parser)
# Add regexp
add_regexp(parser)
# Replication user and password
add_rpl_user(parser)
# Add replication options but don't include 'both'
add_rpl_mode(parser, False, False)
# Add ssl options
add_ssl_options(parser)
# Add option to skip GTID generation
parser.add_option("--skip-gtid", action="store_true", default=False,
dest="skip_gtid", help="skip creation and execution of "
"GTID statements during copy.")
# Add multiprocessing option.
parser.add_option("--multiprocess", action="store", dest="multiprocess",
type="int", default="1", help="use multiprocessing, "
"number of processes to use for concurrent execution. "
"Special values: 0 (number of processes equal to the "
"CPUs detected) and 1 (default - no concurrency).")
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args)
try:
skips = check_skip_options(opt.skip_objects)
except UtilError:
_, err, _ = sys.exc_info()
print("ERROR: {0}".format(err.errmsg))
sys.exit(1)
# Fail if no options listed.
if opt.destination is None:
parser.error("No destination server specified.")
# Fail if no db arguments or all
if len(args) == 0 and not opt.all:
parser.error("You must specify at least one database to copy or "
"use the --all option to copy all databases.")
# Fail if we have arguments and all databases option listed.
check_all(parser, opt, args, "databases")
# Warn if quiet and verbosity are both specified
check_verbosity(opt)
# Process --exclude values to remove unnecessary quotes (when used) in
# order to avoid further matching issues.
if opt.exclude:
# Remove unnecessary outer quotes.
exclude_list = [pattern.strip("'\"") for pattern in opt.exclude]
else:
exclude_list = opt.exclude
# Check multiprocessing options.
if opt.multiprocess < 0:
parser.error("Number of processes '{0}' must be greater or equal than "
"zero.".format(opt.multiprocess))
num_cpu = multiprocessing.cpu_count()
if opt.multiprocess > num_cpu and not opt.quiet:
print("# WARNING: Number of processes '{0}' is greater than the "
"number of CPUs '{1}'.".format(opt.multiprocess, num_cpu))
    # Warning for non-posix (windows) systems if too many processes are used.
num_db = len(args)
if (os.name != 'posix' and num_db and opt.multiprocess > num_db
and not opt.quiet):
print("# WARNING: Number of processes '{0}' is greater than the "
"number of databases to copy '{1}'.".format(opt.multiprocess,
num_db))
# Set options for database operations.
options = {
"skip_tables": "tables" in skips,
"skip_views": "views" in skips,
"skip_triggers": "triggers" in skips,
"skip_procs": "procedures" in skips,
"skip_funcs": "functions" in skips,
"skip_events": "events" in skips,
"skip_grants": "grants" in skips,
"skip_create": "create_db" in skips,
"skip_data": "data" in skips,
"do_drop": opt.do_drop,
"verbose": opt.verbosity >= 1,
"quiet": opt.quiet,
"debug": opt.verbosity == 3,
"exclude_patterns": exclude_list,
"new_engine": opt.new_engine,
"def_engine": opt.def_engine,
"all": opt.all,
"locking": opt.locking,
"use_regexp": opt.use_regexp,
"rpl_user": opt.rpl_user,
"rpl_mode": opt.rpl_mode,
"verbosity": opt.verbosity,
"skip_gtid": opt.skip_gtid,
"charset": opt.charset,
"multiprocess": num_cpu if opt.multiprocess == 0 else opt.multiprocess,
}
options.update(get_ssl_dict(opt))
# Parse source connection values
try:
# Create a basic configuration reader first for optimization purposes.
# I.e., to avoid repeating the execution of some methods in further
# parse_connection methods (like, searching my_print_defaults tool).
config_reader = MyDefaultsReader(options, False)
source_values = parse_connection(opt.source, config_reader, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Source connection values invalid: {0}.".format(err))
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Source connection values invalid: "
"{0}.".format(err.errmsg))
# Parse destination connection values
try:
dest_values = parse_connection(opt.destination, config_reader, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Destination connection values invalid: "
"{0}.".format(err))
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Destination connection values invalid: "
"{0}.".format(err.errmsg))
# Check to see if attempting to use --rpl on the same server
if (opt.rpl_mode or opt.rpl_user) and source_values == dest_values:
parser.error("You cannot use the --rpl option for copying on the "
"same server.")
# Check replication options
check_rpl_options(parser, opt)
# Build list of databases to copy
db_list = []
for db in args:
# Split the database names considering backtick quotes
grp = re.match(r"(`(?:[^`]|``)+`|\w+)(?:(?::)(`(?:[^`]|``)+`|\w+))?",
db)
if not grp:
parser.error(PARSE_ERR_DB_PAIR.format(db_pair=db,
db1_label='orig_db',
db2_label='new_db'))
db_entry = grp.groups()
orig_db, new_db = db_entry
# Verify if the size of the databases matched by the REGEX is equal to
# the initial specified string. In general, this identifies the missing
# use of backticks.
matched_size = len(orig_db)
if new_db:
# add 1 for the separator ':'
matched_size += 1
matched_size += len(new_db)
if matched_size != len(db):
parser.error(PARSE_ERR_DB_PAIR_EXT.format(db_pair=db,
db1_label='orig_db',
db2_label='new_db',
db1_value=orig_db,
db2_value=new_db))
# Remove backtick quotes (handled later)
orig_db = remove_backtick_quoting(orig_db) \
if is_quoted_with_backticks(orig_db) else orig_db
new_db = remove_backtick_quoting(new_db) \
if new_db and is_quoted_with_backticks(new_db) else new_db
db_entry = (orig_db, new_db)
db_list.append(db_entry)
try:
# Record start time.
if opt.verbosity >= 3:
start_copy_time = time.time()
# Copy databases concurrently for non posix systems (windows).
if options['multiprocess'] > 1 and os.name != 'posix':
# Create copy databases tasks.
copy_db_tasks = []
for db in db_list:
copy_task = {
'source_srv': source_values,
'dest_srv': dest_values,
'db_list': [db],
'options': options
}
copy_db_tasks.append(copy_task)
# Create process pool.
workers_pool = multiprocessing.Pool(
processes=options['multiprocess']
)
# Concurrently copy databases.
workers_pool.map_async(dbcopy.multiprocess_db_copy_task,
copy_db_tasks)
workers_pool.close()
workers_pool.join()
else:
# Copy all specified databases (no database level concurrency).
# Note: on POSIX systems multiprocessing is applied at the object
# level (not database).
dbcopy.copy_db(source_values, dest_values, db_list, options)
# Print elapsed time.
if opt.verbosity >= 3:
print_elapsed_time(start_copy_time)
except UtilError:
_, err, _ = sys.exc_info()
print("ERROR: {0}".format(err.errmsg))
sys.exit(1)
sys.exit()
|
from singleton.singleton import Singleton
@Singleton
class Config(object):
    def __init__(self, vars=None):
        # Avoid sharing a mutable default list between instances.
        self.vars = vars if vars is not None else []
    def get_vars(self):
        return self.vars
|
import subprocess
try:
    # check_output returns bytes; decode so the revision is printed as text on Python 3.
    revision = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
    print("-DPIO_SRC_REV=\\\"%s\\\"" % revision)
except (subprocess.CalledProcessError, OSError):
    print("-DPIO_SRC_REV=\\\"UNKNOWN\\\"")
|
nome = str(input('Qual é seu nome? ')).upper()
if nome == 'ROBERTO' or nome == 'OBA':
print('Que nome bonito!')
elif nome == 'PEDRO' or nome =='JOSE' or nome == 'JOAO' or nome == 'MARIA':
    print('Seu nome é bem popular no Brasil!')
elif nome in ('THAIS', 'LORENA'):
print('Belo nome feminino!')
else:
print('Que nome comum você tem!')
print('Tenha um bom dia, {}!'.format(nome))
|
import shutil
import tempfile
import six
from . import (
GalaxyTestBase,
test_util
)
class TestGalaxyDatasets(GalaxyTestBase.GalaxyTestBase):
def setUp(self):
super(TestGalaxyDatasets, self).setUp()
self.history_id = self.gi.histories.create_history(name='TestShowDataset')['id']
self.dataset_contents = "line 1\nline 2\rline 3\r\nline 4"
self.dataset_id = self._test_dataset(self.history_id, contents=self.dataset_contents)
def tearDown(self):
self.gi.histories.delete_history(self.history_id, purge=True)
@test_util.skip_unless_galaxy('release_19.05')
def test_show_nonexistent_dataset(self):
with self.assertRaises(Exception):
self.gi.datasets.show_dataset('nonexistent_id')
def test_show_dataset(self):
self.gi.datasets.show_dataset(self.dataset_id)
def test_download_dataset(self):
with self.assertRaises(Exception):
self.gi.datasets.download_dataset(None)
expected_contents = six.b("\n".join(self.dataset_contents.splitlines()) + "\n")
# download_dataset() with file_path=None is already tested in TestGalaxyTools.test_paste_content()
# self._wait_and_verify_dataset(self.dataset_id, expected_contents)
tempdir = tempfile.mkdtemp(prefix='bioblend_test_')
try:
downloaded_dataset = self.gi.datasets.download_dataset(
self.dataset_id, file_path=tempdir,
maxwait=GalaxyTestBase.BIOBLEND_TEST_JOB_TIMEOUT)
self.assertTrue(downloaded_dataset.startswith(tempdir))
with open(downloaded_dataset, 'rb') as f:
self.assertEqual(f.read(), expected_contents)
finally:
shutil.rmtree(tempdir)
with tempfile.NamedTemporaryFile(prefix='bioblend_test_') as f:
download_filename = self.gi.datasets.download_dataset(
self.dataset_id, file_path=f.name, use_default_filename=False,
maxwait=GalaxyTestBase.BIOBLEND_TEST_JOB_TIMEOUT)
self.assertEqual(download_filename, f.name)
f.flush()
self.assertEqual(f.read(), expected_contents)
def test_show_stderr(self):
stderr = self.gi.datasets.show_stderr(self.dataset_id)
self.assertIsNotNone(stderr)
def test_show_stdout(self):
stdout = self.gi.datasets.show_stdout(self.dataset_id)
self.assertIsNotNone(stdout)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 5 10:36:37 2021
Get status of stored reference numbers
@author: heiko
"""
import requests
import pandas as pd
import json
import hashlib
import hmac
import base64
#%% Functions
def getSAPCOOKIE(headers):
'''
Get MYSAPSSO2 cookie.
'''
r = requests.get('https://%s/%s/%s'% (server_cct,guest_path,'/login'),headers=headers)
print(r.status_code)
new_SAP = r.cookies['MYSAPSSO2']
cookies = dict(MYSAPSSO2=new_SAP)
return cookies
def getSessionID(cookies,headers):
'''
Get session ID.
'''
r = requests.get('https://%s/%s%s'% (server_cct,SRRESTPath,'/session'),headers=headers,cookies=cookies)
print(r.status_code)
new_session = json.loads(r.text)
headers["X-Session"] = new_session['session_id']
return headers
def check_status_later(ref_no, headers, cookies):
'''
Check status of uploaded data using reference number and original header info from check_status_stored.
'''
r = requests.get('https://%s/%s%s%s'% (server_cct,SRRESTPath,'/sr/',ref_no), headers=headers,cookies=cookies)
data_output = json.loads(r.content)
print(r.status_code)
return data_output
def check_status_stored(cookie):
'''
Get reference numbers and original headers to be used to check status of uploaded data
'''
status_recall = pd.read_csv('../data/status_recall.csv')
status_current = pd.DataFrame()
for i in range(0,len(status_recall)):
headers = status_recall.iloc[i,0:3].to_dict()
ref_no = status_recall.iloc[i]['ref_no']
stat = check_status_later(ref_no,headers,cookie)
df_stat_store = pd.Series(stat)
df_stat_store = df_stat_store.to_frame().transpose()
status_current = pd.concat([status_current,df_stat_store])
return status_current
#%%
if __name__=='__main__':
keys = pd.read_csv('../data/keys.csv')
pub = keys.iloc[0,1]
priv = keys.iloc[1,1]
postman_details = [{"key": "server","value": "qaeservices1.capetown.gov.za","enabled": True},
{"key": "CURGuestRESTPath","value": "coct/api/zcur-guest/","enabled": True},
{"key": "SRRESTPath","value": "coct/api/zsreq","enabled": True}]
server_cct = postman_details[0]['value']
guest_path = postman_details[1]['value']
SRRESTPath = postman_details[2]['value']
headers = {}
headers["X-Service"] = pub # public key
cookie = getSAPCOOKIE(headers)
headers_session = getSessionID(cookie,headers)
stat_current = check_status_stored(cookie)
stat_orig = pd.read_csv('../data/status_original.csv')
# Note: I ran this again the next day, and it is definitely referring to different data now...
# not sure if that is a consequence of this being a test server?
|
import re
from collections import defaultdict
from django.conf import settings
def convert_to_valid_boolean(value):
if value is None:
return False
elif isinstance(value, list):
list_value = value[0]
if hasattr(list_value, "value"):
string_value = list_value.value
if string_value == "Yes":
return True
elif isinstance(value, bool):
return value
return False
def create_file_version_map(files, prefix=None):
version_map = defaultdict(list)
if files:
for file_name in files:
version = get_version_from_file_name(file_name)
if prefix:
version_file_name = prefix + ":" + file_name.removeprefix(settings.FILE_CLEAN_UP_PREFIX)
else:
version_file_name = file_name.removeprefix(settings.FILE_CLEAN_UP_PREFIX)
version_map[version].append(version_file_name)
return version_map
def get_version_from_file_name(file_name):
search = re.search(r"v([\d.]+)", file_name)
if search is not None:
return search.group(1)
else:
return "undefined"
def merge_version_maps(current, future):
def already_added(file, current_results):
for current_file in current_results:
if current_file in file:
return True
return False
result = defaultdict(list)
for key, value in current.items():
result[key].extend(value)
for key, value in future.items():
current_results = result[key]
if not current_results:
current_results.extend(value)
else:
for file in value:
if not already_added(file, current_results):
current_results.append(file)
return dict(result)
def create_version_map(current_file_list, future_file_list):
version_map = create_file_version_map(current_file_list)
future_version_map = create_file_version_map(future_file_list, settings.GIT_FUTURE_BRANCH)
impex_file_version_map = merge_version_maps(version_map, future_version_map)
return impex_file_version_map
def filter_impex_files(file_list):
return [name for name in file_list if name.endswith(".impex")]
def filter_non_impex_files(file_list):
return [name for name in file_list if not name.endswith(".impex")]
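# Rough usage sketch (file names are invented; GIT_FUTURE_BRANCH and
# FILE_CLEAN_UP_PREFIX come from Django settings and are assumed here):
#   current = ["impex/products_v2_init.impex"]
#   future = ["impex/products_v3_init.impex"]
#   create_version_map(current, future)
#   -> {"2": ["impex/products_v2_init.impex"],
#       "3": ["<GIT_FUTURE_BRANCH>:impex/products_v3_init.impex"]}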
|
# -*- coding: utf-8 -*-
'''Decoder structures'''
from pyresult import is_error, value, ok, error
from six import u
from toolz import curry
@curry
def array(factory, vals):
'''Decode string/value as list
array :: (a -> Result e b) -> List a -> Result e (List b)
>>> from pydecoder.primitives import to_int
>>> array(to_int, [1, 2, 3])
Result(status='Ok', value=[1, 2, 3])
>>> array(to_int, None)
Result(status='Error', value="'None' isn't list or tuple.")
'''
if not isinstance(vals, (list, tuple)):
return error(u('\'{}\' isn\'t list or tuple.').format(vals))
result = []
for val in vals:
rv = factory(val)
if is_error(rv):
return rv
result.append(value(rv))
return ok(result)
|
import argparse
import math
from datetime import datetime
import numpy as np
import socket
import importlib
import zipfile
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
import dataset
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model_rpm', help='Model name [default: model_rpm]')
parser.add_argument('--train_list', default='datalist/RPM_train.txt', help='Datalist for training')
parser.add_argument('--test_list', default='datalist/RPM_test.txt', help='Datalist for testing')
parser.add_argument('--save_folder', default='../output/', help='Output folder [default: ../output/]')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
parser.add_argument('--num_frame', type=int, default=5, help='Number of frames to generate [default: 5]')
parser.add_argument('--max_epoch', type=int, default=121, help='Epoch to run [default: 121]')
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 4]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--load_checkpoint', type=str, default=None, help='checkpoint to load from')
FLAGS = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu)
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
NUM_FRAME = FLAGS.num_frame
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
TRAIN_LIST = FLAGS.train_list
TEST_LIST = FLAGS.test_list
LOAD_CHECKPOINT = FLAGS.load_checkpoint
#DATA_PATH = os.path.join(ROOT_DIR, '../data/')
DATA_PATH = '/fast/jamesn8/RPMNet/'
SAVE_DIR = os.path.join(FLAGS.save_folder, '%s_%s_%s' % (FLAGS.model, datetime.now().strftime('%Y-%m-%d-%H-%M-%S'), 'all'))
if not os.path.exists(SAVE_DIR): os.makedirs(SAVE_DIR)
LOG_FOUT = open(os.path.join(SAVE_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write("Comments: \n\n")
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
MODEL = importlib.import_module(FLAGS.model) # import network module
code_folder = os.path.abspath(os.path.dirname(__file__))
zip_name = os.path.join(SAVE_DIR, "code.zip")
filelist = []
for root, dirs, files in os.walk(code_folder):
for name in files:
filelist.append(os.path.join(root, name))
zip_code = zipfile.ZipFile(zip_name, "w", zipfile.ZIP_DEFLATED)
for tar in filelist:
arcname = tar[len(code_folder):]
zip_code.write(tar, arcname)
zip_code.close()
folder_ckpt = os.path.join(SAVE_DIR, 'ckpts')
if not os.path.exists(folder_ckpt): os.makedirs(folder_ckpt)
folder_summary = os.path.join(SAVE_DIR, 'summary')
if not os.path.exists(folder_summary): os.makedirs(folder_summary)
TRAIN_DATASET = dataset.MotionDataset(data_path=DATA_PATH, train_list=TRAIN_LIST, test_list=TEST_LIST, num_point=NUM_POINT, num_frame=NUM_FRAME, split='train', batch_size=BATCH_SIZE)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, pc_target_pl, disp_target_pl, part_seg_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, NUM_FRAME)
gt_mov_seg = tf.cast(tf.greater(part_seg_pl, 0), tf.int32)
is_training_pl = tf.placeholder(tf.bool, shape=())
batch = tf.get_variable('batch', [], initializer=tf.constant_initializer(0), trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
print("--- Get model and loss ---")
pred_pc, pred_disp, pred_seg, mov_mask, simmat_logits = MODEL.get_model(pointclouds_pl, NUM_FRAME, is_training_pl, bn_decay=bn_decay)
loss_ref = MODEL.get_ref_loss(pred_pc, pc_target_pl, gt_mov_seg)
loss_mov = MODEL.get_mov_loss(pred_pc, pc_target_pl, gt_mov_seg)
loss_mov_seg = MODEL.get_movseg_loss(pred_seg, gt_mov_seg)
loss_disp = MODEL.get_disp_loss(pred_disp, disp_target_pl, gt_mov_seg)
loss_partseg, part_err = MODEL.get_partseg_loss(simmat_logits, mov_mask, part_seg_pl)
loss_generator = loss_mov + loss_ref + loss_mov_seg + loss_disp
total_loss = loss_generator + loss_partseg
tf.summary.scalar('losses/generator_loss', loss_generator)
tf.summary.scalar('losses/partseg_loss', loss_partseg)
print("--- Get training operator ---")
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator")
generator_op = optimizer.minimize(loss_generator, var_list=generator_vars, global_step=batch)
partseg_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "partseg")
partseg_op = optimizer.minimize(loss_partseg, var_list=partseg_vars, global_step=batch)
rpm_op = optimizer.minimize(total_loss, global_step=batch)
saver = tf.train.Saver(max_to_keep=30)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(folder_summary, 'train'), sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
ops = {'pointclouds_pl': pointclouds_pl,
'pc_target_pl': pc_target_pl,
'disp_target_pl': disp_target_pl,
'part_seg_pl': part_seg_pl,
'is_training_pl': is_training_pl,
'pred_pc': pred_pc,
'pred_seg': pred_seg,
'loss_mov': loss_mov,
'loss_ref': loss_ref,
'loss_disp': loss_disp,
'loss_mov_seg': loss_mov_seg,
'loss_partseg': loss_partseg,
'part_err': part_err,
'generator_op': generator_op,
'partseg_op': partseg_op,
'rpm_op': rpm_op,
'merged': merged,
'step': batch}
#latest_ckpt = tf.train.LOAD_CHECKPOINT(folder_ckpt)
epoch_offset = 0
if LOAD_CHECKPOINT is not None:
log_string('LOADING CHECKPOINT ' + LOAD_CHECKPOINT)
#load_path = os.path.join(folder_ckpt, LOAD_CHECKPOINT)
pth, fname = os.path.split(LOAD_CHECKPOINT)
epoch_offset = int(fname[fname.find('-')+1:])+1
try:
saver.restore(sess, LOAD_CHECKPOINT)
except ValueError:
log_string('failed to open '+LOAD_CHECKPOINT)
else:
log_string('TRAINING ANEW')
for epoch in range(epoch_offset, MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
stage1_epochs=30
stage2_epochs=60
stage3_epochs=30
if epoch <= stage1_epochs:
train_one_epoch(sess, ops, train_writer, 'generator_op')
elif epoch <= stage1_epochs + stage2_epochs:
train_one_epoch(sess, ops, train_writer, 'partseg_op')
else:
train_one_epoch(sess, ops, train_writer, 'rpm_op')
if epoch % 30 == 0 and epoch != 0:
save_path = saver.save(sess, os.path.join(folder_ckpt, "model.ckpt"), global_step=epoch)
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer, training_type):
""" ops: dict mapping from string to tf ops """
is_training = True
log_string(str(datetime.now()))
# Shuffle train samples
train_idxs = np.arange(0, len(TRAIN_DATASET))
np.random.shuffle(train_idxs)
num_batches = len(TRAIN_DATASET)//BATCH_SIZE
loss_sum_mov = 0
loss_sum_ref = 0
loss_sum_disp = 0
loss_sum_movseg = 0
loss_sum_partseg = 0
total_seen_seg = 0
total_correct_seg = 0
total_part_err = 0
batch_idx = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_pc, batch_pc_target, batch_disp_target, batch_mov_seg, batch_part_seg = TRAIN_DATASET.get_batch(train_idxs, start_idx, end_idx)
feed_dict = {ops['pointclouds_pl']: batch_pc,
ops['pc_target_pl']: batch_pc_target,
ops['disp_target_pl']: batch_disp_target,
ops['part_seg_pl']: batch_part_seg,
ops['is_training_pl']: is_training}
summary, step, _, loss_mov_val, loss_ref_val, loss_movseg_val, pred_seg_val, loss_disp_val, loss_partseg_val, part_err_val = sess.run(
[ops['merged'], ops['step'], ops[training_type], ops['loss_mov'], ops['loss_ref'], ops['loss_mov_seg'], ops['pred_seg'], ops['loss_disp'], ops['loss_partseg'], ops['part_err']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
for batch in range(BATCH_SIZE):
pred_seg_label = np.argmax(pred_seg_val[batch], 1)
correct_seg = np.sum(pred_seg_label == batch_mov_seg[batch])
total_correct_seg += correct_seg
total_seen_seg += NUM_POINT
loss_sum_mov += loss_mov_val
loss_sum_ref += loss_ref_val
loss_sum_disp += loss_disp_val
loss_sum_movseg += loss_movseg_val
loss_sum_partseg += loss_partseg_val
total_part_err += part_err_val
log_string('EPOCH STAT:')
log_string('mean mov loss: %f' % (loss_sum_mov / num_batches))
log_string('mean ref loss: %f' % (loss_sum_ref / num_batches))
log_string('mean disp loss: %f' % (loss_sum_disp / num_batches))
log_string('mean mov seg loss: %f' % (loss_sum_movseg / num_batches))
log_string('mean part seg loss: %f' % (loss_sum_partseg / num_batches))
log_string('mov seg acc: %f'% (total_correct_seg / float(total_seen_seg)))
log_string('part seg err: %f' % (total_part_err / num_batches))
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
|
# Generated by Django 3.2.5 on 2021-07-22 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('devices', '0020_auto_20210721_1007'),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, unique=True)),
],
options={
'verbose_name': 'City',
'verbose_name_plural': 'Cities',
},
),
]
|
"""Miscellaneous functions
"""
import logging
from math import ceil
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from collections import Counter
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_object_dtype
from bokeh.models import Legend, FuncTickFormatter
from bokeh.plotting import Figure
from scipy.stats import gaussian_kde as gaussian_kde_
from scipy.stats import ks_2samp as ks_2samp_
from scipy.stats import normaltest as normaltest_
from scipy.stats import skewtest as skewtest_
from .dtypes import drop_null
LOGGER = logging.getLogger(__name__)
def to_dask(df: Union[pd.DataFrame, dd.DataFrame]) -> dd.DataFrame:
"""Convert a dataframe to a dask dataframe."""
if isinstance(df, dd.DataFrame):
return df
elif isinstance(df, dd.Series):
return df.to_frame()
if isinstance(df, pd.Series):
df = df.to_frame()
df_size = df.memory_usage(deep=True).sum()
npartitions = ceil(df_size / 128 / 1024 / 1024) # 128 MB partition size
return dd.from_pandas(df, npartitions=npartitions)
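# Example (a sketch; column names are illustrative): a small pandas frame is
# wrapped into a single-partition dask frame, since partitions target ~128 MB.
# >>> ddf = to_dask(pd.DataFrame({"a": range(10)}))
# >>> isinstance(ddf, dd.DataFrame), ddf.npartitions
# (True, 1)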
def preprocess_dataframe(
org_df: Union[pd.DataFrame, dd.DataFrame],
used_columns: Optional[Union[List[str], List[object]]] = None,
) -> dd.DataFrame:
"""
Make a dask dataframe with only used_columns.
This function will do the following:
1. keep only used_columns.
2. transform column names to strings (avoiding object column names) and rename
duplicate column names in the form {col}_{id}.
3. reset the index.
4. transform object columns to string columns (note that an object column can
contain cells of different types).
5. transform to a dask dataframe if the input is a pandas dataframe.
"""
if used_columns is None:
df = org_df.copy()
else:
# Process the case when used_columns are string column name,
# but org_df column name is object.
used_columns_set = set(used_columns)
used_cols_obj = set()
for col in org_df.columns:
if str(col) in used_columns_set or col in used_columns_set:
used_cols_obj.add(col)
df = org_df[list(used_cols_obj)]  # index with a list; newer pandas rejects sets
columns = list(df.columns)
# Resolve duplicate names in columns.
# Duplicate names will be renamed as col_{id}.
column_count = Counter(columns)
current_id: Dict[Any, int] = dict()
for i, col in enumerate(columns):
if column_count[col] > 1:
current_id[col] = current_id.get(col, 0) + 1
new_col_name = f"{col}_{current_id[col]}"
else:
new_col_name = f"{col}"
columns[i] = new_col_name
df.columns = columns
df = df.reset_index(drop=True)
# Since an object column can contain multiple types in different cells,
# transform object columns to string.
for col in df.columns:
if is_object_dtype(df[col].dtype):
df[col] = df[col].astype(str)
return to_dask(df)
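# Example of the duplicate-name handling above (illustrative data): a frame
# with columns ["a", "a", "b"] comes back with columns ["a_1", "a_2", "b"].
# >>> out = preprocess_dataframe(pd.DataFrame([[1, 2, 3]], columns=["a", "a", "b"]))
# >>> list(out.columns)
# ['a_1', 'a_2', 'b']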
def sample_n(arr: np.ndarray, n: int) -> np.ndarray: # pylint: disable=C0103
"""Sample n values uniformly from the range of the `arr`,
not from the distribution of `arr`'s elements."""
if len(arr) <= n:
return arr
subsel = np.linspace(0, len(arr) - 1, n)
subsel = np.floor(subsel).astype(int)
return arr[subsel]
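# Example: positions are spread evenly over the index range, so
# sample_n(np.arange(10), 3) selects indices [0, 4, 9] and returns
# array([0, 4, 9]).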
def relocate_legend(fig: Figure, loc: str) -> Figure:
"""Relocate legend(s) from center to `loc`."""
remains = []
targets = []
for layout in fig.center:
if isinstance(layout, Legend):
targets.append(layout)
else:
remains.append(layout)
fig.center = remains
for layout in targets:
fig.add_layout(layout, loc)
return fig
def cut_long_name(name: str, max_len: int = 12) -> str:
"""If the name is longer than `max_len`,
cut it to `max_len` length and append "..."""
# Bug 136 Fixed
name = str(name)
if len(name) <= max_len:
return name
return f"{name[:max_len]}..."
def fuse_missing_perc(name: str, perc: float) -> str:
"""Append (x.y%) to the name if `perc` is not 0."""
if perc == 0:
return name
return f"{name} ({perc:.1%})"
# Dictionary for mapping the time unit to its formatting. Each entry is of the
# form unit:(unit code for pd.Grouper freq parameter, pandas to_period strftime
# formatting for line charts, pandas to_period strftime formatting for box plot,
# label format).
DTMAP = {
"year": ("Y", "%Y", "%Y", "Year"),
"quarter": ("Q", "Q%q %Y", "Q%q %Y", "Quarter"),
"month": ("M", "%B %Y", "%b %Y", "Month"),
"week": ("W-SAT", "%d %B, %Y", "%d %b, %Y", "Week of"),
"day": ("D", "%d %B, %Y", "%d %b, %Y", "Date"),
"hour": ("H", "%d %B, %Y, %I %p", "%d %b, %Y, %I %p", "Hour"),
"minute": ("T", "%d %B, %Y, %I:%M %p", "%d %b, %Y, %I:%M %p", "Minute"),
"second": ("S", "%d %B, %Y, %I:%M:%S %p", "%d %b, %Y, %I:%M:%S %p", "Second"),
}
def _get_timeunit(min_time: pd.Timestamp, max_time: pd.Timestamp, dflt: int) -> str:
"""Auxillary function to find an appropriate time unit. Will find the
time unit such that the number of time units are closest to dflt."""
dt_secs = {
"year": 60 * 60 * 24 * 365,
"quarter": 60 * 60 * 24 * 91,
"month": 60 * 60 * 24 * 30,
"week": 60 * 60 * 24 * 7,
"day": 60 * 60 * 24,
"hour": 60 * 60,
"minute": 60,
"second": 1,
}
time_rng_secs = (max_time - min_time).total_seconds()
prev_bin_cnt, prev_unit = 0, "year"
for unit, secs_in_unit in dt_secs.items():
cur_bin_cnt = time_rng_secs / secs_in_unit
if abs(prev_bin_cnt - dflt) < abs(cur_bin_cnt - dflt):
return prev_unit
prev_bin_cnt = cur_bin_cnt
prev_unit = unit
return prev_unit
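# Example: for a span of roughly two years with dflt=100 the candidate bin
# counts are ~2 (year), ~8 (quarter), ~24 (month), ~104 (week) and ~730 (day);
# "week" is closest to 100, so "week" is returned.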
def _calc_box_stats(grp_srs: dd.Series, grp: str, dlyd: bool = False) -> pd.DataFrame:
"""
Auxiliary function to calculate the Tukey box plot statistics.
dlyd indicates whether this function is called while dask is computing in parallel (dask.delayed).
"""
stats: Dict[str, Any] = dict()
try:  # fallback for the case when no data is passed to this function
if dlyd:
qntls = np.round(grp_srs.quantile([0.25, 0.50, 0.75]), 3)
else:
qntls = np.round(grp_srs.quantile([0.25, 0.50, 0.75]).compute(), 3)
stats["q1"], stats["q2"], stats["q3"] = qntls[0.25], qntls[0.50], qntls[0.75]
except ValueError:
stats["q1"], stats["q2"], stats["q3"] = np.nan, np.nan, np.nan
iqr = stats["q3"] - stats["q1"]
stats["lw"] = grp_srs[grp_srs >= stats["q1"] - 1.5 * iqr].min()
stats["uw"] = grp_srs[grp_srs <= stats["q3"] + 1.5 * iqr].max()
if not dlyd:
stats["lw"], stats["uw"] = dask.compute(stats["lw"], stats["uw"])
otlrs = grp_srs[(grp_srs < stats["lw"]) | (grp_srs > stats["uw"])]
if len(otlrs) > 100: # sample 100 outliers
otlrs = otlrs.sample(frac=100 / len(otlrs))
stats["otlrs"] = list(otlrs) if dlyd else list(otlrs.compute())
return pd.DataFrame({grp: stats})
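# Sketch of the returned frame (the dask series `ddf["price"]` and the group
# name are illustrative): one column named after `grp`, indexed by the box
# plot statistics.
# >>> _calc_box_stats(ddf["price"], "price").index.tolist()
# ['q1', 'q2', 'q3', 'lw', 'uw', 'otlrs']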
def _calc_box_otlrs(df: dd.DataFrame) -> Tuple[List[str], List[float]]:
"""
Calculate the outliers for a box plot
"""
outx: List[str] = [] # list for the outlier groups
outy: List[float] = [] # list for the outlier values
for ind in df.index:
otlrs = df.loc[ind]["otlrs"]
outx = outx + [df.loc[ind]["grp"]] * len(otlrs)
outy = outy + otlrs
return outx, outy
def _calc_line_dt(
df: dd.DataFrame,
unit: str,
agg: Optional[str] = None,
ngroups: Optional[int] = None,
largest: Optional[bool] = None,
) -> Union[
Tuple[pd.DataFrame, Dict[str, int], str],
Tuple[pd.DataFrame, str, float],
Tuple[pd.DataFrame, str],
]:
"""
Calculate a line or multiline chart with date on the x axis. If df contains
one datetime column, it will make a line chart of the frequency of values. If
df contains a datetime and categorical column, it will compute the frequency
of each categorical value in each time group. If df contains a datetime and
numerical column, it will compute the aggregate of the numerical column grouped
by the time groups. If df contains a datetime, categorical, and numerical column,
it will compute the aggregate of the numerical column for values in the categorical
column grouped by time.
Parameters
----------
df
A dataframe
unit
The unit of time over which to group the values
agg
Aggregate to use for the numerical column
ngroups
Number of groups for the categorical column
largest
Use the largest or smallest groups in the categorical column
"""
# pylint: disable=too-many-locals
x = df.columns[0] # time column
unit = _get_timeunit(df[x].min(), df[x].max(), 100) if unit == "auto" else unit
if unit not in DTMAP.keys():
raise ValueError
grouper = pd.Grouper(key=x, freq=DTMAP[unit][0]) # for grouping the time values
# multiline charts
if ngroups and largest:
hist_dict: Dict[str, Tuple[np.ndarray, np.ndarray, List[str]]] = dict()
hist_lst: List[Tuple[np.ndarray, np.ndarray, List[str]]] = list()
agg = "freq" if agg is None else agg # default agg if unspecified for notational concision
# categorical column for grouping over, each resulting group is a line in the chart
grpby_col = df.columns[1] if len(df.columns) == 2 else df.columns[2]
df, grp_cnt_stats, largest_grps = _calc_groups(df, grpby_col, ngroups, largest)
groups = df.groupby([grpby_col])
for grp in largest_grps:
srs = groups.get_group(grp)
# calculate the frequencies or aggregate value in each time group
if len(df.columns) == 3:
dfr = srs.groupby(grouper)[df.columns[1]].agg(agg).reset_index()
else:
dfr = srs[x].to_frame().groupby(grouper).size().reset_index()
dfr.columns = [x, agg]
# if grouping by week, make the label for the week the beginning Sunday
dfr[x] = dfr[x] - pd.to_timedelta(6, unit="d") if unit == "week" else dfr[x]
# format the label
dfr["lbl"] = dfr[x].dt.to_period("S").dt.strftime(DTMAP[unit][1])
hist_lst.append((list(dfr[agg]), list(dfr[x]), list(dfr["lbl"])))
hist_lst = dask.compute(*hist_lst)
for elem in zip(largest_grps, hist_lst):
hist_dict[elem[0]] = elem[1]
return hist_dict, grp_cnt_stats, DTMAP[unit][3]
# single line charts
if agg is None: # frequency of datetime column
miss_pct = round(df[x].isna().sum() / len(df) * 100, 1)
dfr = drop_null(df).groupby(grouper).size().reset_index()
dfr.columns = [x, "freq"]
dfr["pct"] = dfr["freq"] / len(df) * 100
else: # aggregate over a second column
dfr = df.groupby(grouper)[df.columns[1]].agg(agg).reset_index()
dfr.columns = [x, agg]
dfr[x] = dfr[x] - pd.to_timedelta(6, unit="d") if unit == "week" else dfr[x]
dfr["lbl"] = dfr[x].dt.to_period("S").dt.strftime(DTMAP[unit][1])
return (dfr, DTMAP[unit][3], miss_pct) if agg is None else (dfr, DTMAP[unit][3])
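# Typical calls (a sketch; the dask frame and column names are illustrative):
# frequency of a single datetime column with an auto-chosen time unit:
# dfr, lbl, miss_pct = _calc_line_dt(ddf[["order_date"]], "auto")
# mean of a numerical column per month:
# dfr, lbl = _calc_line_dt(ddf[["order_date", "price"]], "month", agg="mean")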
def _calc_groups(
df: dd.DataFrame, x: str, ngroups: int, largest: bool = True
) -> Tuple[dd.DataFrame, Dict[str, int], List[str]]:
"""Auxillary function to parse the dataframe to consist of only the
groups with the largest counts.
"""
# group count statistics to inform the user of the sampled output
grp_cnt_stats: Dict[str, int] = dict()
srs = df.groupby(x).size()
srs_lrgst = srs.nlargest(n=ngroups) if largest else srs.nsmallest(n=ngroups)
try:
largest_grps = list(srs_lrgst.index.compute())
grp_cnt_stats[f"{x}_ttl"] = len(srs.index.compute())
except AttributeError:
largest_grps = list(srs_lrgst.index)
grp_cnt_stats[f"{x}_ttl"] = len(srs.index)
df = df[df[x].isin(largest_grps)]
grp_cnt_stats[f"{x}_shw"] = len(largest_grps)
return df, grp_cnt_stats, largest_grps
@dask.delayed(name="scipy-normaltest", pure=True, nout=2) # pylint: disable=no-value-for-parameter
def normaltest(arr: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Delayed version of scipy normaltest. Due to the dask version will
trigger a compute."""
return cast(Tuple[np.ndarray, np.ndarray], normaltest_(arr))
@dask.delayed(name="scipy-ks_2samp", pure=True, nout=2) # pylint: disable=no-value-for-parameter
def ks_2samp(data1: np.ndarray, data2: np.ndarray) -> Tuple[float, float]:
"""Delayed version of scipy ks_2samp."""
return cast(Tuple[float, float], ks_2samp_(data1, data2))
@dask.delayed( # pylint: disable=no-value-for-parameter
name="scipy-gaussian_kde", pure=True, nout=2
)
def gaussian_kde(arr: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Delayed version of scipy gaussian_kde."""
return cast(Tuple[np.ndarray, np.ndarray], gaussian_kde_(arr))
@dask.delayed(name="scipy-skewtest", pure=True, nout=2) # pylint: disable=no-value-for-parameter
def skewtest(arr: np.ndarray) -> Tuple[float, float]:
"""Delayed version of scipy skewtest."""
return cast(Tuple[float, float], skewtest_(arr))
def tweak_figure(
fig: Figure,
ptype: Optional[str] = None,
show_yticks: bool = False,
max_lbl_len: int = 15,
) -> None:
"""
Set some common attributes for a figure
"""
fig.axis.major_label_text_font_size = "9pt"
fig.title.text_font_size = "10pt"
fig.axis.minor_tick_line_color = "white"
if ptype in ["pie", "qq", "heatmap"]:
fig.ygrid.grid_line_color = None
if ptype in ["bar", "pie", "hist", "kde", "qq", "heatmap", "line"]:
fig.xgrid.grid_line_color = None
if ptype in ["bar", "hist", "line"] and not show_yticks:
fig.ygrid.grid_line_color = None
fig.yaxis.major_label_text_font_size = "0pt"
fig.yaxis.major_tick_line_color = None
if ptype in ["bar", "nested", "stacked", "heatmap", "box"]:
fig.xaxis.major_label_orientation = np.pi / 3
fig.xaxis.formatter = FuncTickFormatter(
code="""
if (tick.length > %d) return tick.substring(0, %d-2) + '...';
else return tick;
"""
% (max_lbl_len, max_lbl_len)
)
if ptype in ["nested", "stacked", "box"]:
fig.xgrid.grid_line_color = None
if ptype in ["nested", "stacked"]:
fig.y_range.start = 0
fig.x_range.range_padding = 0.03
if ptype in ["line", "boxnum"]:
fig.min_border_right = 20
fig.xaxis.major_label_standoff = 7
fig.xaxis.major_label_orientation = 0
fig.xaxis.major_tick_line_color = None
def _format_ticks(ticks: List[float]) -> List[str]:
"""
Format the tick values
"""
formatted_ticks = []
for tick in ticks: # format the tick values
before, after = f"{tick:e}".split("e")
if float(after) > 1e15 or abs(tick) < 1e4:
formatted_ticks.append(str(tick))
continue
mod_exp = int(after) % 3
factor = 1 if mod_exp == 0 else 10 if mod_exp == 1 else 100
value = np.round(float(before) * factor, len(str(before)))
value = int(value) if value.is_integer() else value
if abs(tick) >= 1e12:
formatted_ticks.append(str(value) + "T")
elif abs(tick) >= 1e9:
formatted_ticks.append(str(value) + "B")
elif abs(tick) >= 1e6:
formatted_ticks.append(str(value) + "M")
elif abs(tick) >= 1e4:
formatted_ticks.append(str(value) + "K")
return formatted_ticks
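# Example: small magnitudes are kept as-is while larger ones get a suffix,
# e.g. _format_ticks([0.0, 25000.0, 50000.0]) returns ['0.0', '25K', '50K'].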
def _format_axis(fig: Figure, minv: int, maxv: int, axis: str) -> None:
"""
Format the axis ticks
""" # pylint: disable=too-many-locals
# divisor for 5 ticks (5 results in ticks that are too close together)
divisor = 4.5
# interval
gap = (maxv - minv) / divisor
# get exponent from scientific notation
_, after = f"{gap:.0e}".split("e")
# round to this amount
round_to = -1 * int(after)
# round the first x tick
minv = np.round(minv, round_to)
# round value between ticks
gap = np.round(gap, round_to)
# make the tick values
ticks = [float(minv)]
while max(ticks) + gap < maxv:
ticks.append(max(ticks) + gap)
ticks = np.round(ticks, round_to)
ticks = [int(tick) if tick.is_integer() else tick for tick in ticks]
formatted_ticks = _format_ticks(ticks)
if axis == "x":
fig.xgrid.ticker = ticks
fig.xaxis.ticker = ticks
fig.xaxis.major_label_overrides = dict(zip(ticks, formatted_ticks))
fig.xaxis.major_label_text_font_size = "10pt"
fig.xaxis.major_label_standoff = 7
# fig.xaxis.major_label_orientation = 0
fig.xaxis.major_tick_line_color = None
elif axis == "y":
fig.ygrid.ticker = ticks
fig.yaxis.ticker = ticks
fig.yaxis.major_label_overrides = dict(zip(ticks, formatted_ticks))
fig.yaxis.major_label_text_font_size = "10pt"
fig.yaxis.major_label_standoff = 5
def _format_bin_intervals(bins_arr: np.ndarray) -> List[str]:
"""
Auxiliary function to format bin intervals in a histogram
"""
bins_arr = np.round(bins_arr, 3)
bins_arr = [int(val) if float(val).is_integer() else val for val in bins_arr]
intervals = [f"[{bins_arr[i]}, {bins_arr[i + 1]})" for i in range(len(bins_arr) - 2)]
intervals.append(f"[{bins_arr[-2]},{bins_arr[-1]}]")
return intervals
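# Example: _format_bin_intervals(np.array([0.0, 1.5, 3.0])) returns
# ['[0, 1.5)', '[1.5,3]'] -- every interval is half-open except the last.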
|
# © 2020 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
# !/usr/bin/env python3
# coding: utf-8
# Author: Élie de Panafieu <elie.de_panafieu@nokia-bell-labs.com>
from matrix_operations import *
class VectorSpace:
def __init__(self, iterables):
self.item_to_index = map_to_index_from_iterable(iterables_union(iterables))
self.iterable_to_index = map_to_index_from_iterable(iterables)
self.item_iterable_matrix = matrix_from_iterables_and_index_maps(
iterables, self.item_to_index, self.iterable_to_index)
def item_vector_from_dict(self, item_distribution):
return vector_from_index_and_value_maps(self.item_to_index, item_distribution)
def iterable_vector_from_dict(self, iterable_distribution):
return vector_from_index_and_value_maps(self.iterable_to_index, iterable_distribution)
def item_dict_from_vector(self, item_vector):
return dict_from_index_map_and_vector(self.item_to_index, item_vector)
def iterable_dict_from_vector(self, iterable_vector):
return dict_from_index_map_and_vector(self.iterable_to_index, iterable_vector)
def iterable_vector_from_collection(self, iterable_collection):
iterable_distribution = constant_distribution_from_collection(iterable_collection)
return self.iterable_vector_from_dict(iterable_distribution)
def count_iterables_containing_item(self, item):
if item not in self.item_to_index:
return 0
return count_nonzero_entries_in_matrix_row(self.item_iterable_matrix, self.item_to_index[item])
def map_to_index_from_iterable(iterable):
dictionary = dict()
index = 0
for item in iterable:
if item not in dictionary:
dictionary[item] = index
index += 1
return dictionary
def iterables_union(iterables):
for iterable in iterables:
for item in iterable:
yield item
def constant_distribution_from_collection(collection):
return {element: 1. for element in collection}
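# Minimal usage sketch, assuming the helpers imported from matrix_operations
# behave as their names suggest:
# >>> vs = VectorSpace([["a", "b"], ["b", "c"]])
# >>> vs.count_iterables_containing_item("b")
# 2
# >>> vs.count_iterables_containing_item("z")
# 0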
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bert_Service.py
2018/11/22 10:29 AM by berton
berton820@163.com
"""
from flask import Flask, request, jsonify
import json
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
try:
from .predict_glue import *
except:
from predict_glue import *
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
model = Evaluator()
model.init_from_config(args, processors=processors, output_modes=output_modes)
model.init_bert()
lines = [["你好吗", "我好吗"] for i in range(8)]
model.predict(lines)
# list_json input batch
@app.route('/', methods=['GET', 'POST'])
def index():
try:
if request.method == 'GET':
qq_list_json = request.args.get('qq_list', '')
qq_list = json.loads(qq_list_json)
# print(qq_list)
score = (model.predict(qq_list))
score = "\t".join(([str(_[1]) for _ in score]))
# print(score)
return jsonify({'status': '0',
"score": score
})
except Exception as e:
print(e)
return jsonify({'status': '1'})
if __name__ == '__main__':
app.run(debug=None, host='0.0.0.0', port=39000)
|
from ddf_utils.chef.model.ingredient import DataPointIngredient
def multiply_1000(chef, ingredients, result, **options):
"""Custom chef procedure: multiply every datapoint in the first ingredient by 1000."""
# ingredients = [chef.dag.get_node(x) for x in ingredients]
ingredient = ingredients[0]
new_data = dict()
for k, df in ingredient.get_data().items():
df_ = df.copy()
df_[k] = df_[k] * 1000
new_data[k] = df_
return DataPointIngredient.from_procedure_result(result, ingredient.key, new_data)
|
from colorama import Fore, Style
from string import Template
from os import path, mkdir, scandir
from shutil import copyfile
import networkx as nx
from warnings import warn
import vhdl_util
VERSION = 0.1
class deparserHDL(object):
def __getlibrary(self):
"""Build a dictionary of library folders.
Each folder is named after an entity and contains:
component.vhdl : component instantiation template
entity.vhdl    : placement template for the component
module.vhdl    : library file to copy as-is
"""
self.lib = {}
for d in scandir(self.tmplFolder):
if d.is_dir():
curPath = path.join(self.tmplFolder, d.name)
self.lib[d.name] = (path.join(curPath, "module.vhdl"),
path.join(curPath, "component.vhdl"),
path.join(curPath, "entity.vhdl"))
def __init__(self, deparser, outputDir,
templateFolder,
phvBus,
baseName="deparser",
libDirName="lib",
clk="clk", reset_n="reset_n"):
self.clkName = clk
self.enDep = "en_deparser"
self.rstName = reset_n
self.dep = deparser
self.phvBus = phvBus
self.headerBus = phvBus[0]["data"]
self.busValidAssocPos = phvBus[1]["data"]
self.entityName = baseName
self.tmplFolder = templateFolder
self.tmplFile = path.join(templateFolder, "deparser.vhdl")
self.libDir = path.join(outputDir, libDirName)
if not path.exists(self.libDir):
mkdir(self.libDir)
self.signals = {}
self.entities = {}
self.stateMachines = {}
self.components = {}
self.muxes = {}
self.payloadShifters = {}
self.__getlibrary()
self.dictSub = {'name': baseName,
'code': "",
'payloadConnect': "",
'payloadSize': deparser.busSize,
'outputSize': deparser.busSize,
'nbMuxes': deparser.nbStateMachine}
def getVHDLParam(self):
return self.dictSub
def _setSignalStr(self):
strSignal = ""
sigTmpl = Template("signal $n : ${t}; \n")
for n, t in self.signals.items():
strSignal += sigTmpl.substitute({"n": n, "t": t})
self.dictSub["signals"] = strSignal
def __str__(self):
self.genInputs()
self._setSignalStr()
self._setEntitiesImplCode()
self._setComponentsCode()
self._setMuxesConnectionCode()
self._setPayloadConnectionCode()
with open(self.tmplFile, 'r') as myfile:
tmpl = Template(myfile.read())
return tmpl.safe_substitute(self.dictSub)
def _setComponentsCode(self):
code = ""
for n, d in self.components.items():
if n not in self.lib:
raise NameError("component {} does not exist "
"in library".format(n))
if d is False:
with open(self.lib[n][1], 'r') as f:
code += f.read()
else:
with open(self.lib[n][1], 'r') as f:
tmpl = Template(f.read())
for n, dic in d.items():
code += tmpl.safe_substitute(dic)
self.dictSub["components"] = code
def _setEntitiesImplCode(self):
""" Gen implementation for a component
Component : component name
tmplDict : template dictionnary
Require a <component>_place.vhdl file in <component> dir
"""
implCode = ""
for c, d in self.entities.values():
if c not in self.lib:
raise NameError("component {} does not exist "
"in library".format(c))
with open(self.lib[c][2], 'r') as f:
tData = Template(f.read())
implCode += tData.safe_substitute(d)
self.dictSub["entities"] = implCode
def writeTB(self, fileName):
Tmpl = {"compVersion": VERSION,
"name": self.entityName,
"payloadSize": self.dictSub["payloadSize"],
"outputSize": self.dictSub["outputSize"],
"phvBus": self.dictSub["phvBus"],
"phvValidity": self.dictSub["phvValidity"],
"phvBusWidth": self.dictSub["phvBusWidth"],
"phvValidityWidth": self.dictSub["phvValidityWidth"]}
phvBus = ""
phvBusIn = ""
phvBusTmpl = "phvBus({} downto {}) <= {}_bus;\n"
phvInTmpl = "{}_bus : in std_logic_vector({} downto 0);\n"
for name, pos in self.headerBus.items():
phvBusIn += phvInTmpl.format(name, pos[1] - pos[0])
phvBus += phvBusTmpl.format(pos[1], pos[0], name)
vBus = ""
vBusIn = ""
for name, pos in self.busValidAssocPos.items():
vBusIn += "{}_valid : in std_logic;\n".format(name)
vBus += "validityBus({}) <= {}_valid;\n".format(pos, name)
Tmpl["setPhvBus"] = phvBus
Tmpl["setValBus"] = vBus
Tmpl["headerBuses"] = phvBusIn
Tmpl["validityBits"] = vBusIn
with open(path.join(self.tmplFolder, "deparser_tb.vhdl")) as inFile:
TB = Template(inFile.read())
with open(fileName, 'w') as outFile:
outFile.write(TB.substitute(Tmpl))
def writeFiles(self, mainFileName):
""" export all files.
mainFile + lib files in libFolder
"""
for name, d in self.components.items():
tF = self.lib[name][0]
if d is False:
oF = path.join(self.libDir,
"{}.vhdl".format(name)) # output lib file
copyfile(tF, oF)
else:
with open(tF, 'r') as tmpl:
t = Template(tmpl.read())
for n, dic in d.items():
oF = path.join(self.libDir,
"{}.vhdl".format(n)) # output lib file
with open(oF, 'w') as outFile:
outFile.write(t.substitute(dic))
with open(mainFileName, 'w') as outFile:
outFile.write(str(self))
def genInputs(self):
# value assignments
self.dictSub["phvBus"] = self.phvBus[0]["name"]
self.dictSub["phvValidity"] = self.getValidBusName()
self.dictSub["phvBusWidth"] = self.phvBus[0]["width"] - 1
self.dictSub["phvValidityWidth"] = self.getNbHeaders() - 1
def getValidBusName(self):
return self.phvBus[1]["name"]
def getNbHeaders(self):
return self.phvBus[1]["width"]
def appendCode(self, code):
oldCode = self.dictSub["code"]
if code in oldCode:
warn("append code already here : \n"
"oldCode : {}\n newCode : {}"
"\n".format(oldCode, code))
oldCode += code
self.dictSub["code"] = oldCode
def _addVector(self, name, size):
self._addSignal(name,
"std_logic_vector({} downto 0)".format(size - 1))
def _addLogic(self, name):
self._addSignal(name, "std_logic")
def _addSignal(self, name, t):
""" name : signal name
t signal Type
"""
if name in self.signals:
raise NameError("signal {} already exist".format(name))
self.signals[name] = t
def _addEntity(self, name, tmplDict):
"""Add entity name with template file template
and tmplDict
error if name exists
"""
if name in self.entities:
raise NameError("entity {} already exist".format(name))
self.entities[name] = tmplDict
def getEntity(self, name):
if name in self.entities:
return self.entities[name]
else:
raise NameError("entity {} does not exist".format(name))
def _connectVectors(self, dst, src):
""" return the connection of 2 signals
dst, src are tuples : (name, msb, lsb)
"""
tmplStr = "${dst}"
dictTmpl = {"dst": dst[0],
"src": src[0]}
if len(dst) == 3:
tmplStr += "(${dMSB} downto ${dLSB})"
dictTmpl["dLSB"] = dst[2]
dictTmpl["dMSB"] = dst[1]
tmplStr += " <= ${src}"
if len(src) == 3:
tmplStr += "(${sMSB} downto ${sLSB})"
dictTmpl["sLSB"] = src[2]
dictTmpl["sMSB"] = src[1]
tmplStr += ";\n"
tmpl = Template(tmplStr)
return tmpl.substitute(dictTmpl)
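# Examples of the generated assignments:
# _connectVectors(("a", 7, 0), ("b", 15, 8)) -> "a(7 downto 0) <= b(15 downto 8);\n"
# _connectVectors(("a", ), ("b", ))          -> "a <= b;\n"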
def _setPayloadConnectionCode(self):
code = Template("""
-- payload connections \n
process(payload_in_tkeep, payload_in_tdata, payload_in_tvalid) is
begin
${data}
if payload_in_tvalid = '1' then
${keepValid}
else
${keepUnvalid}
end if;
end process;
process(clk) is
begin
if rising_edge(clk) then
case $phvValidity is
${CtrlAssoc}
when others =>
${CtrlOthers}
end case;
end if;
end process;
""")
codeCtrlO = ""
codeCtrlAssoc = ""
codeData = ""
code1 = ""
code2 = ""
phvValWidth = self.dictSub["phvValidityWidth"]
payloadBusWidth = self.dictSub["payloadSize"]
paths = nx.all_simple_paths(self.dep.depG,
self.dep.init,
self.dep.last)
for p in paths:
tW = 0
phv_val_list = ["0"] * (phvValWidth+1)
# Counting total header sizes in tW
for h in p:
if h in self.headerBus:
tW += self.headerBus[h][1] - self.headerBus[h][0] + 1
# generate phv_val cond
phv_val_list[self.busValidAssocPos[h]] = "1"
codeCtrlAssoc += 'when "{}" =>\n'.format(''.join(phv_val_list[::-1]))
# get payoadShift that is 0
psList = list(self.payloadShifters.values())
pos = int((tW % payloadBusWidth)/8)
for i in range(pos):
ps = psList[i]
control = self.getEntity(ps[0])[1]["control"]
ctrlW = self.getEntity(ps[0])[1]["wControl"]
offset = i + len(psList) - pos
value = "'1' & {}".format(vhdl_util.int2vector(offset,
ctrlW - 1))
codeCtrlAssoc += self._connectVectors((control, ),
(value, ))
for j, i in enumerate(range(pos, len(psList))):
ps = psList[i]
control = self.getEntity(ps[0])[1]["control"]
ctrlW = self.getEntity(ps[0])[1]["wControl"]
value = "'0' & {}".format(vhdl_util.int2vector(j, ctrlW - 1))
codeCtrlAssoc += self._connectVectors((control, ),
(value, ))
# print(" mod {}\n cond : {}".format(int((tW % payloadBusWidth)/8),
# "".join(phv_val_list)))
for ps in self.payloadShifters.values():
codeData += self._connectVectors(ps[1]["inData"][1],
ps[1]["inData"][0])
code1 += self._connectVectors(ps[1]["inKeep"][1],
ps[1]["inKeep"][0])
code2 += self._connectVectors(ps[1]["inKeep"][1],
("(others => '0')", ))
entity = self.getEntity(ps[0])
codeCtrlO += self._connectVectors((entity[1]["control"], ),
("(others => '0')", ))
payloadTmplDict = {"phvValidity": self.dictSub["phvValidity"],
"data": codeData,
"keepValid": code1,
"keepUnvalid": code2,
"CtrlOthers": codeCtrlO,
"CtrlAssoc": codeCtrlAssoc}
self.dictSub['payloadConnect'] = code.safe_substitute(payloadTmplDict)
def _setMuxesConnectionCode(self):
def getMuxConnectStr(muxNum):
""" Generate the code to connect a Mux
"""
code = ""
_, connections = self.muxes[muxNum]
entity = self._getMuxEntity(muxNum)
pDst = ["", 0, 0]
pSrc = ["", 0, 0]
pDst = [entity["input"], 0, 0]
width = entity["width"]
for src, dst in connections.values():
pDst[1] = int((dst+1)*width - 1)
pDst[2] = int(dst * width)
pSrc[1] = int(src[1] + width - 1)
pSrc[2] = int(src[1])
pSrc[0] = src[0]
code += self._connectVectors(pDst, pSrc)
return code
allMuxStr = ""
for n in self.muxes:
allMuxStr += getMuxConnectStr(n)
self.dictSub["muxes"] = allMuxStr
def genPayloadShifter(self):
for i in range(self.dep.nbStateMachine):
self._genPayloadShifter(i)
def genMuxes(self):
for i in range(self.dep.nbStateMachine):
self._genMux(i)
self._genStateMachine(i)
def _getStMCompTmpl(self, num, name):
"""Gen template for a state machine
"""
graph = self.dep.getStateMachine(num)
stateList = {}
for u, v, d in graph.edges(data=True):
if u not in stateList:
stateList[u] = []
stateList[u].append((v, d))
def genStateTransitionCode(listTransition):
def getStateTransition(name, cond):
busAssoc = self.busValidAssocPos
transitionTmpl = "NEXT_STATE <= {}; \n"
condTmpl = "headerValid({}) = '1' then \n {} \n"
tmp = transitionTmpl.format(name)
if "label" in cond:
tmp = condTmpl.format(busAssoc[cond["label"]],
tmp)
return tmp, ("label" in cond)
transitionCode = "{} {}"
condCodeTmpl = "if {}"
condCode = ""
noCondCode = ""
for n, d in listTransition:
code, cond = getStateTransition(n, d)
if cond:
condCode += condCodeTmpl.format(code)
condCodeTmpl = "elsif {}"
else:
noCondCode += code
if len(condCode) > 0:
condCode += "end if;\n"
return transitionCode.format(noCondCode, condCode)
tmplDict = {"compVersion": VERSION,
"name": name,
"initState": self.dep.init,
"lastState": self.dep.last,
"stateList": "({})".format(", "
.join(list(graph.nodes))),
"initStateTransition":
genStateTransitionCode(stateList.pop(self.dep.init))}
otherStateTransition = ""
assocMuxIn = self.muxes[num][1] # get ctrl val to assign for a state
for k, struct in stateList.items():
otherStateTransition += "when {} =>\n".format(k)
stateMuxConv = vhdl_util.int2vector(assocMuxIn[k][1],
"outputWidth")
otherStateTransition += "output_reg <= {} ;\n".format(stateMuxConv)
otherStateTransition += genStateTransitionCode(struct)
tmplDict["otherStateTransition"] = otherStateTransition
return tmplDict
def _getStateMachineEntity(self, num):
compName = "state_machine_{}".format(num)
name = "stM_{}".format(num)
nbInput = self.getNbHeaders()
outWidth = self._getMuxEntity(num)["wControl"]
output = self._getMuxEntity(num)["control"]
if "state_machine" not in self.components:
self.components["state_machine"] = {}
if name not in self.entities:
stComp = self.components["state_machine"]
if compName not in stComp:
stComp[compName] = self._getStMCompTmpl(num, compName)
tmplDict = {"name": name,
"componentName": compName,
"nbHeader": nbInput,
"wControl": outWidth,
"clk": self.clkName,
"reset_n": self.rstName,
"start": "start_deparser",
"ready": "deparser_rdy_i({})".format(num),
"finish": "out_valid({})".format(num),
"headersValid": self.getValidBusName(),
"output": output}
self._addEntity(name, ("state_machine", tmplDict))
return self.getEntity(name)[1]
def _genStateMachine(self, num):
if num not in self.stateMachines:
entity = self._getStateMachineEntity(num)
self.stateMachines[num] = (entity["name"],)
else:
warn("trying to regenerate stateMachine {}".format(num))
def _getMuxEntity(self, muxNum):
"""Function to get a mux entity name with
nbIn as nb input and width as output size
The mux name is generated such as being unique for
a certain type of mux.
if mux does not exist, add it to entities dictionnary
"""
graph = self.dep.getStateMachine(muxNum)
nbInput = len(graph)-2
if nbInput == 0:
nbInput = 1
outWidth = 8
muxName = "mux_{}".format(muxNum)
outputName = "muxes_o({})".format(muxNum)
inputName = "muxes_{}_in".format(muxNum)
controlName = "muxes_{}_ctrl".format(muxNum)
if muxName not in self.entities:
if "mux" not in self.components:
self.components["mux"] = False
dictMux = {"name": muxName,
"nbInput": nbInput,
"wControl": vhdl_util.getLog2In(nbInput),
"clk": self.clkName,
"width": outWidth,
"input": inputName,
"wInput": int(nbInput * outWidth),
"output": outputName,
"control": controlName}
self._addEntity(muxName, ("mux", dictMux))
return self.getEntity(muxName)[1]
def _getPayloadShifterEntity(self, num):
# graph = self.dep.getStateMachine(num)
nbInput = int(self.dictSub['payloadSize']/8)
width = 8
name = "payloadShifter_{}".format(num)
controlName = "payload_{}_ctrl".format(num)
inDataName = "payload_shift_{}_data_in".format(num)
inKeepName = "payload_shift_{}_keep_in".format(num)
selDataName = "payload_o_data({})".format(num)
selKeepName = "payload_o_keep({})".format(num)
if name not in self.entities:
if "payload_shifter" not in self.components:
self.components["payload_shifter"] = False
dictParam = {"name": name,
"nbInput": nbInput,
"width": width,
"dataWidth": int(nbInput * width),
"keepWidthIn": int(1 * nbInput), # width on keepinput
"keepWidth": 1,
"wControl": vhdl_util.getLog2In(nbInput)+1,
"clk": self.clkName,
"control": controlName,
"inData": inDataName,
"inKeep": inKeepName,
"selKeep": selKeepName,
"selData": selDataName}
self._addEntity(name, ("payload_shifter", dictParam))
return self.getEntity(name)[1]
def _genPayloadShifter(self, num):
"""Payload shifter
"""
def genConnections(num, entity):
""" Connection of the payload shifter
Dictionary key : input = (src, dst)
src : tuple(signalName, MSB, LSB)
dst : tuple(signalName, MSB, LSB)
"""
connections = {}
connections["inKeep"] = (("payload_in_tkeep", ),
(entity["inKeep"], ))
connections["inData"] = (("payload_in_tdata", ),
(entity["inData"], ))
return connections
if num not in self.payloadShifters:
entity = self._getPayloadShifterEntity(num)
self._addVector(entity["control"], entity["wControl"])
self._addVector(entity["inData"], entity["dataWidth"])
self._addVector(entity["inKeep"], entity["keepWidthIn"])
connections = genConnections(num, entity)
self.payloadShifters[num] = (entity["name"], connections)
else:
warn("trying to regenerate payload shifter {}".format(num))
def _genMux(self, muxNum):
""" Mux is tuple : entityName, stateMachine assignments)
"""
def genConnections(num):
""" Connection :
Dictionary key = graph node name
value : tuple(src, dst)
src: tuple(signalName, start)
dst: mux input number
"""
connections = {}
graph = self.dep.getStateMachine(num)
i = 0
for n, d in graph.nodes(data=True):
if d != {}:
signalName = self.phvBus[0]["name"]
startPos = d["pos"][0] + self.headerBus[d["header"]][0]
connections[n] = ((signalName, startPos), i)
i += 1
return connections
if muxNum not in self.muxes:
entity = self._getMuxEntity(muxNum)
self._addVector(entity["control"], entity["wControl"])
self._addVector(entity["input"], entity["wInput"])
connections = genConnections(muxNum)
self.muxes[muxNum] = (entity["name"], connections)
else:
warn("Trying to regenerate mux {}".format(muxNum))
def _validateInputs(funcIn):
""" funcIn : list of three-element tuples:
(type got, variable name, expected type)
"""
val = True
# validate input
for g, n, e in funcIn:
if g != e:
print(Fore.YELLOW + "Wrong {} type: got {}, "
"expected {}".format(n, g, e) + Style.RESET_ALL)
val = False
return val
def exportDeparserToVHDL(deparser, outputFolder, phvBus, baseName="deparser"):
""" This function export to VHDL a deparserStateMachines
If stateMachines are not of type deparserStateMachines exit
"""
toValidate = [(type(outputFolder), "outputFolder", str),
(type(baseName), "baseName", str)]
if not _validateInputs(toValidate):
return
if not path.exists(outputFolder):
mkdir(outputFolder)
outputFiles = path.join(outputFolder, baseName + ".vhdl")
output_tb = path.join(outputFolder, "{}_tb.vhdl".format(baseName))
vhdlGen = deparserHDL(deparser, outputFolder, 'library', phvBus, baseName)
vhdlGen.genMuxes()
vhdlGen.genPayloadShifter()
vhdlGen.writeFiles(outputFiles)
vhdlGen.writeTB(output_tb)
return vhdlGen
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitna LI <weitian@aaronly.me>
# MIT License
#
"""
Average the 2D power spectrum within the EoR window (i.e., excluding the
foreground contaminated wedge) to derive the 1D spherically averaged
power spectrum.
"""
import os
import argparse
import numpy as np
import matplotlib
import matplotlib.style
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from eor_window import PS2D
# Matplotlib settings
matplotlib.style.use("ggplot")
for k, v in [("font.family", "monospace"),
("image.cmap", "jet"),
("xtick.major.size", 7.0),
("xtick.major.width", 2.0),
("xtick.minor.size", 4.0),
("xtick.minor.width", 1.5),
("ytick.major.size", 7.0),
("ytick.major.width", 2.0),
("ytick.minor.size", 4.0),
("ytick.minor.width", 1.5)]:
matplotlib.rcParams[k] = v
class PS1D:
"""
Calculate the 1D spherically averaged power spectrum from 2D PS.
Parameters
----------
ps2d : `~PS2D`
A `~PS2D` instance
step : float, optional
By default, a logarithmic grid with the specified step ratio
(default: 1.1) will be used to do the azimuthal averages.
If specified a value <=1 or None, then an equal-width pixel-by-pixel
(along radial direction) grid is adopted.
"""
def __init__(self, ps2d, step=1.1):
self.ps2d = ps2d
self.data = ps2d.ps2d # shape: [n_k_los, n_k_perp]
self.data_err = ps2d.ps2d_err
self.eor_window = ps2d.eor_window
if step is None or step <= 1:
self.step = None
else:
self.step = step
@property
def k_perp(self):
return self.ps2d.k_perp
@property
def k_los(self):
return self.ps2d.k_los
@property
def dk(self):
"""
The wavenumber k bin size that will be used to determine the
averaging grid. Considering that the angular and line-of-sight
wavenumber bin sizes are very different, their geometric mean
is used instead.
"""
k_perp = self.k_perp
k_los = self.k_los
dk_perp = k_perp[1] - k_perp[0]
dk_los = k_los[1] - k_los[0]
return np.sqrt(dk_perp * dk_los)
@property
def k(self):
"""
The radial k positions to determine the averaging bins to derive
the 1D power spectrum.
"""
k_max = np.sqrt(self.k_perp[-1]**2 + self.k_los[-1]**2)
dk = self.dk
nk = int(k_max / dk) + 1
x = np.arange(nk)
if self.step is None:
return x * dk
else:
xmax = x.max()
x2 = list(x[x*(self.step-1) <= 1])
v1 = x[len(x2)]
while v1 < xmax:
x2.append(v1)
v1 *= self.step
x2.append(xmax)
return np.array(x2) * dk
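# Example of the grid above with step=1.1 (assuming the k range spans more
# than ~11 bins): indices grow one pixel at a time while x*(step-1) <= 1
# (i.e. 0..10), then geometrically (11, 12.1, 13.31, ...) up to the maximum
# index, and the whole grid is scaled by dk.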
def calc_ps1d(self, normalize=True):
"""
Calculate the 1D spherically averaged power spectrum by averaging
the 2D cylindrical power spectrum.
Parameters
----------
normalize : bool
Whether to normalize the 1D power spectrum to obtain the
dimensionless one, i.e.,
Δ^2(k) = (k^3 / (2*π^2)) P(k)
Attributes
----------
ps1d : 2D `~numpy.ndarray`
3-column array storing the calculated 1D power spectrum,
``[k, ps1d, ps1d_err]``
ps1d_normalized : bool
Whether the calculated 1D power spectrum is normalized?
Returns
-------
ps1d
"""
ps2d = self.data
ps2d_err = self.data_err
k_perp = self.k_perp
k_los = self.k_los
ps1d_k = self.k
nk = len(ps1d_k)
print("Number of k points: %d" % nk)
# PS1D's 3 columns: [k, ps1d, ps1d_err]
ps1d = np.zeros(shape=(nk, 3))
ps1d[:, 0] = ps1d_k
print("Averaging 2D power spectrum ...")
mx, my = np.meshgrid(k_perp, k_los)
mk = np.sqrt(mx**2 + my**2)
mk[~self.eor_window] = np.inf # constrain within EoR window
for i, k in enumerate(ps1d_k):
ii, jj = (mk <= k).nonzero()
mk[ii, jj] = np.inf
data = ps2d[ii, jj]
errors = ps2d_err[ii, jj]
ncell = len(data)
if ncell > 0:
ps1d[i, 1] = np.mean(data)
# XXX: how to properly estimate the errors???
ps1d[i, 2] = np.sqrt(np.sum(errors ** 2)) / ncell
if normalize:
# XXX: is this normalization correct???
coef = ps1d_k**3 / (2*np.pi**2)
ps1d[:, 1] *= coef
ps1d[:, 2] *= coef
self.ps1d_normalized = True
else:
self.ps1d_normalized = False
self.ps1d = ps1d
return ps1d
def save(self, outfile):
if self.ps1d_normalized:
ps1d_desc = "normalized power [K^2]"
else:
ps1d_desc = "power [K^2 Mpc^3]"
header = [
"EoR window definition:",
"+ FoV: %f [deg]" % self.ps2d.fov,
"+ e_ConvWidth: %f" % self.ps2d.e,
"+ k_perp_min: %f [Mpc^-1]" % self.ps2d.k_perp_min,
"+ k_perp_max: %f [Mpc^-1]" % self.ps2d.k_perp_max,
"+ k_los_min: %f [Mpc^-1]" % self.ps2d.k_los_min,
"+ k_los_max: %f [Mpc^-1]" % self.ps2d.k_los_max,
"",
"Columns:",
"1. k: wavenumber [Mpc^-1]",
"2. ps1d: %s" % ps1d_desc,
"ps1d_err: power errors",
"",
"k ps1d ps1d_err",
]
np.savetxt(outfile, self.ps1d, header="\n".join(header))
print("Saved 1D power spectrum to file: %s" % outfile)
def plot(self, ax):
ps1d = self.ps1d
if self.ps1d_normalized:
ylabel = r"$\Delta^2(k)$ [K$^2$]"
else:
ylabel = r"$P(k)$ [K$^2$ Mpc$^3$]"
x = ps1d[:, 0]
y = ps1d[:, 1]
yerr = ps1d[:, 2]
ax.errorbar(x[1:], y[1:], yerr=yerr[1:], fmt="none")
ax.plot(x[1:], y[1:], marker="o")
ax.set(xscale="log", yscale="log",
xlabel=r"$k$ [Mpc$^{-1}$]", ylabel=ylabel,
title="1D Spherically Averaged Power Spectrum")
return ax
def main():
parser = argparse.ArgumentParser(
description="Calculate 1D power spectrum within the EoR window")
parser.add_argument("-C", "--clobber", dest="clobber", action="store_true",
help="overwrite the output files if already exist")
parser.add_argument("-s", "--step", dest="step", type=float, default=1.1,
help="step ratio (>1; default: 1.1) between 2 " +
"consecutive radial k bins, i.e., logarithmic grid. " +
"if specified a value <=1, then an equal-width grid " +
"of current k bin size will be used.")
parser.add_argument("-F", "--fov", dest="fov",
type=float, required=True,
help="instrumental FoV to determine the EoR window; " +
"SKA1-Low has FoV ~ 3.12 / (nu/200MHz) [deg], i.e., " +
"~5.03 @ 124, ~3.95 @ 158, ~3.18 @ 196")
parser.add_argument("-e", "--conv-width", dest="conv_width",
type=float, default=3.0,
help="characteristic convolution width (default: 3.0)")
parser.add_argument("-p", "--k-perp-min", dest="k_perp_min", type=float,
help="minimum k wavenumber perpendicular to LoS; " +
"unit: [Mpc^-1]")
parser.add_argument("-P", "--k-perp-max", dest="k_perp_max", type=float,
help="maximum k wavenumber perpendicular to LoS")
parser.add_argument("-l", "--k-los-min", dest="k_los_min", type=float,
help="minimum k wavenumber along LoS")
parser.add_argument("-L", "--k-los-max", dest="k_los_max", type=float,
help="maximum k wavenumber along LoS")
parser.add_argument("--no-plot", dest="noplot", action="store_true",
help="do not plot and save the calculated 1D power " +
"power within the EoR window")
parser.add_argument("-i", "--infile", dest="infile", required=True,
help="2D power spectrum FITS file")
parser.add_argument("-o", "--outfile", dest="outfile", required=True,
help="output TXT file to save the PSD data")
args = parser.parse_args()
if (not args.clobber) and os.path.exists(args.outfile):
raise OSError("outfile '%s' already exists" % args.outfile)
ps2d = PS2D(args.infile, fov=args.fov, e=args.conv_width,
k_perp_min=args.k_perp_min, k_perp_max=args.k_perp_max,
k_los_min=args.k_los_min, k_los_max=args.k_los_max)
ps1d = PS1D(ps2d, step=args.step)
ps1d.calc_ps1d()
ps1d.save(args.outfile)
if not args.noplot:
fig = Figure(figsize=(8, 8), dpi=150)
FigureCanvas(fig)
ax = fig.add_subplot(1, 1, 1)
ps1d.plot(ax=ax)
fig.tight_layout()
plotfile = os.path.splitext(args.outfile)[0] + ".png"
fig.savefig(plotfile)
print("Plotted 1D power spectrum within EoR window: %s" % plotfile)
if __name__ == "__main__":
main()
|
from typing import List, Tuple, Optional
import random
def bubble_sort_(ls: List) -> List:
nElem = len(ls)
if nElem <= 1:
return ls
swap_flag = True
while swap_flag:
swap_flag = False
for idx in range(nElem-1):
if ls[idx] > ls[idx + 1]:
ls[idx], ls[idx + 1] = ls[idx + 1], ls[idx]
swap_flag = True
return ls
def selection_sort_(ls: List) -> List:
nElem = len(ls)
if nElem <= 1:
return ls
for idx in range(nElem-1):
unsortedpart = ls[idx:]
min_val_so_far = float('inf')
min_index_so_far = None
for unsort_idx, unsort_elem in enumerate(unsortedpart):
if unsort_elem < min_val_so_far:
min_val_so_far = unsort_elem
min_index_so_far = unsort_idx
ls[idx], ls[idx + min_index_so_far] = ls[idx + min_index_so_far], ls[idx]
return ls
def merge_sort(ls: List) -> List:
nElem = len(ls)
if nElem <= 1:
return ls
first_list = merge_sort(ls[:nElem // 2])
second_list = merge_sort(ls[nElem // 2:])
new_list = []
while (len(first_list) != 0) and (len(second_list) != 0):
if first_list[-1] > second_list[-1]:
new_list.append(first_list.pop())
else:
new_list.append(second_list.pop())
new_list.reverse()
return first_list + second_list + new_list
def quicksort_(ls: List, partition_idxs: Optional[Tuple[int, int]] = None, scheme = 'hoare') -> Optional[List]:
if partition_idxs is None:
start = 0
end = len(ls) - 1
else:
start = partition_idxs[0]
end = partition_idxs[1]
if start >= end:
return
if scheme == 'hoare':
left_boundary, right_boundary = _hoare_partition_(ls, start, end)
elif scheme == 'lomuto':
left_boundary, right_boundary = _lomuto_partition_(ls, start, end)
else:
print('Unknown Scheme')
return ls
quicksort_(ls, (start, left_boundary), scheme = scheme)
quicksort_(ls, (right_boundary, end), scheme = scheme)
if partition_idxs is None:
return ls
else:
return
def _hoare_partition_(ls: List, start: int, end: int) -> Tuple[int, int]:
pivot = ls[random.randint(start, end)]
left_pointer = start
right_pointer = end
while True:
while ls[left_pointer] < pivot:
left_pointer += 1
while ls[right_pointer] > pivot:
right_pointer -= 1
if left_pointer >= right_pointer:
break
ls[right_pointer], ls[left_pointer] = ls[left_pointer], ls[right_pointer]
left_pointer += 1
right_pointer -= 1
return right_pointer, right_pointer + 1
def _lomuto_partition_(ls: List, start: int, end: int) -> Tuple[int, int]:
new_pivot_idx = start
pivot = ls[end]
for idx in range(start, end):
if ls[idx] < pivot:
ls[idx], ls[new_pivot_idx] = ls[new_pivot_idx], ls[idx]
new_pivot_idx += 1
ls[new_pivot_idx], ls[end] = ls[end], ls[new_pivot_idx]
return new_pivot_idx - 1, new_pivot_idx + 1
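# Minimal usage sketch: all routines above should agree with the built-in
# sorted() on a list of distinct values (bubble_sort_, selection_sort_ and
# quicksort_ sort in place; merge_sort returns a new list).
if __name__ == "__main__":
    data = random.sample(range(100), 20)
    assert merge_sort(list(data)) == sorted(data)
    assert bubble_sort_(list(data)) == sorted(data)
    assert selection_sort_(list(data)) == sorted(data)
    assert quicksort_(list(data), scheme='hoare') == sorted(data)
    assert quicksort_(list(data), scheme='lomuto') == sorted(data)
    print("all sorting routines agree with sorted()")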
|
'''
Copyright 2017 Ewan Mellor
Changes authored by Hadi Esiely:
Copyright 2018 The Johns Hopkins University Applied Physics Laboratory LLC.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import json
from .utils import printable_phone_number
class MissingRecipientException(Exception):
pass
class Message(object): # pylint: disable=too-many-instance-attributes
def __init__(self, json_dict=None):
self.local_user = None
self.recipient = None
self.sender = None
self.timestamp = None
self.received_at = None
self.body = None
self.not_yet_sent = None
if json_dict:
for k in json_dict:
setattr(self, k, json_dict[k])
if self.recipient:
self.recipient_printable = printable_phone_number(self.recipient)
def _get_arrow(self):
return '←' if self.direction == 'in' else '→'
arrow = property(_get_arrow)
def _get_direction(self):
if self.recipient:
return 'out'
else:
return 'in'
direction = property(_get_direction)
def to_bytes(self):
if not self.recipient:
raise MissingRecipientException()
msg_str = self.recipient + ":" + (self.body or '')
return msg_str.encode('utf-8')
def to_json(self):
d = {}
for k in ('local_user', 'recipient', 'sender', 'timestamp',
'received_at', 'body'):
v = getattr(self, k, None)
if v is not None:
d[k] = v
return d
def to_json_str(self):
d = self.to_json()
return json.dumps(d)
def __str__(self):
return self.to_json_str()
|
import os
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class Visualization(object):
def __init__(self, output_dir= None):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.output_dir = output_dir
def compare_images(self, input_images, reconstructed_images, stamp):
# this method is inspired by https://jmetzen.github.io/2015-11-27/vae.html
print("Compare images in visualization")
if not len(input_images) == len(reconstructed_images):
raise ValueError("Inputs should have the same size")
plt.figure(figsize=(35, 20))
for i in range(0, len(input_images)):
if input_images.shape[1] == 1:
size = input_images.shape[2]
input_image = input_images[i].reshape(size, size)
reconstructed_image = reconstructed_images[i].reshape(size, size)
else:
input_image = self.deprocess_image(input_images[i])
reconstructed_image = self.deprocess_image(reconstructed_images[i])
plt.subplot(len(input_images), 10, 1 + i)
plt.imshow(input_image, cmap='gray')
plt.title("Input image")
plt.subplot(len(input_images), 10, 11 + i)
plt.imshow(reconstructed_image, cmap='gray')
plt.title("Reconstructed image")
# plt.show()
plt.tight_layout()
plt.savefig(self.output_dir + '/fig_comparison_' + stamp + '.png', bbox_inches='tight')
#plt.show()
plt.close()
#plt.savefig('figures/fig_' + str(i) + '.png')
def visualize_image_canvas(self, inputs, stamp, nx=None):
# this method is inspired by https://jmetzen.github.io/2015-11-27/vae.html
shape_x = inputs.shape[2]
shape_c = inputs.shape[1]
if nx is None:
# the number of inputs should be a perfect square
nx = ny = int(np.sqrt(inputs.shape[0]))
else:
ny = int(inputs.shape[0] // nx)
inputs = inputs.reshape((ny, nx, shape_c, shape_x, shape_x))
if shape_c == 1:
canvas = np.empty((shape_x * ny, shape_x * nx))
else:
canvas = np.empty((shape_x * ny, shape_x * nx, shape_c))
for i in range(0, ny):
for j in range(0, nx):
if shape_c == 1:
image = inputs[i][j].reshape(shape_x, shape_x)
canvas[i*shape_x:(i+1)*shape_x, j*shape_x:(j+1)*shape_x] = image
else:
image = self.deprocess_image(inputs[i][j]) / 255.0
canvas[i * shape_x:(i + 1) * shape_x, j * shape_x:(j + 1) * shape_x] = image
plt.figure(figsize=(8, 10))
plt.imshow(canvas, origin="upper", cmap='gray')
plt.tight_layout()
plt.savefig(self.output_dir + '/fig_canvas_' + stamp + '.png', bbox_inches='tight')
#plt.show()
plt.close()
def visualize_latent_space(self, output_function, shape, stamp):
# Passing the output function from the variational autoencoder here is not very nice, but yeah... it works
nx = ny = 20
x_values = np.float32(np.linspace(-3, 3, nx))
y_values = np.float32(np.linspace(-3, 3, ny))
shape_x = shape[2]
shape_c = shape[1]
if shape_c == 1:
canvas = np.empty((shape_x * ny, shape_x * nx))
else:
canvas = np.empty((shape_x * ny, shape_x * nx, shape_c))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mu = np.array([[xi, yi]])
# Get output for z
constructed_image = output_function(z_mu)
if shape_c == 1:
constructed_image = constructed_image[0].reshape(shape_x, shape_x)
else:
constructed_image = self.deprocess_image(constructed_image[0]) / 255.0
canvas[(nx - i - 1) * shape_x:(nx - i) * shape_x, j * shape_x:(j + 1) * shape_x] = constructed_image
self.visualize_canvas(canvas=canvas, stamp=stamp)
def visualize_images(self, inputs, stamp):
print("visualize image")
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(path + "/figures"):
os.makedirs(path + "/figures")
for i in range(0, len(inputs)):
if inputs.shape[1] == 1:
image = inputs[i].reshape(inputs.shape[2], inputs.shape[3])
else:
image = self.deprocess_image(inputs[i])
plt.imshow(image, cmap='gray')
plt.savefig(self.output_dir + '/fig_' + stamp + "_" + str(i) + '.png', bbox_inches='tight')
#plt.show()
plt.close()
def visualize_latent_layer_scatter(self, mu, y_values, stamp):
plt.figure(figsize=(8, 6))
plt.scatter(mu[:, 0], mu[:, 1], c=y_values, cmap='jet')
plt.colorbar()
plt.grid()
plt.savefig(self.output_dir + '/fig_latent_' + stamp + '.png', bbox_inches='tight')
#plt.show()
plt.close()
def visualize_canvas(self, canvas, stamp):
plt.figure(figsize=(8, 10))
#Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.savefig(self.output_dir + '/fig_canvas_latent_' + stamp + '.png', bbox_inches='tight')
plt.close()
def save_loss(self, lst, stamp):
filename = self.output_dir + "/loss_" + stamp
with open(filename, 'w') as fn:
for i, elem in enumerate(lst):
fn.write("{}\t{}\n".format(i+1, elem))
# Code of this method is fully copied from
# https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html
def deprocess_image(self,x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
|
"""Change step_number to int.
Revision ID: 430f2963adb9
Revises: 107d9a8cb98a
Create Date: 2013-09-25 18:02:23.582384
"""
# revision identifiers, used by Alembic.
revision = '430f2963adb9'
down_revision = '107d9a8cb98a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('step', u'step_number',
existing_type=sa.INT(),
nullable=False)
op.alter_column('step', u'next_step_number',
existing_type=sa.INT(),
nullable=True)
### end Alembic commands ###
def downgrade():
op.alter_column('step', u'step_number',
existing_type=sa.UNICODE(),
nullable=True)
op.alter_column('step', u'next_step_number',
existing_type=sa.Unicode(),
nullable=True)
### end Alembic commands ###
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from helpers import *
from js.utils.plot.colors import colorScheme
mpl.rc('font',size=30)
mpl.rc('lines',linewidth=2.)
figSize = (21, 10)
figSize = (16, 10)
cs = colorScheme("label")
#pathToTimings = "../results/ablation_fr2_xyz/fullmode/timings.txt"
#timings = ParseTimings(pathToTimings)
pathToStats = "/home/jstraub/Dropbox/0gtd/thesis/results/ablation_fr2_xyz/fullmode/stats.txt"
statsAwa = ParseTimings(pathToStats)
pathToStats = "/home/jstraub/Dropbox/0gtd/thesis/results/ablation_fr2_xyz/rndTrack/stats.txt"
statsRnd = ParseTimings(pathToStats)
for key, vals in statsAwa.items():
print(key, len(vals))
for key, vals in statsRnd.items():
print(key, len(vals))
N = len(statsRnd["NumPlanesTracked"])
fig = plt.figure(figsize = figSize, dpi = 80, facecolor="w", edgecolor="k")
plt.plot(np.arange(N),
statsRnd["NumPlanesTracked"], label="# tracked by random ICP", color=cs[1])
plt.plot(np.arange(N),
statsAwa["NumPlanesTracked"], label="# tracked by dir.-aware ICP", color=cs[0])
plt.plot(np.arange(N),
statsAwa["NumPlanesInView"], label="# total in view", color=cs[2])
#plt.plot(np.arange(len(statsRnd["NumPlanesTracked"])),
# statsRnd["NumPlanesInView"], label="random", color=cs[1])
plt.xlim([0,N])
plt.legend(loc="best")
plt.xlabel("frame")
plt.ylabel("number of surfels")
plt.savefig("trackingStratCompPlanesTracked.png", figure=fig)
fig = plt.figure(figsize = figSize, dpi = 80, facecolor="w", edgecolor="k")
plt.plot(np.arange(N),
statsRnd["trackingMaxStd"], label="min/max std of random ICP", color=cs[1])
plt.plot(np.arange(N),
statsAwa["trackingMaxStd"], label="min/max std of dir.-aware ICP", color=cs[0])
plt.plot(np.arange(N),
statsRnd["trackingMinStd"], color=cs[1])
plt.plot(np.arange(N),
statsAwa["trackingMinStd"], color=cs[0])
plt.legend(loc="upper left")
plt.xlim([0,N])
plt.xlabel("frame")
plt.ylabel("standard deviation of pose estimate")
plt.savefig("trackingStratCompStd.png", figure=fig)
fig = plt.figure(figsize = figSize, dpi = 80, facecolor="w", edgecolor="k")
plt.plot(np.arange(N),
statsRnd["trackingH"], label="entropy of random ICP", color=cs[1])
plt.plot(np.arange(N),
statsAwa["trackingH"], label="entropy of dir.-aware ICP", color=cs[0])
plt.xlim([0,N])
plt.legend(loc="best")
plt.xlabel("frame")
plt.ylabel("entropy of pose estimate")
plt.savefig("trackingStratCompEntropy.png", figure=fig)
plt.show()
|
# Generated by Django 2.2.16 on 2021-06-03 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('petition', '0011_auto_20210524_2151'),
]
operations = [
migrations.AddField(
model_name='pytitionuser',
name='moderated',
field=models.BooleanField(default=False),
),
]
|
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view(['GET', 'POST'])
def get_input_data(request):
if request.method == 'POST':
_url = request.data['inputURL']
try:
code = requests.get(_url)
text = code.text
            soup = BeautifulSoup(text, 'html.parser')
            all_images = []
            images = [x['src'] for x in soup.findAll('img') if x.get('src')]
for img in images:
if img[0:4] == 'http':
all_images.append(img)
else:
all_images.append(urljoin(_url, img))
return Response({"all_images":all_images})
except Exception as e:
return Response({"error_desc":"Something went wrong.",status:404})
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import os
from typing import Tuple, Union
import numpy as np
from PIL import Image
import glob
class PilotNetDataset():
"""Generic PilotNet dataset class. Returns image and ground truth value
when the object is indexed.
Parameters
----------
path : str, optional
        Path of the dataset folder. If the folder does not exist, the folder
is created and the dataset is downloaded and extracted to the folder.
Defaults to '../data'.
size : list, optional
Size of the image. If it is not `200x66`, it is resized to the given
value. Defaults to [200, 66].
    transform : dict, optional
        Affine transform parameters with 'weight' and 'bias' keys applied to
        the normalized input image. Defaults to None.
train : bool, optional
Flag to indicate training or testing set. Defaults to True.
visualize : bool, optional
If true, the train/test split is ignored and the temporal sequence
of the data is preserved. Defaults to False.
sample_offset : int, optional
sample offset. Default is 0.
Usage
-----
>>> dataset = PilotNetDataset()
    >>> image, gt = dataset[0]
>>> num_samples = len(dataset)
"""
def __init__(
self,
path: str = '../data',
size: list = [200, 66],
        transform: Union[dict, None] = None,
train: Union[bool, None] = True,
visualize: Union[bool, None] = False,
sample_offset: int = 0,
) -> None:
self.path = os.path.join(path, 'driving_dataset')
# check if dataset is available in path. If not download it
if len(glob.glob(self.path)) == 0: # dataset does not exist
os.makedirs(path, exist_ok=True)
print('Dataset not available locally. Starting download ...')
id = '1Ue4XohCOV5YXy57S_5tDfCVqzLr101M7'
download_cmd = 'wget --load-cookies /tmp/cookies.txt '\
+ '"https://docs.google.com/uc?export=download&confirm='\
+ '$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate '\
+ f"'https://docs.google.com/uc?export=download&id={id}' -O- | "\
+ f"sed -rn \'s/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p\')&id={id}"\
+ f'" -O {path}/driving_dataset.zip && rm -rf /tmp/cookies.txt'
print(download_cmd)
os.system(download_cmd + f' >> {path}/download.log')
print('Download complete.')
print('Extracting data (this may take a while) ...')
os.system(
f'unzip {path}/driving_dataset.zip -d {path} >> '
f'{path}/unzip.log'
)
print('Extraction complete.')
with open(os.path.join(self.path, 'data.txt'), 'r') as data:
all_samples = [line.split() for line in data]
# this is what seems to be done in https://github.com/lhzlhz/PilotNet
if visualize is True:
inds = np.arange(len(all_samples))
self.samples = [all_samples[i] for i in inds]
else:
inds = np.random.RandomState(seed=42).permutation(len(all_samples))
if train is True:
self.samples = [
all_samples[i] for i in inds[:int(len(all_samples) * 0.8)]
]
else:
self.samples = [
all_samples[i] for i in inds[-int(len(all_samples) * 0.2):]
]
self.size = size
self.transform = transform
self.sample_offset = sample_offset
def __getitem__(self, index: int) -> Tuple[np.ndarray, float]:
index = (index + self.sample_offset) % len(self.samples)
image = Image.open(
os.path.join(self.path, self.samples[index][0])
).resize(self.size, resample=Image.BILINEAR)
image = np.array(image) / 255
if self.transform is not None:
image = 2 * self.transform['weight'] * image \
- self.transform['weight'] + self.transform['bias']
image = image.astype(np.int32).transpose([1, 0, 2])
ground_truth = float(self.samples[index][1])
if ground_truth == 0:
ground_truth = (
float(self.samples[index - 1][1])
+ float(self.samples[index + 1][1])
) / 2
gt_val = ground_truth * np.pi / 180
print(f'\rSample: {index}', end='')
return image, gt_val
def __len__(self) -> int:
return len(self.samples)
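# Hedged usage sketch (assumes the driving_dataset is already available under
# '../data'; the transform values are illustrative and mirror how __getitem__
# consumes the 'weight'/'bias' keys):
if __name__ == '__main__':
    dataset = PilotNetDataset(
        path='../data',
        transform={'weight': 128, 'bias': 128},  # maps [0, 1] pixels to roughly [0, 256)
        train=True,
    )
    image, angle = dataset[0]
    print(image.shape, angle)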
|
from numpy import *
class PSO(object):
    '''particle swarm optimization
    f: fitness function
    cmp_f: comparison function (True if the first fitness is better)
    init_f: particle initialization function
    p_size: number of particles
    bound_f: bound-check function (returns bool)
    vmax: maximum moving velocity
    gen_rate: GA-like rate; roughly that fraction of particles (the tail of the
              pbest-sorted swarm) is regenerated around the global best
    gen_r: Gaussian radius used for that regeneration
    nor_perceptron: number of local sense points sampled per particle
    nor_r: Gaussian radius of the sensed area
    '''
    def __init__(self, f, cmp_f, init_f,
                 p_size, bound_f, vmax,
                 w=0.8, c1=2, c2=2, maxiter=2000,
                 plot_f=None,
                 gen_rate=0.0, gen_r=0.3,
                 nor_perceptron=0, nor_r=0.2, init_x=None):
        pbest = array([None] * p_size)
        pbest_pos = array([None] * p_size)
        gbest = None
        gbest_pos = None
        v = list(random.rand(p_size) * vmax)
        p = []
        if init_x is None:
            for i in range(p_size):
                p.append(init_f())
        else:
            p = init_x
        p = array(p)
        for iteration in range(maxiter):
            for ind, ele in enumerate(p):
                best_f = f(ele)
                # Sample nor_perceptron nearby points and keep the best of them.
                for i in range(nor_perceptron):
                    t = array([random.normal(x, nor_r) for x in ele])
                    tmp = f(t)
                    if best_f is None or cmp_f(tmp, best_f):
                        best_f = tmp
                        p[ind] = t
                        ele = t
                if pbest[ind] is None or cmp_f(best_f, pbest[ind]):
                    pbest[ind] = best_f
                    pbest_pos[ind] = ele.copy()
                if gbest is None or cmp_f(best_f, gbest):
                    gbest = best_f
                    gbest_pos = ele.copy()
            for ind, ele in enumerate(p):
                v[ind] = w * v[ind]\
                    + c1 * random.rand() * (pbest_pos[ind] - ele)\
                    + c2 * random.rand() * (gbest_pos - ele)
                if any(v[ind] > vmax):
                    v[ind] = vmax
                p[ind] += v[ind]
                if not bound_f(p[ind]):
                    p[ind] = init_f()
            print('the %dth iter:\tbest fitness: f(' % iteration, gbest_pos, ')=', gbest, end='\r')
            # Regenerate the tail of the pbest-sorted swarm around gbest_pos.
            survive = int((1 - gen_rate) * p_size)
            idx = pbest.argsort()
            pbest = pbest[idx]
            pbest_pos = pbest_pos[idx]
            p = p[idx]
            for ind in range(survive + 1, len(p)):
                p[ind] = array([random.normal(x, gen_r) for x in gbest_pos])
            if plot_f is not None:
                plot_f(p, gbest_pos, gbest, iteration)
        print('best fitness: f(', gbest_pos, ')=', gbest)
        self.gbest = gbest
    def get_x(self):
        return self.gbest
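# Hedged usage sketch: minimize f(x) = x1**2 + x2**2 over [-5, 5]^2 with a
# lower-is-better comparison. Everything below is illustrative and not part of
# the original class.
if __name__ == '__main__':
    dim = 2
    fitness = lambda x: float((x ** 2).sum())
    lower_is_better = lambda a, b: a < b
    init_particle = lambda: random.uniform(-5, 5, dim)
    in_bounds = lambda x: bool((abs(x) <= 5).all())
    pso = PSO(fitness, lower_is_better, init_particle,
              p_size=20, bound_f=in_bounds, vmax=1.0, maxiter=200)
    print('best fitness found:', pso.get_x())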
|
import pytest
from iteretor_tutorial.itertools_examples import *
@pytest.fixture
def sequences():
return ["agctctt", "tggtgta", "gcttagt", "aaaagtctgt", "cccta"]
@pytest.fixture
def lines():
return ["# header1", "# header2", "# header3", "0", "10", "20"]
@pytest.fixture
def ages():
return [10, 15, 17, 19, 12]
def test_map(sequences):
assert list(map_example(sequences))==list(map(len, sequences))
def test_filter(sequences):
assert list(filter_example(sequences))==list(filter(lambda s: len(s)>5, sequences))
def test_longest(sequences):
lens = [2, 2, 2, 4, 3]
for seq, l in zip(sequences, lens):
assert longest_repeat(seq) == l
def test_max_value():
def func(x, y):
return x**2-2*y+y**2+1
assert max_of_function(func) == max(func(x, y) for x, y in product(range(100), repeat=2))
def test_location_from_steps(ages):
assert list(location_from_steps(ages, start=3)) == list(accumulate(ages, initial=3))
def test_comment(lines):
assert list(read_numbers(lines)) == list(int(line) for line in dropwhile(lambda line: line.startswith("#"), lines))
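# Hedged sketch of implementations consistent with the tests above (this is an
# illustration only, not the actual iteretor_tutorial.itertools_examples
# module; accumulate(..., initial=...) needs Python 3.8+):
#
#   from itertools import groupby, product, accumulate, dropwhile
#
#   def map_example(sequences):
#       return map(len, sequences)
#
#   def filter_example(sequences):
#       return filter(lambda s: len(s) > 5, sequences)
#
#   def longest_repeat(seq):
#       # length of the longest run of identical consecutive characters
#       return max(len(list(group)) for _, group in groupby(seq))
#
#   def max_of_function(func):
#       return max(func(x, y) for x, y in product(range(100), repeat=2))
#
#   def location_from_steps(steps, start=0):
#       return accumulate(steps, initial=start)
#
#   def read_numbers(lines):
#       return (int(line) for line in dropwhile(lambda line: line.startswith("#"), lines))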
|
class Env:
def __init__(self, pre=None):
self.table = {}
self.pre = pre
def put(self, w, i):
self.table.update({w: i})
def get(self, w):
e = self
while e:
found = e.table.get(w)
            if found is not None:
return found
e = e.pre
return None
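# Hedged usage sketch: chained environments resolve a name in the nearest
# enclosing scope (the identifiers below are illustrative).
if __name__ == '__main__':
    global_env = Env()
    global_env.put('x', 1)
    local_env = Env(pre=global_env)
    local_env.put('y', 2)
    print(local_env.get('y'))  # 2, bound locally
    print(local_env.get('x'))  # 1, found in the enclosing environment
    print(local_env.get('z'))  # None, not bound anywhere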
|
"""Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
                compression type ('not compressed' for linear samples)
getparams() -- returns a namedtuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import builtins
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'i'
import audioop
import struct
import sys
from chunk import Chunk
from collections import namedtuple
_wave_params = namedtuple('_wave_params',
'nchannels sampwidth framerate nframes comptype compname')
class Wave_read:
"""Variables used in this class:
    These variables are available to the user through appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
                available through the getcompname() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != b'RIFF':
raise Error('file does not start with RIFF id')
if self._file.read(4) != b'WAVE':
raise Error('not a WAVE file')
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == b'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == b'data':
if not self._fmt_chunk_read:
raise Error('data chunk before fmt chunk')
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error('fmt chunk and/or data chunk missing')
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
self._file = None
file = self._i_opened_the_file
if file:
self._i_opened_the_file = None
file.close()
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return _wave_params(self.getnchannels(), self.getsampwidth(),
self.getframerate(), self.getnframes(),
self.getcomptype(), self.getcompname())
def getmarkers(self):
return None
def getmark(self, id):
raise Error('no marks')
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error('position not in range')
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return b''
data = self._data_chunk.read(nframes * self._framesize)
if self._sampwidth != 1 and sys.byteorder == 'big':
data = audioop.byteswap(data, self._sampwidth)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack_from('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack_from('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error('unknown format: %r' % (wFormatTag,))
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if nchannels < 1:
raise Error('bad # of channels')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if sampwidth < 1 or sampwidth > 4:
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error('sample width not set')
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if framerate <= 0:
raise Error('bad frame rate')
self._framerate = int(round(framerate))
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if comptype not in ('NONE',):
raise Error('unsupported compression type')
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error('not all parameters set')
return _wave_params(self._nchannels, self._sampwidth, self._framerate,
self._nframes, self._comptype, self._compname)
def setmark(self, id, pos, name):
raise Error('setmark() not supported')
def getmark(self, id):
raise Error('no marks')
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
if not isinstance(data, (bytes, bytearray)):
data = memoryview(data).cast('B')
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth != 1 and sys.byteorder == 'big':
data = audioop.byteswap(data, self._sampwidth)
self._file.write(data)
self._datawritten += len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
try:
if self._file:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
finally:
self._file = None
file = self._i_opened_the_file
if file:
self._i_opened_the_file = None
file.close()
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error('# channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('sampling rate not specified')
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write(b'RIFF')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
try:
self._form_length_pos = self._file.tell()
except (AttributeError, OSError):
self._form_length_pos = None
self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, b'WAVE', b'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, b'data'))
if self._form_length_pos is not None:
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open # B/W compatibility
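# Hedged usage sketch for the module above ('open' here is the module-level
# open() defined a few lines up, not builtins.open; 'input.wav' is an
# illustrative path to an existing PCM WAVE file):
if __name__ == '__main__':
    with open('input.wav', 'rb') as rf:
        params = rf.getparams()
        frames = rf.readframes(rf.getnframes())
    with open('copy.wav', 'wb') as wf:
        wf.setparams(params)
        wf.writeframes(frames)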
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
API JSON validators.
"""
import abc
import base64
import re
import jsonschema as schema
from ldap3.core import exceptions as ldap_exceptions
from ldap3.utils.dn import parse_dn
from OpenSSL import crypto
from oslo_utils import timeutils
import six
from barbican.api import controllers
from barbican.common import config
from barbican.common import exception
from barbican.common import hrefs
from barbican.common import utils
from barbican import i18n as u
from barbican.model import models
from barbican.model import repositories as repo
from barbican.plugin.interface import secret_store
from barbican.plugin.util import mime_types
DEFAULT_MAX_SECRET_BYTES = config.DEFAULT_MAX_SECRET_BYTES
LOG = utils.getLogger(__name__)
CONF = config.CONF
MYSQL_SMALL_INT_MAX = 32767
ACL_OPERATIONS = ['read', 'write', 'delete', 'list']
def secret_too_big(data):
if isinstance(data, six.text_type):
return len(data.encode('UTF-8')) > CONF.max_allowed_secret_in_bytes
else:
return len(data) > CONF.max_allowed_secret_in_bytes
def get_invalid_property(validation_error):
# we are interested in the second item which is the failed propertyName.
if validation_error.schema_path and len(validation_error.schema_path) > 1:
return validation_error.schema_path[1]
def validate_stored_key_rsa_container(project_id, container_ref, req):
try:
container_id = hrefs.get_container_id_from_ref(container_ref)
except Exception:
reason = u._("Bad Container Reference {ref}").format(
ref=container_ref
)
raise exception.InvalidContainer(reason=reason)
container_repo = repo.get_container_repository()
container = container_repo.get_container_by_id(entity_id=container_id,
suppress_exception=True)
if not container:
reason = u._("Container Not Found")
raise exception.InvalidContainer(reason=reason)
if container.type != 'rsa':
reason = u._("Container Wrong Type")
raise exception.InvalidContainer(reason=reason)
ctxt = controllers._get_barbican_context(req)
inst = controllers.containers.ContainerController(container)
controllers._do_enforce_rbac(inst, req,
controllers.containers.CONTAINER_GET,
ctxt)
class ValidatorBase(object, metaclass=abc.ABCMeta):
"""Base class for validators."""
name = ''
@abc.abstractmethod
def validate(self, json_data, parent_schema=None):
"""Validate the input JSON.
:param json_data: JSON to validate against this class' internal schema.
:param parent_schema: Name of the parent schema to this schema.
:returns: dict -- JSON content, post-validation and
: normalization/defaulting.
:raises: schema.ValidationError on schema violations.
"""
def _full_name(self, parent_schema=None):
"""Validator schema name accessor
Returns the full schema name for this validator,
including parent name.
"""
schema_name = self.name
if parent_schema:
schema_name = u._(
"{schema_name}' within '{parent_schema_name}").format(
schema_name=self.name,
parent_schema_name=parent_schema)
return schema_name
def _assert_schema_is_valid(self, json_data, schema_name):
"""Assert that the JSON structure is valid for the given schema.
:raises: InvalidObject exception if the data is not schema compliant.
"""
try:
schema.validate(json_data, self.schema)
except schema.ValidationError as e:
raise exception.InvalidObject(schema=schema_name,
reason=e.message,
property=get_invalid_property(e))
def _assert_validity(self, valid_condition, schema_name, message,
property):
"""Assert that a certain condition is met.
:raises: InvalidObject exception if the condition is not met.
"""
if not valid_condition:
raise exception.InvalidObject(schema=schema_name, reason=message,
property=property)
class NewSecretValidator(ValidatorBase):
"""Validate a new secret."""
def __init__(self):
self.name = 'Secret'
# TODO(jfwood): Get the list of mime_types from the crypto plugins?
self.schema = {
"type": "object",
"properties": {
"name": {"type": ["string", "null"], "maxLength": 255},
"algorithm": {"type": "string", "maxLength": 255},
"mode": {"type": "string", "maxLength": 255},
"bit_length": {
"type": "integer",
"minimum": 1,
"maximum": MYSQL_SMALL_INT_MAX
},
"expiration": {"type": "string", "maxLength": 255},
"payload": {"type": "string"},
"secret_type": {
"type": "string",
"maxLength": 80,
"enum": [secret_store.SecretType.SYMMETRIC,
secret_store.SecretType.PASSPHRASE,
secret_store.SecretType.PRIVATE,
secret_store.SecretType.PUBLIC,
secret_store.SecretType.CERTIFICATE,
secret_store.SecretType.OPAQUE]
},
"payload_content_type": {
"type": ["string", "null"],
"maxLength": 255
},
"payload_content_encoding": {
"type": "string",
"maxLength": 255,
"enum": [
"base64"
]
},
"transport_key_needed": {
"type": "string",
"enum": ["true", "false"]
},
"transport_key_id": {"type": "string"},
},
}
def validate(self, json_data, parent_schema=None):
"""Validate the input JSON for the schema for secrets."""
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
json_data['name'] = self._extract_name(json_data)
expiration = self._extract_expiration(json_data, schema_name)
self._assert_expiration_is_valid(expiration, schema_name)
json_data['expiration'] = expiration
content_type = json_data.get('payload_content_type')
if 'payload' in json_data:
content_encoding = json_data.get('payload_content_encoding')
self._validate_content_parameters(content_type, content_encoding,
schema_name)
payload = self._extract_payload(json_data)
self._assert_validity(payload, schema_name,
u._("If 'payload' specified, must be non "
"empty"),
"payload")
self._validate_payload_by_content_encoding(content_encoding,
payload, schema_name)
json_data['payload'] = payload
elif 'payload_content_type' in json_data:
# parent_schema would be populated if it comes from an order.
self._assert_validity(parent_schema is not None, schema_name,
u._("payload must be provided when "
"payload_content_type is specified"),
"payload")
if content_type:
self._assert_validity(
mime_types.is_supported(content_type),
schema_name,
u._("payload_content_type is not one of {supported}"
).format(supported=mime_types.SUPPORTED),
"payload_content_type")
return json_data
def _extract_name(self, json_data):
"""Extracts and returns the name from the JSON data."""
name = json_data.get('name')
if isinstance(name, six.string_types):
return name.strip()
return None
def _extract_expiration(self, json_data, schema_name):
"""Extracts and returns the expiration date from the JSON data."""
expiration = None
expiration_raw = json_data.get('expiration')
if expiration_raw and expiration_raw.strip():
try:
expiration_tz = timeutils.parse_isotime(expiration_raw.strip())
expiration = timeutils.normalize_time(expiration_tz)
except ValueError:
LOG.exception("Problem parsing expiration date")
raise exception.InvalidObject(
schema=schema_name,
reason=u._("Invalid date for 'expiration'"),
property="expiration")
return expiration
def _assert_expiration_is_valid(self, expiration, schema_name):
"""Asserts that the given expiration date is valid.
Expiration dates must be in the future, not the past.
"""
if expiration:
# Verify not already expired.
utcnow = timeutils.utcnow()
self._assert_validity(expiration > utcnow, schema_name,
u._("'expiration' is before current time"),
"expiration")
def _validate_content_parameters(self, content_type, content_encoding,
schema_name):
"""Content parameter validator.
Check that the content_type, content_encoding and the parameters
that they affect are valid.
"""
self._assert_validity(
content_type is not None,
schema_name,
u._("If 'payload' is supplied, 'payload_content_type' must also "
"be supplied."),
"payload_content_type")
self._assert_validity(
mime_types.is_supported(content_type),
schema_name,
u._("payload_content_type is not one of {supported}"
).format(supported=mime_types.SUPPORTED),
"payload_content_type")
self._assert_validity(
mime_types.is_content_type_with_encoding_supported(
content_type,
content_encoding),
schema_name,
u._("payload_content_encoding is not one of {supported}").format(
supported=mime_types.get_supported_encodings(content_type)),
"payload_content_encoding")
def _validate_payload_by_content_encoding(self, payload_content_encoding,
payload, schema_name):
if payload_content_encoding == 'base64':
try:
base64.b64decode(payload)
except Exception:
LOG.exception("Problem parsing payload")
raise exception.InvalidObject(
schema=schema_name,
reason=u._("Invalid payload for payload_content_encoding"),
property="payload")
def _extract_payload(self, json_data):
"""Extracts and returns the payload from the JSON data.
:raises: LimitExceeded if the payload is too big
"""
payload = json_data.get('payload', '')
if secret_too_big(payload):
raise exception.LimitExceeded()
return payload.strip()
class NewSecretMetadataValidator(ValidatorBase):
"""Validate new secret metadata."""
def __init__(self):
self.name = 'SecretMetadata'
self.schema = {
"type": "object",
"$schema": "http://json-schema.org/draft-03/schema",
"properties": {
"metadata": {"type": "object", "required": True},
}
}
def validate(self, json_data, parent_schema=None):
"""Validate the input JSON for the schema for secret metadata."""
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
return self._extract_metadata(json_data)
def _extract_metadata(self, json_data):
"""Extracts and returns the metadata from the JSON data."""
metadata = json_data['metadata']
for key in list(metadata):
# make sure key is a string and url-safe.
if not isinstance(key, six.string_types):
raise exception.InvalidMetadataRequest()
self._check_string_url_safe(key)
# make sure value is a string.
value = metadata[key]
if not isinstance(value, six.string_types):
raise exception.InvalidMetadataRequest()
# If key is not lowercase, then change it
if not key.islower():
del metadata[key]
metadata[key.lower()] = value
return metadata
def _check_string_url_safe(self, string):
"""Checks if string can be part of a URL."""
if not re.match("^[A-Za-z0-9_-]*$", string):
raise exception.InvalidMetadataKey()
class NewSecretMetadatumValidator(ValidatorBase):
"""Validate new secret metadatum."""
def __init__(self):
self.name = 'SecretMetadatum'
self.schema = {
"type": "object",
"$schema": "http://json-schema.org/draft-03/schema",
"properties": {
"key": {
"type": "string",
"maxLength": 255,
"required": True
},
"value": {
"type": "string",
"maxLength": 255,
"required": True
},
},
"additionalProperties": False
}
def validate(self, json_data, parent_schema=None):
"""Validate the input JSON for the schema for secret metadata."""
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
key = self._extract_key(json_data)
value = self._extract_value(json_data)
return {"key": key, "value": value}
def _extract_key(self, json_data):
"""Extracts and returns the metadata from the JSON data."""
key = json_data['key']
self._check_string_url_safe(key)
key = key.lower()
return key
def _extract_value(self, json_data):
"""Extracts and returns the metadata from the JSON data."""
value = json_data['value']
return value
def _check_string_url_safe(self, string):
"""Checks if string can be part of a URL."""
if not re.match("^[A-Za-z0-9_-]*$", string):
raise exception.InvalidMetadataKey()
class CACommonHelpersMixin(object):
def _validate_subject_dn_data(self, subject_dn):
"""Confirm that the subject_dn contains valid data
Validate that the subject_dn string parses without error
If not, raise InvalidSubjectDN
"""
try:
parse_dn(subject_dn)
except ldap_exceptions.LDAPInvalidDnError:
raise exception.InvalidSubjectDN(subject_dn=subject_dn)
# TODO(atiwari) - Split this validator module and unit tests
# into smaller modules
class TypeOrderValidator(ValidatorBase, CACommonHelpersMixin):
"""Validate a new typed order."""
def __init__(self):
self.name = 'Order'
self.schema = {
"type": "object",
"$schema": "http://json-schema.org/draft-03/schema",
"properties": {
"meta": {
"type": "object",
"required": True
},
"type": {
"type": "string",
"required": True,
"enum": ['key', 'asymmetric', 'certificate']
}
}
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
order_type = json_data.get('type').lower()
if order_type == models.OrderType.CERTIFICATE:
certificate_meta = json_data.get('meta')
self._validate_certificate_meta(certificate_meta, schema_name)
elif order_type == models.OrderType.ASYMMETRIC:
asymmetric_meta = json_data.get('meta')
self._validate_asymmetric_meta(asymmetric_meta, schema_name)
elif order_type == models.OrderType.KEY:
key_meta = json_data.get('meta')
self._validate_key_meta(key_meta, schema_name)
else:
self._raise_feature_not_implemented(order_type, schema_name)
return json_data
def _validate_key_meta(self, key_meta, schema_name):
"""Validation specific to meta for key type order."""
secret_validator = NewSecretValidator()
secret_validator.validate(key_meta, parent_schema=self.name)
self._assert_validity(key_meta.get('payload') is None,
schema_name,
u._("'payload' not allowed "
"for key type order"), "meta")
# Validation secret generation related fields.
# TODO(jfwood): Invoke the crypto plugin for this purpose
self._validate_meta_parameters(key_meta, "key", schema_name)
def _validate_asymmetric_meta(self, asymmetric_meta, schema_name):
"""Validation specific to meta for asymmetric type order."""
# Validate secret metadata.
secret_validator = NewSecretValidator()
secret_validator.validate(asymmetric_meta, parent_schema=self.name)
self._assert_validity(asymmetric_meta.get('payload') is None,
schema_name,
u._("'payload' not allowed "
"for asymmetric type order"), "meta")
self._validate_meta_parameters(asymmetric_meta, "asymmetric key",
schema_name)
def _get_required_metadata_value(self, metadata, key):
data = metadata.get(key, None)
if data is None:
raise exception.MissingMetadataField(required=key)
return data
def _validate_certificate_meta(self, certificate_meta, schema_name):
"""Validation specific to meta for certificate type order."""
self._assert_validity(certificate_meta.get('payload') is None,
schema_name,
u._("'payload' not allowed "
"for certificate type order"), "meta")
if 'profile' in certificate_meta:
if 'ca_id' not in certificate_meta:
raise exception.MissingMetadataField(required='ca_id')
jump_table = {
'simple-cmc': self._validate_simple_cmc_request,
'full-cmc': self._validate_full_cmc_request,
'stored-key': self._validate_stored_key_request,
'custom': self._validate_custom_request
}
request_type = certificate_meta.get("request_type", "custom")
if request_type not in jump_table:
raise exception.InvalidCertificateRequestType(request_type)
jump_table[request_type](certificate_meta)
def _validate_simple_cmc_request(self, certificate_meta):
"""Validates simple CMC (which are PKCS10 requests)."""
request_data = self._get_required_metadata_value(
certificate_meta, "request_data")
self._validate_pkcs10_data(request_data)
def _validate_full_cmc_request(self, certificate_meta):
"""Validate full CMC request.
:param certificate_meta: request data from the order
:raises: FullCMCNotSupported
"""
raise exception.FullCMCNotSupported()
def _validate_stored_key_request(self, certificate_meta):
"""Validate stored-key cert request."""
self._get_required_metadata_value(
certificate_meta, "container_ref")
subject_dn = self._get_required_metadata_value(
certificate_meta, "subject_dn")
self._validate_subject_dn_data(subject_dn)
# container will be validated by validate_stored_key_rsa_container()
extensions = certificate_meta.get("extensions", None)
if extensions:
self._validate_extensions_data(extensions)
def _validate_custom_request(self, certificate_meta):
"""Validate custom data request
We cannot do any validation here because the request
parameters are custom. Validation will be done by the
plugin. We may choose to select the relevant plugin and
call the supports() method to raise validation errors.
"""
pass
def _validate_pkcs10_data(self, request_data):
"""Confirm that the request_data is valid base64 encoded PKCS#10.
Base64 decode the request, if it fails raise PayloadDecodingError.
Then parse data into the ASN.1 structure defined by PKCS10 and
verify the signing information.
        If parsing or verifying fails, raise InvalidPKCS10Data.
"""
try:
csr_pem = base64.b64decode(request_data)
except Exception:
raise exception.PayloadDecodingError()
try:
csr = crypto.load_certificate_request(crypto.FILETYPE_PEM,
csr_pem)
except Exception:
reason = u._("Bad format")
raise exception.InvalidPKCS10Data(reason=reason)
try:
pubkey = csr.get_pubkey()
csr.verify(pubkey)
except Exception:
reason = u._("Signing key incorrect")
raise exception.InvalidPKCS10Data(reason=reason)
def _validate_full_cmc_data(self, request_data):
"""Confirm that request_data is valid Full CMC data."""
"""
TODO(alee-3) complete this function
Parse data into the ASN.1 structure defined for full CMC.
If parsing fails, raise InvalidCMCData
"""
pass
def _validate_extensions_data(self, extensions):
"""Confirm that the extensions data is valid.
:param extensions: base 64 encoded ASN.1 string of extension data
:raises: CertificateExtensionsNotSupported
"""
"""
TODO(alee-3) complete this function
Parse the extensions data into the correct ASN.1 structure.
If the parsing fails, throw InvalidExtensionsData.
For now, fail this validation because extensions parsing is not
supported.
"""
raise exception.CertificateExtensionsNotSupported()
def _validate_meta_parameters(self, meta, order_type, schema_name):
self._assert_validity(meta.get('algorithm'),
schema_name,
u._("'algorithm' is required field "
"for {0} type order").format(order_type),
"meta")
self._assert_validity(meta.get('bit_length'),
schema_name,
u._("'bit_length' is required field "
"for {0} type order").format(order_type),
"meta")
self._validate_bit_length(meta, schema_name)
def _extract_expiration(self, json_data, schema_name):
"""Extracts and returns the expiration date from the JSON data."""
expiration = None
expiration_raw = json_data.get('expiration', None)
if expiration_raw and expiration_raw.strip():
try:
expiration_tz = timeutils.parse_isotime(expiration_raw)
expiration = timeutils.normalize_time(expiration_tz)
except ValueError:
LOG.exception("Problem parsing expiration date")
raise exception.InvalidObject(schema=schema_name,
reason=u._("Invalid date "
"for 'expiration'"),
property="expiration")
return expiration
def _validate_bit_length(self, meta, schema_name):
bit_length = int(meta.get('bit_length'))
if bit_length % 8 != 0:
raise exception.UnsupportedField(field="bit_length",
schema=schema_name,
reason=u._("Must be a"
" positive integer"
" that is a"
" multiple of 8"))
def _raise_feature_not_implemented(self, order_type, schema_name):
raise exception.FeatureNotImplemented(field='type',
schema=schema_name,
reason=u._("Feature not "
"implemented for "
"'{0}' order type")
.format(order_type))
class ACLValidator(ValidatorBase):
"""Validate ACL(s)."""
def __init__(self):
self.name = 'ACL'
self.schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"definitions": {
"acl_defintion": {
"type": "object",
"properties": {
"users": {
"type": "array",
"items": [
{"type": "string", "maxLength": 255}
]
},
"project-access": {"type": "boolean"}
},
"additionalProperties": False
}
},
"type": "object",
"properties": {
"read": {"$ref": "#/definitions/acl_defintion"},
},
"additionalProperties": False
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
return json_data
class ContainerConsumerValidator(ValidatorBase):
"""Validate a Consumer."""
def __init__(self):
self.name = 'Consumer'
self.schema = {
"type": "object",
"properties": {
"URL": {"type": "string", "minLength": 1},
"name": {"type": "string", "maxLength": 255, "minLength": 1}
},
"required": ["name", "URL"]
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
return json_data
class ContainerSecretValidator(ValidatorBase):
"""Validate a Container Secret."""
def __init__(self):
self.name = 'ContainerSecret'
self.schema = {
"type": "object",
"properties": {
"name": {"type": "string", "maxLength": 255},
"secret_ref": {"type": "string", "minLength": 1}
},
"required": ["secret_ref"]
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
return json_data
class ContainerValidator(ValidatorBase):
"""Validator for all types of Container."""
def __init__(self):
self.name = 'Container'
self.schema = {
"type": "object",
"properties": {
"name": {"type": ["string", "null"], "maxLength": 255},
"type": {
"type": "string",
# TODO(hgedikli): move this to a common location
"enum": ["generic", "rsa", "certificate"]
},
"secret_refs": {
"type": "array",
"items": {
"type": "object",
"required": ["secret_ref"],
"properties": {
"name": {
"type": ["string", "null"], "maxLength": 255
},
"secret_ref": {"type": "string", "minLength": 1}
}
}
}
},
"required": ["type"]
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
container_type = json_data.get('type')
secret_refs = json_data.get('secret_refs')
if not secret_refs:
return json_data
secret_refs_names = set(secret_ref.get('name', '')
for secret_ref in secret_refs)
self._assert_validity(
len(secret_refs_names) == len(secret_refs),
schema_name,
u._("Duplicate reference names are not allowed"),
"secret_refs")
# The combination of container_id and secret_id is expected to be
# primary key for container_secret so same secret id (ref) cannot be
# used within a container
secret_ids = set(self._get_secret_id_from_ref(secret_ref)
for secret_ref in secret_refs)
self._assert_validity(
len(secret_ids) == len(secret_refs),
schema_name,
u._("Duplicate secret ids are not allowed"),
"secret_refs")
# Ensure that our secret refs are valid relative to our config, no
# spoofing allowed!
req_host_href = utils.get_base_url_from_request()
for secret_ref in secret_refs:
if not secret_ref.get('secret_ref').startswith(req_host_href):
raise exception.UnsupportedField(
field='secret_ref',
schema=schema_name,
reason=u._(
"Secret_ref does not match the configured hostname, "
"please try again"
)
)
if container_type == 'rsa':
self._validate_rsa(secret_refs_names, schema_name)
elif container_type == 'certificate':
self._validate_certificate(secret_refs_names, schema_name)
return json_data
def _validate_rsa(self, secret_refs_names, schema_name):
required_names = {'public_key', 'private_key'}
optional_names = {'private_key_passphrase'}
contains_unsupported_names = self._contains_unsupported_names(
secret_refs_names, required_names | optional_names)
self._assert_validity(
not contains_unsupported_names,
schema_name,
u._("only 'private_key', 'public_key' and "
"'private_key_passphrase' reference names are "
"allowed for RSA type"),
"secret_refs")
self._assert_validity(
self._has_minimum_required(secret_refs_names, required_names),
schema_name,
u._("The minimum required reference names are 'public_key' and"
"'private_key' for RSA type"),
"secret_refs")
def _validate_certificate(self, secret_refs_names, schema_name):
required_names = {'certificate'}
optional_names = {'private_key', 'private_key_passphrase',
'intermediates'}
contains_unsupported_names = self._contains_unsupported_names(
secret_refs_names, required_names.union(optional_names))
self._assert_validity(
not contains_unsupported_names,
schema_name,
u._("only 'private_key', 'certificate' , "
"'private_key_passphrase', or 'intermediates' "
"reference names are allowed for Certificate type"),
"secret_refs")
self._assert_validity(
self._has_minimum_required(secret_refs_names, required_names),
schema_name,
u._("The minimum required reference name is 'certificate' "
"for Certificate type"),
"secret_refs")
def _contains_unsupported_names(self, secret_refs_names, supported_names):
if secret_refs_names.difference(supported_names):
return True
return False
def _has_minimum_required(self, secret_refs_names, required_names):
if required_names.issubset(secret_refs_names):
return True
return False
def _get_secret_id_from_ref(self, secret_ref):
secret_id = secret_ref.get('secret_ref')
if secret_id.endswith('/'):
secret_id = secret_id.rsplit('/', 2)[1]
elif '/' in secret_id:
secret_id = secret_id.rsplit('/', 1)[1]
return secret_id
class NewTransportKeyValidator(ValidatorBase):
"""Validate a new transport key."""
def __init__(self):
self.name = 'Transport Key'
self.schema = {
"type": "object",
"properties": {
"plugin_name": {"type": "string"},
"transport_key": {"type": "string"},
},
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
plugin_name = json_data.get('plugin_name', '').strip()
self._assert_validity(plugin_name,
schema_name,
u._("plugin_name must be provided"),
"plugin_name")
json_data['plugin_name'] = plugin_name
transport_key = json_data.get('transport_key', '').strip()
self._assert_validity(transport_key,
schema_name,
u._("transport_key must be provided"),
"transport_key")
json_data['transport_key'] = transport_key
return json_data
class ProjectQuotaValidator(ValidatorBase):
"""Validate a new project quota."""
def __init__(self):
self.name = 'Project Quota'
self.schema = {
'type': 'object',
'properties': {
'project_quotas': {
'type': 'object',
'properties': {
'secrets': {'type': 'integer'},
'orders': {'type': 'integer'},
'containers': {'type': 'integer'},
'consumers': {'type': 'integer'},
'cas': {'type': 'integer'}
},
'additionalProperties': False,
}
},
'required': ['project_quotas'],
'additionalProperties': False
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
return json_data
class NewCAValidator(ValidatorBase, CACommonHelpersMixin):
"""Validate new CA(s)."""
def __init__(self):
self.name = 'CA'
self.schema = {
'type': 'object',
'properties': {
'name': {'type': 'string', "minLength": 1},
'subject_dn': {'type': 'string', "minLength": 1},
'parent_ca_ref': {'type': 'string', "minLength": 1},
'description': {'type': 'string'},
},
'required': ['name', 'subject_dn', 'parent_ca_ref'],
'additionalProperties': False
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
subject_dn = json_data['subject_dn']
self._validate_subject_dn_data(subject_dn)
return json_data
class SecretConsumerValidator(ValidatorBase):
"""Validate a new Secret Consumer."""
def __init__(self):
self.name = "Secret Consumer"
self.schema = {
"type": "object",
"properties": {
"service": {
"type": "string",
"maxLength": 255,
"minLength": 1,
},
"resource_type": {
"type": "string",
"maxLength": 255,
"minLength": 1,
},
"resource_id": {"type": "string", "minLength": 1},
},
"required": ["service", "resource_type", "resource_id"],
}
def validate(self, json_data, parent_schema=None):
schema_name = self._full_name(parent_schema)
self._assert_schema_is_valid(json_data, schema_name)
return json_data
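# Hedged usage sketch (illustrative payload only; validators raise
# exception.InvalidObject when the JSON violates their schema):
#
#   validator = NewSecretValidator()
#   cleaned = validator.validate({
#       "name": "AES key",
#       "algorithm": "aes",
#       "bit_length": 256,
#       "secret_type": "symmetric",
#   })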
|
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
# i and j are the pointers to the char that we are going to check,
# if i and j reach their ends simultaneously, the two strings are matched.
# next_try stores a position(coordinates) to record the position of last star
# (plus one, so that it can be the 'next' 'try') and corresponding j.
# next_try can be seen as another possible starting point to check the two strings.
# next_try is updated when we meet a new '*'. We only care about the last '*' because
# the last '*' means that we have already matched as much as we can for p and the last
# '*' can cover any string so we can throw away the previous next_try values and start
# from here. We're greedy.
i, j, next_try = 0, 0, None
while 1:
            # Quick length check: if p minus its '*'s is longer than s, no match is possible
if len(p)-p.count('*')>len(s):
return False
# check if i outbounds p
if i >= len(p):
# both i and j reaches end of their strings simultaneously
if j >= len(s):
return True
# go to next_try
if next_try:
i, j = next_try
if j>=len(s):
# no more try
return False
else:
j+=1
next_try = [i,j]
else:
# if there is no next_try (None), return False
return False
else:
# check if '*'
if p[i] != '*':
# not '*', we compare the two chars if exist else return False
if j>=len(s):
# see if there is next_try (update it first)
if next_try:
i, j = next_try
if j>=len(s):
# cannot update next_try since it's over limit
return False
else:
j+=1
next_try = [i,j]
else:
# if not next_try available, return False
return False
else:
# j<len(s), we can compare the two chars
if (p[i]=='?') or (p[i]==s[j]):
i+=1
j+=1
else:
# if chars are not equal, we need to test next_try (update it first)
if next_try:
next_try=[next_try[0],next_try[1]+1]
i, j = next_try
else:
# if not next_try available, return False
return False
else:
# '*', we need to update branch coordinates for next_try
next_try = [i+1, j]
i, j = next_try
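# Hedged usage sketch (standard wildcard-matching cases; the expected values
# follow from tracing the greedy matcher above):
if __name__ == '__main__':
    solver = Solution()
    assert solver.isMatch('aa', '*') is True
    assert solver.isMatch('adceb', '*a*b') is True
    assert solver.isMatch('cb', '?a') is False
    print('all wildcard-matching checks passed')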
|
from openpyxl import Workbook
from openpyxl.styles import Alignment
class ExcelConverter:
def __init__(self, filename, data):
self.filename = filename
self.workbook = Workbook()
self.data = data
def create_file(self):
new_sheet = self.workbook.active
counter = 1
for statement in self.data:
new_sheet[f'A{counter}'].value = statement.number
new_sheet[f'B{counter}'].value = statement.date
new_sheet[f'C{counter}'].value = statement.description
new_sheet[f'D{counter}'].value = statement.value_date
new_sheet[f'E{counter}'].value = statement.amount
new_sheet[f'F{counter}'].value = statement.currency
new_sheet[f'G{counter}'].value = statement.detail_in_text
new_sheet[f'G{counter}'].alignment = Alignment(wrapText=True)
counter += 1
self.workbook.save(self.filename)
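# Hedged usage sketch ('Statement' is an illustrative stand-in for whatever
# objects the caller supplies; only the attribute names accessed above matter):
if __name__ == '__main__':
    from collections import namedtuple
    Statement = namedtuple('Statement', [
        'number', 'date', 'description', 'value_date',
        'amount', 'currency', 'detail_in_text',
    ])
    rows = [
        Statement(1, '2021-01-05', 'Groceries', '2021-01-06',
                  -42.50, 'EUR', 'Card payment, local store'),
    ]
    ExcelConverter('statements.xlsx', rows).create_file()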
|
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
from zipfile import ZipFile
from . import Reader
class ZIPReader(Reader):
def __init__(self, source, path):
super().__init__(source, path)
with ZipFile(path, "r") as z:
self._content = z.namelist()
if len(self._content) != 1:
raise NotImplementedError("Multi-file zip not yet supported")
def to_pandas(self, **kwargs):
_, ext = os.path.splitext(self._content[0])
if ext not in (".csv", ".txt"):
raise NotImplementedError("File type", ext)
import pandas
options = dict(compression="zip")
options.update(self.source.read_csv_options())
options.update(kwargs)
return pandas.read_csv(self.path, **options)
def reader(source, path, magic):
if magic[:4] == b"PK\x03\x04":
return ZIPReader(source, path)
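# Hedged usage sketch (the 'source' object providing read_csv_options() is
# assumed by the class above and is not defined in this file):
#
#   with open(path, 'rb') as f:
#       magic = f.read(4)
#   r = reader(source, path, magic)      # returns ZIPReader when magic is PK\x03\x04
#   df = r.to_pandas() if r is not None else None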
|
from flask import render_template , request, redirect, url_for
from . import main
from flask_wtf import FlaskForm
from .forms import BlogForm,UpdateProfile,UpdateBlog
from ..models import User,Blog
from .. import db,photos
from ..email import mail_message
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
@main.route('/',methods = ["GET","POST"])
def index():
return render_template('index.html')
@main.route('/blog', methods=['GET', 'POST'])
@login_required
def blog():
form = BlogForm()
    user = current_user
if form.validate_on_submit():
blog = Blog(owner_id=current_user.id, title=form.title.data,
category=form.category.data, content=form.content.data)
form.title.data = ''
form.category.data = ''
form.content.data = ''
db.session.add(blog)
db.session.commit()
return render_template('blog.html', form=form, user=user)
@main.route('/user/<uname>')
@login_required
def profile(uname):
    user = User.query.filter_by(username=uname).first()
    blog = Blog.query.filter_by(owner_id=user.id).all()
return render_template('profile/profile.html', name=current_user.username, email=current_user.email, password=current_user.password, user=user, blog=blog)
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username=uname).first()
if user is None:
return redirect(url_for('.error'))
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', uname=user.username))
return render_template('profile/update.html', form=form)
@main.route('/user/<uname>/update/pic', methods=['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username=uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile', uname=uname))
# @main.route('/user/<uname>/updateblog', methods=['GET', 'POST'])
# @login_required
# def updateblog(uname):
# user = User.query.filter_by(username=uname).first()
# if user is None:
# return redirect(url_for('.error'))
# form = UpdateBlog()
# if form.validate_on_submit():
# content = form.content.data
# blog = Blog(user)
# db.session.add(blog)
# db.session.commit()
# return redirect(url_for('auth.dashboard', uname=user.username))
# return render_template('blogpost/updateblog.html', form=form)
|
from ncls import NCLS
import pickle
import pandas as pd
import numpy as np
starts = np.array(list(reversed([3, 5, 8])), dtype=np.int64)
ends = np.array(list(reversed([6, 7, 9])), dtype=np.int64)
indexes = np.array(list(reversed([0, 1, 2])), dtype=np.int64)
# starts = np.array([3, 5, 8], dtype=np.int64)
# ends = np.array([6, 7, 9], dtype=np.int64)
# indexes = np.array([0, 1, 2], dtype=np.int64)
ncls = NCLS(starts, ends, indexes)
starts2 = np.array([1, 6])
ends2 = np.array([10, 7])
indexes2 = np.array([0, 1])
print(ncls.all_overlaps_both(starts2, ends2, indexes2))
|
#!/home/rathankalluri/pys/aws-django/env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
from django.conf import settings
def secret_for_invitation(request):
if request.path == '/':
return {
'invitation_secret': settings.USER_INVITATION_SECRET
}
else:
return {}
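# Hedged configuration note: a context processor like this is enabled by adding
# its dotted path to TEMPLATES[0]['OPTIONS']['context_processors'] in
# settings.py; the module path below is illustrative.
#
#   'context_processors': [
#       ...,
#       'myproject.context_processors.secret_for_invitation',
#   ]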
|
#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# -*- coding:utf-8 -*-
# @Author : Jiazhixiang
import requests
# import the requests library
from bs4 import BeautifulSoup
# import the BeautifulSoup library
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
}
res_movies = requests.get('https://movie.douban.com/chart', headers=headers)
# fetch the page
bs_movies = BeautifulSoup(res_movies.text, 'html.parser')
# parse the HTML
list_movies = bs_movies.find_all('div', class_='pl2')
# print(list_movies)
# find the smallest enclosing parent tag
# tag_p = list_movies[0].find('p')
# tag_p = list_movies[0].find('div', class_='star clearfix')
# print(tag_p)
# print(tag_p.text)
# # extract the <p> tag from the 0th parent tag
# information = tag_p.text.replace(' ', '').replace('\n', '')
# # basic movie information; use replace() to strip extra spaces and newlines
#
# print('Basic movie information: ' + information)
# print the result
# create an empty list to store the information
list_all = []
for movie in list_movies:
    tag_a = movie.find('a')
    # movie title
    name = tag_a.text.replace(' ', '').replace('\n', '')
    # movie link (detail page URL)
    url = tag_a['href']
    # movie details
    tag_p = movie.find('p', class_='pl')
    mv_information = tag_p.text.replace(' ', '').replace('\n', '')
    # movie rating
    tag_div = movie.find('div', class_="star clearfix")
    rating = tag_div.text.replace(' ', '').replace('\n', '')
    list_all.append([name, url, mv_information, rating])
print(list_all)
|