content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
from celery.app import app_or_default
from django.core.management import BaseCommand
class Command(BaseCommand):
    """Management command that purges all pending messages from the Celery queue."""
    help = 'Flush celery queue'

    def handle(self, *args, **options):
        # purge() returns the number of messages removed from the broker.
        n = app_or_default().control.purge()
        # Write to the command's stdout (works on Python 2 and 3; the original
        # used the Python 2-only print statement, a syntax error on Python 3).
        self.stdout.write('%d message(s) purged.\n' % n)
|
from pyspark.sql import SparkSession
# Import col explicitly: `from pyspark.sql.functions import *` shadows the
# builtins sum/abs/min/max and invites subtle bugs.
from pyspark.sql.functions import col

# Start (or reuse) a Spark session.
spark = SparkSession.builder.getOrCreate()

# Read all review CSVs, inferring the schema from the data.
data = spark.read.csv('/project2/macs30123/AWS_book_reviews/*.csv',
                      header='true',
                      inferSchema='true')

# Recast vote/rating columns to integers (schema inference can leave strings).
data = (data.withColumn('star_rating', col('star_rating').cast('int'))
            .withColumn('total_votes', col('total_votes').cast('int'))
            .withColumn('helpful_votes', col('helpful_votes').cast('int'))
        )

# Total and helpful votes per star rating, highest rating first.
stars_votes = (data.groupBy('star_rating')
                   .sum('total_votes', 'helpful_votes')
                   .sort('star_rating', ascending=False)
               )

# Drop rows with NaN values and then print out resulting data:
stars_votes_clean = stars_votes.dropna()
stars_votes_clean.show()
|
import re
def anyof(*args):
    """Return a non-capturing regex alternation of *args*: ``(?:a|b|...)``."""
    return '(?:{})'.format('|'.join(args))
def join(*args):
    r"""Join regex tokens with optional whitespace (``\s*``).

    A list/tuple argument becomes a single capturing group whose elements are
    themselves joined with ``\s*``.
    """
    sep = r'\s*'
    parts = [('(%s)' % sep.join(arg)) if isinstance(arg, (list, tuple)) else arg
             for arg in args]
    return sep.join(parts)
# --- Primitive regex fragments -------------------------------------------
lparen = r'\('
rparen = r'\)'
colon = r'\:'
asterisk = r'\*'
ws = r'\s*'
sol = r'^'
eol = r'$'
opt = r'?'

# --- Composite keywords ---------------------------------------------------
enum = join('enum', colon)
typedef = 'ctypedef'
pointer = asterisk
struct = join(typedef, 'struct')

# --- MPI type name patterns ----------------------------------------------
basic_type = r'(?:void|int|char\s*\*{1,3})'
integral_type = r'MPI_(?:Aint|Offset|Count|Fint)'
struct_type = r'MPI_(?:Status|F08_status)'
opaque_type = r'MPI_(?:Datatype|Request|Message|Op|Info|Group|Errhandler|Comm|Win|File)'
any_mpi_type = r'(?:%s|%s|%s)' % (struct_type, integral_type, opaque_type)

# Identifier and signature fragments.
upper_name = r'MPI_[A-Z0-9_]+'
camel_name = r'MPI_[A-Z][a-z0-9_]+'
usrfun_name = camel_name + r'_(?:function|fn)'
arg_list = r'.*'
ret_type = r'void|int|double|MPI_Aint'
canyint = anyof(r'int', r'long(?:\s+long)?')
canyptr = join(r'\w+', pointer+'?')

# '#:=' annotation introducing an optional fallback value.
annotation = r'\#\:\='
fallback_value = r'\(?[A-Za-z0-9_\+\-\(\)\*]+\)?'
fallback = r'(?:%s)?' % join (annotation, [fallback_value])

# --- Full-line patterns (compiled at the bottom of the module) -----------
INTEGRAL_TYPE = join( typedef, [canyint], [integral_type], fallback, eol)
STRUCT_TYPE = join( struct, [struct_type], colon+opt, fallback, eol)
OPAQUE_TYPE = join( typedef, canyptr, [opaque_type], eol)
FUNCTION_TYPE = join( typedef, [ret_type], [camel_name], lparen, [arg_list], rparen, fallback, eol)
ENUM_VALUE = join(sol, enum, [upper_name], fallback, eol)
HANDLE_VALUE = join(sol, [opaque_type], [upper_name], fallback, eol)
BASIC_PTRVAL = join(sol, [basic_type, pointer], [upper_name], fallback, eol)
INTEGRAL_PTRVAL = join(sol, [integral_type, pointer], [upper_name], fallback, eol)
STRUCT_PTRVAL = join(sol, [struct_type, pointer], [upper_name], fallback, eol)
FUNCT_PTRVAL = join(sol, [usrfun_name, pointer], [upper_name], fallback, eol)
FUNCTION_PROTO = join(sol, [ret_type], [camel_name], lparen, [arg_list], rparen, fallback, eol)

# Fortran interop: MPI_Type_c2f / MPI_Type_f2c style conversion functions.
fint_type = r'MPI_Fint'
fmpi_type = opaque_type.replace('Datatype', 'Type')
c2f_name = fmpi_type+'_c2f'
f2c_name = fmpi_type+'_f2c'
FUNCTION_C2F = join(sol, [fint_type], [c2f_name], lparen, [opaque_type], rparen, fallback, eol)
FUNCTION_F2C = join(sol, [opaque_type], [f2c_name], lparen, [fint_type], rparen, fallback, eol)

# Lines to skip entirely: cdef lines, private structs, status fields,
# comments, and blank lines.
IGNORE = anyof(join(sol, r'cdef.*', eol),
               join(sol, struct, r'_mpi_\w+_t', eol),
               join(sol, 'int', r'MPI_(?:SOURCE|TAG|ERROR)', eol),
               join(sol, r'#.*', eol),
               join(sol, eol))
# Compile every UPPERCASE pattern string above into a regex object, in place.
glb = globals()
# Collect the names first; the original bound this list to `all`, shadowing
# the builtin of the same name.
_pattern_names = [key for key in dict(glb) if key.isupper()]
for key in _pattern_names:
    glb[key] = re.compile(glb[key])
|
import json
import pytest
import requests
from mockserver_client.mockserver_client import (
MockServerFriendlyClient,
mock_request,
mock_response,
times,
)
from mockserver_client.mockserver_verify_exception import MockServerVerifyException
def test_mock_server_inline_fail() -> None:
    """Expectation mismatch test: the POST below omits ``grant_type``, so the
    mock server's expectation is never met and verification must raise."""
    test_name = "test_mock_server_inline_fail"
    mock_server_url = "http://mock-server:1080"
    mock_client: MockServerFriendlyClient = MockServerFriendlyClient(
        base_url=mock_server_url
    )
    mock_client.clear(f"/{test_name}/*.*")

    expected_request_body = {
        "client_id": "unitypoint_bwell",
        "client_secret": "fake_client_secret",
        "grant_type": "client_credentials",
    }
    token_payload = {
        "token_type": "bearer",
        "access_token": "fake access_token",
        "expires_in": 54000,
    }
    mock_client.expect(
        mock_request(path="/" + test_name, method="POST", body=expected_request_body),
        mock_response(body=json.dumps(token_payload)),
        timing=times(1),
    )

    # Deliberately send a request WITHOUT grant_type so the expectation fails.
    session = requests.Session()
    session.post(
        mock_server_url + "/" + test_name,
        data={"client_id": "unitypoint_bwell", "client_secret": "fake_client_secret"},
    )

    with pytest.raises(MockServerVerifyException):
        mock_client.verify_expectations(test_name=test_name)
|
import itertools
from collections import deque
class STATUS:
    # Execution states for IntcodeComputer.
    READY = 0     # not yet started / runnable
    COMPLETE = 1  # program halted
    WAIT = 2      # blocked waiting for more input
class OP:
    # Intcode opcodes (Advent of Code 2019 day 5/7 instruction set).
    SUM = 1
    MUL = 2
    INPUT = 3
    OUTPUT = 4
    JUMP_IF_TRUE = 5
    JUMP_IF_FALSE = 6
    LESS_THAN = 7
    EQUALS = 8
    HALT = 99
class MODE:
    # Parameter addressing modes.
    POSITION = 0   # parameter is a memory address
    IMMEDIATE = 1  # parameter is a literal value
class Parameter:
    """One instruction operand: its addressing mode plus the raw value."""

    def __init__(self, mode, value):
        # In POSITION mode `value` is a memory address; in IMMEDIATE mode it
        # is used literally.
        self.mode, self.value = mode, value
class Instruction:
    """Decodes one intcode instruction: the low two digits are the opcode and
    the higher digits hold one mode digit per parameter
    (e.g. 1002 -> opcode 2, parameter modes 0, 1, 0)."""

    def __init__(self, opcode, *parameters):
        self.opcode = opcode % 100
        self.parameters = []
        if self.opcode == OP.HALT:
            return
        # Parameter count depends on the opcode family.
        if self.opcode in (OP.INPUT, OP.OUTPUT):
            paramCount = 1
        elif self.opcode in (OP.JUMP_IF_TRUE, OP.JUMP_IF_FALSE):
            paramCount = 2
        else:
            paramCount = 3
        for i in range(paramCount):
            mode = Instruction.GetMode(opcode, i)
            # parameters[0] is the memory slice that follows the opcode.
            self.parameters.append(Parameter(mode, parameters[0][i]))

    @staticmethod
    def GetMode(opcode, i):
        """Extract the mode digit for parameter *i* from the packed opcode.

        Declared @staticmethod: the original relied on class-level access of a
        plain function, which breaks when called through an instance.
        """
        return MODE.IMMEDIATE if opcode // 10 ** (i + 2) % 10 == 1 else MODE.POSITION

    def GetValue(self, index, memory):
        """Resolve 1-based parameter *index* (dereferences POSITION mode)."""
        p = self.parameters[index - 1]
        return p.value if p.mode == MODE.IMMEDIATE else memory[p.value]

    def GetRawValue(self, index, memory):
        """Return the raw, unresolved value of 1-based parameter *index*."""
        p = self.parameters[index - 1]
        return p.value
class IntcodeComputer:
    """Resumable intcode virtual machine.

    The machine can pause on missing input (STATUS.WAIT) and be resumed with
    RunWait(), which is what the day-7 feedback loop needs.
    """

    def __init__(self, memory, phase = None):
        # The program IS the memory and is mutated in place while running.
        self.memory = memory
        # Input queue; the optional amplifier phase setting is consumed first.
        self.input = deque([phase] if phase is not None else [])
        self.ip = 0          # instruction pointer
        self.output = []     # accumulated OUTPUT values
        self.status = STATUS.READY

    def Run(self, input):
        """Run to completion, appending *input* (a list) to the input queue.
        Returns the full output list."""
        self.input += input
        while self.status != STATUS.COMPLETE:
            self.RunWait()
        return self.output

    def RunWait(self, input = None):
        """Run until the program halts or blocks on an empty input queue.

        Returns the most recent output value.
        NOTE(review): if the machine blocks before producing any output,
        ``self.output[-1]`` raises IndexError — callers appear to always prime
        the machine with a phase first; confirm before reuse.
        """
        if input is not None:
            self.input += [input]
        while True:
            opcode = self.memory[self.ip]
            # Memory slice after the opcode: the candidate parameter values.
            rest = self.memory[self.ip +1:]
            instr = Instruction(opcode, rest)
            if instr.opcode == OP.SUM:
                par1 = instr.GetValue(1, self.memory)
                par2 = instr.GetValue(2, self.memory)
                address = instr.GetRawValue(3, self.memory)
                self.memory[address] = par1 + par2
            elif instr.opcode == OP.MUL:
                par1 = instr.GetValue(1, self.memory)
                par2 = instr.GetValue(2, self.memory)
                address = instr.GetRawValue(3, self.memory)
                self.memory[address] = par1 * par2
            elif instr.opcode == OP.INPUT:
                par1 = instr.GetRawValue(1, self.memory)
                if len(self.input) == 0:
                    # No input available: pause so the caller can supply more.
                    self.status = STATUS.WAIT
                    return self.output[-1]
                inp = self.input.popleft()
                self.memory[par1] = inp
            elif instr.opcode == OP.OUTPUT:
                out = instr.GetValue(1, self.memory)
                self.output.append(out)
            elif instr.opcode == OP.EQUALS:
                par1 = instr.GetValue(1, self.memory)
                par2 = instr.GetValue(2, self.memory)
                address = instr.GetRawValue(3, self.memory)
                self.memory[address] = 1 if par1 == par2 else 0
            elif instr.opcode == OP.JUMP_IF_TRUE:
                par1 = instr.GetValue(1, self.memory)
                par2 = instr.GetValue(2, self.memory)
                if par1 != 0:
                    # Jump: set ip directly and skip the normal advance below.
                    self.ip = par2
                    continue
            elif instr.opcode == OP.JUMP_IF_FALSE:
                par1 = instr.GetValue(1, self.memory)
                par2 = instr.GetValue(2, self.memory)
                if par1 == 0:
                    self.ip = par2
                    continue
            elif instr.opcode == OP.LESS_THAN:
                par1 = instr.GetValue(1, self.memory)
                par2 = instr.GetValue(2, self.memory)
                address = instr.GetRawValue(3, self.memory)
                self.memory[address] = 1 if par1 < par2 else 0
            elif instr.opcode == OP.HALT:
                break
            # Advance past the opcode and its parameters.
            self.ip += 1+ len(instr.parameters)
            if self.ip >= len(self.memory):
                break
        self.status = STATUS.COMPLETE
        return self.output[-1]
class AmplifierControllerSoftware:
    """Drives a chain of five intcode amplifiers (AoC 2019 day 7)."""

    def __init__(self, memory):
        self.memory = memory

    def Run(self, rang):
        """Try every phase permutation of *rang* on a serial amplifier chain.

        Returns (best_signal, best_permutation).
        """
        d = {}
        for perm in itertools.permutations(rang, len(rang)):
            out = [0]
            for i in perm:
                input = [i] + out
                # Run on a COPY of the program: IntcodeComputer mutates its
                # memory, and the original code shared self.memory across all
                # permutations, corrupting later runs (Loop already copied).
                out = IntcodeComputer(list(self.memory)).Run(input)
            d[out[-1]] = perm
        result = max(d.keys())
        return result, d[result]

    def Loop(self, rang):
        """Feedback-loop mode: five amplifiers chained circularly until the
        last one halts. Returns (best_signal, best_permutation)."""
        d = {}
        for perm in itertools.permutations(rang, len(rang)):
            # Each amplifier gets its own copy of the program and one phase.
            amp = [IntcodeComputer(list(self.memory), perm[i]) for i in range(5)]
            i = 0
            input = 0
            while True:
                ampX = amp[i]
                if ampX.status == STATUS.COMPLETE:
                    break
                input = ampX.RunWait(input)
                i = (i +1) % 5
            d[amp[4].output[-1]] = perm
        result = max(d.keys())
        # (The original ended with an unreachable `return (0,0)`; removed.)
        return result, d[result]
class Day07:
    """Entry points for Advent of Code 2019 day 7 (parts 1 and 2)."""

    @staticmethod
    def Test1(input):
        """Part 1: serial amplifier chain, phases 0-4."""
        # `with` closes the file promptly (the original leaked the handle);
        # only the first line of the file holds the program.
        with open(input, 'r') as f:
            intCode = [int(s) for s in f.readline().strip().split(',')]
        return AmplifierControllerSoftware(intCode).Run(range(5))

    @staticmethod
    def Test2(input):
        """Part 2: feedback loop, phases 5-9."""
        with open(input, 'r') as f:
            intCode = [int(s) for s in f.readline().strip().split(',')]
        return AmplifierControllerSoftware(intCode).Loop(range(5, 10))
# test 1: serial chains — expected best signals come from the AoC examples.
result = AmplifierControllerSoftware([3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0]).Run(range(5))
print('test 1.1', result, 'OK' if result[0] == 43210 else 'ERROR!')
result = AmplifierControllerSoftware([3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,
                                      24,23,23,4,23,99,0,0]).Run(range(5))
print('test 1.2', result, 'OK' if result[0] == 54321 else 'ERROR!')
result = AmplifierControllerSoftware([3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,
                                      1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0]).Run(range(5))
print('test 1.3', result, 'OK' if result[0] == 65210 else 'ERROR!')
# Raw string for the Windows path: "\D" in a plain string only works because
# it is not a recognized escape, and emits a DeprecationWarning on modern Python.
result = Day07.Test1(r"inputs\Day07_1.txt")
print('test1', result)

# test 2: feedback-loop configuration.
result = AmplifierControllerSoftware([3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,
                                      1001,28,-1,28,1005,28,6,99,0,0,5]).Loop(range(5,10))
print('test 2.1', result, 'OK' if result[0] == 139629729 else 'ERROR!')
result = AmplifierControllerSoftware([3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,
                                      55,26,1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,
                                      1001,55,1,55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,
                                      99,0,0,0,0,10]).Loop(range(5,10))
print('test 2.2', result, 'OK' if result[0] == 18216 else 'ERROR!')
result = Day07.Test2(r"inputs\Day07_1.txt")
print('test2', result)
|
""" Single source of truth for version number """
__version__ = "0.0.10"
|
# coding:utf-8
"""
This code generates tensors for detecting thin edges and boundaries, and their orientations.
"""
import itertools
import math as m
import numpy as np
from slam_recognition.util.attractor import euclidian_attractor_function_generator as __euclid_function_generator
from slam_recognition.util.normalize import normalize_tensor_positive_negative as __normalize_center_surround
from slam_recognition.util.orientation import above_axis_simplex_coordinates as __axis_coordinates
if False:
from typing import List, Callable
from numbers import Real
def stripe_tensor(normal_vector,  # type: List[int]
                  center_in,  # type: List[int]
                  center_out,  # type: List[int]
                  surround_in,  # type: List[int]
                  surround_out,  # type: List[int]
                  attractor_function=__euclid_function_generator,  # type: Callable[[Real], Callable[[Real], Real]]
                  ):
    """Generates a multi-channel stripe tensor. These will isolate the n-1 boundaries, or facets, in n-dimensional
    space. In 3d, they will find faces; in 2d, lines; and in 1d, points. Unlike edge_orientation_detector tensors, they
    will only find these features if they have the correct thickness.

    Note: stripe tensors with 11 or more dimensions may take a while to generate. Make sure you cache those.

    :param attractor_function: function that determines the weights of each point in the tensor based on its distance
        from the central facet.
    :param normal_vector: unit vector pointing outwards from the facet/face/edge_orientation_detector.
    :param center_in: colors added together on points on the edge_orientation_detector.
    :param center_out: colors outputted on points on the edge_orientation_detector.
    :param surround_in: colors subtracted together on points off the edge_orientation_detector
    :param surround_out: colors outputted on points off the edge_orientation_detector.
    """
    assert len(normal_vector) >= 1
    ndim = len(normal_vector)
    # The generator is specialized for this dimensionality.
    attractor_function = attractor_function(ndim)
    if isinstance(normal_vector, list):
        normal_vector = np.asarray(normal_vector)
    # 3^ndim spatial grid with an (out, in) channel matrix at every cell.
    center_surround = np.zeros(shape=[3 for _ in range(ndim)] + [len(center_out), len(center_in)])
    # NOTE(review): np.ndarray() leaves memory uninitialized; every cell is
    # overwritten in the loop below, so this is safe but fragile.
    zero_centered = np.ndarray(shape=[3 for _ in range(ndim)])
    for tup in itertools.product(*[range(3) for _ in range(ndim)]):
        # Signed distance of this grid point along the facet normal
        # (grid coordinates are shifted from {0,1,2} to {-1,0,1}).
        scalar_projection = sum([(t - 1) * n for t, n in zip(tup, normal_vector)])
        projection = normal_vector * scalar_projection
        euclidian_dist = m.sqrt(sum([p ** 2 for p in projection]))
        zero_centered[tup] = attractor_function(euclidian_dist)
    # Balance positive (center) and negative (surround) weights.
    __normalize_center_surround(zero_centered)
    for tup in itertools.product(*[range(3) for _ in range(ndim)]):
        # Negative weights get surround colors, positive weights center colors.
        # NOTE(review): the fallthrough index order differs between the two
        # assignments ((o, i) vs (i, o)) and the i/o loop nesting is the
        # reverse of the declared [len(center_out), len(center_in)] channel
        # shape — confirm intended for non-square channel matrices.
        center_surround[tup] = [[surround_out[o] * surround_in[i] * abs(zero_centered[tup]) if zero_centered[tup] < 0
                                 else center_surround[tuple(tup + (o, i))]
                                 for o in range(len(surround_out))] for i in range(len(surround_in))]
        center_surround[tup] = [[center_out[o] * center_in[i] * abs(zero_centered[tup]) if zero_centered[tup] > 0
                                 else center_surround[tuple(tup + (i, o))]
                                 for o in range(len(center_out))] for i in range(len(center_in))]
    return center_surround
def simplex_stripe_tensors(dimensions,  # type: int
                           centers_in,  # type: List[List[int]]
                           centers_out,  # type: List[List[int]]
                           surrounds_in,  # type: List[List[int]]
                           surrounds_out,  # type: List[List[int]]
                           attractor_function=__euclid_function_generator,
                           # type: Callable[[Real], Callable[[Real], Real]]
                           ):
    """ Generates the minimum number of stripe tensors needed to represent all orientations of thin boundaries in
    n-dimensional space.

    :param dimensions: number of dimensions.
    :param centers_in: list of colors added together on points on the edge_orientation_detector.
    :param centers_out: list of colors outputted on points on the edge_orientation_detector.
    :param surrounds_in: list of colors subtracted together on points off the edge_orientation_detector
    :param surrounds_out: list of colors outputted on points off the edge_orientation_detector.
    :param attractor_function: function that takes in the number of dimensions and outputs a function that takes in
        distances and returns positive values for small distances and negative values for large distances.
    :return: a list of tensors for finding all orientations of boundaries.
    """
    # One stripe tensor per simplex direction; zip stops at the shortest
    # sequence, so the color lists must supply one entry per direction.
    return [stripe_tensor(simplex_vector, center_in, center_out, surround_in, surround_out, attractor_function)
            for simplex_vector, center_in, center_out, surround_in, surround_out
            in zip(__axis_coordinates(dimensions), centers_in, centers_out, surrounds_in, surrounds_out)]
def rgb_2d_stripe_tensors(in_channel=(1, 1, 1)):
    """ Finds stripes and outputs colors based on orientation. For use on 2D sensor input only. 3D will require 4 colors
    to visualize.

    :return: a list of tensors for 2D boundary detection and displaying.
    """
    # Gain applied to every output color weight.
    x = 2
    # NOTE(review): in the surround-out matrix the entry `.25 * x` breaks the
    # symmetry of the other rows (every analogous entry is .5 * x) — possibly
    # a typo for `.5 * x`; confirm against the intended color response.
    return sum(simplex_stripe_tensors(2, [in_channel, in_channel, in_channel],
                                      [[2 * x, -.5 * x, -.5 * x], [-.5 * x, 2 * x, -.5 * x], [-.5 * x, -.5 * x, 2 * x]],
                                      [in_channel, in_channel, in_channel],
                                      [[-2 * x, .5 * x, .5 * x], [.25 * x, -2 * x, .5 * x], [.5 * x, .5 * x, -2 * x]]))
|
#!/usr/bin/python3
from db import Database

# One-shot setup script: create the database schema.
db = Database()
db.create()
|
from functions_elm_aero import *

# Hidden-layer size of the extreme learning machine.
neurons = 8

# Load the dataset and split it into train/test with a holdout strategy.
base, rows = open_csv_aero('../datasets/aero.dat')
train, test = split_dataset(base, rows, "holdout")

# Separate inputs and outputs for each partition.
base_i, base_o = extract_io(base, rows)
train_i, train_o = extract_io(train, len(train))
test_i, test_o = extract_io(test, len(test))

# Train the ELM and evaluate it on the test split.
training_final, weights_hl, weights_ol = train_elm(neurons, train_i, train_o) # hl: Hidden Layer, ol: Output Layer
testing_final = test_elm(test_i, weights_hl, weights_ol)

# Show results...
# NOTE(review): `plt` must be exposed by the wildcard import above — confirm
# functions_elm_aero re-exports matplotlib.pyplot as plt.
plot_results(1, base_i, base_o, '100% Dataset', 'red')
plt.savefig('plot_output/elm_aero1.png')
r2, r2_aj = quality_output(test_o, testing_final)
title1 = 'Neurons: {} / R2: {} / R2 Aj: {}\n'.format(neurons, round(r2, 3), round(r2_aj, 2))
title2 = '33% Dataset: Testing'
plot_results(2, test_i, testing_final, title1+title2, 'blue')
plt.savefig('plot_output/elm_aero2.png')
plot_results(3, base_i, base_o, '', 'red')
plot_results(3, test_i, testing_final, '', 'blue')
plt.savefig('plot_output/elm_aero3.png')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import sorl.thumbnail.fields
import peripteras.kiosks.models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional thumbnail image to Kiosk;
    # the upload path callable lives in the kiosks app models module.

    dependencies = [
        ('kiosks', '0008_kiosk_info'),
    ]

    operations = [
        migrations.AddField(
            model_name='kiosk',
            name='image',
            field=sorl.thumbnail.fields.ImageField(default=b'', upload_to=peripteras.kiosks.models._get_upload_path, blank=True),
        ),
    ]
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["SpecialArrangements"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
# Generated-style wrapper: one CodeSystemConcept constant per code in the
# FHIR encounter-special-arrangements code system.
class SpecialArrangements:
    """
    Special arrangements

    This value set defines a set of codes that can be used to indicate the
    kinds of special arrangements in place for a patients visit.

    Status: draft - Version: 4.0.1

    Copyright None

    http://terminology.hl7.org/CodeSystem/encounter-special-arrangements
    """

    wheel = CodeSystemConcept(
        {
            "code": "wheel",
            "definition": "The patient requires a wheelchair to be made available for the encounter.",
            "display": "Wheelchair",
        }
    )
    """
    Wheelchair

    The patient requires a wheelchair to be made available for the encounter.
    """

    add_bed = CodeSystemConcept(
        {
            "code": "add-bed",
            "definition": "An additional bed made available for a person accompanying the patient, for example a parent accompanying a child.",
            "display": "Additional bedding",
        }
    )
    """
    Additional bedding

    An additional bed made available for a person accompanying the patient, for example a parent accompanying a child.
    """

    # Attribute name carries a trailing underscore because "int" would be
    # confusing next to the builtin type name.
    int_ = CodeSystemConcept(
        {
            "code": "int",
            "definition": "The patient is not fluent in the local language and requires an interpreter to be available. Refer to the Patient.Language property for the type of interpreter required.",
            "display": "Interpreter",
        }
    )
    """
    Interpreter

    The patient is not fluent in the local language and requires an interpreter to be available. Refer to the Patient.Language property for the type of interpreter required.
    """

    att = CodeSystemConcept(
        {
            "code": "att",
            "definition": "A person who accompanies a patient to provide assistive services necessary for the patient's care during the encounter.",
            "display": "Attendant",
        }
    )
    """
    Attendant

    A person who accompanies a patient to provide assistive services necessary for the patient's care during the encounter.
    """

    dog = CodeSystemConcept(
        {
            "code": "dog",
            "definition": "The patient has a guide dog and the location used for the encounter should be able to support the presence of the service animal.",
            "display": "Guide dog",
        }
    )
    """
    Guide dog

    The patient has a guide dog and the location used for the encounter should be able to support the presence of the service animal.
    """

    class Meta:
        # The parsed FHIR resource backing these concepts.
        resource = _resource
|
import pandas as pd
class Statistics(object):
    """Accumulates name/value rows into a chart structure and provides helpers
    to collapse small slices and to classify a series against regex patterns."""

    def __init__(self, series, chart_type='pie'):
        # 'pie' charts store a list of {'name','value'} rows; any other chart
        # type stores parallel 'name'/'value' lists.
        self.chart_type = chart_type
        if chart_type == 'pie':
            self.chart = {'type': chart_type, 'data': []}
        else:
            self.chart = {'type': chart_type, 'data': {'name': [], 'value': []}}

    def append(self, row):
        """Append one {'name': ..., 'value': ...} row to the chart data."""
        if self.chart_type == 'pie':
            self.chart['data'].append(row)
        else:
            self.chart['data']['name'].append(row['name'])
            self.chart['data']['value'].append(row['value'])

    def reduceOther(self, chart, threshold=0.04):
        """Drop entries whose value is at most ``threshold`` of the total.

        Bug fix: the original deleted from the lists while enumerating them,
        which skipped the element following every deletion and could
        desynchronize the name/value lists. We now compute the survivors
        first and rewrite the data in place (slice assignment preserves the
        original in-place-mutation behavior for callers holding references).
        """
        data = chart['data']
        is_pie = chart['type'] == 'pie'
        values = [entry['value'] for entry in data] if is_pie else list(data['value'])
        total = sum(values)
        limit = int(round(threshold * total, 0))
        keep = [i for i, value in enumerate(values) if value > limit]
        if is_pie:
            data[:] = [data[i] for i in keep]
        else:
            data['name'][:] = [data['name'][i] for i in keep]
            data['value'][:] = [data['value'][i] for i in keep]
        return {'type': chart['type'], 'data': data}

    def checkPatterns(self, series, patterns):
        """
        Iterate over patterns and construct the distribution of their
        appearance in the series; leftovers that match no pattern are
        reported as 'unrecognized pattern'.
        """
        col = series
        for desc in patterns:
            # Anchor the pattern so it must cover the whole value.
            pattern = r'^' + patterns[desc] + '$'
            match = col.str.match(pattern)
            # Keep only the non-matching rows for the next pattern.
            # (Series.items replaces the iteritems removed in pandas 2.0.)
            col = pd.Series([col[index] for index, cond in match.items() if not cond])
            try:
                # value_counts is indexed by True/False; [1] is the True count.
                value = int(match.value_counts()[1])
            except KeyError:
                # No row matched: keep the original behavior of not
                # reporting a zero-valued slice (was a silent bare except).
                pass
            else:
                self.append({'name': desc, 'value': value})
            if len(col) == 0:
                break
        if len(col) > 0:
            self.append({'name': 'unrecognized pattern', 'value': len(col)})
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from aquilon.aqdb.model import City
from aquilon.worker.templates import Plenary
from aquilon.worker.templates.panutils import pan_variable
LOGGER = logging.getLogger(__name__)
class PlenaryCity(Plenary):
    """Plenary template generator for City database objects."""
    prefix = "site"

    @classmethod
    def template_name(cls, dbcity):
        # Template path: site/<hub>/<city>/config
        return "%s/%s/%s/config" % (cls.prefix, dbcity.hub.fullname.lower(),
                                    dbcity.name)

    def body(self, lines):
        # Emit the city's timezone as a pan template variable.
        pan_variable(lines, "TIMEZONE", self.dbobj.timezone)

# Register this plenary class as the handler for City objects.
Plenary.handlers[City] = PlenaryCity
|
# Generated by Django 2.2.8 on 2020-01-12 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes the adminprofile text/URL fields
    # optional with empty-string defaults.

    dependencies = [
        ('post', '0007_auto_20200112_1112'),
    ]

    operations = [
        migrations.AlterField(
            model_name='adminprofile',
            name='bio',
            field=models.TextField(blank=True, default='', max_length=150),
        ),
        migrations.AlterField(
            model_name='adminprofile',
            name='city',
            field=models.CharField(blank=True, default='', max_length=15),
        ),
        migrations.AlterField(
            model_name='adminprofile',
            name='github',
            field=models.URLField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='adminprofile',
            name='website',
            field=models.URLField(blank=True, default=''),
        ),
    ]
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# resources, event, comment
class Resources(models.Model):
    # A shared resource that events reference.
    resourcename=models.CharField(max_length=255)
    resourcedescription=models.CharField(max_length=255, null=True, blank=True)

    def __str__(self):
        return self.resourcename

    class Meta:
        db_table='resources'
class Events(models.Model):
    # An event posted by a user against a resource; DO_NOTHING keeps events
    # when the referenced resource/user rows are deleted at the DB level.
    eventname=models.CharField(max_length=255)
    resources=models.ForeignKey(Resources, on_delete=models.DO_NOTHING)
    user=models.ForeignKey(User, on_delete=models.DO_NOTHING)
    evententrydate=models.DateField()
    eventurl=models.URLField(null=True, blank=True)
    eventdescription=models.TextField()

    def __str__(self):
        return self.eventname

    class Meta:
        db_table='event'
class Comments(models.Model):
    # A rated comment on an event; deleting the event cascades to comments.
    commenttitle=models.CharField(max_length=255)
    commentdate=models.DateField()
    event=models.ForeignKey(Events, on_delete=models.CASCADE)
    # NOTE(review): many-to-many named singular 'user' — confirm whether a
    # single-author ForeignKey was intended.
    user=models.ManyToManyField(User)
    commentrating=models.SmallIntegerField()
    commenttext=models.TextField()

    def __str__(self):
        return self.commenttitle

    class Meta:
        db_table='comment'
from django import forms
from django.db.models import fields
from . import models
class SearchForm(forms.Form):
    # Single free-text query field, rendered without a visible label.
    query = forms.CharField(label = "")
class BookForm(forms.ModelForm):
    # Model form exposing every field of Book.
    class Meta:
        model = models.Book
        fields='__all__'
|
class MenuHelper:
    """Navigation helper: opens top-level menu pages of the app under test."""

    def __init__(self, app):
        # app exposes the selenium webdriver as app.wd
        self.app = app

    def _open_link(self, link_text):
        # Shared click-by-link-text navigation used by every menu entry
        # (the original duplicated this in each method).
        self.app.wd.find_element_by_link_text(link_text).click()

    def home(self):
        """Open the 'home' page."""
        self._open_link("home")

    def groups(self):
        """Open the 'groups' page."""
        self._open_link("groups")
|
import numpy as np
import pandas as pd
from BioReactor.utils import loadDataFrame
import matplotlib.pyplot as plt
import os
class perfusionProcessor(object):
    """Creates an object of class perfusionProcessor

    Loads all Excel files of a perfusion experiment, normalizes the column
    names, and offers plotting/inspection helpers for Titer and spectra data.

    Args:
        data_dir: str
            Directory where input data is located
        dataFiles: list of str
            Files to be analyzed
        output_dir: str, default None
            Directory for saving results from any analysis
    """

    def __init__(self, data_dir, dataFiles, logger, colnames=None, output_dir=None):
        self.data_dir = data_dir
        # Strip extensions; every later lookup uses the bare base name.
        self.dataFiles = [file.split('.')[0] for file in dataFiles]
        self.dataFrames = {}
        if output_dir:
            self.output_dir = output_dir
        else:
            # NOTE(review): Windows-specific path handling ('\\' separators);
            # results land two levels above data_dir in a Results folder.
            self.output_dir = '\\'.join(data_dir.split('\\')[:-2]) + '\\Results\\'
        self._load_data(logger)
        self._rename_columns(col_names=colnames)
        self._get_offlineIndices()
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

    def _load_data(self, logger):
        """ Loads all files associated with the perfusion experiment"""
        for file in self.dataFiles:
            filename = self.data_dir + file + '.xlsx'
            self.dataFrames[file] = loadDataFrame(filename=filename, logger=logger)

    def _rename_columns(self, col_names=None):
        """Rename columns in names to arguments in col names"""
        if col_names:
            for file in self.dataFiles:
                self.dataFrames[file].rename(columns=col_names, inplace=True)
        else:
            # Default mapping from raw instrument headers to short names.
            col_names = {
                "Run": "run_id",
                "BID": "batch_n",
                "Amm": "NH3",
                "Viability": "Via",
                "Ext. pH": "ext_pH",
                "%TRY_Aggregation": "%aggr",
                "TRY_Avg.comp": "avg_comp",
                "TRY_celldiameter": "cell_diameter",
                "%DO": "DO",
                "VCD": "Xv",
                "Ext. VCD": "ext_Xv"}
            for file in self.dataFiles:
                self.dataFrames[file].rename(columns=col_names, inplace=True)

    def _get_offlineIndices(self):
        """Gets the offline indices for all files in the pefusion experiment"""
        # Offline samples are the rows where 'run_id' is populated.
        self.offLineIndices = dict()
        for file in self.dataFiles:
            self.offLineIndices[file] = np.where(
                pd.isnull(self.dataFrames[file]['run_id']) == False
            )[0]

    def get_TiterNonNaNs(self, file, ifOffline=False):
        """ Get all those places where Titer is not NaN

        Args:
            file: str
                - filename
            ifOffline: bool, default False
                - if True, restrict to rows that are also offline samples
        Returns:
            indices: list
                - rows in the corresponding dataframe where Titer is not NaN
        """
        titer_vals = self.dataFrames[file]['Titer']
        indices = np.where(pd.isnull(titer_vals) == False)[0]
        if not ifOffline:
            return indices
        else:
            # Intersect with the offline rows.
            offline_indices = self.offLineIndices[file]
            titer_nonNaNOffline = list()
            for index in offline_indices:
                if index in indices:
                    titer_nonNaNOffline.append(index)
            return np.array(titer_nonNaNOffline)

    def _checkTiterLimits(self):
        # Record (per file) the positions among non-NaN titers that exceed 1.
        self.pastTiterLimits = {}
        for file in self.dataFiles:
            titer_nonNans = self.get_TiterNonNaNs(file=file, ifOffline=False)
            titer_vals = self.dataFrames[file]['Titer'].iloc[titer_nonNans]
            self.pastTiterLimits[file] = np.where(titer_vals > 1)[0]

    def get_var(self, file, var=['Titer'], indices=None):
        """For given variable, provides a dictionary with associated
        column values in all files

        Args:
            var: str, default 'Titer'
                - Variable of interest
            indices: list, default None
                - Indices from which values must be retrieved, If None,
                  then retrieves all
        Returns:
            var_dict: dict
                - dictionary with keys as file names and value as retrieved values
        NOTE(review): calling with the default indices=None raises TypeError
        at ``len(indices)`` — every internal caller passes indices explicitly,
        but the documented None behavior is currently unreachable; confirm and
        guard with ``indices is not None`` if the default is meant to work.
        """
        if len(indices):
            return self.dataFrames[file][var].iloc[indices]
        else:
            return self.dataFrames[file][var]

    def checkNaNsInOffline(self, vars=['Titer']):
        """ For variables of interest present in vars, plot the frequency of NaNs
        found in the offline variables"""
        var_interest = {var: [] for var in vars}
        for var in vars:
            files = list()
            for fileNo in np.arange(1, len(self.dataFiles)+1):
                files.append(fileNo)
                file = self.dataFiles[fileNo-1]
                # Fraction of offline rows where this variable is NaN.
                NaNOffline = np.sum(np.isnan(
                    self.dataFrames[file][var].iloc[self.offLineIndices[file]]))
                NaNOffline = NaNOffline/len(self.offLineIndices[file])
                var_interest[var].append(NaNOffline)
            plt.plot(files, var_interest[var], color='r')
            plt.xlabel('File Number')
            plt.ylabel(var + ' Values')
            plotFileName = var + '_NaN_Frequency'
            plt.savefig(self.output_dir + plotFileName)
            plt.close()

    def plotAnimation(self, all=False):
        # Render an animation of spectra next to the growing titer curve,
        # either per file (all=False) or concatenated across files (all=True).
        def animate(spec, titer, save_file):
            import matplotlib.animation as animation
            # Hard-coded local ffmpeg install used as the video writer.
            plt.rcParams['animation.ffmpeg_path'] = 'C:\\Program Files\\ffmpeg\\bin\\ffmpeg.exe'
            Writer = animation.writers['ffmpeg']
            writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
            # Fixed spectrum length; assumes every spectrum has 3326 points —
            # TODO confirm against the input files.
            spec_len = 3326
            x_range = range(spec_len)
            fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
            im1, = ax1.plot(x_range, spec.iloc[0], color='b')
            ax1.set_ylabel('Spectra Values')
            x = [0]
            y = [titer.iloc[0]]
            ax2.plot(x, y, color='r')
            def updatefig(i):
                # Replace the spectrum and extend the titer trace per frame.
                im1.set_data(x_range, spec.iloc[i])
                x.append(i)
                y.append(titer.iloc[i])
                ax2.plot(x, y, color='r')
                ax2.set_ylabel('Titer Values')
            ani = animation.FuncAnimation(fig, updatefig,
                                          frames=np.arange(1, len(titer)),
                                          interval=200)
            fig.tight_layout()
            ani.save(save_file, writer=writer)
        out_dir = self.output_dir + 'Spectra_Vs_Titer_Evolution\\'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        if not all:
            for file in self.dataFiles:
                colList = self.dataFrames[file].columns
                spec_inds = [index for index in range(len(colList))
                             if colList[index].startswith('Spectrum_')]
                spectra_cols = colList[spec_inds]
                indices = self.get_TiterNonNaNs(file=file)
                spectra_val = self.get_var(file=file, var=spectra_cols, indices=indices)
                titer_val = self.get_var(file=file, indices=indices)
                # NOTE(review): `file` already had its extension stripped in
                # __init__, so file.split('.')[:-1] is empty and this joins to
                # '' — every per-file video is saved as '.mp4'; likely should
                # be just `file + '.mp4'`.
                save_file = self.output_dir + 'Spectra_Vs_Titer_Evolution\\' + \
                            ''.join(file.split('.')[:-1]) + '.mp4'
                animate(spec=spectra_val, titer=titer_val, save_file=save_file)
        else:
            # Concatenate spectra/titer from every file into one animation.
            file = self.dataFiles[0]
            colList = self.dataFrames[file].columns
            spec_inds = [index for index in range(len(colList))
                         if colList[index].startswith('Spectrum_')]
            spectra_cols = colList[spec_inds]
            indices = self.get_TiterNonNaNs(file=file)
            spectra_val = self.get_var(file=file, var=spectra_cols, indices=indices)
            titer_val = self.get_var(file=file, indices=indices)
            save_file = self.output_dir + 'Spectra_Vs_Titer_Evolution\\' + 'all.mp4'
            for index in range(1, len(self.dataFiles)):
                file = self.dataFiles[index]
                colList = self.dataFrames[file].columns
                spec_inds = [index for index in range(len(colList))
                             if colList[index].startswith('Spectrum_')]
                spectra_cols = colList[spec_inds]
                indices = self.get_TiterNonNaNs(file=file)
                spectra_val = spectra_val.append(self.get_var(file=file,
                                                 var=spectra_cols, indices=indices))
                titer_val = titer_val.append(self.get_var(file=file, indices=indices))
            animate(spec=spectra_val, titer=titer_val, save_file=save_file)
|
def binary_search(array, data):
    """Return the index of *data* in sorted *array*, or -1 when absent."""
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        found = array[mid]
        if found == data:
            return mid
        if found < data:
            lo = mid + 1      # target is in the upper half
        else:
            hi = mid - 1      # target is in the lower half
    return -1
if __name__ == "__main__":
input_array = [ 2, 3, 4, 10, 40 ]
print(binary_search(input_array, 40))
|
#automate in directory
import xlwt
import os
import sys
import hbond_out
import make_xl_ab
def make_excel(d, suffix):
    """Write one two-column group per H-bond into an .xls workbook.

    Args:
        d: dict mapping bond name -> list of (distance, ah_value, dh_value)
           triples.
        suffix: '_ah' selects the acceptor column/output file, anything else
           the donor ones.
    """
    wb = xlwt.Workbook()
    sheet = wb.add_sheet('Version1')
    col = 0
    for bond in d:
        row = 0
        lis = d[bond]
        # Header cell: the bond name, followed by its value rows.
        sheet.write(row, col, bond)
        row += 1
        for a, b, c in lis:
            sheet.write(row, col, float(a))
            # Second column: acceptor value for '_ah', donor value otherwise.
            if suffix == '_ah':
                sheet.write(row, col + 1, float(b))
            else:
                sheet.write(row, col + 1, float(c))
            row += 1
        col += 2
    # print() works on Python 2 and 3; the original used the Python 2-only
    # print statement, a syntax error on Python 3.
    print('Making Excel files for ' + suffix + ' ...')
    if suffix == '_ah':
        wb.save('allBonds_accep_xtb.xls')
    else:
        wb.save('allBonds_donars_xtb.xls')
def make_output():
    """Process every .xyz file in sys.argv[1] and merge the per-file H-bond
    tables into the combined acceptor/donor spreadsheets."""
    dic = {}
    for i in os.listdir(sys.argv[1]):
        if i[-4:] == '.xyz':
            # print() is Python 2/3 compatible; also fixed the status-message
            # typo ('Procession' -> 'Processing').
            print('Processing the file ' + i)
            #hbond_out.job(i)
            kd = make_xl_ab.xl(i.split('/')[-1].split('.')[0] + '.txt')
            for j in kd:
                if j not in dic:
                    dic[j] = kd[j]
                else:
                    dic[j] += kd[j]
            print('\n')
    make_excel(dic, '_ah')
    make_excel(dic, '_dh')
if __name__ == "__main__":
    # Entry point: process the directory given as the first CLI argument.
    make_output()
|
# Simple interactive demo: collect a few details, then echo them back.
name = input("What is your name?\n")
age = input("How old are you?\n")
city = input("Where do you live?\n")
print("Hello, " + name)  # fixed: missing space after the comma
print("Your age is " + age)
print("You live in " + city)
import json
import unittest
from os.path import dirname, join
from conda.resolve import MatchSpec, Package, Resolve
# Build one shared Resolve instance from the package index stored next to
# this test module; every test case below reads it.
with open(join(dirname(__file__), 'index.json')) as fi:
    r = Resolve(json.load(fi))

f_mkl = set(['mkl'])  # the 'mkl' feature set used by the MKL-variant tests
class TestMatchSpec(unittest.TestCase):
    """Unit tests for MatchSpec matching, filename mapping and hashing."""

    def test_match(self):
        # Every spec is checked against the same fixed filename.
        cases = [('numpy 1.7*', True),
                 ('numpy 1.7.1', True),
                 ('numpy 1.7', False),
                 ('numpy 1.5*', False),
                 ('numpy 1.6*|1.7*', True),
                 ('numpy 1.6*|1.8*', False),
                 ('numpy 1.6.2|1.7*', True),
                 ('numpy 1.6.2|1.7.1', True),
                 ('numpy 1.6.2|1.7.0', False),
                 ('numpy 1.7.1 py27_0', True),
                 ('numpy 1.7.1 py26_0', False),
                 ('python', False)]
        for spec, expected in cases:
            self.assertEqual(
                MatchSpec(spec).match('numpy-1.7.1-py27_0.tar.bz2'),
                expected)

    def test_to_filename(self):
        exact_spec = MatchSpec('foo 1.7 52')
        self.assertEqual(exact_spec.to_filename(), 'foo-1.7-52.tar.bz2')
        # Specs that do not pin name/version/build map to no filename.
        for loose in ('bitarray', 'pycosat 0.6.0', 'numpy 1.6*'):
            self.assertEqual(MatchSpec(loose).to_filename(), None)

    def test_hash(self):
        first = MatchSpec('numpy 1.7*')
        second = MatchSpec('numpy 1.7*')
        self.assertTrue(first is not second)
        self.assertEqual(first, second)
        self.assertEqual(hash(first), hash(second))
        third, fourth = MatchSpec('python'), MatchSpec('python 2.7.4')
        self.assertNotEqual(first, third)
        self.assertNotEqual(hash(first), hash(third))
class TestPackage(unittest.TestCase):
    """Unit tests for Package ordering semantics."""

    def test_llvm(self):
        # Packages matching 'llvm' sort by version, then build number.
        matches = r.find_matches(MatchSpec('llvm'))
        pkgs = sorted(Package(fn, r.index[fn]) for fn in matches)
        self.assertEqual([p.fn for p in pkgs],
                         ['llvm-3.1-0.tar.bz2',
                          'llvm-3.1-1.tar.bz2',
                          'llvm-3.2-0.tar.bz2'])

    def test_different_names(self):
        # Packages with different names refuse to be ordered.
        fns = ['llvm-3.1-1.tar.bz2', 'python-2.7.5-0.tar.bz2']
        pkgs = [Package(fn, r.index[fn]) for fn in fns]
        self.assertRaises(ValueError, pkgs.sort)
class TestSolve(unittest.TestCase):
    """Exercises Resolve.explicit/solve/solve2 against the bundled index.

    The expected dist lists and counts below are tied to the exact
    contents of index.json and must be kept in sync with that file.
    """

    def setUp(self):
        # Reset the resolver's spec cache so tests stay independent.
        r.msd_cache = {}

    def assert_have_mkl(self, dists, names):
        # Every dist whose package name is in *names* must carry the
        # 'mkl' feature.
        for fn in dists:
            if fn.rsplit('-', 2)[0] in names:
                self.assertEqual(r.features(fn), f_mkl)

    def test_explicit0(self):
        self.assertEqual(r.explicit([]), [])

    def test_explicit1(self):
        self.assertEqual(r.explicit(['pycosat 0.6.0 py27_0']), None)
        self.assertEqual(r.explicit(['zlib']), None)
        self.assertEqual(r.explicit(['zlib 1.2.7']), None)
        # because zlib has no dependencies it is also explicit
        self.assertEqual(r.explicit(['zlib 1.2.7 0']),
                         ['zlib-1.2.7-0.tar.bz2'])

    def test_explicit2(self):
        self.assertEqual(r.explicit(['pycosat 0.6.0 py27_0',
                                     'zlib 1.2.7 0']),
                         ['pycosat-0.6.0-py27_0.tar.bz2',
                          'zlib-1.2.7-0.tar.bz2'])
        self.assertEqual(r.explicit(['pycosat 0.6.0 py27_0',
                                     'zlib 1.2.7']), None)

    def test_empty(self):
        self.assertEqual(r.solve([]), [])

    def test_anaconda_14(self):
        specs = ['anaconda 1.4.0 np17py33_0']
        res = r.explicit(specs)
        self.assertEqual(len(res), 51)
        self.assertEqual(r.solve(specs), res)
        # Adding a loose spec makes the set non-explicit, but the solver
        # still arrives at the same result.
        specs.append('python 3.3*')
        self.assertEqual(r.explicit(specs), None)
        self.assertEqual(r.solve(specs), res)

    def test_iopro_nomkl(self):
        self.assertEqual(
            r.solve2(['iopro 1.4*', 'python 2.7*', 'numpy 1.7*'],
                     set()),
            ['iopro-1.4.3-np17py27_p0.tar.bz2',
             'numpy-1.7.1-py27_0.tar.bz2',
             'openssl-1.0.1c-0.tar.bz2',
             'python-2.7.5-0.tar.bz2',
             'readline-6.2-0.tar.bz2',
             'sqlite-3.7.13-0.tar.bz2',
             'system-5.8-1.tar.bz2',
             'tk-8.5.13-0.tar.bz2',
             'unixodbc-2.3.1-0.tar.bz2',
             'zlib-1.2.7-0.tar.bz2'])

    def test_iopro_mkl(self):
        self.assertEqual(
            r.solve2(['iopro 1.4*', 'python 2.7*', 'numpy 1.7*'],
                     f_mkl),
            ['iopro-1.4.3-np17py27_p0.tar.bz2',
             'mkl-rt-11.0-p0.tar.bz2',
             'numpy-1.7.1-py27_p0.tar.bz2',
             'openssl-1.0.1c-0.tar.bz2',
             'python-2.7.5-0.tar.bz2',
             'readline-6.2-0.tar.bz2',
             'sqlite-3.7.13-0.tar.bz2',
             'system-5.8-1.tar.bz2',
             'tk-8.5.13-0.tar.bz2',
             'unixodbc-2.3.1-0.tar.bz2',
             'zlib-1.2.7-0.tar.bz2'])

    def test_mkl(self):
        # 'mkl' as a plain package resolves the same with or without the
        # mkl feature enabled.
        self.assertEqual(r.solve(['mkl'], set()),
                         r.solve(['mkl'], f_mkl))

    def test_accelerate(self):
        self.assertEqual(
            r.solve(['accelerate'], set()),
            r.solve(['accelerate'], f_mkl))

    def test_scipy_mkl(self):
        dists = r.solve(['scipy', 'python 2.7*', 'numpy 1.7*'],
                        features=f_mkl)
        self.assert_have_mkl(dists, ('numpy', 'scipy'))
        self.assertTrue('scipy-0.12.0-np17py27_p0.tar.bz2' in dists)

    def test_anaconda_nomkl(self):
        dists = r.solve(['anaconda 1.5.0', 'python 2.7*', 'numpy 1.7*'])
        self.assertEqual(len(dists), 107)
        self.assertTrue('scipy-0.12.0-np17py27_0.tar.bz2' in dists)

    def test_anaconda_mkl_2(self):
        # to test "with_features_depends"
        dists = r.solve(['anaconda 1.5.0', 'python 2.7*', 'numpy 1.7*'],
                        features=f_mkl)
        self.assert_have_mkl(dists,
                             ('numpy', 'scipy', 'numexpr', 'scikit-learn'))
        self.assertTrue('scipy-0.12.0-np17py27_p0.tar.bz2' in dists)
        self.assertTrue('mkl-rt-11.0-p0.tar.bz2' in dists)
        self.assertEqual(len(dists), 108)
        dists2 = r.solve(['anaconda 1.5.0',
                          'python 2.7*', 'numpy 1.7*', 'mkl'])
        self.assertTrue(set(dists) <= set(dists2))
        self.assertEqual(len(dists2), 110)

    def test_anaconda_mkl_3(self):
        # to test "with_features_depends"
        dists = r.solve(['anaconda 1.5.0', 'python 3*'], features=f_mkl)
        self.assert_have_mkl(dists, ('numpy', 'scipy'))
        self.assertTrue('scipy-0.12.0-np17py33_p0.tar.bz2' in dists)
        self.assertTrue('mkl-rt-11.0-p0.tar.bz2' in dists)
        self.assertEqual(len(dists), 61)
class TestFindSubstitute(unittest.TestCase):
    """Unit tests for Resolve.find_substitute."""

    def setUp(self):
        r.msd_cache = {}

    def test1(self):
        # Each MKL-featured dist substitutes to its non-MKL twin; the
        # mkl runtime itself has no substitute.
        installed = r.solve(['anaconda 1.5.0', 'python 2.7*', 'numpy 1.7*'],
                            features=f_mkl)
        expected_subs = [
            ('numpy-1.7.1-py27_p0.tar.bz2', 'numpy-1.7.1-py27_0.tar.bz2'),
            ('scipy-0.12.0-np17py27_p0.tar.bz2',
             'scipy-0.12.0-np17py27_0.tar.bz2'),
            ('mkl-rt-11.0-p0.tar.bz2', None),
        ]
        for old, new in expected_subs:
            self.assertTrue(old in installed)
            self.assertEqual(r.find_substitute(installed, f_mkl, old), new)
if __name__ == '__main__':
    # Run the whole suite when executed directly.
    unittest.main()
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from common.findit_http_client import FinditHttpClient
from common.waterfall import failure_type
from gae_libs.gitiles.cached_gitiles_repository import CachedGitilesRepository
from libs import time_util
from model import analysis_approach_type
from model.base_build_model import BaseBuildModel
from model.wf_suspected_cl import WfSuspectedCL
def GetCLInfo(cl_info_str):
    """Parses a '/'-separated CL string into [repo_name, revision]."""
    parts = cl_info_str.split('/')
    return parts
def _GetsStatusFromSameFailure(builds, failures):
for build in builds.values():
if build['status'] is not None and build['failures'] == failures:
return build['status']
return None
@ndb.transactional
def UpdateSuspectedCL(repo_name, revision, commit_position, approach,
                      master_name, builder_name, build_number, cl_failure_type,
                      failures, top_score):
    """Creates or updates a WfSuspectedCL entity inside a transaction.

    Records that the CL (repo_name, revision) is suspected for failures of
    the given build, tracking which analysis approaches implicated it and
    the failure types involved.
    """
    suspected_cl = (
        WfSuspectedCL.Get(repo_name, revision) or
        WfSuspectedCL.Create(repo_name, revision, commit_position))
    if not suspected_cl.identified_time:  # pragma: no cover.
        suspected_cl.identified_time = time_util.GetUTCNow()
    suspected_cl.updated_time = time_util.GetUTCNow()
    # Track every distinct approach / failure type that implicated this CL.
    if approach not in suspected_cl.approaches:
        suspected_cl.approaches.append(approach)
    if cl_failure_type not in suspected_cl.failure_type:
        suspected_cl.failure_type.append(cl_failure_type)
    build_key = BaseBuildModel.CreateBuildKey(master_name, builder_name,
                                              build_number)
    if build_key not in suspected_cl.builds:
        # First record for this build: inherit the status from any
        # already-recorded build with the identical failure set.
        suspected_cl.builds[build_key] = {
            'approaches': [approach],
            'failure_type': cl_failure_type,
            'failures': failures,
            'status': _GetsStatusFromSameFailure(suspected_cl.builds, failures),
            'top_score': top_score
        }
    else:
        build = suspected_cl.builds[build_key]
        if approach not in build['approaches']:
            build['approaches'].append(approach)
    suspected_cl.put()
def _RoundConfidentToInteger(confidence):
return int(round(confidence * 100))
def _ConfidenceForApproaches(cl_build, heuristic_try_job_conf, try_job_conf,
                             heuristic_confidences):
    """Maps a build's approach combination to the matching confidence.

    Args:
      cl_build (dict): build info with 'approaches' and 'top_score'.
      heuristic_try_job_conf: confidence record for heuristic + try-job.
      try_job_conf: confidence record for try-job only.
      heuristic_confidences: iterable of per-score heuristic records.

    Returns:
      The confidence as an integer percentage, or None if no rule matches.
    """
    approaches = cl_build['approaches']
    if sorted(approaches) == sorted(
        [analysis_approach_type.HEURISTIC, analysis_approach_type.TRY_JOB]):
        return _RoundConfidentToInteger(heuristic_try_job_conf.confidence)
    if approaches == [analysis_approach_type.TRY_JOB]:
        return _RoundConfidentToInteger(try_job_conf.confidence)
    if approaches == [analysis_approach_type.HEURISTIC] and cl_build['top_score']:
        # Heuristic-only results are scored per top_score bucket.
        for confidence_info in heuristic_confidences:
            if confidence_info.score == cl_build['top_score']:
                return _RoundConfidentToInteger(confidence_info.confidence)
    return None


def GetSuspectedCLConfidenceScore(confidences, cl_from_analyzed_build):
    """Returns the confidence (int percentage) for a suspected CL's build.

    The compile and test branches previously duplicated the same decision
    tree; they now share _ConfidenceForApproaches and differ only in which
    confidence records they consult.
    """
    if not confidences or not cl_from_analyzed_build:
        return None
    if cl_from_analyzed_build['failure_type'] == failure_type.COMPILE:
        return _ConfidenceForApproaches(
            cl_from_analyzed_build,
            confidences.compile_heuristic_try_job,
            confidences.compile_try_job,
            confidences.compile_heuristic)
    return _ConfidenceForApproaches(
        cl_from_analyzed_build,
        confidences.test_heuristic_try_job,
        confidences.test_try_job,
        confidences.test_heuristic)
def _HasNewFailures(current_failures, new_failures):
"""Checks if there are any new failures in the current build."""
if current_failures == new_failures:
return False
for step, tests in current_failures.iteritems():
if not new_failures.get(step): # New step.
return True
for test in tests:
if not test in new_failures[step]: # New test.
return True
return False
def GetSuspectedCLConfidenceScoreAndApproach(
        confidences, cl_from_analyzed_build, cl_from_first_failed_build):
    """Returns (confidence, approach) for a suspected CL.

    Prefers the record from the first failed build when the analyzed
    build adds no new failures, since try-job results are only recorded
    on the first failed build.
    """
    if not confidences or (not cl_from_analyzed_build and
                           not cl_from_first_failed_build):
        return None, None
    use_first_build = (
        cl_from_first_failed_build and
        (not cl_from_analyzed_build or not _HasNewFailures(
            cl_from_analyzed_build.get('failures'),
            cl_from_first_failed_build.get('failures'))))
    if use_first_build:
        # For non-first-time failures, the try job result is not recorded.
        # With no new failures in the current build, fall back to the first
        # failed build so the confidence score is correct.
        cl_from_analyzed_build = cl_from_first_failed_build
    confidence = GetSuspectedCLConfidenceScore(confidences,
                                               cl_from_analyzed_build)
    if analysis_approach_type.TRY_JOB in cl_from_analyzed_build['approaches']:
        approach = analysis_approach_type.TRY_JOB
    else:
        approach = analysis_approach_type.HEURISTIC
    return confidence, approach
@ndb.transactional
def UpdateCulpritNotificationStatus(culprit_urlsafe_key, new_status):
    """Updates a culprit (WfSuspectedCL, FlakeCulprit)'s status.

    Args:
      culprit_urlsafe_key (str): A urlsafe key corresponding to the culprit to
        update.
      new_status: The notification status to record on the culprit.
    """
    culprit = ndb.Key(urlsafe=culprit_urlsafe_key).get()
    assert culprit
    culprit.cr_notification_status = new_status
    if culprit.cr_notified:
        # Record when the code-review notification actually went out.
        culprit.cr_notification_time = time_util.GetUTCNow()
    culprit.put()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import inspect
import functools
from chemsolve.element import Element
from chemsolve.utils.periodictable import PeriodicTable
from chemsolve.utils.errors import InvalidElementError
from chemsolve.utils import constants
def check_empty_values(*params, allow=1, maybe_less=False):
    """A decorator validating how many of *params* are passed non-None.

    Inspects a decorated function's call arguments and requires that
    exactly `allow` of the named parameters (or fewer, when `maybe_less`
    is True) are not None; otherwise a ValueError is raised.

    Parameters
    ----------
    params: str
        The parameter names to check in the function's signature.
    allow: int
        How many of them should not be None.
    maybe_less: bool
        Whether fewer than `allow` non-None values are also acceptable.
    """
    def outer_decorator(f):
        @functools.wraps(f)
        def inner_decorator(*args, **kwargs):
            call_args = inspect.getcallargs(f, *args, **kwargs)
            # Count the watched parameters that actually received a value.
            provided = sum(
                1 for name, value in call_args.items()
                if name in params and value is not None)
            acceptable = (provided == allow
                          or (maybe_less and provided < allow))
            if not acceptable:
                err_msg_value = f"{allow}" \
                    + (" or less" if maybe_less else "")
                raise ValueError(
                    f"Received an invalid number of arguments, "
                    f"expected {err_msg_value}, "
                    f"got {provided}.")
            return f(*args, **kwargs)
        return inner_decorator
    return outer_decorator
def is_valid_element(element):
    """Checks whether a provided object is a valid element.

    Returns True for an existing ``Element`` instance, otherwise True
    only when the object appears among the periodic table's symbols.
    """
    # If the object is already an Element, return True.
    if isinstance(element, Element):
        return True
    # Otherwise, check whether it is in the periodic table.  list() keeps
    # the original iterate-the-values semantics without building a
    # throwaway comprehension.
    return element in list(PeriodicTable()['Symbol'])
def maybe_elements(*elements):
    """Validates that each argument is an Element or the symbol of one.

    Strings naming a valid element are converted to Element instances;
    anything else raises InvalidElementError.  Returns the converted
    list in input order.
    """
    resolved = []
    for candidate in elements:
        if isinstance(candidate, Element):
            # Already an element object; keep as-is.
            resolved.append(candidate)
        elif isinstance(candidate, str):
            # A symbol string: validate, then wrap it.
            if is_valid_element(candidate):
                resolved.append(Element(candidate))
            else:
                raise InvalidElementError(candidate)
        else:
            raise InvalidElementError(
                candidate, property_type = "type")
    return resolved
def resolve_float_or_constant(input_value, accept_none = True):
    """Resolves an input as either a number or a named chemistry constant.

    Parameters
    ----------
    input_value: float, int, str or None
        A number is returned unchanged; a string is looked up in the
        `constants` module; None is passed through when `accept_none`.

    Raises
    ------
    ValueError for a disallowed None, AttributeError for an unknown
    constant name, TypeError for any other input type.
    """
    if input_value is None:
        if accept_none:
            return None
        else:
            raise ValueError(
                f"Received invalid value {type(input_value)}."
            )
    if isinstance(input_value, (float, int)):
        return input_value
    elif isinstance(input_value, str):
        try:
            return getattr(constants, input_value)
        except AttributeError:
            # `from None` suppresses the noisy implicit context of the
            # getattr failure; the message already names the bad constant.
            raise AttributeError(
                f"Received invalid constant value {input_value}."
            ) from None
    else:
        raise TypeError(
            "Received invalid float input, "
            "expected either a constant or a number."
        )
def assert_chemical_presence(initial, determiner):
    """Checks that every item of `initial` also appears in `determiner`.

    Items may exist in `determiner` without being in `initial`, but not
    the other way around.  Traditionally used to verify that every
    reactant of a chemical reaction also appears as a product.

    Parameters
    ----------
    initial: list or set or tuple
        The items that must all be present in both collections.
    determiner: list or set or tuple
        The collection `initial` is checked against.

    Raises
    ------
    ValueError when any item of `initial` is missing from `determiner`.
    """
    missing = [item for item in initial if item not in determiner]
    if missing:
        raise ValueError("A reactant must also be a product in a reaction.")
|
def is_url(url: str) -> bool:
    """Return True if *url* parses as a valid URL.

    NOTE(review): ``URL`` is imported at the *bottom* of this module —
    presumably to avoid a circular import.  The name is resolved at call
    time, so the late import works; confirm before reordering imports.
    """
    return URL(url).is_url
from urlfinderlib.url import URL
from urlfinderlib.urlfinderlib import get_url_permutations, find_urls
|
'''
На плоскости дано множество точек.
Найти такой треугольник с вершинами в этих точках,
у которого разность площадей треугольника и вписанного круга максимальна.
'''
from math import pi, sqrt
from tkinter import Tk, Canvas, Label, Entry, Button, DISABLED, messagebox, Menu
# Window/canvas geometry and the float-comparison tolerance.
# A garbled fused line ("WINDOW_WIDTH40WINDOW_HEIGHT = 630") was removed:
# the fused name is never referenced and both real values are set below.
WINDOW_WIDTH = 1500
WINDOW_HEIGHT = 1000
CANVAS_WIDTH = WINDOW_WIDTH - 280
CANVAS_HEIGHT = WINDOW_HEIGHT - 120
EPS = 1e-6  # tolerance for comparing canvas coordinates
def read_point(field_x, index):
    """Parse one coordinate out of the entry widget *field_x*.

    Returns (0, value) on success; on a parse failure shows a warning
    dialog for point number *index* + 1 and returns (1, 0).
    """
    raw = field_x.get()
    try:
        value = float(raw)
    except:
        messagebox.showwarning("Ошибка",
            "Неверно введены координаты точки №%d!\n"
            "Ожидался ввод двух действительных чисел." %(index + 1))
        return 1, 0
    return 0, value
def tranc_coord(y):
    """Flips a logical y coordinate into canvas orientation."""
    return CANVAS_HEIGHT - y
def tranc_coord_back(y):
    """Flips a canvas y coordinate back into logical orientation."""
    return CANVAS_HEIGHT - y
def search_coef_scaling():
    """Computes scale factors to fit all points onto the canvas.

    Reads the global ``point_coord`` list (whose y values are stored
    canvas-flipped).  Returns (k_x, k_y, x_min, y_min): a scale factor
    per axis (0 when the points are degenerate along that axis) and the
    bounding-box origin in logical coordinates.
    """
    x_min = point_coord[0][0]
    y_min = point_coord[0][1]
    x_max = point_coord[0][0]
    y_max = point_coord[0][1]
    # min/max look swapped for y because stored y values are canvas-
    # flipped (tranc_coord); they are flipped back just below.
    for i in point_coord:
        x_min = min(i[0], x_min)
        y_min = max(i[1], y_min)
        x_max = max(i[0], x_max)
        y_max = min(i[1], y_max)
    y_min = tranc_coord_back(y_min)
    y_max = tranc_coord_back(y_max)
    # 0.8 leaves a 10% margin on each side of the canvas.
    if x_max != x_min:
        k_x = (0.8 * CANVAS_WIDTH) / (x_max - x_min)
    else:
        k_x = 0
    if y_max != y_min:
        k_y = (0.8 * CANVAS_HEIGHT) / (y_max - y_min)
    else:
        k_y = 0
    return k_x, k_y, x_min, y_min
def build_points():
    """Reads every entry row, rescales the points to fit the canvas and
    draws them.  Returns 1 on any input error, None on success."""
    point_coord.clear()
    new_point_coord.clear()
    canvas.delete("all")
    if numb_points == 0:
        messagebox.showwarning("Ошибка",
            "Для начала работы необходимо задать кол-во точек!")
        return 1
    for i in range(numb_points):
        r, x = read_point(point_list[i][1], i)
        if (r):
            return 1
        r, y = read_point(point_list[i][2], i)
        if (r):
            return 1
        y = tranc_coord(y)  # store y flipped into canvas orientation
        point_coord.append([x, y])
    k_x, k_y, x_min, y_min = search_coef_scaling()
    # Margins: 10% where the axis has spread, otherwise center at 50%.
    if k_x != 0 and k_y != 0:
        indent_x = 0.1 * CANVAS_WIDTH
        indent_y = 0.1 * CANVAS_HEIGHT
        k_x = min(k_x, k_y)  # keep aspect ratio: one scale for both axes
        k_y = k_x
    elif k_x == 0 and k_y != 0:
        indent_x = 0.5 * CANVAS_WIDTH
        indent_y = 0.1 * CANVAS_HEIGHT
    elif k_x != 0 and k_y == 0:
        indent_x = 0.1 * CANVAS_WIDTH
        indent_y = 0.5 * CANVAS_HEIGHT
    else:
        indent_x = 0.5 * CANVAS_WIDTH
        indent_y = 0.5 * CANVAS_HEIGHT
    # Draw each point and its "index [x,y]" caption.
    for i in range(numb_points):
        x = (point_coord[i][0] - x_min) * k_x + indent_x
        y = tranc_coord((tranc_coord_back(point_coord[i][1]) - y_min) * k_y + indent_y)
        new_point_coord.append([x, y])
        r = 3.5
        canvas.create_oval(x - r, y - r, x + r, y + r,
            width = 1, outline = "red", fill = "red")
        canvas.create_text(x, y - 15,
            text = "%d [%.1f,%.1f]" %(i + 1, point_coord[i][0], tranc_coord_back(point_coord[i][1])),
            font = ("Courier New", 16, "bold"), fill = "darkmagenta")
def sides_triangle(triangle):
    """Returns the side lengths (ab, bc, ac) for three [x, y] vertices."""
    a, b, c = triangle[0], triangle[1], triangle[2]

    def length(p, q):
        return sqrt((q[0] - p[0]) ** 2 + (q[1] - p[1]) ** 2)

    return length(a, b), length(b, c), length(a, c)
def triangle_check(triangle):
    """Returns (1, ab, bc, ac) when the vertices form a proper triangle
    (strict triangle inequality), otherwise (0, 0, 0, 0)."""
    ab, bc, ac = sides_triangle(triangle)
    degenerate = (ac >= ab + bc) or (bc >= ab + ac) or (ab >= bc + ac)
    if degenerate:
        return 0, 0, 0, 0
    return 1, ab, bc, ac
def area_difference(ab, bc, ac):
    """Area of the triangle minus the area of its inscribed circle.

    Uses Heron's formula; the inradius is area / semiperimeter.
    """
    semi = (ab + bc + ac) / 2
    triangle_area = sqrt(semi * (semi - ab) * (semi - bc) * (semi - ac))
    in_radius = triangle_area / semi
    circle_area = pi * in_radius ** 2
    return triangle_area - circle_area
def radius_inscribed_circle(triangle):
    """Radius of the circle inscribed in *triangle*: area / semiperimeter."""
    ab, bc, ac = sides_triangle(triangle)
    semi = (ab + bc + ac) / 2
    return sqrt(semi * (semi - ab) * (semi - bc) * (semi - ac)) / semi
def line(p1, p2):
    """Coefficients (A, B, C) of the line A*x + B*y = C through p1, p2."""
    a_coef = p1[1] - p2[1]
    b_coef = p2[0] - p1[0]
    c_coef = p2[0] * p1[1] - p1[0] * p2[1]
    return a_coef, b_coef, c_coef
def intersection(L1, L2):
    """Intersection (x, y) of two lines in (A, B, C) form, via Cramer's
    rule; returns False when the lines are parallel."""
    det = L1[0] * L2[1] - L1[1] * L2[0]
    if det == 0:
        return False
    det_x = L1[2] * L2[1] - L1[1] * L2[2]
    det_y = L1[0] * L2[2] - L1[2] * L2[0]
    return det_x / det, det_y / det
def build_triangle():
    """Finds, over all point triples, the triangle maximizing
    (triangle area - inscribed-circle area) and draws it with its
    incircle; the winning vertex indices go into ``triangle_points``."""
    if numb_points == 0:
        messagebox.showwarning("Ошибка",
            "Для начала работы необходимо задать кол-во точек!")
        return
    elif (numb_points < 3):
        messagebox.showwarning("Ошибка",
            "Для построения треугольника необходимо задать не менее 3 точек!")
        return
    r = build_points()
    if (r):
        return
    canvas.delete("all")
    res_triangle = []
    res_area_diff = 0
    # Exhaustive search over all C(n, 3) vertex triples.
    for i in range(len(point_coord)):
        for j in range(i + 1, len(point_coord)):
            for u in range(j + 1, len(point_coord)):
                triangle = [new_point_coord[i], new_point_coord[j], new_point_coord[u]]
                r, ab, bc, ac = triangle_check(triangle)
                if (r):
                    area_diff = area_difference(ab, bc, ac)
                    if area_diff > res_area_diff:
                        res_triangle = [new_point_coord[i], new_point_coord[j], new_point_coord[u]]
                        res_area_diff = area_diff
                        triangle_points[0] = i
                        triangle_points[1] = j
                        triangle_points[2] = u
    if len(res_triangle) != 0:
        canvas.create_line(res_triangle[0], res_triangle[1], width = 4, fill = "red")
        canvas.create_line(res_triangle[1], res_triangle[2], width = 4, fill = "red")
        canvas.create_line(res_triangle[0], res_triangle[2], width = 4, fill = "red")
        # Combine the edge lines at two vertices, each weighted by the
        # other's normal length — presumably the internal angle
        # bisectors; their intersection is the incircle center.
        # TODO(review): confirm the bisector construction.
        L1 = line(res_triangle[0], res_triangle[1])
        L2 = line(res_triangle[0], res_triangle[2])
        den1 = sqrt(L2[0] ** 2 + L2[1] ** 2)
        den2 = sqrt(L1[0] ** 2 + L1[1] ** 2)
        L12_b = [L1[0] * den1 + L2[0] * den2,
                 L1[1] * den1 + L2[1] * den2,
                 L1[2] * den1 + L2[2] * den2]
        L3 = line(res_triangle[0], res_triangle[2])
        L4 = line(res_triangle[1], res_triangle[2])
        den3 = sqrt(L4[0] ** 2 + L4[1] ** 2)
        den4 = sqrt(L3[0] ** 2 + L3[1] ** 2)
        L34_b = [L3[0] * den3 + L4[0] * den4,
                 L3[1] * den3 + L4[1] * den4,
                 L3[2] * den3 + L4[2] * den4]
        x, y = intersection(L12_b, L34_b)
        R = radius_inscribed_circle(res_triangle)
        if R > 4:
            R -= 4  # shrink so the 4-px outline stays inside the sides
        canvas.create_oval(x - R, y - R, x + R, y + R, width = 4, outline = "blue")
    else:
        messagebox.showwarning("Ошибка",
            "Невозможно построить треугольник!")
        return
    min_y = min(res_triangle[0][1], res_triangle[1][1], res_triangle[2][1])
    max_y = max(res_triangle[0][1], res_triangle[1][1], res_triangle[2][1])
    # Redraw every point; captions are placed above or below so they do
    # not overlap the triangle's edges.
    for i in range(numb_points):
        x = new_point_coord[i][0]
        y = new_point_coord[i][1]
        r = 3.5
        canvas.create_oval(x - r, y - r, x + r, y + r,
            width = 1, outline = "black", fill = "black")
        for j in range(len(triangle_points)):
            if i != triangle_points[j] and j == len(triangle_points) - 1:
                # Not a triangle vertex: default caption above the point.
                canvas.create_text(x, y - 15,
                    text = "%d [%.1f,%.1f]" %(i + 1, point_coord[i][0], tranc_coord_back(point_coord[i][1])),
                    font = ("Courier New", 16, "bold"), fill = "darkmagenta")
            elif i == triangle_points[j]:
                if abs(y - min_y) < EPS:
                    canvas.create_text(x, y - 15,
                        text = "%d [%.1f,%.1f]" %(i + 1, point_coord[i][0], tranc_coord_back(point_coord[i][1])),
                        font = ("Courier New", 16, "bold"), fill = "darkmagenta")
                elif abs(y - max_y) < EPS:
                    canvas.create_text(x, y + 15,
                        text = "%d [%.1f,%.1f]" %(i + 1, point_coord[i][0], tranc_coord_back(point_coord[i][1])),
                        font = ("Courier New", 16, "bold"), fill = "darkmagenta")
                else:
                    # Middle vertex: pick the side away from the nearer
                    # of the two remaining vertices.
                    res_triangle.pop(j)
                    res_triangle.sort(key = lambda array: array[1])
                    x_min_y = res_triangle[0][0]
                    x_max_y = res_triangle[1][0]
                    if abs(x - x_min_y) > abs(x - x_max_y):
                        canvas.create_text(x, y - 15,
                            text = "%d [%.1f,%.1f]" %(i + 1, point_coord[i][0], tranc_coord_back(point_coord[i][1])),
                            font = ("Courier New", 16, "bold"), fill = "darkmagenta")
                    else:
                        canvas.create_text(x, y + 15,
                            text = "%d [%.1f,%.1f]" %(i + 1, point_coord[i][0], tranc_coord_back(point_coord[i][1])),
                            font = ("Courier New", 16, "bold"), fill = "darkmagenta")
                break
def enter_points(numb_points):
    """(Re)creates the label + X/Y entry rows for *numb_points* points."""
    clear_table()
    for i in range(numb_points):
        point_list[i][0] = Label(window, text = str(i + 1) + ") ", font = ("Courier New", 16))
        point_list[i][0].place(width = 40, height = 30, x = WINDOW_WIDTH - 245, y = 70 + i * 40)
        point_list[i][1] = Entry(window, font = ("Courier New", 16))
        point_list[i][1].place(width = 90, height = 30, x = WINDOW_WIDTH - 215, y = 70 + i * 40)
        point_list[i][2] = Entry(window, font = ("Courier New", 16))
        point_list[i][2].place(width = 90, height = 30, x = WINDOW_WIDTH - 115, y = 70 + i * 40)
def read_numb_points():
    """Reads the requested point count (1..10) and builds the entry table."""
    global numb_points
    string = point_txt.get()
    try:
        numb_points = int(string)
        # NOTE(review): on an out-of-range value the global numb_points
        # keeps the rejected number — confirm this is intended.
        if numb_points < 1 or numb_points > 10:
            messagebox.showwarning("Ошибка",
                "Неверно введено кол-во точек!\n"
                "Ожидался ввод целого числа от 1 до 10.")
            return
    except:
        messagebox.showwarning("Ошибка",
            "Неверно введено кол-во точек!\n"
            "Ожидался ввод целого числа от 1 до 10.")
        return
    clear_fields(del_point_txt)
    del_point_txt.insert(0, numb_points)
    enter_points(numb_points)
    point_list[0][1].focus()
def add_point():
    """Appends one empty entry row, up to the 10-point limit."""
    global numb_points, triangle_points
    if numb_points < 10:
        i = numb_points
        numb_points += 1
        point_list[i][0] = Label(window, text = str(i + 1) + ") ", font = ("Courier New", 16))
        point_list[i][0].place(width = 40, height = 30, x = WINDOW_WIDTH - 245, y = 70 + i * 40)
        point_list[i][1] = Entry(window, font = ("Courier New", 16))
        point_list[i][1].place(width = 90, height = 30, x = WINDOW_WIDTH - 215, y = 70 + i * 40)
        point_list[i][2] = Entry(window, font = ("Courier New", 16))
        point_list[i][2].place(width = 90, height = 30, x = WINDOW_WIDTH - 115, y = 70 + i * 40)
        point_list[i][1].focus()
        # Keep both count fields in sync with the new total.
        clear_fields(point_txt)
        point_txt.insert(0, numb_points)
        clear_fields(del_point_txt)
        del_point_txt.insert(0, numb_points)
        triangle_points = [-1, -1, -1]  # the previous triangle is stale now
    else:
        messagebox.showinfo("Замечание",
            "Нельзя использовать больше 10 точек!")
def del_point():
    """Deletes the point whose 1-based number is typed in del_point_txt."""
    global numb_points, triangle_points
    numb_p = len(point_coord)
    if numb_p == 0:
        messagebox.showwarning("Ошибка", "Нет заданных точек!")
        return
    string = del_point_txt.get()
    try:
        number = int(string)
        if number < 1 or number > numb_p:
            messagebox.showwarning("Ошибка",
                "Неверно введен номер удаляемой точки!\n"
                "Ожидался ввод целова числа от 1 до %d." %(numb_p))
            return
    except:
        messagebox.showwarning("Ошибка",
            "Неверно введён номер точки!\n"
            "Ожидался ввод целова числа от 1 до %d." %(numb_p))
        return
    # Preserve the text of every surviving row, rebuild the table, then
    # restore the text into the freshly created widgets.
    points_str_array = []
    for i in range(numb_points):
        if i + 1 != number:
            points_str_array.append([point_list[i][1].get(), point_list[i][2].get()])
    numb_points -= 1
    clear_table()
    enter_points(numb_points)
    point_coord.pop(number - 1)
    new_point_coord.pop(number - 1)
    for i in range(numb_points):
        point_list[i][1].insert(0, points_str_array[i][0])
        point_list[i][2].insert(0, points_str_array[i][1])
    clear_fields(point_txt)
    point_txt.insert(0, numb_points)
    clear_fields(del_point_txt)
    del_point_txt.insert(0, numb_points)
    triangle_points = [-1, -1, -1]  # the previous triangle is stale now
def display_results():
    """Shows a dialog with the winning triangle's vertices and the
    triangle-minus-incircle area difference."""
    if triangle_points[0] == -1:
        messagebox.showwarning("Ошибка",
            "Перед выводом результатов необходимо построить треугольник!")
        return
    # Convert stored (canvas-flipped) coordinates back to logical ones.
    triangle = [[point_coord[triangle_points[0]][0], tranc_coord_back(point_coord[triangle_points[0]][1])],
                [point_coord[triangle_points[1]][0], tranc_coord_back(point_coord[triangle_points[1]][1])],
                [point_coord[triangle_points[2]][0], tranc_coord_back(point_coord[triangle_points[2]][1])]]
    ab, bc, ac = sides_triangle(triangle)
    area_dif = area_difference(ab, bc, ac)
    print(triangle_points)  # NOTE(review): debug leftover — consider removing
    messagebox.showinfo("Результаты работы программы",
        "Условие задачи:\n\tНа плоскости дано множество точек. "
        "Найти такой треугольник с вершинами в этих точках, "
        "у которого разность площадей треугольника и вписанного круга максимальна.\n\n"
        "\t\tРезультаты вычислений\n\n"
        "Вершины треугольника:\n\t%2d) [%.2f, %.2f]\n\t%2d) [%.2f, %.2f]\n\t%2d) [%.2f, %.2f]\n\n"
        "Разность площадей = %.2f"
        %(triangle_points[0] + 1, triangle[0][0], triangle[0][1],
          triangle_points[1] + 1, triangle[1][0], triangle[1][1],
          triangle_points[2] + 1, triangle[2][0], triangle[2][1],
          area_dif))
def clear_fields(field):
    """Empties an Entry-like widget one trailing character at a time."""
    for position in range(len(field.get()) - 1, -1, -1):
        field.delete(position)
def clear_table():
    """Hides every point row (label plus both entry fields)."""
    for row in point_list[:10]:
        for widget in row:
            widget.place_forget()
def clear_canvas():
    """Resets the whole application state: points, fields and drawing.

    Fix: the per-point entry fields are cleared BEFORE zeroing
    numb_points — the original zeroed the counter first, so its
    ``for i in range(numb_points)`` loop never ran.
    """
    global numb_points, triangle_points
    for i in range(numb_points):
        clear_fields(point_list[i][1])
        clear_fields(point_list[i][2])
    numb_points = 0
    point_coord.clear()
    new_point_coord.clear()
    triangle_points = [-1, -1, -1]
    clear_fields(point_txt)
    clear_fields(del_point_txt)
    canvas.delete("all")
    clear_table()
    # Re-cover the table area with the disabled background panel.
    Entry(window, font = ("Courier New", 16), bd = 3, state = DISABLED).\
        place(width = 250, height = 420, x = CANVAS_WIDTH + 20, y = 60)
    point_txt.focus()
def task():
    """Shows the assignment statement in an info dialog."""
    messagebox.showinfo("Условие задачи",
        "\tНа плоскости дано множество точек. "
        "Найти такой треугольник с вершинами в этих точках, "
        "у которого разность площадей треугольника и вписанного круга максимальна.")
def aboutauthor():
    """Shows the author info dialog."""
    messagebox.showinfo(title='Автор', message='Турчанинов Александр ИУ7-44Б')
if __name__ == "__main__":
    # --- Main window and canvas -----------------------------------------
    window = Tk()
    window.title("Лабораторная работа №1")
    window.geometry("%dx%d" %(WINDOW_WIDTH, WINDOW_HEIGHT))
    window.resizable(False, False)
    canvas = Canvas(window, width = CANVAS_WIDTH, height = CANVAS_HEIGHT, bg = "lightgray")
    canvas.place(x = 0, y = 120)
    # Shared state: logical point coords, scaled canvas coords, indices
    # of the winning triangle's vertices, and the current point count.
    point_coord = []
    new_point_coord = []
    triangle_points = [-1, -1, -1]
    numb_points = 0
    Label(window, height = 2, text = "Кол-во точек:",
        font = ("Courier New", 16)).place(x = 10, y = 10)
    point_txt = Entry(window, font = ("Courier New", 16))
    point_txt.place(width = 180, height = 40, x = 205, y = 10)
    point_txt.focus()
    # Disabled entries double as framed panels behind the table header/body.
    Entry(window, font = ("Courier New", 15), bd = 3, state = DISABLED).\
        place(width = 250, height = 50, x = CANVAS_WIDTH + 20, y = 10)
    Label(window, text = " №    X       Y   ", font = ("Courier New", 18)).\
        place(width = 240, height = 40, x = CANVAS_WIDTH + 25, y = 15)
    Entry(window, font = ("Courier New", 16), bd = 3, state = DISABLED).\
        place(width = 250, height = 420, x = CANVAS_WIDTH + 20, y = 60)
    # Pre-create widget slots for up to 10 points; enter_points() places them.
    point_1_lbl = Label(); point_1x_txt = Entry(); point_1y_txt = Entry()
    point_2_lbl = Label(); point_2x_txt = Entry(); point_2y_txt = Entry()
    point_3_lbl = Label(); point_3x_txt = Entry(); point_3y_txt = Entry()
    point_4_lbl = Label(); point_4x_txt = Entry(); point_4y_txt = Entry()
    point_5_lbl = Label(); point_5x_txt = Entry(); point_5y_txt = Entry()
    point_6_lbl = Label(); point_6x_txt = Entry(); point_6y_txt = Entry()
    point_7_lbl = Label(); point_7x_txt = Entry(); point_7y_txt = Entry()
    point_8_lbl = Label(); point_8x_txt = Entry(); point_8y_txt = Entry()
    point_9_lbl = Label(); point_9x_txt = Entry(); point_9y_txt = Entry()
    point_10_lbl = Label(); point_10x_txt = Entry(); point_10y_txt = Entry()
    point_list = [[point_1_lbl, point_1x_txt, point_1y_txt],
                  [point_2_lbl, point_2x_txt, point_2y_txt],
                  [point_3_lbl, point_3x_txt, point_3y_txt],
                  [point_4_lbl, point_4x_txt, point_4y_txt],
                  [point_5_lbl, point_5x_txt, point_5y_txt],
                  [point_6_lbl, point_6x_txt, point_6y_txt],
                  [point_7_lbl, point_7x_txt, point_7y_txt],
                  [point_8_lbl, point_8x_txt, point_8y_txt],
                  [point_9_lbl, point_9x_txt, point_9y_txt],
                  [point_10_lbl, point_10x_txt, point_10y_txt]]
    Label(window, text = "№ удаляемой точки:", font = ("Courier New", 15)).\
        place(height = 50, x = CANVAS_WIDTH + 20, y = 535)
    del_point_txt = Entry(window, font = ("Courier New", 15))
    del_point_txt.place(width = 250, height = 40, x = CANVAS_WIDTH + 20, y = 575)
    # Action buttons wired to the handlers defined above.
    Button(text = "Ввести точки", font = ("Courier New", 15), command = read_numb_points).\
        place(width = 180, height = 40, x = 400, y = 10)
    Button(text = "Очистить всё", font = ("Courier New", 15), command = clear_canvas).\
        place(width = 180, height = 40, x = 400, y = 60)
    Button(text = "Построить точки", font = ("Courier New", 15), command = build_points).\
        place(width = 250, height = 40, x = 593, y = 10)
    Button(text = "Построить Треугольник", font = ("Courier New", 15), command = build_triangle).\
        place(width = 250, height = 40, x = 593, y = 60)
    Button(text = "Результат", font = ("Courier New", 15), command = display_results).\
        place(width = 180, height = 40, x = 205, y = 60)
    Button(text = "Условие задачи", font = ("Courier New", 15), command = task).\
        place(width = 180, height = 40, x = 10, y = 60)
    Button(text = "Добавить\nточку", font = ("Courier New", 15), command = add_point).\
        place(width = 120, height = 50, x = CANVAS_WIDTH + 20, y = 490)
    Button(text = "Удалить\nточку", font = ("Courier New", 15), command = del_point).\
        place(width = 120, height = 50, x = CANVAS_WIDTH + 150, y = 490)
    # Menu bar with an "Author" entry.
    mainmenu = Menu(window)
    window.config(menu=mainmenu)
    mainmenu.add_command(label='Author', command=aboutauthor)
    window.mainloop()
'''
Author 604829050@qq.com
https://blog.csdn.net/elecjack/article/details/51532482
'''
import scrapy
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
#from buluo.items import BuluoItem
class MySpiderForEastMoney(scrapy.Spider):
    """Scrapy spider that drives a Selenium-controlled Chrome browser to
    load EastMoney capital-flow ('zjlx') stock pages."""
    name = 'zjlx'
    #start_urls = ['http://data.eastmoney.com/zjlx/detail.html']

    def __init__(self):
        super(MySpiderForEastMoney, self).__init__()
        # Locate chromedriver via a relative path (translated from the
        # original Chinese comment).
        self.driver = webdriver.Chrome('./thirdpart/chromedriver')
        # Start Chrome; raise a TimeoutException when a page takes more
        # than 15 seconds to load.
        self.driver.set_page_load_timeout(15)
        self.driver.minimize_window()

    def start_requests(self):
        # Single target page; each URL is fetched and handed to parse().
        urls = ['http://data.eastmoney.com/zjlx/002433.html']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # NOTE(review): 'filename' is assigned but nothing is written —
        # the save-the-response step appears unfinished; confirm intent.
        filename = 'zjlx.html'

    def closed(self):
        # Shut down the Chrome window when the spider finishes.
        self.driver.close()
|
class Level:
    """Grid-based level map parsed from an ASCII drawing.

    Cells are stored in ``self.data``, a flat row-major ``bytearray`` whose
    entries use the class constants below.
    """
    NONE = 0    # padding / unrecognized characters
    FIELD = 1   # walkable cell ('.')
    WALL = 10   # '#'
    HEAD = 20   # snake head (placed at runtime, not by the parser)
    TAIL = 30   # snake tail (runtime)
    APPLE = 40  # apple (runtime)
    EXIT = 50   # 'E'

    def __init__(self, lines):
        """Build a level from a sequence of text lines.

        Characters: '.' walkable field, '#' wall, '@' start cell (walkable),
        'E' exit; anything else remains NONE.
        """
        self.apple_count = 0
        self.start_addr = 0   # flat index of the '@' start cell
        self.growth = 0
        self.timer = 0
        self.apple_timer = 0
        self.width = 0        # widest line, after stripping trailing whitespace
        self.height = 0
        self.data = None
        # Starts at 1: the '@' start cell is walkable but is not counted by
        # the '.' branch in build().
        self.free_cells = 1
        self.build(lines)

    def build(self, lines):
        """Parse ``lines`` into the flat cell array ``self.data``."""
        self.width = 0
        self.height = len(lines)
        # First pass: find the widest line so every row gets an equal stride.
        for line in lines:
            line = line.rstrip()
            l = len(line)
            if l > self.width:
                self.width = l
        level = bytearray(len(lines) * self.width)
        line_addr = 0
        for line in lines:
            offset = line_addr
            for char in line:
                if char == '.':
                    level[offset] = self.FIELD
                    self.free_cells += 1
                elif char == '#':
                    level[offset] = self.WALL
                elif char == '@':
                    # Start cell is walkable: mark it as FIELD (the original
                    # used the magic literal 1 == FIELD) and remember where it is.
                    level[offset] = self.FIELD
                    self.start_addr = offset
                elif char == 'E':
                    level[offset] = self.EXIT
                offset += 1
            line_addr += self.width
        self.data = level

    def get_free_cells(self, buffer):
        """Collect flat addresses of FIELD cells into ``buffer``.

        The scan skips the first and last ``width + 1`` cells (border margin).
        Returns the number of addresses written.
        """
        buffer_pos = 0
        for i in range(self.width + 1, len(self.data) - self.width - 1):
            if self.data[i] == self.FIELD:
                buffer[buffer_pos] = i
                buffer_pos += 1
        return buffer_pos
|
import os
import folium
# Importing explicitly the module from 'folium' - 'folium.plugins'
import folium.plugins as plugins
''' *************************************** Generating Folium Base Map **************************************'''
# Generating a 'Leaflet' map for the location in interest by passing through the coordinates
# Calling the 'folium.folium.Map' object
# Map centre roughly over the South China Sea so both Peninsular and East
# Malaysia are visible at zoom level 5.
Site_Coord = [4.145825, 108.3035]
m_folium = folium.Map(location = Site_Coord,
                      zoom_start = 5)
''' *************************************** Adding Minimap onto Folium Base Map **************************************'''
# Activating the 'folium.plugins' to include a minimap at the bottomright of the main map
# The minimap starts minimized and can be toggled open by the user.
m_minimap_Batu_Kawan = plugins.MiniMap(toggle_display = True,
                                       width=200,
                                       height=200,
                                       zoom_level_fixed=None,
                                       minimized=True)
m_folium.add_child(m_minimap_Batu_Kawan)
''' ***************************** Extracting G&P Geotechnics Project with Coordinates *******************************'''
''' ***************************** Extracting G&P Geotechnics Project with Coordinates *******************************'''
import pyexcel as pyex
import numpy as np

# Workbook with one sheet per year; each sheet holds rows whose columns are
# used as: col 0 = longitude, col 1 = latitude, col 3 = popup label
# (row 0 is a header).  # TODO confirm column meaning against the workbook.
DATA = pyex.get_book(file_name = '2020_Gallery.xlsx')


def _add_year_layer(year, colour):
    """Add one year's projects as a toggleable marker layer on ``m_folium``.

    Reads the sheet named after ``year``, builds a ``folium.FeatureGroup``
    (so the year appears in the LayerControl panel) and attaches one marker
    per project row.
    """
    sheet = np.array(DATA.sheet_by_name(str(year)))
    latitudes = np.ndarray.tolist(sheet[1:, 1])
    longitudes = np.ndarray.tolist(sheet[1:, 0])
    labels = np.ndarray.tolist(sheet[1:, 3])
    group = folium.FeatureGroup("%d Projects" % year)
    for lat, lng, label in zip(latitudes, longitudes, labels):
        group.add_child(folium.Marker(
            location=[lat, lng],
            icon=folium.Icon(color=colour, icon='info-sign'),
            popup=folium.Popup(label, max_width=200, min_width=200)))
    m_folium.add_child(group)


# The original repeated the same extraction block once per year (2008-2020);
# collapsed into a single loop.  Marker colours follow the original grouping:
# 2008-2010 black, 2011-2015 orange, 2016-2020 red.
for _year in range(2008, 2021):
    if _year <= 2010:
        _colour = 'black'
    elif _year <= 2015:
        _colour = 'orange'
    else:
        _colour = 'red'
    _add_year_layer(_year, _colour)
''' *************************************** Activating LayerControl in Folium Base Map **************************************'''
folium.LayerControl().add_to(m_folium)
''' *************************************** Saving Folium Base Map as HTML **************************************'''
# Saving the 'Leaflet' map generated in html format
m_folium.save('20200629_G&P_ALL.html')
|
import typing as t
import jank
from .renderer import Renderer
class RectRenderer(Renderer):
    """Renderer that draws an axis-aligned rectangle via jank's shape sprites.

    Thin delegation wrapper: every method forwards to the underlying
    ``jank.shape_sprites.Rectangle`` stored in ``self.rect``.
    """
    def __init__(
        self,
        width: float, height: float,
        colour: t.Tuple[int, int, int] = (255, 255, 255),
        batch: t.Optional[jank.graphics.Batch] = None,
        group: t.Optional[jank.graphics.Group] = None
    ):
        # The rectangle is created at the origin; position and rotation are
        # pushed in later via set_position()/set_rotation().
        self.rect = jank.shape_sprites.Rectangle(
            0, 0,
            width, height,
            color=colour,
            batch=batch,
            group=group
        )
    @staticmethod
    def create_from_rect(rect: jank.shape_sprites.Rectangle):
        """Adopt an existing Rectangle sprite as-is.

        Uses ``__new__`` to bypass ``__init__`` so no new sprite is created.
        """
        rect_renderer = RectRenderer.__new__(RectRenderer)
        rect_renderer.rect = rect
        return rect_renderer
    def set_position(self, position: jank.Vec2d):
        """Move the rectangle to ``position``."""
        self.rect.position = position
    def set_rotation(self, rotation_degrees: float):
        """Set the sprite rotation, in degrees."""
        self.rect.rotation = rotation_degrees
    def get_batch(self) -> t.Optional[jank.graphics.Batch]:
        return self.rect.batch
    def set_batch(self, batch: t.Optional[jank.graphics.Batch]):
        self.rect.batch = batch
    def get_group(self) -> t.Optional[jank.graphics.Group]:
        return self.rect.group
    def set_group(self, group: t.Optional[jank.graphics.Group]):
        self.rect.group = group
    def get_width(self) -> float:
        return self.rect.width
    def set_width(self, width: float):
        self.rect.width = width
    def get_height(self) -> float:
        return self.rect.height
    def set_height(self, height: float):
        self.rect.height = height
    def draw(self):
        """Draw immediately (only needed when not rendering through a batch)."""
        self.rect.draw()
    def delete(self):
        """Release the sprite's graphics resources."""
        self.rect.delete()
    @property
    def colour(self) -> t.Tuple[int, int, int]:
        # British spelling mirrors the constructor argument; the underlying
        # sprite attribute is 'color'.
        return self.rect.color
    @colour.setter
    def colour(self, colour: t.Tuple[int, int, int]):
        self.rect.color = colour
    @property
    def opacity(self) -> int:
        return self.rect.opacity
    @opacity.setter
    def opacity(self, opacity: int):
        self.rect.opacity = opacity
|
import numpy as np
def betaratio(kv, kb, prob, err=None, perframe=True):
    """Calculate speckle contrast from photon probability ratios.

    Parameters
    ----------
    kv : ndarray
        Photon-count values; estimates are formed from consecutive pairs
        (kv[i], kv[i+1]), yielding ``kv.size - 1`` rows.
    kb : ndarray
        Mean count rates; zero entries are masked out of the division.
    prob : ndarray, shape (kv.size, kb.size)
        Photon probabilities per count value and count rate.
    err : tuple of two ndarrays, optional
        Error terms used to propagate an uncertainty estimate; if omitted,
        ``dbeta`` is filled with ones.
    perframe : bool, optional
        Unused here; kept for interface compatibility.  # TODO confirm intent

    Returns
    -------
    beta : masked ndarray, shape (kv.size - 1, kb.size)
        Contrast estimates per consecutive-count pair.
    dbeta : ndarray
        Uncertainty estimates (ones when ``err`` is None).
    """
    # Bug fix: the deprecated alias ``np.bool`` was removed in NumPy 1.24;
    # use the builtin ``bool`` dtype instead.
    prob = np.ma.array(prob, mask=np.zeros_like(prob, dtype=bool))
    kb = np.ma.array(kb, mask=kb == 0)  # guard against division by zero rates
    beta = np.ma.array(np.zeros((kv.size - 1, kb.size)))
    for i, ki in enumerate(kv[:-1]):
        # Extend the masking window by one extra row while one exists.
        c = i + 2 < kv.size
        # Mask ratios whose denominator probability is zero.
        prob[i : i + 1 + c][prob[i + 1 : i + 2 + c] == 0] = np.ma.masked
        p1 = prob[i]
        p2 = prob[i + 1]
        a = p1 / p2
        divd = a * kb - (ki + 1.0)
        divs = 1.0 + (1.0 - a) * ki
        beta[i] = divd / kb / divs
        prob.mask = np.ma.nomask  # reset the mask for the next pair
    if err is not None:
        dbeta = 1.0 / kb * np.sqrt(2 * (1 + np.abs(beta)) / (err[0] * err[1]))
    else:
        dbeta = np.ones((kb.size, 1), dtype=np.float32)
    return beta, dbeta
|
def eleva_potencia_cc(a, b):
    """Raise ``a`` to the power ``b`` and add a fixed offset of 21.

    Input:
        a: number (base)
        b: number (exponent)
    Output:
        a ** b + 21
    """
    # The original built the constant from z_var = 11 plus 10; fold the same
    # arithmetic into one named offset.
    offset = 10 + 11
    return a ** b + offset
def eleva_potencia_dd(a, b):
    """Raise ``a`` to the power ``b`` and add a fixed offset of 21.

    Input:
        a: number (base)
        b: number (exponent)
    Output:
        resultado: a ** b + 21
    """
    base_offset = 10
    z_extra = 11  # same constant the original kept in z_var
    resultado = pow(a, b) + base_offset + z_extra
    return resultado
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Parameter representation"""
__docformat__ = 'restructuredtext'
import re
import textwrap
import warnings
import numpy as np
from mvpa2.base.state import IndexedCollectable
from mvpa2.base.constraints import expand_contraint_spec
if __debug__:
    from mvpa2.base import debug
# Matches newline-plus-indentation runs (and leading whitespace) so that
# Parameter._paramdoc() can collapse them into single spaces when re-flowing
# parameter documentation.
_whitespace_re = re.compile('\n\s+|^\s+')
__all__ = [ 'Parameter', 'KernelParameter' ]
class Parameter(IndexedCollectable):
    """This class shall serve as a representation of a parameter.

    It might be useful if a little more information than the pure parameter
    value is required (or even only useful).

    Each parameter must have a value. However additional attributes can be
    passed to the constructor and will be stored in the object.

    Notes
    -----
    BIG ASSUMPTION: stored values are not mutable, ie nobody should do
    cls.parameter1[:] = ...
    or we wouldn't know that it was changed.

    NOTE: this module is written for Python 2 (``iteritems``, ``raise E, msg``
    and ``except E, e`` syntax below).

    Here is a list of possible additional attributes:

    step
      Increment/decrement step size hint for optimization
    """
    def __init__(self, default, constraints=None, ro=False, index=None, value=None,
                 name=None, doc=None, **kwargs):
        """Specify a Parameter with a default value and arbitrary
        number of additional attributes.

        Parameters
        ----------
        constraints : callable
          A functor that takes any input value, performs checks or type
          conversions and finally returns a value that is appropriate for a
          parameter or raises an exception.
        name : str
          Name of the parameter under which it should be available in its
          respective collection.
        doc : str
          Documentation about the purpose of this parameter.
        index : int or None
          Index of parameter among the others. Determines order of listing
          in help. If None, order of instantiation determines the index.
        ro : bool
          Either value which will be assigned in the constructor is read-only and
          cannot be changed
        value
          Actual value of the parameter to be assigned

        Examples
        --------
        -ensure the parameter to be of type float
        (None not allowed as value):
        constraints = EnsureFloat()

        >>> from mvpa2.base.param import Parameter
        >>> from mvpa2.base.constraints import (EnsureFloat, EnsureRange,
        ...                                     AltConstraints, Constraints)
        >>> C = Parameter(23.0, constraints=EnsureFloat())

        -ensure the parameter to be of type float or to be None:

        >>> C = Parameter(23.0, constraints=AltConstraints(EnsureFloat(), None))

        -ensure the parameter to be None or to be of type float
        and lie in the inclusive range (7.0,44.0):

        >>> C = Parameter(23.0, AltConstraints(Constraints(EnsureFloat(),
        ...                                    EnsureRange(min=7.0,max=44.0)),
        ...                                    None))
        """
        # 'allowedtype' is an obsolete predecessor of 'constraints'; accept it
        # for compatibility but warn and discard.
        allowedtype = kwargs.pop('allowedtype', None)
        if allowedtype is not None:
            warnings.warn(
                "allowedtype option was deprecated in favor of constraints. "
                "Adjust your code, provided value '%s' was ignored"
                % str(allowedtype), category=DeprecationWarning)
        # XXX probably is too generic...
        # and potentially dangerous...
        # let's at least keep track of what is passed
        self._additional_props = []
        # Any remaining keyword becomes an attribute; names are recorded so
        # __reduce__/__repr__ can round-trip them.
        for k, v in kwargs.iteritems():
            self.__setattr__(k, v)
            self._additional_props.append(k)
        # Name-mangled to _Parameter__default; exposed via the 'default'
        # property at the bottom of the class.
        self.__default = default
        self._ro = ro
        self.constraints = expand_contraint_spec(constraints)
        # needs to come after kwargs processing, since some debug statements
        # rely on working repr()
        # value is not passed since we need to invoke _set with init=True
        # below
        IndexedCollectable.__init__(self, index=index, # value=value,
                                    name=name, doc=doc)
        self._isset = False
        # Assign through _set(init=True) so the ro flag is not enforced and
        # _isset stays False for a pure default assignment.
        if value is None:
            self._set(self.__default, init=True)
        else:
            self._set(value, init=True)
        if __debug__:
            if 'val' in kwargs:
                raise ValueError, "'val' property name is illegal."
    def __reduce__(self):
        """Pickle support: rebuild with (default, constraints, ro) plus any
        additional properties captured at construction time."""
        icr = IndexedCollectable.__reduce__(self)
        # Collect all possible additional properties which were passed
        # to the constructor
        state = dict([(k, getattr(self, k)) for k in self._additional_props])
        state['_additional_props'] = self._additional_props
        state.update(icr[2])
        res = (self.__class__, (self.__default, self.constraints, self._ro) + icr[1], state)
        #if __debug__ and 'COL_RED' in debug.active:
        #    debug('COL_RED', 'Returning %s for %s' % (res, self))
        return res
    def __str__(self):
        res = IndexedCollectable.__str__(self)
        # it is enabled but no value is assigned yet
        res += '=%s' % (self.value,)
        return res
    def __repr__(self):
        # cannot use IndexedCollectable's repr(), since the contructor
        # needs to handle the mandatory 'default' argument
        # TODO: so what? just tune it up ;)
        # TODO: think what to do with index parameter...
        s = "%s(%s, name=%s, doc=%s" % (self.__class__.__name__,
                                        self.__default,
                                        repr(self.name),
                                        repr(self.__doc__))
        plist = ["%s=%s" % (p, self.__getattribute__(p))
                 for p in self._additional_props]
        if len(plist):
            s += ', ' + ', '.join(plist)
        if self._ro:
            s += ', ro=True'
        if not self.is_default:
            s += ', value=%r' % (self.value, )
        s += ')'
        return s
    def _paramdoc(self, indent="  ", width=70):
        """Docstring for the parameter to be used in lists of parameters

        Returns
        -------
        string or list of strings (if indent is None)
        """
        paramsdoc = '%s' % self.name
        # Append a short type/constraint description, stripping redundant
        # outer parentheses.
        if not self.constraints is None:
            sdoc = self.constraints.short_description()
            if not sdoc is None:
                if sdoc[0] == '(' and sdoc[-1] == ')':
                    sdoc = sdoc[1:-1]
                # parameters are always optional
                paramsdoc += " : %s, optional" % sdoc
        paramsdoc = [paramsdoc]
        try:
            doc = self.__doc__.strip()
            if not doc.endswith('.'):
                doc += '.'
            if self.constraints is not None:
                cdoc = self.constraints.long_description()
                if cdoc[0] == '(' and cdoc[-1] == ')':
                    cdoc = cdoc[1:-1]
                doc += ' Constraints: %s.' % cdoc
            try:
                doc += " [Default: %r]" % (self.default,)
            except:
                pass
            # Explicitly deal with multiple spaces, for some reason
            # replace_whitespace is non-effective
            doc = _whitespace_re.sub(' ', doc)
            paramsdoc += [indent + x
                          for x in textwrap.wrap(doc, width=width-len(indent),
                                                 replace_whitespace=True)]
        except Exception, e:
            pass
        return '\n'.join(paramsdoc)
    # XXX should be named reset2default? correspondingly in
    #     ParameterCollection as well
    def reset_value(self):
        """Reset value to the default"""
        #IndexedCollectable.reset(self)
        if not self.is_default and not self._ro:
            self._isset = True
            self.value = self.__default
    def _set(self, val, init=False):
        """Assign a (constraint-checked) value; ``init=True`` bypasses the
        read-only guard and leaves ``_isset`` False."""
        if self.constraints is not None:
#            for c in self.constraints:
#                val = c(val)
#                #val = c.validate(val)
            val = self.constraints(val)
        # Array-valued comparison yields an array; reduce with np.any below.
        different_value = self._value != val
        isarray = isinstance(different_value, np.ndarray)
        if self._ro and not init:
            raise RuntimeError, \
                  "Attempt to set read-only parameter %s to %s" \
                  % (self.name, val)
        if (isarray and np.any(different_value)) or \
               ((not isarray) and different_value):
            if __debug__:
                debug("COL",
                      "Parameter: setting %s to %s " % (str(self), val))
            self._value = val
            # Set 'isset' only if not called from initialization routine
            self._isset = not init #True
        elif __debug__:
            debug("COL",
                  "Parameter: not setting %s since value is the same" \
                  % (str(self)))
    @property
    def is_default(self):
        """Returns True if current value is bound to default one"""
        # Identity check (is), not equality: tracks whether the default
        # *object* is still the assigned value.
        return self._value is self.default
    @property
    def equal_default(self):
        """Returns True if current value is equal to default one"""
        return self._value == self.__default
    def _set_default(self, value):
        """Replace the default; if the current value still was the default,
        re-bind it to the new default without marking the parameter set."""
        wasdefault = self.is_default
        self.__default = value
        if wasdefault:
            self.reset_value()
            self._isset = False
    # incorrect behavior
    #def reset(self):
    #    """Override reset so we don't clean the flag"""
    #    pass
    # Property accessors; lambdas are name-mangled inside the class body, so
    # x.__default resolves to x._Parameter__default.
    default = property(fget=lambda x:x.__default, fset=_set_default)
    value = property(fget=lambda x:x._value, fset=_set)
class KernelParameter(Parameter):
    """Identical to `Parameter`; exists only so kernel parameters can be
    distinguished from ordinary ones by type."""
    pass
|
print("Hello World! 2nd") |
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level # noqa
from floodsystem.analysis import risk_by_gradients
import matplotlib.pyplot as plt # requirement # noqa
# --------------------------------------------------------------------------------------------------------
# CODE START
# Flood-risk demo: pick the N stations with the highest relative water level
# and score each with risk_by_gradients (polynomial fit over recent levels).
stations = build_station_list() # build station list
update_water_levels(stations) # update water levels
N = 3 # how many stations to build functions for
# NOTE(review): the inline comment claims "station names", but
# stations_highest_rel_level may return station objects — if so, the
# `station.name in workstations` test below never matches. Confirm the
# helper's return type.
workstations = stations_highest_rel_level(stations, N) # creates list of station names
# alternative implementation, consider all stations above threshold
# threshold = 1 # sets threshold
# highstations = stations_level_over_threshold(stations, threshold) # calculate stations above threshold
# workstations = [] # initialise list for station names
# for i in highstations: # iterate through highstations
# workstations.append(highstations[0]) # adds station names
riverfunctions = [] # create lists for river data
dt = 5 # how many days back to check
p = 2 # polynomial order to produce, must be greater than 2
for station in stations: # iterate through stations
    if station.name in workstations: # check if station is in desired check list
        # print("\n{}:".format(station.name)) # print station name for testing
        risk = risk_by_gradients(station, dt, p) # calculate risk by gradient
        entry = [station.name, risk] # initialise output list
        riverfunctions.append(entry) # adds to master list
for i in riverfunctions: # for testing
    print(i)
|
import os
from django.http import HttpResponseForbidden
from bluebottle.bluebottle_utils.utils import get_client_ip
from apps.organizations.models import Organization, OrganizationMember, OrganizationAddress, OrganizationDocument
from apps.organizations.permissions import IsOrganizationMember
from apps.organizations.serializers import OrganizationSerializer, ManageOrganizationSerializer, OrganizationAddressSerializer, OrganizationDocumentSerializer
from django.http.response import HttpResponseForbidden
from django.views.generic.detail import DetailView
from rest_framework import generics
from django.shortcuts import get_object_or_404
from filetransfers.api import serve_file
class OrganizationList(generics.ListAPIView):
    """Read-only, paginated list of all organizations."""
    model = Organization
    serializer_class = OrganizationSerializer
    paginate_by = 10
class OrganizationDetail(generics.RetrieveAPIView):
    """Read-only detail view of a single organization."""
    model = Organization
    serializer_class = OrganizationSerializer
class ManageOrganizationList(generics.ListCreateAPIView):
    """List and create organizations, restricted to the requesting user's own.

    Only organizations the authenticated user is a member of are listed; a
    newly created organization automatically gains the creator as a member.
    """
    model = Organization
    serializer_class = ManageOrganizationSerializer
    paginate_by = 10

    def get_queryset(self):
        """Limit the queryset to organizations the current user belongs to."""
        memberships = OrganizationMember.objects.filter(user=self.request.user)
        member_org_ids = memberships.values_list('organization_id', flat=True).all()
        base_qs = super(ManageOrganizationList, self).get_queryset()
        return base_qs.filter(id__in=member_org_ids)

    def post_save(self, obj, created=False):
        """On creation, register the creating user as a member."""
        if created:
            OrganizationMember(organization=obj, user=self.request.user).save()
class ManageOrganizationDetail(generics.RetrieveUpdateAPIView):
    """Retrieve/update one organization; access limited to its members."""
    model = Organization
    serializer_class = ManageOrganizationSerializer
    permission_classes = (IsOrganizationMember, )
class ManageOrganizationAddressList(generics.ListCreateAPIView):
    """List/create addresses for organizations the request user belongs to."""
    model = OrganizationAddress
    serializer_class = OrganizationAddressSerializer
    paginate_by = 10

    def get_queryset(self):
        """Filter addresses by membership and (optionally) by explicit ids.

        DRF2 has no built-in filtering against an array of ids
        (https://groups.google.com/forum/?fromgroups#!topic/django-rest-framework/vbifEyharBw),
        so the 'ids[]' query parameter is applied manually here. Results are
        always limited to organizations the requesting user is a member of.
        """
        qs = super(ManageOrganizationAddressList, self).get_queryset()
        member_org_ids = (OrganizationMember.objects
                          .filter(user=self.request.user)
                          .values_list('organization_id', flat=True)
                          .all())
        qs = qs.filter(organization_id__in=member_org_ids)
        requested_ids = self.request.GET.getlist('ids[]', None)
        if requested_ids:
            qs = qs.filter(id__in=requested_ids)
        return qs
class ManageOrganizationAddressDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one address; limited to organization members."""
    model = OrganizationAddress
    serializer_class = OrganizationAddressSerializer
    permission_classes = (IsOrganizationMember, )
class ManageOrganizationDocumentList(generics.ListCreateAPIView):
    """List and upload organization documents."""
    model = OrganizationDocument
    serializer_class = OrganizationDocumentSerializer
    paginate_by = 20
    # NOTE(review): 'filter' looks like it was meant to be DRF's
    # 'filter_fields'; as written it has no filtering effect — confirm.
    filter = ('organization', )
    def pre_save(self, obj):
        # Stamp uploader identity and origin IP before saving.
        obj.author = self.request.user
        obj.ip_address = get_client_ip(self.request)
class ManageOrganizationDocumentDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one organization document."""
    model = OrganizationDocument
    serializer_class = OrganizationDocumentSerializer
    # NOTE(review): 'paginate_by' and 'filter' have no effect on a detail
    # view; likely copy-pasted from the list view above — confirm and remove.
    paginate_by = 20
    filter = ('organization', )
    def pre_save(self, obj):
        # Stamp uploader identity and origin IP before saving.
        obj.author = self.request.user
        obj.ip_address = get_client_ip(self.request)
# Non API views
# Download private documents
class OrganizationDocumentDownloadView(DetailView):
    """Serve a private organization document, restricted to its author."""
    model = OrganizationDocument

    def get(self, request, pk):
        """Stream the document file, or 403 if the requester is not the author."""
        document = get_object_or_404(OrganizationDocument, pk=pk)
        if request.user != document.author:
            return HttpResponseForbidden()
        download_name = os.path.basename(document.file.name)
        return serve_file(request, document.file, save_as=download_name)
import json
import logging
from apps.utils.aws.sns import _sns_text_based_notification
from apps.utils.data_helpers.manager import DataManager
from apps.utils.timezone_utils import str_utc
from ..action import BaseAction
logger = logging.getLogger(__name__)
class CusAction(BaseAction):
    """Custom action that forwards event metadata to an AWS SNS topic."""
    # Keys that must be present in payload['action']['extra_payload'].
    REQUIRED_EXTRA_KEYS = ['sns_topic']
    def __str__(self):
        return 'Custom Action'
    def process(self, payload, in_data):
        """Publish a JSON summary of ``in_data`` to the configured SNS topic.

        Returns True on success; False when the payload is malformed or
        publishing fails (failures are routed through handle_error rather
        than raised).
        """
        super(CusAction, self).process(payload, in_data)
        # in_data must be an 'event' instance and the payload must carry the
        # required extra keys; otherwise report and bail out.
        if not DataManager.is_instance('event', in_data) or not self.check_payload(payload):
            self.handle_error(str(self), "Payload is not well formatted : {}".format(payload))
            return False
        try:
            sns_topic = payload['action']['extra_payload']['sns_topic']
            # Event summary forwarded to subscribers; bucket/key point at the
            # event payload stored in S3.
            sns_payload = {
                "uuid": str(in_data.uuid),
                "project": in_data.project_slug,
                "device": in_data.device_slug,
                "stream": in_data.stream_slug,
                "timestamp": str_utc(in_data.timestamp),
                "bucket": in_data.s3bucket,
                "key": in_data.s3key
            }
            _sns_text_based_notification(sns_topic, json.dumps(sns_payload))
            return True
        except Exception as e:
            # Broad catch is intentional: any publishing failure is reported
            # via handle_error instead of propagating out of the rules engine.
            self.handle_error(str(self), str(e))
            return False
|
from __future__ import division
from __future__ import absolute_import
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, Concatenate
from rl.agents.ddpg import DDPGAgent
from rl.memory import SequentialMemory
from rl.processors import MultiInputProcessor
from ..util import MultiInputTestEnv
def test_single_ddpg_input():
    """Smoke-test DDPGAgent end-to-end with a single observation input."""
    nb_actions = 2
    # Actor: maps the flattened (2, 3) observation window straight to actions.
    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))
    # Critic: scores (action, observation) pairs with a single output unit.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    # A handful of steps is enough to exercise warmup and both update paths.
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
def test_multi_ddpg_input():
    """DDPG smoke test: two observation inputs routed through a MultiInputProcessor."""
    nb_actions = 2

    # Actor: merge both observation inputs, flatten, and map to actions.
    # NOTE: the previous version created a throwaway `Sequential()` here that
    # was immediately overwritten by the functional Model below; removed.
    actor_observation_input1 = Input(shape=(2, 3), name='actor_observation_input1')
    actor_observation_input2 = Input(shape=(2, 4), name='actor_observation_input2')
    x = Concatenate()([actor_observation_input1, actor_observation_input2])
    x = Flatten()(x)
    x = Dense(nb_actions)(x)
    actor = Model(inputs=[actor_observation_input1, actor_observation_input2], outputs=x)

    # Critic: Q(s1, s2, a) from both observations plus the action.
    action_input = Input(shape=(nb_actions,), name='action_input')
    critic_observation_input1 = Input(shape=(2, 3), name='critic_observation_input1')
    critic_observation_input2 = Input(shape=(2, 4), name='critic_observation_input2')
    x = Concatenate()([critic_observation_input1, critic_observation_input2])
    x = Concatenate()([action_input, Flatten()(x)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, critic_observation_input1, critic_observation_input2], outputs=x)

    # Two observation streams -> the processor splits them for the agent.
    processor = MultiInputProcessor(nb_inputs=2)
    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4,
                      processor=processor)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv([(3,), (4,)]), nb_steps=10)
|
import logging
import os
from google.appengine.api import app_identity
from google.auth import app_engine
from google.oauth2 import service_account
import googleapiclient.discovery
from django.apps import apps
from djangae.environment import is_production_environment
from .utils import get_backup_setting, get_backup_path
logger = logging.getLogger(__name__)

# OAuth scope required to trigger Datastore exports.
AUTH_SCOPES = ['https://www.googleapis.com/auth/datastore']
# Discovery document for the Datastore API (v1).
SERVICE_URL = 'https://datastore.googleapis.com/$discovery/rest?version=v1'
def backup_datastore(bucket=None, kinds=None):
    """Trigger a managed Datastore export of all whitelisted entity kinds.

    Uses the scheduled-backup (Datastore Admin) service to write the export
    to the configured GCS bucket path. No-ops with a warning when backups are
    disabled or when nothing is left to export after blacklist filtering.
    """
    if not get_backup_setting("ENABLED", False):
        logger.warning(
            "DJANGAE_BACKUP_ENABLED is False or not set."
            "The datastore backup will not be run."
        )
        return

    # make sure no blacklisted entity kinds are included in our export
    kinds_to_export = _get_valid_export_models(kinds)
    if not kinds_to_export:
        logger.warning("No whitelisted entity kinds to export.")
        return

    # build the service object with the necessary credentials and trigger export
    service = _get_service()
    export_body = {
        'outputUrlPrefix': get_backup_path(bucket),
        'entityFilter': {
            'kinds': kinds_to_export,
        }
    }
    project_id = app_identity.get_application_id()
    service.projects().export(projectId=project_id, body=export_body).execute()
def _get_valid_export_models(kinds=None):
    """Return the entity-kind names to back up, minus any blacklisted ones."""
    excluded_models = get_backup_setting("EXCLUDE_MODELS", required=False, default=[])
    excluded_apps = get_backup_setting("EXCLUDE_APPS", required=False, default=[])

    models_to_backup = []
    for model in apps.get_models(include_auto_created=True):
        app_label = model._meta.app_label
        kind_name = "{}_{}".format(app_label, model._meta.object_name.lower())

        if app_label in excluded_apps:
            logger.info(
                "Not backing up %s due to the %s app being in DJANGAE_BACKUP_EXCLUDE_APPS",
                kind_name, app_label
            )
            continue

        if kind_name in excluded_models:
            logger.info(
                "Not backing up %s as it is blacklisted in DJANGAE_BACKUP_EXCLUDE_MODELS",
                kind_name
            )
            continue

        logger.info("%s added to list of models to backup", kind_name)
        models_to_backup.append(kind_name)

    # If kinds were explicitly provided by the caller, only return those that
    # also survived the blacklist checks above.
    if kinds:
        models_to_backup = [kind for kind in models_to_backup if kind in kinds]

    return models_to_backup
def _get_service():
    """Build an Admin API client authorised for Datastore exports."""
    # NOTE(review): built as 'admin'/'v1' while the discovery URL points at the
    # Datastore API -- presumably intentional for the Admin surface; confirm.
    return googleapiclient.discovery.build(
        'admin', 'v1',
        credentials=_get_authentication_credentials(),
        discoveryServiceUrl=SERVICE_URL
    )
def _get_authentication_credentials():
    """Return authentication credentials appropriate to the environment.

    See https://developers.google.com/api-client-library/python/auth/service-accounts
    """
    if is_production_environment():
        # On App Engine, use the built-in service account.
        return app_engine.Credentials(scopes=AUTH_SCOPES)

    # Locally, load the service-account key file named by the standard env var.
    key_path = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
    return service_account.Credentials.from_service_account_file(
        key_path, scopes=AUTH_SCOPES
    )
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import cv2
import matplotlib.pyplot as plt
import numpy as np
import random
# In[2]:
# Read an image and convert it to grayscale. NOTE: the author deliberately uses RGB2GRAY (not BGR2GRAY) on the BGR image.
def read_image(path):
    """Load an image and return its (grayscale, original BGR, RGB) versions.

    The grayscale conversion deliberately applies COLOR_RGB2GRAY to the BGR
    image returned by cv2.imread (see the original author's note above).
    """
    bgr = cv2.imread(path)
    gray = cv2.cvtColor(bgr, cv2.COLOR_RGB2GRAY)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return gray, bgr, rgb
# In[3]:
# Load the two images to be stitched (gray, BGR, RGB versions of each).
left, left_rgb, leftt = read_image('images/1.jpg')
right, right_rgb, rightt = read_image('images/2.jpg')
# In[4]:
def SIFT(img):
    """Detect SIFT keypoints and compute their descriptors for *img*."""
    detector = cv2.xfeatures2d.SIFT_create()
    keypoints, descriptors = detector.detectAndCompute(img, None)
    return keypoints, descriptors
def plot_sift(img, kp):
    """Draw rich SIFT keypoints on a copy of *img*, display it, and save it."""
    annotated = img.copy()
    cv2.drawKeypoints(img, kp, annotated, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    plt.imshow(annotated)
    # Save as BGR because cv2.imwrite expects OpenCV channel order.
    cv2.imwrite('data/sift.jpg', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))
    return annotated
# In[5]:
# Keypoints and descriptors for both grayscale images.
kp_left, des_left = SIFT(left)
kp_right, des_right = SIFT(right)
# In[6]:
def sqeuclidean(des_left, des_right):
    """Pairwise squared Euclidean distances between two descriptor sets.

    :param des_left: array-like of shape (n, d)
    :param des_right: array-like of shape (m, d)
    :return: ndarray of shape (n, m) where entry (i, j) is ||left_i - right_j||^2
    """
    des_left = np.asarray(des_left)
    des_right = np.asarray(des_right)
    # Broadcast to (n, m, d) and reduce over the descriptor axis: a single
    # vectorised pass instead of the previous O(n*m) Python loop.
    diff = des_left[:, np.newaxis, :] - des_right[np.newaxis, :, :]
    return np.sum(np.square(diff), axis=2)
# In[7]:
def match_points(kp_left, kp_right, des_left, des_right, threshold=7000):
    """Return (x1, y1, x2, y2) rows for every descriptor pair whose squared
    Euclidean distance is below *threshold*."""
    print("Matching ...")
    dist = sqeuclidean(des_left, des_right)
    left_idx, right_idx = np.where(dist < threshold)
    # KeyPoint.pt tuples concatenate into (x1, y1, x2, y2).
    pairs = [list(kp_left[i].pt + kp_right[j].pt) for i, j in zip(left_idx, right_idx)]
    return np.array(pairs)
# In[8]:
match = match_points(kp_left, kp_right, des_left, des_right, 7000)
# In[9]:
def homomat(pairs):
    """Estimate a 3x3 homography H from point pairs via the DLT algorithm.

    :param pairs: ndarray of shape (k, 4); each row is (x1, y1, x2, y2) with
                  (x2, y2) ~ H @ (x1, y1, 1).
    :return: H normalised so that H[2, 2] == 1.
    """
    rows = []
    for x1, y1, x2, y2 in pairs[:, :4]:
        p1 = (x1, y1, 1.0)
        # Two constraint rows per correspondence (standard DLT formulation);
        # built directly instead of via per-row np.append calls.
        rows.append([0, 0, 0, p1[0], p1[1], p1[2], -y2 * p1[0], -y2 * p1[1], -y2 * p1[2]])
        rows.append([p1[0], p1[1], p1[2], 0, 0, 0, -x2 * p1[0], -x2 * p1[1], -x2 * p1[2]])
    A = np.array(rows)
    # The homography is the right singular vector for the smallest singular
    # value, i.e. the last row of V^T (idiomatic vh[-1], was V[len(V)-1]).
    _, _, vh = np.linalg.svd(A)
    H = vh[-1].reshape(3, 3)
    return H / H[2, 2]
# In[10]:
def random_point(coords, k=4):
    """Pick *k* distinct rows of *coords* uniformly at random."""
    chosen = random.sample(range(coords.shape[0]), k)
    return np.array([coords[i] for i in chosen])
# In[11]:
def get_error(point, H):
    """Squared reprojection error of each correspondence under homography H.

    :param point: ndarray of shape (k, 4) of (x1, y1, x2, y2) rows
    :param H: 3x3 homography mapping (x1, y1, 1) toward (x2, y2)
    :return: ndarray of shape (k,) of squared Euclidean distances
    """
    num_points = len(point)
    # Homogeneous source points: (k, 3).
    all_p1 = np.concatenate((point[:, 0:2], np.ones((num_points, 1))), axis=1)
    all_p2 = point[:, 2:4]
    # Project every point with one matrix multiply (was a per-point Python
    # loop), then dehomogenise by the third coordinate.
    projected = all_p1 @ H.T
    estimate_p2 = projected[:, 0:2] / projected[:, 2:3]
    return np.linalg.norm(all_p2 - estimate_p2, axis=1) ** 2
# In[12]:
def ransac(coords, threshold, iters):
    """Robustly estimate a homography with RANSAC.

    :param coords: ndarray (k, 4) of candidate (x1, y1, x2, y2) matches
    :param threshold: maximum squared reprojection error for an inlier
    :param iters: number of random 4-point samples to try
    :return: (best_inliers, best_H)
    :raises ValueError: if no iteration produced a usable homography
    """
    print("RANSAC PROCESSING...")
    # Previously best_inliers/best_H were only assigned inside the success
    # branch, so an all-degenerate run raised UnboundLocalError at return.
    best_inliers = None
    best_H = None
    num_best_inliers = 0
    for _ in range(iters):
        sample = random_point(coords)
        H = homomat(sample)
        # Skip degenerate estimates (e.g. from collinear sample points).
        if np.linalg.matrix_rank(H) < 3:
            continue
        errors = get_error(coords, H)
        inliers = coords[np.where(errors < threshold)[0]]
        if len(inliers) > num_best_inliers:
            best_inliers = inliers.copy()
            num_best_inliers = len(inliers)
            best_H = H.copy()
    if best_H is None:
        raise ValueError("RANSAC failed: no valid homography found")
    print("Number of inliers: {}".format(num_best_inliers))
    return best_inliers, best_H
# In[13]:
# Run RANSAC on the raw matches to keep only geometrically consistent inliers.
inliers, H = ransac(match, 0.5, 1000)
# In[14]:
# In[15]:
# Side-by-side canvas of both RGB images for visualising the matches.
total_img = np.concatenate((leftt, rightt), axis=1)
# In[16]:
# I've tried to use cv2 to plot the points, but it needs integer coordinates.
# In[65]:
def plot_inlier_matches(inliers, total_img):
    """Plot the inlier correspondences on the side-by-side image pair.

    :param inliers: ndarray (k, 4) of (x1, y1, x2, y2) inlier matches
    :param total_img: the two images concatenated horizontally
    """
    match_img = total_img.copy()
    # The right-hand image starts halfway across the concatenated canvas.
    offset = total_img.shape[1] / 2
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.imshow(np.array(match_img).astype('uint8'))  # RGB is integer type
    ax.plot(inliers[:, 0], inliers[:, 1], 'xr')
    ax.plot(inliers[:, 2] + offset, inliers[:, 3], 'xr')
    ax.plot([inliers[:, 0], inliers[:, 2] + offset],
            [inliers[:, 1], inliers[:, 3]], 'r', linewidth=0.2)
    # Save BEFORE show(): show() hands the figure to the GUI loop and, once
    # closed, the canvas is empty -- the previous order wrote a blank file.
    plt.savefig('foo.png')
    plt.show()
# In[66]:
plot_inlier_matches(inliers,total_img)
# In[29]:
def stitch_img(left, right, H):
    """Warp *left* by homography H and blend it with *right* into a panorama.

    Both images are normalised to float [0, 1] first to suppress noise.
    Pixels present in only one image are copied; pixels present in both are
    averaged; all-zero (black) pixels are treated as "absent".
    """
    # Convert to double and normalize to avoid noise artefacts.
    left = cv2.normalize(left.astype('float'), None,
                         0.0, 1.0, cv2.NORM_MINMAX)
    right = cv2.normalize(right.astype('float'), None,
                          0.0, 1.0, cv2.NORM_MINMAX)

    # Find where the left image's corners land under H so the canvas can be
    # translated to keep every warped pixel at non-negative coordinates.
    height_l, width_l, _ = left.shape
    corners = [[0, 0, 1], [width_l, 0, 1], [width_l, height_l, 1], [0, height_l, 1]]
    corners_new = np.array([np.dot(H, corner) for corner in corners]).T
    x_news = corners_new[0] / corners_new[2]
    y_news = corners_new[1] / corners_new[2]
    y_min = min(y_news)
    x_min = min(x_news)

    translation_mat = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]])
    H = np.dot(translation_mat, H)

    # Warp the left image onto the enlarged canvas.
    size = (int(round(abs(x_min) + width_l)), int(round(abs(y_min) + height_l)))
    warped_l = cv2.warpPerspective(src=left, M=H, dsize=size)

    # The right image only needs the translation onto its own canvas.
    height_r, width_r, _ = right.shape
    size = (int(round(abs(x_min) + width_r)), int(round(abs(y_min) + height_r)))
    warped_r = cv2.warpPerspective(src=right, M=translation_mat, dsize=size)

    # Blend over warped_r's extent. A pixel equal to (0, 0, 0) counts as
    # "absent". Vectorised masks replace the previous per-pixel Python loop
    # (same decision table: only-left keep, only-right copy, both average).
    h, w = warped_r.shape[:2]
    region_l = warped_l[:h, :w, :]  # view: writes update warped_l in place
    region_r = warped_r[:h, :w, :]
    present_l = np.any(region_l != 0, axis=2)
    present_r = np.any(region_r != 0, axis=2)
    both = present_l & present_r
    only_r = present_r & ~present_l
    region_l[only_r] = region_r[only_r]
    region_l[both] = (region_l[both] + region_r[both]) / 2

    # Crop the panorama to the blended region, as before.
    return warped_l[:h, :w, :]
# In[50]:
# Build the panorama from the two RGB images and the RANSAC homography.
res = stitch_img(leftt, rightt, H)
# In[51]:
plt.imshow(res)
plt.show()
|
#!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
from pimouse_ros.srv import TimedMotion
class Motor():
    """ROS node wrapper around the Raspberry Pi Mouse motor device files.

    Exposes raw-frequency and cmd_vel subscribers plus on/off/timed-motion
    services, all backed by writes to the /dev/rtmotor* special files.
    """

    def __init__(self):
        # Start powered off; bail out if the power device is unwritable.
        if not self.set_power(False): sys.exit(1)

        rospy.on_shutdown(self.set_power)  # power off again on node shutdown
        self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
        self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
        self.srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
        self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
        self.srv_tm = rospy.Service('timed_motion', TimedMotion, self.callback_tm)
        self.last_time = rospy.Time.now()
        self.using_cmd_vel = False

    def set_power(self, onoff=False):
        """Switch motor power on/off via /dev/rtmotoren0; return success."""
        en = "/dev/rtmotoren0"
        try:
            with open(en, 'w') as f:
                f.write("1\n" if onoff else "0\n")
            self.is_on = onoff
            return True
        except (IOError, OSError):
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to file I/O errors.
            rospy.logerr("cannot write to " + en)
            return False

    def set_raw_freq(self, left_hz, right_hz):
        """Write raw step frequencies (Hz) to the left/right motor devices."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return
        try:
            with open("/dev/rtmotor_raw_l0", 'w') as lf,\
                 open("/dev/rtmotor_raw_r0", 'w') as rf:
                lf.write(str(int(round(left_hz))) + "\n")
                rf.write(str(int(round(right_hz))) + "\n")
        except (IOError, OSError):
            rospy.logerr("cannot write to rtmotor_raw_*")

    def callback_raw_freq(self, message):
        self.set_raw_freq(message.left_hz, message.right_hz)

    def callback_cmd_vel(self, message):
        # Convert linear [m/s] and angular [rad/s] velocity into wheel step
        # frequencies (constants are robot-specific gearing/geometry factors
        # -- presumably for the Pi Mouse wheels; confirm against hardware docs).
        forward_hz = 80000.0 * message.linear.x / (9 * math.pi)
        rot_hz = 400.0 * message.angular.z / math.pi
        self.set_raw_freq(forward_hz - rot_hz, forward_hz + rot_hz)
        self.using_cmd_vel = True
        self.last_time = rospy.Time.now()

    def onoff_response(self, onoff):
        """Build a TriggerResponse reflecting the result of a power switch."""
        d = TriggerResponse()
        d.success = self.set_power(onoff)
        d.message = "ON" if self.is_on else "OFF"
        return d

    def callback_on(self, message): return self.onoff_response(True)
    def callback_off(self, message): return self.onoff_response(False)

    def callback_tm(self, message):
        """Run both motors at given frequencies for duration_ms; return success."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return False

        dev = "/dev/rtmotor0"
        try:
            with open(dev, 'w') as f:
                f.write("%d %d %d\n" %
                        (message.left_hz, message.right_hz, message.duration_ms))
        except (IOError, OSError):
            rospy.logerr("cannot write to " + dev)
            return False

        return True
if __name__ == '__main__':
    rospy.init_node('motors')
    motor = Motor()

    loop_rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        # Stop the wheels once cmd_vel has been silent for a full second.
        if motor.using_cmd_vel and rospy.Time.now().to_sec() - motor.last_time.to_sec() >= 1.0:
            motor.set_raw_freq(0, 0)
            motor.using_cmd_vel = False
        loop_rate.sleep()
|
import requests
def get_matches(
        since,
        count=1000,
        game="aoe2de",
        language="en"):
    """Fetch up to *count* matches from the aoe2.net API, starting at the
    *since* Unix timestamp. Raises for HTTP errors; returns parsed JSON."""
    assert isinstance(since, int)
    query = {
        "since": since,
        "count": count,
        "game": game,
        "language": language,
    }
    response = requests.get("https://aoe2.net/api/matches", params=query)
    response.raise_for_status()
    return response.json()
def get_strings(game="aoe2de", language="en"):
    """Fetch the aoe2.net localisation strings for *game*/*language*."""
    response = requests.get(
        "https://aoe2.net/api/strings",
        params={"game": game, "language": language},
    )
    response.raise_for_status()
    return response.json()
|
'''Utility routines that add in file access'''
try:
from ..common_util import util as ut
except:
from common_util import util as ut
import json
#############################
# Exceptions
#############################
class Error(Exception):
    """Base class for exceptions."""


class FileError(Error):
    """Raised when a file cannot be opened or has a malformed layout."""

    def __init__(self, msg):
        # Call up to Exception so str(e) and e.args carry the message
        # (previously str(e) was empty); keep .msg for existing callers.
        super(FileError, self).__init__(msg)
        self.msg = msg


class InternalError(Error):
    """Raised for logic errors inside this module (bad arguments or state)."""

    def __init__(self, msg):
        super(InternalError, self).__init__(msg)
        self.msg = msg
#############################
# Functions
#############################
def SplitFilename(filename):
    """Split *filename* into (name, extension) at the first '.'.

    Input: filename - <file>.<ext>
    Output: (file, ext); ext is "" when there is no '.' in the name.
    """
    pos = filename.find(".")
    # The previous code read `if pos >- 0:` -- i.e. `pos > 0` -- a typo for
    # `pos >= 0` that left names starting with '.' unsplit.
    if pos >= 0:
        return filename[0:pos], filename[pos + 1:]
    return (filename, "")
#############################
# Classes
#############################
class _FileBase(object):
# Base class used in file accesses
def __init__(self, filename):
# Input: file name
# Assumes that the first line are the variable names
self._filename = filename
self._OpenFile()
self._colnames = self._fh.readline().split() # Column names
self._CloseFile()
def _OpenFile(self):
# Only _OpenFile and _CloseFile modify self._fh
# On output, self._fh points to the fie
try:
self._fh = open(self._filename, 'r')
except:
raise FileError("%s cannot be opened" % self._filename)
def _CloseFile(self):
# Only _OpenFile and _CloseFile modify self._fh
self._fh.close()
self._fh = None
class File2Json(_FileBase):
    """Converts a tsv file (with a header line) into a JSON string."""

    def ReadAll(self):
        """Read the entire file and return its rows as a JSON array of objects.

        Each non-header line becomes a dict keyed by the header column names.
        Raises FileError for a line whose column count differs from the header.
        """
        self._OpenFile()
        lines = self._fh.readlines()
        self._CloseFile()

        lst = []
        n = 0  # line number
        for line in lines:
            n += 1
            if n == 1:
                continue  # Skip the header line
            colvals = line.split()
            if len(colvals) != len(self._colnames):
                # Was "%n" -- an invalid printf conversion character that
                # raised ValueError instead of the intended FileError.
                msg = "Inconsistent column format in file %s in line %d" % (
                    self._filename, n)
                raise FileError(msg)
            dic = {}
            for i in range(len(colvals)):
                dic[self._colnames[i]] = ut.ConvertType(colvals[i])
            lst.append(dic)
        return json.dumps(lst)
class CrdFile2Json(_FileBase):
    """Converts a structured tsv file of (y, x, value) rows into JSON.

    Each data row in the file is a point consisting of successive columns for
    the y coordinate, the x coordinate, and the value at that (x, y) position.
    The first line is a header naming the columns. Coordinate values are
    ordered by their sequence of first occurrence. Points are exposed as an
    array of dicts with the x index, y index, and converted value.
    """

    NUM_COLS = 3  # y, x, value

    def __init__(self, filename):
        self._xcrds = []   # distinct x coordinate values, in first-seen order
        self._ycrds = []   # distinct y coordinate values, in first-seen order
        self._points = []  # dicts of {"x", "y", "value"}
        super(CrdFile2Json, self).__init__(filename)

    def _GetCoord(self, coord_type, coord_value):
        """Return the 1-based index of *coord_value* in its coordinate list.

        Input: coord_type - "x" or "y"
               coord_value - character value of the coordinate
        Raises InternalError for an unknown coord_type or unseen coordinate.
        """
        if coord_type == "x":
            coords = self._xcrds
        elif coord_type == "y":
            coords = self._ycrds
        else:
            raise InternalError("Invalid coord_type - %s" % coord_type)
        try:
            return coords.index(coord_value) + 1
        except ValueError:
            # The old code checked `result is None` after index(), which can
            # never happen (index() raises ValueError), and its error message
            # referenced an undefined variable plus an invalid "%n" format.
            msg = "Invalid coordinate %s for coordinate type %s in file %s" % (
                coord_value, coord_type, self._filename)
            raise InternalError(msg)

    def ReadAll(self):
        """Process the file; retrieve results with GetXCrds/GetYCrds/GetPoints."""
        # Get the file data
        self._OpenFile()
        lines = self._fh.readlines()
        self._CloseFile()

        n = 0  # line number
        # Iterate across all rows in the file
        for line in lines:
            n += 1
            if n == 1:
                continue  # Skip the header line
            colvals = line.split()
            if len(colvals) != CrdFile2Json.NUM_COLS:
                # "%n" is not a valid printf conversion character; this raised
                # ValueError instead of the intended FileError.
                msg = "Inconsistent column format in file %s in line %d" % (
                    self._filename, n)
                raise FileError(msg)
            ycrd, xcrd, val = colvals
            # Register unseen coordinates, preserving first-seen order
            # (membership tests replace the previous try/except-index idiom).
            if ycrd not in self._ycrds:
                self._ycrds.append(ycrd)
            if xcrd not in self._xcrds:
                self._xcrds.append(xcrd)
            # Populate the dictionary for this point
            self._points.append({
                "y": self._GetCoord("y", ycrd),
                "x": self._GetCoord("x", xcrd),
                "value": ut.ConvertType(val),
            })

    def GetXCrds(self):
        """JSON array of the distinct x coordinate values."""
        return json.dumps(self._xcrds)

    def GetYCrds(self):
        """JSON array of the distinct y coordinate values."""
        return json.dumps(self._ycrds)

    def GetPoints(self):
        """JSON array of the {x, y, value} point dicts."""
        return json.dumps(self._points)

    def GetHeaders(self):
        """The header column names parsed by _FileBase."""
        return self._colnames
|
from django.shortcuts import render
# from profiles.models import Profile
# from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .models import Profile
# User = get_user_model()
# Create your views here.
@login_required
def get_profile(request):
    """Render the logged-in user's profile page."""
    # NOTE(review): Profile is looked up by the auth user's id, which assumes
    # Profile pks mirror User pks -- confirm against the Profile model.
    auth_user_id = request.session.get('_auth_user_id')
    context = {
        'user': User.objects.get(id=auth_user_id),
        'profile': Profile.objects.get(id=auth_user_id),
    }
    return render(request, "profiles/profile.html", context)
|
# This file encapsulates the various prediction algorithms that take a trip and return a label data structure
# Named "inferrers.py" instead of "predictors.py" to avoid a name collection in our abbreviated import convention
import logging
import random
import copy
import emission.analysis.modelling.tour_model_first_only.load_predict as lp
# A set of placeholder predictors to allow pipeline development without a real inference algorithm.
# For the moment, the system is configured to work with two labels, "mode_confirm" and
# "purpose_confirm", so I'll do that.
# The first placeholder scenario represents a case where it is hard to distinguish between
# biking and walking (e.g., because the user is a very slow biker) and hard to distinguish
# between work and shopping at the grocery store (e.g., because the user works at the
# grocery store), but whenever the user bikes to the location it is to work and whenever
# the user walks to the location it is to shop (e.g., because they don't have a basket on
# their bike), and the user bikes to the location four times more than they walk there.
# Obviously, it is a simplification.
def placeholder_predictor_0(trip):
    """Fixed prediction for any trip: 80% bike-to-work, 20% walk-to-shop."""
    bike_work = {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.8}
    walk_shop = {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.2}
    return [bike_work, walk_shop]
# The next placeholder scenario provides that same set of labels in 75% of cases and no
# labels in the rest.
def placeholder_predictor_1(trip):
    """Same labels as predictor 0 in ~75% of cases, and no labels otherwise."""
    if random.random() <= 0.25:
        return []
    return [
        {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.8},
        {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.2}
    ]
# This third scenario provides labels designed to test the soundness and resilience of
# the client-side inference processing algorithms.
def placeholder_predictor_2(trip):
    """Deterministic per-trip labels for testing client-side inference handling.

    The trip's start hour/minute is mapped to a hardcoded index (matching the
    "test_july_22" fixture); unknown timestamps fall back to index 0.
    """
    # Timestamp2index gives us a deterministic way to match test trips with labels
    timestamp2index = {494: 5, 565: 4, 795: 3, 805: 2, 880: 1, 960: 0}
    start = trip["data"]["start_local_dt"]
    timestamp = start["hour"] * 60 + start["minute"]
    index = timestamp2index.get(timestamp, 0)
    predictions = [
        [
        ],
        [
            {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.8},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.2}
        ],
        [
            {"labels": {"mode_confirm": "drove_alone"}, "p": 0.8},
        ],
        [
            {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.8},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.2}
        ],
        [
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.45},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "entertainment"}, "p": 0.35},
            {"labels": {"mode_confirm": "drove_alone", "purpose_confirm": "work"}, "p": 0.15},
            {"labels": {"mode_confirm": "shared_ride", "purpose_confirm": "work"}, "p": 0.05}
        ],
        [
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.45},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "entertainment"}, "p": 0.35},
            {"labels": {"mode_confirm": "drove_alone", "purpose_confirm": "work"}, "p": 0.15},
            {"labels": {"mode_confirm": "shared_ride", "purpose_confirm": "work"}, "p": 0.05}
        ]
    ]
    return predictions[index]
# This fourth scenario provides labels designed to test the expectation and notification system.
def placeholder_predictor_3(trip):
    """Deterministic per-trip labels for testing expectation/notification logic.

    Same timestamp-to-index scheme as placeholder_predictor_2, different
    probability tables.
    """
    timestamp2index = {494: 5, 565: 4, 795: 3, 805: 2, 880: 1, 960: 0}
    start = trip["data"]["start_local_dt"]
    index = timestamp2index.get(start["hour"] * 60 + start["minute"], 0)
    predictions = [
        [
            {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.80},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.20}
        ],
        [
            {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.80},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.20}
        ],
        [
            {"labels": {"mode_confirm": "drove_alone", "purpose_confirm": "entertainment"}, "p": 0.70},
        ],
        [
            {"labels": {"mode_confirm": "bike", "purpose_confirm": "work"}, "p": 0.96},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.04}
        ],
        [
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.45},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "entertainment"}, "p": 0.35},
            {"labels": {"mode_confirm": "drove_alone", "purpose_confirm": "work"}, "p": 0.15},
            {"labels": {"mode_confirm": "shared_ride", "purpose_confirm": "work"}, "p": 0.05}
        ],
        [
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "shopping"}, "p": 0.60},
            {"labels": {"mode_confirm": "walk", "purpose_confirm": "entertainment"}, "p": 0.25},
            {"labels": {"mode_confirm": "drove_alone", "purpose_confirm": "work"}, "p": 0.11},
            {"labels": {"mode_confirm": "shared_ride", "purpose_confirm": "work"}, "p": 0.04}
        ]
    ]
    return predictions[index]
# Placeholder that is suitable for a demo.
# Finds all unique label combinations for this user and picks one randomly
def placeholder_predictor_demo(trip):
    """Demo predictor: return one of this user's own past label combos at random."""
    import random
    import emission.core.get_database as edb

    user = trip["user_id"]
    unique_user_inputs = edb.get_analysis_timeseries_db().find(
        {"user_id": user}).distinct("data.user_input")
    if not unique_user_inputs:
        return []
    # 1-in-10 chance of returning no labels at all.
    random_user_input = random.choice(unique_user_inputs) if random.randrange(0, 10) > 0 else []
    logging.debug(f"In placeholder_predictor_demo: found {len(unique_user_inputs)} for user {user}, returning value {random_user_input}")
    return [{"labels": random_user_input, "p": random.random()}]
# Non-placeholder implementation. First bins the trips, and then clusters every bin
# See emission.analysis.modelling.tour_model for more details
# Assumes that pre-built models are stored in working directory
# Models are built using evaluation_pipeline.py and build_save_model.py
# This algorithm is now DEPRECATED in favor of predict_cluster_confidence_discounting (see https://github.com/e-mission/e-mission-docs/issues/663)
def predict_two_stage_bin_cluster(trip):
    """Bin-then-cluster label prediction (DEPRECATED; see e-mission-docs#663).

    Thin wrapper over load_predict.predict_labels; assumes pre-built models
    (from evaluation_pipeline.py / build_save_model.py) in the working directory.
    """
    return lp.predict_labels(trip)
# Reduce the confidence of the clustering prediction when the number of trips in the cluster is small
# See https://github.com/e-mission/e-mission-docs/issues/663
def n_to_confidence_coeff(n, max_confidence=None, first_confidence=None, confidence_multiplier=None):
    """Discounting coefficient for a cluster containing *n* trips.

    Implements u = A' - (A' - B) * (1 - C)**(n - 1) from
    https://github.com/e-mission/e-mission-docs/issues/663, where A' is the
    asymptotic confidence (1-A), B the n=1 confidence, and C how much of the
    remaining removable confidence is removed per additional trip.
    """
    # Defaults per the GitHub issue: 1-A, B and C respectively.
    if max_confidence is None:
        max_confidence = 0.99
    if first_confidence is None:
        first_confidence = 0.80
    if confidence_multiplier is None:
        confidence_multiplier = 0.30
    removable = max_confidence - first_confidence
    return max_confidence - removable * (1 - confidence_multiplier) ** (n - 1)
# predict_two_stage_bin_cluster but with the above reduction in confidence
def predict_cluster_confidence_discounting(trip, max_confidence=None, first_confidence=None, confidence_multiplier=None):
    """predict_two_stage_bin_cluster with confidence reduced for small clusters."""
    labels, n = lp.predict_labels_with_n(trip)
    if n <= 0:  # No model data or trip didn't match a cluster
        logging.debug(f"In predict_cluster_confidence_discounting: n={n}; returning as-is")
        return labels

    confidence_coeff = n_to_confidence_coeff(n, max_confidence, first_confidence, confidence_multiplier)
    logging.debug(f"In predict_cluster_confidence_discounting: n={n}; discounting with coefficient {confidence_coeff}")

    # Deep-copy so the caller's (and cache's) label dicts are not mutated.
    discounted = copy.deepcopy(labels)
    for entry in discounted:
        entry["p"] *= confidence_coeff
    return discounted
|
#
# $Copyright (c) 2019 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its subsidiaries and/or its affiliates and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
#
from pysys.constants import *
from apamax.analyticsbuilder.basetest import AnalyticsBuilderBaseTest
import json
class PySysTest(AnalyticsBuilderBaseTest):
    """Tests the DeviceLocationInput block against a single device and a device group."""

    def inputManagedObject(self, id, type, name, supportedOperations=None, supportedMeasurements=None,
                           childDeviceIds=None, childAssetIds=None, deviceParentIds=None,
                           assetParentIds=None, position=None, params=None):
        """
        Generate the string form of a managed object event.

        :param id: Unique device identifier of the device.
        :param type: Type of the managed object.
        :param name: Name of the device.
        :param supportedOperations: A list of supported operations for this device.
        :param supportedMeasurements: A list of supported measurements for this device.
        :param childDeviceIds: The identifiers of the child devices.
        :param childAssetIds: The identifiers of the child assets.
        :param deviceParentIds: The identifiers of the parent devices.
        :param assetParentIds: The identifiers of the parent assets.
        :param position: Contains 'lat', 'lng', 'altitude' and 'accuracy'.
        :param params: Other fragments for the managed object.
        """
        # Mutable default arguments ([]/{}) are shared between calls in Python;
        # use None sentinels and create fresh containers per call instead.
        supportedOperations = [] if supportedOperations is None else supportedOperations
        supportedMeasurements = [] if supportedMeasurements is None else supportedMeasurements
        childDeviceIds = [] if childDeviceIds is None else childDeviceIds
        childAssetIds = [] if childAssetIds is None else childAssetIds
        deviceParentIds = [] if deviceParentIds is None else deviceParentIds
        assetParentIds = [] if assetParentIds is None else assetParentIds
        position = {} if position is None else position
        params = {} if params is None else params

        # position/params are double-encoded on purpose: the EPL event expects
        # them as JSON strings inside the event string.
        managedObjectParams = ', '.join([json.dumps(id), json.dumps(type), json.dumps(name), json.dumps(supportedOperations), json.dumps(supportedMeasurements),
                                         json.dumps(childDeviceIds), json.dumps(childAssetIds), json.dumps(deviceParentIds),
                                         json.dumps(assetParentIds),
                                         json.dumps(json.dumps(position)),
                                         json.dumps(json.dumps(params))])
        return f'apamax.analyticsbuilder.test.SendManagedObject({managedObjectParams})'

    def execute(self):
        """Deploy the block for a single device, then for a device group, and feed events."""
        correlator = self.startAnalyticsBuilderCorrelator(blockSourceDir=f'{self.project.SOURCE}/blocks/')
        correlator.injectEPL(self.input + '/SendC8yObjects.mon')

        deviceId = '100'
        self.models = []
        modelId = self.createTestModel('apamax.analyticsbuilder.samples.DeviceLocationInput', parameters={'deviceId':deviceId}, isDeviceOrGroup='c8y_IsDevice')
        self.models.append(modelId)
        self.sendEventStrings(correlator,
                              self.timestamp(1),
                              self.timestamp(2.6),
                              self.inputManagedObject(deviceId, 'com_test_device_100' ,'Device_100',[],[],[],[],[],[],
                                                      {'alt':93.5,'lat':17.426479,'lng':78.33123},{'c8y_IsDevice':{}}),
                              self.timestamp(3),
                              self.timestamp(4))

        # Creating model with a group. This group has two devices with ids '1' and '2'.
        groupId='group1'
        sub_groupDeviceIds=['1', '2']
        modelId = self.createTestModel('apamax.analyticsbuilder.samples.DeviceLocationInput', parameters={'deviceId': groupId}, isDeviceOrGroup='c8y_IsDeviceGroup')
        self.models.append(modelId)
        self.sendEventStrings(correlator,
                              self.timestamp(10),
                              self.timestamp(12),
                              self.inputManagedObject(sub_groupDeviceIds[0], 'com_test_device_1', 'Device_1', [], [], [], [], [], [],
                                                      {'alt': 98.4, 'lat': 3.1428, 'lng': 6.2857},
                                                      {'c8y_IsDevice': {}}),
                              self.timestamp(13),
                              self.timestamp(14),
                              self.inputManagedObject(sub_groupDeviceIds[1], 'com_test_device_2', 'Device_2', [], [], [],
                                                      [], [], [],
                                                      {'alt': 94.4, 'lat': 2.7182, 'lng': 0.3678},
                                                      {'c8y_IsDevice': {}}),
                              self.timestamp(15),
                              self.timestamp(20))

    def validate(self):
        """Check the logs are clean, models started, and the block emitted the expected positions."""
        # Verifying that there are no errors in log file.
        self.checkLogs()
        # Verifying that each model is deployed successfully.
        for modelId in self.models:
            self.assertGrep(self.analyticsBuilderCorrelator.logfile, expr='Model \"'+modelId +'\" with PRODUCTION mode has started')
        # Verifying the outputs from the block.
        self.assertThat('outputs == expected', outputs = [value['properties'] for value in self.allOutputFromBlock(modelId = self.models[0])], expected = [
            {'alt':93.5, 'lat':17.426479, 'lng':78.33123 }])
        self.assertThat('outputs == expected', outputs = [value['properties'] for value in self.allOutputFromBlock(modelId = self.models[1])], expected = [
            {'alt':98.4, 'lat':3.1428, 'lng':6.2857 },
            {'alt':94.4, 'lat':2.7182, 'lng':.3678 }])
|
# -*- coding: utf-8 -*-
from ipodio import __version__
from setuptools import setup, find_packages
# Package metadata and console entry point for the ipodio iPod manager.
setup(
    name='ipodio',
    version=__version__,
    description='iPod command line managing tool',
    author='Javier Santacruz',
    author_email='javier.santacruz.lc@gmail.com',
    url='https://github.com/jvrsantacruz/ipodio',
    # Ship everything except the test suite.
    packages=find_packages(exclude=['spec', 'spec.*']),
    install_requires=[
        'docopt',
        'mp3hash'
    ],
    classifiers=[
        'Environment :: Console',
        'Operating System :: POSIX',
        'Development Status :: 3 - Alpha',
        'Topic :: Multimedia :: Sound/Audio',
        'Intended Audience :: End Users/Desktop',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
    ],
    platforms=['Unix'],
    # Installs the `ipodio` shell command -> ipodio.cli:main
    entry_points={
        'console_scripts': ['ipodio = ipodio.cli:main']
    }
)
|
# pylint: disable=missing-module-docstring
# pylint: disable=missing-function-docstring
from pathlib import Path
from setuptools import setup
from youconfigme import __version__
def read_readme():
    """Return the contents of the README.md that sits next to this file."""
    readme_f = Path(__file__).parent / 'README.md'
    # Explicit encoding: the platform default (e.g. cp1252 on Windows) would
    # fail on a UTF-8 README.
    with open(readme_f, encoding='utf-8') as f:
        return f.read()
# Package metadata; long_description is rendered from the adjacent README.
setup(
    name='youconfigme',
    version=__version__,
    description='YouConfigMe helps you manage config in a pythonic way',
    long_description=read_readme(),
    long_description_content_type='text/markdown',
    url='https://github.com/crossnox/YouConfigMe',
    author='CrossNox',
    install_requires=[],
    # Optional dependency sets: `pip install youconfigme[test]` / `[dev]`.
    extras_require={
        'test': ['pytest'],
        'dev': [
            'pre-commit',
            'mypy',
            'flake8',
            'isort',
            'black',
            'pylint',
            'bump',
            'nox',
        ],
    },
    packages=['youconfigme'],
    classifiers=['Programming Language :: Python :: 3'],
)
|
# early_exit_experiments.py
# runs the experiments in section 5.1
import torch
import numpy as np
import aux_funcs as af
import model_funcs as mf
import network_architectures as arcs
from profiler import profile_sdn, profile
def early_exit_experiments(models_path, device='cpu'):
    """Section 5.1 experiments: compare a baseline CNN's cost/accuracy with
    its SDN's early-exit behavior across a range of confidence thresholds.

    :param models_path: directory holding the trained models
    :param device: torch device string ('cpu' or e.g. 'cuda')
    """
    sdn_training_type = 'ic_only' # IC-only training
    #sdn_training_type = 'sdn_training' # SDN training
    # task = 'cifar10'
    # task = 'cifar100'
    task = 'tinyimagenet'
    #sdn_names = ['vgg16bn_sdn', 'resnet56_sdn', 'wideresnet32_4_sdn', 'mobilenet_sdn']; add_trigger = False
    sdn_names = ['vgg16bn_sdn']; add_trigger = False
    sdn_names = [task + '_' + sdn_name + '_' + sdn_training_type for sdn_name in sdn_names]
    for sdn_name in sdn_names:
        # Derive the baseline CNN's saved-model name from the SDN's name
        cnn_name = sdn_name.replace('sdn', 'cnn')
        cnn_name = cnn_name.replace('_ic_only', '')
        cnn_name = cnn_name.replace('_sdn_training', '')
        print(sdn_name)
        print(cnn_name)
        sdn_model, sdn_params = arcs.load_model(models_path, sdn_name, epoch=-1)
        sdn_model.to(device)
        dataset = af.get_dataset(sdn_params['task'])
        cnn_model, _ = arcs.load_model(models_path, cnn_name, epoch=-1)
        cnn_model.to(device)
        # --- Baseline CNN: accuracy and compute cost ---
        print('Get CNN results')
        top1_test, top5_test, total_time = mf.cnn_test_time(cnn_model, dataset.test_loader, device)
        total_ops, total_params = profile(cnn_model, cnn_model.input_size, device)
        print("#Ops: %f GOps"%(total_ops/1e9))
        print("#Parameters: %f M"%(total_params/1e6))
        print('Top1 Test accuracy: {}'.format(top1_test))
        #print('Top5 Test accuracy: {}'.format(top5_test))
        # Reference budgets: fractions of the full CNN's cost
        print('25 percent cost: {}'.format((total_ops/1e9)*0.25))
        print('50 percent cost: {}'.format((total_ops/1e9)*0.5))
        print('75 percent cost: {}'.format((total_ops/1e9)*0.75))
        # to test early-exits with the SDN (batch size 1 so each sample can
        # exit independently)
        one_batch_dataset = af.get_dataset(sdn_params['task'], 1)
        print('Get SDN early exit results')
        # total_ops is rebound here: now per-exit costs indexed by output id
        total_ops, total_params = profile_sdn(sdn_model, sdn_model.input_size, device)
        print("#Ops (GOps): {}".format(total_ops))
        print("#Params (mil): {}".format(total_params))
        top1_test, top5_test = mf.sdn_test(sdn_model, dataset.test_loader, device)
        print('Top1 Test accuracy: {}'.format(top1_test))
        #print('Top5 Test accuracy: {}'.format(top5_test))
        print('Calibrate confidence_thresholds')
        confidence_thresholds = [0.1, 0.15, 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999] # search for the confidence threshold for early exits
        sdn_model.forward = sdn_model.early_exit
        for threshold in confidence_thresholds:
            print(threshold)
            sdn_model.confidence_threshold = threshold
            # change the forward func for sdn to forward with cascade
            top1_test, top5_test, early_exit_counts, non_conf_exit_counts, total_time = mf.sdn_test_early_exits(sdn_model, one_batch_dataset.test_loader, device)
            average_mult_ops = 0
            total_num_instances = 0
            # Weight each exit's cost by how many samples took it
            for output_id, output_count in enumerate(early_exit_counts):
                average_mult_ops += output_count*total_ops[output_id]
                total_num_instances += output_count
            for output_count in non_conf_exit_counts:
                total_num_instances += output_count
                # NOTE(review): `output_id` here is the leftover value from
                # the loop above (the last exit's index), so non-confident
                # samples are charged the last exit's cost. If that index is
                # the final output this may be intended — confirm.
                average_mult_ops += output_count*total_ops[output_id]
            average_mult_ops /= total_num_instances
            print('Early exit Counts:')
            print(early_exit_counts)
            print('Non confident exit counts:')
            print(non_conf_exit_counts)
            print('Top1 Test accuracy: {}'.format(top1_test))
            print('Top5 Test accuracy: {}'.format(top5_test))
            print('SDN cascading took {} seconds.'.format(total_time))
            print('Average Mult-Ops: {}'.format(average_mult_ops))
def main():
    """Seed the RNGs for reproducibility and run the early-exit experiments."""
    torch.manual_seed(af.get_random_seed())  # reproducible
    np.random.seed(af.get_random_seed())
    device = af.get_pytorch_device()
    models_dir = 'networks/{}'.format(af.get_random_seed())
    #models_dir = 'backdoored_models'
    early_exit_experiments(models_dir, device)


if __name__ == '__main__':
    main()
'''
求出 MK 平均值
给你两个整数 m 和 k ,以及数据流形式的若干整数。你需要实现一个数据结构,计算这个数据流的 MK 平均值 。
MK 平均值 按照如下步骤计算:
如果数据流中的整数少于 m 个,MK 平均值 为 -1 ,否则将数据流中最后 m 个元素拷贝到一个独立的容器中。
从这个容器中删除最小的 k 个数和最大的 k 个数。
计算剩余元素的平均值,并 向下取整到最近的整数 。
请你实现 MKAverage 类:
MKAverage(int m, int k) 用一个空的数据流和两个整数 m 和 k 初始化 MKAverage 对象。
void addElement(int num) 往数据流中插入一个新的元素 num 。
int calculateMKAverage() 对当前的数据流计算并返回 MK 平均数 ,结果需 向下取整到最近的整数 。
示例 1:
输入:
["MKAverage", "addElement", "addElement", "calculateMKAverage", "addElement", "calculateMKAverage",
"addElement", "addElement", "addElement", "calculateMKAverage"]
[[3, 1], [3], [1], [], [10], [], [5], [5], [5], []]
输出:
[null, null, null, -1, null, 3, null, null, null, 5]
解释:
MKAverage obj = new MKAverage(3, 1)
obj.addElement(3) # 当前元素为 [3]
obj.addElement(1) # 当前元素为 [3,1]
obj.calculateMKAverage() # 返回 -1 ,因为 m = 3 ,但数据流中只有 2 个元素
obj.addElement(10) # 当前元素为 [3,1,10]
obj.calculateMKAverage() # 最后 3 个元素为 [3,1,10]
# 删除最小以及最大的 1 个元素后,容器为 [3]
# [3] 的平均值等于 3/1 = 3 ,故返回 3
obj.addElement(5) # 当前元素为 [3,1,10,5]
obj.addElement(5) # 当前元素为 [3,1,10,5,5]
obj.addElement(5) # 当前元素为 [3,1,10,5,5,5]
obj.calculateMKAverage() # 最后 3 个元素为 [5,5,5]
# 删除最小以及最大的 1 个元素后,容器为 [5]
# [5] 的平均值等于 5/1 = 5 ,故返回 5
提示:
3 <= m <= 10^5
1 <= k*2 < m
1 <= num <= 10^5
addElement 与 calculateMKAverage 总操作次数不超过 10^5 次。
'''
from bisect import bisect_left, insort
from collections import Counter
from collections import deque

from sortedcontainers import SortedList
'''
思路:队列+平衡二叉树
设1个队列q,保持数据流中最多m个元素
设3个平衡二叉树的实现sortedList:minK,maxK,mid,保存最小、最大、中间的k、k、m-2k个元素
算法如下:
如果数据流小于m,暂存到数值中。
一旦数据流大于等于m,则:
> 将最大的k个数加入最小堆
> 将最小的k个数加入最大堆
> 剩余的数求和total,记住不在最大最小堆中的数据个数count
后续每加入一个新的元素num,
> 如果大于最大k个数中的最小数,将其替换最大k个数中最小数a,此时calculateMKAverage结果为(total+a)/(count+1)
> 如果小于最小k个数中的最大数,将其替换最小k个数中最大数a,此时calculateMKAverage结果为(total+a)/(count+1)
时间复杂度:单次addElement为O(logk),触发建堆的那一次addElement的时间复杂度为O(mlogk),单次calculateMKAverage为O(1)
空间复杂度:O(k),需要2个k大小的堆
TODO 第15个测试案例未通过
'''
class MKAverage:
    """Sliding-window MK average (LeetCode 1825).

    Keeps the last m stream elements and reports the floored mean of the
    window after dropping its k smallest and k largest elements (-1 while
    fewer than m elements have been seen).

    The window is partitioned into three sorted lists:
      * minK  — the k smallest elements,
      * mid   — the m - 2k middle elements, with a running sum,
      * maxK  — the k largest elements.
    Each addElement evicts the expired element, inserts the new one into the
    right partition, and rebalances each boundary by at most one element.

    Bug fixed vs. the original: rebalancing compared len(mid) against k
    instead of restoring |maxK| == k, which corrupted the partitions (and
    the running sum) whenever m - 2k != k.
    """

    def __init__(self, m: int, k: int):
        self.m = m
        self.k = k
        self.count = m - 2 * k          # size of the middle partition
        self.q = deque()                # sliding window, arrival order
        self.minK = []                  # k smallest, sorted
        self.mid = []                   # middle elements, sorted
        self.maxK = []                  # k largest, sorted
        self.total = 0                  # running sum of self.mid

    def addElement(self, num: int) -> None:
        """Push *num* into the stream, maintaining the three partitions."""
        self.q.append(num)
        if len(self.q) < self.m:
            return
        if len(self.q) == self.m and not self.minK:
            # Window just filled for the first time: build the partitions.
            ordered = sorted(self.q)
            self.minK = ordered[:self.k]
            self.mid = ordered[self.k:self.m - self.k]
            self.maxK = ordered[self.m - self.k:]
            self.total = sum(self.mid)
            return
        # Evict the element that just left the window, then insert num.
        self._remove(self.q.popleft())
        if self.minK and num <= self.minK[-1]:
            insort(self.minK, num)
        elif self.maxK and num >= self.maxK[0]:
            insort(self.maxK, num)
        else:
            insort(self.mid, num)
            self.total += num
        self._rebalance()

    def _remove(self, old: int) -> None:
        """Remove one occurrence of *old* from whichever partition holds it."""
        i = bisect_left(self.minK, old)
        if i < len(self.minK) and self.minK[i] == old:
            self.minK.pop(i)
            return
        i = bisect_left(self.maxK, old)
        if i < len(self.maxK) and self.maxK[i] == old:
            self.maxK.pop(i)
            return
        i = bisect_left(self.mid, old)
        self.mid.pop(i)
        self.total -= old

    def _rebalance(self) -> None:
        """Restore |minK| == k and |maxK| == k (each off by at most one)."""
        if len(self.minK) > self.k:
            val = self.minK.pop()
            insort(self.mid, val)
            self.total += val
        elif len(self.minK) < self.k:
            val = self.mid.pop(0)
            insort(self.minK, val)
            self.total -= val
        if len(self.maxK) > self.k:
            val = self.maxK.pop(0)
            insort(self.mid, val)
            self.total += val
        elif len(self.maxK) < self.k:
            val = self.mid.pop()
            insort(self.maxK, val)
            self.total -= val

    def calculateMKAverage(self) -> int:
        """Return the current MK average, or -1 before m elements arrived."""
        if len(self.q) < self.m:
            return -1
        return self.total // self.count
# Manual check against the problem's worked example (m=3, k=1)
obj = MKAverage(3, 1)
obj.addElement(3)                # window: [3]
obj.addElement(1)                # window: [3, 1]
print(obj.calculateMKAverage())  # -1: only 2 of the required 3 elements
obj.addElement(10)               # window: [3, 1, 10]
print(obj.calculateMKAverage())  # last 3 = [3, 1, 10]
                                 # drop min and max -> [3]
                                 # mean of [3] is 3/1 = 3
obj.addElement(5)                # window: [3, 1, 10, 5]
obj.addElement(5)                # window: [3, 1, 10, 5, 5]
obj.addElement(5)                # window: [3, 1, 10, 5, 5, 5]
print(obj.calculateMKAverage())  # last 3 = [5, 5, 5]
                                 # drop min and max -> [5]
                                 # mean of [5] is 5/1 = 5
# Second check with larger values (m=3, k=1)
obj = MKAverage(3, 1)
obj.addElement(17612)            # window: [17612]
obj.addElement(74607)            # window: [17612, 74607]
print(obj.calculateMKAverage())  # -1: only 2 of the required 3 elements
obj.addElement(8272)             # window: [17612, 74607, 8272]
obj.addElement(33433)            # last 3 = [74607, 8272, 33433]
print(obj.calculateMKAverage())  # drop 8272 and 74607 -> expected 33433
obj.addElement(15456)            # last 3 = [8272, 33433, 15456]
obj.addElement(64938)            # last 3 = [33433, 15456, 64938]
print(obj.calculateMKAverage())  # drop 15456 and 64938 -> expected 33433
obj.addElement(99741)            # last 3 = [15456, 64938, 99741]
# (no further output; the element above only updates the window)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import array
import binascii
import contextlib
import mmap
import sys
import numpy
import pytest
from cybuffer import cybuffer
try:
from cybuffer import getbuffer
except ImportError:
getbuffer = memoryview
Py_UNICODE_SIZE = array.array('u').itemsize
@pytest.mark.skipif(
    sys.version_info[0] != 2, reason="getbuffer is Python 2 only"
)
@pytest.mark.parametrize("v, to_char", [
    (b"abcdefghi", lambda c: c),
    (bytearray(b"abcdefghi"), chr),
])
def test_getbuffer(v, to_char):
    """getbuffer(v) wraps v in a Python 2 ``buffer`` whose contents and
    read-only flag match those of a plain memoryview over v.

    ``to_char`` converts one element of ``v`` to the single-character form
    that iterating a ``buffer`` yields for that source type.
    """
    # Initialize buffers
    b = getbuffer(v)
    m = memoryview(v)
    mb = memoryview(b)
    # Validate type (``buffer`` is the Python 2 builtin)
    assert isinstance(b, buffer)
    # Validate content
    assert list(b) == list(map(to_char, v))
    # Validate permissions
    assert mb.readonly == m.readonly
def test_empty_constructor():
    """cybuffer requires a source object; no-arg construction must raise."""
    with pytest.raises(TypeError):
        cybuffer()
def validate_against_memoryview(v, b, m, suboffsets=tuple()):
    """Shared assertions: cybuffer ``b`` over source object ``v`` must agree
    with the reference memoryview ``m`` on layout, size and content.

    :param v: the original buffer-exporting object
    :param b: the cybuffer under test
    :param m: a memoryview over the same data
    :param suboffsets: expected suboffsets tuple (empty for contiguous data)
    """
    # Test view properties' data relationships
    assert b.obj is v
    assert b.nbytes == len(m.tobytes())
    assert b.itemsize == (len(m.tobytes()) // len(v))
    assert b.ndim == m.ndim
    assert b.suboffsets == suboffsets
    assert b.shape == (len(v),)
    assert b.strides == (len(m.tobytes()) // len(v),)
    # Test Python 3+ properties (not available on Python 2 memoryviews)
    if sys.version_info.major > 2:
        assert b.obj is m.obj
        assert b.c_contiguous == m.c_contiguous
        assert b.f_contiguous == m.f_contiguous
        assert b.contiguous == m.contiguous
        assert b.nbytes == m.nbytes
    # Test methods
    assert b.tobytes() == m.tobytes()
    if sys.version_info.major > 2:
        assert b.hex() == m.hex()
    else:
        # Python 2 memoryview has no .hex(); compare via binascii instead
        assert b.hex() == binascii.hexlify(m)
@pytest.mark.parametrize("v", [
    b"abcdefghi",
    bytearray(b"abcdefghi"),
])
def test_bytes(v):
    """cybuffer over bytes/bytearray matches memoryview on format,
    contiguity, permissions and content."""
    # Initialize buffers
    b = cybuffer(v)
    m = memoryview(v)
    # Validate format
    assert b.format == m.format
    assert b.itemsize == m.itemsize
    # Validate contiguity (1-D contiguous data is both C- and F-contiguous)
    assert b.c_contiguous
    assert b.f_contiguous
    assert b.contiguous
    # Validate permissions (bytes is read-only, bytearray is writable)
    assert b.readonly == m.readonly
    # Test methods
    assert b.tolist() == m.tolist()
    validate_against_memoryview(v, b, m)
@pytest.mark.parametrize("f",
    ["b", "B", "h", "H", "i", "I", "l", "L", "q", "Q", "f", "d"]
)
def test_1d_arrays(f):
    """cybuffer over a 1-D array.array of each numeric typecode."""
    # Skip some newer types ('q'/'Q' arrays require Python 3)
    if sys.version_info.major < 3 and f in "qQ":
        pytest.skip("Format `%s` not available on Python 2" % f)
    # Initialize buffers
    v = array.array(f, [0, 1, 2, 3, 4])
    b = cybuffer(v)
    m = memoryview(getbuffer(v))
    # Validate format
    assert b.format == v.typecode
    assert b.itemsize == v.itemsize
    # Validate contiguity
    assert b.c_contiguous
    assert b.f_contiguous
    assert b.contiguous
    # Validate permissions (arrays are writable)
    assert not b.readonly
    # Test methods
    assert b.tolist() == v.tolist()
    validate_against_memoryview(v, b, m)
@pytest.mark.parametrize("f, s", [
    ("c", b"Hello World!"),
    ("u", u"Hello World!"),
])
def test_1d_text_arrays(f, s):
    """cybuffer over a 1-D text array.array ('c' on Py2, 'u' unicode).

    Fix: the typecode was compared with ``is`` (object identity) instead of
    ``==``; identity of equal string literals is a CPython interning detail
    and emits SyntaxWarning on modern Python.
    """
    # Skip some newer types ('c' arrays only exist on Python 2)
    if sys.version_info.major > 2 and f == "c":
        pytest.skip("Format `%s` not available on Python 3" % f)
    # Initialize buffers
    v = array.array(f, s)
    b = cybuffer(v)
    m = memoryview(getbuffer(v))
    # Validate format: 'u' arrays export as an unsigned int of Py_UNICODE's
    # size; 'c' arrays export as unsigned bytes
    assert b.itemsize == v.itemsize
    if f == "u" and Py_UNICODE_SIZE == 2:
        assert b.format == "H"
    elif f == "u" and Py_UNICODE_SIZE == 4:
        assert b.format == "I"
    elif f == "c":
        assert b.format == "B"
    # Validate contiguity
    assert b.c_contiguous
    assert b.f_contiguous
    assert b.contiguous
    # Validate permissions (arrays are writable)
    assert not b.readonly
    # Test methods
    assert b.tolist() == list(map(ord, v))
    validate_against_memoryview(v, b, m)
def test_mmap():
    """cybuffer over an anonymous writable mmap behaves like a memoryview."""
    with contextlib.closing(mmap.mmap(-1, 10, access=mmap.ACCESS_WRITE)) as v:
        # Initialize buffers
        b = cybuffer(v)
        m = memoryview(getbuffer(v))
        # Validate format
        assert b.format == m.format
        assert b.itemsize == m.itemsize
        # Validate contiguity
        assert b.c_contiguous
        assert b.f_contiguous
        assert b.contiguous
        # Validate permissions
        assert not b.readonly
        # Test methods
        assert b.tolist() == m.tolist()
        validate_against_memoryview(v, b, m)
        # Cleanup to close memory (mmap.close() fails while exported views
        # are still alive)
        del b
        del m
@pytest.mark.parametrize("s",
    [(10,), (10, 11), (10, 11, 12)]
)
@pytest.mark.parametrize("o",
    ["C", "F"]
)
def test_nd_numpy_arrays(s, o):
    """cybuffer over 1-/2-/3-D NumPy arrays in both C and Fortran order."""
    # Initialize buffers (fixed seed for reproducibility)
    numpy.random.seed(42)
    a = numpy.random.random(s).astype(float, order=o)
    b = cybuffer(a)
    # Validate identity
    assert b.obj is a
    # Validate shape, size, etc.
    assert b.nbytes == a.nbytes
    assert b.ndim == a.ndim
    assert b.suboffsets == tuple()
    assert b.shape == a.shape
    assert b.strides == a.strides
    # Validate format
    assert b.format == a.dtype.char
    assert b.itemsize == a.itemsize
    # Validate contiguity (must mirror the array's flags for each order)
    assert b.c_contiguous == a.flags.c_contiguous
    assert b.f_contiguous == a.flags.f_contiguous
    assert b.contiguous == (a.flags.c_contiguous or a.flags.f_contiguous)
    # Validate permissions (readonly is the inverse of writeable)
    assert b.readonly != a.flags.writeable
    # Test methods
    assert b.tobytes() == a.tobytes()
    assert b.tolist() == a.tolist()
    if sys.version_info.major > 2:
        assert b.hex() == a.tobytes().hex()
    else:
        assert b.hex() == binascii.hexlify(a.tobytes())
|
#!/usr/bin/env python
''' MASVS document parser and converter class.
By Bernhard Mueller, updated by Jeroen Beckers and Carlos Holguera
Copyright (c) 2021 OWASP Foundation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import re
import json
import yaml
from xml.sax.saxutils import escape
import csv
from pathlib import Path
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def order_filenames(target):
    """Return the 0x*-V*.md filenames under *target*, ordered by their
    section marker -V1- through -V8-."""
    names = [path.name for path in Path(target).glob("0x*-V*.md")]
    ordered = []
    for section in range(1, 9):
        marker = f"-V{section}-"
        ordered.extend(name for name in names if marker in name)
    return ordered
class MASVS:
    ''' Creates requirements list out of markdown files. '''

    # Class-level default kept for backward compatibility; __init__ rebinds a
    # fresh dict per instance so instances no longer share one mutable dict.
    requirements = {}

    # Table rows look like: **1.1** | MSTG-ARCH-1 | text | x | x |
    # Compiled once instead of on every line of every file (the original
    # called re.compile inside the per-line loop).
    _ROW_REGEX = re.compile(r'\*\*(\d\.\d+)\*\*\s\|\s{0,1}(.*?)\s{0,1}\|\s{0,1}(.*?)\s{0,1}\|\s{0,1}(.*?)\s{0,1}\|(\s{0,1}(.*?)\s{0,1}\|)?')

    def __init__(self, lang):
        '''Parse the requirement tables for the given language code.'''
        self.requirements = {}
        if lang == "en":
            target = "../Document"
        else:
            target = "../Document-{}".format(lang)
        for file in order_filenames(target):
            # with-block closes each markdown file (the original leaked the
            # open handles)
            with open(os.path.join(target, file)) as f:
                for line in f:
                    m = self._ROW_REGEX.search(line)
                    if m:
                        req = {}
                        num_id = m.group(1).strip()
                        # IDs in the docs use non-breaking hyphens (U+2011)
                        mstg_id = m.group(2).replace(u"\u2011", "-")
                        req['id'] = num_id
                        req['category'] = mstg_id
                        req['text'] = m.group(3).strip()
                        if m.group(5):
                            # Five columns: L1/L2 checkmark cells are present
                            req['L1'] = len(m.group(4).strip()) > 0
                            req['L2'] = len(m.group(5).strip()) > 0
                            req['R'] = False
                        else:
                            # Four columns: a resiliency (R) requirement
                            req['R'] = True
                            req['L1'] = False
                            req['L2'] = False
                        self.requirements[mstg_id] = req

    def to_json(self):
        ''' Returns a JSON-formatted string '''
        return json.dumps(self.requirements)

    def to_yaml(self):
        ''' Returns a YAML-formatted string '''
        return yaml.dump(self.requirements, allow_unicode=True, indent=4, default_flow_style=False, sort_keys=False)

    def to_xml(self):
        ''' Returns XML (requirement text is escaped) '''
        xml = '<requirements>\n'
        for id, r in self.requirements.items():
            xml += f"\t<requirement id='{r['id']}' category='{r['category']}' L1='{int(r['L1'])}' L2='{int(r['L2'])}' R='{int(r['R'])}'>\n\t\t{escape(r['text'])}\n\t</requirement>\n"
        xml += '</requirements>'
        return xml

    def to_csv(self):
        ''' Returns CSV with a fixed id/category/text/L1/L2/R header '''
        si = StringIO()
        writer = csv.DictWriter(si, ['id', 'category', 'text', 'L1', 'L2', 'R'], extrasaction='ignore')
        writer.writeheader()
        writer.writerows(self.requirements.values())
        return si.getvalue()
|
import sys
import os
from ..parameter import pDeepParameter
from .. import evaluate as evaluate
from . import tune_and_predict
# train_folder = sys.argv[1]
# test_folder = sys.argv[2]
# (folder, NCE, instrument) triples for each training/test condition
train_Vls = ("/home/pfind/pDeepDev/pDeepData/phospho/PhosSynVelosptmRS/train", 40, "Velos")
train_QE = ("/home/pfind/pDeepDev/pDeepData/phospho/Olsen-NC-QEHFX-28/train", 28, "QE")
# NOTE(review): train_Lumos points at the same folder as train_QE with only
# the NCE/instrument changed — confirm this is intentional.
train_Lumos = ("/home/pfind/pDeepDev/pDeepData/phospho/Olsen-NC-QEHFX-28/train", 32, "Lumos")
test_Vls = ("/home/pfind/pDeepDev/pDeepData/phospho/PhosSynVelosptmRS/test", 40, "Velos")
test_QE = ("/home/pfind/pDeepDev/pDeepData/phospho/Olsen-NC-QEHFX-28/test", 28, "QE")
train_WB = "/home/pfind/pDeepDev/pDeepData/phospho/WenBoPhos/train", 30, "QE"
test_WB = "/home/pfind/pDeepDev/pDeepData/phospho/WenBoPhos/test", 30, "QE"

# Fine-tuning configuration for the phospho model
param = pDeepParameter()
param.model = "tmp/model/pretrain-180921-modloss-mod8D.ckpt"
# param.model = "tmp/model/pretrain-phos.ckpt"
param.RT_model = ""
param.fixmod = "Carbamidomethyl[C],Oxidation[M]".split(",")
param.varmod = "Phospho[Y],Phospho[S],Phospho[T]".split(",")
param.predict_instrument = "QE"
param.predict_nce = 30
param.min_varmod = 1
param.max_varmod = 3
param.epochs = 50
param.dropout = 0.2
# Effectively "use all PSMs" per psmlabel file for tuning and testing
param.n_tune_per_psmlabel = 1000000
param.n_test_per_psmlabel = param.n_tune_per_psmlabel
def get_psmlabels(data_folder, nce=27, instrument="QE"):
    """Collect psmlabel files from *data_folder*.

    :param data_folder: directory to scan (non-recursive)
    :param nce: collision energy to associate with every file found
    :param instrument: instrument name to associate with every file found
    :returns: three parallel lists — file paths, NCEs, instrument names
    """
    paths = [
        os.path.join(data_folder, name)
        for name in os.listdir(data_folder)
        # endswith accepts a tuple: one check covers both extensions
        if name.endswith((".psmlabel", ".plabel"))
    ]
    return paths, [nce] * len(paths), [instrument] * len(paths)
def add_to_train(param, psmlabels, nces, instruments):
    """Append tuning (training) psmlabel files and their metadata to *param*."""
    param.tune_psmlabels += psmlabels
    param.tune_nces += nces
    param.tune_instruments += instruments
def add_to_test(param, psmlabels, nces, instruments):
    """Append test psmlabel files and their metadata to *param*."""
    param.test_psmlabels += psmlabels
    param.test_nces += nces
    param.test_instruments += instruments
# Register the training and test sets (Velos, QE and Lumos conditions)
add_to_train(param, *get_psmlabels(*train_Vls))
add_to_train(param, *get_psmlabels(*train_QE))
add_to_train(param, *get_psmlabels(*train_Lumos))
add_to_test(param, *get_psmlabels(*test_Vls))
add_to_test(param, *get_psmlabels(*test_QE))
# add_to_train(param, *get_psmlabels(*train_WB))
# add_to_test(param, *get_psmlabels(*test_WB))
# Fine-tune the pretrained model on the phospho data and save the result
tune_and_predict.init_config(param)
pdeep, _ = tune_and_predict.tune(param)
pdeep.SaveModel("tmp/model/pretrain-phos.ckpt")
|
from collections import defaultdict
from itertools import product
def solution(clothes):
    """Count the ways to wear at least one clothing item.

    For each category holding n items there are n + 1 choices (one of the
    items, or skip the category); multiply the choices and subtract the one
    all-skip combination.
    """
    per_category = defaultdict(int)
    for _, category in clothes:
        per_category[category] += 1
    combos = 1
    for n in per_category.values():
        combos *= n + 1
    return combos - 1
# Generated by Django 3.0.6 on 2020-05-20 17:49
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.6): relax the `permisson` model's
    many-to-many fields (app, group, users) to blank=True and expose them
    through the reverse accessor 'granted_by_permissions'."""

    dependencies = [
        ('application', '0001_initial'),
        ('users', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('perm', '0002_auto_20200520_1745'),
    ]

    operations = [
        migrations.AlterField(
            model_name='permisson',
            name='app',
            field=models.ManyToManyField(blank=True, related_name='granted_by_permissions', to='application.Application', verbose_name='应用'),
        ),
        migrations.AlterField(
            model_name='permisson',
            name='group',
            field=models.ManyToManyField(blank=True, related_name='granted_by_permissions', to='users.tGroup', verbose_name='用户组'),
        ),
        migrations.AlterField(
            model_name='permisson',
            name='users',
            field=models.ManyToManyField(blank=True, related_name='granted_by_permissions', to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
    ]
|
'''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.core.files import File
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
from django.contrib.auth.models import User
from dicomdb.apps.main.models import (
Batch,
Image,
Header,
HeaderValue,
HeaderField
)
from dicomdb.settings import MEDIA_ROOT
from pydicom import read_file
import os
#### GETS #############################################################
def get_batch(bid):
    '''Return the Batch with primary key *bid*, or raise Http404.'''
    try:
        return Batch.objects.get(id=bid)
    except Batch.DoesNotExist:
        raise Http404
def get_image(iid):
    '''Return the Image with primary key *iid*, or raise Http404.'''
    try:
        return Image.objects.get(id=iid)
    except Image.DoesNotExist:
        raise Http404
def ls_fullpath(dirname, ext=None):
    '''Return the full paths of all files in *dirname*, optionally keeping
    only those whose names end with *ext*.'''
    names = os.listdir(dirname)
    if ext is not None:
        names = [name for name in names if name.endswith(ext)]
    return [os.path.join(dirname, name) for name in names]
### FILES ##############################################################
def save_image_dicom(dicom,dicom_file,basename=None):
    '''save image dicom will save a dicom file to django's media
    storage, for this application defined under /images.
    :param dicom: the main.Image instance
    :param dicom_file: the dicom file (usually in /data) to save
    :param basename: optional storage name; defaults to the file's basename
    :returns: the saved main.Image instance
    '''
    if basename is None:
        basename = os.path.basename(dicom_file)
    with open(dicom_file,'rb') as filey:
        django_file = File(filey)
        # save=True persists the model when the file field is written; the
        # extra dicom.save() below looks redundant but is kept as-is
        dicom.image.save(basename,
                         django_file,
                         save=True)
        dicom.save()
    return dicom
def upload_dicom_batch(batch,dicom_file):
    '''upload a dicom image to a batch
    :param batch: the main.Batch to attach the image to
    :param dicom_file: path to the dicom file on disk
    :returns: the created and fully populated main.Image
    '''
    # The dicom folder will be named based on the accession#
    # force=True: read even files lacking a valid DICM preamble
    dcm = read_file(dicom_file,force=True)
    dicom_uid = os.path.basename(dicom_file)
    # Create the Image object in the database
    # A dicom instance number must be unique for its batch
    dicom = Image.objects.create(batch=batch,
                                 uid=dicom_uid)
    # Save the dicom file to storage
    dicom = save_image_dicom(dicom=dicom,
                             dicom_file=dicom_file) # Also saves
    # Add all header fields (for now not private)
    dicom = add_header_fields(instance=dicom,
                              dicom=dcm)
    return dicom
## MODELS ##############################################################
def add_header_fields(instance,dicom=None,include_private=False):
    '''add header fields will add header field objects to be associated
    with an instance of a dicom Image in the database
    :param instance: the main.Image to attach headers to
    :param dicom: a pre-read pydicom dataset; read from instance.image.path
     when None
    :param include_private: include private tags (not implemented)
    :returns: the updated instance
    '''
    if dicom is None:
        dicom = read_file(instance.image.path)
    skip = ["PixelData"]
    fields = dicom.dir()
    if include_private:
        from deid.dicom.tags import get_private
        # NOTE(review): `bot` is never defined or imported in this module, so
        # this line raises NameError when include_private=True — confirm the
        # intended logger import (e.g. a project-level message bot).
        bot.debug("Private tags not yet implemented, not sure if good idea.")
    for field in fields:
        values = dicom.get(field)
        # Skip empty values and the (large) pixel data payload
        if values not in [None,''] and field not in skip:
            header_field,created = HeaderField.objects.get_or_create(field=field)
            if not isinstance(values,list):
                values = [values]
            for value in values:
                value_field,created = HeaderValue.objects.get_or_create(name=value)
                header,created = Header.objects.get_or_create(value=value_field,
                                                              field=header_field)
                header.save()
                instance.headers.add(header)
    instance.save()
    return instance
def add_batch_error(message,batch):
    '''add batch error will log an error, and flag the batch to have it.
    :param message: the error message to record on the batch
    :param batch: the main.Batch to flag
    :returns: the saved batch
    '''
    # NOTE(review): `bot` is never defined or imported in this module, so
    # this call raises NameError — confirm the intended logger import.
    bot.error(message)
    batch.has_error = True
    batch.errors.append(message)
    batch.save()
    return batch
def change_status(images, status):
    '''Set ``status`` on one instance or a list of instances and save each.

    Works for both batches and images. Returns the single updated instance
    when given exactly one object, otherwise the list of updated instances.
    '''
    if not isinstance(images, list):
        images = [images]
    updated = []
    for instance in images:
        instance.status = status
        instance.save()
        updated.append(instance)
    return updated[0] if len(updated) == 1 else updated
|
from src.actions.action import scrap_forum, post_threats
from src.actions.no_credentials import NoCredentials
from src.authentication.get_token import login
from time import sleep
from src.lib.logger import setup_logger
from logging import getLogger
from settings import config
# Configure logging once at import time, then grab this module's logger
setup_logger(config)
log = getLogger(__name__)
def main():
    """One full pipeline pass: scrape the forum without credentials,
    authenticate, then post the scraped payload."""
    payload = scrap_forum(strategy=NoCredentials())
    # NOTE(review): login()'s return value is unused — presumably the token
    # is cached globally by the get_token module; confirm.
    login()
    post_threats(payload=payload)
if __name__ == "__main__":
    # Run the scrape/post pipeline five times, pausing between runs.
    for count in range(5):
        log.info(f"Main sequence started. Count = {count}")
        main()
        sleep(10)
|
# -*- coding: utf-8 -*-
"""Configurations for quantization for MicroNet (CIFAR100).
- Author: Curt-Park
- Email: jwpark@jmarple.ai
"""
from config.train.cifar100 import micronet

# Start from the MicroNet CIFAR-100 training config and override only the
# quantization-specific settings: short fine-tuning at a low learning rate.
config = micronet.config
config.update(
    {
        "MODEL_NAME": "quant_mixnet",
        "LR_SCHEDULER_PARAMS": dict(warmup_epochs=0, start_lr=1e-4),
        "LR": 1e-4,
        "EPOCHS": 2,
    }
)
|
from django.db import models
from django.db.models.fields.related import ForeignKey
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class Type(models.Model):
    """A product category."""
    name = models.CharField(max_length=125)

    class Meta:
        verbose_name = _("Type")
        verbose_name_plural = _("Types")

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Fix: `reverse` was referenced but never imported anywhere in this
        # module, so calling this method raised NameError. Imported locally
        # to keep the change self-contained.
        from django.urls import reverse
        return reverse("Type_detail", kwargs={"pk": self.pk})
class Product(models.Model):
    """A product with a price, owned by a user and assigned to a Type."""
    name = models.CharField(max_length=200)
    price = models.IntegerField()
    type = ForeignKey(Type, on_delete=models.CASCADE, related_name='product_type')
    user = ForeignKey(User, on_delete=models.CASCADE, related_name='product_user')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = _("Product")
        verbose_name_plural = _("Products")

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Fix: `reverse` was referenced but never imported anywhere in this
        # module, so calling this method raised NameError. Imported locally
        # to keep the change self-contained.
        from django.urls import reverse
        return reverse("Product_detail", kwargs={"pk": self.pk})
|
def is_macro_action(unit_command_action, abilities):
    """
    Determine whether a unit_command action is a macro action: one related
    to training, morphing, researching or building units or technologies.
    Returns a boolean.

    Feel free to add more stuff in macro_names by checking the stableid.json
    and adding names that you feel should be stored as macro actions.

    Note: cancel commands are also treated as macro actions (see the
    commented-out button_name check kept from the original).
    """
    macro_names = (
        "Morph",
        "Research",
        "Train",
        "ZergBuild",
        "ProtossBuild",
        "TerranBuild",
    )
    # The ability lookup does not depend on the loop variable — do it once
    # (the original re-indexed `abilities` on every iteration).
    ability = abilities[unit_command_action.ability_id]
    # print(f"ability: {ability}")
    # we're also storing the the canceling of stuff.
    # if hasattr(ability, 'button_name') and 'Cancel' in ability.button_name:
    #     return False
    return any(name in ability.link_name for name in macro_names)
def get_human_name(action_doc, abilities):
    """Return link_name + button_name for the ability referenced by *action_doc*."""
    ability = abilities[action_doc["ability_id"]]
    parts = [str(ability.link_name), str(ability.button_name)]
    return "".join(parts)
def get_actions(actions, abilities):
    """
    This function is supposed to return macro actions from an observation.
    Arguments:
    -actions, actions for a given observation
    -abilities, which is the result of controller.raw_data().abilities
    Returns:
    - a list macro_actions of all the macro actions found in obs.actions.
    Raw observations and unit commands are defined in the protocol here:
    https://github.com/Blizzard/s2client-proto/blob/aa41daa2da79431d3b88b115e6a17b23a9260529/s2clientprotocol/raw.proto#L160
    TO-DO:
    - clean the hasattr, it seems to be innecesary
    """
    macro_actions = []
    for action in actions:
        # Only raw unit_command actions can be macro actions
        if hasattr(action, "action_raw") and hasattr(action.action_raw, "unit_command"):
            if is_macro_action(action.action_raw.unit_command, abilities):
                # Copy the relevant protobuf fields into a plain dict
                action_doc = {}
                if hasattr(action.action_raw.unit_command, "ability_id"):
                    action_doc["ability_id"] = action.action_raw.unit_command.ability_id
                if hasattr(action.action_raw.unit_command, "unit_tags"):
                    # Materialize the repeated field into a regular list
                    action_doc["unit_tags"] = [
                        tag for tag in action.action_raw.unit_command.unit_tags
                    ]
                if hasattr(action.action_raw.unit_command, "target_unit_tag"):
                    action_doc[
                        "target_unit_tag"
                    ] = action.action_raw.unit_command.target_unit_tag
                if hasattr(action.action_raw.unit_command, "target_world_space_pos"):
                    action_doc["target_world_space_pos"] = {
                        "x": action.action_raw.unit_command.target_world_space_pos.x,
                        "y": action.action_raw.unit_command.target_world_space_pos.y,
                    }
                macro_actions.append(action_doc)
    return macro_actions
|
import json
import csv
import argparse
def unfold_list(base_key, l):
    """Flatten a JSON list into key/value pairs prefixed with *base_key*.

    Dict elements are recursively unfolded under an index-suffixed key.
    """
    new_dict = dict()
    for i, val in enumerate(l):
        if isinstance(val, dict):
            new_k = base_key + '_' + str(i)
            new_dict.update(unfold(new_k, val))
        else:
            # NOTE(review): this stores the *entire* list under base_key and
            # re-stores it once per non-dict element. If per-element values
            # were intended, this should probably be `val` under an indexed
            # key — confirm against the expected CSV output before changing.
            new_dict[base_key] = l
    return new_dict
def unfold(base_key, d):
    """Recursively flatten dict *d* into a single-level dict whose keys are
    underscore-joined paths prefixed with *base_key*."""
    flat = dict()
    for key, value in d.items():
        child_key = base_key + '_' + key
        if isinstance(value, dict):
            flat.update(unfold(child_key, value))
        elif isinstance(value, list):
            flat.update(unfold_list(child_key, value))
        else:
            flat[child_key] = value
    return flat
def json_to_csv(json_path):
    '''
    Turn a europeana json file (with a top-level "items" list) into a csv
    file next to it (same name, .csv extension).
    '''
    csv_path = json_path[:-4] + 'csv'
    unfolded = []
    header = set()
    with open(json_path) as f:
        items = json.load(f)['items']
    for item in items:
        # Flatten each (possibly nested) item into one csv row
        row = unfold('item', item)
        unfolded.append(row)
        # Union of keys across all rows becomes the csv header
        header.update(row.keys())
    # newline='' is required by the csv module (avoids blank lines on
    # Windows); sorting the header makes the column order deterministic —
    # the original iterated a set, giving an arbitrary column order per run.
    with open(csv_path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=sorted(header))
        writer.writeheader()
        writer.writerows(unfolded)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert a europeana \
json file to csv')
    parser.add_argument('json_path', type=str, help='path to your json file')
    args = parser.parse_args()
    # Silently does nothing when the extension is not .json
    if args.json_path.endswith('.json'):
        json_path = args.json_path
        json_to_csv(json_path)
|
from time import sleep
from colorama import Fore, init

# convert=True translates ANSI colour codes for Windows terminals
init(convert=True)

# Banner (Portuguese): pallet-weight calculator
print(f'''
============= PAINEL DE LOGIN =============
sistema para calcular o pesos dos pallet.
criado por mim: ZkDragonZk , Estudante De TI
===========================================
''')
print('')
# lastro = boxes per layer; altura = number of layers; pesokg = weight per box
lastro = int(input('Digite o lastro: '))
print('')
# NOTE(review): prompt typo "altra" (should be "altura") — user-visible
# string left unchanged here
altura = int(input('Digite a altra: '))
print('')
pesokg = int(input('Digite o Peso de 1 caixa: '))
print('')
# total weight = boxes per layer * layers * weight per box
resultado = (lastro * altura) * pesokg
# implicit string concatenation: the f-string and the .format() template are
# joined before .format fills in {}
print(f'{Fore.RED}O Pallet Pesa No Total:' ' {} Kg'.format(resultado))
# keep the console window open long enough to read the result
sleep(40)
#!/usr/bin/env python
import json
import sys
import psycopg2 as psql
import spacy
def main():
    """CLI entry point: analyze the transcript whose path is argv[1]."""
    _analyze(sys.argv[1])
def _analyze(path):
    """Fetch the raw ASR text for *path* from Postgres, run spaCy over it,
    and hand the resulting doc to _structure_data for JSON output."""
    connection = psql.connect("dbname=asrdb user=postgres")
    cursor = connection.cursor()
    cursor.execute("SELECT raw_data FROM asr_data WHERE file_path = %s", (path,))
    nlp_data = cursor.fetchall()
    nlp = spacy.load('en_core_web_sm')
    # nlp_data[0][0]: first row, raw_data column — assumes the query matched
    # at least one row (IndexError otherwise)
    doc = nlp(nlp_data[0][0])
    cursor.close()
    connection.close()
    _structure_data(doc, path)
def _structure_data(doc, path):
features = {}
features.setdefault(0, {"text", "dep", "pos"})
for i, item in enumerate(doc):
features[i] = {"text": item.lemma_,
"dep": item.dep_,
"pos": item.pos_}
# using stdout to pipe straight json string to the app.
feature_output = json.dumps(features, indent=4, sort_keys=True)
print(feature_output)
# store nlp data in DB for depterity, can't be grabbed by node-deptgres :(
# connection = psql.connect("dbname=asrdb user=deptgres")
# cursor = connection.cursor()
# cursor.execute("INSERT INTO asr_data (file_path, nlp_data) VALUES (%s, %s)",
# (path, feature_output))
# connection.commit()
# cursor.close()
# connection.close()
main()
|
from app import app, db
from flask import request, jsonify
from dataclasses import dataclass
from sqlalchemy import exc
class Product(db.Model):
    """Warehouse product with a current stock level and a required stock level."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True, nullable=False)
    amount = db.Column(db.Integer, nullable=False)
    # Target stock level; defaults to 0 at the database level.
    required_amount = db.Column(db.Integer, nullable=False, server_default='0')

    def to_dict(self):
        """Serialize the product (plus the derived needed amount) for JSON responses."""
        return {
            'id': self.id,
            'name': self.name,
            'amount': self.amount,
            'required_amount': self.required_amount,
            'needed_amount': self.get_needed_amount()
        }

    def get_needed_amount(self):
        """Units still missing to reach required_amount (never negative)."""
        return max(self.required_amount - self.amount, 0)

    @staticmethod
    def from_dict(dict):
        # NOTE(review): parameter name shadows the builtin ``dict``; kept
        # unchanged for interface stability.
        return Product(**dict)

    def __repr__(self):
        return f'<Product {self.name}, amount={self.amount}>'
@dataclass
class ProductResult:
    """Envelope for API responses: a product plus status/error metadata."""
    product: Product = None
    status: str = None
    error: str = None

    def __init__(self, product=None):
        # Hand-written __init__ (dataclass does not overwrite an explicitly
        # defined one); status/error stay None until the views fill them in.
        self.product = product

    def to_dict(self):
        """Serialize for jsonify; the product is expanded via its own to_dict."""
        serialized = self.product.to_dict() if self.product is not None else None
        return {'product': serialized, 'status': self.status, 'error': self.error}
@app.route('/products', methods=['GET'])
def get_products():
    """Return every product as a JSON list."""
    return jsonify([p.to_dict() for p in Product.query.all()])
@app.route('/products/<id>', methods=['GET'])
def get_products_by_id(id):
    """Return a single product by primary key, wrapped in a ProductResult envelope.

    Failures are reported via status "failed" and an error code in the JSON
    body rather than an HTTP error status.
    """
    product_result = ProductResult()
    try:
        product_result.product = Product.query.get(id)
        if product_result.product is None:
            product_result.status = "failed"
            product_result.error = "product_not_found"
        else:
            product_result.status = 'ok'
    except exc.DatabaseError:
        # e.g. an id the database driver cannot coerce to the key type
        product_result.status = "failed"
        product_result.error = "id_wrong_format"
    return jsonify(product_result.to_dict())
@app.route('/products', methods=['POST'])
def post_products():
    """Create a product from the JSON request body.

    Returns a ProductResult envelope; on a unique-constraint violation the
    session is rolled back and an error code is reported.
    """
    product_result = ProductResult(Product.from_dict(request.json))
    db.session.add(product_result.product)
    try:
        db.session.commit()
        product_result.status = 'ok'
    except exc.IntegrityError:  # unused ``as error`` binding removed
        db.session.rollback()
        product_result.status = 'failed'
        # Fix: error-code typo (was "product_alredy_exits").
        product_result.error = "product_already_exists"
    return jsonify(product_result.to_dict())
@app.route('/products/<id>', methods=['PUT'])
def put_products(id):
    """Partially update a product; any of name/amount/required_amount may be set.

    Returns a ProductResult envelope with "product_not_found" or
    "id_wrong_format" error codes on failure.
    """
    product_result = ProductResult()
    try:
        product_update = request.json
        product_result.product = Product.query.get(id)
        if product_result.product is None:
            product_result.status = "failed"
            product_result.error = "product_not_found"
        else:
            # Only the fields present in the request body are changed.
            if "name" in product_update:
                product_result.product.name = product_update["name"]
            if "amount" in product_update:
                product_result.product.amount = product_update["amount"]
            if "required_amount" in product_update:
                product_result.product.required_amount = product_update["required_amount"]
            product_result.status = 'ok'
            db.session.commit()
    except exc.DatabaseError:
        product_result.status = "failed"
        product_result.error = "id_wrong_format"
    return jsonify(product_result.to_dict())
@app.route('/products/<id>', methods=['DELETE'])
def delete_products(id):
    """Delete a product by primary key, reporting errors in a ProductResult envelope."""
    product_result = ProductResult()
    try:
        product_result.product = Product.query.get(id)
        if product_result.product is None:
            product_result.status = "failed"
            product_result.error = "product_not_found"
        else:
            db.session.delete(product_result.product)
            db.session.commit()
            product_result.status = 'ok'
    except exc.DatabaseError:
        product_result.status = "failed"
        product_result.error = "id_wrong_format"
    return jsonify(product_result.to_dict())
|
"""
File: 2048_Game
Author: Massimo Stefani , Michael Greub
Date: 28.12.2018
This is a game of 2048 to be played on the Raspberry SenseHAT.
"""
# Import required modules
from sense_hat import SenseHat
from random import randint
from time import sleep
from time import time
import games
sense = SenseHat()
sense.clear(0, 0, 0)
# Board size (4 or 8); overwritten by the player's menu choice in startup().
size = 8
# ----- Colour definitions -----
# colors[N] (list below) is the colour of the tile with value 2**N.
MESSAGE = (128, 124, 128)
BLACK_0 = (0, 0, 0)
BLUE_1 = (0, 255, 255)
GREEN_2 = (0, 255, 127)
GREEN_3 = (0, 255, 0)
GREEN_4 = (127, 255, 0)
YELLOW_5 = (255, 255, 0)
ORANGE_6 = (255, 127, 0)
RED_7 = (255, 0, 0)
PINK_8 = (255, 0, 127)
PINK_9 = (255, 0, 255)
PINK_10 = (127, 0, 255)
BLUE_11 = (0, 0, 255)
BLUE_12 = (0, 127, 255)
WHITE_13 = (255, 255, 255)
# Short aliases used by the pixel-art matrices below.
r = RED_7
o = BLACK_0
y = YELLOW_5
# Game-over flag, managed by control_end() and the check_* helpers.
end = True
colors = [BLACK_0, BLUE_1, GREEN_2, GREEN_3, GREEN_4, YELLOW_5, ORANGE_6, RED_7,\
          PINK_8, PINK_9, PINK_10, BLUE_11, BLUE_12, WHITE_13,]
# ------ Game board matrices ------
# Cells hold colour indices: 0 = empty, N = tile with value 2**N.
L4 = [[0, 0, 0, 0],
      [0, 0, 0, 0],
      [0, 0, 0, 0],
      [0, 0, 0, 0],
      ]
L8 = [[0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 0, 0, 0]
      ]
# Red cross shown when the game is lost.
L_CROSS = [r, o, o, o, o, o, o, r,
           o, r, o, o, o, o, r, o,
           o, o, r, o, o, r, o, o,
           o, o, o, r, r, o, o, o,
           o, o, o, r, r, o, o, o,
           o, o, r, o, o, r, o, o,
           o, r, o, o, o, o, r, o,
           r, o, o, o, o, o, o, r
           ]
# Trophy shown when the game is won.
L_WIN = [o, o, o, o, o, o, o, o,
         o, y, y, y, y, y, y, o,
         o, y, y, y, y, y, y, o,
         o, y, y, y, y, y, y, o,
         o, o, y, y, y, y, o, o,
         o, o, o, y, y, o, o, o,
         o, o, o, y, y, o, o, o,
         o, y, y, y, y, y, y, o,
         ]
# ----- Function definitions -----
def before_startup():
    '''Prepare matrices, SenseHAT and events to be ready for a complete startup.'''
    # Reset both boards and clear the LEDs.
    set_matrices_0()
    sense.clear()
    # Drain any queued joystick events so stale input does not leak into the menu.
    for event in sense.stick.get_events():
        break
    startup()
def startup():
    """Starts the game: let the player pick 4x4 or 8x8, then spawn the first block(s)."""
    global size
    sense.show_message('Choose your mode:',0.1, MESSAGE)
    modes = ['4X4', '8X8']  # labels shown on the LED matrix
    mode = [4, 8]           # corresponding board sizes
    sleep(0.2)
    selecting = True
    i = 0
    # Blocks until the player presses the middle button; sets the global `size`.
    selection_startup(selecting, modes, mode, i)
    new_block(size)
def set_matrices_0():
    """Reset both game matrices to all zeros, in place."""
    for row in L4:
        row[:] = [0, 0, 0, 0]
    for row in L8:
        row[:] = [0] * 8
def selection_startup(selecting, modes, mode, i):
    """Joystick navigation to select the mode (4x4 vs 8x8).

    Left/right toggles between the two entries; the middle button confirms
    the selection and stores the chosen board size in the global ``size``.
    """
    global size
    while selecting:
        sense.show_message(modes[i], 0.1, MESSAGE)
        for event in sense.stick.get_events():
            if event.action == 'pressed':
                if event.direction == 'right' or event.direction == 'left':
                    # Toggle between the two entries.
                    i = (i + 1) % 2
                    sense.show_message(modes[i], 0.1, MESSAGE)
                elif event.direction == 'middle':
                    selecting = False
                    size = mode[i]
def set_pixels(n):
    """Render the current board on the 8x8 LED matrix.

    The 4x4 board is upscaled by set_pixels_4(); the 8x8 board maps 1:1.
    """
    if n == 4:
        set_pixels_4()
    else:
        for x in range(8):
            for y in range(8):
                sense.set_pixel(x, y, colors[L8[x][y]])
def set_pixels_4():
    """Display the 4x4 board on the 8x8 LEDs: each cell becomes a 2x2 pixel square."""
    # Upscale by integer division: LED (x, y) shows board cell (x//2, y//2).
    scaled = [[L4[x // 2][y // 2] for y in range(8)] for x in range(8)]
    for x in range(8):
        for y in range(8):
            sense.set_pixel(x, y, colors[scaled[x][y]])
def new_block(n):
    """Spawn new block(s), then check for game over/victory and redraw.

    Two blocks are spawned when at least two cells are free, one when exactly
    one cell is free, none otherwise.
    (A leftover ``print`` of the free-cell count was removed.)
    """
    sleep(0.25)
    free = number_empty_block(n)
    if free > 1:
        two_new_blocks(n)
    elif free == 1:
        one_new_block(n)
    control_end(n)
    set_pixels(n)
def number_empty_block(n):
    """Return how many cells of the active board are empty (value 0)."""
    board = L4 if n == 4 else L8
    return sum(row.count(0) for row in board)
def two_new_blocks(n):
    """Spawn exactly two new '2' tiles (colour index 1) on random empty cells.

    Bug fix: the counter was previously seeded with ``randint(0, 1)``, so
    sometimes only one block was spawned.  The caller guarantees at least two
    empty cells exist, so the loop terminates.
    """
    created = 0
    L = L4 if n == 4 else L8
    while created < 2:              # until exactly two blocks are placed
        x = randint(0, (n - 1))
        y = randint(0, (n - 1))     # pick a random cell
        if L[x][y] == 0:            # only place on an empty cell
            L[x][y] = 1             # colour index 1 == tile "2"
            created = created + 1
def one_new_block(n):
    """Spawn exactly one new '2' tile (colour index 1) on a random empty cell.

    Bug fix: the counter was previously seeded with ``randint(0, 1)``, so
    half of the time no block was spawned at all.  The caller guarantees at
    least one empty cell exists, so the loop terminates.
    """
    created = 0
    L = L4 if n == 4 else L8
    while created < 1:              # until exactly one block is placed
        x = randint(0, (n - 1))
        y = randint(0, (n - 1))     # pick a random cell
        if L[x][y] == 0:            # only place on an empty cell
            L[x][y] = 1             # colour index 1 == tile "2"
            created = created + 1
def moved_up(n):
    """Handle a joystick 'up': slide/merge every tile upwards, then redraw and spawn.

    (Two leftover ``print(L4)`` debug statements were removed.)
    """
    L = L4 if n == 4 else L8
    for x in range(n):
        # Scan top-to-bottom so each tile slides into already-settled space.
        for y in range(n):
            if L[x][y] > 0 and y >= 1:  # skip empty cells and the top row
                move_pixel_up(x, y, n)
    set_pixels(n)
    new_block(n)
def move_pixel_up(x, y, n):
    """Slide the tile at (x, y) as far up as possible, merging one equal neighbour.

    Bug fix: the original evaluated ``L[x][y - 1]`` before checking
    ``y >= 1`` and merged without any bounds check, so index -1 wrapped to
    the bottom of the column and tiles could move/merge across the board
    edge.  Both checks are now guarded, mirroring move_pixel_down().
    """
    L = L4 if n == 4 else L8
    while y >= 1 and L[x][y - 1] == 0:  # slide while the cell above is empty
        L[x][y - 1] = L[x][y]
        L[x][y] = 0
        y = y - 1
    if y >= 1 and L[x][y - 1] == L[x][y]:  # merge with an equal neighbour above
        L[x][y - 1] = L[x][y - 1] + 1
        L[x][y] = 0
def moved_down(n):
    """Handle a joystick 'down': slide/merge every tile downwards, then redraw and spawn."""
    L = L4 if n == 4 else L8
    for x in range(n):
        # Walk bottom-up (y = n-2 .. 0) so lower tiles settle first.
        for z in range(n - 1):
            y = n - 2 - z
            if L[x][y] > 0 and y <= (n - 2):  # skip empty cells; bottom row cannot move
                move_pixel_down(x, y, n)
    set_pixels(n)
    new_block(n)
def move_pixel_down(x, y, n):
    """Slide the tile at (x, y) as far down as possible, merging one equal neighbour."""
    L = L4 if n == 4 else L8
    while y <= (n - 2) and L[x][y + 1] == 0:  # slide while the cell below is empty
        L[x][y + 1] = L[x][y]
        L[x][y] = 0
        y = y + 1
    if y < (n - 1) and L[x][y + 1] == L[x][y]:  # merge with an equal neighbour below
        L[x][y + 1] = L[x][y + 1] + 1
        L[x][y] = 0
def moved_left(n):
    """Handle a joystick 'left': slide/merge every tile leftwards, then redraw and spawn."""
    board = L4 if n == 4 else L8
    for col in range(n):
        for row in range(n):
            # Only occupied cells can move.
            if board[row][col] > 0:
                move_pixel_left(row, col, n)
    set_pixels(n)
    new_block(n)
def move_pixel_left(x, y, n):
    """Slide the tile at (x, y) as far left as possible, merging one equal neighbour.

    Bug fix: the merge test was unguarded, so at x == 0 the index -1 wrapped
    around to the rightmost column and tiles could merge across the board
    edge.  The guard now mirrors move_pixel_right().
    """
    L = L4 if n == 4 else L8
    while x > 0 and L[x - 1][y] == 0:  # slide while the cell to the left is empty
        L[x - 1][y] = L[x][y]
        L[x][y] = 0
        x = x - 1
    if x > 0 and L[x - 1][y] == L[x][y]:  # merge with an equal neighbour on the left
        L[x - 1][y] = L[x - 1][y] + 1
        L[x][y] = 0
def moved_right(n):
    """Handle a joystick 'right': slide/merge every tile rightwards, then redraw and spawn."""
    L = L4 if n == 4 else L8
    for y in range(n):
        # Walk right-to-left (x = n-2 .. 0) so rightmost tiles settle first.
        for z in range(n - 1):
            x = n - 2 - z
            if L[x][y] > 0 and x < (n - 1):  # skip empty cells; last column cannot move
                move_pixel_right(x, y, n)
    set_pixels(n)
    new_block(n)
def move_pixel_right(x, y, n):
    """Slide the tile at (x, y) as far right as possible, merging one equal neighbour."""
    L = L4 if n == 4 else L8
    while x < (n - 1) and L[x + 1][y] == 0:  # slide while the cell to the right is empty
        L[x + 1][y] = L[x][y]
        L[x][y] = 0
        x = x + 1
    if x < (n - 1) and L[x + 1][y] == L[x][y]:  # merge with an equal neighbour on the right
        L[x + 1][y] = L[x + 1][y] + 1
        L[x][y] = 0
def control_end(n):
    """Check for game over: run the losing sequence if no move is possible.

    Sets the global ``end`` to True, then lets the three check_* helpers
    clear it if any empty cell or mergeable neighbour pair exists.  If it
    survives as True the game is over; otherwise check for a victory.
    """
    global end
    end = True
    L = L4 if n == 4 else L8  # NOTE(review): unused here; the helpers read the globals
    check_empty_cells(n)
    check_neigbors_cells_for_center(n)
    check_neigbors_cells_for_border(n)
    if end == True:
        end_animation(n)
    else:
        control_victory(n)
def check_empty_cells(n):
    """Clear the global ``end`` flag if the board still has at least one empty cell."""
    global end
    board = L4 if n == 4 else L8
    if any(0 in row for row in board):
        end = False
def check_neigbors_cells_for_center(n):
    """Clear the global ``end`` flag if any interior cell has an equal neighbour."""
    global end
    if not end:
        return  # a previous check already found a possible move
    board = L4 if n == 4 else L8
    for row in range(1, n - 1):
        for col in range(1, n - 1):
            neighbours = (board[row][col + 1], board[row + 1][col],
                          board[row - 1][col], board[row][col - 1])
            if board[row][col] in neighbours:
                end = False
def check_neigbors_cells_for_border(n):
    """Clear the global ``end`` flag if any border cell has an equal neighbour.

    Scans the four edges (top/bottom row, left/right column) for adjacent
    equal pairs.  Bug fix: the original wrapped the scan in a second,
    redundant loop that repeated the identical work n-1 times.
    """
    global end
    if end == True:
        L = L4 if n == 4 else L8
        for x in range(n - 1):
            if L[0][x] == L[0][x + 1] or L[x][0] == L[x + 1][0] \
               or L[n - 1][x] == L[n - 1][x + 1] or L[x][n - 1] == L[x + 1][n - 1]:
                end = False
def end_animation(n):
    """Show a message when the player loses the game and show the score."""
    loser_animation_part_1(n)
    # NOTE(review): score_calculator computes the score but its result is
    # discarded, and the local ``show = True`` below has no effect on
    # show_score()'s own scope — the score-display path fails at runtime as
    # written.  TODO: thread the score through to show_score.
    score_calculator(n)
    sense.show_message('You lose... Your score is:', 0.075, MESSAGE)
    show = True
    show_score()
    main()
def loser_animation_part_1(n):
    """First part of the losing sequence: freeze the final board, then blink the cross.

    (Removed two unused local colour variables left over from development.)
    """
    set_pixels(n)
    sleep(3)  # let the player see the final board
    sense.clear()
    loser_animation_part_2(n)
def loser_animation_part_2(n):
    """Blink a red cross five times, hold it, then show the final board again."""
    for i in range(5):
        sense.set_pixels(L_CROSS)
        sleep(0.1)
        sense.clear()
        sleep(0.1)
    sense.set_pixels(L_CROSS)
    sleep(1)
    set_pixels(n)
    sleep(2)
def score_calculator(n):
    """Compute the score: the sum of 2**value over all occupied cells.

    Bug fix: the original computed the score into a local variable and then
    discarded it; the value is now returned so callers (e.g. the score
    display) can actually use it.
    """
    L = L4 if n == 4 else L8
    score = 0
    for x in range(n):
        for y in range(n):
            if L[x][y] != 0:
                score = score + 2 ** L[x][y]
    return score
def show_score(score=0):
    """Scroll the score until the player presses any joystick button.

    Bug fix: the original referenced both ``score`` and ``show`` before
    assignment and raised at runtime.  The score is now an optional
    parameter (default 0 keeps the existing no-argument call sites working)
    and the loop flag is initialised locally.
    """
    show = True
    while show:
        sense.show_message(str(score) + 'pts', 0.1, MESSAGE)
        sense.show_message('Press to end', 0.075, MESSAGE)
        for event in sense.stick.get_events():
            if event.action == 'pressed':
                show = False
def exit():
    """Handle a middle press: a second middle press within one second offers
    returning to the games menu.

    NOTE(review): shadows the ``exit`` builtin; renaming would break main(),
    so the name is kept.
    """
    t0 = time()
    # Watch the stick for one second after the triggering press.
    while time() < t0 + 1:
        for event in sense.stick.get_events():
            if event.action == 'pressed' and event.direction == 'middle':
                show_message = True
                while show_message:
                    sense.show_message('Press to return to menu', 0.075, MESSAGE)
                    for event in sense.stick.get_events():
                        if event.action == 'pressed':
                            show_message = False
                games.main()
def control_victory(n):
    """Check whether the maximum (14th) block has been reached and run the win sequence."""
    L = L4 if n == 4 else L8
    for x in range(n):
        for y in range(n):
            if L[x][y] == 14:
                sense.set_pixels(L_WIN)
                victory(n)
    set_pixels(n)
def victory(n):
    """Show the win message and the score once the highest block is reached.

    Bug fix: ``show_score`` was referenced without being called (a no-op
    expression statement); it is now actually invoked.
    NOTE(review): as in end_animation, the computed score is not threaded
    into the score display here.
    """
    sleep(9)  # leave the win screen visible
    score_calculator(n)
    sense.show_message('Congratulations, you just reached the highest block. Your score is :', 0.075, MESSAGE)
    show_score()
    main()
# ----- Joystick reactions -----
def main():
    """Main loop: start a game, then dispatch joystick presses to the move handlers."""
    before_startup()
    running = True
    while running:
        for event in sense.stick.get_events():
            if event.action == 'pressed':
                if event.direction == 'up':
                    moved_up(size)
                elif event.direction == 'down':
                    moved_down(size)
                elif event.direction == 'right':
                    moved_right(size)
                elif event.direction == 'left':
                    moved_left(size)
                elif event.direction == 'middle':
                    # Middle press may exit back to the games menu.
                    exit()
# Standard script entry point.
if __name__ == '__main__':
    main()
|
from .creators import (create_community_chat, create_group_community,
create_member, create_invite_link)
|
from flask_wtf import FlaskForm
# importation of input field types from wtforms
from wtforms import StringField, PasswordField, SubmitField, IntegerField, BooleanField, RadioField, TextAreaField
# importation of validators
from wtforms.validators import Required, Email, EqualTo
# custom validator
from wtforms import ValidationError
# import User model
from ..models import User
class RegistrationForm(FlaskForm):
    """
    Registration form: email, username, age and a double-entry password.
    """
    email = StringField('Input Email Address', validators=[Required(), Email()])
    username = StringField('Input username', validators=[Required()])
    age = IntegerField('age', validators=[Required()])
    # Bug fix: EqualTo previously referenced 'password' (the field itself),
    # so the two password entries were never actually compared; it must
    # point at the confirmation field.
    password = PasswordField('Password', validators=[Required(), EqualTo('password_second', message='Passwords must match')])
    password_second = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('SIGN UP')

    def validate_email(self, data_field):
        """
        Reject an email address that already belongs to an existing account.
        """
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError('Email matches existing account')

    def validate_username(self, data_field):
        """
        Reject a username that is already taken.
        """
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError('Username is already Taken, try another one')
class LoginForm(FlaskForm):
    """
    Login form: email and password, with a remember-me checkbox.
    """
    email = StringField('Email Address Here', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember = BooleanField('Remember me') # render checkbox to remember password details
    submit = SubmitField('Log In')
class PitchForm(FlaskForm):
    """
    Form for submitting a pitch: title, body and a category choice.
    """
    title = StringField('Write Your Pitch', validators=[Required()])
    pitch = TextAreaField('Pitch Goes Here')
    # Bug fix: the choices were parenthesised strings, not (value, label)
    # tuples — ('Promotional') is just the string itself.
    Category = RadioField('Categories', choices=[('Promotional', 'Promotional'),
                                                 ('Motivational', 'Motivational'),
                                                 ('Product', 'Product'),
                                                 ('Ideas', 'Ideas')])
    submit = SubmitField('Submit')
import datetime
import re
from typing import Generator, Text
from scrapers.scrapers import Scraper
class DnevnikStrategy(Scraper):
    """Scraper strategy for dnevnik.bg: article listing, metadata and content."""

    def get_name(self) -> Text:
        """Identifier of this scraper."""
        return 'dnevnik'

    def get_list_url(self) -> Text:
        """URL of the page listing today's articles."""
        URL = 'https://www.dnevnik.bg/allnews/today/'
        return URL

    def list_articles(self, soup) -> Generator[Text, None, None]:
        """Yield the URL of every article found on the listing page.

        (Removed an unused ``grid-container`` lookup left over from development.)
        """
        articles = soup.findAll('article')
        for article in articles:
            header = article.div.h2
            if header is not None:
                link = header.a.get('href')
                print(link)
                yield link

    def get_keywords(self, soup) -> Generator[Text, None, None]:
        """Yield the article's keyword strings, stripped of surrounding whitespace."""
        keywords = soup.findAll('li', {'itemprop': 'keywords'})
        for keyword in keywords:
            yield keyword.text.strip()

    def get_date(self, soup):
        """Return the publication date as a midnight datetime, or defer to the base class."""
        meta = soup.find('meta', {'property': 'article:published_time'})
        if meta:
            date = meta['content']
            # ISO-like prefix: YYYY-MM-DD (time-of-day is deliberately dropped).
            res = re.match(r"^(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})", date)
            y = int(res.group('y'))
            m = int(res.group('m'))
            d = int(res.group('d'))
            date = datetime.datetime(y, m, d, 0, 0, 0)
        else:
            return super(DnevnikStrategy, self).get_date(soup)
        return date

    def get_author(self, soup):
        """Return the author's name, or False when no author markup is present."""
        span = soup.find('span', {'itemprop': 'author'})
        if span:
            meta = span.find('meta', {'itemprop': 'name'})
            return meta['content'].strip()
        return False

    def get_content(self, soup):
        """Return the article body text, trying three known page layouts in order."""
        body = soup.find('div', {'itemprop': 'articleBody'})
        if body:
            return body.text.strip()
        article = soup.find('article', {'id': 'live-story'})
        if article:
            return article.text.strip()
        article = soup.find('div', {'class': 'article-content'})
        if article:
            return article.text.strip()
        return False
|
#!/usr/bin/env python
import rospy
import rostest
import unittest
import sys
import tf2_ros
__author__ = 'Emiliano Borghi'
# Package under test and the node/test name used by rostest.
PKG = 'ca_gazebo'
NAME = 'tf_checker'
class TfCheckerTests(unittest.TestCase):
    """Gazebo/ROS integration test: robot description and the expected tf tree."""

    def __init__(self, *args):
        # Call TestCase class
        super(TfCheckerTests, self).__init__(*args)

    def setUp(self):
        # Init ROS and params
        rospy.init_node(NAME, anonymous=True)
        # Setup the tf listener
        self.buffer = tf2_ros.Buffer()
        self.tl = tf2_ros.TransformListener(self.buffer)

    def test_robot_description_param(self):
        """The robot_description parameter must exist on the parameter server."""
        robot_description_param = rospy.get_param("create1/robot_description", False)
        self.assertNotEqual(robot_description_param, False)

    def check_tree(self, parent, child):
        """Assert that a transform from *parent* to *child* is being broadcast.

        Bug fixes: the original ignored its parent/child arguments and always
        looked up base_link -> base_footprint, and it wrapped the lookup in a
        meaningless ``assertRaises(result)`` call.
        """
        try:
            self.buffer.lookup_transform(
                parent, child,
                rospy.Time(), rospy.Duration(5)
            )
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            self.assertFalse(True, "Test timed out waiting for the transform to be broadcast.")

    def test_frame_exists(self):
        """Walk the tf_test tree from the parameter server and check every edge."""
        self.tf_tree = rospy.get_param("tf_test")
        # Check correct tree
        for parent in self.tf_tree:
            for child in self.tf_tree[parent]:
                rospy.loginfo("Checking Tf {} --> {}".format(parent, child))
                # Bug fix: the child frame prefix was missing its '/'.
                self.check_tree('create1_tf/' + parent, 'create1_tf/' + child)
if __name__ == '__main__':
    # Run under rostest so the ROS test harness collects the results.
    rostest.rosrun(PKG, NAME, TfCheckerTests, sys.argv)
|
"""Tests relating to the PointIsotherm class."""
import pandas
import pytest
from matplotlib.testing.decorators import cleanup
from pandas.testing import assert_series_equal
import pygaps
import pygaps.utilities.exceptions as pgEx
from .conftest import LOADING_AT_PARAM
from .conftest import LOADING_PARAM
from .conftest import PRESSURE_AT_PARAM
from .conftest import PRESSURE_PARAM
@pytest.mark.core
class TestPointIsotherm():
"""Test the PointIsotherm class."""
##########################
def test_isotherm_create(self):
"""Check isotherm can be created from basic data."""
isotherm_param = {
'material': 'carbon',
'adsorbate': 'nitrogen',
'temperature': 77,
}
pressure = [1, 2, 3, 4, 5, 3, 2]
loading = [1, 2, 3, 4, 5, 3, 2]
pygaps.PointIsotherm(
pressure=pressure,
loading=loading,
**isotherm_param,
)
pygaps.PointIsotherm(
isotherm_data=pandas.DataFrame({
'pressure': pressure,
'loading': loading
}),
pressure_key='pressure',
loading_key='loading',
**isotherm_param,
)
# Wrong branch
with pytest.raises(pgEx.ParameterError):
pygaps.PointIsotherm(
pressure=pressure,
loading=loading,
branch='random',
**isotherm_param,
)
def test_isotherm_id(self, basic_pointisotherm):
"""Check isotherm id works as intended."""
iso_id = basic_pointisotherm.iso_id
basic_pointisotherm.new_param = 'changed'
assert iso_id != basic_pointisotherm.iso_id
basic_pointisotherm.data_raw = basic_pointisotherm.data_raw[:5]
assert iso_id != basic_pointisotherm.iso_id
@pytest.mark.parametrize('missing_key', ['loading_key', 'pressure_key'])
def test_isotherm_miss_key(
self,
isotherm_data,
isotherm_parameters,
missing_key,
):
"""Tests exception throw for missing data primary key (loading/pressure)."""
keys = dict(
pressure_key="pressure",
loading_key="loading",
)
del keys[missing_key]
with pytest.raises(pgEx.ParameterError):
pygaps.PointIsotherm(
isotherm_data=isotherm_data,
loading_key=keys.get('loading_key'),
pressure_key=keys.get('pressure_key'),
**isotherm_parameters
)
@pytest.mark.parametrize(
'branch, expected', [
('guess', 4.5),
('des', 1.0),
([False, False, True, True, True, True, True, True], 3.0),
]
)
def test_isotherm_create_branches(
self,
isotherm_data,
isotherm_parameters,
branch,
expected,
):
"""Tests if isotherm branches are well specified."""
isotherm = pygaps.PointIsotherm(
isotherm_data=isotherm_data,
loading_key='loading',
pressure_key='pressure',
other_keys=['enthalpy'],
branch=branch,
**isotherm_parameters
)
assert isotherm.pressure(branch='des')[0] == expected
def test_isotherm_existing_branches(
self,
isotherm_parameters,
isotherm_data,
):
"""Tests if isotherm branches are well specified."""
isotherm_datab = isotherm_data.copy()
isotherm_datab['branch'] = [
False, False, True, True, True, True, True, True
]
isotherm = pygaps.PointIsotherm(
isotherm_data=isotherm_datab,
loading_key='loading',
pressure_key='pressure',
other_keys=['enthalpy'],
**isotherm_parameters
)
assert isotherm.pressure(branch='des')[0] == 3.0
def test_isotherm_equality(
self,
isotherm_parameters,
isotherm_data,
basic_pointisotherm,
):
"""Check isotherm id's are unique"""
isotherm = pygaps.PointIsotherm(
isotherm_data=isotherm_data,
loading_key='loading',
pressure_key='pressure',
other_keys=['enthalpy'],
**isotherm_parameters
)
assert isotherm == basic_pointisotherm
isotherm.temperature = 0
assert isotherm != basic_pointisotherm
def test_isotherm_create_from_isotherm(self, basic_isotherm):
"""Check isotherm can be created from isotherm."""
pygaps.PointIsotherm.from_isotherm(
basic_isotherm,
pressure=[1, 2, 3, 4, 5, 3, 2],
loading=[1, 2, 3, 4, 5, 3, 2],
)
def test_isotherm_create_from_modelisotherm(
self,
basic_modelisotherm,
basic_pointisotherm,
):
"""Check isotherm can be created from isotherm."""
# regular creation
isotherm = pygaps.PointIsotherm.from_modelisotherm(
basic_modelisotherm, pressure_points=None
)
assert isotherm.loading_at(3) == pytest.approx(
basic_modelisotherm.loading_at(3)
)
# Specifying points
isotherm = pygaps.PointIsotherm.from_modelisotherm(
basic_modelisotherm, pressure_points=[1, 2, 3, 4]
)
assert isotherm.loading_at(3) == pytest.approx(
basic_modelisotherm.loading_at(3)
)
# Specifying isotherm
isotherm = pygaps.PointIsotherm.from_modelisotherm(
basic_modelisotherm, pressure_points=basic_pointisotherm
)
assert isotherm.loading_at(3) == pytest.approx(
basic_modelisotherm.loading_at(3)
)
##########################
def test_isotherm_ret_has_branch(
self,
basic_pointisotherm,
):
"""Check that all the functions in pointIsotherm return their specified parameter."""
assert basic_pointisotherm.has_branch(branch='ads')
assert basic_pointisotherm.has_branch(branch='des')
def test_isotherm_ret_data(
self,
basic_pointisotherm,
):
"""Check that all the functions in pointIsotherm return their specified parameter."""
# all data
assert basic_pointisotherm.data().drop('branch', axis=1).equals(
pandas.DataFrame({
basic_pointisotherm.pressure_key:
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 4.5, 2.5],
basic_pointisotherm.loading_key:
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 4.5, 2.5],
"enthalpy": [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 4.0, 4.0],
})
)
# adsorption branch
assert basic_pointisotherm.data(
branch='ads'
).drop('branch', axis=1).equals(
pandas.DataFrame({
basic_pointisotherm.pressure_key:
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
basic_pointisotherm.loading_key:
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
"enthalpy": [5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
})
)
# desorption branch
assert basic_pointisotherm.data(
branch='des'
).drop('branch', axis=1).equals(
pandas.DataFrame({
basic_pointisotherm.pressure_key: [4.5, 2.5],
basic_pointisotherm.loading_key: [4.5, 2.5],
"enthalpy": [4.0, 4.0],
},
index=[6, 7])
)
# Wrong branch
with pytest.raises(pgEx.ParameterError):
basic_pointisotherm.data(branch='random')
@pytest.mark.parametrize(
'expected, parameters',
[
(4.5, {
'branch': 'des'
}), # Branch specified
] + PRESSURE_PARAM
)
def test_isotherm_ret_pressure(
self,
use_adsorbate,
basic_pointisotherm,
expected,
parameters,
):
"""Check that the pressure functions of a pointIsotherm return their specified parameter."""
assert basic_pointisotherm.pressure(
**parameters
)[0] == pytest.approx(expected, 1e-5)
def test_isotherm_ret_pressure_indexed(
self,
basic_pointisotherm,
):
"""Indexed option specified."""
assert_series_equal(
basic_pointisotherm.loading(branch='ads', indexed=True),
pandas.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name='loading')
)
@pytest.mark.parametrize(
'expected, parameters',
[
(4.5, {
'branch': 'des'
}), # Branch specified
] + LOADING_PARAM
)
def test_isotherm_ret_loading(
self,
use_adsorbate,
use_material,
basic_pointisotherm,
expected,
parameters,
):
"""Check that the loading functions of a pointIsotherm return their specified parameter."""
assert basic_pointisotherm.loading(
**parameters
)[0] == pytest.approx(expected, 1e-5)
def test_isotherm_ret_loading_indexed(
self,
basic_pointisotherm,
):
"""Indexed option specified."""
assert basic_pointisotherm.loading(indexed=True).equals(
pandas.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 4.5, 2.5])
)
def test_isotherm_ret_other_data(
self,
basic_pointisotherm,
):
"""Check that all the functions in pointIsotherm return their specified parameter."""
other_key = "enthalpy"
# Standard return
assert set(basic_pointisotherm.other_data(other_key)
) == set([5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 4.0, 4.0])
# Branch specified
assert set(basic_pointisotherm.other_data(other_key, branch='ads')
) == set([5.0, 5.0, 5.0, 5.0, 5.0, 5.0])
# Range specified
assert set(basic_pointisotherm.other_data(other_key, limits=(3, 4.5))
) == set([4.0, 4.0])
# Indexed option specified
assert basic_pointisotherm.other_data(other_key, indexed=True).equals(
pandas.Series([5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 4.0, 4.0])
)
# Error
with pytest.raises(pgEx.ParameterError):
basic_pointisotherm.other_data('random')
##########################
@pytest.mark.parametrize(
'inp, expected, parameters',
[
(10, 20.0, {
'interp_fill': (0, 20)
}), # Interpolate limit
(1.5, 1.5, {
'interpolation_type': 'slinear'
}), # Interpolate type
] + PRESSURE_AT_PARAM
)
def test_isotherm_ret_pressure_at(
self,
use_material,
use_adsorbate,
basic_pointisotherm,
inp,
parameters,
expected,
):
"""Check the PointIsotherm pressure_at(loading) function."""
assert basic_pointisotherm.pressure_at(
inp,
**parameters,
) == pytest.approx(expected, 1e-5)
@pytest.mark.parametrize(
'inp, expected, parameters', [
(10, 20.0, {
'interp_fill': (0, 20)
}),
(1, 1, {
'interpolation_type': 'slinear'
}),
] + LOADING_AT_PARAM
)
def test_isotherm_ret_loading_at(
self,
use_material,
use_adsorbate,
basic_pointisotherm,
inp,
parameters,
expected,
):
"""Returning a loading at a particular point specified parameter."""
assert basic_pointisotherm.loading_at(
inp,
**parameters,
) == pytest.approx(expected, 1e-5)
@pytest.mark.parametrize(
'inp, expected, parameters', [
(1, 1, dict()),
(1, 1, dict(branch='ads')),
(100000, 1, dict(pressure_unit='Pa')),
(0.5, 3.89137, dict(pressure_mode='relative')),
]
)
def test_isotherm_spreading_pressure_at(
self,
use_adsorbate,
basic_pointisotherm,
inp,
parameters,
expected,
):
"""Check the PointIsotherm spreading pressure calculation."""
assert basic_pointisotherm.spreading_pressure_at(
inp, **parameters
) == pytest.approx(expected, 1e-5)
##########################
@pytest.mark.parametrize(
'parameters', [
({
"pressure_mode": "absolute",
"pressure_unit": "Pa",
}),
({
"loading_basis": "mass",
"loading_unit": "g",
}),
({
"material_basis": "volume",
"material_unit": "cm3",
}),
({
"pressure_mode": "absolute",
"pressure_unit": "Pa",
"loading_basis": "mass",
"loading_unit": "g",
"material_basis": "volume",
"material_unit": "cm3",
}),
]
)
def test_isotherm_convert(
self,
use_adsorbate,
use_material,
basic_pointisotherm,
parameters,
):
"""Check convenience conversion function."""
# Do the conversion
basic_pointisotherm.convert(**parameters)
# Check for good parameters as well
for p in [
'pressure_mode',
'pressure_unit',
'loading_basis',
'loading_unit',
'material_basis',
'material_unit',
]:
if p in parameters:
assert getattr(basic_pointisotherm, p) == parameters[p]
@pytest.mark.parametrize(
'expected, parameters', [
(1, {
"unit_to": "bar",
}),
(1e5, {
"mode_to": "absolute",
"unit_to": "Pa",
}),
(0.12849, {
"mode_to": "relative",
}),
(12.849, {
"mode_to": "relative%",
}),
pytest.param(1, {"unit_to": "bad_unit"}, marks=pytest.mark.xfail),
pytest.param(1, {"mode_to": "bad_mode"}, marks=pytest.mark.xfail),
]
)
def test_isotherm_convert_pressure(
self,
use_adsorbate,
basic_pointisotherm,
expected,
parameters,
):
"""Check that the pressure conversion function works as expected."""
# Do the conversion
basic_pointisotherm.convert_pressure(**parameters)
converted = basic_pointisotherm.pressure()[0]
# Check if one datapoint is now as expected
assert converted == pytest.approx(expected, 0.01)
# Check for good parameters as well
if 'mode_to' in parameters:
assert basic_pointisotherm.pressure_mode == parameters['mode_to']
if 'unit_to' in parameters:
assert basic_pointisotherm.pressure_unit == parameters['unit_to']
@pytest.mark.parametrize(
'expected, parameters',
[
(1, {
"basis_to": "molar",
"unit_to": "mmol",
}),
(1e-3, {
"unit_to": "mol",
}),
(22.414, {
"unit_to": "cm3(STP)",
}),
(0.028, {
"basis_to": "mass",
"unit_to": "g",
}),
(0.876484, {
"basis_to": "volume",
"unit_to": "cm3",
}),
(0.0280135, {
'basis_to': 'fraction',
}), # Fractional weight (will be 1/1000 mol * 28.01 g/mol)
(2.80134, {
'basis_to': 'percent',
}), # Percent weight
pytest.param(1, {"unit_to": "bad_unit"}, marks=pytest.mark.xfail),
pytest.param(
1, {"basis_to": "bad_basis"}, marks=pytest.mark.xfail
),
]
)
def test_isotherm_convert_loading(
self,
use_adsorbate,
use_material,
basic_pointisotherm,
expected,
parameters,
):
"""Check that the loading conversion function works as expected."""
# Do the conversion
basic_pointisotherm.convert_loading(**parameters)
converted = basic_pointisotherm.loading()[0]
# Check if one datapoint is now as expected
assert converted == pytest.approx(expected, 0.01)
# Check for good parameters as well
if 'basis_to' in parameters:
assert basic_pointisotherm.loading_basis == parameters['basis_to']
if 'unit_to' in parameters:
assert basic_pointisotherm.loading_unit == parameters['unit_to']
@pytest.mark.parametrize(
'expected, parameters', [
(1, {
"basis_to": "mass",
"unit_to": "g",
}),
(1000, {
"unit_to": "kg",
}),
(0.01, {
"basis_to": "molar",
"unit_to": "mmol",
}),
(2, {
"basis_to": "volume",
"unit_to": "cm3",
}),
pytest.param(1, {"unit_to": "bad_unit"}, marks=pytest.mark.xfail),
pytest.param(
1, {"basis_to": "bad_basis"}, marks=pytest.mark.xfail
),
]
)
def test_isotherm_convert_material(
self,
use_adsorbate,
use_material,
basic_pointisotherm,
expected,
parameters,
):
"""Check that the loading conversion function work as expected."""
# Do the conversion
basic_pointisotherm.convert_material(**parameters)
converted = basic_pointisotherm.loading()[0]
# Check if one datapoint is now as expected
assert converted == pytest.approx(expected, 0.01)
# Check for good parameters as well
if 'basis_to' in parameters:
assert basic_pointisotherm.material_basis == parameters['basis_to']
if 'unit_to' in parameters:
assert basic_pointisotherm.material_unit == parameters['unit_to']
    def test_isotherm_convert_complex(
        self,
        use_adsorbate,
        use_material,
        basic_pointisotherm,
    ):
        """Some more complex conversions are checked here.

        Chains several loading/material basis conversions back-to-back and
        verifies each intermediate datapoint, ending back at mmol/g.
        """
        # Convert from mmol/g -> wt% (g/g)
        basic_pointisotherm.convert_loading(basis_to='fraction')
        assert (
            basic_pointisotherm.loading()[0] == pytest.approx(0.028, 0.001)
        )
        # Convert from wt% (g/g) to vol% (cm3/cm3)
        basic_pointisotherm.convert_material(basis_to='volume', unit_to='cm3')
        assert (
            basic_pointisotherm.loading()[0] == pytest.approx(1.7529, 0.001)
        )
        # Convert from vol% (cm3/cm3) to vol% (m3/m3)
        # (value unchanged: the same unit change applies to both numerator
        # and denominator)
        basic_pointisotherm.convert_material(basis_to='volume', unit_to='m3')
        assert (
            basic_pointisotherm.loading()[0] == pytest.approx(1.7529, 0.001)
        )
        # Convert from vol% (m3/m3) to mol% (mol/mol)
        basic_pointisotherm.convert_material(basis_to='molar', unit_to='mol')
        assert (basic_pointisotherm.loading()[0] == pytest.approx(0.01, 0.001))
        # Convert from mol% (mol/mol) to mmol/mol
        basic_pointisotherm.convert_loading(basis_to='molar', unit_to='mmol')
        assert (basic_pointisotherm.loading()[0] == pytest.approx(10, 0.001))
        # Convert from mmol/mol to mmol/g
        basic_pointisotherm.convert_material(basis_to='mass', unit_to='g')
        assert (basic_pointisotherm.loading()[0] == pytest.approx(1, 0.001))
##########################
    @cleanup
    def test_isotherm_print_parameters(self, basic_pointisotherm):
        """Check isotherm can print its own info.

        Smoke test: printing, plotting and print_info should not raise.
        """
        print(basic_pointisotherm)
        basic_pointisotherm.plot()
        basic_pointisotherm.print_info()
|
# Copyright (c) 2022 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
import hypothesis.strategies as st
import pytest
from hypothesis import given, settings
from pydantic import AnyUrl, PositiveFloat
from pydantic.dataclasses import dataclass as pyd_dataclass
from typing_extensions import Literal
from hydra_zen import builds, instantiate
from hydra_zen.third_party.pydantic import validates_with_pydantic
# Shared parametrization: a pydantic field type together with one value it
# accepts and one it rejects.
parametrize_pydantic_fields = pytest.mark.parametrize(
    "custom_type, good_val, bad_val",
    [
        (PositiveFloat, 22, -1),
        (AnyUrl, "http://www.pythonlikeyoumeanit.com", "hello"),
    ],
)
@parametrize_pydantic_fields
def test_pydantic_specific_fields_function(custom_type, good_val, bad_val):
    """A function annotated with a pydantic field type validates its input
    once wrapped: the good value passes through, the bad value raises."""

    def identity(x):
        return x

    # Attach the pydantic type after definition so the same helper works
    # for every parametrized type.
    identity.__annotations__["x"] = custom_type

    validated = validates_with_pydantic(identity)
    validated(good_val)  # must not raise
    with pytest.raises(Exception):
        validated(bad_val)
@parametrize_pydantic_fields
def test_pydantic_specific_fields_class(custom_type, good_val, bad_val):
    # NOTE(review): the return value of `validates_with_pydantic(A)` is
    # discarded, so this test relies on the class's `__init__` being wrapped
    # in place — confirm against the `validates_with_pydantic` implementation.
    class A:
        def __init__(self, x) -> None:
            pass

    # Attach the pydantic type to __init__ after definition.
    A.__init__.__annotations__["x"] = custom_type
    validates_with_pydantic(A)  # type: ignore
    A(good_val)  # validated constructor accepts the good value
    with pytest.raises(Exception):
        A(bad_val)
def test_custom_validation_config():
    # test that users can pass a custom-configured instance of
    # `pydantic.validate_arguments`
    from functools import partial

    from pydantic import validate_arguments

    class A:
        pass

    def f(x: A):
        return x

    # With arbitrary_types_allowed=True, pydantic accepts the custom class A
    # as an annotation; with False it refuses to build the validator.
    yes_arb_types = partial(
        validates_with_pydantic,
        validator=validate_arguments(config=dict(arbitrary_types_allowed=True)),
    )
    no_arb_types = partial(
        validates_with_pydantic,
        validator=validate_arguments(config=dict(arbitrary_types_allowed=False)),
    )
    yes_arb_types(f)(A())
    with pytest.raises(RuntimeError):
        no_arb_types(f)(A())
@pyd_dataclass
class PydanticConf:
    """Pydantic dataclass used by the documented-example tests below."""

    # x is restricted to the literal values 1 or 2
    x: Literal[1, 2]
    y: int = 2
@pytest.mark.parametrize("x", [1, 2])
def test_documented_example_passes(x):
    """Both values permitted by ``Literal[1, 2]`` instantiate cleanly."""
    HydraConf = builds(PydanticConf, populate_full_signature=True)
    conf = instantiate(HydraConf, x=x)
    assert isinstance(conf, PydanticConf)
    assert conf == PydanticConf(x=x, y=2)
@settings(max_examples=20)
@given(x=(st.integers() | st.floats()).filter(lambda x: x != 1 and x != 2))
def test_documented_example_raises(x):
    """Any value outside ``Literal[1, 2]`` must be rejected on instantiation."""
    HydraConf = builds(PydanticConf, populate_full_signature=True)
    with pytest.raises(Exception):
        # using a broad exception here because of
        # re-raising incompatibilities with Hydra
        instantiate(HydraConf, x=x)
|
import os
from functools import cached_property, lru_cache

import requests
import yaml

from ..exceptions import PrefixError
# YouTrack connection settings, all taken from the environment.
ISSUE_BACKEND_API_KEY = os.environ.get("YOUTRACK_TOKEN")
YOUTRACK_API_URL = os.environ.get("YOUTRACK_API_URL")
ISSUES_ENDPOINT = f"{YOUTRACK_API_URL}/issues"
YOUTRACK_PROJECT = os.environ.get("YOUTRACK_PROJECT")
@lru_cache()
def get_user_mapping():
    """Load and cache the user mapping from the YAML file named by the
    YOUTRACK_USER_MAPPING environment variable.

    Returns:
        The value of the file's ``user_mapping`` key.

    Raises:
        KeyError: if YOUTRACK_USER_MAPPING is unset or the key is missing.
    """
    mapping_path = os.path.expanduser(os.environ["YOUTRACK_USER_MAPPING"])
    with open(mapping_path, "r") as fh:
        # safe_load avoids arbitrary Python object construction; calling
        # yaml.load without an explicit Loader is deprecated and unsafe.
        data = yaml.safe_load(fh)
    return data["user_mapping"]
class Backend:
    """Thin wrapper around a single YouTrack issue."""

    # Re-exported so callers can catch the backend-specific prefix error.
    PrefixError = PrefixError

    # Board column that marks an issue as actively being worked on.
    ACTIVE_COLUMN = "In Progress"

    def __init__(self, issue_number):
        self.issue_number = issue_number

    def move_card(self, column_name):
        """Move this issue to the given board column."""
        self.session.move_card(self.issue_number, column_name)

    @cached_property
    def session(self):
        # cached_property stores the Session on the instance. The previous
        # @property + @lru_cache() pairing cached on the unbound method and
        # kept every Backend instance alive for the life of the process.
        return Session()

    @property
    def subject(self):
        """Summary line of the wrapped issue, fetched from the API."""
        issue = self.session.get_issue(self.issue_number)
        return issue["summary"]
class Session:
    """Minimal authenticated client for the YouTrack REST API."""

    @cached_property
    def session(self):
        # One requests.Session per instance (cached_property replaces the
        # leaky @property + @lru_cache() combination), carrying the bearer
        # token and JSON headers on every call.
        s = requests.Session()
        s.headers.update(
            {
                "Authorization": "Bearer {}".format(ISSUE_BACKEND_API_KEY),
                "Accept": "application/json",
                "Content-Type": "application/json",
            }
        )
        return s

    def _get_custom_field(self, name, value, field_type=None):
        """Build a YouTrack custom-field payload.

        SingleEnumIssueCustomField values must be wrapped in ``{"name": ...}``;
        any other field type passes the value through unchanged.
        """
        field_type = field_type or "SingleEnumIssueCustomField"
        if field_type == "SingleEnumIssueCustomField":
            value = {"name": value}
        return {"name": name, "$type": field_type, "value": value}

    def create_issue(
        self,
        type: str,
        subsystem: str,
        summary: str,
        description: str,
        extra_fields: list = None,
    ):
        """Create an issue in YOUTRACK_PROJECT and return the raw response.

        Raises:
            requests.HTTPError: if YouTrack rejects the request.
        """
        custom_fields = [
            self._get_custom_field("Type", type),
            self._get_custom_field("Subsystem", subsystem),
        ]
        if extra_fields:
            for field in extra_fields:
                field_obj = self._get_custom_field(
                    field["name"], field["value"], field_type=field.get("$type")
                )
                custom_fields.append(field_obj)
        data = {
            "project": {"id": YOUTRACK_PROJECT},
            "summary": summary,
            "description": description,
            "usesMarkdown": True,
            "customFields": custom_fields,
        }
        response = self.session.post(ISSUES_ENDPOINT, json=data)
        response.raise_for_status()
        return response

    def get_issue(self, issue_id: str):
        """Fetch an issue's summary field; return the decoded JSON body."""
        issue_endpoint = f"{ISSUES_ENDPOINT}/{issue_id}?fields=summary"
        response = self.session.get(issue_endpoint)
        response.raise_for_status()
        return response.json()

    def move_card(self, issue_id: str, column_name: str):
        """Set the issue's State field to ``column_name``."""
        data = {
            "customFields": [
                {
                    "value": {"name": column_name},
                    "name": "State",
                    "$type": "SingleEnumIssueCustomField",
                }
            ]
        }
        issue_endpoint = f"{ISSUES_ENDPOINT}/{issue_id}"
        response = self.session.post(issue_endpoint, json=data)
        response.raise_for_status()
|
from django.test import TestCase
from goodbuyDatabase.models import Country, Store, Corporation, Rating, Company, Brand
class TestModels(TestCase):
    """Smoke tests: each model can be created in setUp and read back."""

    def setUp(self):
        # Build one row per model; later rows reference earlier ones by query.
        self.country_obj = Country.objects.create(name='Frankreich', code='FR')
        self.store_obj = Store.objects.create(
            name='Edeka',
            country=Country.objects.get(name='Frankreich'),
        )
        self.corporation_obj = Corporation.objects.create(
            name='Apple',
            logo='www.apple/jpg.de',
            wiki='www.apple/wiki.de',
            origin=Country.objects.get(name='Frankreich'),
        )
        self.rating_obj = Rating.objects.create(
            women_value=8,
            women_rating_text='They treat their female workers good',
            land_value=2,
            land_rating_text='The environment has to suffer a lot because of their produced waste',
            climate_value=5,
            climate_rating_text='They don\'t have a lot to do with climate',
            corporation=Corporation.objects.get(name='Apple'),
        )
        self.company_obj = Company.objects.create(
            name='Amazon',
            logo='www.Amazon/logo.de',
            wiki='www.wiki/Amazon.de',
            corporation=Corporation.objects.get(name='Apple'),
            origin=Country.objects.get(code='FR'),
        )
        self.brand_obj = Brand.objects.create(
            name='Factory',
            company=Company.objects.get(name='Amazon'),
            corporation=Corporation.objects.get(name='Apple'),
        )

    def test_if_country_creation_works(self):
        self.assertEqual(Country.objects.get(name='Frankreich').code, 'FR')

    def test_if_store_creation_works(self):
        self.assertEqual(Store.objects.get(name='Edeka').name, 'Edeka')

    def test_if_corporation_creation_works(self):
        corporation = Corporation.objects.get(name='Apple')
        self.assertEqual(corporation.wiki, 'www.apple/wiki.de')

    def test_if_rating_creation_works(self):
        self.assertEqual(Rating.objects.get(women_value=8).climate_value, 5)

    def test_if_company_creation_works(self):
        company = Company.objects.get(logo='www.Amazon/logo.de')
        self.assertEqual(company.name, 'Amazon')

    def test_if_brand_creation_works(self):
        self.assertEqual(Brand.objects.get(name='Factory').name, 'Factory')
|
#!/usr/bin/env python
# coding:utf-8
import cPickle
import json
import requests
# Directory holding the pickled session artifacts (cookies) from a prior login.
path_history = "./history_file/"

class UseManiphest:
    # Python 2 helper that fetches the Phabricator Maniphest task page using
    # a previously saved session (headers from web_message.json, cookies
    # pickled on disk) and dumps the raw HTML to stdout.
    def __init__(self):
        # Load request headers and the site base URL from the config file.
        with open("./web_files/web_message.json", 'r') as load_f:
            load_dict = json.load(load_f, encoding='UTF-8')
        self.headers = load_dict['headers']
        self.index_url = load_dict['index_url']
        self.maniphest = 'maniphest/'

    def load_session(self):
        # Restore the cookie jar pickled by an earlier login step.
        with open(path_history + 'cookies', 'rb') as f:
            # headers = cPickle.load(f)
            cookies = cPickle.load(f)
        return cookies

    def enter_maniphest(self):
        # Fetch the Maniphest index page with the saved cookies and print it.
        session = requests.session()
        get_url = bytes(self.index_url) + bytes(self.maniphest)
        maniphest_page = session.get(get_url, headers=self.headers, cookies=self.load_session())
        maniphest_content = maniphest_page.content
        print '\n'
        print '\n'
        print '-------------------------------maniphest_content--------------------------------------'
        print maniphest_content
|
# -*- coding: utf-8 -*-
import webbrowser
from string import Template
from constantes import *
from helpers import leer_archivo, mostrar_texto
class ModeloDePresupuesto:
    """Interactive budget generator.

    Prompts for client data and a service, computes VAT and the total,
    renders the TXT/HTML templates and optionally saves and opens the
    resulting budget file. The whole workflow runs from the constructor.
    """

    # Business/header data shown on every budget
    titulo = "PRESUPUESTO"
    encabezado_nombre = "Autotruck Calabozo C.A"
    encabezado_web = "www.autotruckcalabozo.com.ve"
    encabezado_email = "info@autotruckcalabozo.com.ve"
    encabezado_direccion = "Calabozo - Edo. Guárico"
    fecha = FECHA_ACTUAL
    vencimiento = FECHA_CADUCIDAD

    # Tax data: VAT rate in percent
    alicuota_iva = 16

    # Templates for each output format
    html = TEMPLATE_HTML
    txt = TEMPLATE_TXT

    # Services with their descriptions and prices
    services = ('Motor', 'Caja', 'Tren', 'Frenos', 'Mantenimiento')
    motor = ['Servicio de ajustes y correción de fallas en el motor', 5000]
    caja = ['Servicio de ajustes y correción de fallas en la caja', 5000]
    tren = ['Servicio de ajustes y correción en el tren delantero/trasero', 4000]
    frenos = ['Servicio de ajustes y correción de fallas en los frenos', 4000]
    mantenimiento = ['Servicio de camio de aceite y filtro', 3000]
    lista_precios = {'Motor': motor,
                     'Caja': caja,
                     'Tren': tren,
                     'Frenos': frenos,
                     'Mantenimiento': mantenimiento}

    def __init__(self):
        """Run the full budgeting workflow immediately on construction."""
        mostrar_texto(ENCABEZADO_MODULO)
        self.set_cliente()
        self.seleccionar_serv()
        self.calcular_iva()
        self.calcular_neto()
        self.armar_presupuesto()

    def set_cliente(self):
        """Prompt for the client's name, tax id (RIF) and address."""
        self.cliente = input(NOMBRE_CLIENTE)
        self.rif = input(RIF)
        self.direccion = input(DIRECCION_CLIENTE)

    def seleccionar_serv(self):
        """Ask the user to choose a service; re-prompts on invalid input."""
        texto_a_mostrar = ELEGIR_SERVICIO
        for codigo_serv, serv in enumerate(self.services):
            texto_a_mostrar += '(%d)%s ' % (codigo_serv, serv)
        texto_a_mostrar += ": "
        elegir_serv = input(texto_a_mostrar)
        try:
            elegir_serv = int(elegir_serv)
            self.serv = self.services[elegir_serv]
        except (ValueError, IndexError):
            # Not a number, or out of range: inform the user and retry.
            mostrar_texto(DATO_INCORRECTO)
            self.seleccionar_serv()
        else:
            datos_servicio = self.lista_precios[self.serv]
            self.servicio = datos_servicio[0]
            self.importe = float(datos_servicio[1])

    def calcular_iva(self):
        """Compute the VAT amount for the selected service."""
        self.monto_iva = self.importe * self.alicuota_iva / 100

    def calcular_neto(self):
        """Compute the budget grand total (price plus VAT)."""
        self.neto = self.importe + self.monto_iva

    def armar_numero_presupuesto(self):
        """Increment and persist the running budget number."""
        # The with-statement guarantees the counter file is closed even if
        # reading or writing fails (the original leaked the handle on error).
        with open('contador.txt', 'r+') as contador:
            nuevo = str(int(contador.read()) + 1)
            contador.seek(0)
            contador.write(nuevo)
        self.numero_presupuesto = nuevo

    def guardar_presupuesto(self, txt, html):
        """Offer to save the budget.

        'n' just prints the plain-text version; 's' writes the HTML file and
        offers to open it; anything else re-prompts. (The original inline
        comments had the 's'/'n' branches swapped.)
        """
        respuesta = input(GUARDAR_ARCHIVO)
        if respuesta.lower() == 'n':
            # User declined saving: show the text version on screen.
            mostrar_texto(txt)
        elif respuesta.lower() == 's':
            filename = 'presupuestos/' + self.numero_presupuesto + '.html'
            with open(filename, 'w') as presupuesto:
                presupuesto.write(html)
            mostrar_texto(CONFIRM_ARCHIVO_GUARDADO)
            self.mostrar_presupuesto(filename)
        else:
            mostrar_texto(OPCION_INCORRECTA)
            self.guardar_presupuesto(txt, html)

    def mostrar_presupuesto(self, archivo):
        """Open the saved budget in the default browser if the user agrees."""
        respuesta = input(MOSTRAR_PRESUPUESTO)
        if respuesta.lower() == 's':
            webbrowser.open(BASE_DIR + "/" + archivo)

    def armar_presupuesto(self):
        """Assemble the whole budget from the templates and hand it off."""
        self.armar_numero_presupuesto()
        txt = leer_archivo(self.txt)
        html = leer_archivo(self.html)
        # Mapping of template placeholders to their values
        diccionario = dict(nombre=self.encabezado_nombre,
                           web=self.encabezado_web,
                           ubicacion=self.encabezado_direccion,
                           email=self.encabezado_email,
                           titulo=self.titulo,
                           numero=self.numero_presupuesto,
                           fecha=self.fecha,
                           rif=self.rif,
                           cliente=self.cliente,
                           direccion=self.direccion,
                           serv=self.serv,
                           servicio=self.servicio,
                           precio=self.importe,
                           iva=self.monto_iva,
                           total=self.neto,
                           limite=self.vencimiento)
        txt = Template(txt).safe_substitute(diccionario)
        html = Template(html).safe_substitute(diccionario)
        self.guardar_presupuesto(txt, html)
# Instantiate the class: importing/running this module immediately starts
# the interactive budget workflow (blocks on input() prompts).
presupuesto = ModeloDePresupuesto()
|
import discord
import json
from discord.ext import commands
import os
import psycopg2
class HelpCommands(commands.Cog):
    """Cog that implements the bot's custom ``help`` command."""

    def __init__(self, bot):
        self.bot = bot

    @commands.group(name='help')
    async def do_help(self, ctx):
        """Send an embed listing the bot's commands, using the guild's prefix.

        Falls back to the default prefix 'pt!' outside guilds or when no
        prefix row exists for the guild.
        """
        # Initialize so DMs (ctx.guild is None) don't hit an unbound variable
        # at the `if prefix is None` check below (bug in the original).
        prefix = None
        if ctx.guild:
            conn = None
            try:
                DATABASE_URL = os.environ['DATABASE_URL']
                conn = psycopg2.connect(DATABASE_URL, sslmode='require')
                cur = conn.cursor()
                # Parameterized query instead of f-string interpolation —
                # never build SQL by string formatting.
                cur.execute(
                    "SELECT * FROM prefixes WHERE guild_id = %s",
                    (ctx.guild.id,),
                )
                prefix = cur.fetchone()
            except KeyError:
                # DATABASE_URL not configured; use the default prefix.
                prefix = None
            finally:
                # The original leaked the connection on every invocation.
                if conn is not None:
                    conn.close()
        if prefix is None:
            prefix = 'pt!'
        else:
            prefix = prefix[1]
        HelpMsg = discord.Embed(title='Help Page', description='This is a page full of commands you can use with VPT Bot', color=3447003)
        HelpMsg.set_author(name='VPT Bot', icon_url=self.bot.user.avatar_url)
        HelpMsg.add_field(name=prefix + 'help', value='Displays this help message!', inline=False)
        HelpMsg.add_field(name=prefix + 'next [station]', value='Shows next 3 departures per direction from a station.', inline=False)
        HelpMsg.add_field(name=prefix + 'next(train/bus/tram/vline) [station] \n(alias = (next/n)(t/b/t/v) or (t/b/t/v)(next/n)', value='Shows next 3 departures per direction from a station for a route type.', inline=False)
        HelpMsg.add_field(name=prefix + 'next [station]', value='Shows next 3 departures per direction from a station for all route types.', inline=False)
        HelpMsg.add_field(name=prefix + 'setdisruptionschannel [channel]', value='Keeps channel specified up to date with current train disruptions.', inline=False)
        HelpMsg.add_field(name=prefix + 'invite', value='Sends you a link to invite the bot.', inline=False)
        HelpMsg.add_field(name=prefix + 'prefix', value='Shows your current set prefix.', inline=False)
        HelpMsg.add_field(name=prefix + 'setprefix [prefix]', value='Sets a new prefix.', inline=False)
        HelpMsg.set_footer(icon_url=self.bot.user.avatar_url, text='© VPT Bot')
        await ctx.send(embed=HelpMsg)
def setup(bot):
    """Entry point called by discord.py's extension loader."""
    bot.add_cog(HelpCommands(bot))
#!/usr/bin/env python3
# Purpose: Submit Spark job to EMR Master Node
# Author: Gary A. Stafford (December 2020)
# Usage Example: python3 ./submit_spark_ssh.py \
# --ec2-key-path ~/.ssh/emr-demo-123456789012-us-east-1.pem
import argparse
import logging
import boto3
from paramiko import SSHClient, AutoAddPolicy
# Timestamped INFO-level logging for the whole script.
logging.basicConfig(format='[%(asctime)s] %(levelname)s - %(message)s', level=logging.INFO)

# Module-level SSM client, reused by get_parameters().
ssm_client = boto3.client('ssm')
def main():
    """Resolve CLI arguments and SSM parameters, then submit the Spark job."""
    cli_args = parse_args()
    ssm_params = get_parameters()
    submit_job(
        ssm_params['master_public_dns'],
        'hadoop',
        cli_args.ec2_key_path,
        ssm_params['work_bucket'],
    )
def submit_job(master_public_dns, username, ec2_key_path, work_bucket):
    """Submit job to EMR Master Node.

    Args:
        master_public_dns: public DNS name of the EMR master node.
        username: SSH user (typically 'hadoop' on EMR).
        ec2_key_path: path to the EC2 private key file.
        work_bucket: S3 bucket holding the PySpark job script.
    """
    ssh = SSHClient()
    ssh.load_system_host_keys()
    # EMR masters are ephemeral hosts; accept unknown host keys automatically.
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    ssh.connect(hostname=master_public_dns, username=username, key_filename=ec2_key_path)
    try:
        stdin_, stdout_, stderr_ = ssh.exec_command(
            command=f"""
    spark-submit --deploy-mode cluster --master yarn \
        --conf spark.yarn.submit.waitAppCompletion=true \
        s3a://{work_bucket}/analyze/bakery_sales_ssm.py"""
        )
        stdout_lines = ''
        # Poll until the remote command exits, logging output as it arrives.
        while not stdout_.channel.exit_status_ready():
            if stdout_.channel.recv_ready():
                stdout_lines = stdout_.readlines()
                logging.info(' '.join(map(str, stdout_lines)))
    finally:
        # Close the SSH connection even if exec/polling raises
        # (the original leaked it on any exception).
        ssh.close()
def get_parameters():
    """Load parameter values from AWS Systems Manager (SSM) Parameter Store."""
    parameter_names = {
        'master_public_dns': '/movie_sr/master_public_dns',
        'work_bucket': '/movie_sr/work_bucket',
    }
    return {
        key: ssm_client.get_parameter(Name=ssm_name)['Parameter']['Value']
        for key, ssm_name in parameter_names.items()
    }
def parse_args():
    """Parse argument values from the command line."""
    parser = argparse.ArgumentParser(description='Arguments required for script.')
    parser.add_argument('-e', '--ec2-key-path', required=True, help='EC2 Key Path')
    return parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
|
"""SearchCV pipeline for incremental hyper-parameter search."""
# Authors: Peter Steiner <peter.steiner@tu-dresden.de>,
# Simon Stone <simon.stone@tu-dresden.de>
# License: BSD 3 clause
import sys
if sys.version_info >= (3, 8):
from typing import Union, Optional, Callable, Dict, Any, Literal
else:
from typing import Union, Optional, Callable, Dict, Any
from typing_extensions import Literal
from sklearn.base import BaseEstimator
from sklearn.model_selection._search import BaseSearchCV
import numpy as np
from collections.abc import Iterable
class SequentialSearchCV(BaseSearchCV):
    """
    A series of searches on hyper-parameters.

    Parameters
    ----------
    estimator : sklearn.base.BaseEstimator
        Any object derived from ```sklearn.base.BaseEstimator```
        to be sequentially optimized.
    searches : Iterable
        Any ```Iterable``` that contains tuples of search steps.
    scoring : Union[str, Callable, list, tuple, dict, None], default=None
        Strategy to evaluate the performance of the cross-validated model
        on the test set.
        If ```scoring``` represents a single score, one can use:
        - a single string (see The scoring parameter: defining model evaluation rules);
        - a callable (see Defining your scoring strategy from metric functions)
        that returns a single value.
        If ```scoring``` represents multiple scores, one can use:
        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric names and
        the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.
    refit : bool, default = False
        Refit an estimator using the best found parameters on the whole dataset.
        For multiple metric evaluation, this needs to be a ```str``` denoting the scorer
        that would be used to find the best parameters for refitting the estimator
        at the end.
        Where there are considerations other than maximum score in choosing a best
        estimator, refit can be set to a function which returns the selected
        ```best_index_``` given ```cv_results_```. In that case, the
        ```best_estimator_``` and ```best_params_``` will be set according to the
        returned ```best_index_``` while the ```best_score_``` attribute will
        not be available.
        The refitted estimator is made available at the ```best_estimator_``` attribute
        and permits using predict directly on this ```GridSearchCV``` instance.
        Also for multiple metric evaluation, the attributes ```best_index_```,
        ```best_score_``` and ```best_params_``` will only be available if refit is set
        and all of them will be determined w.r.t this specific scorer.
        See ```scoring``` parameter to know more about multiple metric evaluation.
    cv : Union[int, np.integer, Iterable, None], default=None
        Determines the cross-validation splitting strategy. Possible inputs for ```cv```
        are:
        - ```None```, to use the default 5-fold cross validation,
        - integer, to specify the number of folds in a ```(Stratified)KFold```,
        - CV splitter,
        - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, if the estimator is a classifier and ```y``` is either
        binary or multiclass, ```sklearn.model_selection.StratifiedKFold``` is used.
        In all other cases, ```sklearn.model_selection.KFold``` is used.
        These splitters are instantiated with ```shuffle=False``` so the splits will be
        the same across calls.
    verbose : Union[int, np.integer]
        Controls the verbosity: the higher, the more messages.
        - >1 : the computation time for each fold and parameter candidate is displayed;
        - >2 : the score is also displayed;
        - >3 : the fold and candidate parameter indexes are also displayed together with
        the starting time of the computation.
    pre_dispatch: Union[int, np.integer, str], default = '2*n_jobs'
        Controls the number of jobs that get dispatched during parallel execution.
        Reducing this number can be useful to avoid an explosion of memory consumption
        when more jobs get dispatched than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately created and spawned.
        Use this for lightweight and fast-running jobs, to avoid delays due to on-demand
        spawning of the jobs
        - An int, giving the exact number of total jobs that are spawned
        - A str, giving an expression as a function of n_jobs, as in ‘2*n_jobs’
    error_score: Union[Literal['raise'], int, float, np.integer],
        default=np.nan
        Value to assign to the score if an error occurs in estimator fitting. If set to
        'raise', the error is raised. If a numeric value is given, FitFailedWarning
        is raised. This parameter does not affect the refit step,
        which will always raise the error.
    """

    def __init__(self, estimator: BaseEstimator,
                 searches: list,
                 scoring: Union[str, Callable, list, tuple, dict, None] = None,
                 n_jobs: Union[int, np.integer, None] = None,
                 refit: bool = True,
                 cv: Union[int, np.integer, Iterable, None] = None,
                 verbose: Union[int, np.integer] = 0,
                 pre_dispatch: Union[int, np.integer, str] = '2*n_jobs',
                 error_score: Union[Literal['raise'], int,
                                    float, np.integer] = np.nan) -> None:
        """Construct the SequentialSearchCV."""
        # Placeholder only; BaseSearchCV.__init__ re-assigns self.estimator
        # from the `estimator` argument on the next line.
        self.estimator: Optional[BaseEstimator] = None
        super().__init__(estimator, scoring=scoring, n_jobs=n_jobs, refit=refit, cv=cv,
                         verbose=verbose, pre_dispatch=pre_dispatch,
                         error_score=error_score, return_train_score=True)
        self.searches = searches

    def _run_search(self, evaluate_candidates: Callable) -> None:
        """
        Run all the searches.

        Parameters
        ----------
        evaluate_candidates: Callable
        """
        # Unlike sklearn's usual _run_search contract, the callable here
        # receives the whole list of search steps, not parameter candidates.
        evaluate_candidates(self.searches)

    def fit(self, X: np.ndarray, y: np.ndarray, *, groups: Optional[np.ndarray] = None,
            **fit_params: Any) -> BaseSearchCV:
        """
        Run fit with all sets of parameters.

        Parameters
        ----------
        X : np.ndarray, shape=(n_samples, n_features) or (n_sequences)
            Training input.
        y : np.ndarray, shape=(n_samples, n_features) or shape=(n_samples, )
            or (n_sequences)
            Training target.
        groups : Optional[ndarray], shape=(n_samples, ), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set.
            Only used in conjunction with a "Group" cv instance.
        **fit_params : Any
            Parameters passed to the ```fit``` method of the estimator.

        Returns
        -------
        SequentialSearchCV
            The fitted search, with all intermediate results stored in the
            ``all_*_`` dictionaries keyed by step name.
        """
        def evaluate_candidates(searches: list) -> None:
            # Results of every intermediate search step, keyed by step name.
            self.all_cv_results_: Dict[str, dict] = {}
            self.all_best_estimator_: Dict[str, BaseEstimator] = {}
            self.all_best_score_: Dict[str, Any] = {}
            self.all_best_params_: Dict[str, dict] = {}
            self.all_best_index_: Dict[str, int] = {}
            self.all_scorer_: Dict[str, Any] = {}
            self.all_n_splits_: Dict[str, int] = {}
            self.all_refit_time_: Dict[str, float] = {}
            self.all_multimetric_: Dict[str, bool] = {}
            for name, search, params, *kwargs in searches:
                # Each step may carry one optional kwargs dict. refit=True is
                # forced when not supplied, because the next step starts from
                # this step's best_estimator_.
                if len(kwargs) == 1 and 'refit' in kwargs[0].keys():
                    result = search(self.estimator, params, **kwargs[0]).fit(X, y)
                elif len(kwargs) == 1 and 'refit' not in kwargs[0].keys():
                    result = search(self.estimator, params, refit=True,
                                    **kwargs[0]).fit(X, y)
                else:
                    result = search(self.estimator, params, refit=True).fit(X, y)
                # Save the attributes of the intermediate search results
                self.all_cv_results_[name] = result.cv_results_
                self.all_best_estimator_[name] = result.best_estimator_
                self.all_best_score_[name] = result.best_score_
                self.all_best_params_[name] = result.best_params_
                self.all_best_index_[name] = result.best_index_
                self.all_scorer_[name] = result.scorer_
                self.all_n_splits_[name] = result.n_splits_
                self.all_refit_time_[name] = result.refit_time_
                self.all_multimetric_[name] = result.multimetric_
                # The winner of this step becomes the estimator for the next.
                self.estimator = result.best_estimator_
        self._run_search(evaluate_candidates)
        return self

    @property
    def cv_results_(self) -> dict:
        """
        A dict with keys as column headers and values as columns.

        It can be imported into a pandas DataFrame. For instance the below given table
        will be represented by a cv_results_ dict of:
        {
        'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
        mask = False),
        'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
        'split0_test_score' : [0.80, 0.84, 0.70],
        'split1_test_score' : [0.82, 0.50, 0.70],
        'mean_test_score' : [0.81, 0.67, 0.70],
        'std_test_score' : [0.01, 0.24, 0.00],
        'rank_test_score' : [1, 3, 2],
        'split0_train_score' : [0.80, 0.92, 0.70],
        'split1_train_score' : [0.82, 0.55, 0.70],
        'mean_train_score' : [0.81, 0.74, 0.70],
        'std_train_score' : [0.01, 0.19, 0.00],
        'mean_fit_time' : [0.73, 0.63, 0.43],
        'std_fit_time' : [0.01, 0.02, 0.01],
        'mean_score_time' : [0.01, 0.06, 0.04],
        'std_score_time' : [0.00, 0.00, 0.00],
        'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
        }

        Note
        ----
        The key 'params' is used to store a list of parameter settings dicts
        for all the parameter candidates.
        The mean_fit_time, std_fit_time, mean_score_time and std_score_time
        are all in seconds.
        For multi-metric evaluation, the scores for all the scorers are available in the
        cv_results_ dict at the keys ending with that scorer’s name ('_<scorer_name>')
        instead of '_score' shown above.
        (‘split0_test_precision’, ‘mean_train_precision’ etc.)

        Returns
        -------
        dict
        """
        # All terminal attributes below report the LAST search step's result.
        return self.all_cv_results_[self.searches[-1][0]]

    @property
    def best_estimator_(self) -> Any:
        """
        Estimator that was chosen by the search.

        I.e. estimator which gave highest score
        (or smallest loss if specified) on the left out data.
        Not available if refit=False.
        See refit parameter for more information on allowed values.

        Returns
        -------
        Estimator
        """
        if self.refit:
            return self.all_best_estimator_[self.searches[-1][0]]
        return None

    @property
    def best_score_(self) -> float:
        """
        Mean cross-validated score of the best_estimator.

        For multi-metric evaluation, this is present only if refit is specified.
        This attribute is not available if refit is a function.

        Returns
        -------
        float
        """
        if self.refit:
            return self.all_best_score_[self.searches[-1][0]]
        return np.nan

    @property
    def best_params_(self) -> dict:
        """
        Parameter setting that gave the best results on the hold out data.

        For multi-metric evaluation, this is present only if refit is specified.

        Returns
        -------
        dict
        """
        if self.refit:
            return self.all_best_params_[self.searches[-1][0]]
        return {}

    @property
    def best_index_(self) -> Union[int, np.integer]:
        """
        The index (of the cv_results_ arrays) which corresponds to the best candidate.

        The dict at search.cv_results_['params'][search.best_index_] gives the parameter
        setting for the best model, that gives the highest mean score
        (search.best_score_).
        For multi-metric evaluation, this is present only if refit is specified.

        Returns
        -------
        Union[int, np.integer]
        """
        if self.refit:
            return self.all_best_index_[self.searches[-1][0]]
        return 0

    @property
    def scorer_(self) -> Dict:
        """
        Scorer function used on the held out data.

        To choose the best parameters for the model.
        For multi-metric evaluation, this attribute holds the validated scoring dict
        which maps the scorer key to the scorer callable.

        Returns
        -------
        function or a dict
        """
        return self.all_scorer_[self.searches[-1][0]]

    @property
    def n_splits_(self) -> Union[int, np.integer]:
        """
        The number of cross-validation splits (folds/iterations).

        Returns
        -------
        Union[int, np.integer]
        """
        return self.all_n_splits_[self.searches[-1][0]]

    @property
    def refit_time_(self) -> float:
        """
        Seconds used for refitting the best model on the whole dataset.

        Returns
        -------
        float
        """
        return self.all_refit_time_[self.searches[-1][0]]

    @property
    def multimetric(self) -> bool:
        """
        Whether or not the scorers compute several metrics.

        Returns
        -------
        bool
        """
        # NOTE(review): name lacks the trailing underscore the sibling
        # properties (and sklearn's `multimetric_`) use — kept as-is for
        # backward compatibility with existing callers.
        return self.all_multimetric_[self.searches[-1][0]]
|
"""
A `WebView` mixin.
Mixin for a window that adds functions to setup `html2` objects.
Adds events that control external and internal links.
Loads HTML files from our specified data location.
Loads HTML from strings and even converts it from Markdown if requested.
Licensed under MIT
Copyright (c) 2013 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import wx.html2
import markdown
import pymdownx.slugs as slugs
import os
import re
import html
from urllib.request import url2pathname
from urllib.parse import urlparse
from .. import data
from ..app.custom_app import debug
import webbrowser
from .. import util
import functools
# Content kinds accepted by the WebView loaders.
HTML_FILE = 0
HTML_STRING = 1
MARKDOWN_STRING = 2

# Link categories used when deciding how a clicked link is handled.
URL_LINK = 0
HTML_LINK = 1
BLANK_LINK = 2
OTHER_LINK = 3

# Patterns for recognizing Windows drive-letter style paths inside URLs.
RE_WIN_DRIVE_LETTER = re.compile(r"^[A-Za-z]$")
RE_WIN_DRIVE_PATH = re.compile(r"^[A-Za-z]:(?:\\.*)?$")
RE_SLASH_WIN_DRIVE = re.compile(r"^/[A-Za-z]{1}:/.*")
# Schemes treated as true URLs rather than local files.
RE_URL = re.compile('(http|ftp)s?|data|mailto|tel|news')

# Markdown extensions enabled when converting documentation.
EXTENSIONS = [
    "markdown.extensions.toc",
    "markdown.extensions.attr_list",
    "markdown.extensions.def_list",
    "markdown.extensions.smarty",
    "markdown.extensions.footnotes",
    "markdown.extensions.tables",
    "markdown.extensions.sane_lists",
    "markdown.extensions.admonition",
    "markdown.extensions.md_in_html",
    "pymdownx.highlight",
    "pymdownx.inlinehilite",
    "pymdownx.magiclink",
    "pymdownx.superfences",
    "pymdownx.betterem",
    "pymdownx.keys",
    "pymdownx.escapeall",
    "pymdownx.smartsymbols",
    "pymdownx.tasklist",
    "pymdownx.tilde",
    "pymdownx.caret",
    "pymdownx.mark",
    "pymdownx.b64",
    "pymdownx.pathconverter"
]

# Per-extension configuration; paths are rooted at the bundled docs folder.
EXTENSION_CONFIGS = {
    "markdown.extensions.toc": {
        "slugify": slugs.uslugify,
    },
    "pymdownx.inlinehilite": {
        "style_plain_text": True
    },
    "pymdownx.superfences": {
        "custom_fences": []
    },
    "pymdownx.magiclink": {
        "repo_url_shortener": True,
        "repo_url_shorthand": True,
        "user": "facelessuser",
        "repo": "Rummage"
    },
    "markdown.extensions.smarty": {
        "smart_quotes": False
    },
    "pymdownx.escapeall": {
        "hardbreak": True,
        "nbsp": True
    },
    "pymdownx.pathconverter": {
        "base_path": os.path.join(data.RESOURCE_PATH, 'docs'),
        "absolute": True
    },
    "pymdownx.b64": {
        "base_path": os.path.join(data.RESOURCE_PATH, 'docs')
    }
}

# Page shell filled with (title, inline CSS, converted Markdown body).
TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<title>%s</title>
<style>
%s
</style>
</head>
<body>
<div class="markdown">
%s
</div>
</body>
</html>
"""
def escape(txt):
    """Basic HTML escaping.

    Replace the characters that are significant in HTML markup with their
    character entities. As written, every `.replace()` call substituted a
    character with itself (the entities had been collapsed), so the function
    was a no-op and `txt` was interpolated into the HTML template unescaped.
    `&` must be replaced first so the other entities are not double-escaped.
    """
    txt = txt.replace('&', '&amp;')
    txt = txt.replace('<', '&lt;')
    txt = txt.replace('>', '&gt;')
    txt = txt.replace('"', '&quot;')
    return txt
def convert_markdown(title, content):
    """Render a Markdown string into a complete standalone HTML page.

    The page title is HTML-escaped, the bundled docs stylesheet is inlined,
    and the Markdown body is converted with the module's extension setup.
    """
    stylesheet = data.get_file(os.path.join('docs', 'css', 'theme.css'))
    converter = markdown.Markdown(extensions=EXTENSIONS, extension_configs=EXTENSION_CONFIGS)
    body = converter.convert(content)
    return TEMPLATE % (escape(title), stylesheet, body)
def parse_url(url):
    """
    Parse the URL.
    Try to determine if the following is a file path or
    (as we will call anything else) a URL.
    We return it slightly modified and combine the path parts.
    We also assume if we see something like c:/ it is a Windows path.
    We don't bother checking if this **is** a Windows system, but
    'nix users really shouldn't be creating weird names like c: for their folder.

    Returns a 9-tuple: the six `urlparse` components (scheme, netloc, path,
    params, query, fragment) -- possibly rewritten to a `file` scheme -- plus
    three classification flags (is_url, is_absolute, is_blank).

    NOTE: the branch order below is significant; each `elif` assumes the
    earlier, more specific cases did not match.
    """
    is_url = False
    is_absolute = False
    is_blank = False
    # Entities are unescaped first so links coming from rendered HTML parse cleanly.
    scheme, netloc, path, params, query, fragment = urlparse(html.unescape(url))
    if scheme == 'about' and netloc == '' and path == "blank":
        is_blank = True
    elif RE_URL.match(scheme):
        # Clearly a URL
        is_url = True
    elif scheme == '' and netloc == '' and path == '':
        # Maybe just a URL fragment
        is_url = True
    elif scheme == 'file' and (RE_WIN_DRIVE_PATH.match(netloc)):
        # file://c:/path or file://c:\path
        path = '/' + (netloc + path).replace('\\', '/')
        netloc = ''
        is_absolute = True
    elif scheme == 'file' and netloc.startswith('\\'):
        # file://\c:\path or file://\\path
        path = (netloc + path).replace('\\', '/')
        netloc = ''
        is_absolute = True
    elif scheme == 'file':
        # file:///path
        is_absolute = True
    elif RE_WIN_DRIVE_LETTER.match(scheme):
        # c:/path -- a single-letter "scheme" is really a Windows drive letter.
        path = '/%s:%s' % (scheme, path.replace('\\', '/'))
        scheme = 'file'
        netloc = ''
        is_absolute = True
    elif scheme == '' and netloc != '' and url.startswith('//'):
        # //file/path -- UNC-style share; fold the host back into the path.
        path = '//' + netloc + path
        scheme = 'file'
        netloc = ''
        is_absolute = True
    elif scheme != '' and netloc != '':
        # A non file path or strange URL
        is_url = True
    elif path.startswith(('/', '\\')):
        # /root path
        is_absolute = True
    return (scheme, netloc, path, params, query, fragment, is_url, is_absolute, is_blank)
def link_type(link):
    """Classify *link* as URL_LINK, BLANK_LINK, HTML_LINK, or OTHER_LINK.

    A link counts as HTML_LINK only when it resolves to an existing local
    `.html` file or directory. Any parsing failure yields OTHER_LINK.
    """
    # Renamed the local so it no longer shadows this function's own name.
    classification = OTHER_LINK
    try:
        parts = parse_url(link)
        scheme, path = parts[0], parts[2]
        is_url, is_blank = parts[6], parts[8]
        if is_url:
            classification = URL_LINK
        elif is_blank:
            classification = BLANK_LINK
        else:
            local = url2pathname(path).replace('\\', '/')
            # Adjust /c:/ to c:/.
            if scheme == 'file' and RE_SLASH_WIN_DRIVE.match(local):
                local = local[1:]
            target = os.path.normpath(local)
            if os.path.exists(target) and (target.lower().endswith('.html') or os.path.isdir(target)):
                classification = HTML_LINK
    except Exception:
        # Parsing crashed and burned; no need to continue.
        pass
    return classification
class WebViewMixin:
    """HTML `WebView` helper mixin.

    Wires navigation/load/title events on a `wx.html2.WebView`-like control,
    tracks a per-control `busy` flag, and routes link clicks either to the
    embedded view (local HTML) or to the system browser (external URLs).
    """
    def setup_html(self, obj, control_title=None):
        """Setup HTML events.

        `obj` is the web view control; `control_title`, when given, is a
        window whose title mirrors the loaded page's title.
        """
        # Setup busy tracker
        obj.busy = False
        obj.control_title = control_title
        # Setup events
        obj.Bind(wx.html2.EVT_WEBVIEW_NAVIGATING, functools.partial(self.on_navigate, obj=obj))
        obj.Bind(wx.html2.EVT_WEBVIEW_LOADED, functools.partial(self.on_html_loaded, obj=obj))
        obj.Bind(wx.html2.EVT_WEBVIEW_TITLE_CHANGED, functools.partial(self.on_title_changed, obj=obj))
    def load_html(self, obj, content, title, content_type):
        """Load HTML.

        `content` is a docs-relative file name for HTML_FILE, an HTML string
        for HTML_STRING, or Markdown source for MARKDOWN_STRING.
        """
        obj.content_type = content_type
        if obj.content_type == HTML_FILE:
            url = 'file://%s' % os.path.join(data.RESOURCE_PATH, 'docs', content).replace('\\', '/')
            if obj.busy:
                # NOTE(review): this stops `self`, not `obj` -- confirm the
                # mixin host is the control, otherwise this looks like a bug.
                self.Stop()
            obj.busy = True
            obj.LoadURL(url)
        else:
            if obj.content_type == MARKDOWN_STRING:
                content = convert_markdown(title, content)
            else:
                # HTML_STRING: use the string as-is (intentional no-op).
                content = content
            obj.busy = True
            obj.SetPage(content, 'file://')
            if util._PLATFORM == "windows":
                # Ugh. Why can't things just work
                # Here we must reload the page so that things render properly.
                # This was done to fix poorly rendered pages observed in Windows.
                obj.Reload()
    def on_navigate(self, event, obj=None):
        """Handle links."""
        target = event.GetTarget()
        url = event.GetURL()
        debug("HTML Nav URL: " + url)
        debug("HTML Nav Target: " + target)
        # Things we can allow the backend to handle (local HTML files)
        ltype = link_type(url)
        if ltype == BLANK_LINK:
            obj.busy = True
        # We don't handle links outside of a "blank" (HTML string) page.
        # This mainly occurs on Windows.
        elif obj.content_type == HTML_STRING and url.startswith('about:'):
            obj.busy = False
            event.Veto()
        # 'Nix systems treat "blank" (HTML string) pages as root paths most of the time.
        # So if we get `file:///` that is not empty and not linking to a target, we are
        # Linking outside or page, but not to an external site.
        elif obj.content_type == HTML_STRING and not (url == 'file:///' or url.startswith('file:///#')):
            obj.busy = False
            event.Veto()
        elif ltype == HTML_LINK:
            obj.busy = True
        # Send URL links to browser
        elif ltype == URL_LINK:
            webbrowser.open_new_tab(url)
            obj.busy = False
            event.Veto()
        # Show unhandled links
        else:
            debug("HTML unhandled link: " + url)
            obj.busy = False
            event.Veto()
    def on_title_changed(self, event, obj=None):
        """Mirror the page title onto the tracking window, if any."""
        if obj.control_title is not None:
            title = obj.CurrentTitle
            obj.control_title.SetTitle(title)
    def on_html_loaded(self, event, obj=None):
        """Handle loaded event: the view is no longer busy."""
        obj.busy = False
|
import sys
sys.path.append('./')
import pytest
import torch
from src.models.mlp import MLP
from src.models.cnn import CNN
@pytest.mark.parametrize(
    ["input_shape", "in_features", "num_classes", "hidden_dim"],
    [
        ((2, 1, 28, 28), 784, 10, 200),
    ],
)
def test_mlp_forward(input_shape, in_features, num_classes, hidden_dim):
    """The MLP forward pass should yield logits shaped (batch, num_classes)."""
    batch = torch.FloatTensor(*input_shape)
    net = MLP(in_features, num_classes, hidden_dim)
    out = net(batch)
    assert out.shape == (input_shape[0], num_classes)
@pytest.mark.parametrize(
    ["input_shape", "in_features", "num_classes"],
    [
        ((2, 1, 28, 28), 1, 10),
    ],
)
def test_cnn_forward(input_shape, in_features, num_classes):
    """The CNN forward pass should yield logits shaped (batch, num_classes)."""
    batch = torch.FloatTensor(*input_shape)
    net = CNN(in_features, num_classes)
    out = net(batch)
    assert out.shape == (input_shape[0], num_classes)
if __name__ == "__main__":
    # Allow running this file directly (without pytest) as a quick sanity check.
    test_cnn_forward((2, 1, 28, 28), 1, 10)
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the OpenTimelineIO project
"""Unit tests for the OTIO to SVG adapter"""
import os
import unittest
import tempfile
import xml.etree.ElementTree as ET
import opentimelineio as otio
# Input .otio timelines and their reference .svg renderings used by the
# round-trip tests below; all live next to this file under sample_data/.
SAMPLE_DATA_DIR = os.path.join(os.path.dirname(__file__), "sample_data")
SIMPLE_CUT_OTIO_PATH = os.path.join(SAMPLE_DATA_DIR, 'simple_cut.otio')
SIMPLE_CUT_SVG_PATH = os.path.join(SAMPLE_DATA_DIR, 'simple_cut.svg')
MULTIPLE_TRACK_OTIO_PATH = os.path.join(SAMPLE_DATA_DIR, 'multiple_track.otio')
MULTIPLE_TRACK_SVG_PATH = os.path.join(SAMPLE_DATA_DIR, 'multiple_track.svg')
TRANSITION_OTIO_PATH = os.path.join(SAMPLE_DATA_DIR, 'transition.otio')
TRANSITION_SVG_PATH = os.path.join(SAMPLE_DATA_DIR, 'transition.svg')
def _svg_equal(e1, e2):
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if e1.tail != e2.tail:
return False
if e1.attrib != e2.attrib:
return False
if len(e1) != len(e2):
return False
return all(_svg_equal(c1, c2) for c1, c2 in zip(e1, e2))
class SVGAdapterTest(unittest.TestCase):
    """Round-trip tests: render sample .otio timelines to SVG and compare
    against the checked-in reference SVGs."""

    def _check_svg_round_trip(self, otio_path, reference_svg_path):
        """Write *otio_path* out as SVG and compare it to the reference SVG.

        Factors out the body that was duplicated verbatim in all three tests.
        Also closes the file descriptor returned by mkstemp (the original
        discarded it, leaking one fd per test) and names the parsed trees
        after what they actually are (the originals had "test" and
        "reference" swapped).
        """
        self.maxDiff = None
        fd, tmp_path = tempfile.mkstemp(suffix=".svg", text=True)
        os.close(fd)  # mkstemp returns an *open* fd; close it so it doesn't leak.
        timeline = otio.core.deserialize_json_from_file(otio_path)
        otio.adapters.write_to_file(input_otio=timeline, filepath=tmp_path)
        reference_root = ET.parse(reference_svg_path).getroot()
        generated_root = ET.parse(tmp_path).getroot()
        self.assertTrue(_svg_equal(generated_root, reference_root))

    def test_simple_cut(self):
        self._check_svg_round_trip(SIMPLE_CUT_OTIO_PATH, SIMPLE_CUT_SVG_PATH)

    def test_multiple_tracks(self):
        self._check_svg_round_trip(MULTIPLE_TRACK_OTIO_PATH, MULTIPLE_TRACK_SVG_PATH)

    def test_transition(self):
        self._check_svg_round_trip(TRANSITION_OTIO_PATH, TRANSITION_SVG_PATH)
|
import abc
from semantic_version import Version
class InterestingPath(abc.ABC):
    """
    A path which is important to you in some way.
    For example, in Linux it may be the installation path of a library.
    (Fixed the doubled "which which" in the original docstring.)
    """

    def __init__(self, architecture: int, path: str, version: Version):
        # Presumably the bitness of the target (e.g. 32/64) -- confirm with callers.
        self.architecture: int = architecture
        # Fixed: the attribute was annotated `path` (i.e. the parameter object
        # itself), not the intended type `str`.
        self.path: str = path
        self.version: Version = version

    def __str__(self):
        return f"{{ architecture: {self.architecture}, version: {self.version}, path: {self.path} }}"
|
# -*- Mode: Python -*-
# keep the last 10 compilers in case of nasty bugs that are
# difficult to back out of.
import os
import re
def rename_binaries():
    """Shift every self/compileN to self/compileN+1, discarding compile10.

    Only single-digit suffixes are matched, so at most ten generations
    (compile0..compile9) are kept; after the shift, anything that landed on
    compile10 has fallen off the end and is deleted.
    """
    numbered = []
    for name in os.listdir('./self'):
        match = re.match('^compile([0-9])$', name)
        if match is not None:
            numbered.append((int(match.group(1)), name))
    # Rename highest-numbered first so no rename clobbers a file that has
    # not been moved yet.
    for num, name in sorted(numbered, reverse=True):
        os.rename('self/%s' % name, 'self/compile%d' % (num + 1))
    # over the horizon...
    if os.path.isfile('self/compile10'):
        os.unlink('self/compile10')
# Retire the current compiler to slot 0, then try rebuilding with it.
os.rename ('self/compile', 'self/compile0')
if 0 == os.system ('self/compile0 self/compile.scm'):
    # Build succeeded: shift the generation history to make room.
    rename_binaries()
else:
    # Build failed: restore the old compiler so nothing is lost.
    os.rename ('self/compile0', 'self/compile')
|
from pathlib import Path
from typing import Optional
from labml import monit
from labml.internal import util
from labml.internal.lab import WebAPIConfigs
# Per-user config file, stored directly in the home directory.
_CONFIG_FILE_NAME = '.labml'
class Computer:
    """
    ### Computer
    Lab contains the labml specific properties.

    Loads (or creates on first run) the `~/.labml` YAML config, fills in
    defaults, and exposes the machine's `uuid` and web-API settings.
    """
    # Web API settings assembled from the config file.
    web_api: WebAPIConfigs
    # Stable per-machine identifier, generated once and persisted.
    uuid: str
    def __init__(self):
        self.home = Path.home()
        self.__load_configs()
    def __load_configs(self):
        """Read the config file, creating it with a fresh uuid if absent,
        then merge in defaults for any missing keys."""
        config_file = self.home / _CONFIG_FILE_NAME
        if config_file.exists():
            with open(str(config_file)) as f:
                config = util.yaml_load(f.read())
                if config is None:
                    # Empty file parses to None; treat as an empty config.
                    config = {}
        else:
            with monit.section('Creating a .labml config'):
                from uuid import uuid1
                config = {'uuid': uuid1().hex}
                with open(str(config_file), 'w') as f:
                    f.write(util.yaml_dump(config))
        default_config = self.__default_config()
        for k, v in default_config.items():
            if k not in config:
                config[k] = v
        self.uuid = config['uuid']
        web_api_url = config['web_api']
        # A value not starting with "http" is taken to be a bare token and is
        # expanded into the default API URL.
        if web_api_url[0:4] != 'http':
            web_api_url = f"https://api.lab-ml.com/api/v1/computer?labml_token={web_api_url}&"
        self.web_api = WebAPIConfigs(url=web_api_url,
                                     frequency=config['web_api_frequency'],
                                     verify_connection=config['web_api_verify_connection'],
                                     open_browser=config['web_api_open_browser'])
    def __str__(self):
        return f"<Computer uuid={self.uuid}>"
    def __repr__(self):
        return str(self)
    @staticmethod
    def __default_config():
        # Defaults merged into any loaded config for keys the user has not set.
        return dict(
            web_api='https://api.lab-ml.com/api/v1/computer?',
            web_api_frequency=0,
            web_api_verify_connection=True,
            web_api_open_browser=True,
        )
_internal: Optional[Computer] = None
def computer_singleton() -> Computer:
    """Lazily create and return the process-wide ``Computer`` instance."""
    global _internal
    if _internal is not None:
        return _internal
    _internal = Computer()
    return _internal
|
"""Errors for puckfetcher."""
class PuckError(Exception):
"""
Generic Exception for errors in this project.
Attributes:
desc -- short message describing error
"""
def __init__(self, desc: str) -> None:
super(PuckError, self).__init__()
self.desc = desc
class BadCommandError(PuckError):
    """
    Exception raised when a command is given bad arguments.
    Attributes:
        desc -- short message describing error.
    """
    def __init__(self, desc: str) -> None:
        # Zero-argument super() (Python 3 form) replaces the explicit
        # super(BadCommandError, self) spelling; behavior is identical.
        super().__init__(desc)
class MalformedConfigError(PuckError):
    """
    Exception raised when we were provided invalid options during Config construction.
    Attributes:
        desc -- short message describing error
    """
    def __init__(self, desc: str) -> None:
        # Zero-argument super() (Python 3 form); behavior unchanged.
        super().__init__(desc)
class MalformedSubscriptionError(PuckError):
    """
    Exception raised when we were provided invalid options during Subscription construction.
    Attributes:
        desc -- short message describing error
    """
    def __init__(self, desc: str) -> None:
        # Zero-argument super() (Python 3 form); behavior unchanged.
        super().__init__(desc)
|
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# __init__.py
#
# Parts of publications, e.g. PartOfBook
# These are technically elements, with all their behavior, but typically
# they are not placed in a layout. Instead they contain a set of pages,
# that get transfered to a new created Document during composition.
#
from pagebot.publications.portfolios.baseportfolio import BasePortfolio
# Registry of available portfolio publication classes.
# Currently empty; add entries as concrete portfolio types are implemented.
PORTFOLIO_CLASSES = {
}
|
# hello_milvus.py demonstrates the basic operations of PyMilvus, a Python SDK of Milvus.
# 1. connect to Milvus
# 2. create collection
# 3. insert data
# 4. create index
# 5. search, query, and hybrid search on entities
# 6. delete entities by PK
# 7. drop collection
import random
import time
from pymilvus import (
connections,
utility,
FieldSchema, CollectionSchema, DataType,
Collection,
)
fmt = "\n=== {:30} ===\n"
search_latency_fmt = "search latency = {:.4f}s"
#################################################################################
# 1. connect to Milvus
# Add a new connection alias `default` for Milvus server in `localhost:19530`
# Actually the "default" alias is a buildin in PyMilvus.
# If the address of Milvus is the same as `localhost:19530`, you can omit all
# parameters and call the method as: `connections.connect()`.
#
# Note: the `using` parameter of the following methods is default to "default".
print(fmt.format("start connecting to Milvus"))
connections.connect("default", host="localhost", port="19530")
has = utility.has_collection("hello_milvus")
print(f"Does collection hello_milvus exist in Milvus: {has}")
#################################################################################
# 2. create collection
# We're going to create a collection with 3 fields.
# +-+------------+------------+------------------+------------------------------+
# | | field name | field type | other attributes | field description |
# +-+------------+------------+------------------+------------------------------+
# |1| "pk" | Int64 | is_primary=True | "primary field" |
# | | | | auto_id=False | |
# +-+------------+------------+------------------+------------------------------+
# |2| "random" | Double | | "a double field" |
# +-+------------+------------+------------------+------------------------------+
# |3|"embeddings"| FloatVector| dim=8 | "float vector with dim 8" |
# +-+------------+------------+------------------+------------------------------+
fields = [
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=False),
FieldSchema(name="random", dtype=DataType.DOUBLE),
FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=8)
]
schema = CollectionSchema(fields, "hello_milvus is the simplest demo to introduce the APIs")
print(fmt.format("Create collection `hello_milvus`"))
hello_milvus = Collection("hello_milvus", schema, consistency_level="Strong")
################################################################################
# 3. insert data
# We are going to insert 3000 rows of data into `hello_milvus`
# Data to be inserted must be organized in fields.
#
# The insert() method returns:
# - either automatically generated primary keys by Milvus if auto_id=True in the schema;
# - or the existing primary key field from the entities if auto_id=False in the schema.
print(fmt.format("Start inserting entities"))
num_entities = 3000
entities = [
# provide the pk field because `auto_id` is set to False
[i for i in range(num_entities)],
[float(random.randrange(-20, -10)) for _ in range(num_entities)], # field random
[[random.random() for _ in range(8)] for _ in range(num_entities)], # field embeddings
]
insert_result = hello_milvus.insert(entities)
print(f"Number of entities in Milvus: {hello_milvus.num_entities}") # check the num_entites
################################################################################
# 4. create index
# We are going to create an IVF_FLAT index for hello_milvus collection.
# create_index() can only be applied to `FloatVector` and `BinaryVector` fields.
print(fmt.format("Start Creating index IVF_FLAT"))
index = {
"index_type": "IVF_FLAT",
"metric_type": "L2",
"params": {"nlist": 128},
}
hello_milvus.create_index("embeddings", index)
################################################################################
# 5. search, query, and hybrid search
# After data were inserted into Milvus and indexed, you can perform:
# - search based on vector similarity
# - query based on scalar filtering(boolean, int, etc.)
# - hybrid search based on vector similarity and scalar filtering.
#
# Before conducting a search or a query, you need to load the data in `hello_milvus` into memory.
print(fmt.format("Start loading"))
hello_milvus.load()
# -----------------------------------------------------------------------------
# search based on vector similarity
print(fmt.format("Start searching based on vector similarity"))
# Reuse the last two inserted embedding vectors as queries.
vectors_to_search = entities[-1][-2:]
search_params = {
    # Must match the metric the index was built with: the index above uses
    # "L2", but this was lowercase "l2", which disagrees with the index
    # definition and with the documented metric-type names.
    "metric_type": "L2",
    "params": {"nprobe": 10},
}
start_time = time.time()
result = hello_milvus.search(vectors_to_search, "embeddings", search_params, limit=3, output_fields=["random"])
end_time = time.time()
for hits in result:
    for hit in hits:
        print(f"hit: {hit}, random field: {hit.entity.get('random')}")
print(search_latency_fmt.format(end_time - start_time))
# -----------------------------------------------------------------------------
# query based on scalar filtering(boolean, int, etc.)
print(fmt.format("Start querying with `random > -14`"))
start_time = time.time()
result = hello_milvus.query(expr="random > -14", output_fields=["random", "embeddings"])
end_time = time.time()
print(f"query result:\n-{result[0]}")
print(search_latency_fmt.format(end_time - start_time))
# -----------------------------------------------------------------------------
# hybrid search: vector similarity restricted by the scalar expression.
print(fmt.format("Start hybrid searching with `random > -12`"))
start_time = time.time()
result = hello_milvus.search(vectors_to_search, "embeddings", search_params, limit=3, expr="random > -12", output_fields=["random"])
end_time = time.time()
for hits in result:
    for hit in hits:
        print(f"hit: {hit}, random field: {hit.entity.get('random')}")
print(search_latency_fmt.format(end_time - start_time))
###############################################################################
# 6. delete entities by PK
# You can delete entities by their PK values using boolean expressions.
ids = insert_result.primary_keys
expr = f"pk in [{ids[0]}, {ids[1]}]"
print(fmt.format(f"Start deleting with expr `{expr}`"))
# Query before/after the delete to demonstrate the entities are gone.
result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"])
print(f"query before delete by expr=`{expr}` -> result: \n-{result[0]}\n-{result[1]}\n")
hello_milvus.delete(expr)
result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"])
print(f"query after delete by expr=`{expr}` -> result: {result}\n")
###############################################################################
# 7. drop collection
# Finally, drop the hello_milvus collection
print(fmt.format("Drop collection `hello_milvus`"))
utility.drop_collection("hello_milvus")
|
# Memo table seeded with the two base cases. The original used
# `[ 0 : 0, 1 : 1 ]`, which is a SyntaxError -- dict literals use braces.
fibVrednosti = {0: 0, 1: 1}
def fib(n):
    """Return the n-th Fibonacci number, memoizing results in fibVrednosti."""
    # Membership on the dict itself replaces `n in fibVrednosti.keys()`.
    if n in fibVrednosti:
        return fibVrednosti[n]
    rezultat = fib(n - 1) + fib(n - 2)
    fibVrednosti[n] = rezultat
    return rezultat
rezultat = fib(100)
print(rezultat)
|
print("begin smoke test")
import sys
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import tensorflow as tf
tf.test.is_gpu_available()
print("<<< See GPU info above >>>")
import numpy as np
import align
import argparse
import facenet
import os
import sys
import math
import pickle
from sklearn.svm import SVC
print("end smoke test")
|
import urllib.request
import cv2
import numpy as np
import os
def create_pos_n_neg(file_types=('pos',)):
    """Write OpenCV cascade-training description files for image directories.

    For each directory name in *file_types*:
      * 'pos': append "<dir>/<img> 1 0 0 200 150" per image to info.dat
        (one object per image, bounding box 0,0 200x150).
      * 'neg': append "<dir>/<img>" per image to bg.txt.

    The default ('pos',) preserves the original behavior, where the loop
    iterated over ['pos'] only and the 'neg' branch was unreachable dead
    code; passing ('pos', 'neg') now exercises both.
    """
    for file_type in file_types:
        for img in os.listdir(file_type):
            print (file_type)
            if file_type == 'pos':
                line = file_type+'/'+img+' 1 0 0 200 150\n'
                with open('info.dat','a') as f:
                    f.write(line)
            elif file_type == 'neg':
                line = file_type+'/'+img+'\n'
                with open('bg.txt','a') as f:
                    f.write(line)
create_pos_n_neg() |
class Pipeline:
    """Run a sequence of jobs in order, threading one shared context dict.

    Each job must expose an ``execute(context)`` method; mutations a job
    makes to the context are visible to every later job.
    """

    def __init__(self, jobs, context=None):
        self.jobs = jobs
        # A falsy context (None, {}) is replaced with a fresh dict, matching
        # the original `context or {}` semantics exactly.
        self.context = context if context else {}

    def execute(self):
        """Execute every job in sequence with the shared context."""
        for current_job in self.jobs:
            current_job.execute(self.context)
|
"""
The YAML subset accepted by ``demes`` is defined here as a ``strictyaml`` schema.
"""
from strictyaml import (
CommaSeparated,
Map,
MapPattern,
Float,
Int,
Optional,
Seq,
Str,
)
# A YAML number field may be given as an int or a float.
Number = Int() | Float()
# One epoch of a deme's history: a time span with size/behavior parameters.
epoch_schema = Map(
    {
        Optional("start_time"): Number,
        "end_time": Number,
        Optional("initial_size"): Number,
        Optional("final_size"): Number,
        Optional("size_function"): Str(),
        Optional("selfing_rate"): Number,
        Optional("cloning_rate"): Number,
    }
)
# One-directional migration from "source" into "dest".
asymmetric_migration_schema = Map(
    {
        Optional("start_time"): Number,
        Optional("end_time"): Number,
        "source": Str(),
        "dest": Str(),
        "rate": Float(),
    }
)
# A single rate shared among all listed demes, in every direction.
symmetric_migration_schema = Map(
    {
        Optional("start_time"): Number,
        Optional("end_time"): Number,
        "demes": CommaSeparated(Str()),
        "rate": Float(),
    }
)
# An instantaneous admixture event: a proportion of "dest" replaced by "source".
pulse_schema = Map(
    {"time": Number, "source": Str(), "dest": Str(), "proportion": Float()}
)
# A single deme: its ancestry, time span, sizes, and optional epoch list.
deme_schema = Map(
    {
        Optional("description"): Str(),
        Optional("ancestors"): CommaSeparated(Str()),
        Optional("proportions"): CommaSeparated(Float()),
        Optional("start_time"): Number,
        Optional("end_time"): Number,
        Optional("initial_size"): Number,
        Optional("final_size"): Number,
        Optional("epochs"): Seq(epoch_schema),
        Optional("selfing_rate"): Number,
        Optional("cloning_rate"): Number,
    }
)
# Top-level document: metadata plus demes keyed by name, with optional
# migrations and pulses tying them together.
deme_graph_schema = Map(
    {
        "description": Str(),
        "time_units": Str(),
        Optional("generation_time"): Number,
        Optional("doi"): Str(),
        "demes": MapPattern(Str(), deme_schema),
        Optional("migrations"): Map(
            {
                Optional("symmetric"): Seq(symmetric_migration_schema),
                Optional("asymmetric"): Seq(asymmetric_migration_schema),
            }
        ),
        Optional("pulses"): Seq(pulse_schema),
        Optional("selfing_rate"): Number,
        Optional("cloning_rate"): Number,
    }
)
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents the state of Python objects being formatted.
Objects (e.g., list comprehensions, dictionaries, etc.) have specific
requirements on how they're formatted. These state objects keep track of these
requirements.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ComprehensionState(object):
  """Maintains the state of list comprehension formatting decisions.

  A stack of ComprehensionState objects are kept to ensure that list
  comprehensions are wrapped with well-defined rules.

  Attributes:
    expr_token: The first token in the comprehension.
    for_token: The first 'for' token of the comprehension.
    has_split_at_for: Whether there is a newline immediately before the
      for_token.
    has_interior_split: Whether there is a newline within the comprehension.
      That is, a split somewhere after expr_token or before closing_bracket.
  """

  def __init__(self, expr_token):
    self.expr_token = expr_token
    self.for_token = None
    self.has_split_at_for = False
    self.has_interior_split = False

  def HasTrivialExpr(self):
    """Returns whether the comp_expr is "trivial" i.e. is a single token."""
    return self.expr_token.next_token.value == 'for'

  @property
  def opening_bracket(self):
    """The bracket token immediately preceding the comprehension expression."""
    return self.expr_token.previous_token

  @property
  def closing_bracket(self):
    """The bracket token that matches opening_bracket."""
    return self.opening_bracket.matching_bracket

  def Clone(self):
    """Return an independent copy of this state."""
    duplicate = ComprehensionState(self.expr_token)
    duplicate.for_token = self.for_token
    duplicate.has_split_at_for = self.has_split_at_for
    duplicate.has_interior_split = self.has_interior_split
    return duplicate

  def __repr__(self):
    template = ('[opening_bracket::%s, for_token::%s, has_split_at_for::%s,'
                ' has_interior_split::%s, has_trivial_expr::%s]')
    return template % (self.opening_bracket, self.for_token,
                       self.has_split_at_for, self.has_interior_split,
                       self.HasTrivialExpr())

  def __eq__(self, other):
    # Equality is defined via the hash, which covers all mutable state.
    return hash(self) == hash(other)

  def __ne__(self, other):
    return not self == other

  def __hash__(self, *args, **kwargs):
    return hash((self.expr_token, self.for_token, self.has_split_at_for,
                 self.has_interior_split))
|
class Solution:
    def canBeEqual(self, target: list, arr: list) -> bool:
        """Arrays are reachable from each other by reversals iff they are
        equal as multisets, i.e. their sorted forms match."""
        left, right = list(target), list(arr)
        left.sort()
        right.sort()
        return left == right


if __name__ == '__main__':
    target = [1, 2, 3, 4]
    arr = [2, 4, 1, 3]
    print(f"Input: target = {target}, arr = {arr}")
    print(f"Output: {Solution().canBeEqual(target, arr)}")
|
from xarray import Dataset
import os
import numpy as np
import tempfile
from urbanspoon import repository, core
from urbanspoon.tests.conftest import (
time_series_factory,
spatial_gcm_factory,
)
def test_read_dataset():
    """read_dataset should return exactly the Dataset that was written."""
    url = "memory://test_read_dataset.zarr"  # writing in memory FS -- don't assign same names throughout tests.
    Dataset({"bar": 321}).to_zarr(url)
    actual = repository.read_dataset(url)
    # Expect the value that was actually written: the original expected 123
    # after writing 321, so the test could never pass.
    expected = Dataset({"bar": 321})
    # Dataset.__eq__ is element-wise (it returns a Dataset, not a bool), so
    # use identical() for a meaningful assertion.
    assert actual.identical(expected)
def test_read_array():
    """read_array should return the stored values of a single variable."""
    url = "memory://test_read_array.zarr"
    Dataset({"bar": 321}).to_zarr(url)  # Manually write to memory FS.
    result = repository.read_array(url, "bar")
    np.testing.assert_equal(result, np.array(321))
def test_write_colored_maps():
    """write_plot with plot_colored_maps should produce a PNG on disk."""
    fakedata = {"A": spatial_gcm_factory(), "B": spatial_gcm_factory()}
    with tempfile.NamedTemporaryFile() as outfile:
        # NOTE(review): the plot is written to "<tmpname>.png", a *different*
        # path than the NamedTemporaryFile itself, so it is not cleaned up
        # when the context exits -- confirm whether that is acceptable.
        repository.write_plot(
            out=f"{outfile.name}.png",
            format="png",
            plot_func=core.plot_colored_maps,
            da=fakedata,
            common_title="sometitle",
            units="someunits",
            color_bar_range=(0, 1),
        )
        assert os.path.isfile(f"{outfile.name}.png")
def test_write_colored_timeseries():
    """write_plot with plot_colored_timeseries should produce a PNG on disk."""
    # Two labeled series with the styling keys the plot function reads.
    fakedata = {
        "A": {
            "temporal_data": time_series_factory(),
            "linestyle": ":",
            "color": "blue",
        },
        "B": {
            "temporal_data": time_series_factory(),
            "linestyle": ":",
            "color": "black",
        },
    }
    with tempfile.NamedTemporaryFile() as outfile:
        # NOTE(review): as in test_write_colored_maps, "<tmpname>.png" is not
        # removed when the context manager exits.
        repository.write_plot(
            out=f"{outfile.name}.png",
            format="png",
            plot_func=core.plot_colored_timeseries,
            da=fakedata,
            title="sometitle",
            units="someunits",
        )
        assert os.path.isfile(f"{outfile.name}.png")
|
import os, signal, subprocess, sys
import StringIO
import ShUtil
import Test
import Util
import platform
import tempfile
class InternalShellError(Exception):
    """Raised when the internal shell runner cannot execute a command.

    Carries the offending command object and a human-readable message.
    """

    def __init__(self, command, message):
        self.command = command
        self.message = message
# Platform quirks: Windows needs different fd handling and has no /dev/null.
kIsWindows = platform.system() == 'Windows'
# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
def executeCommand(command, cwd=None, env=None):
    """Run *command* to completion and return (stdout, stderr, exitCode).

    Raises KeyboardInterrupt if the child was killed by SIGINT, so a user's
    Ctrl-C propagates out of the test run.
    """
    proc = subprocess.Popen(command,
                            cwd=cwd,
                            env=env,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    exitCode = proc.wait()
    # Detect Ctrl-C in subprocess.
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt
    return out, err, exitCode
def executeShCmd(cmd, cfg, cwd, results):
    """Execute a parsed shell command tree (Python 2 code).

    `cmd` is either a ShUtil.Seq (';', '||', '&&' sequencing -- handled by
    recursion) or a ShUtil.Pipeline of commands connected by pipes. Each
    executed command appends (command, stdout, stderr, exitCode) to
    `results`. Returns the overall exit code.
    """
    if isinstance(cmd, ShUtil.Seq):
        if cmd.op == ';':
            # Left side runs for its side effects; only the right side's
            # exit code is returned.
            res = executeShCmd(cmd.lhs, cfg, cwd, results)
            return executeShCmd(cmd.rhs, cfg, cwd, results)
        if cmd.op == '&':
            raise NotImplementedError,"unsupported test command: '&'"
        if cmd.op == '||':
            res = executeShCmd(cmd.lhs, cfg, cwd, results)
            if res != 0:
                res = executeShCmd(cmd.rhs, cfg, cwd, results)
            return res
        if cmd.op == '&&':
            res = executeShCmd(cmd.lhs, cfg, cwd, results)
            if res is None:
                return res
            if res == 0:
                res = executeShCmd(cmd.rhs, cfg, cwd, results)
            return res
        raise ValueError,'Unknown shell command: %r' % cmd.op
    assert isinstance(cmd, ShUtil.Pipeline)
    procs = []
    input = subprocess.PIPE
    stderrTempFiles = []
    opened_files = []
    named_temp_files = []
    # To avoid deadlock, we use a single stderr stream for piped
    # output. This is null until we have seen some output using
    # stderr.
    for i,j in enumerate(cmd.commands):
        # Apply the redirections, we use (N,) as a sentinal to indicate stdin,
        # stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
        # from a file are represented with a list [file, mode, file-object]
        # where file-object is initially None.
        redirects = [(0,), (1,), (2,)]
        for r in j.redirects:
            if r[0] == ('>',2):
                redirects[2] = [r[1], 'w', None]
            elif r[0] == ('>>',2):
                redirects[2] = [r[1], 'a', None]
            elif r[0] == ('>&',2) and r[1] in '012':
                redirects[2] = redirects[int(r[1])]
            elif r[0] == ('>&',) or r[0] == ('&>',):
                redirects[1] = redirects[2] = [r[1], 'w', None]
            elif r[0] == ('>',):
                redirects[1] = [r[1], 'w', None]
            elif r[0] == ('>>',):
                redirects[1] = [r[1], 'a', None]
            elif r[0] == ('<',):
                redirects[0] = [r[1], 'r', None]
            else:
                raise NotImplementedError,"Unsupported redirect: %r" % (r,)
        # Map from the final redirections to something subprocess can handle.
        final_redirects = []
        for index,r in enumerate(redirects):
            if r == (0,):
                result = input
            elif r == (1,):
                if index == 0:
                    raise NotImplementedError,"Unsupported redirect for stdin"
                elif index == 1:
                    result = subprocess.PIPE
                else:
                    result = subprocess.STDOUT
            elif r == (2,):
                if index != 2:
                    raise NotImplementedError,"Unsupported redirect on stdout"
                result = subprocess.PIPE
            else:
                if r[2] is None:
                    if kAvoidDevNull and r[0] == '/dev/null':
                        r[2] = tempfile.TemporaryFile(mode=r[1])
                    else:
                        r[2] = open(r[0], r[1])
                    # Workaround a Win32 and/or subprocess bug when appending.
                    #
                    # FIXME: Actually, this is probably an instance of PR6753.
                    if r[1] == 'a':
                        r[2].seek(0, 2)
                    opened_files.append(r[2])
                result = r[2]
            final_redirects.append(result)
        stdin, stdout, stderr = final_redirects
        # If stderr wants to come from stdout, but stdout isn't a pipe, then put
        # stderr on a pipe and treat it as stdout.
        if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
            stderr = subprocess.PIPE
            stderrIsStdout = True
        else:
            stderrIsStdout = False
            # Don't allow stderr on a PIPE except for the last
            # process, this could deadlock.
            #
            # FIXME: This is slow, but so is deadlock.
            if stderr == subprocess.PIPE and j != cmd.commands[-1]:
                stderr = tempfile.TemporaryFile(mode='w+b')
                stderrTempFiles.append((i, stderr))
        # Resolve the executable path ourselves.
        args = list(j.args)
        args[0] = Util.which(args[0], cfg.environment['PATH'])
        if not args[0]:
            raise InternalShellError(j, '%r: command not found' % j.args[0])
        # Replace uses of /dev/null with temporary files.
        if kAvoidDevNull:
            for i,arg in enumerate(args):
                if arg == "/dev/null":
                    f = tempfile.NamedTemporaryFile(delete=False)
                    f.close()
                    named_temp_files.append(f.name)
                    args[i] = f.name
        procs.append(subprocess.Popen(args, cwd=cwd,
                                      stdin = stdin,
                                      stdout = stdout,
                                      stderr = stderr,
                                      env = cfg.environment,
                                      close_fds = kUseCloseFDs))
        # Immediately close stdin for any process taking stdin from us.
        if stdin == subprocess.PIPE:
            procs[-1].stdin.close()
            procs[-1].stdin = None
        # Update the current stdin source.
        if stdout == subprocess.PIPE:
            input = procs[-1].stdout
        elif stderrIsStdout:
            input = procs[-1].stderr
        else:
            input = subprocess.PIPE
    # FIXME: There is probably still deadlock potential here. Yawn.
    procData = [None] * len(procs)
    procData[-1] = procs[-1].communicate()
    # Drain stdout/stderr of all the earlier pipeline stages.
    for i in range(len(procs) - 1):
        if procs[i].stdout is not None:
            out = procs[i].stdout.read()
        else:
            out = ''
        if procs[i].stderr is not None:
            err = procs[i].stderr.read()
        else:
            err = ''
        procData[i] = (out,err)
    # Read stderr out of the temp files.
    for i,f in stderrTempFiles:
        f.seek(0, 0)
        procData[i] = (procData[i][0], f.read())
    exitCode = None
    for i,(out,err) in enumerate(procData):
        res = procs[i].wait()
        # Detect Ctrl-C in subprocess.
        if res == -signal.SIGINT:
            raise KeyboardInterrupt
        results.append((cmd.commands[i], out, err, res))
        if cmd.pipe_err:
            # Python treats the exit code as a signed char.
            if res < 0:
                exitCode = min(exitCode, res)
            else:
                exitCode = max(exitCode, res)
        else:
            # Without pipe_err, the last command's status wins.
            exitCode = res
    # Explicitly close any redirected files.
    for f in opened_files:
        f.close()
    # Remove any named temporary files we created.
    for f in named_temp_files:
        try:
            os.remove(f)
        except OSError:
            pass
    if cmd.negate:
        exitCode = not exitCode
    return exitCode
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
    """Run the test's RUN lines using the internal shell implementation.

    Joins the commands with '&&', parses them with ShUtil, and executes the
    resulting pipeline via executeShCmd.  Returns (out, err, exitCode) on
    completion, or an early 2-tuple (status, message) on a parse error.
    """
    ln = ' &&\n'.join(commands)
    try:
        cmd = ShUtil.ShParser(ln, litConfig.isWindows).parse()
    except:
        return (Test.FAIL, "shell parser error on: %r" % ln)

    results = []
    try:
        exitCode = executeShCmd(cmd, test.config, cwd, results)
    except InternalShellError as e:
        # Record the failing command in the results list so its message is
        # included in the formatted output below.  Previously the message was
        # assigned to `err` and then immediately clobbered by the
        # `out = err = ''` reset, silently dropping the diagnostic.  This now
        # matches the handling in executeTclScriptInternal.
        results.append((e.command, '', e.message + '\n', 255))
        exitCode = 255

    out = err = ''
    for i,(cmd, cmd_out, cmd_err, res) in enumerate(results):
        out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
        out += 'Command %d Result: %r\n' % (i, res)
        out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
        out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)
    return out, err, exitCode
def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
    """Run the test's RUN lines using Tcl-style lexing and 'exec' parsing.

    Each line is lexed with TclUtil, parsed as a Tcl 'exec' pipeline, and the
    pipelines are chained with '&&'.  If a usable bash is available (and
    litConfig.useTclAsSh is set) the chained command is written to a script
    and run through bash; otherwise it is executed with the internal shell.
    Returns (out, err, exitCode), or an early 2-tuple (status, message) on a
    lex/parse error.
    """
    import TclUtil
    cmds = []
    for ln in commands:
        # Given the unfortunate way LLVM's test are written, the line gets
        # backslash substitution done twice.
        ln = TclUtil.TclLexer(ln).lex_unquoted(process_all = True)
        try:
            tokens = list(TclUtil.TclLexer(ln).lex())
        except:
            return (Test.FAIL, "Tcl lexer error on: %r" % ln)
        # Validate there are no control tokens.
        for t in tokens:
            if not isinstance(t, str):
                return (Test.FAIL,
                        "Invalid test line: %r containing %r" % (ln, t))
        try:
            cmds.append(TclUtil.TclExecCommand(tokens).parse_pipeline())
        except:
            return (Test.FAIL, "Tcl 'exec' parse error on: %r" % ln)
    if litConfig.useValgrind:
        for pipeline in cmds:
            if pipeline.commands:
                # Only valgrind the first command in each pipeline, to avoid
                # valgrinding things like grep, not, and FileCheck.
                cmd = pipeline.commands[0]
                cmd.args = litConfig.valgrindArgs + cmd.args
    # Chain all pipelines together: the whole run fails as soon as one fails.
    cmd = cmds[0]
    for c in cmds[1:]:
        cmd = ShUtil.Seq(cmd, '&&', c)
    # FIXME: This is lame, we shouldn't need bash. See PR5240.
    bashPath = litConfig.getBashPath()
    if litConfig.useTclAsSh and bashPath:
        script = tmpBase + '.script'
        # Write script file
        f = open(script,'w')
        # 'pipefail' makes the pipeline's exit status reflect any failing
        # stage, mirroring Tcl exec semantics.
        print >>f, 'set -o pipefail'
        cmd.toShell(f, pipefail = True)
        f.close()
        # Debugging aid: dump the command and generated script instead of
        # running it (disabled).
        if 0:
            print >>sys.stdout, cmd
            print >>sys.stdout, open(script).read()
            print >>sys.stdout
            return '', '', 0
        command = [litConfig.getBashPath(), script]
        out,err,exitCode = executeCommand(command, cwd=cwd,
                                          env=test.config.environment)
        return out,err,exitCode
    else:
        results = []
        try:
            exitCode = executeShCmd(cmd, test.config, cwd, results)
        except InternalShellError,e:
            # Record the failing command so its message appears in the
            # formatted output below.
            results.append((e.command, '', e.message + '\n', 255))
            exitCode = 255
        out = err = ''
        for i,(cmd, cmd_out, cmd_err, res) in enumerate(results):
            out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
            out += 'Command %d Result: %r\n' % (i, res)
            out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
            out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)
        return out, err, exitCode
def executeScript(test, litConfig, tmpBase, commands, cwd):
    """Write the RUN lines to a script file and execute it with an
    external shell (cmd.exe on Windows, /bin/sh elsewhere).

    Returns whatever executeCommand returns for the script invocation.
    """
    isWin = litConfig.isWindows
    script = tmpBase + '.script'
    if isWin:
        script += '.bat'

    # Batch files must bail out explicitly after each failing command;
    # sh scripts simply chain the commands with '&&'.
    if isWin:
        body = '\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands)
    else:
        body = ' &&\n'.join(commands)
    f = open(script,'w')
    f.write(body)
    f.write('\n')
    f.close()

    if isWin:
        command = ['cmd','/c', script]
    else:
        command = ['/bin/sh', script]
    if litConfig.useValgrind:
        # FIXME: Running valgrind on sh is overkill. We probably could just
        # run on clang with no real loss.
        command = litConfig.valgrindArgs + command

    return executeCommand(command, cwd=cwd, env=test.config.environment)
def isExpectedFail(xfails, xtargets, target_triple):
    """Return True if the test is expected to fail on this target.

    An XFAIL entry matches when it is '*' or a substring of the target
    triple; a matching XTARGET entry overrides the XFAIL and marks the
    test as expected to pass.
    """
    def matches(pattern):
        # '*' matches any target; otherwise substring match on the triple.
        return pattern == '*' or pattern in target_triple

    # No XFAIL entry matches this target: expected to pass.
    if not any(matches(item) for item in xfails):
        return False

    # If so, see if it is expected to pass on this target.
    #
    # FIXME: Rename XTARGET to something that makes sense, like XPASS.
    return not any(matches(item) for item in xtargets)
def parseIntegratedTestScript(test, normalize_slashes=False):
    """parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
    script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET'
    information. The RUN lines also will have variable substitution performed.

    Returns (script, isXFail, tmpBase, execdir) on success, or an early
    2-tuple (status, message) when the test is malformed or unsupported.
    """
    # Get the temporary location, this is always relative to the test suite
    # root, not test source root.
    #
    # FIXME: This should not be here?
    sourcepath = test.getSourcePath()
    sourcedir = os.path.dirname(sourcepath)
    execpath = test.getExecPath()
    execdir,execbase = os.path.split(execpath)
    tmpBase = os.path.join(execdir, 'Output', execbase)
    # Disambiguate temp paths when the same file yields multiple tests.
    if test.index is not None:
        tmpBase += '_%d' % test.index
    # Normalize slashes, if requested.
    if normalize_slashes:
        sourcepath = sourcepath.replace('\\', '/')
        sourcedir = sourcedir.replace('\\', '/')
        tmpBase = tmpBase.replace('\\', '/')
    # We use #_MARKER_# to hide %% while we do the other substitutions.
    substitutions = [('%%', '#_MARKER_#')]
    substitutions.extend(test.config.substitutions)
    substitutions.extend([('%s', sourcepath),
                          ('%S', sourcedir),
                          ('%p', sourcedir),
                          ('%t', tmpBase + '.tmp'),
                          # FIXME: Remove this once we kill DejaGNU.
                          ('%abs_tmp', tmpBase + '.tmp'),
                          ('#_MARKER_#', '%')])
    # Collect the test lines from the script.
    script = []
    xfails = []
    xtargets = []
    requires = []
    for ln in open(sourcepath):
        if 'RUN:' in ln:
            # Isolate the command to run.
            index = ln.index('RUN:')
            ln = ln[index+4:]
            # Trim trailing whitespace.
            ln = ln.rstrip()
            # Collapse lines with trailing '\\'.
            if script and script[-1][-1] == '\\':
                script[-1] = script[-1][:-1] + ln
            else:
                script.append(ln)
        elif 'XFAIL:' in ln:
            # Comma-separated list of targets expected to fail.
            items = ln[ln.index('XFAIL:') + 6:].split(',')
            xfails.extend([s.strip() for s in items])
        elif 'XTARGET:' in ln:
            # Comma-separated list of targets that override an XFAIL.
            items = ln[ln.index('XTARGET:') + 8:].split(',')
            xtargets.extend([s.strip() for s in items])
        elif 'REQUIRES:' in ln:
            # Comma-separated list of features the config must provide.
            items = ln[ln.index('REQUIRES:') + 9:].split(',')
            requires.extend([s.strip() for s in items])
        elif 'END.' in ln:
            # Check for END. lines.
            if ln[ln.index('END.'):].strip() == 'END.':
                break
    # Apply substitutions to the script.
    def processLine(ln):
        # Apply substitutions
        for a,b in substitutions:
            ln = ln.replace(a,b)
        # Strip the trailing newline and any extra whitespace.
        return ln.strip()
    # (Python 2: map returns a list here.)
    script = map(processLine, script)
    # Verify the script contains a run line.
    if not script:
        return (Test.UNRESOLVED, "Test has no run line!")
    # Check for unterminated run lines.
    if script[-1][-1] == '\\':
        return (Test.UNRESOLVED, "Test has unterminated run lines (with '\\')")
    # Check that we have the required features:
    missing_required_features = [f for f in requires
                                 if f not in test.config.available_features]
    if missing_required_features:
        msg = ', '.join(missing_required_features)
        return (Test.UNSUPPORTED,
                "Test requires the following features: %s" % msg)
    isXFail = isExpectedFail(xfails, xtargets, test.suite.config.target_triple)
    return script,isXFail,tmpBase,execdir
def formatTestOutput(status, out, err, exitCode, failDueToStderr, script):
    """Render a human-readable report for a failed (or xfailed) test.

    Includes the RUN script, the exit code, and any captured stdout/stderr.
    Returns (status, report_text).
    """
    output = StringIO.StringIO()
    print >>output, "Script:"
    print >>output, "--"
    print >>output, '\n'.join(script)
    print >>output, "--"
    # Trailing comma suppresses the newline so the stderr note (if any) can
    # share the line.
    print >>output, "Exit Code: %r" % exitCode,
    if failDueToStderr:
        # Explain why a zero exit code still counts as a failure.
        print >>output, "(but there was output on stderr)"
    else:
        print >>output
    if out:
        print >>output, "Command Output (stdout):"
        print >>output, "--"
        output.write(out)
        print >>output, "--"
    if err:
        print >>output, "Command Output (stderr):"
        print >>output, "--"
        output.write(err)
        print >>output, "--"
    return (status, output.getvalue())
def executeTclTest(test, litConfig):
    """Run a Tcl-style integrated test and return (status, output)."""
    if test.config.unsupported:
        return (Test.UNSUPPORTED, 'Test is unsupported')

    # Parse the test script, normalizing slashes in substitutions on Windows
    # (since otherwise Tcl style lexing will treat them as escapes).
    parsed = parseIntegratedTestScript(test, normalize_slashes=kIsWindows)
    if len(parsed) == 2:
        # Early (status, message) result from the parser.
        return parsed
    script, isXFail, tmpBase, execdir = parsed

    if litConfig.noExecute:
        return (Test.PASS, '')

    # Make sure the output directory exists before running anything.
    Util.mkdir_p(os.path.dirname(tmpBase))

    res = executeTclScriptInternal(test, litConfig, tmpBase, script, execdir)
    if len(res) == 2:
        return res
    out, err, exitCode = res

    # Tcl commands are considered to fail if there is any standard error
    # output, in addition to a nonzero exit code.
    succeeded = exitCode == 0 and not err
    if isXFail:
        ok = not succeeded
        if ok:
            status = Test.XFAIL
        else:
            status = Test.XPASS
    else:
        ok = succeeded
        if ok:
            status = Test.PASS
        else:
            status = Test.FAIL
    if ok:
        return (status, '')

    # Tell formatTestOutput why a run with exit code 0 still failed.
    failDueToStderr = exitCode == 0 and err
    return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
def executeShTest(test, litConfig, useExternalSh):
    """Run an sh-style integrated test and return (status, output)."""
    if test.config.unsupported:
        return (Test.UNSUPPORTED, 'Test is unsupported')

    parsed = parseIntegratedTestScript(test)
    if len(parsed) == 2:
        # Early (status, message) result from the parser.
        return parsed
    script, isXFail, tmpBase, execdir = parsed

    if litConfig.noExecute:
        return (Test.PASS, '')

    # Make sure the output directory exists before running anything.
    Util.mkdir_p(os.path.dirname(tmpBase))

    if useExternalSh:
        res = executeScript(test, litConfig, tmpBase, script, execdir)
    else:
        res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
    if len(res) == 2:
        return res
    out, err, exitCode = res

    # Sh tests succeed or fail purely on exit code.
    succeeded = exitCode == 0
    if isXFail:
        ok = not succeeded
        if ok:
            status = Test.XFAIL
        else:
            status = Test.XPASS
    else:
        ok = succeeded
        if ok:
            status = Test.PASS
        else:
            status = Test.FAIL
    if ok:
        return (status, '')

    # Sh tests are not considered to fail just from stderr output.
    failDueToStderr = False
    return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
def flip(image):
    """Flip an image tensor along its last (width) dimension.

    Using dim -1 instead of the hard-coded dim 3 keeps behavior identical
    for 4-D NCHW batches while also accepting unbatched 3-D CHW tensors.
    """
    return torch.flip(image, [-1])
def contrast(image):
    """Scale image intensities by a random factor in [0.8, 1.2].

    NOTE(review): identical to brightness(); a true contrast jitter would
    scale around the image mean — confirm this is intentional.
    """
    factor = 1.0 + random.uniform(-0.2, 0.2)
    return image * factor
def brightness(image):
    """Scale overall image intensity by a random factor of up to +/-20%."""
    gain = 1.0 + random.uniform(-0.2, 0.2)
    return image * gain
def gaussian_noise(image):
    """Add zero-mean Gaussian noise with standard deviation 0.2."""
    noise = torch.randn(*image.shape).to(image.device)
    return image + noise * 0.2
def random_roll(image):
    """Circularly shift an image by a random offset along both spatial dims.

    Each shift is drawn uniformly from [-dim // 8, dim // 8].  Fix: the
    horizontal shift range was previously derived from the height (H // 8),
    which mis-sized the range for non-square images; it now uses the width.
    Behavior is unchanged for square inputs (same draw order and ranges).
    """
    H, W = image.shape[-2:]
    max_v, max_h = H // 8, W // 8
    # Vertical offset is drawn first, matching the original draw order.
    dv = random.randint(-max_v, max_v)
    dh = random.randint(-max_h, max_h)
    return torch.roll(image, (dv, dh), (-2, -1))
|
import cv2, time
import pandas as pd
import numpy as np
from math import atan2, tan, radians
from pick_gt_px import get_args
def make_point(target_pixel, px, v_center, h_center, v_fov, h_fov):
    """Back-project a depth-image pixel to a 3-D point (x, y, z) in mm.

    target_pixel is (row, col); px is the depth channel indexed as
    px[row, col] with 8-bit values mapping 0..255 -> 0..1000 mm.
    v_center/h_center are the image center coordinates and v_fov/h_fov
    the half field-of-view angles in radians.
    """
    row, col = target_pixel
    # center / tan(half-FOV) acts as the focal length in pixels; atan2
    # then gives the ray angle off the optical axis for each direction.
    vertical_angle = atan2(v_center - row, v_center / tan(v_fov))
    horizontal_angle = atan2(col - h_center, h_center / tan(h_fov))
    depth = px[row, col] * 1000. / 255.
    x = depth * tan(horizontal_angle)
    y = depth * tan(vertical_angle)
    return x, y, depth
def main():
    """Build 3-D ground-truth points from hand-picked pixel locations.

    Reads a background depth image and a CSV of picked pixel coordinates,
    back-projects each pixel to a 3-D point via make_point(), marks each
    picked pixel with a small green cross on the image, and writes the
    annotated image plus per-point depth and XYZ CSV files.
    """
    args = get_args()
    data_path = args.data_path
    # Input files.
    bg_img_fname = data_path + "bg_depth.png"
    gt_px_fname = data_path + "ground_truth_px.csv"
    # Output files.
    gt_depth_fname = data_path + "gt_depth.csv"
    gt_fname = data_path + "ground_truth.csv"
    gt_img = data_path + "ground_truth.png"
    bg_frame = cv2.imread(bg_img_fname)
    gt_pxs = np.array(pd.read_csv(gt_px_fname, header=None))
    height, width, channel = bg_frame.shape
    print(height, width)
    # NOTE(review): hard-coded for exactly 33 ground-truth points; if the
    # CSV holds a different count this will raise or leave zero rows —
    # consider gt_pxs.shape[0] instead.
    ground_truth = np.zeros((33, 3))
    # Optical center of the image in pixel coordinates.
    v_center = (height - 1) / 2
    h_center = (width - 1) / 2
    # Half field-of-view angles (vertical, horizontal) — presumably from
    # the camera spec; TODO confirm.
    v_fov = radians(22.5)
    h_fov = radians(29)
    green = np.array([0, 255, 0])
    gt_depth = np.empty((gt_pxs.shape[0],))
    for i, gt_px in enumerate(gt_pxs):
        # gt_px appears to be (x, y) = (column, row) — it is swapped to
        # (row, column) for make_point and indexing; TODO confirm against
        # pick_gt_px's output order.
        ground_truth[i, :] = make_point(tuple((gt_px[1], gt_px[0])), bg_frame[:, :, 0], v_center, h_center, v_fov, h_fov)
        # Depth in mm recovered from the 8-bit depth channel (0..255 -> 0..1000).
        gt_depth[i] = bg_frame[gt_px[1] + 0, gt_px[0] + 0, 0] * 1000. / 255
        # Paint a small green cross at the picked pixel for visual checking.
        bg_frame[gt_px[1] + 0, gt_px[0] + 0, :] = green
        bg_frame[gt_px[1] + 1, gt_px[0] + 0, :] = green
        bg_frame[gt_px[1] + 0, gt_px[0] + 1, :] = green
        bg_frame[gt_px[1] - 1, gt_px[0] - 0, :] = green
        bg_frame[gt_px[1] - 0, gt_px[0] - 1, :] = green
    cv2.imwrite(gt_img, bg_frame)
    pd.DataFrame(gt_depth).to_csv(gt_depth_fname, header=False, index=False)
    pd.DataFrame(ground_truth).to_csv(gt_fname, header=False, index=False)
if __name__ == "__main__":
    main()
|
import os, sys
sys.path.append('../general')
sys.path.append('../neuromorphic')
from threadhandler import *
from neuromorphic import *
from tactileboard import *
from collections import deque
from iLimb import *
# Connect to the iLimb prosthetic hand over serial and open all five fingers.
ilimb = iLimbController('COM16')
ilimb.connect()
# Open every digit; 297 is the per-digit control value — TODO confirm units.
ilimb.control(['thumb','index','middle','ring','little'],['open']*5,[297]*5)
time.sleep(1)
#ilimb.control(['thumb','index'],['close']*2,[297]*2)
#time.sleep(0.1)
#ilimb.control(['middle','ring','little'],['close']*3,[297]*3)
#a = input('')
#-------------------------------------------------------------------------------
# Tactile sensor board: start streaming, run a short calibration pass,
# then load and enable the calibration data.
rightTactile = TactileBoard('COM59',_sensitivity=TBCONSTS.DEFAULT_SENS)
rightTactile.start()
rightTactile.startCalibration(100)
time.sleep(0.5)
rightTactile.loadCalibration()
rightTactile.useCalib = True
# Shared state used by the update() polling loop below.
auxcounter = 0
findex = []
fthumb = []
flagType = 0
pos = 0
dataQueue = deque()
flagIndexOk = False
flagThumbOk = False
flagFingers = False
# Main loop control flag: cleared by update() once the grasp finishes.
flagRun = True
thProc = None
# [finger name, sensor index, force threshold] triples for the grasp loop.
fingerArray = [['thumb',2,0.2],['index',4,0.1]]#,['ring',3,0.1]]
#-------------------------------------------------------------------------------
def update():
    """Poll the tactile board queue and drive the iLimb power grasp.

    Runs repeatedly on a ThreadHandler thread: drains the tactile sample
    queue and feeds each sample to the iLimb feedback grasp until it
    reports completion, then clears flagRun to stop further work.
    """
    global rightTactile,ilimb,findex,fthumb,auxcounter,pos,flagIndexOk, flagThumbOk, flagFingers,thProc, flagRun, fingerArray
    if flagRun is True:
        # Drain whatever samples have accumulated since the last call.
        q = rightTactile.getData()
        n = len(q)
        for k in range(n): #only one tactile sensor being used
            tactileSample = q.popleft()
            if flagRun:
                #ret = ilimb.doFeedbackPowerGrasp(tactileSample,fingerArray,5)
                ret = ilimb.doFeedbackPowerGrasp(tactileSample,fingerArray,5)
                if ret is True:
                    # Grasp complete: stop processing further samples.
                    print('finished main')
                    flagRun = False
                    break
    # Brief sleep so the polling thread does not spin at 100% CPU.
    time.sleep(0.001)
# Wait for the user before starting the feedback loop.
a = input('')
thProc = ThreadHandler(update)
# NOTE(review): rightTactile.start() is already called during setup above;
# this second call looks redundant — confirm it is harmless.
rightTactile.start()
thProc.start()
# Block the main thread until the user stops the experiment.
a = input('press enter to stop')
|
from __future__ import unicode_literals
from ..permissions import (
permission_document_version_revert, permission_document_version_view,
)
from .base import GenericDocumentViewTestCase
from .literals import TEST_VERSION_COMMENT
from .mixins import DocumentVersionTestMixin
class DocumentVersionTestCase(DocumentVersionTestMixin, GenericDocumentViewTestCase):
    """View-level tests for document version listing and reverting.

    Verifies that the version list and version revert views return 404
    without the relevant permission and succeed once access is granted
    on the test document.
    """
    def _request_document_version_list_view(self):
        # GET the version list view for the test document (helper from the
        # view test case mixin — presumably wraps self.client.get).
        return self.get(
            viewname='documents:document_version_list',
            kwargs={'pk': self.test_document.pk}
        )
    def test_document_version_list_no_permission(self):
        """Without the view permission the list view must 404."""
        self._upload_new_version()
        response = self._request_document_version_list_view()
        self.assertEqual(response.status_code, 404)
    def test_document_version_list_with_access(self):
        """With view access the list shows the new version's comment."""
        self._upload_new_version()
        self.grant_access(
            obj=self.test_document, permission=permission_document_version_view
        )
        response = self._request_document_version_list_view()
        self.assertContains(
            response=response, text=TEST_VERSION_COMMENT, status_code=200
        )
    def _request_document_version_revert_view(self, document_version):
        # POST to the revert view for the given version.
        return self.post(
            viewname='documents:document_version_revert',
            kwargs={'pk': document_version.pk}
        )
    def test_document_version_revert_no_permission(self):
        """Without the revert permission the revert must 404 and keep both versions."""
        first_version = self.test_document.latest_version
        self._upload_new_version()
        response = self._request_document_version_revert_view(
            document_version=first_version
        )
        self.assertEqual(response.status_code, 404)
        self.assertEqual(self.test_document.versions.count(), 2)
    def test_document_version_revert_with_access(self):
        """With revert access the view redirects and removes the newer version."""
        first_version = self.test_document.latest_version
        self._upload_new_version()
        self.grant_access(
            obj=self.test_document, permission=permission_document_version_revert
        )
        response = self._request_document_version_revert_view(
            document_version=first_version
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.test_document.versions.count(), 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.