text stringlengths 8 6.05M |
|---|
#!/usr/bin/python
def largest_pell_d(limit=1000):
    """Return the D <= limit whose Pell equation x^2 - D*y^2 = 1 has the
    largest fundamental solution x (Project Euler problem 66).

    Uses the continued-fraction expansion of sqrt(D): successive convergents
    num/den are generated until one satisfies the Pell equation. Perfect
    squares are skipped because they admit no non-trivial solution.

    Parameters
    ----------
    limit : int, optional
        Largest D to consider (default 1000, as in the original script).

    Returns
    -------
    int
        The value of D with the largest minimal x.
    """
    best_x = 0   # largest fundamental x seen so far (was `max`, shadowing the builtin)
    best_d = 0
    for D in range(2, limit + 1):
        a0 = int(D ** 0.5)
        if a0 * a0 == D:
            continue  # perfect square: x^2 - D*y^2 = 1 has no solution with y > 0
        # Continued-fraction state: sqrt(D) = a0 + 1/(a1 + 1/(a2 + ...))
        m, d, a = 0, 1, a0
        # Convergent numerators/denominators (current and previous).
        num, den = a0, 1
        prev_num, prev_den = 1, 0
        while num * num - D * den * den != 1:
            m = d * a - m
            d = (D - m * m) // d
            a = (a0 + m) // d
            num, prev_num = a * num + prev_num, num
            den, prev_den = a * den + prev_den, den
        if num > best_x:
            best_x = num
            best_d = D
    return best_d


print(largest_pell_d())
|
# Raised if parsing fails because the page scheme has changed.
class PageSchemeException(Exception):
    """Signals that the scraped page no longer matches the expected layout."""

    def __init__(self, value):
        # Forward to Exception so args/repr/pickling behave normally.
        super().__init__(value)
        self.value = value

    def __str__(self):
        # str() guards against non-string payloads: returning a non-str from
        # __str__ raises TypeError in the original implementation.
        return str(self.value)
# Raised if an argument of the parser function is not valid.
class ArgumentError(Exception):
    """Signals that a parser function received an invalid argument."""

    def __init__(self, value):
        # Forward to Exception so args/repr/pickling behave normally.
        super().__init__(value)
        self.value = value

    def __str__(self):
        # str() guards against non-string payloads: returning a non-str from
        # __str__ raises TypeError in the original implementation.
        return str(self.value)
|
from api_handler import api_handler
import hashlib
from logger import logger
class Dataset:
    """A sensor-data CSV file: header parsing, validation and checksumming.

    The first line of the file is a comma-separated header naming the
    sensor columns, including a mandatory "time" column which is stripped
    from the sensor list.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        self.checksum = None
        # Calculated by calculate_checksum(), deferred for performance reasons.
        self.file_length = None
        with open(filepath) as file:
            head_line = file.readline()
        self.sensors = [col.strip() for col in head_line.strip().split(",")]
        # Raises ValueError when the header lacks a "time" column --
        # presumably such files are malformed; confirm before relaxing.
        self.sensors.remove("time")

    def invalid_sensors(self):
        """Return header abbreviations that are unknown to the database.

        BUG FIX: the original appended to an undefined name
        (`sensors_not_in_db`), raising NameError on the first mismatch.
        """
        sensors_in_db = api_handler.sens_abbrs
        return [abbr for abbr in self.sensors if abbr not in sensors_in_db]

    def report_invalid_sensors(self):
        """Print a banner describing sensors that failed validation."""
        print("*****************************************************************************************************************")
        print("Sensor name problem in file ", self.filepath)
        print("Sensors with these abbreviations don't exist in the database: ", self.invalid_sensors())
        print("*****************************************************************************************************************\n")

    def calculate_checksum(self):
        """Compute and cache the file's SHA-256 hex digest and byte length."""
        with open(self.filepath, 'rb') as file:
            file_content = file.read()
        checksum = hashlib.sha256(file_content).hexdigest()
        self.file_length = len(file_content)
        self.checksum = checksum
        logger.success("Checksum calculated for file {}!".format(self.filepath))
        return checksum
|
from github import Github
from jenkins import Jenkins
# Credentials / endpoints -- fill in before running.
git_access_token = ""
# BUG FIX: this literal was unterminated (`"` with no closing quote),
# which made the whole module a syntax error.
jenkins_server_url = ""
jenkins_username = ""
jenkins_password = ""

git_repo_name = "docker-test_test"
# Docker repo names may not contain underscores or the "docker-" prefix.
docker_repo_name = git_repo_name.replace("_", "-").replace("docker-", "")
jenkins_job_name = "docker-build-{0}".format(git_repo_name.replace("docker-", ""))
def create_jenkins_job_xml(github_repo_url):
    """Render the Jenkins pipeline-job config.xml for a Git-backed job.

    Parameters
    ----------
    github_repo_url : str
        Clone URL substituted into the job's SCM configuration.

    Returns
    -------
    str
        The job definition XML expected by the Jenkins job-creation API.
    """
    template = """ \
<?xml version='1.0' encoding='UTF-8'?>
<flow-definition plugin="workflow-job@2.3">
<description></description>
<keepDependencies>false</keepDependencies>
<properties/>
<definition class="org.jenkinsci.plugins.workflow.cps.CpsScmFlowDefinition" plugin="workflow-cps@2.9">
<scm class="hudson.plugins.git.GitSCM" plugin="git@2.5.2">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<url>{0}</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>*/master</name>
</hudson.plugins.git.BranchSpec>
</branches>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<submoduleCfg class="list"/>
<extensions/>
</scm>
<scriptPath>Jenkinsfile</scriptPath>
</definition>
<triggers/>
</flow-definition> \
"""
    return template.format(github_repo_url)
def create_github_repo(repo_name, access_token):
    """Create a public GitHub repository for the authenticated user.

    Returns the HTTPS clone URL of the newly created repository.
    """
    user = Github(access_token).get_user()
    new_repo = user.create_repo(repo_name, private=False)
    return new_repo.clone_url
def create_jenkins_job(name, github_clone_url, jenkins_url, user, passwd):
    """Create a Jenkins pipeline job pointed at the given Git clone URL."""
    server = Jenkins(jenkins_url, username=user, password=passwd)
    server.create_job(name, create_jenkins_job_xml(github_clone_url))
if __name__ == "__main__":
    # Provision the GitHub repository first, then point a Jenkins job at it.
    repo_clone_url = create_github_repo(git_repo_name, git_access_token)
    create_jenkins_job(
        jenkins_job_name,
        repo_clone_url,
        jenkins_server_url,
        jenkins_username,
        jenkins_password,
    )
#!/usr/bin/env python
#coding=utf-8
__author__ = "yidong.lu"
__email__ = "yidongsky@gmail.com"
from django.conf.urls import url,include
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from nginx_collector.views import (
NginxViewSet,
StatusAPIView,
)
# BUG FIX: format_suffix_patterns was used below without an import, and the
# removed Django `patterns()` helper was called with unbalanced parentheses.
from rest_framework.urlpatterns import format_suffix_patterns

nginx_list = NginxViewSet.as_view({
    # BUG FIX: the original dict was missing the commas between entries,
    # which is a syntax error.
    'get': 'list',
    'post': 'create',
    # NOTE(review): original said 'destory'; assumed a typo for the standard
    # DRF ViewSet action name 'destroy' -- confirm against NginxViewSet.
    'delete': 'destroy',
})
status_list = StatusAPIView.as_view({
    'get': 'list'
})

urlpatterns = [
    # url(r'^status',StatusAPIView.as_view(),name='nginx-status'),
    # url(r'^', include('data_collector.urls',namespace="collector")),
    url(r'^nginx/$', nginx_list, name='nginx_list'),
    url(r'^status/$', status_list, name='status_list'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
# coding: utf-8
# Standard Python libraries
from typing import Optional, Union
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# https://github.com/usnistgov/atomman
import atomman.unitconvert as uc
# Local imports
from . import CalculationSubset
from ..input import value
class LammpsMinimize(CalculationSubset):
    """Handles calculation terms for performing a LAMMPS energy/force minimization"""

############################# Core properties #################################

    def __init__(self,
                 parent,
                 prefix: str = '',
                 templateheader: Optional[str] = None,
                 templatedescription: Optional[str] = None):
        """
        Initializes a calculation record subset object.

        Parameters
        ----------
        parent : iprPy.calculation.Calculation
            The parent calculation object that the subset object is part of.
            This allows for the subset methods to access parameters set to the
            calculation itself or other subsets.
        prefix : str, optional
            An optional prefix to add to metadata field names to allow for
            differentiating between multiple subsets of the same style within
            a single record
        templateheader : str, optional
            An alternate header to use in the template file for the subset.
        templatedescription : str, optional
            An alternate description of the subset for the templatedoc.
        """
        super().__init__(parent, prefix=prefix, templateheader=templateheader,
                         templatedescription=templatedescription)

        # Defaults mirror the LAMMPS minimize/min_modify defaults described
        # in templatekeys below.
        self.energytolerance = 0.0
        self.forcetolerance = 0.0
        self.maxiterations = 100000
        self.maxevaluations = 1000000
        self.maxatommotion = uc.set_in_units(0.01, 'angstrom')

############################## Class attributes ################################

    @property
    def energytolerance(self) -> float:
        """float: The energy tolerance to use for minimization"""
        return self.__energytolerance

    @energytolerance.setter
    def energytolerance(self, val: float):
        self.__energytolerance = float(val)

    @property
    def forcetolerance(self) -> float:
        """float: The force tolerance to use for minimization"""
        return self.__forcetolerance

    @forcetolerance.setter
    def forcetolerance(self, val: Union[str, float]):
        # Strings are parsed as "<number> <unit>" expressions; bare numbers
        # are taken as already being in working units.
        if isinstance(val, str):
            self.__forcetolerance = uc.set_literal(val)
        else:
            self.__forcetolerance = float(val)

    @property
    def maxiterations(self) -> int:
        """int: Max number of minimization iterations"""
        return self.__maxiterations

    @maxiterations.setter
    def maxiterations(self, val: int):
        val = int(val)
        # NOTE: assert is stripped under `python -O`; validation relied on here.
        assert val >= 0, 'maxiterations must be >= 0'
        self.__maxiterations = val

    @property
    def maxevaluations(self) -> int:
        """int: Max number of minimization evaluations"""
        return self.__maxevaluations

    @maxevaluations.setter
    def maxevaluations(self, val: int):
        val = int(val)
        # NOTE: assert is stripped under `python -O`; validation relied on here.
        assert val >= 0, 'maxevaluations must be >= 0'
        self.__maxevaluations = val

    @property
    def maxatommotion(self) -> float:
        """float: The max distance for atomic relaxations each iteration"""
        return self.__maxatommotion

    @maxatommotion.setter
    def maxatommotion(self, val: Union[str, float]):
        # Strings are parsed as "<number> <unit>" expressions; bare numbers
        # are taken as already being in working units.
        if isinstance(val, str):
            self.__maxatommotion = uc.set_literal(val)
        else:
            self.__maxatommotion = float(val)

    def set_values(self, **kwargs: any):
        """
        Allows for multiple class attribute values to be updated at once.

        Parameters
        ----------
        energytolerance : float, optional
            The energy tolerance to set for the minimization.
        forcetolerance : float or str, optional
            The force tolerance to set for the minimization. Can be given as
            a str that specifies force units.
        maxiterations : int, optional
            The maximum number of minimization iterations to use.
        maxevaluations : int, optional
            The maximum number of minimization maxevaluations to use.
        maxatommotion : float or str, optional
            The maximum atomic relaxation distance to allow for each iteration.
            Can be given as a str that specifies length units.
        """
        # Each assignment routes through the corresponding property setter,
        # so the same parsing/validation applies as for direct assignment.
        if 'energytolerance' in kwargs:
            self.energytolerance = kwargs['energytolerance']
        if 'forcetolerance' in kwargs:
            self.forcetolerance = kwargs['forcetolerance']
        if 'maxiterations' in kwargs:
            self.maxiterations = kwargs['maxiterations']
        if 'maxevaluations' in kwargs:
            self.maxevaluations = kwargs['maxevaluations']
        if 'maxatommotion' in kwargs:
            self.maxatommotion = kwargs['maxatommotion']

####################### Parameter file interactions ###########################

    def _template_init(self,
                       templateheader: Optional[str] = None,
                       templatedescription: Optional[str] = None):
        """
        Sets the template header and description values.

        Parameters
        ----------
        templateheader : str, optional
            An alternate header to use in the template file for the subset.
        templatedescription : str, optional
            An alternate description of the subset for the templatedoc.
        """
        # Set default template header
        if templateheader is None:
            templateheader = 'LAMMPS Energy/Force Minimization'

        # Set default template description
        if templatedescription is None:
            templatedescription = ' '.join([
                "Specifies the parameters and options associated with performing",
                "an energy and/or force minimization in LAMMPS."])

        super()._template_init(templateheader, templatedescription)

    @property
    def templatekeys(self) -> dict:
        """dict : The subset-specific input keys and their descriptions."""
        return {
            'energytolerance': ' '.join([
                "The energy tolerance to use for the minimization. This value is",
                "unitless and corresponds to the etol term for the LAMMPS",
                "minimize command. Default value is 0.0."]),
            'forcetolerance': ' '.join([
                "The force tolerance to use for the minimization. This value is",
                "in force units and corresponds to the ftol term for the LAMMPS",
                "minimize command. Default value is '0.0 eV/angstrom'."]),
            'maxiterations': ' '.join([
                "The maximum number of iterations to use for the minimization.",
                "This value corresponds to the maxiter term for the LAMMPS",
                "minimize command. Default value is 100000."]),
            'maxevaluations': ' '.join([
                "The maximum number of iterations to use for the minimization.",
                "This value corresponds to the maxeval term for the LAMMPS",
                "minimize command. Default value is 1000000."]),
            'maxatommotion': ' '.join([
                "The maximum distance that any atom can move during a minimization",
                "iteration. This value is in units length and corresponds to the",
                "dmax term for the LAMMPS min_modify command. Default value is",
                "'0.01 angstrom'."]),
        }

    @property
    def preparekeys(self) -> list:
        """
        list : The input keys (without prefix) used when preparing a calculation.
        Typically, this is templatekeys plus *_content keys so prepare can access
        content before it exists in the calc folders being prepared.
        """
        return list(self.templatekeys.keys()) + []

    @property
    def interpretkeys(self) -> list:
        """
        list : The input keys (without prefix) accessed when interpreting the
        calculation input file. Typically, this is preparekeys plus any extra
        keys used or generated when processing the inputs.
        """
        return self.preparekeys + [
            'force_unit',
            'length_unit',
        ]

    def load_parameters(self, input_dict: dict):
        """
        Interprets calculation parameters.

        Parameters
        ----------
        input_dict : dict
            Dictionary containing input parameter key-value pairs.
        """
        # Set default keynames
        keymap = self.keymap

        # Extract input values and assign default values
        self.energytolerance = input_dict.get(keymap['energytolerance'], 0.0)
        self.forcetolerance = value(input_dict, keymap['forcetolerance'],
                                    default_unit=self.parent.units.force_unit,
                                    default_term='0.0')
        self.maxiterations = input_dict.get(keymap['maxiterations'], 100000)
        self.maxevaluations = input_dict.get(keymap['maxevaluations'], 1000000)
        self.maxatommotion = value(input_dict, keymap['maxatommotion'],
                                   default_unit=self.parent.units.length_unit,
                                   default_term='0.01 angstrom')

        # Check that one of the tolerances is set
        if self.energytolerance == 0.0 and self.forcetolerance == 0.0:
            raise ValueError('energytolerance and forcetolerance cannot both be 0.0')

########################### Data model interactions ###########################

    def load_model(self, model: DM):
        """Loads subset attributes from an existing model."""
        run_params = model['calculation']['run-parameter']

        # Unit-bearing terms are stored as value/unit pairs in the model.
        self.energytolerance = run_params[f'{self.modelprefix}energytolerance']
        self.forcetolerance = uc.value_unit(run_params[f'{self.modelprefix}forcetolerance'])
        self.maxiterations = run_params[f'{self.modelprefix}maxiterations']
        self.maxevaluations = run_params[f'{self.modelprefix}maxevaluations']
        self.maxatommotion = uc.value_unit(run_params[f'{self.modelprefix}maxatommotion'])

    def build_model(self,
                    model: DM,
                    **kwargs: any):
        """
        Adds the subset model to the parent model.

        Parameters
        ----------
        model : DataModelDict.DataModelDict
            The record content (after root element) to add content to.
        kwargs : any
            Any options to pass on to dict_insert that specify where the subset
            content gets added to in the parent model.
        """
        # Check that one of the tolerances is set
        if self.energytolerance == 0.0 and self.forcetolerance == 0.0:
            raise ValueError('energytolerance and forcetolerance cannot both be 0.0')

        # Build paths if needed
        if 'calculation' not in model:
            model['calculation'] = DM()
        if 'run-parameter' not in model['calculation']:
            model['calculation']['run-parameter'] = DM()

        # Save values
        run_params = model['calculation']['run-parameter']
        run_params[f'{self.modelprefix}energytolerance'] = self.energytolerance
        run_params[f'{self.modelprefix}forcetolerance'] = uc.model(self.forcetolerance,
                                                                   self.parent.units.force_unit)
        run_params[f'{self.modelprefix}maxiterations'] = self.maxiterations
        run_params[f'{self.modelprefix}maxevaluations'] = self.maxevaluations
        run_params[f'{self.modelprefix}maxatommotion'] = uc.model(self.maxatommotion,
                                                                  self.parent.units.length_unit)

########################## Metadata interactions ##############################

    def metadata(self, meta: dict):
        """
        Converts the structured content to a simpler dictionary.

        Parameters
        ----------
        meta : dict
            The dictionary to add the subset content to
        """
        # Check that one of the tolerances is set
        if self.energytolerance == 0.0 and self.forcetolerance == 0.0:
            raise ValueError('energytolerance and forcetolerance cannot both be 0.0')

        prefix = self.prefix
        meta[f'{prefix}energytolerance'] = self.energytolerance
        meta[f'{prefix}forcetolerance'] = self.forcetolerance
        meta[f'{prefix}maxiterations'] = self.maxiterations
        meta[f'{prefix}maxevaluations'] = self.maxevaluations
        meta[f'{prefix}maxatommotion'] = self.maxatommotion

########################### Calculation interactions ##########################

    def calc_inputs(self, input_dict: dict):
        """
        Generates calculation function input parameters based on the values
        assigned to attributes of the subset.

        Parameters
        ----------
        input_dict : dict
            The dictionary of input parameters to add subset terms to.
        """
        # Check that one of the tolerances is set
        if self.energytolerance == 0.0 and self.forcetolerance == 0.0:
            raise ValueError('energytolerance and forcetolerance cannot both be 0.0')

        # Get ftol, dmax in LAMMPS units?
        input_dict['etol'] = self.energytolerance
        input_dict['ftol'] = self.forcetolerance
        input_dict['maxiter'] = self.maxiterations
        input_dict['maxeval'] = self.maxevaluations
        input_dict['dmax'] = self.maxatommotion
|
from settings import ENABLE_SEARCH, SITE_URL, THEME_NAME, STATIC_URL, ANALYTICS_CODE
from models import Category
def categories(request):
    """Context processor exposing every Category as `categories`."""
    return {'categories': Category.objects.all()}
def current_site_url(request):
    """Context processor exposing the configured site URL."""
    return dict(current_site_url=SITE_URL)
def current_theme(request):
    """Context processor exposing the active theme's static-files path."""
    theme_path = "%sthemes/%s" % (STATIC_URL, THEME_NAME)
    return {'current_theme': theme_path}
def current_theme_base(request):
    """Context processor exposing the active theme's base template path."""
    base_template = "themes/%s/base/base.html" % (THEME_NAME)
    return {'current_theme_base': base_template}
def search_enabled(request):
    """Context processor exposing whether site search is enabled."""
    return dict(search_enabled=ENABLE_SEARCH)
def analytics_code(request):
    """Context processor exposing the site's analytics snippet."""
    return dict(analytics_code=ANALYTICS_CODE)
from unittest import mock
import arrow
import pytest
import wrapt
from blazeutils.containers import LazyDict
import keg_storage
from keg_storage.backends.base import FileMode, ListEntry
from keg_storage.backends.sftp import SFTPRemoteFile
def sftp_mocked(**kwargs):
    """Decorator factory for SFTPStorage tests.

    The wrapped test method is invoked with keyword arguments:
      * sftp  -- an SFTPStorage subclass whose SSH client is a MagicMock,
      * m_sftp -- the mocked SFTP session returned by open_sftp(),
      * m_log -- the autospec mock replacing keg_storage.sftp.log.
    Any remaining **kwargs are forwarded to the FakeSFTPStorage constructor.
    """
    # adapter=lambda self: None makes the wrapped method present a
    # zero-argument signature (besides self), so pytest does not try to
    # inject fixtures for sftp/m_sftp/m_log.
    @wrapt.decorator(adapter=lambda self: None)
    def wrapper(wrapped, instance, args, _kwargs):
        @mock.patch('keg_storage.sftp.log', autospec=True, spec_set=True)
        def run_test(m_log):
            # Stand-in for the paramiko SSHClient normally built by
            # create_client().
            m_client = mock.MagicMock(
                spec=keg_storage.sftp.SSHClient,
                spec_set=keg_storage.sftp.SSHClient
            )

            class FakeSFTPStorage(keg_storage.sftp.SFTPStorage):
                # Bypass real SSH connection setup.
                def create_client(self):
                    return m_client

            fake_sftp = FakeSFTPStorage(
                host=kwargs.pop('host', 'foo'),
                username=kwargs.pop('username', 'bar'),
                key_filename=kwargs.pop('key_filename', None),
                known_hosts_fpath=kwargs.pop('known_hosts_fpath', 'known_hosts'),
                **kwargs
            )
            m_sftp = mock.MagicMock()
            # The client is used as a context manager and yields itself;
            # its open_sftp() yields the mocked SFTP session.
            m_client.__enter__.return_value = m_client
            m_client.open_sftp.return_value = m_sftp
            wrapped(
                sftp=fake_sftp,
                m_sftp=m_sftp,
                m_log=m_log
            )
        return run_test()
    return wrapper
class TestSFTPStorage:
    """Unit tests for keg_storage's SFTPStorage backend (fully mocked SSH)."""

    @mock.patch('keg_storage.backends.sftp.SSHClient')
    def test_default_port(self, m_ssh):
        """create_client() connects on port 22 when no port is given."""
        m_client = m_ssh.return_value
        storage = keg_storage.sftp.SFTPStorage(
            host='foo',
            username='bar',
            key_filename='localhost_id_rsa',
            known_hosts_fpath='known_hosts',
        )
        storage.create_client()
        m_client.load_system_host_keys.assert_called_once_with('known_hosts')
        m_client.connect.assert_called_once_with(
            'foo',
            port=22,
            username='bar',
            key_filename='localhost_id_rsa',
            allow_agent=False,
            look_for_keys=False
        )

    @mock.patch('keg_storage.backends.sftp.SSHClient')
    def test_port_set(self, m_ssh):
        """create_client() honors an explicit port argument."""
        m_client = m_ssh.return_value
        storage = keg_storage.sftp.SFTPStorage(
            host='foo',
            username='bar',
            key_filename='localhost_id_rsa',
            known_hosts_fpath='known_hosts',
            port=2200
        )
        storage.create_client()
        m_client.load_system_host_keys.assert_called_once_with('known_hosts')
        m_client.connect.assert_called_once_with(
            'foo',
            port=2200,
            username='bar',
            key_filename='localhost_id_rsa',
            allow_agent=False,
            look_for_keys=False
        )

    @sftp_mocked()
    def test_sftp_list_files(self, sftp, m_sftp, m_log):
        """list() converts listdir_attr entries into ListEntry records."""
        files = [
            LazyDict(filename='a.txt', st_mtime=1564771623, st_size=128),
            LazyDict(filename='b.pdf', st_mtime=1564771638, st_size=32768),
            LazyDict(filename='more.txt', st_mtime=1564771647, st_size=100)
        ]
        m_sftp.listdir_attr.return_value = files
        assert sftp.list('.') == [
            ListEntry(name='a.txt', last_modified=arrow.get(1564771623), size=128),
            ListEntry(name='b.pdf', last_modified=arrow.get(1564771638), size=32768),
            ListEntry(name='more.txt', last_modified=arrow.get(1564771647), size=100),
        ]
        assert m_log.info.mock_calls == []

    @sftp_mocked()
    def test_sftp_delete_file(self, sftp, m_sftp, m_log):
        """delete() removes the remote path and logs the action."""
        sftp.delete('/tmp/abc/baz.txt')
        m_sftp.remove.assert_called_once_with('/tmp/abc/baz.txt')
        m_log.info.assert_called_once_with("Deleting remote file '%s'", '/tmp/abc/baz.txt')

    @sftp_mocked()
    def test_open(self, sftp, m_sftp, m_log):
        """open() yields an SFTPRemoteFile bound to the mocked session."""
        file = sftp.open('/tmp/foo.txt', FileMode.read)
        assert isinstance(file, SFTPRemoteFile)
        assert file.mode == FileMode.read
        assert file.path == '/tmp/foo.txt'
        assert file.sftp is m_sftp
        m_sftp.open.assert_called_once_with('/tmp/foo.txt', 'rb')

    @sftp_mocked()
    def test_read_operations(self, sftp, m_sftp, m_log):
        """Reads pass through to the remote handle; close happens on exit."""
        m_file = m_sftp.open.return_value
        m_file.read.return_value = b'some data'
        with sftp.open('/tmp/foo.txt', FileMode.read) as file:
            # The mock returns the full payload regardless of the size hint.
            assert file.read(4) == b'some data'
            m_file.read.assert_called_once_with(4)
            m_file.close.assert_not_called()
        m_file.close.assert_called_once_with()

    @sftp_mocked()
    def test_read_not_permitted(self, sftp, m_sftp, m_log):
        """Reading a write-mode file raises IOError."""
        with sftp.open('/tmp/foo.txt', FileMode.write) as file:
            with pytest.raises(IOError, match="File not opened for reading"):
                file.read(1)

    @sftp_mocked()
    def test_write_operations(self, sftp, m_sftp, m_log):
        """Writes pass through to the remote handle; close happens on exit."""
        m_file = m_sftp.open.return_value
        with sftp.open('/tmp/foo.txt', FileMode.write) as file:
            file.write(b'some data')
            m_file.write.assert_called_once_with(b'some data')
            m_file.close.assert_not_called()
        m_file.close.assert_called_once_with()

    @sftp_mocked()
    def test_write_not_permitted(self, sftp, m_sftp, m_log):
        """Writing a read-mode file raises IOError."""
        with sftp.open("/tmp/foo.txt", FileMode.read) as file:
            with pytest.raises(IOError, match="File not opened for writing"):
                file.write(b"")
|
import csv

data_list = [
    {"id": 10001, "wname": "python", "year": "2001"},
    {"id": 10002, 'wname': 'UI', 'year': '2002'},
    {"id": 10004, 'wname': 'AI', 'year': '2003'}
]


def write_records(records, path="ws.csv"):
    """Write a list of record dicts to a CSV file with a header row.

    Parameters
    ----------
    records : list[dict]
        Rows keyed by 'id', 'wname' and 'year'.
    path : str, optional
        Output file path (default 'ws.csv', as in the original script).
    """
    with open(path, "w", newline="") as file:
        heading = ['id', 'wname', 'year']
        obj = csv.DictWriter(file, fieldnames=heading)
        obj.writeheader()
        # BUG FIX: the original called writerow() with the whole list, which
        # fails because DictWriter.writerow expects a single dict; use
        # writerows() for a sequence of rows.
        obj.writerows(records)


try:
    write_records(data_list)
except Exception as e:
    print(str(e))
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pkg_resources import WorkingSet
from pants.base.exceptions import BuildConfigurationError
from pants.bin.options_initializer import OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
def test_invalid_version():
    """Bootstrapping with an impossible pants version must fail setup."""
    bootstrapper = OptionsBootstrapper(args=['--pants-version=99.99.9999'])
    with pytest.raises(BuildConfigurationError):
        OptionsInitializer(bootstrapper, WorkingSet()).setup()
|
# Capture-filter expression for SSH traffic -- presumably a BPF filter fed
# to a packet sniffer (e.g. tcpdump/libpcap); TODO confirm the consumer.
ssh = "tcp port 22"
from django.shortcuts import render_to_response
from django.template import RequestContext
def index(request):
    """Render the landing page with the standard request context."""
    ctx = RequestContext(request)
    return render_to_response('index.html', ctx)
def contact(request):
    """Render the contact page with the standard request context."""
    ctx = RequestContext(request)
    return render_to_response('contact.html', ctx)
# Multi-layer fully-connected neural network: training-data setup.
import torch
from torch import nn,optim
from torch.autograd import Variable
import net

# Hyperparameters
learn_rate=1e-2
epoch_size=700  # rows per mini-batch (named "epoch" but used as a batch size)

# Load the training data from train.csv
import csv
with open('./data/train.csv') as f :
    lines = csv.reader(f)
    label, attr = [], []
    for line in lines :
        if lines.line_num == 1 :
            continue  # skip the CSV header row
        label.append(int(line[0]))  # first column is the class label
        attr.append([float(j) for j in line[1:]])  # remaining columns are features

# Split the data into 60 (epoches) * 700 (rows) mini-batches.
epoches = []
for i in range(0, len(label), epoch_size):
    torch_attr = torch.FloatTensor(attr[i: i + epoch_size])
    torch_label = torch.LongTensor(label[i: i + epoch_size])
    epoches.append((torch_attr, torch_label))

# Model 784 -> 300 -> 100 -> 10 from the local `net` module, with
# cross-entropy loss and plain SGD.
model=net.simpleNet(28*28,300,100,10)
criterion=nn.CrossEntropyLoss()
optimizer=optim.SGD(model.parameters(),lr=learn_rate)
# Start training
def train():
    """Run one optimization pass over every mini-batch in `epoches`.

    Returns
    -------
    (float, float)
        Average loss per batch and average accuracy (fraction correct).
    """
    batch_count, loss_sum, correct_sum = 0, 0.0, 0
    for batch_attr, batch_label in epoches:
        batch_count += 1
        inputs = Variable(batch_attr)
        target = Variable(batch_label)
        output = model(inputs)
        loss = criterion(output, target)
        # reset gradients
        optimizer.zero_grad()
        # backward pass
        loss.backward()
        # update parameters
        optimizer.step()
        # BUG FIX: `loss.data[0]` raises on 0-dim tensors in modern PyTorch;
        # .item() is the supported way to read a scalar loss.
        loss_sum += loss.item()
        _, pred = torch.max(output.data, 1)
        correct_sum += torch.eq(pred, batch_label).sum().item()
    loss_avg = loss_sum / float(batch_count)
    cort_num_avg = correct_sum / float(batch_count) / float(epoch_size)
    return loss_avg, cort_num_avg
# Run the model over the whole dataset 300 times, logging progress.
loss, correct = [], []
training_time = 300
for i in range(1, training_time + 1) :
    loss_avg, correct_num_avg = train()
    loss.append(loss_avg)
    if i< 20 or i % 20 == 0 :
        print('--- train time {} ---'.format(i))
        print('average loss = {:.4f}'.format(loss_avg))
        print('average correct number = {:.4f}'.format(correct_num_avg))
    # NOTE(review): the original indentation was ambiguous; accuracy is
    # recorded once per pass here -- confirm it was not meant to be inside
    # the `if` above.
    correct.append(correct_num_avg)
|
from tools import ReadConfig,ReadJson
from common import FormatConversion


class DisposeApi:
    """Builds request URLs for interface test cases, resolving placeholders
    that depend on config settings, shared "RelyOn" JSON data, or the
    per-case JSON data.
    """

    def __init__(self, casename=None):
        # Handles for config access, format conversion, and the two JSON sources.
        self.readconfighandle = ReadConfig.ReadConfig()
        self.version = self.readconfighandle.get_data('INTERFACE','version_num')
        self.formatconversionhandle = FormatConversion.FormatConversion()
        self.readrelyjsonhandle = ReadJson.ReadJson('RelyOn','RELYON')
        self.readcasejsonhandle = ReadJson.ReadJson(casename,'CASE')

    # Build the complete URL of an interface described by `data`.
    # The keys of `data` are Chinese column names: '模块' = module,
    # '请求API' = API path template, 'API是否依赖' = whether the API depends
    # on other fields, 'API依赖字段'/'API被依赖字段' = dependent/depended-on
    # field lists. They are data, so they must not be renamed.
    def get_url(self, data):
        # Pick the base URL by module name ('system', 'app', or anything else).
        if data['模块'] == 'system':
            url = self.readconfighandle.get_data('INTERFACE','url_system')
        elif data['模块'] == 'app':
            url = self.readconfighandle.get_data('INTERFACE','url_app')
        else:
            url = self.readconfighandle.get_data('INTERFACE','url_other')
        case_api = data['请求API']
        case_api_isrely = data['API是否依赖']
        if case_api_isrely == '是':  # '是' means "yes, has dependencies"
            # Resolve each placeholder from the case JSON or the shared
            # "rely" JSON, chosen by the "case_"/"rely_" prefix of the
            # depended-on field name.
            case_data = {}
            case_api_rely = data['API依赖字段'].split(',')
            case_api_relyed = data['API被依赖字段'].split(',')
            for i in range(len(case_api_rely)):
                if "case_" in case_api_relyed[i]:
                    case_data[case_api_rely[i]] = self.get_case_json(case_api_relyed[i])
                elif "rely_" in case_api_relyed[i]:
                    case_data[case_api_rely[i]] = self.get_rely_json(case_api_relyed[i])
            case_url = url + case_api.format(version = self.version, **case_data)
        else:
            case_url = url + case_api.format(version = self.version)
        return case_url

    # Fetch a dependent value from the shared 'RelyOn' JSON.
    def get_rely_json(self, case_api_relyed):
        jsondata = self.readrelyjsonhandle.get_json_data()
        jsonrelydata = self.formatconversionhandle.FormatConversion(case_api_relyed, jsondata)
        return jsonrelydata

    # Fetch a dependent value from this case's own JSON.
    def get_case_json(self, case_api_relyed):
        jsondata = self.readcasejsonhandle.get_json_data()
        jsonrelydata = self.formatconversionhandle.FormatConversion(case_api_relyed, jsondata)
        return jsonrelydata
|
from utils import get_formatted_time
import time
import logging
import numpy as np
import json
import uuid
import h5py
import logging.handlers
import os
from config import RecorderConfig
# OpenCV is optional: it is only needed for video writing and live monitoring,
# so a missing install is reported but not fatal.
try:
    import cv2
except ImportError:  # narrowed from a bare `except`, which hid real errors
    print("OpenCV not installed! You should not use the monitor!")
class Recorder(object):
default_config = RecorderConfig().metadata
def __init__(self, config=None, logger=None, monitoring=False):
self.created_timestamp = time.time()
self.created_time = get_formatted_time(self.created_timestamp)
self.default_config.update(config)
self.config = self.default_config
self.logger = logging.getLogger() if not logger else logger
self.monitoring = monitoring
if self.monitoring:
try:
print("You are using Opencv-python library, version: ", cv2.__version__)
except:
raise ValueError("OpenCV not installed!")
if ("exp_name" in self.config) and (self.config["exp_name"]):
self.exp_name = self.config["exp_name"]
else:
self.exp_name = self.created_time
self.config["exp_name"] = self.exp_name
self.save_dir = self.config["save_dir"]
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
self.dataset_names = self.config["dataset_names"]
self.initialized_dataset = {k: False for k in self.config["dataset_names"]}
self.filename = self._get_file_name()
self.buffer_size = self.config["buffer_size"]
self.preassigned_buffer_size = self.buffer_size
self.compress = self.config["compress"] if self.config["compress"] else None
self.file = None
self.filemode = None
self.use_video_writer = self.config["use_video_writer"]
self.video_writer = None
self.videofile = None
if os.path.exists(self.filename):
self.file = self._get_file('a')
else:
self.file = self._get_file('w')
if self.use_video_writer:
self.videofile = self.filename.replace("h5", "avi")
self.logger.info("We will use OpenCV Video Writer to store video at {}.".format(self.videofile))
fourcc = cv2.VideoWriter_fourcc(*"XVID")
self.video_writer = cv2.VideoWriter(self.videofile, fourcc, 10, (1280, 960))
self.dataset_names = list(self.dataset_names)
self.dataset_names.remove('frame')
file = self.file
for ds_name in self.dataset_names:
if self.initialized_dataset[ds_name]:
break
shape = self.config["dataset_shapes"][ds_name]
shape = (self.preassigned_buffer_size, *shape)
file.create_dataset(ds_name, shape=shape,
dtype=self.config["dataset_dtypes"][ds_name], compression=self.compress,
chunks=shape, maxshape=(None, *shape[1:]))
file.attrs['filename'] = self.filename
file.attrs['created_timestamp'] = self.created_timestamp
file.attrs['created_time'] = self.created_time
file.attrs["video_file_name"] = self.videofile
config = json.dumps(config)
file.attrs['config'] = config
ds_names = json.dumps(self.dataset_names)
file.attrs["dataset_names"] = ds_names
timestamp = time.time()
timen = get_formatted_time(timestamp)
self.last_modified_timestamp = {k: timestamp for k in self.dataset_names}
self.last_modified_time = {k: timen for k in self.dataset_names}
self.buffers = {k: [] for k in self.dataset_names}
self.accumulated_stored_samples = {k: 0 for k in self.dataset_names}
info_msg = "{}: HDF5 file {} is ready! With metadata {} and datasets {}".format(self.last_modified_time,
self.filename,
config, ds_names)
self.logger.info(info_msg)
def _get_file(self, mode='a'):
if self.file:
self.file.close()
# file = h5py.File(self.filename, mode) # for cache.
file = h5py.File(self.filename, mode, rdcc_nbytes=300 * 1024 ** 2) # 300M for cache.
for ds_name in self.dataset_names:
if ds_name in file:
self.initialized_dataset[ds_name] = True
self.filemode = mode
return file
def add(self, data_dict, force=False):
assert isinstance(data_dict, dict)
assert self.filemode is "a" or "w"
if set(data_dict).add("timestamp") != set(self.dataset_names).add("frame"):
error_msg = "data_dict is required have same keys as dataset_names, which is {}" \
"but only have {}. It may cause the timestamp system mess up!".format(self.dataset_names,
data_dict.keys())
self.logger.error(error_msg)
raise ValueError(error_msg)
self._append_to_buffer(np.array((time.time(),)), "timestamp", force)
# add_to_dataset_flag = False
for k, data in data_dict.items():
assert isinstance(data, np.ndarray), "Each entry of data_dict should be a np.ndarray, but get {}.".format(
type(data))
if k == 'frame' and self.use_video_writer:
self.video_writer.write(data)
continue
self._append_to_buffer(data, k, force)
if len(set(self.accumulated_stored_samples.values())) != 1:
error_msg = "dataset unbalance! The length of each dataset are: {}, but they should be the same!".format(
self.accumulated_stored_samples)
self.logger.error(error_msg)
raise ValueError(error_msg)
if self.monitoring:
cv2.imshow("Recorder", data_dict["frame"])
cv2.waitKey(1)
# if add_to_dataset_flag:
# self.accumulated_stored_samples += self.buffer_size
def _append_to_buffer(self, ndarray, dataset_name, force=False):
assert isinstance(ndarray, np.ndarray)
assert ndarray.size == 1 or ndarray.shape[
0] != 1, "Function add(ndarray) required a single data sample, not a batch!"
buffer = self.buffers[dataset_name]
if ndarray is not None:
buffer.append(ndarray)
if buffer and (force or len(buffer) == self.buffer_size):
self.logger.debug(
"Have collected {} data for dataset {}, prepare to store it. Totally passed {} data.".format(
len(buffer), dataset_name,
self.accumulated_stored_samples))
self._append_to_dataset(buffer, dataset_name)
buffer.clear()
return True
return False
def _append_to_dataset(self, buffer, dataset_name):
assert isinstance(buffer, list)
assert isinstance(dataset_name, str)
now = time.time()
file = self.file
ndarray = np.stack(buffer)
shape = ndarray.shape
dataset = file[dataset_name]
current_length = self.accumulated_stored_samples[dataset_name]
dataset_shape = dataset.shape
if dataset_shape[0] < current_length + self.buffer_size:
dataset.resize(dataset.shape[0] + self.preassigned_buffer_size, axis=0)
self.logger.debug(
"Prepare to update the dataset {}, in index range [{}, {}]".format(dataset_name, current_length,
current_length + shape[0]))
dataset[current_length: current_length + shape[0]] = ndarray
self.accumulated_stored_samples[dataset_name] += shape[0]
self.last_modified_timestamp[dataset_name] = time.time()
self.last_modified_time[dataset_name] = get_formatted_time(self.last_modified_timestamp[dataset_name])
dataset.attrs["last_modified_timestamp"] = json.dumps(self.last_modified_timestamp)
dataset.attrs["last_modified_time"] = json.dumps(self.last_modified_time)
self.logger.debug("Data has been appended to {} with shape {}. Current dataset {} shape {}.".format(
dataset.name, ndarray.shape, dataset_name, dataset.shape))
buffer.clear()
self.logger.debug("TIMING: recorder take {} seconds to store {} data.".format(time.time() - now, ndarray.shape))
return dataset_shape
def _get_file_name(self):
filename = os.path.join(self.save_dir, "{}.h5".format(self.exp_name))
return filename
def read(self):
    """Open the file read-only and return {name: dataset} for every configured dataset."""
    self.file = self._get_file('r')
    handle = self.file
    self.logger.debug(
        "Now we have everything in file: {}. self.dataset_names {}.".format(
            list(handle.keys()), self.dataset_names))
    return {name: handle[name] for name in self.dataset_names}
def display(self):
    """Replay the recorded frames with OpenCV, unless frames went to a video file."""
    self.file = self._get_file('r')
    if self.videofile:
        logging.error("We are using OpenCV for video storage! The video file is in: {}".format(self.videofile))
        return
    for frame in self.file["frame"]:
        cv2.imshow("Replay", frame)
        cv2.waitKey(100)
    cv2.destroyAllWindows()
def close(self):
    """Flush partially-filled buffers, release video/window resources, close the file."""
    if self.monitoring:
        cv2.destroyAllWindows()
    # Flush leftovers; datasets are kept aligned by clipping every buffer to
    # the number of buffered timestamps.
    if any([len(buffer) > 0 for buffer in self.buffers.values()]):
        length = len(self.buffers["timestamp"])
        for k, buffer in self.buffers.items():
            if len(buffer) != length:
                self.logger.warning("The buffer have different length as timestamp! We will clip those excessive.")
                # Rebinding clips only the local; the clipped copy is what is stored,
                # which is fine since the recorder is shutting down anyway.
                buffer = buffer[:length]
            self._append_to_dataset(buffer, k)
    if self.video_writer:
        self.video_writer.release()
    self.logger.info("Files has been saved at < {} >.".format(self.filename))
    self.file.close()
    self.logger.debug('Recorder Disconnected. The whole life span of recorder is {} seconds.'.format(
        time.time() - self.created_timestamp))
def build_recorder_process(config, data_queue, log_queue, log_level, monitoring=False):
    """Entry point for a recorder worker process.

    Consumes data dicts from *data_queue* until a None sentinel arrives and
    forwards them to a Recorder; log records are shipped through *log_queue*
    via a QueueHandler so the parent process can render them.
    """
    qh = logging.handlers.QueueHandler(log_queue)
    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(qh)
    r = Recorder(config, logger, monitoring=monitoring)
    try:
        while True:
            data_dict = data_queue.get()
            if data_dict is None:
                # Sentinel: the producer is done.
                break
            else:
                logger.debug("Recieve: {}".format(data_dict.keys()))
                r.add(data_dict)
    except EOFError:
        logging.error("EOFError happen! The recorder process is killed!")
        raise EOFError
    finally:
        r.close()
        logger.info("Prepare to delete data_queue and log_queue.")
        # cancel_join_thread avoids blocking process exit on unflushed queue data.
        data_queue.cancel_join_thread()
        log_queue.cancel_join_thread()
def test_generated_data():
    """Smoke test: push synthetic arrays through a Recorder; return the file path."""
    import uuid
    exp_name = "tmp_{}".format(uuid.uuid4())
    config = {"exp_name": exp_name,
              "buffer_size": 5,
              "save_dir": 'tmp',
              "compress": "gzip",
              "dataset_names": ("lidar_data", "extra_data", "frame", "timestamp"),
              "dataset_dtypes": {"lidar_data": "uint16", "extra_data": "float32", "frame": "uint8",
                                 "timestamp": "float64"},
              "dataset_shapes": {"lidar_data": (10, 100, 110), "extra_data": (10, 100, 110), "frame": (960, 1280, 3),
                                 "timestamp": (1,)},
              "use_video_writer": True
              }
    recorder = Recorder(config)
    for _ in range(103):
        sample = {name: np.ones([10, 100, 110], dtype=config["dataset_dtypes"][name])
                  for name in ("lidar_data", "extra_data")}
        sample["frame"] = np.random.randint(low=0, high=256, size=(960, 1280, 3), dtype=np.uint8)
        recorder.add(sample)
    path = recorder.filename
    recorder.close()
    return path
def test_camera_data():
    """Record 200 camera frames plus synthetic lidar/extra data; return the exp name.

    BUG FIX: `uuid` was only imported inside sibling functions / the __main__
    block, so calling this function on its own raised NameError; import it
    locally, mirroring test_generated_data.
    """
    from camera import setup_camera, close_camera, shot
    import uuid
    filename = "tmp_{}".format(uuid.uuid4())
    config = {"exp_name": filename,
              "buffer_size": 5,
              "save_dir": 'tmp',
              "compress": "gzip",
              "dataset_names": ("lidar_data", "extra_data", "frame", "timestamp"),
              "dataset_dtypes": {"lidar_data": "uint16", "extra_data": "float32", "frame": "uint8",
                                 "timestamp": "float64"},
              "dataset_shapes": {"lidar_data": (30600,), "extra_data": (10, 100, 110), "frame": (960, 1280, 3),
                                 "timestamp": (1,)},
              "use_video_writer": True
              }
    cam = setup_camera()
    r = Recorder(config, monitoring=True)
    for _ in range(200):
        data_dict = {k: np.ones([10, 100, 110], dtype=config["dataset_dtypes"][k]) for k in ("extra_data",)}
        data_dict["lidar_data"] = np.random.randint(0, 30000, size=(30600,), dtype=np.uint16)
        data_dict["frame"] = shot(cam)
        r.add(data_dict)
    r.close()
    # NOTE(review): unlike test_generated_data (which returns r.filename), this
    # returns the bare experiment name -- which is what test_display_and_read
    # expects as exp_name; confirm which convention is intended.
    return filename
def test_display_and_read(filename):
    """Re-open a previously recorded experiment, dump its datasets and replay it."""
    config = {"exp_name": filename,
              "buffer_size": 5,
              "save_dir": 'tmp',
              "compress": False,
              "dataset_names": ("lidar_data", "extra_data", "frame", "timestamp"),
              "dataset_dtypes": {"lidar_data": "uint16", "extra_data": "float32", "frame": "uint8",
                                 "timestamp": "float64"},
              "dataset_shapes": {"lidar_data": (10, 100, 110), "extra_data": (10, 100, 110), "frame": (960, 1280, 3),
                                 "timestamp": (1,)},
              }
    reader = Recorder(config, monitoring=True)
    datasets = reader.read()
    print(datasets)
    reader.display()
    reader.close()
def test_opencv():
    """Benchmark writing 200 random frames through OpenCV's VideoWriter."""
    import cv2
    codec = cv2.VideoWriter_fourcc(*"XVID")
    writer = cv2.VideoWriter("tmp/tmpx64.avi", codec, 10, (1280, 960))
    started = time.time()
    for _ in range(200):
        writer.write(np.random.randint(low=0, high=256, size=(960, 1280, 3), dtype=np.uint8))
    print(time.time() - started)
    writer.release()
# Manual smoke test: record from the camera, then replay what was written.
if __name__ == '__main__':
    from utils import setup_logger
    import uuid
    log_level = "DEBUG"
    setup_logger(log_level)
    filename = test_camera_data()
    test_display_and_read(filename)
|
import sys
import unittest
import mock
from mock import patch, MagicMock
import main
class TestMainBoilerplate(unittest.TestCase):
    """Exercises main.py's `if __name__ == '__main__'` wiring and CLI port handling."""

    def test_init_boilerplate(self):
        """init() must exit with whatever main() returns when run as a script."""
        with mock.patch.object(main, "main", return_value=42):
            with mock.patch.object(main, "__name__", "__main__"):
                with mock.patch.object(main.sys, 'exit') as fake_exit:
                    main.init()
                    assert fake_exit.call_args[0][0] == 42

    @patch('main.app')
    def test_main_boilerplate(self, mock_app: MagicMock):
        """Without CLI arguments the app listens on the default port 30000."""
        saved_argv = sys.argv
        sys.argv = ['main.py']
        try:
            main.main()
            mock_app.run.assert_called_with(host='0.0.0.0', port=30000)
        finally:
            sys.argv = saved_argv

    @patch('main.app')
    def test_main_boilerplate_with_argument(self, mock_app: MagicMock):
        """A numeric argv[1] overrides the port."""
        saved_argv = sys.argv
        sys.argv = ['main.py', "7777"]
        try:
            main.main()
            mock_app.run.assert_called_with(host='0.0.0.0', port=7777)
        finally:
            sys.argv = saved_argv

    @patch('main.app')
    @patch('builtins.print')
    def test_main_boilerplate_with_argument_error(self, mock_print: MagicMock, mock_app: MagicMock):
        """A non-numeric port argument must not start the app."""
        saved_argv = sys.argv
        sys.argv = ['main.py', "asdfasdkfajlsdfjlds"]
        try:
            main.main()
            mock_app.run.assert_not_called()
        finally:
            sys.argv = saved_argv
|
'''
!/usr/bin/env python
@author:nistha_jaiswal,archita_ganguly,ayanava_dutta,rohan_sasmal
-*-coding:utf-8-*-
'''
import streamlit as st
import numpy as np
import pandas as pd
import json
import datetime
import os
from sshtunnel import SSHTunnelForwarder
from rpscript.rpanalysis.rp_analysis_functions import putty_conn,fill_q2,pred_pres_or_not,get_final_input,missed_hdr_ids,match_features
from rpscript.rpanalysis.rp_analysis_functions import filter_data,not_included_pay_hdr_ids,check_amt,get_invoices,match_invoices,our_subset
server=[]
def main():
    """Streamlit page driving the monthly RP/CashApp monitoring analysis.

    Flow: identify the analyst by name -> load credentials and port mapping ->
    upload the assignment sheet -> connect over SSH (PuTTY ports) -> run the
    Q1/Q2 checks -> drill into one account / payment header / subset.

    NOTE(review): indentation below was reconstructed from a whitespace-mangled
    source; confirm the nesting of the Streamlit widgets against the original.
    """
    # Fixed locations for this month's inputs/outputs.
    root_dir ='/root/caa/rp/monitor/'
    cred_path='/root/caascript/res/cred.csv'
    current_date=datetime.date.today()
    month=current_date.strftime("%b")
    year=current_date.strftime("%Y")
    path_month_year=str(month)+"_"+str(year)
    query_output_path = root_dir+path_month_year+'/Output_Files/DB_Query_files/'
    # NOTE(review): hard-coded September 2021 report name -- looks stale; verify.
    final_report = root_dir+path_month_year+'/Output_Files/Script_Final_files/Sept_CashApp_Monitoring_report_2021-01-11.csv'
    account_port = '/root/caascript/res/port.csv'
    user_name=st.text_input("Enter your Name").title()
    if not os.path.exists(cred_path):
        # No credentials on disk: force the user back to the Login page.
        st.warning("Please enter credentials via Login page before proceeding")
        user_name=""
    flag1=0
    if user_name!="":
        cred = pd.read_csv(cred_path)
        port_dat=[]
        if os.path.exists(account_port):
            port_dat=pd.read_csv(account_port)
        else:
            st.warning("Please perform Monitoring for your Accounts before proceeding")
        if port_dat is not None:
            with st.spinner("Searching Accounts for " +str(user_name)):
                upl_data = st.file_uploader("Upload Data", type=["csv"])
            if upl_data is not None:
                # Restrict the uploaded sheet to this analyst's accounts.
                upl_data.seek(0)
                data= pd.read_csv(upl_data)
                data['Intern Name'] = data['Intern Name'].str.title()
                data = data[data['Intern Name'] == user_name]
                data['Account Id'] = data['Account Id'].astype('int')
                data.reset_index(drop=True, inplace=True)
                st.write("All assigned accounts:")
                st.write(data)
                if st.button("Connect to PUTTY"):
                    # Opens SSH tunnels and runs the Q1/Q2 health checks.
                    for_accounts,server=putty_conn(user_name, port_dat,cred)
                    with st.spinner("Checking Q1, Q2..."):
                        data,flag2 = fill_q2(data, query_output_path)
                        pred_pres_or_not(data, for_accounts,cred)
                    st.success("Final csv generated")
                if st.checkbox("Show Accounts in yellow or red:", value=False):
                    # Per-account drill-down for problematic (yellow/red) accounts.
                    data = filter_data(data)
                    data=data[['Account Id','Account Name','Schema', 'Intern Name','Top 3 %','status']]
                    st.write("Hey " +str(user_name)+"! you have these accounts for analysis:")
                    st.table(data)
                    accid = st.radio('Select account',(data['Account Id']))
                    st.write("Working on:",accid)
                    schema = data[data['Account Id'] == accid]['Schema'].reset_index(drop=True)[0]
                    outcome, our_subsets = missed_hdr_ids(schema,query_output_path)
                    pay_hdr_id = not_included_pay_hdr_ids(accid, outcome)
                    match = check_amt(pay_hdr_id, schema, query_output_path)
                    if len(match)!=0:
                        pay_id = st.radio('Select payment_hdr_id', (match))
                        with st.spinner("Fetching final input..."):
                            final_input=get_final_input(schema,accid,pay_id,port_dat,cred)
                            final_input=json.loads(final_input['final_input'][0])
                        invoices, selected_invoices = get_invoices(pay_hdr_id,match, final_input, schema, query_output_path)
                        flag,analyst_subset_id = match_invoices(pay_id, invoices, selected_invoices)
                        if flag==1:
                            st.write("Analyst subset_id : ",analyst_subset_id)
                            our_subset_id = our_subset(pay_id, our_subsets)
                            subset_id = st.radio(
                                'Select subset_id',
                                (our_subset_id))
                            match_features(final_input, subset_id, analyst_subset_id)
                            st.success("Check subset features")
                        else:
                            st.write(analyst_subset_id)
                        st.success("ANALYSIS DONE")
                    else:
                        st.error("No data found for Analysis")
    else:
        st.warning("Enter your name to begin")
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import operator
import math
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# load training data
training_data = pd.read_csv('data/train.csv')
# NOTE(review): test_data is loaded but never used in the visible script.
test_data = pd.read_csv('data/test.csv')
# column we are predicting
target = "Survived"
# select the columns we want to use as features (remove those below, encode sex)
columns = training_data.columns.tolist()
# drop the columns with names below (identifiers, not predictive features)
columns = [c for c in columns if c not in ["Name", "Ticket", "PassengerId"]]
# simple function for classifying cabins - encode the highest letter
def classifyCabin(x):
    """Encode a cabin string by deck letter: A->1 ... G->7, other letters -> 8.

    Non-strings (missing cabins appear as NaN floats) encode as 0. When a
    string mentions several decks, the alphabetically first one wins.
    """
    if not isinstance(x, str):
        return 0
    for rank, deck in enumerate("ABCDEFG", start=1):
        if deck in x:
            return rank
    return 8
def numCabins(x):
    """Count whitespace-separated cabin tokens; 0 for missing (non-string) values."""
    return len(x.split()) if isinstance(x, str) else 0
def preprocess(df, columns, target):
    """Encode/clean the Titanic dataframe; return (feature frame, target series).

    NOTE(review): mutates the *columns* list passed in (appends
    "CabinClassification") and depends on module-level classifyCabin/numCabins
    and sklearn's LabelEncoder.
    """
    le = LabelEncoder()
    # encode data for sex,
    df["Sex"] = le.fit_transform(df["Sex"])
    # replace unknown embarked with 'x'
    df["Embarked"] = df["Embarked"].fillna("x")
    # numerically encode embarked
    df["Embarked"] = le.fit_transform(df["Embarked"])
    #classify cabins
    columns.append("CabinClassification")
    df["CabinClassification"] = df.Cabin.apply(classifyCabin)
    # count cabins (replaces the raw cabin string with a count)
    df["Cabin"] = df.Cabin.apply(lambda x: numCabins(x))
    df = df[columns]
    # replace missing ages with mean
    averageAge = df["Age"].mean()
    df["Age"] = df["Age"].fillna(averageAge)
    # print range of vals in every col
    for col in df:
        print("{0} has {1} unique values".format(col ,len(df[col].unique())))
    dfnew = df.dropna() # lose all rows with missing values
    print("Dropping {0} rows due to missing values".format(len(df) - len(dfnew)))
    df = dfnew
    y = df[target] # target
    return df.drop(target, axis= 1), y
# Build features/target and hold out a random validation split.
X, y = preprocess(training_data, columns, target)
print("Using {0} Features".format(len(X.columns)))
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Candidate models, evaluated side by side.
names = ["Perceptron", "Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
         "Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
         "Naive Bayes", "QDA"]
classifiers = [
    linear_model.Perceptron(),
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
    DecisionTreeClassifier(max_depth=8),
    RandomForestClassifier(max_depth=5, n_estimators=10),
    MLPClassifier(alpha=1),
    AdaBoostClassifier(),
    GaussianNB(),
    QuadraticDiscriminantAnalysis()]
results = {}
for name, clf in zip(names, classifiers):
    clf.fit(X_train, y_train)
    score = clf.score(X_test, y_test)
    results[name] = score
best = max(results, key=results.get)
# NOTE(review): classifier .score() is mean accuracy, not R^2 -- the printed
# "R2 score" label is misleading.
print("{0} has R2 score of {1:.2f}".format(best, results[best]))
#!/usr/bin/env python3
# made for the sake of clarity - to understand these techniques, not performance
# you can learn more at https://nmap.org/book/man-port-scanning-techniques.html
from scapy.all import *
import argparse
import os
import sys
from datetime import datetime
conf.verb = 0
def check_host(ip):
    """Ping *ip* once; return True when it answers, abort the program otherwise.

    BUG FIX: the "host down" message was missing the 'm' that terminates the
    ANSI colour escape ("\\033[91[-]" -> "\\033[91m[-]"), so it printed garbage
    instead of switching to red.
    """
    ping = sr1(IP(dst=ip, ttl=20) / ICMP(), timeout=10)
    if ping is not None:
        print("\033[92m[+] Host", ip, "activo\033[0m")
        return True
    else:
        print("\033[91m[-] Host", ip, "no activo.\nParando ejecución...\033[0m")
        quit()
def get_ports(pstr):
    """Parse the -p argument: '22' -> 22, '22,80' -> [22, 80], '1-40' -> [1, 40]."""
    if "," in pstr:
        return [int(p) for p in pstr.split(",")]
    if "-" in pstr:
        start, end = pstr.split("-", 1)
        return [int(start), int(end)]
    return int(pstr)
def syn_stealth(port, ip):
    """TCP SYN ("half-open") scan of one port.

    SYN-ACK (0x12) -> open (a RST is sent so the handshake never completes);
    RST (0x14) -> closed; no reply or ICMP type-3 code 1/2/3/9/10/13 -> filtered.

    BUG FIX: the reply may carry no TCP layer (e.g. an ICMP unreachable), in
    which case the original's `synack.getlayer(TCP).flags` raised
    AttributeError; layer accesses are now guarded.
    """
    s_port = RandShort()
    # Probe from a random source port.
    synack = sr1(IP(dst=ip) / TCP(sport=s_port, dport=port, flags='S'), timeout=10)
    if synack is None:
        print("--> Puerto", port, "\033[93mfiltrado\033[0m")
        return
    tcp = synack.getlayer(TCP)
    if tcp is not None and tcp.flags == 0x12:
        # Open: tear down with RST instead of completing the handshake.
        sr1(IP(dst=ip) / TCP(sport=s_port, dport=port, flags='AR'), timeout=10)
        print("--> Puerto", port, "\033[92mabierto\033[0m")
    elif tcp is not None and tcp.flags == 0x14:  # RST
        print("--> Puerto", port, "\033[91mcerrado\033[0m")
    elif synack.haslayer(ICMP) and int(synack.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
        print("--> Puerto", port, "\033[93mfiltrado\033[0m")
def init_ss(port, ip):
    """Run a SYN-stealth scan over a single port (int), a range ([lo, hi]) or a list.

    BUG FIX: the range branch tested `len(port) < 2`, which the two-element
    [lo, hi] list produced by get_ports never matches, so range scans fell
    into the final else and crashed. A two-element comma list is still read
    as a range -- limitation inherited from get_ports' return format.
    """
    if check_host(ip):
        if isinstance(port, list) and len(port) == 2:
            try:
                for p in range(port[0], port[1] + 1):
                    syn_stealth(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        elif isinstance(port, list):
            try:
                for p in port:
                    syn_stealth(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        else:
            syn_stealth(port, ip)
def tcp_connect(port, ip):
    """TCP connect scan: complete the three-way handshake when the port answers SYN-ACK.

    BUG FIX (matches syn_stealth): the reply may carry no TCP layer (e.g. an
    ICMP unreachable), in which case `getlayer(TCP).flags` raised
    AttributeError; layer accesses are now guarded.
    """
    s_port = RandShort()
    # Probe from a random source port.
    synack = sr1(IP(dst=ip) / TCP(sport=s_port, dport=port, flags='S'), timeout=10)
    if synack is None:
        print("--> Puerto", port, "\033[93mfiltrado\033[0m")
        return
    tcp = synack.getlayer(TCP)
    if tcp is not None and tcp.flags == 0x12:
        # Open: finish the handshake with the final ACK.
        sr1(IP(dst=ip) / TCP(sport=s_port, dport=port, flags='A', ack=synack[TCP].seq + 1), timeout=10)
        print("--> Puerto", port, "\033[92mabierto\033[0m")
    elif tcp is not None and tcp.flags == 0x14:  # RST
        print("--> Puerto", port, "\033[91mcerrado\033[0m")
    elif synack.haslayer(ICMP) and int(synack.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
        print("--> Puerto", port, "\033[93mfiltrado\033[0m")
def init_st(port, ip):
    """Run a TCP connect scan over a single port (int), a range ([lo, hi]) or a list.

    BUG FIX: same `len(port) < 2` defect as init_ss -- a [lo, hi] range list
    never matched and crashed in the final else; the range branch now tests
    for exactly two elements.
    """
    if check_host(ip):
        if isinstance(port, list) and len(port) == 2:
            try:
                for p in range(port[0], port[1] + 1):
                    tcp_connect(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        elif isinstance(port, list):
            try:
                for p in port:
                    tcp_connect(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        else:
            tcp_connect(port, ip)
def udp_connect(port, ip):
    """Probe one UDP port: silence -> open|filtered, UDP reply -> open,
    ICMP type 3 code 3 -> closed, other type-3 codes -> filtered."""
    reply = sr1(IP(dst=ip) / UDP(dport=port), timeout=5)
    if reply is None:
        print("--> Puerto", port, "\033[92mabierto|filtrado\033[0m")
    elif reply.haslayer(UDP):
        print("--> Puerto", port, "\033[92mabierto\033[0m")
    elif reply.haslayer(ICMP):
        icmp = reply.getlayer(ICMP)
        if int(icmp.type) == 3 and int(icmp.code) == 3:
            print("--> Puerto", port, "\033[91mcerrado\033[0m")
        elif int(icmp.type) == 3 and int(icmp.code) in [1, 2, 3, 9, 10, 13]:
            print("--> Puerto", port, "\033[93mfiltrado\033[0m")
def init_su(port, ip):
    """Run a UDP scan over a single port (int), a range ([lo, hi]) or a list.

    BUG FIX: same `len(port) < 2` defect as init_ss -- a [lo, hi] range list
    never matched and crashed in the final else; the range branch now tests
    for exactly two elements.
    """
    if check_host(ip):
        if isinstance(port, list) and len(port) == 2:
            try:
                for p in range(port[0], port[1] + 1):
                    udp_connect(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        elif isinstance(port, list):
            try:
                for p in port:
                    udp_connect(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        else:
            udp_connect(port, ip)
def xmas_scan(port, ip):
    """XMAS scan: send FIN+PSH+URG; silence -> open|filtered, RST -> closed.

    BUG FIXES: the filtered branch referenced `synack`, a variable that does
    not exist in this function (NameError at runtime), and compared ICMP
    `.type` against the code list where the sibling scanners use `.code`.
    The TCP-layer access is also guarded like in syn_stealth/tcp_connect.
    """
    xmas_resp = sr1(IP(dst=ip) / TCP(dport=port, flags="FPU"), timeout=10)
    if xmas_resp is None:
        print("--> Puerto", port, "\033[92mabierto|filtrado\033[0m")
    elif xmas_resp.haslayer(TCP) and xmas_resp.getlayer(TCP).flags == 0x14:
        print("--> Puerto", port, "\033[91mcerrado\033[0m")
    elif xmas_resp.haslayer(ICMP) and int(xmas_resp.getlayer(ICMP).type) == 3 \
            and int(xmas_resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
        print("--> Puerto", port, "\033[93mfiltrado\033[0m")
def init_sx(port, ip):
    """Run an XMAS scan over a single port (int), a range ([lo, hi]) or a list.

    BUG FIX: same `len(port) < 2` defect as init_ss -- a [lo, hi] range list
    never matched and crashed in the final else; the range branch now tests
    for exactly two elements.
    """
    if check_host(ip):
        if isinstance(port, list) and len(port) == 2:
            try:
                for p in range(port[0], port[1] + 1):
                    xmas_scan(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        elif isinstance(port, list):
            try:
                for p in port:
                    xmas_scan(p, ip)
            except KeyboardInterrupt:
                sys.exit(1)
        else:
            xmas_scan(port, ip)
def start_scan(options):
    """Dispatch to the scan routine selected by options.mode and run it.

    BUG FIX: the dispatch fallback was `lambda: "Modo incorrecto"`, a
    zero-argument callable that raised TypeError when invoked as
    scan(port, target); it now accepts the arguments and reports the error.
    """
    modes = {
        "sS": init_ss,
        "sT": init_st,
        "sU": init_su,
        "sX": init_sx,
    }
    scan = modes.get(options.mode, lambda p, t: print("Modo incorrecto"))
    port = get_ports(options.port)
    print(" Puerto/Rango de puertos:", port)
    print("---------------------------------------------------------------------------")
    scan(port, options.target)
def get_options():
    """Parse and return the required CLI options (-t target, -m mode, -p ports)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--target', dest='target', help='Dirección IP objetivo', required=True)
    parser.add_argument('-m', '--mode', dest='mode', help='Modo de escaneo. sS (TCP SYN Stealth scan), sT (TCP Connect Scan), sU (UDP Scan), sX (XMAS Scan)', required=True)
    parser.add_argument('-p', '--port', dest='port', help='Puerto o rango de puertos, de la forma pInicial-pFinal', required=True)
    return parser.parse_args()
# ---- Interactive entry point: banner, usage help, then run the scan ----
os.system('clear')
print("\n")
print("\033[1m\033[91m__̴ı̴̴̡̡̡ ̡͌l̡̡̡ ̡͌l̡*̡̡ ̴̡ı̴̴̡ ̡̡͡|̲̲̲͡͡͡ ̲▫̲͡ ̲̲̲͡͡π̲̲͡͡ ̲̲͡▫̲̲͡͡ ̲|̡̡̡ ̡ ̴̡ı̴̡̡ ̡͌l̡̡̡̡.___ portScanner.py __̴ı̴̴̡̡̡ ̡͌l̡̡̡ ̡͌l̡*̡̡ ̴̡ı̴̴̡ ̡̡͡|̲̲̲͡͡͡ ̲▫̲͡ ̲̲̲͡͡π̲̲͡͡ ̲̲͡▫̲̲͡͡ ̲|̡̡̡ ̡ ̴̡ı̴̡̡ ̡͌l̡̡̡̡.___\033[0m")
print("---------------------------------------------------------------------------")
print("[!] USO:")
print("-> -t, --target : dirección IP objetivo")
print("-> -m, --mode : modo de scaneo.\n - sS (TCP SYN Stealth scan)\n - sT (TCP Connect Scan)\n - sU (UDP Scan)\n - sX (XMAS Scan)'")
print("-> -p, --port,ports : puerto, rango de puertos o lista de puertos. EJ: 1-40, 22 o 22,24,29,45")
print("---------------------------------------------------------------------------")
options = get_options()
print("\n---- Iniciando escaneo ----")
print(" OBJETIVO: ", options.target)
print(" MODO: ", options.mode)
# Time the whole scan run.
init_time = datetime.now()
start_scan(options)
print("Escaneo finalizado tras", datetime.now() - init_time, "segundos")
|
from abc import ABCMeta, abstractmethod, abstractproperty
class Position():
    """Immutable 2-D coordinate with value-based equality."""

    def __init__(self, x, y):
        self._position = (x, y)
        self._x = x
        self._y = y

    def __eq__(self, other):
        # Duck-typed comparison (anything with .x/.y), but return
        # NotImplemented instead of crashing with AttributeError on foreign
        # operands (the original raised on e.g. `pos == 3`).
        try:
            return self._x == other.x and self._y == other.y
        except AttributeError:
            return NotImplemented

    # BUG FIX: defining __eq__ sets __hash__ to None on Python 3, making
    # positions unusable in sets/dicts; restore a hash consistent with ==.
    def __hash__(self):
        return hash(self._position)

    def __repr__(self):
        return "Position({!r}, {!r})".format(self._x, self._y)

    @property
    def position(self):
        """The (x, y) tuple."""
        return self._position

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y
class Cell(metaclass=ABCMeta):
    """Abstract interface for a game-field cell.

    BUG FIX: the original assigned ``__metaclass__ = ABCMeta``, which is
    Python 2 syntax and has no effect on Python 3 -- the abstract methods were
    not actually enforced. ``metaclass=ABCMeta`` restores the intended
    contract (instantiating an incomplete subclass now raises TypeError).
    """

    @property
    @abstractmethod
    def position(self):
        """The cell's Position on the field."""

    @abstractmethod
    def action(self, gamefield, tick_time):
        """Advance the cell's own behaviour for one tick."""

    @abstractmethod
    def contact(self, user):
        """React to *user* touching/entering this cell."""

    @abstractmethod
    def is_passable(self, user):
        """Whether *user* may move through this cell."""

    @property
    @abstractmethod
    def image_name(self):
        """Name of the sprite/image used to render the cell."""
|
#!/usr/bin/env python3
import os
from functions import fuel_calculator, fuel_calculator_part_two
current_dir = os.path.dirname(__file__)
def part_one():
    """Sum the fuel requirement of every module mass listed in input.txt."""
    with open(os.path.join(current_dir, "input.txt"), "r") as masses:
        return sum(fuel_calculator(int(mass)) for mass in masses)
def part_two():
    """Total fuel including the fuel-for-fuel refinement (puzzle part two)."""
    total_fuel = 0
    with open(os.path.join(current_dir, "input.txt"), "r") as masses:
        # NOTE(review): unlike part_one, the mass is wrapped in a one-element
        # list here -- confirm fuel_calculator_part_two really expects a list
        # rather than an int like fuel_calculator.
        total_fuel = sum([fuel_calculator_part_two([int(mass)]) for mass in masses])
    return total_fuel
if __name__ == "__main__":
print("part one's result is {}".format(part_one()))
print("part two's result is {}".format(part_two()))
|
#!/usr/bin/env python
"""makemake V0.10
Copyright (c) 2010-2017 Michael P. Hayes, UC ECE, NZ
This program tries to make a Makefile from a template. Given a C file
(or a directory which is searched to find a C file containing a main
function) the included header files are recursively searched. Then, for
each header file, a search is made for a similarly named C file. The
header files for each C file are recursively searched and added to
the list of found header files. Again, similarly named C files are
searched until all the C files required for the program are found.
Usage: makemake --template template cfile search-dirs
or makemake --template template directory search-dirs
or makemake --builddir builddir --objext objext --template template directory search-dirs
or makemake --builddir builddir --modules --relpath directory search-dirs
or makemake --builddir builddir --files --relpath directory search-dirs
By default makemake will create a rule like foo.o: foo.c bar.h
and this will require a VPATH if the dependencies are another directory.
Alternatively, use the --relpath option to makemake to explicitly add the
relative path to the dependencies.
Note, this will die if there are circular dependencies. FIXME!
The --modules option also needs fixing. FIXME!
There are special strings that are replaced in the template file:
@PROJECT@ Project name
@VPATH@ List of source directories
@CC@ Compiler name
@CFLAGS@ Compiler flags
@INCLUDES@ List of include directories each prefixed by -I
@SRC@ List of source files
@OBJ@ List of object files
@CCRULES@ Rules to build object files from C files
Note, the callgraph generation requires non-documented behaviour of
gcc. This is likely to change.
"""
# See http://www.amk.ca/python/howto/regex/ for regular expressions in python.
# See also sre.py.
# + one or more, ? 0 or 1, * 0 or more
# http://www.cs.umd.edu/~nspring/software/style-check-readme.html
# TODO: Use ArgumentParser and rewrite to use classes
from __future__ import print_function
import sys
import re
import os
import subprocess
from os import pathsep
import os.path
from optparse import OptionParser
def unique(items):
    """Return the distinct elements of *items* as a real list (first-seen order).

    BUG FIX: the original returned ``dict.keys()``, which on Python 3 is a
    view object; callers that ``sort()``/``remove()``/``insert()`` the result
    (paths_prune, makefile_print) crashed. It also shadowed the ``list`` and
    ``dict`` builtins.
    """
    seen = {}
    for item in items:
        seen[item] = True
    return list(seen.keys())
def file_search(filename, search_path, debug):
    """Given a search path, find file.

    Returns the absolute path of *filename* in the first directory of the
    pathsep-separated *search_path* that contains it, or None when not found.
    """
    for directory in search_path.split(pathsep):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    # FIXME, if have :: at end of search path then need to search subdirs.
    return None
def hfiles_get(cfile, filedeps, options, _visiting=None):
    """Recursively collect the header files *cfile* depends on (deduplicated).

    BUG FIX: the module docstring's FIXME was real -- circular include chains
    recursed forever. The optional *_visiting* set (backward-compatible
    default) breaks the cycle while still reporting it.
    """
    if _visiting is None:
        _visiting = set()
    if cfile in _visiting:
        print('Circular dependency for %s' % cfile, file=sys.stderr)
        return []
    _visiting.add(cfile)
    hfilelist = []
    # Direct header dependencies first...
    for hfile in filedeps[cfile]:
        if hfile[-2:] == '.h':
            hfilelist.append(os.path.relpath(hfile) if options.relpath else hfile)
    # ...then the transitive ones.
    for hfile in filedeps[cfile]:
        hfilelist.extend(hfiles_get(hfile, filedeps, options, _visiting))
    return unique(hfilelist)
def files_get_all(filedeps, ext):
    """Return every target in *filedeps* whose name ends with *ext* ('.c' or '.h')."""
    matching = [target for target in filedeps if target[-2:] == ext]
    return unique(matching)
def cfiles_get_all(filedeps):
    """All C source files known to the dependency map."""
    return files_get_all(filedeps, '.c')
def hfiles_get_all(filedeps):
    """All header files known to the dependency map."""
    return files_get_all(filedeps, '.h')
def paths_prune(filelist):
    """Collapse *filelist* to the unique relative directories containing them.

    The current directory is normalised to '.' and moved to the front.
    BUG FIX (py3): unique() may return a dict view; remove()/insert() below
    need a real list, so the result is materialised explicitly. The redundant
    second unique() pass was dropped.
    """
    relpaths = list(unique([os.path.dirname(os.path.relpath(path)) for path in filelist]))
    if '' in relpaths:
        relpaths.remove('')
        relpaths.insert(0, '.')
    return relpaths
def file_parse(pathname, indent, debug):
    """Return the names of every file #include'd by *pathname* (in order).

    A regex is used instead of `gcc -MM` so header-to-header relationships
    are preserved, at the cost of ignoring conditional compilation.
    """
    if debug:
        print('%sParsing file %s' % (indent, pathname), file=sys.stderr)
    with open(pathname, 'r') as source:
        text = source.read()
    include_re = re.compile(r'^#include[ ].*["<]([a-zA-Z_.0-9].*)[">]', re.MULTILINE)
    hfilelist = include_re.findall(text, 0)
    if debug:
        print('%sFound hfiles %s in %s' % (indent, hfilelist, pathname), file=sys.stderr)
    return hfilelist
def makefile_print(options, template, maincfilename, filedeps,
                   search_list):
    """Fill in the Makefile *template* from the dependency map and print it.

    Replaces the @PROJECT@/@VPATH@/@CC@/@CFLAGS@/@INCLUDES@/@SRC@/@OBJ@ and
    @CCRULES@ placeholders (see the module docstring).
    """
    cfilelist = cfiles_get_all(filedeps)
    cfilelist.sort()
    basecfilelist = [os.path.basename(cfile) for cfile in cfilelist]
    # Project name = basename of the main C file, without extension.
    project = os.path.splitext(os.path.basename(maincfilename))
    project = project[0]
    file = open(template, 'r')
    text = file.read()
    file.close()
    hfilelist = hfiles_get_all(filedeps)
    includedirs = paths_prune(hfilelist)
    moduledirs = paths_prune(cfilelist)
    vpath = ' '.join(moduledirs)
    includes = '-I' + ' -I'.join(includedirs)
    src = ' '.join(basecfilelist)
    obj = src
    if options.builddir != '':
        # Object files (and the executable) live under the build directory.
        objfilelist = [os.path.join(options.builddir, obj1) for obj1 in basecfilelist]
        objfilelist.sort()
        obj = ' '.join(objfilelist)
        project = os.path.join(options.builddir, project)
    # foo.c -> foo<objext> in the object list.
    obj = re.sub(r'([a-zA-Z0-9/.-_]*)[.]c', r'\1' + options.objext, obj)
    text = re.sub(r'@PROJECT@', project, text)
    text = re.sub(r'@VPATH@', vpath, text)
    text = re.sub(r'@CC@', options.cc, text)
    text = re.sub(r'@CFLAGS@', options.cflags, text)
    text = re.sub(r'@INCLUDES@', includes, text)
    text = re.sub(r'@SRC@', src, text)
    text = re.sub(r'@OBJ@', obj, text)
    if re.search(r'@CCRULES@', text) != None:
        # Emit one "obj: src headers..." compile rule per C file.
        search_path = pathsep.join(search_list)
        rules = ''
        for cfile in cfilelist:
            cfilebase = os.path.basename(cfile)
            if options.relpath:
                cfile1 = os.path.relpath(cfile)
            else:
                cfile1 = cfilebase
            if options.builddir != '':
                rules = rules + os.path.join(options.builddir, '')
            rules = rules + re.sub('([a-zA-Z0-9/.-_]*)[.]c', r'\1' + options.objext, cfilebase) + ': ' + cfile1
            hfilelist = hfiles_get(cfile, filedeps, options)
            hfilelist.sort()
            if options.debug:
                print('Need hfiles %s for %s' % (hfilelist, cfile), file=sys.stderr)
            for hfile in hfilelist:
                rules = rules + ' ' + hfile
            rules = rules + '\n'
            rules = rules + '\t$(CC) -c $(CFLAGS) $< -o $@\n\n'
        text = re.sub(r'@CCRULES@', rules, text)
    print(text)
def subprocess_command(command):
    """Run *command* through the shell and return its captured stdout (bytes on py3)."""
    proc = subprocess.Popen(command, shell=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            close_fds=True)
    # Close stdin first so the child cannot block waiting for input.
    proc.stdin.close()
    output = proc.stdout.read()
    proc.stdout.close()
    return output
def maincfilename_find(dirname):
    """Find a C file in *dirname* that defines main(); return its path or None.

    BUG FIXES: grep -l output is newline-separated and (on Python 3) bytes;
    the original split on a single space and, with no match at all, returned
    '' instead of None because ''.split(' ') yields [''].
    """
    files = subprocess_command('grep -l "main[ ]*(" ' + dirname + '/*.c')
    if isinstance(files, bytes):
        files = files.decode()
    filelist = [name for name in files.split() if name]
    if not filelist:
        return None
    # What if there are multiple files with main? For now, select the
    # first one.
    return filelist[0]
def functions_find(filepath, functiondeps, functions, options):
    """Populate *functiondeps*/*functions* by parsing gcc's -fdump-tree-cfg-raw dump.

    Relies on undocumented gcc behaviour (see module docstring).
    BUG FIX: dict.has_key() was removed in Python 3; replaced with `in`.
    """
    command = options.compile + ' -c ' + filepath + ' -fdump-tree-cfg-raw -DDEBUG > /dev/null'
    if options.debug:
        print(command, file=sys.stderr)
    os.system(command)
    # The dump-file suffix depends on the gcc version.
    # NOTE(review): these are *string* comparisons -- gcc >= 10 ("10.x" < "4.5.3"
    # lexically) will pick the wrong suffix; kept as-is to preserve behaviour.
    version = subprocess_command(options.compile + ' -dumpversion').strip()
    if version >= '4.9.2':
        ext = '.011t.cfg'
    elif version > '4.5.3':
        ext = '.014t.cfg'
    else:
        ext = '.012t.cfg'
    rtlfilename = os.path.abspath(os.path.basename(filepath)) + ext
    if not os.path.exists(rtlfilename):
        print('Could not find %s to generate callgraph' % rtlfilename, file=sys.stderr)
        return
    file = open(rtlfilename, 'r')
    text = file.readlines()
    file.close()
    # First pass: record function definitions and direct gimple calls.
    function = None
    for line in text:
        matches = re.findall(r'^;; Function (.*)\s[(]', line)
        if matches:
            function = matches[0]
            functiondeps[function] = []
            functions[function] = filepath
            if options.debug:
                print('DEF', function, file=sys.stderr)
        matches = re.findall(r'.*gimple_call <([\w]*),', line)
        if matches:
            if options.debug:
                print('USE', matches[0], file=sys.stderr)
            if function:
                functiondeps[function].append(matches[0])
            else:
                print(matches[0], 'used outside function in', filepath, file=sys.stderr)
    # Second pass: record where a function's address is taken (indirect calls),
    # e.g. gimple_assign <addr_expr, tasks[0].func, display_task, NULL>.
    function = None
    for line in text:
        matches = re.findall(r'^;; Function (.*)\s[(]', line)
        if matches:
            function = matches[0]
        matches = re.findall(r'.*gimple_assign <addr_expr, [\w\[\].]*, ([\w]*)', line)
        if matches and matches[0] in functiondeps:
            # Mark indirect links with a leading '@'.
            functiondeps[function].append('@' + matches[0])
    command = 'rm ' + rtlfilename
    if options.debug:
        print(command, file=sys.stderr)
    os.system(command)
def files_find(filepath, search_path, filedeps, moduledeps, indent, debug):
    """Recursively resolve the include/module graph rooted at *filepath*.

    BUG FIX: dict.has_key() was removed in Python 3; replaced with `in`.
    """
    # filedeps is a cache of all known included files
    if filepath in filedeps:
        return
    # Find included header files
    includes = file_parse(filepath, indent + ' ', debug)
    includes2 = []
    for hfile in includes:
        hpath = file_search(hfile, search_path, debug)
        if not hpath:
            continue
        includes2.append(hpath)
    # Guess modules from header files: a foo.h alongside a foo.c marks a module.
    modules = []
    for hpath in includes2:
        cpath = re.sub(r'([a-zA-Z._0-9/.-_].*)[.]h', r'\1.c', hpath)
        if (not os.path.exists(cpath)) or (cpath == filepath):
            continue
        # Have found a module
        modules.append(cpath)
    base, ext = os.path.splitext(os.path.basename(filepath))
    if ext == '.c':
        moduledeps[base] = []
        for module in modules:
            modbase, ext = os.path.splitext(os.path.basename(module))
            moduledeps[base].append(modbase)
    filedeps[filepath] = includes2
    # Search recursively each new included file
    for file in includes2:
        files_find(file, search_path, filedeps, moduledeps, indent + ' ', debug)
    # Search the modules
    for file in modules:
        files_find(file, search_path, filedeps, moduledeps, indent + ' ', debug)
def alldeps_print(depsdir, options):
    """Print one 'target: dependencies' line per known target, honouring the
    exclude list and the --relpath option."""
    for target in depsdir.keys():
        if os.path.basename(target) in options.exclude:
            continue
        deps = [dep for dep in depsdir[target]
                if os.path.basename(dep) not in options.exclude]
        if options.relpath:
            deps = [os.path.relpath(dep) for dep in deps]
        print(os.path.relpath(target) + ': ' + ' '.join(deps) + '\n')
def deps_print(target, depsdir, options, record={}):
    """Print 'target: deps' lines depth-first, each target at most once.

    BUG FIX: dict.has_key() was removed in Python 3; replaced with `in`.
    NOTE(review): the mutable default `record={}` is shared across calls --
    apparently deliberate (it suppresses duplicates across top-level calls);
    kept for compatibility.
    """
    if target in record:
        return
    if target not in depsdir:
        return
    deps = depsdir[target]
    deps = [dep for dep in deps if os.path.basename(dep) not in options.exclude]
    for dep in deps:
        # Skip self-dependencies to avoid infinite recursion.
        if target == dep:
            continue
        deps_print(dep, depsdir, options, record)
    if options.relpath:
        deps = [os.path.relpath(dep) for dep in deps]
    record[target] = True
    print(os.path.relpath(target) + ': ' + ' '.join(deps) + '\n')
def callgraph_print(target, functiondeps, functions, options, record={}):
    """Print 'function@file: callees' lines depth-first, each function once.

    BUG FIX: dict.has_key() was removed in Python 3; replaced with `in`.
    NOTE(review): the shared mutable default `record={}` mirrors deps_print
    and is kept for compatibility.
    """
    if target in record:
        return
    if target not in functiondeps:
        return
    deps = functiondeps[target]
    deps = [dep for dep in deps if dep not in options.exclude]
    for dep in deps:
        # Skip self-calls to avoid infinite recursion.
        if target == dep:
            continue
        # '@' marks an indirect call (address taken); strip before recursing.
        if dep[0] == '@':
            dep = dep[1:]
        callgraph_print(dep, functiondeps, functions, options, record)
    record[target] = True
    print(os.path.relpath(target) + '@' + os.path.basename(functions[target]) + ': ' + ' '.join(deps) + '\n')
class Usage(Exception):
    """Raised for command-line usage errors; carries the usage message.

    The original never called Exception.__init__, so str(exc) was empty;
    forwarding msg to the base class makes the message printable.
    """
    def __init__(self, msg):
        super().__init__(msg)
        self.msg = msg
def main(argv=None):
    """Command-line entry point: compute file/module/call dependencies for a C file.

    argv: optional argument vector (defaults to sys.argv).
    Returns 0 on success; exits early via sys.exit on usage errors.
    """
    if argv is None:
        argv = sys.argv
    # The first line of the module docstring doubles as the version string.
    version = __doc__.split('\n')[0]
    parser = OptionParser(usage='%prog', version=version,
                          description=__doc__)
    parser.add_option('--showops', action='store_true',
                      dest='showops', default=False,
                      help='show operations')
    parser.add_option('--modules', action='store_true',
                      dest='modules', default=False,
                      help='generate module dependencies')
    parser.add_option('--calls', action='store_true',
                      dest='calls', default=False,
                      help='generate callgraph')
    parser.add_option('--files', action='store_true',
                      dest='files', default=False,
                      help='generate file dependencies')
    parser.add_option('--debug', action='store_true',
                      dest='debug', default=False,
                      help='enable debugging')
    parser.add_option('--exclude', dest='exclude',
                      default='',
                      help='files to exclude')
    parser.add_option('--objext', dest='objext',
                      default='.o',
                      help='object file extension')
    parser.add_option('--exeext', dest='exeext',
                      default='.out',
                      help='executable file extension')
    parser.add_option('--cc', dest='cc',
                      default='gcc',
                      help='compiler name')
    parser.add_option('--cflags', dest='cflags',
                      default='',
                      help='CFLAGS')
    parser.add_option('--relpath', action='store_true',
                      dest='relpath', default=False,
                      help='use relative paths')
    parser.add_option('--outfile', dest='outfilename',
                      default=None,
                      help='output filename')
    parser.add_option('--builddir', dest='builddir',
                      default='',
                      help='build dirname')
    parser.add_option('--template', dest='template',
                      default=None,
                      help='template filename')
    options, args = parser.parse_args()
    if len(args) < 1:
        print(__doc__)
        sys.exit(0)
    # --exclude accepts either comma-separated or whitespace-separated names.
    if ',' in options.exclude:
        options.exclude = options.exclude.split(',')
    else:
        options.exclude = options.exclude.split()
    maincfilename = args[0]
    search_list = []
    if len(args) > 1:
        search_list.extend(args[1:len(args)])
    # pathsep-joined search path for header lookups; -I flags for the compiler.
    search_path = pathsep.join(search_list)
    includes = '-I' + ' -I'.join(search_list)
    options.compile = options.cc + ' ' + options.cflags + ' ' + includes
    if options.debug:
        print(search_list, file=sys.stderr)
        print('template', options.template, file=sys.stderr)
        print('cfile', maincfilename, file=sys.stderr)
        print('search_path', search_path, file=sys.stderr)
        print('CWD = ', os.getcwd(), file=sys.stderr)
        print(options.compile, file=sys.stderr)
    # A directory argument means "locate the C file containing main() inside it".
    if os.path.isdir(maincfilename):
        if options.debug:
            print('Searching ' + maincfilename, file=sys.stderr)
        maincfilename = maincfilename_find(maincfilename)
        if not maincfilename:
            sys.exit(1)
        if options.debug:
            print('Found C file ' + maincfilename, file=sys.stderr)
    # Search main c file looking for header files included with #include
    # and any header files included by the header files
    filedeps = {}
    moduledeps = {}
    files_find(maincfilename, search_path, filedeps, moduledeps, '', options.debug)
    cfilelist = cfiles_get_all(filedeps)
    # Derive object-file and executable names from the C file names.
    # NOTE(review): the [:-2] slices assume 2-character extensions ('.c');
    # a longer --objext/--exeext would break these derivations — confirm.
    ofilelist = [cfile[:-2] + options.objext for cfile in cfilelist]
    outfile = maincfilename[:-2] + options.exeext
    filedeps[outfile] = ofilelist
    for ofile in ofilelist:
        deps = []
        deps.append(ofile[:-2] + '.c')
        filedeps[ofile] = deps
    if options.calls:
        functiondeps = {}
        functions = {}
        for cfile in cfilelist:
            functions_find(cfile, functiondeps, functions, options)
        callgraph_print('main', functiondeps, functions, options)
    if options.files:
        deps_print(outfile, filedeps, options)
    if options.modules:
        target, ext = os.path.splitext(os.path.basename(maincfilename))
        deps_print(target, moduledeps, options)
    if options.template:
        makefile_print(options, options.template, maincfilename, filedeps,
                       search_list)
    return 0
if __name__ == "__main__":
    # Run as a script: process exit status comes from main()'s return value.
    sys.exit(main())
|
import sys
from node import *
# Stable in-place partition of a singly linked list around a pivot value:
# nodes with value < partition end up before the first node >= partition.
#input params
input_list = [-1, 1, 3, 7, 11, 9, 2, 3, 5];
partition = 5;
#make a linked-list using the input list
head = node(input_list[0]);
current_ref = head;
for index in range(1, len(input_list)):
    current_ref.add_next(node(input_list[index]));
    current_ref = current_ref.next;
head.print_list();
#find where the first 'partition' ends i.e. the element right before where the second partition starts
# A dummy node in front of head simplifies the edge case where head itself
# belongs to the second partition.
new_node = node(None);
new_node.add_next(head);
current_ref = new_node;
partition_start, partition_ref = -1, None;
for index in range(len(input_list)+1):
    if (current_ref.next):
        if ( (current_ref.next).value >= partition ):
            partition_start = index-1;
            partition_ref = current_ref;
            break;
        else:
            current_ref = current_ref.next;
#check if partitioning is even possible
if (not partition_ref):
    print("ERROR: Cannot Partition.");
    exit(0);
#go through the list in O(n) and move the smaller elements in the second partition...
#...to the left partition
prev_ref = partition_ref;
current_ref = partition_ref.next;
for index in range(partition_start+1, len(input_list)):
    #check if we need to swap nodes
    if (current_ref.value < partition):
        # Unlink current_ref and re-insert it right after partition_ref,
        # then advance partition_ref so insertion order is preserved (stable).
        prev_ref.next = current_ref.next;
        current_ref.next = partition_ref.next;
        partition_ref.next = current_ref;
        partition_ref = partition_ref.next;
        current_ref = prev_ref.next;
        partition_start += 1;
    else:
        prev_ref = prev_ref.next;
        current_ref = current_ref.next;
#fix the head of the linked-list
head = new_node.next;
#print the output
head.print_list()
from django.db import models
class User(models.Model):
    """A minimal user record identified by phone number."""

    # Required contact number, stored as free-form text.
    phone = models.CharField(max_length=100, blank=False, null=False)

    def __str__(self):
        """Human-readable representation used by the Django admin."""
        return self.phone
# coding: utf-8
import sys
from setuptools import setup, find_packages
def README():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as readme:
        return readme.read()


# Backward-compatibility dependencies for Python 2
if sys.version_info < (3,):
    _python2_requires = ['configparser', 'pathlib']
else:
    _python2_requires = []

setup(
    name='django-develop',
    description='Django development for humans',
    long_description=README(),
    url='https://github.com/pjdelport/django-develop',
    author='Pi Delport',
    author_email='pjdelport@gmail.com',

    package_dir={'': 'src'},
    packages=find_packages('src'),

    # Version is derived from the git tag via setuptools_scm.
    setup_requires=['setuptools_scm'],
    use_scm_version=True,

    install_requires=[
        # attrs 15.2.0 (2015-12-08) adds the convert feature.
        'attrs >=15.2.0',
        'Django',
    ] + _python2_requires,

    # The django-develop command-line script
    entry_points={
        'console_scripts': [
            'django-develop = django_develop.cli:main',
            'django-develop-config = django_develop.cli:main_config',
        ],
    },

    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: Public Domain',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Utilities',
    ],
)
|
'''
# 2016. 08. 19
'''
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import pandas_datareader.data as web
import matplotlib.pyplot as plt
import datetime
def main():
    """Download one year of Samsung stock data, then show a histogram,
    a scatter matrix and a box plot of the OHLC columns."""
    # Fetch daily quotes for KRX code 005930 over calendar year 2015
    # and cache them as a pickle file.
    DnloadStockData(
        "samsung.data", "005930", 2015, 1, 1, 2015, 12, 31)
    df = loadStockData("samsung.data")
    # Histogram of opening prices, with the mean marked as a vertical line.
    n, bins, patched = plt.hist(df["Open"])
    plt.axvline(df["Open"].mean(), color="green")
    # plt.show()
    print(" 히스토그램 ")
    for index in range(len(n)):
        print("Bin : " + str(bins[index]) + " frequency : " + str(n[index]) )
    print(" 산점도 행렬 ")
    # Pairwise scatter plots of the OHLC columns with KDE on the diagonal.
    scatter_matrix(
        df[["Open", "High", "Low", "Close"]], alpha=0.2, figsize=(6,6),
        diagonal='kde')
    # plt.show()
    print(" 상자 그림 ")
    df[["Open", "High", "Low", "Close"]].plot(kind="box")
    plt.show()
def DnloadStockData(filename, companyCode, year1, month1, date1,
                    year2, month2, date2):
    """Download daily quotes for a KRX-listed company and pickle them.

    filename: pickle file to write; companyCode: KRX ticker (".KS" is appended);
    the two year/month/date triples bound the date range. Returns the DataFrame.
    NOTE(review): the "yahoo" backend of pandas_datareader has been unreliable /
    discontinued for years — confirm this data source still works.
    """
    start = datetime.datetime(year1, month1, date1)
    end = datetime.datetime(year2, month2, date2)
    df = web.DataReader(str(companyCode) + ".KS", "yahoo", start, end)
    df.to_pickle(filename)
    return df
def loadStockData(filename):
df = pd.read_pickle(filename)
print(df.describe())
print(df.quantile([.25, .5, .75]))
return df
if __name__ == '__main__':
    # Script entry point.
    main()
def shift(letter, n):
    """Shift a single character *n* positions through the alphabet.

    Case is preserved and the alphabet wraps around ('z' + 1 -> 'a');
    non-letter characters are returned unchanged.  Negative *n* shifts
    backwards, so the same helper serves both encryption and decryption.
    """
    if letter.islower():
        return chr((ord(letter) - ord('a') + n) % 26 + ord('a'))
    if letter.isupper():
        return chr((ord(letter) - ord('A') + n) % 26 + ord('A'))
    return letter

def encrypt(message, shift_amount):
    """Caesar-encrypt *message* by shifting each letter *shift_amount* places."""
    return ''.join(shift(character, shift_amount) for character in message)

def decrypt(message, shift_amount):
    """Invert encrypt(): shift each letter back by *shift_amount* places."""
    return encrypt(message, -shift_amount)
# Demo: encrypt a sample message with a shift of 3 and print the result.
secret_message = "encryption is fun"
encrypted_message = encrypt(secret_message, 3)
print(encrypted_message)
|
# -*- coding: utf-8 -*-
class Iterator:
    """Minimal iterator over a list; values are consumed from the tail."""

    def __init__(self, nums):
        # Remaining values; next() pops from the end of this list.
        self.nums = nums

    def hasNext(self):
        """True while any values remain."""
        return bool(self.nums)

    def next(self):
        """Remove and return the last remaining value."""
        return self.nums.pop()
class PeekingIterator:
    """Adds peek() on top of an iterator exposing hasNext()/next().

    The original cached the peeked value and used `None` to mean "no cached
    value", which broke whenever the underlying iterator legitimately
    yielded None (peek would re-pull, hasNext would under-report).  A unique
    sentinel object distinguishes "empty cache" from any real value.
    """

    _SENTINEL = object()  # marks "no cached value"; never equal to user data

    def __init__(self, iterator):
        self.cache = self._SENTINEL
        self.iterator = iterator

    def peek(self):
        """Return the next value without consuming it."""
        if self.cache is self._SENTINEL:
            self.cache = self.iterator.next()
        return self.cache

    def hasNext(self):
        """True if another value is available (cached or from the iterator)."""
        if self.cache is not self._SENTINEL:
            return True
        return self.iterator.hasNext()

    def next(self):
        """Return and consume the next value, preferring the cached one."""
        if self.cache is not self._SENTINEL:
            result = self.cache
            self.cache = self._SENTINEL
            return result
        return self.iterator.next()
if __name__ == "__main__":
    # Smoke test: Iterator consumes from the tail, so 3 comes out first.
    iterator = PeekingIterator(Iterator([1, 2, 3]))
    assert iterator.hasNext()
    assert 3 == iterator.peek()
    assert 3 == iterator.next()
|
import uuid
import os
import os.path
def get_file_path(instance, filename):
    """Build a unique upload path: a random UUID plus the original extension.

    Django upload_to callback signature; *instance* is unused here.
    """
    extension = filename.rsplit('.', 1)[-1]
    unique_name = "%s.%s" % (uuid.uuid4(), extension)
    return os.path.join('images/uploads/', unique_name)
|
import yaml
import os
from collections import defaultdict
def get_id(myfile):
    """Extract the match id (file stem) from a path such as 'data/1003273.yaml'."""
    name = myfile.strip().split("/")[-1].strip()
    return name.split(".")[0]
def get_umpires(dat):
    """Return exactly three umpire slots; missing entries remain None,
    extras beyond the third are dropped."""
    ump = [None, None, None]
    for position, name in enumerate(dat[:3]):
        ump[position] = name
    return ump
def get_win_details(dat):
    """Flatten the 'outcome' mapping into a readable string, skipping 'winner'.

    Fix: the original detected nested dicts by comparing against the
    Python 2 repr string "<type 'dict'>", which is always False under
    Python 3 ("<class 'dict'>"); isinstance() is correct on both.
    """
    res = ""
    for k in dat:
        if isinstance(dat[k], dict):
            # Recurse into nested outcome details, e.g. {'by': {'runs': 7}}.
            res += str(k) + " " + get_win_details(dat[k])
        elif k != 'winner':
            res += str(k)
            res += " " + str(dat[k]) + " "
    return res
class YamlReader:
    """Parses cricsheet-style YAML match files into flat row lists.

    Portability fixes: the Python-2-only builtin file() is replaced by
    open() in a context manager (so handles are closed), and the
    unsubscriptable-in-Python-3 `dict.keys()[0]` idiom is replaced with
    next(iter(...)); both forms also work under Python 2.
    """

    def getMatchDetails(self, myfile):
        """Return one row of match-level metadata parsed from *myfile*."""
        with open(myfile, 'r') as stream:
            # NOTE(review): yaml.load without an explicit Loader constructs
            # arbitrary objects from untrusted input and is deprecated;
            # consider yaml.safe_load if these data files use plain tags only.
            data = yaml.load(stream)
        matchid = get_id(myfile)
        #print data['info'].keys()
        data = defaultdict(str, data)
        if 'city' in data['info']:
            venue_city = data['info']['city']
        else:
            venue_city = ""
        date = data['info']['dates'][0]
        match_type = data['info']['match_type']
        gender = data['info']['gender']
        venue_stadium = data['info']['venue']
        team1, team2 = data['info']['teams']
        umpire1, umpire2, umpire3 = get_umpires(data['info']['umpires'])
        toss_winner = data['info']['toss']['winner']
        toss_decision = data['info']['toss']['decision']
        if 'winner' in data['info']['outcome']:
            winner = data['info']['outcome']['winner']
        else:
            winner = data['info']['outcome']['result']
        win_by = get_win_details(data['info']['outcome'])
        if 'overs' in data['info']:
            max_overs = data['info']['overs']
        else:
            max_overs = None
        if 'player_of_match' in data['info']:
            player_of_match = data['info']['player_of_match'][0]
        else:
            player_of_match = None
        return [matchid,team1,team2,win_by,winner,player_of_match,max_overs,venue_city,
                venue_stadium,date,gender,match_type,toss_winner,toss_decision,umpire1,umpire2,umpire3]

    def getBallDetails(self, myfile):
        """Return one row per delivery for every innings in *myfile*."""
        with open(myfile, 'r') as stream:
            data = yaml.load(stream)
        balls_data = []
        matchid = get_id(myfile)
        #print data.keys()
        for i in range(len(data['innings'])):
            # Each innings entry is a single-key mapping, e.g. {'1st innings': {...}}.
            innings = next(iter(data['innings'][i]))
            batting_team = data['innings'][i][innings]['team']
            for j in range(len(data['innings'][i][innings]['deliveries'])):
                ball_num = j+1
                over,batsman,bowler,non_striker,runs_batsman,runs_extras,runs_total,wicket_player,wicket_kind,wicket_fielder = self.get_ball_data(data['innings'][i][innings]['deliveries'][j])
                balls_data.append([matchid,innings,batting_team,ball_num,over,batsman,
                                   bowler,non_striker,runs_batsman,runs_extras,runs_total,wicket_player,wicket_kind,wicket_fielder])
        #print data['innings'][0]['1st innings']['deliveries'][0]
        #print data['innings'][0]['1st innings']['team']
        return balls_data

    def get_ball_data(self, dat):
        """Flatten one delivery mapping (keyed by over number) into a row."""
        over = next(iter(dat))
        batsman = dat[over]['batsman']
        bowler = dat[over]['bowler']
        non_striker = dat[over]['non_striker']
        runs_batsman = dat[over]['runs']['batsman']
        runs_extras = dat[over]['runs']['extras']
        runs_total = dat[over]['runs']['total']
        if 'wicket' in dat[over]:
            wicket_player = dat[over]['wicket']['player_out']
            wicket_kind = dat[over]['wicket']['kind']
            if 'fielders' in dat[over]['wicket']:
                wicket_fielder = dat[over]['wicket']['fielders'][0]
            else:
                wicket_fielder = None
        else:
            wicket_fielder, wicket_kind, wicket_player = None, None, None
        return [over,batsman,bowler,non_striker,runs_batsman,runs_extras,runs_total,wicket_player,wicket_kind,wicket_fielder]
# Module-level reader instance; example invocations are kept for reference.
y = YamlReader()
#y.getMatchDetails("data/1003273.yaml")
#y.getBallDetails("data/1003273.yaml")
|
#!/usr/bin/python3
"""
programmer : Amir Kouhkan
website : www.amirkouhkan.ir
E-mail : amirkouhkan1@gmail.com
This script it's so biegner, you can developed it and make it most usefull :D
"""
import sqlite3
def createDatabase():
    """Create (or open) the save.db SQLite file, then close it again.

    The module-level `db` global is rebound as a side effect, matching the
    original behaviour.
    """
    global db
    connection = sqlite3.connect('save.db')
    db = connection
    print("Database is created successfully")
    connection.close()
def createTable():
    """Create the tbl_save accounts table if it does not already exist.

    Fix: the original raised sqlite3.OperationalError when the table was
    already present (e.g. menu option 2 chosen twice); IF NOT EXISTS makes
    the call idempotent.
    """
    db = sqlite3.connect('save.db')
    print("Connected to the database...")
    db.execute('create table if not exists tbl_save(id int primary key, username text, password text, website text)')
    print("Table is created successfully")
    db.close()
def insertAccounts():
    """Prompt for one account record and insert it into tbl_save."""
    connection = sqlite3.connect('save.db')
    print("Connected to database ...")
    # Gather the row interactively; the id must parse as an integer.
    account_id = int(input("Enter an id for this account: "))
    username = input("Enter username of this account: ")
    password = input("Enter password of this account: ")
    website = input("Enter websites account: ")
    # Parameterized insert (values are bound, not formatted into the SQL).
    connection.execute(
        'insert into tbl_save (id,username,password,website) values (?,?,?,?)',
        (account_id, username, password, website))
    connection.commit()
    print("The account's information is added successfully")
    connection.close()
def deleteAccount():
    """Prompt for an id and delete the matching account row.

    Fix: the id is now bound as a query parameter instead of being
    str.format()-ed into the SQL text — the idiomatic, injection-safe
    sqlite3 usage.
    """
    db = sqlite3.connect('save.db')
    ID = int(input("Enter an ID for delete account: "))
    db.execute('delete from tbl_save where id=?', (ID,))
    db.commit()
    print("The account information is deleted successfully")
    db.close()
def updateAccount():
    """Prompt for an id and new values, then update the matching row.

    Fix: username/password/website were interpolated into the SQL text
    with str.format(), which is SQL injection (any input containing a
    double quote broke or subverted the statement); all values are now
    bound as query parameters.
    """
    db = sqlite3.connect('save.db')
    ID = int(input("Enter an id for update account information: "))
    username = input("Enter new username: ")
    password = input("Enter new password: ")
    website = input("Enter new website")
    db.execute('update tbl_save set username=?,password=?,website=? where id=?',
               (username, password, website, ID))
    db.commit()
    print("The account information is updated")
    db.close()
def showAccount():
    """Print every row stored in tbl_save, one tuple per line."""
    connection = sqlite3.connect('save.db')
    # The cursor returned by execute() is directly iterable.
    for record in connection.execute('select * from tbl_save'):
        print(record)
    connection.close()
def searchAccount():
    """Prompt for a search key (numeric id or website name) and print matches.

    Fixes:
    - query values are bound as parameters instead of str.format()-ed into
      the SQL (the name branch was injectable);
    - the second branch compared against " NAME" (leading space), an
      obvious typo; case-insensitive matching now accepts id/ID/name/NAME;
    - the bare except that swallowed every error (including typos in the
      code itself) is narrowed to the failures this function can expect.
    """
    try:
        db = sqlite3.connect('save.db')
        set_type = input("Enter type of search by name or ID: (name,id)")
        if set_type.lower() == "id":
            ID = int(input("Enter an id for search: "))
            search = db.execute('select * from tbl_save where id =?', (ID,))
            for i in search:
                print(i)
        elif set_type.lower() == "name":
            name = input("Enter name of website: ")
            search = db.execute('select * from tbl_save where website = ?', (name,))
            for i in search:
                print(i)
        db.close()
    except (ValueError, sqlite3.Error):
        # Bad numeric input or a database error; original message preserved.
        print("The value is not founded")
def main():
    """Show the menu once, then dispatch numbered choices until Exit (8).

    Fix: the original while-loop had no branch for unrecognized numbers
    (e.g. 9 or 0), so it spun forever in a silent busy loop; unknown
    choices now report an error and re-prompt.  The repeated
    if/elif dispatch is replaced by a dict of callables.
    """
    print('''Welcome To Save Account Information Application
If you use for first time you must be create the database.
        1. Create Database
        2. Create Table
        3. Insert a account
        4. Delete a account
        5. Update a account
        6. Show Accounts
        7. Search in Database
        8. Exit''')
    actions = {
        1: createDatabase,
        2: createTable,
        3: insertAccounts,
        4: deleteAccount,
        5: updateAccount,
        6: showAccount,
        7: searchAccount,
    }
    key_number = int(input("Enter number: "))
    while True:
        if key_number == 8:
            break
        action = actions.get(key_number)
        if action is not None:
            action()
        else:
            # Previously this case looped forever without any output.
            print("Unknown option")
        key_number = int(input("Do you want to continue?(1-7 / 8)"))
if __name__=="__main__":
main()
|
# coding: utf-8
from sklearn.naive_bayes import MultinomialNB
from LSA import LSA
import numpy
class NaiveBayesClassifier:
    """Thin wrapper around sklearn's MultinomialNB with per-sample
    min-max scaling applied before training."""

    def __init__(self, alpha):
        # The smoothing parameter is forwarded straight to sklearn.
        self.classifier = MultinomialNB(alpha=alpha)

    @staticmethod
    def normalizer(x_abnormal):
        """Min-max scale an array into [0, 1]; constant arrays are returned
        unchanged (avoids division by zero)."""
        lo = x_abnormal.min()
        hi = x_abnormal.max()
        if lo == hi:
            return x_abnormal
        return (x_abnormal - lo) / (hi - lo)

    def train(self, x_train, y_train):
        """Fit the classifier on row-wise normalized features."""
        scaled = numpy.empty(x_train.shape)
        for row in range(len(x_train)):
            scaled[row] = self.normalizer(x_train[row])
        self.classifier.fit(scaled, y_train)

    def predict(self, value):
        """Predict the class of a single feature vector."""
        return self.classifier.predict(numpy.reshape(value, (1, len(value))))

    def all_classes_result(self, value):
        """Per-class probabilities for a single feature vector."""
        return self.classifier.predict_proba(numpy.reshape(value, (1, len(value))))

    def test_score(self, x_test, y_test):
        """Accuracy on the given test set, expressed as a percentage."""
        return self.classifier.score(x_test, y_test) * 100
|
import myUsb
from myUsb import Queue
import json
import urllib2
from hashlib import *
import random
import unittest
import Webbrowser
import os
"""
To Do:
Function to store blockchain
Function to read blockchain
function to send manipulate JSON
main() function
"""
# stores all current data
currentData = []
# Stores the current hash value
# NOTE(review): initialised to an empty tuple but later passed to display()
# as printable text — confirm the intended type.
currentHash = ()
# Stores users hash number (For programming visiting device)
newUserHash = ''
# For hash output
hashOut = ''
# Hashes stored here with objectives as key value pairs
# NOTE(review): declared as a list, but writeSessionObj() assigns into it
# like a dict (hashList[key] = value) — one of the two needs changing.
hashList = []
# ======================================================================
# Web page functionality built using code from https://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/webtemplates.html
# Web page template
wpgMessage = """<!DOCTYPE html>
<html>
<head>
<title>Found Data</title>
<style>
span {
background-color:SlateBlue;
border:2px solid SlateBlue;
display: block;
margin: 5px;
padding: 10px;
}
h3{
font-family: "Times New Roman", Times, serif;
color: Gray;
}
p {
font-family: "Calibri", Arial, serif;
}
</style>
</head>
<body>
<span>
<h1>Found Data</h1>
<div>
<h3>Block Data:</h3>
<p>Current data block contents: {blockData}</p>
</div>
<div>
<h3>Hashes:</h3>
<p>Current hash: {wpgHashes}</p>
</div>
<div>
<h3> Session Objectives:</h3>
<p>{wpgHashList}</p>
</span>
</body>
</html>"""
# ======================================================================
# Results page handle.  Fix: the original `open(results.html, w)` referenced
# two undefined names (NameError at import); both were meant to be string
# literals.
wpg = open("results.html", "w")
# Writes data into JSON object
# Currently unused (need to work out for C side of things)
def prepare(data, filename="packedData"):
    """Serialize *data* as JSON to *filename*.

    Fix: the original passed the undefined name `packedData` to open()
    (NameError); it is now a string default, so existing zero-extra-arg
    calls keep working.  A context manager closes the file reliably.
    """
    with open(filename, 'w') as i:
        json.dump(data, i)
# Send data to html page and display page
def display(data, hashData, hashList):
    """Render the found-data page for the given values and open it locally.

    Fix: wpgMessage.format(**locals()) raised at runtime because the
    template's CSS contains literal braces ("span {" etc.), which
    str.format treats as placeholders.  Each placeholder is substituted
    individually instead.
    """
    contents = wpgMessage
    contents = contents.replace("{blockData}", str(data))
    contents = contents.replace("{wpgHashes}", str(hashData))
    contents = contents.replace("{wpgHashList}", str(hashList))
    browseLocal(contents)
# Prepares web page for display
def browseLocal(webpageText=wpgMessage, filename='currentDataWPG.html'):
    '''Starts a webbrowser on a local file containing the text
    with given filename.'''
    # Fix: the module-level `import Webbrowser` binds the wrong name (and is
    # not the stdlib module); import the real lower-case module locally so
    # the webbrowser.open() call below resolves.
    import webbrowser
    strToFile(webpageText, filename)
    webbrowser.open("file:///" + os.path.abspath(filename))
# Writes string to a file
def strToFile(data, filename):
    """Write *data* to *filename*.

    Fix: the original passed the undefined name `w` instead of the mode
    string "w" (NameError) and never guaranteed the handle was closed on
    error; a context manager handles both.
    """
    with open(filename, "w") as outFile:
        outFile.write(data)
# Write the session objectives and return as a hashnumber
# Also adds unhashed objectives to a dict obj
def writeSessionObj():
    # NOTE(review): raw_input is Python 2 only; under Python 3 use input().
    tempObj = ''
    newSessionObj = raw_input("\nInput session objectives for this session:\n")
    tempObj = hashIt(newSessionObj)
    # NOTE(review): hashList is declared as a *list* at module level, so this
    # keyed assignment raises TypeError — hashList should be a dict.
    # Also, despite the comment above, nothing is returned from here.
    hashList[tempObj] = newSessionObj
# This hash function is theoretically insecure in this manner of use
def writeUserHash():
    """Build a new visitor hash from a user name plus a random number.

    Fixes: `randint` was not in scope (only the `random` module is
    imported), and the result was bound to a function-local instead of
    the module-level `newUserHash` that genHash() reads.
    """
    global newUserHash
    first = raw_input("Input Users Name")
    second = str(random.randint(1000000000, 9999999999))
    newUserHash = hashIt(first + second)
def hashIt(data):
    """Return the hex SHA-256 digest of *data*.

    Fixes three defects in the original: `hashlib` was not a bound name
    (the module is imported with `from hashlib import *`), str() of the
    digest object yielded its repr rather than the digest, and nothing
    was returned.
    """
    return sha256(data.encode("utf-8")).hexdigest()
# Future functionality is intended to modify this script and allow rewriting of the microbit on the fly
# def writeToMicrobit(data):
    # Need to find out how to write data to microbit with python
    # Treat this like a document?
# NOTE(review): the triple-quoted block below is dead placeholder code kept
# as a sketch only; it is not valid Python (e.g. `dict currentBlock = []`,
# `&&`) and must not be un-commented as-is.
"""import microbit
import radio
# turn radio on
radio.on()
# set channel, power to max, assign group
radio.config(channel=7, power=10, group=1)
tX = radio.send()
rX = radio.receive()
dict currentBlock = []
# While loop to send beacon signal
while rX == False:
    tX = "SYN"
    if rX == "ACK":
        # Placeholder in use here
        tX = '%s' % (data)
    # Wrong, work out how to actually validate for a 32bit string
    elif type(rX) == str && len(rX) == 32:
        # Needs code to validate
        # store new block in dict obj
        currentBlock = rX
    else:
        # Ensure that the script returns to sending signal code
        return"""
def quitProg():
    """Terminate the program.

    Fix: `sys` was never imported at module level, so this raised
    NameError instead of exiting; a local import keeps the fix contained.
    """
    import sys
    sys.exit()
def sesObj():
    # NOTE(review): `sessionObjectives` is not defined anywhere in this
    # module and writeSessionObj() returns nothing, so the send() call
    # raises NameError as written.  Confirm what payload myUsb.send()
    # actually expects.
    writeSessionObj()
    myUsb.send(sessionObjectives)
def bData():
    """Display the current block data and hashes in the browser.

    Fix: the keyword was misspelled `hashlist` while display() takes
    `hashList`, so every call died with TypeError — and the bare except
    hid it by always printing the error banner.
    """
    try:
        display(data=currentData, hashData=currentHash, hashList=hashList)
    except Exception:
        # Best-effort behaviour preserved, but SystemExit etc. now propagate.
        print("\nError displaying data! D:\n")
def genHash():
    # Refresh and print the module-level visitor hash.
    # NOTE(review): writeUserHash() as written binds a *local* newUserHash,
    # so the print below shows the stale module-level value — confirm.
    writeUserHash()
    print("New user hash: \n")
    print(newUserHash)
def main():
    """Interactive menu: dispatch numbered choices to the feature functions.

    Fix: the dispatch table referenced `sObj`, a typo for the function
    actually defined above (`sesObj`), so the module failed with NameError
    the moment main() ran.
    """
    # NOTE(review): `out` is never defined in this module — this probe always
    # reports a connection error.  Presumably a myUsb handle was intended;
    # confirm against myUsb's API.
    try:
        out
    except:
        print("\nUSB Connection error!\n")
    choice = { 0 : quitProg,
               1 : sesObj,
               2 : bData,
               3 : genHash}
    while True:
        try:
            choice[input("Select Option:\n 1 - Write session objectives\n 2 - Read block data\n 3 - Generate user hashes for visitor tags \n 4 - ###UNUSED CURRENTLY### 0 - Exit")]()
        except:
            print("That isn't an available option") #should implement proper logging here and evaluate the catch
    return True
# NOTE(review): `incoming` is never defined in this module (perhaps a
# myUsb.Queue instance was intended), so this block raises NameError at
# import time.  Dequeuing while iterating also mutates the structure being
# traversed — confirm the intended semantics.
datalist = []
for i in incoming:
    datalist.append(i)
    incoming.dequeue(i)
for j in datalist:
    currentData.append(j)
"""
TO DO:
Include method of writing hashcode to visitor microbit
"""
# ============================================UNIT TESTS==============================================
# class unitTests(unittest.TestCase):
# """docstring for writeSessio"""
# def testHash(self):
# try:
# self.assertEqual(writeUserHash("this"), 1EB79602411EF02CF6FE117897015FFF89F80FACE4ECCD50425C45149B148408)
# print("Hashes test ok")
# except:
# print("Hash tests fail D:")
|
#!/usr/bin/python3
import numpy as np
from numpy import linalg as LA
# Centroid Decomposition, with the optional possibility of specifying truncation or usage of initial sign vectors
def centroid_decomposition(matrix, truncation = 0, SV = None):
# input processing
matrix = np.asarray(matrix, dtype=np.float64).copy()
n = len(matrix)
m = len(matrix[0])
if truncation == 0:
truncation = m
if truncation < 1 or truncation > m:
print("[Centroid Decomposition] Error: invalid truncation parameter k=" + str(truncation))
print("[Centroid Decomposition] Aboritng decomposition")
return None
if SV is None:
SV = default_SV(n, truncation)
if len(SV) != truncation:
print("[Centroid Decomposition] Error: provided list of Sign Vectors doesn't match in size with the truncation truncation parameter k=" + str(truncation))
print("[Centroid Decomposition] Aboritng decomposition")
return None
L = np.zeros((truncation, n))
R = np.zeros((truncation, m))
# main loop - goes up till the truncation param (maximum of which is the # of columns)
for j in range(0, truncation):
# calculate the sign vector
Z = local_sign_vector(matrix, SV[j])
# calculate the column of R by X^T * Z / ||X^T * Z||
R_i = matrix.T @ Z
R_i = R_i / np.linalg.norm(R_i)
R[j] = R_i
# calculate the column of L by X * R_i
L_i = matrix @ R_i
L[j] = L_i
# subtract the dimension generated by L_i and R_i from the original matrix
matrix = matrix - np.outer(L_i, R_i)
# update the new sign vector in the array
SV[j] = Z
#end for
return (L.T, R.T, SV)
#end function
# Algorithm: LSV (Local Sign Vector). Finds locally optimal sign vector Z, i.e.:
# Z being locally optimal means: for all Z' sign vectors s.t. Z' is one sign flip away from Z at some index j,
# we have that ||X^T * Z|| >= ||X^T * Z'||
def local_sign_vector(matrix, Z):
    """Greedy hill-climbing over single sign flips; returns the locally
    optimal sign vector Z (mutated in place and returned)."""
    n = len(matrix)
    m = len(matrix[0])
    # eps keeps strict '>' comparisons from oscillating on exact ties.
    eps = np.finfo(np.float64).eps
    Z = local_sign_vector_init(matrix, Z)
    # calculate initial product of X^T * Z with the current version of Z
    direction = matrix.T @ Z
    # calculate initial value of ||X^T * Z||
    lastNorm = np.linalg.norm(direction) ** 2 + eps
    flipped = True
    while flipped:
        # we terminate the loop if during the last pass we didn't flip a single sign
        flipped = False
        for i in range(0, n):
            signDouble = Z[i] * 2
            gradFlip = 0.0
            # calculate how ||X^T * Z|| would change if we would change the sign at position i
            # change to the values of D = X^T * Z is calculated as D_j_new = D_j - 2 * Z_i * M_ij for all j
            for j in range(0, m):
                localMod = direction[j] - signDouble * matrix[i][j]
                gradFlip += localMod * localMod
            # if it results in augmenting ||X^T * Z||
            # flip the sign and replace cached version of X^T * Z and its norm
            if gradFlip > lastNorm:
                flipped = True
                Z[i] = Z[i] * -1
                lastNorm = gradFlip + eps
                # signDouble still holds 2 * (old Z[i]), which is exactly the
                # factor needed to update the cached direction in place.
                for j in range(0, m):
                    direction[j] -= signDouble * matrix[i][j]
            #end for
        #end if
    #end for
    #end while
    return Z
#end function
# Auxiliary function for LSV:
# Z is initialized sequentiually where at each step we see which sign would give a larger increase to ||X^T * Z||
def local_sign_vector_init(matrix, Z):
    """Greedy sequential initialisation of the sign vector Z.

    Fix: the original set `direction = matrix[0]`, which *aliases* the
    first matrix row — the in-place accumulation below then silently
    overwrote the caller's matrix and corrupted subsequent computations.
    The accumulator is now an independent copy; the returned Z is unchanged.
    """
    n = len(matrix)
    m = len(matrix[0])
    direction = np.array(matrix[0], copy=True)
    for i in range(1, n):
        gradPlus = 0.0
        gradMinus = 0.0
        # Compare ||direction + row_i||^2 against ||direction - row_i||^2.
        for j in range(0, m):
            localModPlus = direction[j] + matrix[i][j]
            gradPlus += localModPlus * localModPlus
            localModMinus = direction[j] - matrix[i][j]
            gradMinus += localModMinus * localModMinus
        if gradMinus > gradPlus:
            Z[i] = -1
        for j in range(0, m):
            direction[j] += Z[i] * matrix[i][j]
    return Z
#end function
#initialize sign vector array with default values
def default_SV(n, k):
    """Return k independent all-ones sign vectors of length n
    (the default sign vector is (1, 1, ..., 1)^T)."""
    return [np.ones(n) for _ in range(k)]
#end function
def main():
    """Decompose matrix_100K.txt and write the L and R factors next to it."""
    # Input/output are fixed file names in the current working directory.
    matrix = np.loadtxt("matrix_100K.txt")
    L, R, Z = centroid_decomposition(matrix)
    np.savetxt("matrix_100K.L.txt", L, fmt="%10.5f")
    np.savetxt("matrix_100K.R.txt", R, fmt="%10.5f")
    #np.savetxt("matrix_100K.Z.txt", np.asarray(Z).T, fmt="%5.1f")
if __name__ == "__main__":
    main()
|
from django.db import transaction
from django.utils import timezone
from django.core.management.base import BaseCommand, CommandError
from tqdm import tqdm
from fetcher import tools
from fetcher.models import DataSource
from catalog.models import CatalogEntry, TLE
class Command(BaseCommand):
    """Imports TLE sets from configured DataSources into the catalog.

    Fixes: a duplicated `epoch_year` assignment is removed, and the
    `data == False` comparison is replaced by the identity form.
    """

    help = 'Import TLE from specified datasource'

    def add_arguments(self, parser):
        """
        Set command's arguments
        """
        parser.add_argument('system_name', nargs='+', type=str)

    def handle(self, *args, **options):
        """
        Main function
        """
        for system_name in options['system_name']:
            self.process_system_name(system_name)
        self.stdout.write(self.style.SUCCESS('Successfully imported TLEs'))

    def process_system_name(self, system_name):
        """
        Finds and process a DataSource by its system name; "all" selects
        every TLE-typed source.
        """
        if system_name == "all":
            sources = DataSource.objects.filter(type=DataSource.TLE)
        else:
            sources = DataSource.objects.filter(
                type=DataSource.TLE,
                system_name=system_name
            )
        if not len(sources):
            raise CommandError('DataSource "%s" does not exists' % system_name)
        for source in sources:
            # Record the poll time even when the download later fails.
            source.last_time_checked = timezone.now()
            source.save()
            self.stdout.write(source.url)
            data = tools.download(source.url)
            # tools.download() signals failure with the literal False.
            if data is False:
                raise CommandError("File could not be downloaded")
            self.parse(data.splitlines())

    @transaction.atomic
    def parse(self, lines):
        """
        Loop through received file and parse its content.
        TLEs come in groups of three lines (name + two element lines).
        """
        parser = tools.TleParser()
        group = {}
        for line in tqdm(lines, desc="Inserting ", total=len(lines)):
            group[len(group)] = line
            if len(group) == 3:
                self.update(parser.parse(group))
                group = {}

    def update(self, data):
        """
        Insert a TLE.  Returns False when skipped (unknown catalog entry or
        duplicate TLE); otherwise saves a new TLE row.
        """
        try:
            catalogEntry = CatalogEntry.objects.get(
                norad_catalog_number=data['satellite_number']
            )
        except CatalogEntry.DoesNotExist:
            # Do not insert the TLE if the catalog entry it makes reference to
            # does not exist
            return False
        try:
            TLE.objects.get(
                first_line=data['line_0_full'],
                second_line=data['line_1_full'],
                third_line=data['line_2_full'],
            )
            # Do not go further if the TLE already is in the database
            return False
        except TLE.DoesNotExist:
            pass
        tle = TLE()
        tle.first_line = data['line_0_full']
        tle.second_line = data['line_1_full']
        tle.third_line = data['line_2_full']
        tle.satellite_number = catalogEntry
        # NOTE(review): attribute name kept as-is; if the TLE model field is
        # actually spelled `classification`, this line only sets a plain
        # instance attribute and the value is never persisted — confirm
        # against the model/migrations before renaming.
        tle.classificatoin = data['classification']
        tle.international_designator_year = data['international_designator_year']
        tle.international_designator_number = data['international_designator_number']
        tle.international_designator_piece = data['international_designator_piece']
        tle.epoch_year = data['epoch_year']
        tle.epoch_day = data['epoch_day']
        tle.first_derivative_mean_motion = data['first_derivative_mean_motion']
        tle.second_derivative_mean_motion = data['second_derivative_mean_motion']
        tle.drag = data['drag']
        tle.set_number = data['set_number']
        tle.first_checksum = data['first_checksum']
        tle.inclination = data['inclination']
        tle.ascending_node = data['ascending_node']
        tle.eccentricity = data['eccentricity']
        tle.perigee_argument = data['perigee_argument']
        tle.mean_anomaly = data['mean_anomaly']
        tle.mean_motion = data['mean_motion']
        tle.revolution_number = data['revolution_number']
        tle.second_checksum = data['second_checksum']
        tle.added = timezone.localtime(timezone.now())
        tle.save()
|
# Generated by Django 3.0.3 on 2020-03-18 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: tightens the `mail` and `pseudo` fields on `membre`
    (French 'required' error messages, max_length 50, unique pseudo)."""

    dependencies = [
        ('main_app', '0014_auto_20200316_1711'),
    ]
    operations = [
        migrations.AlterField(
            model_name='membre',
            name='mail',
            field=models.EmailField(default='', error_messages={'required': 'Champ obligatoire'}, max_length=50),
        ),
        migrations.AlterField(
            model_name='membre',
            name='pseudo',
            field=models.CharField(error_messages={'required': 'Champ obligatoire'}, max_length=50, unique=True),
        ),
    ]
|
# For each test case: two players each call "PAR" (even) or "IMPAR" (odd),
# then both throw a number; the winner is whoever called the parity of the sum.
n = int(input())
for _ in range(0, n):
    p1, o1, p2, o2 = input().split(' ')
    n1, n2 = map(int, input().split(' '))
    total_is_even = (n1 + n2) % 2 == 0
    if o1 == 'PAR' and total_is_even:
        print(p1)
    elif o1 == 'IMPAR' and total_is_even:
        print(p2)
    elif o1 == 'IMPAR' and not total_is_even:
        print(p1)
    elif o1 == 'PAR' and not total_is_even:
        print(p2)
class verbose:
    """Progress-printing wrapper around a sized, indexable iterable.

    Fix: for iterables shorter than 20 items, len(iterable) // 20 == 0 and
    the `count % interval` check raised ZeroDivisionError on the very first
    element; the interval is now clamped to at least 1 (an explicit
    interval of 0 is clamped the same way).
    """

    def __init__(self, iterable, interval=None, fmt=None):
        """
        Parameters
        ----------
        iterable: iterable object to be wrapped (must support len() and indexing)
        interval: int or None, verbosing interval in loop count
        fmt: str or None, fstring for verbosing.
            bar: str, progress bar
            percent: float, progress [%]
        """
        self.count = 0
        self.iterable = iterable
        if interval is None:
            # Default: report roughly 20 times over the whole run.
            self.interval = max(1, len(self.iterable) // 20)
        else:
            self.interval = max(1, int(interval))
        self.fmt = fmt or '[{bar}] {percent: >3.1f}%'

    def __iter__(self):
        return self

    def __next__(self):
        if self.count >= len(self.iterable):
            # Final 100% line, then terminate iteration.
            print(f'\r{self._progressbar(progress=1.0)}')
            raise StopIteration
        ret = self.iterable[self.count]
        if self.count % self.interval == 0:
            progress = self.count / len(self.iterable)
            print(f'\r{self._progressbar(progress=progress)}', end='')
        self.count += 1
        return ret

    def _progressbar(self, progress: float) -> str:
        """Render the bar + percentage line for a progress value in [0, 1]."""
        bar_length = 10
        filled = int(bar_length * progress)
        bar = '>' * filled + ' ' * (bar_length - filled)
        return self.fmt.format(bar=bar, percent=progress * 100)
if __name__ == '__main__':
    # Demo: iterate 1000 items, reporting progress every 123rd iteration.
    from time import sleep
    print('test start')
    for i in verbose(range(1000), interval=123):
        sleep(0.01)
        # print(i)
    print('test end')
|
# -*- coding: utf-8 -*-
"""
close the rabbit connection when the HTTP API finish
- catch the sigkill?
- deconstructor in the flask ext?
- check connection errors
"""
import json
import pika
from restapi.services.detect import detector
from utilities.logs import get_logger
log = get_logger(__name__)
# Service label used to look up the queue configuration group
# (exchange/queue names etc.) from the service detector.
QUEUE_SERVICE = 'rabbit'
QUEUE_VARS = detector.load_group(label=QUEUE_SERVICE)
def prepare_message(instance, user=None, **params):
    """Assemble the dict logged to the EUDAT queue for one request.

    Produces the fields expected by the downstream consumer, e.g.::

        {
            "request_id": "<id of the handling instance>",
            "edmo_code":  <EUDAT centre code>,
            "datetime":   "20180328T10:08:30",
            "ip_number":  "<client address>",
            "program":    "METHOD:/endpoint/path",
            "user":       "<username or 'import_manager'>",
            ...extra keyword params (e.g. "log_string": "start"/"end")
        }

    *instance* identifies the request handler; *user* defaults to the
    import manager account; **params seed the returned dict.
    """
    message = dict(params)

    # The handler instance's identity doubles as the request id.
    message['request_id'] = str(id(instance))

    from b2stage.apis.commons.seadatacloud import seadata_vars
    message['edmo_code'] = seadata_vars.get('edmo_code')

    from datetime import datetime
    message['datetime'] = datetime.now().strftime("%Y%m%dT%H:%M:%S")

    from restapi.services.authentication import BaseAuthentication as Service
    ip, _ = Service.get_host_info()
    message['ip_number'] = ip

    from flask import request
    import re
    # Strip scheme and host, keeping only the endpoint path,
    # e.g. http://localhost:8080/api/pids/<PID> -> /api/pids/<PID>.
    endpoint = re.sub(r"https?://[^\/]+", '', request.url)
    message['program'] = request.method + ':' + endpoint

    message['user'] = 'import_manager' if user is None else user
    return message
def log_into_queue(instance, dictionary_message):
    """Publish *dictionary_message* to the EUDAT RabbitMQ logging queue.

    Does nothing outside production (returns False). Publish failures are
    only logged; the function still returns True in that case.
    """
    from restapi.confs import PRODUCTION
    if not PRODUCTION:
        return False
    exchange = QUEUE_VARS.get('exchange')
    queue = QUEUE_VARS.get('queue')
    # FIXME: as variables
    filter_code = 'de.dkrz.seadata.filter_code.foo.json'
    app_name = queue
    try:
        # NOTE: a connection is obtained per call; see
        # https://github.com/pika/pika/issues/397#issuecomment-35322410
        msg_queue = instance.get_service_instance(QUEUE_SERVICE)
        log.verbose("Connected to %s", QUEUE_SERVICE)
        channel = msg_queue.channel()
        channel.basic_publish(
            exchange=exchange,
            routing_key=queue,
            properties=pika.BasicProperties(
                delivery_mode=2,  # persistent message
                headers={'app_name': app_name, 'filter_code': filter_code},
            ),
            body=json.dumps(dictionary_message),
        )
    except BaseException as e:
        log.error("Failed to log:\n%s(%s)", e.__class__.__name__, e)
    else:
        log.verbose('Queue msg sent')
    # NOTE: the connection is deliberately left open (shared instance).
    return True
# def read(self):
# # log.info("Request: read a message")
# # self.get_input()
# # # log.pp(self._args, prefix_line='Parsed args')
# # current_queue = self._args.get('queue')
# # # connect
# # msg_queue = self.get_service_instance(self._queue_service)
# # log.debug("Connected to %s", self._queue_service)
# # # send a message
# # channel = msg_queue.channel()
# # channel.queue_declare(queue=current_queue)
# # def callback(ch, method, properties, body):
# # print("\n\nReceived: %r" % body)
# # import json
# # print(json.loads(body))
# # # associate callback to queue
# # channel.basic_consume(callback, queue=current_queue, no_ack=True)
# # # blocking
# # channel.start_consuming()
# # return "Received?"
# raise NotADirectoryError("Not available at the moment")
|
import subprocess
from flask import Flask
def create_app():
    """Application factory: configure Flask, launch the IPFS daemon, and
    register all blueprints. The daemon handle is stored in app.config so
    callers can terminate it later."""
    app = Flask(__name__)
    app.config["DEBUG"] = False
    app.config["SECRET_KEY"] = "pohu(jkC34&()sjhYN!mLoikdnJ??b7298YSos"
    app.config["IPFS_DAEMON"] = subprocess.Popen(
        ['ipfs', 'daemon'], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    from ipfs.views import blueprints
    for bp in blueprints:
        app.register_blueprint(bp)
        bp.app = app
    return app
import serial
# Serial port where the device (e.g. an Arduino) is attached.
port = "/dev/ttyACM0"
s1 = serial.Serial(port,9600)
s1.flushInput()
# Busy-poll the port forever, printing each received byte's ordinal value.
# NOTE(review): this spins the CPU; a blocking read or timeout may be preferable.
while True:
    if s1.inWaiting()>0:
        inputValue = s1.read(1)
        print(ord(inputValue))
|
"""
Heber Cooke 10/24/2019
Chapter 5 Exercise 7
This program takes a text file and prints the unique words in alphibetical order
"""
fileName = input("Enter the file name: ")
f = open(fileName)
s = f.read().split() #Spliting the file into words
d =[] #List for already seen words
for x in s: # looping the empty list to check for used words
if x not in d: #If word not used add the word to main list
d.append(x)
d.sort() # sorting the main list
print(d) |
# Usage: python3 predict-test-dir-IResNetV2.py [path_to_directory_with_pictures] [correct_class]
# For instance: "python3 predict-test-dir-IResNetV2.py ./data/validation/Pitbull 1"
import os
import sys
import argparse
import numpy as np
from argparse import RawTextHelpFormatter
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential, load_model
# InceptionResNetV2 expects 299x299 RGB input.
img_width, img_height = 299, 299
# Class index -> breed label (must match the training generator's order).
breeds = ("Alaskan Malamute", "Pitbull", "Golden Retriever")
counter_ok, counter_wrong = 0, 0
model_path = './models/model.h5'
model_weights_path = './models/weights.h5'
# CLI: positional picture directory + correct class number, optional --verbose.
parser = argparse.ArgumentParser(description='This program helps us to estimate the quality of learning.\nIt needs a trained InceptionResNetV2 network model, a path to directory containing dogs pictures to be tested and a correct category (class) number for these pictures.\nCorrect categories are:\n\n0 - Alaskan Malamute,\n1 - Pitbull,\n2 - Golden Retriever.\n\nThis software was written for BWI50202 .NET-Vertiefung Modul, Gruppe2,\nHochschule Niederrhein by Andrej Ovchinnikov, Jan Haupts and Yassine Magri.\nDecember 2018',
								add_help=True,
								epilog='Example of use: "python3 predict-test-dir-IResNetV2.py ./data/validation/Pitbull 1"',
								formatter_class=RawTextHelpFormatter)
parser.add_argument("path", action ="store", help='Path to directory containing pictures')
parser.add_argument("category", action ="store", help='Correct class number for these pictures', type=int)
parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
# Show the full help when called without arguments (before parse_args errors out).
if len(sys.argv) < 2:
	parser.print_help()
	sys.exit(1)
args = parser.parse_args()
if args.verbose:
	print("Verbosity turned on")
# Load the trained network and its weights once, before scanning the directory.
print("Das neuronale Netz und die Gewichte werden geladen. Bitte warten...")
model = load_model(model_path)
model.load_weights(model_weights_path)
print("Das neuronale Netz und die Gewichte wurden erfolgreich geladen :)")
def predict(file):
    """Classify one image file and return the argmax class index."""
    img = load_img(file, target_size=(img_width, img_height))
    batch = np.expand_dims(img_to_array(img), axis=0)
    scores = model.predict(preprocess_input(batch))[0]
    return np.argmax(scores)
# Walk the target directory and classify every .jpg.
# FIX: the original reused loop variable `i` in both nested loops (shadowing)
# and never used either index — unpack os.walk directly instead.
for dirpath, _, filenames in os.walk(args.path):
	for filename in filenames:
		if filename.startswith("."):
			continue
		elif filename.endswith(".jpg"):
			result = predict(dirpath + '/' + filename)
			if result == int(args.category):
				print("Korrekte Erkennung (als", breeds[result], "): ", dirpath + '/' + filename)
				counter_ok += 1
				continue
			else:
				print("Fehler: ", dirpath + '/' + filename, end='')
				print(' - Erkannt als ', breeds[result])
				counter_wrong += 1
print("--------------------------------------------------------------------------------")
print("Gesamtergebnis:")
print("Korrekt erkannt:", counter_ok)
print("Erkannt mit Fehler:", counter_wrong)
# FIX: guard against ZeroDivisionError when the directory holds no .jpg files.
total = counter_ok + counter_wrong
if total:
	print("Genauigkeit der Erkennung: %2.2f%%." % ((counter_ok/total)*100))
else:
	print("Keine .jpg-Dateien gefunden.")
|
#!/usr/bin/env python3
import argparse
import sys
from downloader import downloader
from cleanser import cleanser
from parser import parser
def main():
    # Pipeline: read CLI filters, download the month's files, clean them into
    # dataframe structures, then parse/emit the structured JSON to stdout.
    month, year = filter_param()
    file_list, file_format = downloader(month, year)
    df_struct = cleanser(file_list, file_format)
    parser(month, year, file_list, df_struct)
def filter_param(args=None):
    """Parse the -m/--mes and -a/--ano CLI arguments.

    FIX: the original default `args=sys.argv[1:]` was evaluated once at
    import time, freezing whatever argv held then; the None sentinel reads
    sys.argv at call time instead.

    Returns:
        tuple[int, int]: (mes, ano) — month and year of the collection.
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(description="Coletor de informações do site do MPF através do portal da transparencia. Em funcionamento normal enviará JSON estruturado para a stdout.")
    parser.add_argument("-m", "--mes", required=True,
                        help="Mês ao qual se refere a coleta.",)
    parser.add_argument("-a", "--ano", required=True,
                        help="Ano ao qual se refere a coleta.")
    param = parser.parse_args(args)
    mes = int(param.mes)
    ano = int(param.ano)
    return mes, ano
# CLI entry point.
if __name__ == '__main__':
    main()
|
#Setup
import praw, re, csv, random
#Validate Reddit Access
# NOTE(review): API credentials are hardcoded here — move them to a config
# file or environment variables before sharing/committing this script.
reddit = praw.Reddit(client_id='Dn_ef002ikq0dw',
                     client_secret='B_8gGLkYtz6aDmZ4tkP5Dj3BFIo',
                     password='zzzzzz',
                     user_agent='pix3lbot_scrape by /u/pix3lbot',
                     username='pix3lbot')
subreddit = reddit.subreddit('pix3lspace')
# Formatting helpers used throughout the console loop.
space = "\n"
sep = "----------"
#Download
def download(code):
    """Return the top-level comment bodies of the first hot submission whose
    title matches *code* (case-insensitive), keyed 1..n by position.
    Returns None when no submission matches."""
    for post in subreddit.hot(limit=1000):
        if not re.search(code, post.title, re.IGNORECASE):
            continue
        bodies = {}
        for idx, top_comment in enumerate(post.comments, start=1):
            bodies[idx] = top_comment.body
        return bodies
#Fetch
a = {}
v = ""
def fetch(code):
    """Print every stored comment for the channel matching *code*."""
    comments = download(code)
    for key in comments:
        print(comments[key])
#Upload
def create(code,data):
    """Reply *data* to every hot submission whose title matches *code*."""
    for post in subreddit.hot(limit=1000):
        if re.search(code, post.title, re.IGNORECASE):
            post.reply(str(data))
#Watch Channel
c = {}
def probe(code,location):
    # Watch the channel *code*: fetch its comments, append any not already in
    # ProbeData.txt, then recurse to keep polling.
    # NOTE(review): `c[key + location]` shifts the 1-based comment keys by
    # *location*; for location > 0 the largest keys raise KeyError — confirm
    # the intended offset semantics.
    # NOTE(review): rows read from the file keep their trailing newline, so
    # `row != c[counter]` is effectively always true; entries re-save forever.
    # NOTE(review): unbounded self-recursion will eventually exhaust the
    # interpreter's recursion limit — a while-loop is likely intended.
    c = download(code)
    for key in c:
        print("down: " + c[key + location])
    with open("ProbeData.txt", 'r') as fp:
        counter = 1
        for row in fp:
            if row != c[counter]:
                print("save: " + row)
                with open("ProbeData.txt", 'a') as fp:
                    fp.write(c[counter] + space)
            elif row == c[counter]:
                print("Up to date")
            counter = counter + 1
    probe(code,counter)
#Controller
def split(status):
    """Dispatch one console command to the matching bot action."""
    if status == "Download":
        name = input("File Location:" + space)
        fetch(name)
    elif status == "New Command Line":
        # The channel name is read but not yet acted upon.
        name = input("Command Channel:" + space)
    elif status == "Create":
        target = input("File Location:" + space)
        content = input("Content:" + space)
        create(target, content)
    elif status == "Probe":
        channel = input("Command Channel:" + space)
        # Seed the probe log with a separator line before watching.
        with open("ProbeData.txt", 'w') as fp:
            fp.write(sep + space)
        probe(channel, 0)
    else:
        print("Command Not Recognized")
#Run Loop
def run():
    # Read one command from stdin, dispatch it, then loop via self-recursion.
    # NOTE(review): each command adds a stack frame; CPython's default
    # recursion limit will end the session after roughly 1000 commands.
    status = input(space)
    split(status)
    run()
run()
|
import sys
import os
import csv
from referenceframefunc import *
from hdf5retrieval import *
import numpy as np
from scipy import stats
import h5py
from itertools import chain
####################
# SET PRIOR TO USE
####################
# Data directory for this run; all relative paths below resolve against it.
CWD = '/home/selwyni/Desktop/h5/Dec 20 Data'
os.chdir(CWD)
def readHDF5(filename, permissions='r'):
    """Open *filename* with h5py and return its '3Ddatacontainer' group."""
    handle = h5py.File(filename, permissions)
    return handle['3Ddatacontainer']
#####################################
# Read DREAM3D CSVs
#####################################
def findCSVname(hdf5name):
    """Map an h5 result filename to its DREAM3D statistics CSV name.

    e.g. 'f20_eqdata.h5' -> 'asp111_vol02.csv'
    """
    stem = hdf5name.split('data')[0]
    volume_digit = int(stem[1:3]) // 10
    shape_code = stem.split('_')[1]
    # Aspect-ratio tag per particle shape; unknown shapes fall back to ''.
    shape_tags = {'1051': '1051', 'eq': '111', 'disk': '10101'}
    shapetag = shape_tags.get(shape_code, '')
    return 'asp' + shapetag + '_vol0' + str(volume_digit) + '.csv'
def retrieveDataFromCSV(csvfile, timesteps):
    """Read per-grain average quaternions and surface-area/volume ratio.

    Returns a 5-tuple (q0, q1, q2, q3, shape); each entry is an
    (n_grains x timesteps) np.matrix with the per-grain value repeated
    across every timestep column.
    """
    keys = ('q0', 'q1', 'q2', 'q3', 'shape')
    cols = {k: [] for k in keys}
    with open(csvfile, 'r') as handle:
        reader = csv.reader(handle)
        indices = None
        # Skip any preamble until the header row is located.
        for row in reader:
            if row[0] == 'Feature_ID':
                indices = (row.index('AvgQuats_0'),
                           row.index('AvgQuats_1'),
                           row.index('AvgQuats_2'),
                           row.index('AvgQuats_3'),
                           row.index('SurfaceAreaVolumeRatio'))
                break
        # Every remaining row is one grain record.
        for row in reader:
            for key, col in zip(keys, indices):
                cols[key].append(row[col])
    def tiled(values):
        # One row per grain, one (identical) column per timestep.
        return np.transpose(np.matrix(np.tile(np.array(values, dtype=np.float32), (timesteps, 1))))
    return tuple(tiled(cols[k]) for k in keys)
################################################
# Writing Functions
################################################
def writeDatasetToHDF5(filename):
    """Compute per-grain / per-step statistics (SVM, EVM, Euler angles, slip,
    volumes, phase averages) and store them as datasets inside *filename*.

    FIX: the per-grain Euler-angle accumulation referenced phi1val/Phival/
    phi2val without ever assigning them (NameError on the first grain); they
    are now the per-grain mean Euler angles.
    """
    sampledata = readHDF5(filename, 'r+')
    datapointdirs = retrieveDatapoints(sampledata)
    dimensions = retrieveDimension(sampledata)
    grainIDs = retrieveGrainIDs(sampledata)
    numOfGrains = np.nanmax(grainIDs)
    phases = retrievePhase(sampledata)
    # Accumulators across all timesteps (one inner list per step).
    SVMs = []
    EVMs = []
    avgmeanSVM = []
    avgmeanEVM = []
    allAvgSVM = []
    allAvgEVM = []
    BCCSVM = []
    BCCEVM = []
    HCPSVM = []
    HCPEVM = []
    sigmaSVMs = []
    sigmaEVMs = []
    maxSVMs = []
    maxEVMs = []
    minSVMs = []
    minEVMs = []
    medianSVMs = []
    medianEVMs = []
    grainvolumes = []
    slipsys = []
    bungephi1 = []
    bungePhi = []
    bungephi2 = []
    timesteps = len(datapointdirs)
    for step in range(timesteps):
        print("Going through Step", step)
        SVM = retrieveSVM(datapointdirs[step], dimensions, 'SVM')
        EVM = retrieveEVM(datapointdirs[step], dimensions, 'EVM')
        slip = retrieveSlipInformation(datapointdirs[step], dimensions)
        Phi = retrieveEulerAngles(datapointdirs[step], dimensions, 'Phi')
        phi1 = retrieveEulerAngles(datapointdirs[step], dimensions, 'phi1')
        phi2 = retrieveEulerAngles(datapointdirs[step], dimensions,'phi2')
        # TODO REFACTOR THIS
        meanSVM = []
        meanEVM = []
        sigmaSVM = []
        sigmaEVM = []
        maxSVM = []
        maxEVM = []
        minSVM = []
        minEVM = []
        medianSVM = []
        medianEVM = []
        grainsize = []
        stepslipsys = []
        grainphi1 = []
        grainPhi = []
        grainphi2 = []
        for grainID in np.arange(1, numOfGrains + 1):
            # For the properties of individual grains.
            # Output is a list of 1 value per grain
            if (grainID % 100 == 0):
                print('\tGrain', grainID)
            condition = grainIDs == int(grainID)
            grainSVM = np.extract(condition, SVM)
            grainEVM = np.extract(condition, EVM)
            grainslip = np.extract(condition, slip)
            grainPhiSet = np.extract(condition, Phi)
            grainPhi1Set = np.extract(condition, phi1)
            grainPhi2Set = np.extract(condition, phi2)
            # NOTE(review): the quaternion mean is computed but not used —
            # presumably intended for the Euler averages below; confirm.
            (meanq0, meanq1, meanq2, meanq3) = grainAverageQuaternion(grainPhi1Set, grainPhiSet, grainPhi2Set)
            meanSVM.append(np.mean(grainSVM))
            meanEVM.append(np.mean(grainEVM))
            sigmaSVM.append(np.std(grainSVM))
            sigmaEVM.append(np.std(grainEVM))
            maxSVM.append(np.max(grainSVM))
            maxEVM.append(np.max(grainEVM))
            minSVM.append(np.min(grainSVM))
            minEVM.append(np.min(grainEVM))
            medianSVM.append(np.median(grainSVM))
            medianEVM.append(np.median(grainEVM))
            grainsize.append(np.sum(condition))
            stepslipsys.append(np.mean(grainslip))
            # FIX: phi1val/Phival/phi2val were never assigned (NameError).
            # Use the per-grain mean Euler angles — TODO confirm an
            # arithmetic mean is intended (cf. unused quaternion mean above).
            phi1val = np.mean(grainPhi1Set)
            Phival = np.mean(grainPhiSet)
            phi2val = np.mean(grainPhi2Set)
            grainphi1.append(phi1val)
            grainPhi.append(Phival)
            grainphi2.append(phi2val)
        for phase in [1,2]:
            # Pick out phase properties
            condition = phases == phase
            if (phase == 1):
                BCCSVMvals = np.extract(condition, SVM)
                BCCEVMvals = np.extract(condition, EVM)
                BCCSVM.append(np.mean(BCCSVMvals))
                BCCEVM.append(np.mean(BCCEVMvals))
            else:
                HCPSVMvals = np.extract(condition,SVM)
                HCPEVMvals = np.extract(condition,EVM)
                HCPSVM.append(np.mean(HCPSVMvals))
                HCPEVM.append(np.mean(HCPEVMvals))
        # Aggregating List of Grain by Grain properties
        SVMs.append(meanSVM)
        EVMs.append(meanEVM)
        sigmaSVMs.append(sigmaSVM)
        sigmaEVMs.append(sigmaEVM)
        maxSVMs.append(maxSVM)
        maxEVMs.append(maxEVM)
        minSVMs.append(minSVM)
        minEVMs.append(minEVM)
        medianSVMs.append(medianSVM)
        medianEVMs.append(medianEVM)
        grainvolumes.append(grainsize)
        slipsys.append(stepslipsys)
        bungephi1.append(grainphi1)
        bungePhi.append(grainPhi)
        bungephi2.append(grainphi2)
        # Grain weighted properties
        avgmeanSVM.append(np.mean(meanSVM))
        avgmeanEVM.append(np.mean(meanEVM))
        allAvgSVM.append(np.mean(SVM))
        allAvgEVM.append(np.mean(EVM))
    # Transpose so rows are grains (or steps) and columns are timesteps.
    allPoints = np.transpose(np.array([allAvgSVM, allAvgEVM]))
    avgmat = np.transpose(np.array([avgmeanSVM, avgmeanEVM]))
    SVMmat = np.transpose(np.array(SVMs))
    EVMmat = np.transpose(np.array(EVMs))
    sigmaSVMmat = np.transpose(np.array(sigmaSVMs))
    sigmaEVMmat = np.transpose(np.array(sigmaEVMs))
    maxSVMmat = np.transpose(np.array(maxSVMs))
    maxEVMmat = np.transpose(np.array(maxEVMs))
    minSVMmat = np.transpose(np.array(minSVMs))
    minEVMmat = np.transpose(np.array(minEVMs))
    medianSVMmat = np.transpose(np.array(medianSVMs))
    medianEVMmat = np.transpose(np.array(medianEVMs))
    BCCphasemat = np.transpose(np.array([BCCSVM, BCCEVM]))
    HCPphasemat = np.transpose(np.array([HCPSVM, HCPEVM]))
    grainsizemat = np.transpose(np.array(grainvolumes))
    slipmat = np.transpose(np.array(slipsys))
    phi1mat = np.transpose(np.array(bungephi1))
    Phimat = np.transpose(np.array(bungePhi))
    phi2mat = np.transpose(np.array(bungephi2))
    (q0mat, q1mat, q2mat, q3mat, shapemat) = retrieveDataFromCSV(CWD + '/Undeformed/CSV/' + findCSVname(filename), timesteps)
    # TODO Find orientation, get difference in quaternion space
    # Create each dataset only if absent (h5py raises on duplicates).
    # NOTE: a duplicated 'sigmaSVM' guard from the original was removed.
    if ('MeanSVM' not in sampledata):
        sampledata.create_dataset("MeanSVM", data=SVMmat)
    if ('MeanEVM' not in sampledata):
        sampledata.create_dataset("MeanEVM", data=EVMmat)
    if ('sigmaSVM' not in sampledata):
        sampledata.create_dataset("sigmaSVM", data = sigmaSVMmat)
    if ('sigmaEVM' not in sampledata):
        sampledata.create_dataset("sigmaEVM", data = sigmaEVMmat)
    if ('maxSVM' not in sampledata):
        sampledata.create_dataset("maxSVM", data = maxSVMmat)
    if ('maxEVM' not in sampledata):
        sampledata.create_dataset("maxEVM", data = maxEVMmat)
    if ('minSVM' not in sampledata):
        sampledata.create_dataset("minSVM", data = minSVMmat)
    if ('minEVM' not in sampledata):
        sampledata.create_dataset("minEVM", data = minEVMmat)
    if ('medianSVM' not in sampledata):
        sampledata.create_dataset("medianSVM", data = medianSVMmat)
    if ('medianEVM' not in sampledata):
        sampledata.create_dataset("medianEVM", data = medianEVMmat)
    if ('StepAverages' not in sampledata):
        sampledata.create_dataset("StepAverages", data=avgmat)
    if ('AllPoints' not in sampledata):
        sampledata.create_dataset("AllPoints", data=allPoints)
    if ('MeanBCCAvgs' not in sampledata):
        sampledata.create_dataset("MeanBCCAvgs", data=BCCphasemat)
    if ('MeanHCPAvgs' not in sampledata):
        sampledata.create_dataset("MeanHCPAvgs", data = HCPphasemat)
    if ('grainVolume' not in sampledata):
        sampledata.create_dataset("grainVolume", data = grainsizemat)
    if ('avgSlipSys' not in sampledata):
        sampledata.create_dataset('avgSlipSys', data = slipmat)
    if ('grainAvgphi1' not in sampledata):
        sampledata.create_dataset('grainAvgphi1', data = phi1mat)
    if ('grainAvgPhi' not in sampledata):
        sampledata.create_dataset('grainAvgPhi', data = Phimat)
    if ('grainAvgphi2' not in sampledata):
        sampledata.create_dataset('grainAvgphi2', data = phi2mat)
    if ('grainAvgQuat0' not in sampledata):
        sampledata.create_dataset('grainAvgQuat0', data = q0mat)
    if ('grainAvgQuat1' not in sampledata):
        sampledata.create_dataset('grainAvgQuat1', data = q1mat)
    if ('grainAvgQuat2' not in sampledata):
        sampledata.create_dataset('grainAvgQuat2', data = q2mat)
    if ('grainAvgQuat3' not in sampledata):
        sampledata.create_dataset('grainAvgQuat3', data = q3mat)
    if ('surfaceAreaVolumeRatio' not in sampledata):
        sampledata.create_dataset('surfaceAreaVolumeRatio', data = shapemat)
def writeDatasetToCSV(sampledata, h5datasetName, sizeOfArray, csvFilename):
    """Copy one HDF5 dataset of shape *sizeOfArray* into a CSV file."""
    buffer = np.zeros(sizeOfArray)
    sampledata[h5datasetName].read_direct(buffer)
    np.savetxt(csvFilename, buffer, delimiter=',')
def writeCCADataToCSV(sampledata, numOfGrains, stepcount, datasets, steps, filename):
    """Write one '<filename>Step<n>.csv' per step: GrainIDs column followed by
    the requested datasets' values at that step."""
    for step in steps:
        columns = [np.arange(1, numOfGrains + 1)]
        header_parts = ['GrainIDs']
        for dataset in datasets:
            header_parts.append(dataset)
            full = np.zeros((numOfGrains, stepcount))
            sampledata[dataset].read_direct(full)
            columns.append(full[:, step])
        table = np.transpose(np.vstack(columns))
        np.savetxt(filename + 'Step' + str(step) + '.csv', table,
                   delimiter=',', header=','.join(header_parts), comments='')
def writeDataToCSV(filename):
    """Export every derived per-grain and per-step dataset of *filename* to
    CSV files, computing the datasets first when any are missing."""
    sampledata = readHDF5(filename, 'r+')
    stepcount = 0
    grainIDs = retrieveGrainIDs(sampledata)
    numOfGrains = int(np.nanmax(grainIDs))
    # Count the deformation steps stored as 'Step-*' entries.
    for step in sampledata:
        if ('Step-' in step):
            stepcount += 1
    datasetNames = [['MeanSVM', 'MeanEVM', 'sigmaSVM', 'sigmaEVM', 'maxSVM', 'maxEVM', 'minSVM', 'minEVM', 'medianSVM', 'medianEVM', 'grainVolume', 'avgSlipSys', 'grainAvgphi1', 'grainAvgPhi', 'grainAvgphi2', 'surfaceAreaVolumeRatio', 'grainAvgQuat0', 'grainAvgQuat1', 'grainAvgQuat2', 'grainAvgQuat3'],
    ['StepAverages', 'AllPoints', 'MeanBCCAvgs', 'MeanHCPAvgs']]
    # If any derived dataset is missing, compute and store all of them.
    for name in [item for sublist in datasetNames for item in sublist]:
        if (name not in sampledata):
            writeDatasetToHDF5(filename)
    topname = filename.split('.')[0]
    fileNames = [['GrainMeanSVM', 'GrainMeanEVM', 'GrainSigmaSVM', 'GrainSigmaEVM', 'GrainMaxSVM', 'GrainMaxEVM', 'GrainMinSVM', 'GrainMinEVM', 'GrainMedianSVM', 'GrainMedianEVM', 'GrainVolume', 'SlipSystems', 'Phi1Angle', 'PhiAngle', 'Phi2Angle', 'SurfaceAreaVolumeRatio', 'GrainAvgQuat0', 'GrainAvgQuat1', 'GrainAvgQuat2', 'GrainAvgQuat3'],
    ['TimeStepGrainAvg', 'TimeStepVolumeAvg', 'TimeBCCAvg', 'TimeHCPAvg']]
    for index in range(2):
        # FIX: this line was corrupted in the source
        # ('for datastring_compare(stringset in range(...)') — restored the
        # loop over dataset positions that the body below indexes with.
        for dataset in range(len(datasetNames[index])):
            if (index == 0):
                # Per-grain datasets: one row per grain, one column per step.
                arrshape = (numOfGrains, stepcount)
            elif (index == 1):
                # Per-step datasets: (SVM, EVM) pairs per step.
                arrshape = (stepcount, 2)
            writeDatasetToCSV(sampledata, datasetNames[index][dataset], arrshape, topname + fileNames[index][dataset] + '.csv')
    writeCCADataToCSV(sampledata, numOfGrains, stepcount, ['grainVolume', 'surfaceAreaVolumeRatio', 'MeanSVM', 'MeanEVM'], [0,1,2,3,4,5,6,7,8,9], topname + 'CCA')
# Batch-process every volume-fraction / particle-shape combination.
for vol in ['f20_', 'f40_', 'f60_']:
    for datatype in ['eqdata.h5', 'diskdata.h5', '1051data.h5']:
        writeDataToCSV(vol + datatype)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 10:41:16 2019
@author: Administrator
99: program finished
1 : adds num in two positions and store the result in third position.
2 : multiplies num in two positions and store the result in third position.
3 : takes an input to store in a specific pos
4 : outputs the value in the specific pos
5 : jump_if_true
6 : jump if false
7 : less than
8 : equals
9 : move relative base
"""
import copy
def int_compute(code_list, iter_input):
    """Run an Intcode program (Advent of Code 2019, day-9 instruction set).

    Args:
        code_list: program memory as a list of ints; mutated in place.
        iter_input: the integer supplied to every input (opcode 3) instruction.

    Returns:
        -1 on a normal halt (opcode 99); None on an invalid opcode.
        Output instructions (opcode 4) print their value to stdout.

    Fixes vs. the original: removed a stray debug `print(op_mode)` in the
    opcode-1 store path, and collapsed the nine near-identical mode-decoding
    ladders into two helpers.
    """
    cursor = 0
    rela_base = 0

    def param(offset, mode):
        # Value of the parameter at cursor+offset.
        # Modes: 0 = position, 1 = immediate, 2 = relative.
        raw = code_list[cursor + offset]
        if mode == 0:
            return code_list[raw]
        if mode == 1:
            return raw
        if mode == 2:
            return code_list[rela_base + raw]
        print('error getting addr in op' + str(op_code))

    def dest(offset, mode):
        # Write address at cursor+offset. Mode 1 is invalid for writes;
        # non-zero modes are treated as relative (matching the original's
        # handling of opcode 3).
        raw = code_list[cursor + offset]
        if mode == 0:
            return raw
        return rela_base + raw

    while True:
        instr = code_list[cursor]
        op_code = instr % 100
        modes = [(instr // 100) % 10, (instr // 1000) % 10, (instr // 10000) % 10]
        if op_code == 99:
            # Preserve the original halt diagnostic (prints the cell just
            # before the halt instruction).
            print("program halt at: " + str(code_list[cursor - 1]), end=', ')
            return -1
        if op_code in (1, 2, 7, 8):
            # Binary ops: add, multiply, less-than, equals.
            p1 = param(1, modes[0])
            p2 = param(2, modes[1])
            if op_code == 1:
                value = p1 + p2
            elif op_code == 2:
                value = p1 * p2
            elif op_code == 7:
                value = 1 if p1 < p2 else 0
            else:
                value = 1 if p1 == p2 else 0
            code_list[dest(3, modes[2])] = value
            cursor += 4
        elif op_code == 3:
            # Input: store the (single, fixed) input value.
            code_list[dest(1, modes[0])] = iter_input
            cursor += 2
        elif op_code == 4:
            # Output: print the parameter value.
            print("the output value is (mode %d): %s" % (modes[0], param(1, modes[0])))
            cursor += 2
        elif op_code in (5, 6):
            # Conditional jumps: jump-if-true (5) / jump-if-false (6).
            p1 = param(1, modes[0])
            p2 = param(2, modes[1])
            take_jump = (p1 != 0) if op_code == 5 else (p1 == 0)
            if take_jump:
                cursor = p2
            else:
                cursor += 3
        elif op_code == 9:
            # Adjust the relative base.
            rela_base += param(1, modes[0])
            cursor += 2
        else:
            print('break: error!')
            return None
# Opcodes understood by int_compute (kept for backward compatibility).
correct_op = [1,2,3,4,5,6,7,8,9,99]
if __name__ == '__main__':
    # Load the comma-separated Intcode program. 'with' guarantees the file
    # is closed even if parsing raises (the original closed it manually).
    with open("input.txt", "r") as f:
        line = f.read()
    # The original passed maxsplit=line.count(','), which is a no-op;
    # a plain split is equivalent.
    mem = list(map(int, line.split(',')))
    # Day-9 Intcode machines may address memory beyond the program,
    # so append a block of zeroed cells.
    mem.extend([0] * 10000)
    # Deep-copy so `mem` itself is left unmodified; input value 2 = part two.
    int_compute(copy.deepcopy(mem), 2)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 15:37:49 2020
@author: Buzoni
This code can calculate the PnL, the DV01 and the Unit Price of
Brazilian Inflation-Linked Bonds
"""
from datetime import datetime, timedelta, date
from bizdays import Calendar
#Getting brazilians holidays from a txt.
# Context manager guarantees the handle is closed even if a read fails
# (the original opened/closed it manually).
with open('C:/Users/buzon/Documents/Python/Arquivos base/Anbima.txt', 'r') as HOLIDAYS_TXT:
    HOLIDAYS = [linha.strip() for linha in HOLIDAYS_TXT]
#Holidays and dates.
cal = Calendar(HOLIDAYS, ['Sunday', 'Saturday'])
dt_today = date.today()
# Settlement date: previous business day (D-1).
dt_settlement = cal.adjust_previous(date.today()+timedelta(-1))
#Constants.
FORMAT = '%d%m%Y'
CDI = 0.019 # annual rate, business/252 convention
# Daily carry factor derived from CDI.
CARRY = (1 + CDI)**(1/252)
INDEX_IPCA_INT = 1614.62 #july2000 base index
# Semi-annual coupon factors: 6% p.a. (standard NTN-B) and 12% p.a. (C31).
CUPOM = (1 + 0.06)**(1/2) - 1
CUPOM_31 = (1 + 0.12)**(1/2) - 1
#INPUTS
INDEX_IPCA_RELEASED = 5344.63 #july2020
PREVIEW_IPCA = 0.21  # projected monthly IPCA (%)
#INPUTS (IF NECESSARY)
INDEX_IPCA_PRIOR = 5344.63
PREVIEW_IPCA_PRIOR = 0.21 # per month
def truncate(number, decimal):
    """Truncate *number* to *decimal* decimal places (no rounding).

    Works on the decimal string representation of the value.
    FIX: values whose str() has no '.' (ints) were mangled by the original
    slice (e.g. truncate(1000, 2) returned 10.0); they now pass through.
    NOTE(review): reprs in scientific notation (e.g. 1.5e-08) are still not
    handled — confirm inputs stay in plain decimal range.
    """
    num = str(number)
    if '.' not in num:
        return float(num)
    return float(num[:num.find('.') + decimal + 1])
def get_prior_date(dt_stm=dt_settlement):
    """Return the most recent day-15 (IPCA anchor date) on or before dt_stm."""
    if dt_stm.day >= 15:
        return date(dt_stm.year, dt_stm.month, 15)
    # Step back roughly one month of business days, then pin to the 15th.
    return cal.offset(dt_stm, -21).replace(day=15)
def get_next_date(dt_stm=dt_settlement):
    """Return the next day-15 (IPCA anchor date) strictly after mid-month,
    or this month's 15th when dt_stm falls before it."""
    if dt_stm.day < 15:
        return date(dt_stm.year, dt_stm.month, 15)
    # Step forward roughly one month of business days, then pin to the 15th.
    return cal.offset(dt_stm, 20).replace(day=15)
def project_index(coef=INDEX_IPCA_RELEASED, preview=PREVIEW_IPCA, dt_stm=dt_settlement):
    """Pro-rata projection of the IPCA index between two day-15 anchors,
    compounding *preview* (% per month) over the elapsed business-day share."""
    window_start = cal.adjust_next(get_prior_date(dt_stm))
    window_end = cal.adjust_next(get_next_date(dt_stm))
    elapsed = cal.bizdays(window_start, dt_stm)
    span = cal.bizdays(window_start, window_end)
    return coef * (1 + preview / 100) ** (elapsed / span)
def ratio_ipca(coef=INDEX_IPCA_RELEASED, preview=PREVIEW_IPCA, dt_stm=dt_settlement):
    """Projected inflation factor since the base index (July 2000)."""
    projected = project_index(coef, preview, dt_stm)
    return projected / INDEX_IPCA_INT
def get_vna(ratio):
    """Updated nominal value: R$1000 face value scaled by the inflation
    *ratio*, truncated to 6 decimals."""
    nominal = ratio * 1000
    return truncate(nominal, 6)
def get_maturity(asset):
    """Return the maturity datetime for an NTN-B ('Bxx') or NTN-C ('Cxx').

    NTN-B: even years mature 15/08, odd years 15/05.
    NTN-C: the 2021 issue matures 01/04; the others (e.g. C31) 01/01.
    Prints a warning and returns None for unrecognized prefixes.
    """
    if asset[:1].upper() == 'B':
        if int(asset[1:3]) % 2 == 0:
            maturity = '150820' + asset[1:3]
        else:
            maturity = '150520' + asset[1:3]
    elif asset[:1].upper() == 'C':
        # FIX: the original test `% 2 == 21` can never be true (x % 2 is
        # 0 or 1), leaving the April branch unreachable. NTN-C 2021 is the
        # 01/04 issue while C31 matures 01/01 — TODO confirm against the
        # official NTN-C maturity schedule.
        if int(asset[1:3]) == 21:
            maturity = '010420' + asset[1:3]
        else:
            maturity = '010120' + asset[1:3]
    else:
        print('Non valid Asset!')
        return None
    return datetime.strptime(maturity, FORMAT)
def create_cash_flow(asset, dt_stm=dt_settlement):
    """Remaining coupon dates of a NTN-B, final maturity first (newest to
    oldest), down to the first date after *dt_stm*."""
    flow = []
    due = get_maturity(asset)
    due = date(due.year, due.month, due.day)
    while due > dt_stm:
        flow.append(due)
        # Previous semi-annual coupon: back ~126 business days, on the 15th.
        due = cal.adjust_next(cal.offset(due, -126).replace(day=15))
    return flow
def quotation_rate(asset, cls_yield, dt_stm=dt_settlement):
    """Quotation: present value of the coupon flow per unit of face value.

    Args:
        asset: bond code ('B25', 'C31', ...); C31 pays the 12% coupon.
        cls_yield: closing yield in percent (e.g. 2.70).
        dt_stm: settlement date used for business-day discounting.

    FIX: the cash flow is now built from *dt_stm* — the original called
    create_cash_flow(asset) with the default settlement date even when a
    different dt_stm was supplied, so the flow and the discount-day counts
    could disagree.
    """
    cupom = CUPOM_31 if asset == 'C31' else CUPOM
    rate = cls_yield / 100
    cash_flow = create_cash_flow(asset, dt_stm)
    quot_rate = 0
    for position, maturity_flow in enumerate(cash_flow):
        bussiness_day = cal.bizdays(dt_stm, maturity_flow)
        # The first entry is the final maturity: coupon plus principal.
        coupon = (1 + cupom) if position == 0 else cupom
        quot_rate += coupon / ((1 + rate) ** (bussiness_day / 252))
    return quot_rate
def unit_price(asset, cls_yield, dt_stm=dt_settlement, preview=PREVIEW_IPCA,
               coef=INDEX_IPCA_RELEASED):
    """Unit price = VNA (inflation-updated nominal) x quotation rate,
    truncated to 6 decimals."""
    vna = get_vna(ratio_ipca(coef=coef, preview=preview, dt_stm=dt_stm))
    quot = quotation_rate(asset, cls_yield, dt_stm)
    return truncate(vna * quot, 6)
def pnl_b(asset, clsd2_yield, cls_yield, intra=False, dt_stm=dt_settlement):
    """PnL between the prior close (*clsd2_yield*) and today's (*cls_yield*).

    Prompts for the position size on stdin. Overnight positions carry the
    prior price at CDI unless *intra* is True.
    """
    quantity = int(input('Quantities: '))
    prev_day = cal.adjust_previous(dt_stm + timedelta(-1))
    price_prev = unit_price(asset, clsd2_yield, prev_day, PREVIEW_IPCA_PRIOR, INDEX_IPCA_PRIOR)
    price_now = unit_price(asset, cls_yield, dt_stm)
    if not intra:
        price_prev *= CARRY
    return (price_now - price_prev) * quantity
def dv_b(asset, cls_yield, intra=False, dt_stm=dt_settlement):
    """DV01: the PnL of a one-basis-point (0.01) upward yield shift."""
    shifted_yield = cls_yield + 0.01
    return pnl_b(asset, cls_yield, shifted_yield, intra, dt_stm)
#Here, I can compare the Unit Price that I calculate and the oficial unit price released
#by ANBIMA.
# Sanity check: difference between our unit prices and ANBIMA's official
# published prices for each NTN-B (values should be ~0 when the model agrees).
RND = 2
SETTLEMENT_DAY = dt_today
B21 = round(unit_price('b21', -1.5470, SETTLEMENT_DAY) - 3547.971638, RND)
B22 = round(unit_price('b22', -0.1254, SETTLEMENT_DAY) - 3716.020575, RND)
B23 = round(unit_price('b23', 0.2873, SETTLEMENT_DAY) - 3875.761221, RND)
B24 = round(unit_price('b24', 1.2948, SETTLEMENT_DAY) - 3914.786317, RND)
B25 = round(unit_price('b25', 1.7800, SETTLEMENT_DAY) - 3992.909446, RND)
B26 = round(unit_price('b26', 2.2700, SETTLEMENT_DAY) - 3998.495826, RND)
B28 = round(unit_price('b28', 2.7000, SETTLEMENT_DAY) - 4090.934677, RND)
B30 = round(unit_price('b30', 3.0500, SETTLEMENT_DAY) - 4147.060396, RND)
B35 = round(unit_price('b35', 3.4700, SETTLEMENT_DAY) - 4321.096872, RND)
B40 = round(unit_price('b40', 3.7728, SETTLEMENT_DAY) - 4340.995659, RND)
B45 = round(unit_price('b45', 4.0136, SETTLEMENT_DAY) - 4390.039770, RND)
B50 = round(unit_price('b50', 4.0337, SETTLEMENT_DAY) - 4441.954983, RND)
B55 = round(unit_price('b55', 4.0511, SETTLEMENT_DAY) - 4562.577704, RND)
difs = [B21, B22, B23, B24, B25, B26, B28, B30, B35, B40, B45, B50, B55]
labels = ['B21:', 'B22:', 'B23:', 'B24:', 'B25:', 'B26:', 'B28:',
          'B30:', 'B35:', 'B40:', 'B45:', 'B50:', 'B55:']
# FIX: replaced the manual index counter with zip (idiomatic pairing).
for label, ntnb in zip(labels, difs):
    print(label, ntnb)
|
import mysql.connector
import dbconfig as cfg

# One-off schema setup: create the accessories table.
db = mysql.connector.connect(
    host=cfg.mysql['host'],
    user=cfg.mysql['user'],
    password=cfg.mysql['password'],
    database=cfg.mysql['database']
)
cursor = db.cursor()
try:
    # IF NOT EXISTS makes this setup script safe to re-run (the original
    # raised "table already exists" on a second execution).
    sql = ("CREATE TABLE IF NOT EXISTS accessories ("
           "id INT AUTO_INCREMENT PRIMARY KEY, "
           "type VARCHAR(255), brand VARCHAR(255), price INT)")
    cursor.execute(sql)
finally:
    # Release the cursor and connection even when the DDL fails.
    cursor.close()
    db.close()
#could not get 2 databases working so replaced accessories with a quick img gallery
|
import os
# Scheme: "postgres+psycopg2://<USERNAME>:<PASSWORD>@<IP_ADDRESS>:<PORT>/<DATABASE_NAME>"
# NOTE(review): SQLAlchemy 1.4+ only accepts the "postgresql+psycopg2://"
# dialect name; the "postgres" spelling was removed — confirm which
# SQLAlchemy version consumes this URI.
DATABASE_URI = "postgres+psycopg2://postgres:123@localhost/user_management"

# Supported authentication providers (internal key -> provider id).
providers = {
    "LOCAL": "local",
    "GOOGLE": "google"
}
class Config:
    """Base application configuration.

    SECURITY: secrets used to be hard-coded here. They now default to the
    historical values (so existing deployments keep working) but can — and
    should — be overridden via environment variables so credentials never
    have to live in source control.
    """
    UPLOAD_FOLDER = 'static/'
    MAX_CONTENT_LENGTH = 10 * 1024 * 1024  # 10 MiB request-body cap
    # Session-signing key; override with the SECRET_KEY env var.
    SECRET_KEY = os.environ.get('SECRET_KEY', "HUEyqESqgQ1yTwzVlO6wprC9Kf1J1xuA")
    APP_TITLE = 'Application'
    MAIL_SERVER = os.environ.get('MAIL_SERVER', "smtp.googlemail.com")
    MAIL_PORT = 587
    MAIL_USE_TLS = 1
    MAIL_DEFAULT_SENDER = "support@flatlogic.com"
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME', "support@flatlogic.com")
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD', "UBU2JGC2wEqc")
    # Google Auth parameters
class DevConfig(Config):
    """Development overrides: local server endpoint and developer email."""
    EMAIL_ADDRESS = 'vladprivalov1990@gmail.com'
    REMOTE = "http://localhost:5000" # http://localhost:8080
class ProductionConfig(Config):
    """Production overrides: the deployed Heroku endpoint."""
    REMOTE = "https://sing-generator-node.herokuapp.com"
|
import datetime
import logging
import re
import StringIO

from google.appengine.api import images
from google.appengine.api import taskqueue
from google.appengine.ext import db

import app.lib.EXIF as EXIF
from app.model.account import Account
from app.model.accounts import Accounts
from app.model.place import Place
from app.model.places import Places
class Ticket(db.Model):
    """A single logged event, typically created from an inbound email
    (optionally carrying a photo), tied to an Account and a Place."""
    account = db.ReferenceProperty(Account)           # owning account
    created = db.DateTimeProperty(auto_now_add=True)  # datastore insert time
    source = db.StringProperty()                      # ingestion channel
    sender = db.StringProperty()                      # sender email address
    subject = db.StringProperty()                     # raw email subject
    body = db.StringProperty()
    date = db.DateTimeProperty()                      # event time, stored in UTC
    datestring = db.StringProperty()                  # raw date header text
    filename = db.StringProperty()
    file = db.BlobProperty()                          # attached (resized) image
    location = db.GeoPtProperty()
    place = db.ReferenceProperty(Place)
    # D = departure, A = arrival; S presumably "stop" — confirm.
    direction = db.StringProperty(choices=set(["D", "S", "A"]))
    odo = db.IntegerProperty()                        # odometer reading
    private = db.BooleanProperty()
    def init(self, sender=None, subject=None, datestring=None):
        """Reset the mutable fields of a freshly created ticket.

        NOTE(review): deliberately named ``init`` rather than ``__init__``,
        presumably to keep db.Model's own constructor intact — callers must
        invoke this explicitly. Confirm.
        """
        self.account = None
        self.date = None
        self.sender = sender
        self.subject = subject
        self.datestring = datestring
        self.private = False
def dateLocal(self):
dt = self.date
if self.account and dt:
dt = self.account.localizeDate(dt)
return dt
def displayDate(self):
dt = self.dateLocal()
d = datetime.date(dt.year, dt.month, dt.day)
if dt.hour < 4:
d = d + datetime.timedelta(days=-1)
return d
def displayPlace(self, address=False):
p = ''
if self.place:
if address:
if self.place.address and self.place.city:
p = self.place.address + ',' + self.place.city
elif self.place.address:
p = self.place.address
elif self.place.address:
p = self.place.city
else:
p = self.place.name
elif self.location:
#p = self.location.lat + ', ' + self.location.lon
p = "%.4f, %.4f" % (self.location.lat, self.location.lon)
return p
    def _toUTC(self, dt):
        """Best-effort conversion of a naive datetime to UTC.

        Prefers the numeric "+HHMM"/"-HHMM" offset at the end of the raw
        datestring; falls back to the account's time zone. On any parse
        failure *dt* is returned unchanged.
        """
        if self.datestring:
            d = self.datestring
            try:
                ss = d[len(d)-5:]   # trailing "+HHMM" / "-HHMM" offset
                s = ss[0]
                h = int(ss[1:3])
                m = int(ss[3:])
                if s == "+":
                    o = -1          # east of UTC: subtract the offset
                else:
                    o = 1
                dt = dt + datetime.timedelta(hours=o*h, minutes=o*m)
            except:
                pass                # unparsable offset: leave dt unchanged
        elif self.account:
            # try with time zone
            try:
                tz = self.account.tzinfo()
                if tz:
                    dt = tz.localize(dt)
            except:
                pass
        return dt
def _setLocation(self, location):
# update the location to a better version
if not self.location:
self.location = location
else:
if int(self.location.lat) == 0 and int(self.location.lon) == 0:
if int(location.lat) != 0 or int(location.lon) != 0:
self.location = location
def _updateEmail(self):
# clean-up of the email
if self.sender:
email = self.sender
if email.find("<") > 0:
m = re.search('<(.+)>', email, re.IGNORECASE )
if m != None:
self.sender = m.group(1)
def _updateSubject(self):
# Get info out of subject
if self.subject:
s = self.subject
# Arrival or Departure?
if not self.direction:
m = re.search(r'\b([aA]|[dD]||[sS][vV]{1})\b', s, re.IGNORECASE )
if m != None:
for v in m.groups():
if v.upper() in ['D', 'V']:
self.direction = 'D'
if v.upper() in ['A']:
self.direction = 'A'
if v.upper() in ['S']:
self.direction = 'S'
# Odo reading
if not self.odo:
m = re.search(r'\b(\d+)\b', s, re.IGNORECASE )
if m != None:
for v in m.groups():
self.odo = int(v)
# Allocation of the Account
def _updateAccount(self):
if not self.account:
account = Accounts.loadByEmail(self.sender) # Find the account based on the email
if account != None:
self.account = account
    def _updateEXIF(self):
        """Extract GPS location and timestamp from the photo's EXIF tags,
        then normalise the image: crop to a square, correct orientation,
        and resize to 240px.

        Requires self.account because _toUTC may need the account's time
        zone; does nothing without an attached file.
        """
        # Must have the Account, for timezone correction!
        if self.file and self.account: # and not self.location:
            # Get Image info
            ms = StringIO.StringIO(self.file)
            tags = EXIF.process_file(ms)
            # Get the right coordinates
            if 'GPS GPSLatitude' in tags and 'GPS GPSLongitude' in tags:
                # Get the right coordinates
                # EXIF stores degrees/minutes/seconds as rationals; fold
                # them into decimal degrees.
                x = tags['GPS GPSLatitude']
                s = float(x.values[2].num) / float(x.values[2].den)
                m = float(x.values[1].num) / float(x.values[1].den) + (s /60.0)
                d = float(x.values[0].num) / float(x.values[0].den) + (m /60.0)
                lat = d
                x = tags['GPS GPSLongitude']
                s = float(x.values[2].num) / float(x.values[2].den)
                m = float(x.values[1].num) / float(x.values[1].den) + (s /60.0)
                d = float(x.values[0].num) / float(x.values[0].den) + (m /60.0)
                long = d  # NOTE(review): shadows the Python 2 builtin ``long``
                # NOTE(review): no hemisphere (GPSLatitudeRef/GPSLongitudeRef)
                # sign handling — southern/western coordinates may need
                # negation; confirm against real photos.
                self.location = db.GeoPt(str(lat), str(long))
            # Get the right date
            dt = None
            if 'Image DateTime' in tags:
                x = tags['Image DateTime']
                dt = datetime.datetime.strptime(x.values, "%Y:%m:%d %H:%M:%S")
            elif 'EXIF DateTimeOriginal' in tags:
                x = tags['EXIF DateTimeOriginal']
                dt = datetime.datetime.strptime(x.values, "%Y:%m:%d %H:%M:%S")
            if dt:
                self.date = self._toUTC(dt)
            # Fix the image, transform and resize... later we will keep the original.. right now mess up
            img = images.Image(self.file)
            h = img.height
            w = img.width
            t = False   # becomes True when any transform was queued
            # Crop the image to a square
            r = w       # resulting square edge length
            if w > h: # landscape
                lx = float((w-h)/2) / float(w)
                rx = float(((w-h)/2)+h) / float(w)
                img.crop(lx, 0.0, rx, 1.0)
                t = True
                r = h
            elif h > w : # portrait
                ty = float((h-w)/2) / float(h)
                by = float(((h-w)/2)+w) / float(h)
                img.crop(0.0, ty, 1.0, by)
                t = True
                r = w
            # Get the orientation, and correct
            # NOTE(review): only the pure rotations (3, 6, 8) are handled;
            # mirrored orientations (2, 4, 5, 7) are silently ignored.
            if 'Image Orientation' in tags:
                x = tags['Image Orientation']
                o = x.values[0]
                if o == 2: # flip horizontal
                    pass
                elif o == 3: # rotate 180
                    img.rotate(180)
                    t = True
                elif o == 4: # flip vertical
                    pass
                elif o == 5: # transpose
                    pass
                elif o == 6: # rotate 90
                    img.rotate(90)
                    t = True
                    #w = h
                elif o == 7: # transverse
                    pass
                elif o == 8: # rotate 270
                    img.rotate(270)
                    t = True
                    #w = h
            # Transform is done.. now resize
            if r > 240:
                img.resize(240)
                t = True
            if t:
                self.file = db.Blob(img.execute_transforms(images.JPEG))
    # Fix the date based on email info
    def _updateDate(self):
        """Fill self.date from the RFC-2822 datestring when still unset,
        finally falling back to the datastore creation time."""
        # if not self.date == self.created and self.datestring:
        if self.date == None and self.datestring:
            d = self.datestring
            try:
                # Strip the weekday prefix ("Mon, ") and the trailing
                # " +HHMM" offset; the offset itself is applied by _toUTC.
                dt = datetime.datetime.strptime(d[5:len(d)-6], "%d %b %Y %H:%M:%S")
                self.date = self._toUTC(dt)
            except:
                pass    # unparsable header: fall through to `created`
        if self.date == None:
            self.date = self.created
# Allocation of matching Place
def _updatePlace(self):
if self.account and self.location and not self.place:
r = Place.proximity_fetch(
Place.all().filter('account =', self.account),
# places.Place.all(),
self.location,
max_results=10,
max_distance=500)
if len(r) == 1:
self.place = r[0]
    def _updateFuzzy(self):
        """Heuristically fill missing direction/odo fields using the
        previous ticket, and back-fill that previous ticket when possible.

        NOTE(review): the blocks below dereference prevTicket.place /
        self.place, so they are nested under the place checks — confirm
        this matches the original indentation.
        """
        # Try to fill as many defaults as possible.
        if not self.account:
            return
        # 1. Place is crucial
        bAtHome = False
        if self.place:
            bAtHome = self.place.type == 1   # place type 1 == home
        # Get the previous
        prevTicket = Ticket.getPrevious(self.account, self.date)
        if prevTicket:
            dt = self.dateLocal() # Make sure to calculate on local dates
            dtPrev = prevTicket.dateLocal()
            # Direction
            # ---------
            # Are we at home and the previous was before 04:00 we are leaving home
            if not self.direction:
                dtLimit = self.account.localizeDate(datetime.datetime(dt.year, dt.month, dt.day, 4, 0, 0, 0))
                if bAtHome and dtPrev < dtLimit: # First time leaving today
                    self.direction = "D"
            # Based on movement, so much have places
            if self.place and prevTicket.place:
                # Have we moved since arrival?
                if not self.direction:
                    if str(prevTicket.place.key()) == str(self.place.key()) and prevTicket.direction == 'A':
                        self.direction = "D"
                # Have we just arrived somewhere?
                if not self.direction:
                    if str(prevTicket.place.key()) != str(self.place.key()) and prevTicket.direction == 'D':
                        # Need to do average speed calculation to make realistic prediction
                        if self.odo and prevTicket.odo:
                            distance = self.odo - prevTicket.odo
                            td = dt - dtPrev
                            if td.days < 1:
                                h = (td.seconds / 3600.0)   # NOTE(review): unused
                                # plausible driving speed => we really travelled here
                                speed = (distance / (td.seconds / 3600.0))
                                if speed > 10.0 and speed < 120.0:
                                    self.direction = "A"
                # ODO
                # ---
                if not self.odo and prevTicket.odo:
                    # Are we leaving a place
                    if str(prevTicket.place.key()) == str(self.place.key()) and self.direction == 'D':
                        self.odo = prevTicket.odo
                # Do some update on the previous ticket (if appropriate)
                # ------------------------------------------------------
                # Direction, are we leaving here?
                if not prevTicket.direction:
                    if str(prevTicket.place.key()) == str(self.place.key()) and self.direction == 'D':
                        prevTicket.direction = "A"
                        prevTicket.store()
                # Update the location of the previous, in case it's missing
                if self.location and not prevTicket.location:
                    if self.odo == prevTicket.odo:
                        prevTicket.location = self.location
                        prevTicket.place = self.place
                        prevTicket.store()
    def update(self, params):
        """Apply a dict of request parameters to this ticket and persist it.

        Empty-string values clear the corresponding field; 'hint' and
        'foursquare' lookups only run when an account is attached.
        """
        # Now set the right values from the params
        if 'source' in params:
            self.source = params['source']
        if 'datestring' in params:
            self.datestring = params['datestring']
        if 'date' in params:
            d = datetime.datetime.strptime(params['date'],'%Y-%m-%dT%H:%M:%SZ')
            self.date = d
        if 'direction' in params:
            v = params['direction']
            if v == '':
                self.direction = None
            else:
                self.direction = v
        if 'odo' in params:
            v = params["odo"]
            if v == '':
                self.odo = None
            else:
                self.odo = int(v)
        if 'private' in params:
            v = params['private']
            if v == "1":
                self.private = True
            else:
                self.private = False
        if 'lat' in params and 'lon' in params:
            self.location = db.GeoPt(params['lat'], params['lon'])
        if 'place' in params:
            v = params["place"]
            if v == '':
                self.place = None
            else:
                p = Places.loadOne(v)
                if p:
                    self.place = p.key()
                    self._setLocation(p.location)
                #if not self.location:
                #    self.location = p.location
        # 'hint' is only honoured when no explicit place was resolved above.
        if 'hint' in params and not self.place and self.account:
            v = params['hint']
            p = None
            if v == 'home':
                p = Places.loadHomeForAccount(self.account)
            elif v == 'office':
                p = Places.loadOfficeForAccount(self.account)
            if p:
                self.place = p
                self._setLocation(p.location)
            # if not self.location:
            #     self.location = p.location
        if 'foursquare' in params and self.account:
            v = params["foursquare"]
            if v != '': # ignore if empty
                p = Places.loadByVenue(self.account, v)
                if p:
                    self.place = p.key()
                    self._setLocation(p.location)
        if 'content' in params:
            # NOTE(review): filename is always reset to 'Unknown', even when
            # no blob is stored — confirm this is intended.
            self.filename = 'Unknown'
            if params["content"]:
                self.file = db.Blob(params["content"])
        self.store()
def store(self):
# Some default values
# if not self.date:
# self.date = self.created
# cache the object
self.put()
def asJSON(self):
t = {'id': self.key().id(), 'odo': self.odo, 'direction': self.direction}
t['created'] = self.created.strftime('%Y-%m-%dT%H:%M:%SZ')
t['date'] = self.date.strftime('%Y-%m-%dT%H:%M:%SZ')
if self.location:
t['location'] = {'lat': self.location.lat, 'lon': self.location.lon}
if self.place:
#place = {'id': t.place.key().id(), 'name': t.place.name, 'type': t.place.type, 'address': t.place.address, 'zip': t.place.zip, 'city': t.place.city, 'state': t.place.state, 'country': t.place.country}
t['place'] = self.place.asJSON()
if self.place.type == 1:
t['hint'] = 'home'
elif self.place.type == 2:
t['hint'] = 'office'
return t
def queueProcess(self):
d = datetime.datetime.now()
n = 'Process-' + str(self.key()) + '-' + d.strftime('%d-%m-%Y-%H-%M-%S')
try:
taskqueue.add(url='/ticket/process/', name=n, params={'key': self.key()})
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
logging.exception('Could not queue task: ' + n)
    # Processing of the Raw information, happens after creating
    def process(self):
        """Run the full enrichment pipeline over the raw ticket, in order,
        then persist. Order matters: the account must be resolved before
        the EXIF and date steps, which need its time zone."""
        self._updateEmail()
        self._updateSubject()
        self._updateAccount() # Allocation is most important, cos timezone handling is user specific
        self._updateEXIF()
        self._updateDate() # Must come after EXIF update
        self._updatePlace()
        self._updateFuzzy()
        # .. and store the changes
        self.store()
@staticmethod
def getPrevious(account, date):
q = Ticket.all()
q.filter("account =", account)
q.filter("date <", date)
q.order("-date")
r = q.fetch(1)
if len(r) > 0:
return r[0]
else:
return None
@staticmethod
def getNext(account, date):
q = Ticket.all()
q.filter("account =", account)
q.filter("date >", date)
q.order("date")
r = q.fetch(1)
if len(r) > 0:
return r[0]
else:
return None
|
### Author: Acciente
### Version: 1.0
### Last modified date: 2017.06.29
### Usage : StrToRPN(inputStr)
### Function name is defined in funcList.
### Symbols that are not in funcList will be treated as unknown numbers.
# Recognised function names; any other alphabetic run is passed through
# as an unknown variable.
funcList = ("sin", "cos", "tan", "asin", "acos", "atan", "sqrt")
# Characters that may appear inside a number / inside an identifier.
numStr = "0123456789."
charStr = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Operator precedence (higher binds tighter); functions use priority 3.
priorDic = {"+" : 0, "-" : 0, "*" : 1, "/" : 1, "^" : 2}
class Error(Exception):
    """Base class for all parser errors raised by this module."""
    pass
class ParenthesesError(Error):
    """Raised when the parentheses in the input are unbalanced."""

    def __init__(self):
        super().__init__()
        self.message = "Parentheses don't make pair."

    def __str__(self):
        return self.message
class CharacterError(Error):
    """Raised when the input contains a character the parser cannot handle."""

    def __init__(self):
        super().__init__()
        self.message = "Unexpected character."

    def __str__(self):
        return self.message
def FindPairedBracket(inputStr, startIndex):
    """Return the index of the ')' matching the '(' at *startIndex*.

    If the character at startIndex is not '(', startIndex itself is
    returned (depth is immediately zero). Raises ParenthesesError when
    the bracket is never closed.
    """
    depth = 0
    for i in range(startIndex, len(inputStr)):
        ch = inputStr[i]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        if depth == 0:
            return i
    raise ParenthesesError
def FindCharEnd(inputStr, startIndex):
    """Index of the last character of the alphabetic run starting at
    *startIndex* (startIndex - 1 when even that character is not a letter
    and it is the final character of the string)."""
    i = startIndex
    last = len(inputStr) - 1
    while i < last and inputStr[i + 1] in charStr:
        i += 1
    if i == last and inputStr[i] not in charStr:
        i -= 1
    return i
def FindNumEnd(inputStr, startIndex):
    """Index of the last character of the numeric run starting at
    *startIndex* (startIndex - 1 when even that character is not numeric
    and it is the final character of the string)."""
    i = startIndex
    last = len(inputStr) - 1
    while i < last and inputStr[i + 1] in numStr:
        i += 1
    if i == last and inputStr[i] not in numStr:
        i -= 1
    return i
def StrToRPNList(inputStr):
    """Shunting-yard conversion of an infix expression into a list of RPN tokens.

    Parenthesised sub-expressions are converted recursively and spliced
    into the output; names found in funcList are treated as functions
    (priority 3); any other alphabetic run is an unknown variable.

    NOTE(review): '^' is popped like a left-associative operator here,
    although exponentiation is conventionally right-associative — confirm
    the intended semantics for inputs like "2^3^2".
    """
    oprStack = []   # pending (priority, operator) pairs
    finStack = []   # output token list
    i = 0
    while i < len(inputStr):
        if inputStr[i] == " ":
            pass    # skip whitespace
        elif inputStr[i] == "(":
            # Convert the bracketed slice recursively and splice it in.
            pairedBracket = FindPairedBracket(inputStr, i)
            finStack += StrToRPNList(inputStr[i + 1:pairedBracket])
            i = pairedBracket
        elif inputStr[i] in numStr:
            numEnd = FindNumEnd(inputStr, i)
            finStack.append(inputStr[i:numEnd + 1])
            i = numEnd
        elif inputStr[i] in charStr:
            charEnd = FindCharEnd(inputStr, i)
            if inputStr[i:charEnd + 1] in funcList:
                oprStack.append((3, inputStr[i:charEnd + 1]))
            else:
                finStack.append(inputStr[i:charEnd + 1])
            i = charEnd
        elif inputStr[i] in priorDic.keys():
            # Pop operators of equal or higher priority before pushing.
            while True:
                if len(oprStack) == 0:
                    break
                if oprStack[len(oprStack) - 1][0] >= priorDic[inputStr[i]]:
                    finStack.append(oprStack.pop()[1])
                else:
                    break
            oprStack.append((priorDic[inputStr[i]], inputStr[i]))
        else:
            raise CharacterError
        i += 1
    # Flush the remaining operators.
    while len(oprStack) > 0:
        finStack.append(oprStack.pop()[1])
    return finStack
def RPNListToStr(lst):
    """Join RPN tokens into a single string, each token followed by a
    space (including the last one, matching the original format).

    Idiom/performance fix: str.join instead of repeated ``+=``, which is
    quadratic on long token lists.
    """
    return "".join(str(token) + " " for token in lst)
def StrToRPN(inputStr):
    """Convert an infix expression string to its RPN (postfix) string form.

    Raises TypeError when the input is not a string.
    """
    if isinstance(inputStr, str):
        return RPNListToStr(StrToRPNList(inputStr))
    raise TypeError("Input must be a string!")
if __name__ == "__main__":
    import sys
    # CLI entry point: pass the infix expression as the single argument.
    # Fix: removed a stray trailing '|' token that made this line a
    # syntax error.
    print(StrToRPN(sys.argv[1]))
from datetime import *
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, inch, landscape, portrait
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus import *
from geopy.distance import great_circle
import pymongo
# Shared ReportLab assets: the sample stylesheet plus two reusable
# TableStyle definitions used by the report builders below.
styles = getSampleStyleSheet()
# Full grid: centred cells, bold grey header row AND header column, 9pt
# headers over an 8pt body.
style = TableStyle([ ('ALIGN',(0,0),(-1,-1),'CENTER'),
    ('VALIGN',(0,0),(-1,-1),'MIDDLE'),
    ('BOX', (0,0), (-1,-1), 0.25, colors.black),
    ('INNERGRID', (0,0),(-1,-1), 0.25, colors.black),
    ('FONT',(0,0),(0,-1),'Helvetica-Bold'),
    ('FONT',(0,0),(-1,0),'Helvetica-Bold'),
    ('FONTSIZE', (0,0), (-1, -1), 8),
    ('BACKGROUND',(0,0),(0,-1),colors.lightgrey),
    ('BACKGROUND',(0,0),(-1,0),colors.lightgrey),
    ('FONTSIZE',(0,0),(0,-1),9),
    ('FONTSIZE',(0,0),(-1,0),9),
    ])
# Small variant: only the first column is highlighted/bold.
stylepeq = TableStyle([('ALIGN',(0,0),(-1,-1),'CENTER'),
    ('VALIGN',(0,0),(-1,-1),'MIDDLE'),
    ('BOX', (0,0), (-1,-1), 0.25, colors.black),
    ('INNERGRID', (0,0),(-1,-1), 0.25, colors.black),
    ('BACKGROUND',(0,0),(0,-1),colors.lightgrey),
    ('FONT',(0,0),(0,-1),'Helvetica-Bold'),
    ])
heading = styles["Heading1"]
heading.wordWrap = 'CJK'  # allow wrapping anywhere within long strings
normal = styles["Normal"]
class Utils:
def __init__(self):
None
def NumProtocol_to_product(self, protocol):
if protocol == 1:
return "MTC"
elif protocol == 166 or protocol == 167 or protocol == 172 or protocol == 168:
return "MXT"
elif protocol == 173:
return "MaxPB"
def protocol_to_product(self, doc):
try:
a = doc["deviceID"]
return "MaxPB"
except:
protocol = self.get_protocol(doc)
if protocol == 1:
return "MTC"
elif protocol == 166 or protocol == 167 or protocol == 172 or protocol == 168:
return "MXT"
else:
return "Error"
def numprotocol_to_product(self, protocol):
if protocol == 1:
return "MTC"
elif protocol == 166 or protocol == 167 or protocol == 172 or protocol == 168:
return "MXT"
else:
return "MaxPB"
def get_event(self, doc):
return doc["event"]
def isAccelerometerEvent(self, doc):
protocol= self.get_protocol(doc)
product = self.protocol_to_product(doc)
if product == "MXT" :
event = self.get_event(doc)
if event == 100:
return True
else:
return False
elif product == "MTC":
try:
x = int(doc["accelerometer_data"]["data"]["flag_value"])
if x > 0:
return True
else:
return False
except:
return False
else:
return False
def existsCANdata(self, doc):
protocol= self.get_protocol(doc)
product = self.protocol_to_product(doc)
if product == "MXT" :
try:
a = doc["additional_data"]["position_information"]
return True
except:
return False
elif product == "MTC":
return False
def CANdataInfo(self, doc):
stringCAN = "<br><br><b>CAN DATA: </b>"
protocol= self.get_protocol(doc)
product = self.protocol_to_product(doc)
# ANALOG DATA
speedCAN = self.get_speedCAN(doc)
RPM = self.get_rpm(doc)
odometerCAN = self.get_odometer(doc)
airtemperature = self.get_air_temperature(doc)
fuelconsumption = self.get_fuel_consumption(doc)
fuelLevel1 = self.get_fuel_level1(doc)
fuelLevel2 = self.get_fuel_level2(doc)
engineTemperature = self.get_engine_temperature(doc)
windowWipers = self.get_windshield_wipers(doc)
engineFuelRate = self.get_engine_fuel_rate(doc)
intakeAirTemp = self.get_intake_air_temp(doc)
intakeAirFlow = self.get_intake_air_flow(doc)
throttlePosition = self.get_throttle_position(doc)
barometricPressure = self.get_barometric_pressure(doc)
controlModuleVoltage = self.get_control_module_voltage(doc)
fuelType = self.get_fuel_type(doc)
ethanolRatio = self.get_ethanol_ratio(doc)
oilTemperature = self.get_oil_temperature(doc)
engineRefTorque = self.get_engine_ref_torque(doc)
currentGear = self.get_current_gear(doc)
# DIGITAL DATA
clutchDigital = self.get_clutch(doc)
brakeDigital = self.get_brake(doc)
parkingBrakeDigital = self.get_parking_brake(doc)
motorBrakeDigital = self.get_motor_brake(doc)
doorClosedDigital = self.get_door_closed(doc)
doorLockedDigital = self.get_door_locked(doc)
numDTCavailable = self.get_num_dtc_available(doc)
numDTCpacket = self.get_num_dtc_packet(doc)
if product == "MXT" :
if airtemperature != -1 :
stringCAN = stringCAN + "<br<b>Air Temperature: </b> " + str(airtemperature)
if fuelconsumption != -1 :
stringCAN = stringCAN + "<br><b>Fuel Consumption: </b> " + str(fuelconsumption)
if fuelLevel1 != -1 :
stringCAN = stringCAN + "<br<b>Fuel Level 1: </b> " + str(fuelLevel1)
if fuelLevel2 != -1 :
stringCAN = stringCAN + "<br><b>Fuel Level 2: </b> " + str(fuelLevel2)
if engineTemperature != -1 :
stringCAN = stringCAN + "<br<b>Air Temperature: </b> " + str(engineTemperature)
if windowWipers != -1 :
stringCAN = stringCAN + "<br><b>Windshield Wipers: </b> " + str(windowWipers)
if engineFuelRate != -1 :
stringCAN = stringCAN + "<br<b>Air Temperature: </b> " + str(engineFuelRate)
if intakeAirTemp != -1 :
stringCAN = stringCAN + "<br><b>Fuel Consumption: </b> " + str(intakeAirTemp)
if intakeAirFlow != -1 :
stringCAN = stringCAN + "<br<b>Air Temperature: </b> " + str(intakeAirFlow)
if throttlePosition != -1 :
stringCAN = stringCAN + "<br><b>Fuel Consumption: </b> " + str(throttlePosition)
if barometricPressure != -1 :
stringCAN = stringCAN + "<br<b>Air Temperature: </b> " + str(barometricPressure)
if controlModuleVoltage != -1 :
stringCAN = stringCAN + "<br><b>Control Module Voltage: </b> " + str(controlModuleVoltage)
if fuelType != -1 :
stringCAN = stringCAN + "<br><b>Fuel Type: </b> " + str(fuelType)
if ethanolRatio != -1 :
stringCAN = stringCAN + "<br><b>Ethanol Ratio: </b> " + str(ethanolRatio)
if oilTemperature != -1 :
stringCAN = stringCAN + "<br><b>Oil Temperature: </b> " + str(oilTemperature)
if engineRefTorque != -1 :
stringCAN = stringCAN + "<br><b>Engine Ref Torque: </b> " + str(engineRefTorque)
if currentGear != -1 :
stringCAN = stringCAN + "<br><b>Current Gear: </b> " + str(currentGear)
if clutchDigital != -1 :
stringCAN = stringCAN + "<br><b>Clutch Digital: </b> " + str(clutchDigital)
if brakeDigital != -1 :
stringCAN = stringCAN + "<br><b>Brake Digital: </b> " + str(brakeDigital)
if parkingBrakeDigital != -1 :
stringCAN = stringCAN + "<br><b>Parking Brake: </b> " + str(parkingBrakeDigital)
if motorBrakeDigital != -1 :
stringCAN = stringCAN + "<br><b>Motor Brake: </b> " + str(motorBrakeDigital)
if doorClosedDigital != -1 :
stringCAN = stringCAN + "<br><b>Door Closed : </b> " + str(doorClosedDigital)
if doorLockedDigital != -1 :
stringCAN = stringCAN + "<br><b>Door Locked: </b> " + str(doorLockedDigital)
if numDTCavailable != -1 :
stringCAN = stringCAN + "<br><b>Num DTC: </b> " + str(numDTCavailable)
if numDTCpacket != -1 :
stringCAN = stringCAN + "<br><b>Fuel Consumption: </b> " + str(numDTCpacket)
if speedCAN != -1 :
stringCAN = stringCAN + "<br><b>Speed: </b> " + str(speedCAN)
if RPM != -1 :
stringCAN = stringCAN + "<br><b>RPM: </b> " + str(RPM)
if odometerCAN != -1 :
stringCAN = stringCAN + "<br><b>Fuel Consumption: </b> " + str(odometerCAN)
return stringCAN
def PosInfo(self, doc):
gprs = Utils().get_gprsfix(doc)
snr = Utils().get_snr(doc)
svn = Utils().get_svn(doc)
hdop = Utils().get_hdop(doc)
ignition = Utils().get_ignition(doc)
evento = Utils().get_event(doc)
pos = Utils().get_memoryindex(doc)
speed = Utils().get_speed(doc)
gps = Utils().get_gpsfix(doc)
date = Utils().get_devdate(doc)
extPwr = Utils().get_extpower(doc)
product = self.protocol_to_product(doc)
stringInfo = "<b>Position</b>: " + str(pos) \
+ "<br><b>Event:</b> " + str(evento) \
+ "<br><b>Date:</b> " + str(date) \
+ "<br><b>Ignition: </b> " + str(ignition) \
+ "<br><b>Speed:</b> " + str(speed) \
+ "<br><b>GPS Fix:</b> " + str(gps) \
+ "<br><b>GPRS Fix:</b> " + str(gprs) \
+ "<br><b>Ext Power:</b> " + str(extPwr)
if product == "MXT" :
pass
elif product == "MTC":
pass
return stringInfo
    def accelerometerEventInfo(self, doc):
        """HTML fragment describing an accelerometer event.

        For MXT, the concrete event type is probed in priority order:
        hard braking, hard lateral, hard acceleration, impact.

        NOTE(review): when none of the lists is present, docEvento stays
        the int 0 and the subscripts below raise TypeError — confirm this
        is only called when isAccelerometerEvent() is True. Also returns
        None for products other than MXT/MTC.
        """
        evento = Utils().get_event(doc)
        pos = Utils().get_memoryindex(doc)
        speed = Utils().get_speed(doc)
        gps = Utils().get_gpsfix(doc)
        date = Utils().get_devdate(doc)
        protocol= self.get_protocol(doc)   # NOTE(review): unused
        product = self.protocol_to_product(doc)
        if product == "MXT" :
            stringInfo = "<b>Position</b>: " + str(pos) \
                + "<br><b>Event:</b> " + str(evento) \
                + "<br><b>Date:</b> " + str(date) \
                + "<br><b>Speed:</b> " + str(speed) \
                + "<br><b>GPS Fix:</b> " + str(gps)
            try:
                docEvento = doc["additional_data"]["telemetry_events"]["hard_braking_list"][0]
                evento = "Hard Breaking"
            except:
                try:
                    docEvento = doc["additional_data"]["telemetry_events"]["hard_lateral_list"][0]
                    if docEvento["side"] == 0:
                        evento = "Hard Lateral Left"
                    else:
                        evento = "Hard Lateral Right"
                except:
                    try:
                        docEvento = doc["additional_data"]["telemetry_events"]["hard_acceleration_list"][0]
                        evento = "Hard Acceleration"
                    except:
                        try:
                            docEvento = doc["additional_data"]["telemetry_events"]["impact_detected_list"][0]
                            evento = "Impact"
                        except:
                            docEvento = 0
            stringInfo = stringInfo \
                + "<br><br><b>ACCELEROMETER EVENT</b> "\
                + "<br><b>Type:</b> " + evento \
                + "<br><b>Total Time:</b> " + str(docEvento["total_time"]) \
                + "<br><b>Time to Max:</b> " + str(docEvento["time_to_max"])\
                + "<br><b>Max G:</b> " + str(docEvento["max_g"])
            return stringInfo
        elif product == "MTC":
            stringInfo = "Bla bla bla Acelerometro MTC"
            return stringInfo
    def get_extpower(self, doc):
        """External supply voltage: -1 for unknown products or errors,
        0 when the reading is simply missing (MTC/MaxPB)."""
        linha = Utils().protocol_to_product(doc)
        try:
            if (linha == "MTC"):
                try:
                    # NOTE(review): "aditional_data" (sic) — if the producer
                    # writes "additional_data" this always falls through to
                    # 0; confirm against real MTC documents.
                    supply = doc["aditional_data"]["adc_data"]["external_power"]
                except:
                    supply = 0
            elif linha == "MXT":
                supply = doc["hardware_monitor"]["detailed_supply"]
            elif linha == "MaxPB":
                try:
                    supply = doc["flags"]["deviceStatus"]["extPowerValue"]
                    supply = float(float(supply)/1000)  # scaled /1000, presumably mV -> V
                except:
                    try:
                        supply = doc["flags"]["deviceInfo"]["extPowerValue"]
                        supply = float(float(supply)/1000)
                    except:
                        supply = 0
            else:
                supply = -1
        except:
            supply = -1
        return supply
def get_gprsfix(self, doc):
return doc["gprsfix"]
def get_ignition(self, doc):
return doc["ign"]
def get_memoryindex(self, doc):
return doc["pos"]
def get_gtwdate(self, doc):
return doc["date_gtw"]
def get_mapType(self, doc):
gps = self.get_gpsfix(doc)
gprs = self.get_gprsfix(doc)
if Utils().isAccelerometerEvent(doc) == False:
if gps == False and gprs == False:
type = 1
elif gps == False and gprs == True:
type = 2
elif gps == True and gprs == False:
type = 3
elif gps == True and gprs == True:
type = 4
else:
if gps == False and gprs == False:
type = 5
elif gps == False and gprs == True:
type = 6
elif gps == True and gprs == False:
type = 7
elif gps == True and gprs == True:
type = 8
return type
def get_devdate(self, doc):
return doc["date_dev"]
def get_serial(self, doc):
return doc["dev_id"]
def get_protocol(self, doc):
try:
protocol = doc["firmware"]["protocol"]
return protocol
except:
return 173
def get_csq(self, doc):
linha = Utils().protocol_to_product(doc)
try:
if (linha == "MXT"):
csq = doc["gps_modem"]["csq"]
elif linha == "MaxPB":
try:
csq = doc["flags"]["connectionInfo"]["csq"]
except:
csq = -1
else:
csq = 0
except:
csq = 0
return csq
def get_jamming(self, doc):
linha = Utils().protocol_to_product(doc)
try:
if (linha == "MXT"):
jamming = doc["gps_modem"]["flag_state"]["gsm_jamming"]
elif linha == "MaxPB":
try:
jamming = doc["flags"]["connectionInfo"]["jamming"]
except:
jamming = -1
else:
jamming = False
except:
jamming = False
return jamming
def get_snr(self, doc):
linha = Utils().protocol_to_product(doc)
try:
if (linha == "MXT"):
snr = doc["gps_modem"]["snr"]
elif linha == "MaxPB":
snr = doc["gps"]["averageSnr"]
else:
snr = 0
except:
snr = 0
return snr
def get_svn(self, doc):
linha = Utils().protocol_to_product(doc)
try:
if (linha == "MaxPB"):
svn = doc["gps"]["svn"]
else:
svn = doc["gps_modem"]["svn"]
except:
svn = 0
return svn
def get_hdop(self, doc):
linha = Utils().protocol_to_product(doc)
try:
if (linha == "MaxPB"):
hdop = doc["gps"]["hdop"]
hdop = hdop # /10
else:
hdop = doc["gps_modem"]["hdop"]
except:
hdop = 0
return hdop
def get_speed(self, doc):
return doc["speed"]
def get_gpsfix(self, doc):
return doc["gpsfix"]
def get_antdisconnected(self, doc):
linha = Utils().protocol_to_product(doc)
if (linha == "MXT"):
antdisconnected = doc["gps_modem"]["flag_state"]["gps_antenna_disconnected"]
else:
antdisconnected = 0
return antdisconnected
def get_GPSantennaFail(self, doc):
linha = Utils().protocol_to_product(doc)
try:
if (linha == "MTC"):
try:
GPSantennaFail = doc["gps_modem"]["flag_state"]["gps_antenna_status"]
except:
GPSantennaFail = 0
elif linha == "MXT":
GPSantennaFail = doc["gps_modem"]["flag_state"]["gps_antenna_failure"]
else:
GPSantennaFail = 0
except:
GPSantennaFail = 0
return GPSantennaFail
def get_latitude(self, doc):
linha = Utils().protocol_to_product(doc)
if (linha == "MaxPB"):
latitude = doc["positionInfo"][0]["latitude"]
latitude = float(float(latitude)/1000000)
else:
latitude = doc["gps_modem"]["latitude"]
return latitude
def get_longitude(self, doc):
linha = Utils().protocol_to_product(doc)
if (linha == "MaxPB"):
longitude = doc["positionInfo"][0]["longitude"]
longitude = float(float(longitude)/1000000)
else:
longitude = doc["gps_modem"]["longitude"]
return longitude
    def get_gsensor(self, doc):
        """Motion ('moving') flag reported by the device.

        Returns -1 for unknown products; 0 when the MTC lookup fails;
        False when an outer lookup fails. NOTE(review): the mixed
        -1/0/False sentinels look accidental — confirm with callers.
        """
        linha = Utils().protocol_to_product(doc)
        try:
            if linha == "MTC":
                try:
                    gsensor = doc["gps_modem"]["flag_state"]["moving"] # TODO: verify this source field!
                except:
                    gsensor = 0
            elif linha == "MXT":
                gsensor = doc["gps_modem"]["flag_state"]["moving"]
            elif linha == "MaxPB":
                try:
                    gsensor = doc["telemetry"]["status"]["moving"]
                except:
                    gsensor = doc["telemetry"]["flags"]["moving"]
            else:
                gsensor = -1
        except:
            gsensor = False
        return gsensor
def get_fuel_consumption(self, doc):
if doc["firmware"]["protocol"]==172:
try:
fuel_consumption = doc['additional_data']['position_information']['analogic_data']['fuel_consumption']
except:
fuel_consumption = -1
return fuel_consumption
def get_door_closed(self, doc):
if doc["firmware"]["protocol"]==172:
try:
door_closed_digital = int(doc['additional_data']['position_information']['digital_data']['door_closed'])
except:
door_closed_digital = -1
return door_closed_digital
def get_door_locked(self, doc):
if doc["firmware"]["protocol"]==172:
try:
door_locked_digital = int(doc['additional_data']['position_information']['digital_data']['door_locked'])
except:
door_locked_digital = -1
return door_locked_digital
def get_parking_brake(self, doc):
if doc["firmware"]["protocol"]==172:
try:
parking_brake_digital = int(doc['additional_data']['position_information']['digital_data']['parking_brake'])
except:
parking_brake_digital = -1
return parking_brake_digital
def get_motor_brake(self, doc):
if doc["firmware"]["protocol"]==172:
try:
motor_brake_digital = int(doc['additional_data']['position_information']['digital_data']['motor_brake'])
except:
motor_brake_digital = -1
return motor_brake_digital
def get_windshield_wipers(self, doc):
if doc["firmware"]["protocol"]==172:
try:
windshield_wipers_digital = int(doc['additional_data']['position_information']['digital_data']['windshield_wipers'])
except:
windshield_wipers_digital = -1
return windshield_wipers_digital
def get_speedCAN(self, doc):
if doc["firmware"]["protocol"]==172:
try:
speed = doc['additional_data']['position_information']['analogic_data']['speed']
except:
speed = -1
return speed
def get_rpm(self, doc):
if doc["firmware"]["protocol"]==172:
try:
rpm = doc['additional_data']['position_information']['analogic_data']['rpm']
except:
rpm = -1
return rpm
def get_odometer(self, doc):
if doc["firmware"]["protocol"]==172:
try:
odm = doc['additional_data']['position_information']['analogic_data']['odometer']
except:
odm = -1
return odm
def get_fuel_level1(self, doc):
if doc["firmware"]["protocol"]==172:
try:
fuel_level1 = doc['additional_data']['position_information']['analogic_data']['fuel_level1']
except:
fuel_level1 = -1
return fuel_level1
def get_fuel_level2(self, doc):
if doc["firmware"]["protocol"]==172:
try:
fuel_level2 = doc['additional_data']['position_information']['analogic_data']['fuel_level2']
except:
fuel_level2 = -1
return fuel_level2
def get_intake_air_temp(self, doc):
if doc["firmware"]["protocol"]==172:
try:
intake_air_temp = doc['additional_data']['position_information']['analogic_data']['intake_air_temp']
except:
intake_air_temp = -1
return intake_air_temp
def get_intake_air_flow(self, doc):
if doc["firmware"]["protocol"]==172:
try:
intake_air_flow = doc['additional_data']['position_information']['analogic_data']['intake_air_flow']
except:
intake_air_flow = -1
return intake_air_flow
# NOTE(review): as with the getters above, the original only assigned the
# result when protocol == 172, leaving other protocols with an unbound local
# (or a None return, depending on the lost indentation). Each getter now
# defaults to -1 and narrows the bare except.
def get_throttle_position(self, doc):
    """Throttle position for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['throttle_position']
        except (KeyError, TypeError):
            value = -1
    return value

def get_barometric_pressure(self, doc):
    """Barometric pressure for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['barometric_pressure']
        except (KeyError, TypeError):
            value = -1
    return value

def get_control_module_voltage(self, doc):
    """Control module voltage for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['control_module_voltage']
        except (KeyError, TypeError):
            value = -1
    return value

def get_air_temperature(self, doc):
    """Air temperature for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['air_temperature']
        except (KeyError, TypeError):
            value = -1
    return value

def get_fuel_type(self, doc):
    """Fuel type code for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['fuel_type']
        except (KeyError, TypeError):
            value = -1
    return value

def get_ethanol_ratio(self, doc):
    """Ethanol ratio for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['ethanol_ratio']
        except (KeyError, TypeError):
            value = -1
    return value

def get_oil_temperature(self, doc):
    """Oil temperature for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['oil_temperature']
        except (KeyError, TypeError):
            value = -1
    return value
# NOTE(review): same fix as the getters above — result defaults to -1 so
# non-172 protocols no longer hit an unbound local; bare excepts narrowed.
def get_engine_temperature(self, doc):
    """Engine temperature for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['engine_temperature']
        except (KeyError, TypeError):
            value = -1
    return value

def get_engine_ref_torque(self, doc):
    """Engine reference torque for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['engine_ref_torque']
        except (KeyError, TypeError):
            value = -1
    return value

def get_current_gear(self, doc):
    """Current gear for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['current_gear']
        except (KeyError, TypeError):
            value = -1
    return value

def get_engine_fuel_rate(self, doc):
    """Engine fuel rate for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['engine_fuel_rate']
        except (KeyError, TypeError):
            value = -1
    return value

def get_num_dtc_available(self, doc):
    """Number of available DTCs for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['num_dtc_available']
        except (KeyError, TypeError):
            value = -1
    return value

def get_num_dtc_packet(self, doc):
    """Number of DTC packets for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = doc['additional_data']['position_information']['analogic_data']['num_dtc_packet']
        except (KeyError, TypeError):
            value = -1
    return value
# NOTE(review): same fix as the getters above — default -1 avoids the unbound
# local on non-172 protocols; ValueError added because of the int() cast.
def get_brake(self, doc):
    """Brake digital flag (int) for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = int(doc['additional_data']['position_information']['digital_data']['brake'])
        except (KeyError, TypeError, ValueError):
            value = -1
    return value

def get_clutch(self, doc):
    """Clutch digital flag (int) for protocol-172 documents; -1 if unavailable."""
    value = -1
    if doc["firmware"]["protocol"] == 172:
        try:
            value = int(doc['additional_data']['position_information']['digital_data']['clutch'])
        except (KeyError, TypeError, ValueError):
            value = -1
    return value
def get_GMT(self):
    """Local offset from UTC, in hours (float; may be fractional or negative).

    Bug fix: the original returned ``GMT.seconds/3600``. For zones ahead of
    UTC the difference is a *negative* timedelta, and ``.seconds`` is the
    normalised non-negative remainder (e.g. UTC+3 reported as ~21h).
    ``total_seconds()`` keeps the sign and includes the day component.
    """
    offset = datetime.utcnow() - datetime.now()
    return offset.total_seconds() / 3600
def set_IntToDatetime(self, intDate):
    """Convert a POSIX timestamp (int) into a naive UTC datetime.

    The round-trip through a formatted string deliberately drops any
    sub-second component, matching the original behaviour.
    """
    stamp = datetime.utcfromtimestamp(intDate).strftime('%Y-%m-%d %H:%M:%S')
    return datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S')
def set_DatetimeToInt(self, timestamp):
    """Convert a naive UTC datetime to POSIX seconds (float)."""
    # timestamp += timedelta(hours=self.get_GMT())
    epoch = datetime(1970, 1, 1)
    return (timestamp - epoch).total_seconds()
def get_temperature(self, doc):
    """Device temperature reading.

    MaxPB devices report a raw value that is divided by 1000 (presumably
    millidegree units — confirm); -1000 is the missing-field sentinel.
    Other products read hardware_monitor directly (KeyError propagates).
    """
    product = Utils().protocol_to_product(doc)
    if product == "MaxPB":
        try:
            raw = doc["flags"]["deviceInfo"]["temperature"]
        except:  # original bare except kept: any lookup failure -> sentinel
            raw = -1000
        return float(float(raw) / 1000)
    return doc["hardware_monitor"]["temperature"]
def get_battery(self, doc):
    """Battery voltage per product line; 0 when absent, implausible or on error."""
    product = Utils().protocol_to_product(doc)
    bat = 0  # default for unknown products and error paths
    try:
        if product == "MTC":
            try:
                # NOTE: "aditional_data" (sic) is the key the device emits.
                bat = doc["aditional_data"]["adc_data"]["battery"]
            except:
                bat = 0
        elif product == "MXT":
            bat = doc["hardware_monitor"]["detailed_supply"]
            if float(bat) > 4.5:  # above plausible range -> treat as no reading
                bat = 0
        elif product == "MaxPB":
            try:
                bat = float(float(doc["flags"]["deviceStatus"]["battValue"]) / 1000)
            except:
                try:
                    bat = float(float(doc["flags"]["deviceInfo"]["battValue"]) / 1000)
                except:
                    bat = 0
    except:
        bat = 0
    return bat
def get_batteryState(self, doc):
    """Battery state flag from flags.deviceInfo; 0 when absent or malformed.

    Fixes: removed the dead ``linha = Utils().protocol_to_product(doc)``
    computation (its result was never used) and narrowed the bare except
    to the failures the dict lookups can raise.
    """
    try:
        return doc["flags"]["deviceInfo"]["battState"]
    except (KeyError, TypeError):
        return 0
def get_hourmeter(self, doc):
    """Hourmeter reading per product line; 0 when the field is absent."""
    product = Utils().protocol_to_product(doc)
    if product == "MaxPB":
        # MaxPB may store it under either telemetry.status or telemetry.flags.
        try:
            return doc["telemetry"]["status"]["hourmeter"]
        except:
            try:
                return doc["telemetry"]["flags"]["hourmeter"]
            except:
                return 0
    try:
        return doc["hardware_monitor"]["hourmeter"]
    except:
        return 0
def get_Odometro(self, doc):
    """Odometer reading; GPS odometer for MaxPB, modem hodometer otherwise.

    Returns 0 when the field is missing.
    """
    product = Utils().protocol_to_product(doc)
    try:
        if product == "MaxPB":
            return doc["telemetry"]["odometer"]["gps"]
        return doc["gps_modem"]["hodometer"]
    except:
        return 0
def get_uptime(self, doc):
    """Device uptime (reported by MaxPB only); 0 otherwise or on lookup error."""
    product = Utils().protocol_to_product(doc)
    try:
        uptime = doc["flags"]["deviceInfo"]["uptime"] if product == "MaxPB" else 0
    except:
        uptime = 0
    return uptime
def calculaDistancia(self, lat, long, referencia):
    """Great-circle distance in whole metres between (long, lat) and `referencia`.

    NOTE(review): the point is built as (long, lat) while geodesic helpers
    such as geopy's great_circle conventionally expect (lat, long); this is
    only correct if `referencia` is stored in the same swapped order —
    confirm against GatewayLocation producers.
    """
    ponto = (long, lat)
    distance = int(great_circle(referencia, ponto).meters)
    return distance
def findGatewayInList(self, gatewayID, listGateways):
    """Return (referencia, index) for the gateway whose deviceID matches.

    Returns None when no gateway matches — made explicit here (the original
    fell off the end of the function); callers must handle the miss case.
    """
    for position, gateway in enumerate(listGateways):
        if gateway.deviceID == gatewayID:
            return gateway.referencia, position
    return None
class GatewayLocation:
    """Associates a gateway device with its reference location."""

    def __init__(self, deviceID=0, referencia=0):
        self.deviceID = deviceID        # gateway identifier
        self.referencia = referencia    # reference point used for distance checks
        self.isUsed = False             # marks whether this gateway was consumed
|
# Simulates loading/unloading passengers onto a train:
#   "add {n}"        -> add n people to the last wagon
#   "insert {i} {n}" -> add n people to wagon i
#   "leave {i} {n}"  -> remove n people from wagon i
#   "End"            -> stop and print the wagon list
number_wagons = int(input())
wagons = [0] * number_wagons  # idiomatic replacement for the append loop
command_input = input()
while command_input != "End":
    tokens = command_input.split(" ")
    command = tokens[0]
    if command == "add":
        # The original popped and re-appended the last wagon; direct indexing
        # performs the same update in O(1) without rebuilding the list.
        wagons[-1] += int(tokens[1])
    elif command == "insert" or command == "leave":
        index = int(tokens[1])
        people = int(tokens[2])
        # pop + insert at the same index is equivalent to an in-place update;
        # invalid indexes still raise IndexError, as before.
        wagons[index] += people if command == "insert" else -people
    command_input = input()
print(wagons)
|
# Python Standard Libraries
from datetime import datetime, timedelta
# Third-Party Libraries
from django.conf import settings
from django.contrib.auth.models import (AbstractBaseUser,
BaseUserManager,
PermissionsMixin)
from django.db import models
import jwt
# Custom Libraries
# N/A
# TODO: Figure out if this should be in the api version logic or stored here
class UserManager(BaseUserManager):
    """Custom manager responsible for creating regular and super users."""

    def create_user(self, username=None, email=None, password=None):
        """Create, save and return a regular user.

        Raises:
            TypeError: when username, password or email is missing.
        """
        # Validation order (username, password, email) matches the original
        # and is observable through which error message is raised first.
        required = ((username, "a username"),
                    (password, "a password"),
                    (email, "an email address"))
        for value, label in required:
            if value is None:
                raise TypeError("Users must have {}.".format(label))
        user = self.model(username=username, email=self.normalize_email(email))
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, username, email, password):
        """Create a user and promote it to staff/superuser, then return it."""
        user = self.create_user(username, email, password)
        user.is_superuser = True
        user.is_staff = True
        user.save()
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """An extension of the User class:
    https://docs.djangoproject.com/en/1.10/topics/auth/customizing/#django.contrib.auth.models.CustomUser
    Attributes:
        username (CharField): Unique username
        email (EmailField): An e-mail
        is_active (BooleanField): User information cannot be deleted, only
            deactivated
        is_staff (BooleanField): Expected by Django to determine who can log
            into the Django admin site. For most users this is False.
        is_superuser (BooleanField): Expected by Django (via PermissionsMixin).
        created_at (DateTimeField): Timestamp of object creation.
        updated_at (DateTimeField): Timestamp of the last update.
    """
    username = models.CharField(db_index=True, max_length=255, unique=True)
    email = models.EmailField(db_index=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    # USERNAME_FIELD is required to specify the default account field
    USERNAME_FIELD = "username"
    REQUIRED_FIELDS = ["email"]

    # Points Django to the managing class for this model
    objects = UserManager()

    def __str__(self):
        """Return a short human-readable representation of this `User`."""
        return ("Username: {}"
                " E-mail: {}").format(self.username, self.email)

    @property
    def token(self):
        """JWT for this user, so callers can write `user.token` instead of
        `user._generate_jwt_token()` (a "dynamic property")."""
        return self._generate_jwt_token()

    def get_full_name(self):
        """Required by Django for e-mail handling; returns the username."""
        return self.username

    def get_short_name(self):
        """Required by Django for e-mail handling; returns the username."""
        return self.username

    def _generate_jwt_token(self):
        """Generate a JWT carrying this user's ID, expiring 60 days from now.

        Fixes:
        - `strftime("%s")` is not a documented format code and is
          platform-specific (fails on Windows); `datetime.timestamp()` is
          the portable equivalent.
        - PyJWT >= 2.0 returns `str` from `jwt.encode`, so unconditionally
          calling `.decode` raised AttributeError; handle both types.

        Returns:
            (str): the encoded token.
        """
        expiration_date = datetime.now() + timedelta(days=60)
        jwt_data = {
            "id": self.pk,
            "exp": int(expiration_date.timestamp()),
        }
        token = jwt.encode(jwt_data, settings.SECRET_KEY, algorithm="HS256")
        return token.decode("utf-8") if isinstance(token, bytes) else token
|
class String:
    """Toy wrapper exposing common string operations as printing methods."""

    def length(self, s):
        """Print the length of *s*."""
        print("The length of the string is", len(s))

    def rev(self, s):
        """Print *s* reversed."""
        print("The reverse of the string is", "".join(reversed(s)))

    def con(self, s, s2):
        """Print both strings side by side (comma-joined by print)."""
        print("The string after concatenation is", s, " ", s2)

    def cop(self, s):
        """Store a copy of *s* on the instance and report it."""
        self.st = s
        print("The string is copied ", self.st)

    def comp(self, s, s2):
        """Report whether the two strings are equal."""
        print("The string is equal" if s == s2 else "The string is not equal")
# Interactive menu driving the String helper class above.
# Fix: corrected the "fine"/"find" typo in the user prompt.
ss = String()
n = int(input("Enter 1 to find the length, 2 to reverse, 3 to concatenate, "
              "4 to copy, 5 to compare string "))
if n == 1:
    s = input("Enter a string")
    ss.length(s)
elif n == 2:
    s = input("Enter a string")
    ss.rev(s)
elif n == 3:
    first = input("Enter a string")
    second = input("Enter another string")
    ss.con(first, second)
elif n == 4:
    s = input("Enter a string")
    ss.cop(s)
elif n == 5:
    first = input("Enter a string")
    second = input("Enter another string to compare")
    ss.comp(first, second)
|
import unittest
from katas.kyu_6.eighties_kids_7_shes_a_small_wonder import Robot
class RobotTestCase(unittest.TestCase):
    """Tests for Robot.learn_word responses."""

    def setUp(self):
        self.vicky = Robot()

    def test_equal_1(self):
        reply = self.vicky.learn_word('hello')
        self.assertEqual(reply, 'Thank you for teaching me hello')

    def test_equal_2(self):
        self.assertEqual(self.vicky.learn_word('world'),
                         'Thank you for teaching me world')
        # A word taught once is recognised again regardless of its casing,
        # and echoed back verbatim.
        self.assertEqual(self.vicky.learn_word('World'),
                         'I already know the word World')
        self.assertEqual(self.vicky.learn_word('world'),
                         'I already know the word world')

    def test_equal_3(self):
        reply = self.vicky.learn_word('goodbye')
        self.assertEqual(reply, 'Thank you for teaching me goodbye')

    def test_equal_4(self):
        # 'Thank' is already part of the robot's own reply vocabulary.
        reply = self.vicky.learn_word('Thank')
        self.assertEqual(reply, 'I already know the word Thank')

    def test_equal_5(self):
        reply = self.vicky.learn_word('!@#$%^')
        self.assertEqual(reply, 'I do not understand the input')
|
from socketIO_client import SocketIO, LoggingNamespace
import cv2
import base64
import RPi.GPIO as GPIO
# --- Runs at import time: configure GPIO, camera and server socket ---
GPIO.setmode(GPIO.BOARD)  # physical (board) pin numbering
# Motor driver outputs: pins 3/5 form one channel and 8/7 the other
# (labelled Fwd/Bwd in on_response below).
GPIO.setup(3, GPIO.OUT)
GPIO.setup(5, GPIO.OUT)
GPIO.setup(8, GPIO.OUT)
GPIO.setup(7, GPIO.OUT)
# Low-resolution capture keeps the JPEG stream cheap to encode and send.
video_capture = cv2.VideoCapture(0)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
# Control/telemetry server (hard-coded address and port).
socketIO = SocketIO('52.90.129.89', 3000, LoggingNamespace)
def on_connect():
    # Fired once the socket.io handshake with the control server completes.
    print("Connected")

def on_response(arg):
    # Handle a remote drive command: each command writes a fixed pattern to
    # the two motor channels (pins 3/5 and 8/7, labelled Fwd/Bwd below).
    # NOTE: Python 2 print statement — this script targets Python 2.
    print "Responded", arg
    if arg == 'Up':
        #Fwd
        GPIO.output(3, 1)
        GPIO.output(5, 0)
        #Bwd
        GPIO.output(8, 1)
        GPIO.output(7, 0)
    if arg == 'Stop':
        # All outputs low: both channels idle.
        #Fwd
        GPIO.output(3, 0)
        GPIO.output(5, 0)
        #Bwd
        GPIO.output(8, 0)
        GPIO.output(7, 0)
    if arg == 'Left':
        # Channels driven in opposite directions to turn.
        #Fwd
        GPIO.output(3, 1)
        GPIO.output(5, 0)
        #Bwd
        GPIO.output(8, 0)
        GPIO.output(7, 1)
    if arg == 'Right':
        #Fwd
        GPIO.output(3, 0)
        GPIO.output(5, 1)
        #Bwd
        GPIO.output(8, 1)
        GPIO.output(7, 0)
    if arg == 'Down':
        # Both channels reversed relative to 'Up'.
        #Fwd
        GPIO.output(3, 0)
        GPIO.output(5, 1)
        #Bwd
        GPIO.output(8, 0)
        GPIO.output(7, 1)

# Register handlers and announce ourselves to the server.
socketIO.on('connect', on_connect)
socketIO.on('command_message', on_response)
socketIO.wait(seconds=1)
socketIO.emit('command_message', "First Message")
# Stream JPEG-compressed frames forever; wait() also services incoming commands.
while True:
    ret, frame = video_capture.read()
    cnt = cv2.imencode('.jpg',frame, [int(cv2.IMWRITE_JPEG_QUALITY), 25])[1]
    b64 = base64.encodestring(cnt)
    socketIO.emit('video_feed', b64)
    socketIO.wait(seconds=0.01)
|
"""
Programmer: Keith G. Nemitz
E-mail: future@mousechief.com
Version 0.0.1 Development
"""
#The images module handles the loading of all graphics files
#conceptually, everything is a sprite.
#It also handles some graphics manipulations
#----------------------------------------------- IMPORTS
import os
import sys
import pyglet
#----------------------------------------------- Module Variables
#Paths to this game's data.
rootPath = "data"
# Sub-folders registered with pyglet's resource loader by Init() below.
rsrcPaths = ["icons","animations","backdrops","buttons","actors",
'music','sounds']; #last check is the root datapath
# Filled in by Init(): the pyglet window and its pixel size.
screenSize = None;
theWindow = None;
#----------------------------------------------- Module Functions
def Init(window):
    """Remember the window, register all resource folders and reindex pyglet."""
    global screenSize, theWindow
    theWindow = window
    screenSize = window.get_size()
    sub_dirs = [os.path.join(rootPath, sub) for sub in rsrcPaths]
    pyglet.resource.path = [rootPath] + sub_dirs
    pyglet.resource.reindex()
    return theWindow
#load an image file and convert it to the current display environment, adjusting for alpha info.
def LoadImage(name):
    """Load *name* through pyglet's resource index."""
    return pyglet.resource.image(name)

def BuildLoadSprite(name):
    """Load *name* and wrap it in a batch-less Sprite."""
    return pyglet.sprite.Sprite(pyglet.resource.image(name))

def BuildLoadSpriteBatch(name, batch=None, group=None):
    """Load *name* and wrap it in a Sprite attached to *batch*/*group*."""
    return pyglet.sprite.Sprite(pyglet.resource.image(name), batch=batch, group=group)

def BuildSprite(image, batch=None, group=None):
    """Wrap an already-loaded image in a Sprite."""
    return pyglet.sprite.Sprite(image, batch=batch, group=group)

def LoadAnimFrame(path):
    """Not implemented yet."""
    pass

def NewBatch():
    """Create an empty rendering batch."""
    return pyglet.graphics.Batch()

def MakeRank(n):
    """Create an OrderedGroup with draw priority *n*."""
    return pyglet.graphics.OrderedGroup(n)
#Horizontal flip of the supplied surface
def FlipImageH(image):
    """Return a horizontally mirrored texture of *image*.

    The anchor is moved to the centre so the flip happens around the
    middle, then reset to the origin on the flipped result.
    """
    image.anchor_x = image.width / 2
    image.anchor_y = image.height / 2
    flipped = image.get_texture().get_transform(flip_x=True)
    flipped.anchor_x = 0
    flipped.anchor_y = 0
    return flipped
#vertical flip of the supplied surface
def FlipImageV(surf):
    """Return a vertically mirrored texture of *surf*.

    Bug fix: the original body referenced the undefined name ``image``
    (copy/paste from FlipImageH) and always raised NameError; the
    parameter is now bound before use.
    """
    image = surf
    image.anchor_x = image.width / 2
    image.anchor_y = image.height / 2
    image = image.get_texture().get_transform(flip_y=True)
    image.anchor_x = 0
    image.anchor_y = 0
    return image
#def Rot90(surf, degrees):
#
# return surf;
def MapBoundsToQuad(br):
    """Expand a (left, bottom, right, top) bounds 4-tuple into the 8 ints of
    a quad's corner vertices (counter-clockwise from bottom-left)."""
    left, bottom, right, top = (int(v) for v in br[:4])
    return (left, bottom, right, bottom, right, top, left, top)
def DrawRect(r, lineWidth=1.0, color=(255, 255, 255, 255)):
    """Outline rectangle *r* using the given line width and RGBA colour."""
    pyglet.gl.glLineWidth(lineWidth)
    pyglet.graphics.draw(
        4, pyglet.gl.GL_LINE_LOOP,
        ('c4B', color * 4),
        ('v2i', MapBoundsToQuad(r.GetBounds())),
    )

def FillRect(r, color=(255, 255, 255, 255)):
    """Fill rectangle *r* with the given RGBA colour."""
    pyglet.graphics.draw(
        4, pyglet.gl.GL_QUADS,
        ('c4B', color * 4),
        ('v2i', MapBoundsToQuad(r.GetBounds())),
    )
#def FillTriangle(verts, color = (255, 255, 255, 255)):
# pyglet.graphics.draw( 4, pyglet.gl.GL_TRIANGLES,
# ('c4B', color * 3),
# ('v2i', MapVertsToTri(verts)),
# );
# pass
def PlaceFeature(f, image=None):
    """Position *image* (or the feature's own image) at the feature's corner."""
    target = image if image else f.image
    target.set_position(f.left, f.bottom)

def MoveImageTo(image, x, y):
    """Move *image* to the absolute position (x, y)."""
    image.set_position(x, y)

def MoveImage(image, dx, dy):
    """Shift *image* by the relative offset (dx, dy)."""
    image.x += dx
    image.y += dy

def RotatePict(f, deg):
    """Set the rotation of the feature's image, in degrees."""
    f.image.rotation = deg

def SetAnchor(f, anchorX=0, anchorY=0):
    """Set the image anchor and re-position so the feature stays put."""
    f.image.image.anchor_x = anchorX
    f.image.image.anchor_y = anchorY
    f.image.set_position(f.left + anchorX, f.bottom + anchorY)
def SetImageAnchorAndPos(image, anchorX=0, anchorY=0, x=0, y=0):
    """Set the image anchor and move it to (x, y).

    Bug fix: the original referenced undefined globals ``x``/``y`` and
    always raised NameError; they are now explicit parameters with
    backward-compatible defaults.
    """
    image.anchor_x = anchorX
    image.anchor_y = anchorY
    image.set_position(x, y)
def SetVisible(image, flag):
    """Toggle the sprite's visibility."""
    image.visible = flag

def SetOpacity(image, opac):
    """Set the sprite's opacity (0-255)."""
    image.opacity = opac
|
"""Functions to detect irregularities"""
import numpy as np
# --------------------------------------------------------------------------- #
# Utils
def linearize(a, index=-1):
    """Linearize a vector into at most two linear segments.

    Assumes `a` is sampled on a regular step.

    Args:
        a (np.ndarray)
        index (int): split position between the two segments;
            out-of-bounds values yield a single segment.
    Return:
        (np.ndarray)
    """
    n = len(a)
    if index <= 0 or index >= (n - 1):
        # Single segment from a[0] to a[-1].
        slope = (a[-1] - a[0]) / (n - 1)
        return slope * np.arange(n) + a[0]
    pivot = a[index]
    left = ((pivot - a[0]) / index) * np.arange(index + 1) + a[0]
    tail = n - index - 1
    right = ((a[-1] - pivot) / tail) * np.arange(tail + 1) + pivot
    # Drop the duplicated pivot point when joining the two segments.
    return np.concatenate([left, right[1:]])
def group_consecutives(a, step=1):
    """Group step-consecutive elements into a list of arrays.

    Example:
        >> group_consecutives([1, 2, 4, 5, 6, 9], step=1)
        [[1, 2], [4, 5, 6], [9]]
    """
    if len(a) == 0:
        return []
    break_points = np.where(np.diff(a) != step)[0] + 1
    return np.split(a, break_points)
# --------------------------------------------------------------------------- #
# Algorithms
# ----------------------------------- #
# Regularity
def reg_bounds(X, bot_thld=0, top_thld=np.inf):
    """Detect regularity and return boundaries for consistent splitting.

    Args:
        X (np.ndarray) : array to work with
        bot_thld (float): bot threshold for x
        top_thld (float): top threshold for x
    Returns:
        (int-list) indexes where to split X so each span holds x-values that
        are all in [bot_thld, top_thld], all below bot_thld, or all above
        top_thld.
    Example:
        >> reg_bounds(np.array([0, 1, 2, 3, 4, 1, 0, 5, 6]), 2, 4)
        [2, 5, 7]
    """
    if bot_thld > top_thld:
        raise ValueError("bot_thld must be smaller or equal to top_thld")
    if len(X) == 0:
        return []
    cuts = set()

    def _mark_outliers(idxs):
        # Group step-consecutive indexes and cut at each group's two ends
        # (inlined from group_consecutives).
        if len(idxs) == 0:
            return
        for grp in np.split(idxs, np.where(np.diff(idxs) != 1)[0] + 1):
            cuts.add(grp[0])
            cuts.add(grp[-1] + 1)

    if bot_thld > 0:  # spans of too-small values
        _mark_outliers(np.where(X < bot_thld)[0])
    if top_thld != np.inf:  # spans of too-large values
        _mark_outliers(np.where(X > top_thld)[0])
    # Boundaries at the array ends are implicit; drop them if present.
    cuts.discard(0)
    cuts.discard(len(X))
    return sorted(cuts)
def stepreg_bounds(X, *args, **kwargs):
    """Detect step regularity and return boundaries for consistent splitting.

    Thin wrapper: computes consecutive differences (step_i refers to
    X[i], X[i+1]) and delegates the thresholding to reg_bounds; see
    reg_bounds for the bot_thld/top_thld semantics.
    """
    if len(X) <= 1:
        return []
    return reg_bounds(np.diff(X), *args, **kwargs)
# ----------------------------------- #
# Elbow
def detect_elbow(Y, mthd='singleline'):
    """Return the index where the elbow of curve Y occurs.

    Args:
        Y (np.ndarray)
        mthd (str): method to find the elbow
            singleline> farthest point from the fst-pt-to-lst-pt line
            doubleline> point minimising dist(two-segment fit, Y)
    Return:
        index (int)
    Raises:
        ValueError: for an unknown `mthd`.

    Fixes: the error message had the % operator *inside* the string literal
    (so it never formatted) and referenced `method`, an undefined name.
    """
    if mthd == 'singleline':
        line = linearize(Y)
        return np.argmax(np.sqrt((Y - line) ** 2))
    elif mthd == 'doubleline':
        bst_index, bst_dist = None, np.inf
        # Try every interior split point and keep the best two-segment fit.
        for index in range(1, len(Y) - 1):
            curve = linearize(Y, index)
            dist = np.linalg.norm(Y - curve)
            if dist <= bst_dist:
                bst_index, bst_dist = index, dist
        return bst_index
    else:
        raise ValueError("Unknown detection method '%s'" % mthd)
# ----------------------------------- #
# Leap
def detect_iso(Y, delta_r=0.1, lvlref=None):
    """Return indexes of isolated points in Y.

    A point Yj (neighbours Yi, Yk) is isolated when it is not between its
    neighbours and min(|Yi-Yj|, |Yk-Yj|) / lvlref > delta_r; borders only
    need their single-neighbour ratio to exceed delta_r. Not reliable for
    negative Y values.

    Args:
        Y (float np.ndarray): list of values
        delta_r (float): max factor b/w lvlref and neighbours
        lvlref (float or callable): reference level, or a function of Y;
            default is the 90th percentile (lower interpolation)
    Return:
        (int-np.ndarray) indexes of isolated points
    """
    if len(Y) <= 2:
        return np.array([])
    # Resolve the reference level: explicit number > callable > percentile.
    if isinstance(lvlref, (float, int)):
        ref = lvlref
    elif lvlref:
        ref = lvlref(Y)
    else:
        ref = np.percentile(Y, 90, interpolation="lower")
    if ref <= 0:
        raise ValueError("lvlref=%s <= 0" % ref)
    # Interior points: compare each point against both neighbours.
    steps = np.diff(Y)
    left = -steps[:-1]
    right = steps[1:]
    between = left * right < 0  # opposite-signed slopes => Yj lies between
    jump = np.min([np.abs(left) / ref, np.abs(right) / ref], axis=0)
    # Borders: single neighbour each, never "between".
    between = np.concatenate([[False], between, [False]])
    jump = np.concatenate(
        [[np.abs(steps[0]) / ref], jump, [np.abs(steps[-1]) / ref]]
    )
    return np.where((1 - between) * (jump > delta_r))[0]
def detect_leap(X, Y, thld, lvl_thld=None, onspan=None, wfading=None):
    """Return indexes where leap is detected on Y
    Args:
        X (n-numpy.ndarray)
        Y (n-numpy.ndarray)
        thld (float) : min diff b/w consecutive values to consider leap
            if thld is neg, thld considered as max diff b/w consec. values
        lvl_thld (float) : min new value to consider leap
            if thld is neg, lvl_thld considered as max new value
        onspan (float) : given a detected leap at x,
            compute prev_y on ticks b/w prev_x - onspan & prev_x
            compute next_y on ticks b/w x & x + onspan
        wfading (float): when computing prev_y or next_y, apply weight to
            each selected y_val [weight = 1 - wfading * |x-x_ref| / onspan]
    Return:
        indexes (list) where y_i - y_i-1 >= deltaU_thld and y_i >= U_thld
    """
    indexes = []
    def flag(py, ny):
        # True where the py -> ny transition qualifies as a leap; the sign
        # of `thld` selects upward vs downward leaps.
        res = ((ny - py) >= thld) if thld >= 0 else ((ny - py) <= thld)
        if lvl_thld is not None:
            # Additionally require the new value itself to pass the level threshold.
            res *= (ny >= lvl_thld) if thld >= 0 else (ny <= lvl_thld)
        return res
    # Candidate leaps from raw consecutive differences; +1 stores the index
    # of the tick *after* the jump.
    indexes = list(np.argwhere(flag(Y[:-1], Y[1:])).flatten() + 1)
    if not onspan:
        return indexes
    # Second pass: confirm each candidate using span-averaged levels.
    findexes = []
    wfading = 0 if wfading is None else wfading
    if not 0 <= wfading <= 1:
        raise ValueError("wfading must be b/w 0 and 1")
    def weight(x, ref):
        # Linear fading: 1 at the reference tick, (1 - wfading) at onspan away.
        return 1 - wfading * np.abs(x - ref) / onspan
    for i in indexes:
        # Collect ticks in [X[i-1] - onspan, X[i-1]] (walking left keeps order).
        ref_x = X[i-1]
        j = i - 1
        sl = []
        while j >= 0 and X[j] >= ref_x - onspan:
            sl.insert(0, j)
            j -= 1
        prevX, prevY = X[sl], Y[sl]
        prevW = weight(prevX, ref_x)
        # Collect ticks in [X[i], X[i] + onspan].
        ref_x = X[i]
        j = i
        sl = []
        while j < len(Y) and X[j] <= ref_x + onspan:
            sl.append(j)
            j += 1
        nextX, nextY = X[sl], Y[sl]
        nextW = weight(nextX, ref_x)
        # Weighted averages on either side of the candidate leap.
        prev_y = sum(prevY * prevW) / sum(prevW)
        next_y = sum(nextY * nextW) / sum(nextW)
        if flag(prev_y, next_y):
            findexes.append(i)
    return findexes
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from ?rc_node import ?RCNode
def main():
    # NOTE(review): the '?' characters here and in the import above are
    # template placeholders (e.g. "?rc_node" -> "my_rc_node"); this file is
    # not valid Python until a generator substitutes them — confirm the
    # templating tool that consumes this file.
    rospy.init_node("?rc_node_node")
    rc_node = ?RCNode()
    rospy.loginfo('%s: starting' % (rospy.get_name()))
    rc_node.start()

if __name__ == "__main__":
    main()
# Label set for a 3-class chest X-ray task; tuple order presumably defines the
# integer class ids (looks like the RSNA Pneumonia Detection labels — confirm).
class_names = ('Normal', 'No Lung Opacity / Not Normal', 'Lung Opacity')
|
import pygame
from pygame import Rect
from PIL import Image
import requests
from io import BytesIO
BLACK = (0,0,0)
class Piece(pygame.sprite.Sprite):
    """A solid-colour rectangular piece drawn directly onto the screen."""

    def __init__(self, color, x, y, x0=128, y0=128):
        # Bug fix: pygame.sprite.Sprite.__init__ was never called, so
        # instances could not be managed by sprite groups.
        super().__init__()
        self.x0 = x0  # width in pixels
        self.y0 = y0  # height in pixels
        self.x = x
        self.y = y
        self.color = color
        self.shape = Rect(self.x, self.y, self.x0, self.y0)

    def updateShape(self):
        """Re-sync the Rect with the current position and size."""
        self.shape = Rect(self.x, self.y, self.x0, self.y0)

    def draw(self, screen):
        """Refresh the Rect and draw the filled piece on *screen*."""
        self.shape = Rect(self.x, self.y, self.x0, self.y0)
        pygame.draw.rect(screen, self.color, self.shape)
class ImageBlock():
    """A drawable image loaded from disk and anchored at *location*."""
    def __init__(self, image, x, y, location):
        # image: path to an image file; x/y: target size used by resize();
        # location: (left, top) pixel coordinates.
        self.image = pygame.image.load(image)
        self.x = x
        self.y = y
        # self.resize()
        self.rect = self.image.get_rect()
        self.rect.left,self.rect.top = location
    def draw(self,screen):
        # Blit the surface at the stored rect position.
        screen.blit(self.image,self.rect)
    def resize(self):
        # Scale the surface to (x, y), replacing self.image in place.
        self.image = pygame.transform.scale(self.image,(self.x,self.y))
class Background(ImageBlock):
    """Full-window backdrop built from an image path.

    NOTE(review): the `location` argument is effectively ignored — the rect
    is reset to (0, 0) at the end — and the mutable default `[0,0]` is a
    code smell (harmless here since it is never mutated). Confirm intent.
    """
    def __init__(self, image, location = [0,0], x=896, y=768):
        # super().__init__ loads the path into a Surface, then the surface is
        # scaled to (x, y) and pinned to the window's top-left corner.
        self.image = image
        super().__init__(self.image, x, y, location)
        self.resize()
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = 0,0
import os;
from datetime import datetime, timedelta;
from ...utils.dateutils import next_month
from ..utils.send_email import send_email;
from ..utils.compress_netcdf_file import compress_netcdf_file;
from .ERAI_Downloader import ERAI_Downloader
ENDDATE = datetime(2019, 8, 1, 0)
class ERAI_General( ERAI_Downloader ):
    """Downloads monthly ERA-Interim files and gzip-compresses them."""

    def __init__(self, outdir, info=None, subject=None):
        """
        Inputs:
            outdir  : top-level directory for downloaded files
        Keywords:
            info    : dict of downloader options
            subject : subject line for failure-notification e-mails
        """
        # NOTE(review): INFO is not defined anywhere in this module, so
        # calling with info=None raises NameError. Presumably a module-level
        # constant lost in a refactor — confirm before relying on the default.
        if info is None:
            info = INFO.copy()
        super().__init__(verbose=True, netcdf=True, **info)
        self.subject = subject
        self.outdir = outdir

    def download(self, start_year=None, start_month=None, email=None, delay=None):
        '''
        Purpose:
            Download all ERA-I analysis variables, month by month, until ENDDATE.
        Keywords:
            start_year  : Year to start looking for data (default 1979)
            start_month : Month to start looking for data (default 1)
            email       : email address to send error messages to
            delay       : timedelta from current date to download until
                          (default 26 weeks; currently unused — the loop
                          stops at the fixed ENDDATE)
        Returns:
            0 on success, 1 after a file fails max_attempt times.
        '''
        if start_year is None:
            start_year = 1979
        if start_month is None:
            start_month = 1
        if delay is None:
            delay = timedelta(weeks=26)
        date = datetime(start_year, start_month, 1)
        while date <= ENDDATE:
            self.set_date(date)
            target = self.defaultTarget()
            if not target:
                print('Issue getting target name')
                return
            self.info['target'] = os.path.join(self.outdir, target)
            fmt = ' {:2d}: {:40}'  # Format for status messages in the log file
            self.log.info('Downloading: ' + self.info['target'])
            attempt, max_attempt = 0, 5
            while attempt < max_attempt:
                super().download()
                if self.status < 2:
                    # File downloaded; compress it in place.
                    self.log.info(fmt.format(attempt + 1, "Downloaded!"))
                    self.log.info(fmt.format(attempt + 1, "Compressing file..."))
                    status = compress_netcdf_file(self.info['target'], email=email,
                                                  gzip=5, delete=True)
                    if status == 0:
                        attempt = max_attempt + 1  # sentinel: success, leave the retry loop
                        self.log.info(fmt.format(attempt + 1, "Compressed!"))
                    else:
                        # Compression failed: log why, delete the partial file, retry.
                        attempt += 1
                        if status == 1:
                            msg = "Output file exists and clobber was not set"
                        elif status == 2:
                            msg = "Data was NOT written correctly after three (3) attempts"
                        elif status == 3:
                            msg = "There was an error reading the data"
                        elif status == 4:
                            msg = "Input file doesn't exist"
                        else:
                            # Robustness: the original left msg unbound here.
                            msg = "Unknown compression error ({})".format(status)
                        self.log.info(fmt.format(attempt + 1, msg))
                        if os.path.exists(self.info['target']):
                            os.remove(self.info['target'])
                elif self.status == 2:
                    # Compressed file already present: nothing to do.
                    self.log.info(fmt.format(attempt + 1, "Compressed file already exists!"))
                    attempt = max_attempt + 1
                else:
                    # Download failed: remove any partial file and retry.
                    if os.path.exists(self.info['target']):
                        os.remove(self.info['target'])
                    attempt += 1
                if attempt == max_attempt:
                    # All retries exhausted: notify and abort.
                    self.log.error(fmt.format(attempt + 1, "Reached maximum attempts"))
                    # Bug fix: the original passed the undefined name `subject`
                    # (NameError); the subject is stored on the instance.
                    if email is not None:
                        status = send_email(email, self.subject)
                    return 1
            date = next_month(date)
        return 0
if __name__ == "__main__":
    import argparse  # local import keeps the CLI-only dependency out of library use

    parser = argparse.ArgumentParser(
        description="ERA-Interim Analysis Pressure Levels Download")
    ### Data storage keywords; https://software.ecmwf.int/wiki/display/UDOC/Data+storage+keywords
    parser.add_argument("outdir", type=str, help="Top level directory for output")
    parser.add_argument("-y", "--year", type=int, help="specifies start year")
    parser.add_argument("-m", "--month", type=int, help="specifies start month")
    parser.add_argument("-e", "--email", type=str,
                        help="email address to send failed message to")
    args = parser.parse_args()

    # Bug fix: the original instantiated `ERAI_AN_SFC`, a name that does not
    # exist in this module (NameError); the class defined above is ERAI_General.
    inst = ERAI_General(args.outdir)
    status = inst.download(args.year, args.month, args.email)
    exit(status)
|
import poker

# Ad-hoc smoke test for poker.take_screenshot: grab a frame, dump basic array
# info plus two sample pixels, then display the capture.
#poker.run_simulation(4, "AdKd", "QdJdTd", 1)
#equity = poker.run_simulation(4, "6d3s", "", 1000000)
#print(f"My equity is {equity}")
test_array = poker.take_screenshot()
print(test_array.shape)   # presumably an HxWxC image array — confirm
print(test_array.dtype)
print(test_array)
print(test_array[49][19])  # spot-check two neighbouring pixels
print(test_array[48][18])
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
plt.imshow(test_array)
plt.show()
|
import torch
import json
from tqdm import tqdm
from collections import defaultdict, Counter
from vocab import Vocab
def get_freqs(path, fields):
    """
    Count per-field token frequencies over a .jsonl file.

    Returns a dict keyed by field name whose values are Counters of token
    frequencies for that field.
    """
    assert isinstance(fields, (list, tuple))
    counters = defaultdict(Counter)
    with open(path, 'r') as handle:
        for raw in tqdm(handle, desc='Building vocab frequencies...'):
            record = json.loads(raw)
            for name in fields:
                counters[name].update(record[name])
    return counters
# Build the Yelp tokenizer: token/tag vocabularies from the training split.
yelp_freqs = get_freqs('data/yelp_train.jsonl', ['tokens', 'tags'])
yelp_tokens_vocab = Vocab(yelp_freqs['tokens'], max_size=25_000,
                          min_freq=2,
                          special_tokens=['<sos>', '<eos>', '<mask>'])
yelp_tags_vocab = Vocab(yelp_freqs['tags'], unk_token=None,
                        special_tokens=['<sos>', '<eos>'])
tokenizer = torch.load('tokenizer_no_vocab.pt')
tokenizer.vocabs['tokens'] = yelp_tokens_vocab
tokenizer.vocabs['tags'] = yelp_tags_vocab
# Bug fix: torch.save returns None; the original re-bound `tokenizer` to that
# None, which would break any later use of the variable.
torch.save(tokenizer, 'tokenizer_yelp.pt')

# Build the Amazon tokenizer the same way, from a fresh vocab-less tokenizer.
amazon_freqs = get_freqs('data/amazon_train.jsonl', ['tokens', 'tags'])
amazon_tokens_vocab = Vocab(amazon_freqs['tokens'], max_size=25_000,
                            min_freq=2,
                            special_tokens=['<sos>', '<eos>', '<mask>'])
amazon_tags_vocab = Vocab(amazon_freqs['tags'], unk_token=None,
                          special_tokens=['<sos>', '<eos>'])
tokenizer = torch.load('tokenizer_no_vocab.pt')
tokenizer.vocabs['tokens'] = amazon_tokens_vocab
tokenizer.vocabs['tags'] = amazon_tags_vocab
torch.save(tokenizer, 'tokenizer_amazon.pt')
|
import math
import random
from typing import List
from pyglet.sprite import Sprite
from version1.game.resources import *
from version2.game.PhysicalObject import PhysicalObject
def distance(point_1=(0, 0), point_2=(0, 0)):
    """Return the Euclidean distance between two (x, y) points."""
    dx = point_1[0] - point_2[0]
    dy = point_1[1] - point_2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def asteroids(num_asteroids, player_location, batch=None) -> List[Sprite]:
    """Create `num_asteroids` asteroids, each at least 100 px from the player.

    :param num_asteroids: how many asteroids to spawn
    :param player_location: (x, y) the spawn exclusion centre
    :param batch: optional pyglet batch for the sprites
    """
    spawned = []
    for _ in range(num_asteroids):
        # Seeding with the player position guarantees the while loop runs at
        # least once (distance 0 < 100) and draws a random spot.
        pos_x, pos_y = player_location
        while distance((pos_x, pos_y), player_location) < 100:
            pos_x = random.randint(0, 800)
            pos_y = random.randint(0, 600)
        rock = PhysicalObject(img=asteroid_image,
                              x=pos_x, y=pos_y, batch=batch)
        rock.rotation = random.randint(0, 360)
        # Random drift velocity.
        rock.velocity_x = random.random() * 40
        rock.velocity_y = random.random() * 40
        spawned.append(rock)
    return spawned
def player_lives(num_icons, batch=None):
    """
    Create the row of life icons shown near the top-right corner.
    :param num_icons:
    :param batch:
    """
    icons = []
    for slot in range(num_icons):
        icon = Sprite(img=player_image,
                      x=785 - slot * 30, y=585,
                      batch=batch)
        icon.scale = 0.5
        icons.append(icon)
    return icons
def new_bullet(name, rotation, width, x, y, velocity_x, velocity_y, speed, batch):
    """
    Spawn a bullet at the ship's nose, inheriting the ship's velocity.
    :param name: label stored on the bullet
    :param rotation: ship rotation in degrees (pyglet convention)
    :param width: ship radius; the bullet starts this far from (x, y)
    :param x: ship x position
    :param y: ship y position
    :param velocity_x: ship x velocity, added to the bullet's
    :param velocity_y: ship y velocity, added to the bullet's
    :param speed: bullet muzzle speed
    :param batch: pyglet graphics batch
    :return: the configured Bullet instance
    """
    # Imported here (as in the original) to avoid a circular import.
    from version2.game.Bullet import Bullet
    angle_radians = -math.radians(rotation)
    cos_a = math.cos(angle_radians)
    sin_a = math.sin(angle_radians)
    bullet = Bullet(x + cos_a * width, y + sin_a * width, batch=batch)
    bullet.name = name
    bullet.speed = speed
    bullet.velocity_x = velocity_x + cos_a * bullet.speed
    bullet.velocity_y = velocity_y + sin_a * bullet.speed
    return bullet
|
import os, random
import numpy as np
import skimage.transform as tf
from scipy import ndarray, ndimage
from skimage import io, util
from PIL import Image
import PIL
# path variables and constant
from .. import root_dir

data_dir = root_dir.data_path()
# existing images: already-cropped character images for the "ko" character
char_dir = "ko"
aug_crop_img_dir = os.path.join(data_dir, "aug_crop_char_img", char_dir)
all_crop_images = os.listdir(aug_crop_img_dir)
# new augmented images are written under this folder (created if absent)
aug_crop_img_dir_final = os.path.join(data_dir, "aug_crop_char_img_final")
work_folder = os.path.join(aug_crop_img_dir_final, char_dir)
os.makedirs(work_folder, exist_ok = True)
# functions
def deformation(image):
    """Apply a random affine warp (shear, rotation, scale, translation).

    The dx/dy terms re-centre the transform so the warp pivots around the
    image centre rather than the array origin; warping with
    ``trans_mat.inverse`` keeps the output the same shape as the input.
    NOTE(review): the rotation range (pi/7 minus pi/12 minus the shear
    angle) is asymmetric — presumably to compensate the shear; confirm.
    """
    random_shear_angl = np.random.random() * np.pi/6 - np.pi/12
    random_rot_angl = np.random.random() * np.pi/7 - np.pi/12 - random_shear_angl
    random_x_scale = np.random.random() * .4 + .8
    random_y_scale = np.random.random() * .4 + .8
    random_x_trans = np.random.random() * image.shape[0] / 4 - image.shape[0] / 8
    random_y_trans = np.random.random() * image.shape[1] / 4 - image.shape[1] / 8
    # Offset that keeps the scaled/rotated/sheared image centred.
    dx = image.shape[0]/2. \
        - random_x_scale * image.shape[0]/2 * np.cos(random_rot_angl)\
        + random_y_scale * image.shape[1]/2 * np.sin(random_rot_angl + random_shear_angl)
    dy = image.shape[1]/2. \
        - random_x_scale * image.shape[0]/2 * np.sin(random_rot_angl)\
        - random_y_scale * image.shape[1]/2 * np.cos(random_rot_angl + random_shear_angl)
    trans_mat = tf.AffineTransform(rotation = random_rot_angl,
                                   translation=(dx + random_x_trans,
                                                dy + random_y_trans),
                                   shear = random_shear_angl,
                                   scale = (random_x_scale,random_y_scale))
    return tf.warp(image, trans_mat.inverse,output_shape=image.shape)
def image_deformation(image):
    """Warp the image with a small random similarity transform
    (rotation up to pi/12 plus a fixed sub-pixel translation)."""
    transform = tf.SimilarityTransform(
        scale=1,
        rotation=np.random.random() * np.pi / 12,
        translation=(0, .1),
    )
    return tf.warp(image, transform)
def random_rotation(image_array: ndarray):
    """Rotate the image by a random angle between -12 and +12 degrees."""
    angle_deg = random.uniform(-12, 12)
    return tf.rotate(image_array, angle_deg)
def random_noise(image_array: ndarray):
    """Return a copy of the image with random noise added (skimage default:
    Gaussian)."""
    return util.random_noise(image_array)
def blur(image_array: ndarray):
    # Gaussian blur with a fixed sigma of 2 (the original comment wrongly
    # said "add random noise").
    return ndimage.gaussian_filter(image_array, sigma=2)
def horizontal_flip(image_array: ndarray):
    # Mirror left-right: reverse the column axis (no skimage needed).
    return np.flip(image_array, axis=1)
def vertical_flip(image_array: ndarray):
    # Mirror top-bottom: reverse the row axis (the original comment said
    # "horizontal" by copy-paste).
    return np.flip(image_array, axis=0)
def padding_small_image(img, size):
    """Resize `img` to fit inside a size x size square (keeping the aspect
    ratio) and pad the remainder with mid-grey, centring the image."""
    width, height = img.size  # Pillow reports size as (w, h)
    if width > height:
        new_width = size
        new_height = int(size * (height / width) + 0.5)
    else:
        new_height = size
        new_width = int(size * (width / height) + 0.5)
    # Aspect-preserving resize, then paste onto a grey square canvas.
    resized = img.resize((new_width, new_height), resample=PIL.Image.BICUBIC)
    canvas = Image.new("RGB", (size, size), (128, 128, 128))
    upper_left = ((size - new_width) // 2, (size - new_height) // 2)
    canvas.paste(resized, upper_left)
    return canvas
def do_augment(image_path):
    """Write four augmented variants (blur, noise, rotation, deformation)
    of each input image into `work_folder`, prefixing the filenames."""
    for idx, path in enumerate(image_path):
        img = io.imread(path)
        # Computed in the same order as before so RNG consumption is identical.
        noisy = random_noise(img)
        rotated = random_rotation(img)
        blurred = blur(img)
        deformed = image_deformation(img)
        # Saved in the same order as before: blur, noise, rotation, deformation.
        for prefix, variant in (("b_", blurred), ("n_", noisy),
                                ("r_", rotated), ("d_", deformed)):
            io.imsave(os.path.join(work_folder, prefix + all_crop_images[idx]),
                      variant)
## main code here ##
# Full paths of every existing cropped image for this character.
image_path = [os.path.join(aug_crop_img_dir, path) for path in all_crop_images]
# Generate the augmented variants on disk.
do_augment(image_path)
# Resize + pad every augmented image to a 150x150 square, keeping aspect ratio.
all_aug_image_path = [os.path.join(work_folder, path) for path in os.listdir(work_folder)]
for img_path in all_aug_image_path:
    img = Image.open(img_path)
    resize_image = padding_small_image(img, 150)
    # BUG FIX: skimage.io.imsave expects an ndarray, but padding_small_image
    # returns a PIL Image — save it with PIL directly.
    resize_image.save(img_path)
|
import numpy as np
import math
from operator import add
from scipy.stats import linregress
class DogGroup:
    """Plain record of a dog group's height, weight and breed.

    Defined for the examples below; not otherwise used in this script.
    """
    def __init__(self, height, weight, breed):
        self.height = height  # e.g. mm
        self.weight = weight  # e.g. kg
        self.breed = breed    # breed name
# implementing the factorial equation from scratch in python
def factorial(n):
""" Factorial Function is n*n-1*n-2...n-n """
res = 1
# starting from 1*1 to n. remember that python excludes the terminating number so we have to add 1 to n
for i in range(1, n + 1):
res = res * i
return res
print("{} is the factorial of 5!\n".format(factorial(5)))
# Plotting factorial vs number used for factorial
import matplotlib.pyplot as plt

n = np.arange(1, 10, 1)
y = [factorial(i) for i in n]
fig, ax = plt.subplots()
ax.plot(n, y)
ax.set(xlabel="n", ylabel="n!", title="n vs n!")
ax.grid()
# fig.savefig("test.png")
# plt.show()
######################################################################
# implementing euler's number function 2.17
def e():
    """Approximate Euler's number as the series sum of 1/n! for n in [0, 100)."""
    return sum(1 / factorial(k) for k in range(100))
# The series converges to math.e well within double precision.
print(f"This is euler's number: {e()}\n")
######################################################################
def avg(X):
    """Arithmetic mean of a collection of numbers X (written 'from scratch')."""
    # sum() accumulates in the same order as the original explicit loop.
    return sum(X) / len(X)
######################################################################
def average(X):
    """Arithmetic mean of X; numerically identical to avg() above."""
    total = 0
    for value in X:
        total = total + value
    return total / len(X)
# Fixed seed so the avg/average comparison below is reproducible.
np.random.seed(0)
X = np.random.randint(low=1, high=100, size=10)
print("If {} == {}, print: {}\n".format(avg(X), average(X), avg(X) == average(X)))
######################################################################
# Toy datasets used by the summary-statistics and regression examples below.
dog_heights_train = [600, 470, 170, 430, 300]
dog_spots = [10, 20, 5, 13, 18]
cat_heights = [500, 750, 120, 300, 123]
# implement the variance functions
def var(X):
    """
    Population variance via the pairwise identity:
    var(X) = (1 / n^2) * sum_{i,j} (x_i - x_j)^2 / 2.
    Note the double summation — this is the deliberate 'trick' of this
    implementation (O(n^2), no mean needed).
    """
    pair_total = sum(1.0 / 2.0 * (a - b) ** 2 for a in X for b in X)
    return pair_total / (len(X) ** 2)
# X here is the random array created for the avg/average demo above.
print("The variance for an array of numbers X is: {}\n".format(var(X)))
def variance(X):
    """Population variance: mean of squared deviations from the mean.

    Alternative (O(n)) implementation of the variance function.
    """
    # PERF FIX: average(X) was recomputed inside the loop on every element,
    # making this O(n^2); its value never changes, so hoist it.
    mean = average(X)
    res = 0
    for x in X:
        res = res + (x - mean) ** 2
    res = res / len(X)
    return res
def variance_1(values):
    """One-line population variance (comprehension form)."""
    # PERF FIX: hoist average(values) — it was re-evaluated per element.
    mean = average(values)
    return sum([(x - mean) ** 2 for x in values]) / len(values)
def std_dev(X):
    """Population standard deviation: the square root of variance(X)."""
    return variance(X) ** 0.5
# Print mean / variance / std-dev of the training dog heights.
print("### Dog heigh summary statistics ###\n")
print(
    "Mean of dog heights: {}\nVariance of dog heights: {}\nStandard Deviation of dog heights: {}\n".format(
        average(dog_heights_train),
        variance(dog_heights_train),
        std_dev(dog_heights_train),
    )
)
print("####################################\n")
def covariance(X, Y):
    """Covariance is a generalization of correlation. Correlation describes
    the relationship between two groups of numbers, whereas covariance
    describes it between two or more groups of numbers.

    return:
        sum((x(i) - mean(x)) * (y(i) - mean(y)))
    Note: this is the *unnormalized* covariance (no division by n), which is
    exactly what coeff() below needs.
    """
    # PERF FIX: average(X) and average(Y) were recomputed for every pair,
    # making this O(n^2); hoist the two constant means.
    mean_x = average(X)
    mean_y = average(Y)
    res = 0
    for x, y in zip(X, Y):
        res += (x - mean_x) * (y - mean_y)
    return res
# Unnormalized covariance between the two toy dog series.
print(
    "This is the covariance between dog spots and dog heights: {}\n".format(
        covariance(dog_heights_train, dog_spots)
    )
)
######################################################################
############### Simple Linear Regression ############################
######################################################################
def regression_regularizer(type="l1"):
    """Placeholder: l1/l2 regularization is not implemented yet."""
    pass
def coeff(X, Y):
    """Slope estimate b1 = cov(X, Y) / var(X) for simple linear regression."""
    slope = covariance(X, Y) / variance(X)
    return slope
def intercept(X, Y):
    """Intercept estimate b0 = mean(Y) - b1 * mean(X)."""
    slope = coeff(X, Y)
    return average(Y) - slope * average(X)
def simple_linear_regression(X_train, X_test, Y, random_error=None):
    """Simple Linear Regression: fit b0, b1 on (X_train, Y) and predict for X_test.

    Parameters
    ----------
    X_train, Y : training predictor and target values.
    X_test : predictor values to produce predictions for.
    random_error : scalar noise added to every prediction; drawn uniformly
        from [0, 1) when not supplied.

    Returns
    -------
    np.ndarray of len(X_test) predictions b0 + b1 * x + random_error.
    """
    if random_error is None:
        # BUG FIX: the original default `random_error=np.random.random()` was
        # evaluated once at import time; draw at call time instead.
        random_error = np.random.random()
    b0, b1 = intercept(X_train, Y), coeff(X_train, Y)
    # BUG FIX: np.append returns a NEW array, so the original loop discarded
    # every prediction and returned an uninitialized np.empty buffer.
    return np.array([b0 + b1 * x_test + random_error for x_test in X_test])
def root_mean_squared_error(actual, predicted):
    """RMSE loss used to evaluate the simple linear regression model."""
    squared_errors = sum((a - p) ** 2 for a, p in zip(actual, predicted))
    return math.sqrt(squared_errors / len(actual))
# test simple linear regression
dog_weights_train = [4.5, 3.5, 11.1, 3.4, 2.3]
dog_heights_test = list(reversed(dog_heights_train))
dog_weights_test = list(reversed(dog_weights_train))
train_dataset = [list(i) for i in zip(dog_heights_train, dog_weights_train)]
test_dataset = [list(i) for i in zip(dog_heights_test, dog_weights_test)]
# fitting the SLR to get predictions
y_pred = simple_linear_regression(
    dog_heights_train,
    dog_heights_test,
    dog_weights_train,
    random_error=np.random.rand(1),
)
print(
    "This is the prediction of dog weights "
    "given new dog height information from the "
    "learned coefficients: {}\n".format(y_pred)
)
# evaluating the performance of the SLR
rmse = root_mean_squared_error(dog_weights_test, y_pred)
print("RMSE between the predicted and actual dog_weights is : {0:.3f}\n".format(rmse))
# Plotting the actual vs. the predicted values of the dog weights
fig, ax = plt.subplots()
ax.plot(dog_heights_test, dog_weights_test, "o-")
ax.plot(dog_heights_test, y_pred, ".-")
# BUG FIX: linregress returns a result tuple (slope, intercept, rvalue,
# pvalue, stderr); plotting it directly just draws those 5 scalars against
# their indices. Plot the fitted regression line instead.
fit = linregress(dog_heights_test, dog_weights_test)
ax.plot(dog_heights_test, [fit.intercept + fit.slope * h for h in dog_heights_test])
ax.set(
    xlabel="Dog heights",
    ylabel="Dog weights",
    title="Dog heights vs dog weight predictions",
)
plt.grid()
# plt.show()
######################################################################
######################################################################
def sigmoid(y):
    """Logistic sigmoid 1 / (1 + e^-y), built on the hand-rolled e().

    (math.e would work equally well, as the original note said.)
    """
    base = e()
    return 1 / (1 + base ** -y)
print("The sigmoid function result of 3 is {}\n".format(sigmoid(3)))
# plot the sigmoid function as a function of its value against X
a = np.arange(1, 20 + 1)
fig, ax = plt.subplots()
ax.plot(a, sigmoid(a))
ax.set(xlabel="x", ylabel="sigmoid value of x", title="x vs sigmoid")
ax.grid()
# plt.show()
def logistic_error_function(X, Y):
    """
    Binary cross-entropy (negative log-likelihood) for Bernoulli outcomes:
    -1/n * sum(y*log(p) + (1-y)*log(1-p)) with p = sigmoid(x).
    """
    total = 0
    for x, y in zip(X, Y):
        p = sigmoid(x)
        total = total + y * math.log(p) + (1 - y) * math.log(1 - p)
    return total * (-1 / len(X))
def logistic_regression(X_train, X_test, Y):
    """Logit link: squash simple-linear-regression predictions through sigmoid."""
    return sigmoid(simple_linear_regression(X_train, X_test, Y))
# Run the logistic-regression demo and plot it against the linear predictions.
prob_a = logistic_regression(dog_heights_train, dog_heights_test, dog_weights_train)
print("This is the output of the logistic regression model:\n{}".format(prob_a))
fig, ax = plt.subplots()
ax.plot(dog_heights_test, dog_weights_test)
ax.plot(dog_heights_test, prob_a)
ax.plot(dog_heights_test, y_pred)
ax.set(
    xlabel="Dog heights (cm)",
    ylabel="Dog weights (kg)",
    title="Dog heights vs weights predictions",
)
plt.grid()
# plt.show()
|
import pyautogui as pag  # library for mouse-related functions

# Get the screen size and print it.
xscreen, yscreen = pag.size()
print('Xscreen: '+ str(xscreen).rjust(4)+' Yscreen:' + str(yscreen).rjust(4) )
try:
    while True:
        # Print the current mouse coordinates.
        x, y = pag.position()
        print('X: '+ str(x).rjust(4)+' Y:' + str(y).rjust(4) )
        # Move the mouse based on keys 'a' / 's' / 'd' / 'w'.
        # BUG FIX: raw_input() only exists in Python 2; this script otherwise
        # uses Python-3 style print() calls, so use input().
        inputs = input('move:')
        if inputs == 'a':
            pag.moveRel(-50, 0, duration = 0.5)
        elif inputs == 'd':
            pag.moveRel(50, 0, duration = 0.5)
        elif inputs == 's':
            pag.moveRel(0, 50, duration = 0.5)
        elif inputs == 'w':
            pag.moveRel(0, -50, duration = 0.5)
        else:
            print("invalid")
except KeyboardInterrupt:
    print('\nDone')
|
import numpy as np
import math
class Network(object):
def __init__(self, sizes):
"""
"sizes" -> [1,2,3], where the 1st layer was 1 neuron and the other 2 have 2 and 3 respectively. Note that the 1st layer
is the input layer.
The Bias and the Weights are initialized at random with a Gaussian distribution with mean 0, and varience 1.
"""
self.num_layers = len(sizes)
self.biases = [np.random.randn(y,1) for y in sizes[1:]]
self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])]
# helper variables
self.bias_nitem = sum(sizes[1:])
self.weight_nitem = sum([self.weights[i].size for i in range(self.num_layers-2)])
def tanh(self,z):
return np.tanh(z)
def sigmoid(self, z):
# The sigmoid function.
return 1.0/(1.0+np.exp(-z))
def output_nn(self, input_test):
# Returns the output of the nn with a certain input
output = input_test
for b, w in zip(self.biases, self.weights):
output = self.tanh(np.dot(w,output)+b)
return output
def softmax(self, output):
sum_a = sum([math.pow(math.e,y) for y in output])
output = [y/sum_a for y in output]
return output
def mean_error(self,X,y):
total_score=0
for i in range(X.shape[0]):
predicted = self.output_nn(X[i].reshape(-1,1))
actual = y[i]
total_score += np.power(np.argmax(predicted)-actual,2)/2 # mean-squared error
return total_score
def accuracy(self, X, y):
accuracy = 0
for i in range(X.shape[0]):
output = self.output_nn(X[i].reshape(-1,1))
#print("Raw Output:", str(output), " Real Output: ", str(np.argmax(output)), " target: ", y[i], "\n")
if(int(np.argmax(output)) == y[i]):
#print(accuracy)
accuracy += 1
#print("accuracy:", accuracy)
return accuracy / len(y) * 100
def __str__(self):
s = "\nBias:\n\n" + str(self.biases)
s += "\nWeights:\n\n" + str(self.weights)
s += "\n\n"
return s |
from Manager import Manager
def start():
    """Main loop: show the menu until execute_menu signals to stop."""
    keep_running = True
    while keep_running:
        menu()
        keep_running = execute_menu(get_option())
def menu():
    """Print the main menu."""
    for line in ("____________Menu principal____________\n",
                 "1. Agregar datos en una estructura\n",
                 "2. Eliminar datos en una estructura\n",
                 "3. Mostrar datos de una estructura\n",
                 "0. Salir\n",
                 "______________________________________\n"):
        print(line)
def get_option():
    """Prompt the user and return the chosen option as a string."""
    return input("Digite una opcion: ")
def execute_menu(option):
    """Dispatch a main-menu option; return False only when the user quits."""
    operations = {"1": "Add", "2": "Delete", "3": "Show"}
    if option in operations:
        operation = operations[option]
        add_delete_show_menu(operation)
        execute_add_delete_show(get_option(), operation)
        return True
    if option == "0":
        print("Gracias...")
        return False
    print("*Opcion invalida*")
    return True
def add_delete_show_menu(operation):
    """Print the structure-selection submenu titled after `operation`."""
    title = get_ES_name(operation)
    for line in ("____________" + title + " datos____________\n",
                 "1. En un arbol B\n",
                 "2. En un arbol AVL\n",
                 "3. En un arbol B+\n",
                 "4. En un arbol Rojo-Negro\n",
                 "0. <-\n"):
        print(line)
    print("______________________________________")
def get_ES_name(operation):
    """Translate an internal operation keyword into its Spanish menu label."""
    labels = {"Add": "Agregar", "Delete": "Eliminar"}
    # Anything else (i.e. "Show") falls back to "Mostrar", as before.
    return labels.get(operation, "Mostrar")
def execute_add_delete_show(option, operation):
    """Run `operation` on the tree type selected by `option`."""
    struct_by_option = {"1": "B", "2": "AVL", "4": "Rojo-Negro"}
    if option in struct_by_option:
        execute_operation(operation, struct_by_option[option])
    elif option == "3":
        # Arbol B+ aun no implementado (deshabilitado en el original).
        pass
    elif option == "0":
        pass
    else:
        print("Opcion invalida")
def execute_operation(operation, struck_type):
    """Forward the chosen operation to the Manager for `struck_type`."""
    if operation in ("Add", "Delete"):
        num = get_input()
        if operation == "Add":
            print(str(Manager.add_item(num, struck_type)))
        else:
            print(str(Manager.delete_item(num, struck_type)))
    else:
        print(str(Manager.show_item(struck_type)))
def get_input():
    """Prompt until the user types a valid integer; return it."""
    while True:
        try:
            return int(input("Digite el numero: "))
        except ValueError:
            print("El numero debe ser entero")
# Script entry point.
if __name__ == "__main__":
    start()
|
# how often does it beat ivv given 50 days?
# how often is 1 year up?
debug = None
import z
import queue
import buy
import sliding
import statistics
from sortedcontainers import SortedSet  # NOTE(review): unused here
import args

ETF = "VOO"
# Benchmark-ETF statistics, filled in when proc(ETF) runs.
etf_wc, etf_bc, etf_ly, etf_l2y, etf_avg = 0, 0, 0, 0, 0
# Analysis window: the most recent `start` trading days.
start = 650
istart = -1*start
req = start - 20
dates = z.getp("dates")       # full trading-date calendar (persisted)
sdate = dates[istart]         # first date of the analysis window
years = -1*252*4
asdate = dates[years]         # date ~4 years back (252 trading days/year)
indexdate_4 = dates.index(asdate)
#years2 = -1*252*2
#sdate2 = dates[years]
#
years8 = -1*252*8
asdate8 = dates[years8]       # date ~8 years back
# Stocks that beat the ETF on every tracked metric (filled by proc()).
better_etf = list()
def getetfChanges():
    """Return the rolling 50-day close-price ratio series for the benchmark ETF."""
    changes = list()
    window = sliding.WindowQueue(50)
    for row in buy.getRows("VOO", sdate):
        window.add_tail(float(row[z.closekey]))
        if window.full():
            changes.append(round(window.main[-1] / window.main[0], 3))
    return changes
# Benchmark 50-day change series, computed once up front for comparisons.
etfchanges = getetfChanges()
last_prices = dict()  # latest close per stock, filled by proc()
def proc(astock):
    """Compute per-stock summary statistics over the analysis window.

    Returns a 10-tuple (y1u, ivvb, wc, bc, avg, ly, l2y, avg8, dfh1y, gfl1y):
    y1u  - fraction of rolling 1-year windows that closed up,
    ivvb - fraction of rolling 50-day windows that beat the ETF's change,
    wc/bc/avg/ly/l2y/avg8 - see annuals(),
    dfh1y/gfl1y - last close relative to the 1-year high / low.
    Entries are "NA" (or None for ivvb) when not computable.
    Side effects: updates module-level ETF baselines, better_etf and
    last_prices, and (with args.args.bta) buy's persisted dictionaries.
    """
    if debug:
        print("astock: {}".format( astock))
    global etf_wc, etf_bc, etf_ly, etf_l2y, etf_avg, better_etf, last_prices
    prev_close = None   # NOTE(review): never used
    closes_252 = sliding.WindowQueue(252)
    ups = list()        # 1/0 per day: trailing 1-year window closed up?
    beativv = list()    # 1/0 per day: 50-day change beat the ETF's?
    closes_50 = sliding.WindowQueue(50)
    etf_idx = 0
    firstdate = None    # NOTE(review): never used
    index_adjust = 0
    for i, row in enumerate(buy.getRows(astock, sdate)):
        # adjust for first available date
        if i == 0:
            try:
                myidx = dates.index(row['Date'])
                index_adjust = myidx - dates.index(sdate)
            except:
                continue
        try:
            c_close = float(row[z.closekey])
        except:
            # NOTE(review): if the very first row is unparsable, c_close is
            # still unbound here and add_tail below raises NameError.
            print("row: {}".format( row))
            pass
        closes_50.add_tail(c_close)
        closes_252.add_tail(c_close)
        # ive got a year worth of closes_252
        if closes_50.full():
            interval_change = round(closes_50.main[-1]/closes_50.main[0],3)
            # if debug:
            #     date = row["Date"]
            #     print("etf_idx : {} date : {} interval_change : {} close : {}".format( etf_idx, date, interval_change, c_close ))
            try:
                beativv.append(1 if interval_change >= etfchanges[etf_idx + index_adjust] else 0)
            except:
                pass
            etf_idx += 1
        if closes_252.full():
            ups.append(1 if closes_252.main[0] < closes_252.main[-1] else 0)
    try:
        ivvcounts = len(beativv)
        ivvb = round(sum(beativv) / ivvcounts,3)
        # if debug:
        #     print("beativv: {}".format( beativv))
    except Exception as e:
        print("e: {}".format( e))
        ivvb = None
    # First 240 of the 252-day window, i.e. excluding the most recent ~12
    # closes — presumably so the current price is not its own high/low.
    consideration = list(closes_252.main)[:240]
    try:
        high = max(consideration)
    except:
        return "NA", "NA", "NA","NA", "NA", "NA", "NA", "NA","NA", "NA"
    low = min(consideration)
    dfh1y = round(c_close / high,3)   # distance from the 1-year high
    gfl1y = round(c_close / low,3)    # gain from the 1-year low
    if debug:
        print("dfh1y : {}".format( dfh1y ))
        print("gfl1y : {}".format( gfl1y ))
    wc, bc, avg, ly, l2y, avg8, newstock = annuals(astock)
    if args.args.bta and not newstock:
        buy.addPDic(astock, "ly", ly)
        buy.addPDic(astock, "l2y", l2y)
        buy.addPDic(astock, "avg", avg)
        buy.addPDic(astock, "wc", wc)
    last_prices[astock] = c_close
    count = len(ups)
    y1u = "NA"
    if count > 30:
        y1u = round(sum(ups) / count, 3)
    if astock == ETF:
        # proc(ETF) must run before other stocks for these baselines to matter.
        etf_wc, etf_bc, etf_ly, etf_l2y, etf_avg = wc, bc, ly, l2y, avg
    if ly != "NA":
        try:
            if wc > etf_wc and bc > etf_bc and ly > etf_ly and l2y > etf_l2y and avg > etf_avg:
                better_etf.append(astock)
        except:
            pass
    # rank = buy.getMCRank(astock)
    # if rank < 2600 and c_close > 1:
    #     buy.addSorted("ly", ly, astock)
    #     try:
    #         buy.addSorted("wc", wc, astock)
    #         buy.addSorted("bc", bc, astock)
    #         buy.addSorted("avg", avg, astock)
    #     except:
    #         pass
    return y1u, ivvb, wc, bc, avg, ly, l2y, avg8, dfh1y, gfl1y
# Reference trading calendar as seen for the ETF, plus stocks with data gaps.
etf_dates = list()
missing_days = list()
def annuals(astock):
    """Compute rolling 1-year-change statistics for `astock` over ~8 years.

    Returns (wc, bc, avg, ly, l2y, avg8, newstock):
    wc/bc - worst / best rolling 1-year change,
    avg   - median-weighted blend (median+median+mean)/3 of those changes,
    ly    - the most recent 1-year change,
    l2y   - the 1-year change from ~252 samples earlier,
    avg8  - mean/median blend of year-over-year changes sampled every 252 days,
    newstock - True when the stock has under ~4 years of history.
    Values fall back to "NA" when not computable.
    Side effects: fills etf_dates (when astock is the ETF) and missing_days.
    """
    global etf_dates, missing_days
    closes_252 = sliding.WindowQueue(252)
    if debug:
        print("start: {}".format( asdate8))
    annual_list = list()
    started_annuals = False
    annualprices8 = list()
    i_adjust = 0
    newstock = False
    days_missing = 0
    first_date = None
    for i, row in enumerate(buy.getRows(astock, asdate8)):
        c_date = row["Date"]
        if astock == ETF:
            etf_dates.append(c_date)
        if i == 0:
            first_date = c_date
        # Stock data starts later than the 8-year mark: compute offsets.
        if i == 0 and asdate8 != c_date:
            try:
                myidx = dates.index(c_date)
            except:
                continue
            index_adjust = myidx - dates.index(sdate)
            # if debug:
            #     print("index_adjust : {}".format( index_adjust ))
            #     print("myidx : {}".format( myidx ))
            if myidx > indexdate_4:
                newstock = True
            # stock was not available 8 years ago
            i_adjust = dates.index(c_date) - dates.index(asdate8)
            # if debug:
            #     print("i_adjust : {} first date {}".format( i_adjust, c_date ))
        if astock != ETF:
            adjusted = i + i_adjust
            # print("{} i {} {} etf_dates: {}".format( adjusted, i, i_adjust, len(etf_dates)))
            # print("c_date : {}".format( c_date ))
            # Compare against the ETF's calendar to detect missing rows.
            if etf_dates and not days_missing and etf_dates[adjusted] != c_date:
                days_missing += 1
        try:
            c_close = float(row[z.closekey])
        except:
            continue
        # Sample one price roughly every trading year for avg8.
        if not (i+i_adjust) % 252:
            annualprices8.append(c_close)
        if c_date == asdate:
            started_annuals = True
        if started_annuals or newstock:
            closes_252.add_tail(c_close)
            if closes_252.full():
                annual_change = round(closes_252.main[-1]/closes_252.main[0],3)
                # if annual_change > 3.2:
                #     annual_change = 3.2
                annual_list.append(annual_change)
                # if debug:
                #     print("annual_change : {} date {}".format( annual_change, row["Date"] ))
    if days_missing > 20:
        missing_days.append((days_missing, astock))
    annualprices8.append(c_close)
    prevprice = None
    annuals8 = list()
    for price in annualprices8:
        try:
            chg = round(price / prevprice, 3)
            annuals8.append(chg)
        except:
            # First iteration: prevprice is None.
            pass
        prevprice = price
    # if debug:
    #     print("annuals8: {}".format( annuals8))
    avg8 = round((statistics.mean(annuals8) + statistics.median(annuals8))/2,3) if len(annuals8) > 4 and days_missing < 20 else "NA"
    try:
        ly = annual_list[-1]
    except:
        ly = "NA"
        pass
    try:
        # ~252 rolling samples earlier, i.e. roughly one year before `ly`.
        l2y = annual_list[-252]
    except:
        l2y = "NA"
    wc = "NA"
    bc = "NA"
    avg = "NA"
    if annual_list and (started_annuals or newstock):
        wc = min(annual_list)
        bc = max(annual_list)
        median = statistics.median(annual_list)
        mean = statistics.mean(annual_list)
        # Weight the median twice so outlier years pull avg less.
        avg = round((median + median + mean) / 3,3)
    return wc, bc, avg, ly, l2y, avg8, newstock
import sys
def procs():
    """Run proc() for every stock and persist the collected metrics.

    NOTE(review): relies on a module-level `stocks` list that is not defined
    in this file — presumably injected by an importing module; confirm.
    """
    global better_etf, missing_days
    # Remove the benchmark ETF from the processing list (the commented-out
    # code used to re-insert it at the front).
    try:
        stocks.pop(stocks.index(ETF))
    except:
        pass
    # BUG FIX: prob_dic must be bound even when z.getp raises, otherwise the
    # `if prob_dic is None` check below hits a NameError.
    prob_dic = None
    try:
        prob_dic = z.getp("probs")
    except:
        pass
    if prob_dic is None:
        prob_dic = dict()
    for i, astock in enumerate(stocks):
        if not i % 100:
            # Progress marker, every 100 stocks.
            print(": {}".format( astock))
        try:
            prob_dic[astock] = proc(astock)
        except Exception as e:
            print("procs problem astock: {}".format( astock))
            z.trace(e)
    if not debug:
        # Persist everything collected during the run.
        z.setp(prob_dic, "probs")
        for cat in buy.sortcats:
            buy.saveSorted(cat)
        z.setp(better_etf, "better_etf", True)
        z.setp(last_prices, "last_prices")
# print("wc, bc, avg, ly, l2y, avg8")
import args
if __name__ == '__main__':
print (stocks)
procs()
if args.args.bta:
buy.savePs()
|
from __future__ import division
import numpy as np
# from numba import jit
def n_step_advantage(rewards, state_values, gamma=0.90, n=10):
    """Compute the n-step forward view advantages of a series of experiences.

    This function calculates the n-step forward view temporal difference and
    subtracts the estimate of the state value as a baseline to obtain the
    advantage. See :code:`n_step_temporal_difference` for details on the
    n-step return.

    Parameters
    ----------
    rewards: array of floats
        The consecutively obtained scalar rewards.
    state_values: array of floats
        The estimates of the state values :math:`V(s_t)`, aligned so that
        `rewards[i]` was obtained by acting in the state whose value is
        `state_values[i]`.
    gamma: float, optional, default=0.9
        The discount factor for future rewards.
    n: int, optional, default=10
        The number of future rewards to incorporate explicitly in the return.

    Returns
    -------
    advantages: array of floats
        The advantage of every experience tuple.
    """
    # BUG FIX: the original called n_step_forward_view_return(), which is not
    # defined anywhere in this module; the n-step return implementation here
    # is n_step_temporal_difference().
    returns = n_step_temporal_difference(rewards, state_values, gamma=gamma, n=n)
    return returns - np.asarray(state_values)
def n_step_temporal_difference(rewards, state_values, gamma=0.9, n=10):
    """Compute the n-step forward view temporal difference of a series of experiences.

    The n-step return at time step :math:`t` is the sum of discounted rewards
    of steps :math:`t:t + n`, plus the estimated value of the resulting state
    :math:`V(s_{t + n})`:

    :math:`R_t = r_t + \\gamma r_{t+1} + ... + \\gamma^{n-1} r_{t+n-1}
    + \\gamma^n V_t(S_{t+n})`

    Parameters
    ----------
    rewards: array of floats
        The consecutively obtained scalar rewards.
    state_values: array of floats
        The estimates of the state values :math:`V(s_t)`, aligned so that
        `rewards[i]` was obtained from the state with value `state_values[i]`.
    gamma: float, optional, default=0.9
        The discount factor for future rewards.
    n: int, optional, default=10
        The number of future rewards to incorporate explicitly.

    Returns
    -------
    returns: array of floats
        The n-step forward view return of every experience tuple.
    """
    assert len(rewards) == len(state_values), ("rewards and state_values must"
                                               " have the same length")
    t = len(rewards)
    # Discount vector [1, gamma, ..., gamma^n]; the last entry discounts the
    # bootstrap value V(s_{t+n}).
    gamma_array = gamma ** np.arange(n + 1)
    # Zero-pad so windows reaching past the end bootstrap from 0.
    rewards = np.append(rewards, np.zeros(n))
    values = np.append(state_values, np.zeros(n))
    # (Removed a dead `returns = np.zeros(t)` that was immediately overwritten.)
    returns = np.array([
        np.dot(gamma_array, np.append(rewards[i:(i + n)], values[i + n]))
        for i in range(t)
    ])
    return returns
# @jit(nopython=True)
def monte_carlo_discounted_mean_reward(rewards, *args, gamma=0.9, **kwargs):
    """Calculate the mean discounted reward, instead of summed.

    For each step t, the discounted rewards-to-go are averaged by the sum of
    the discount factors used.

    Parameters
    ----------
    rewards: array of floats
        The consecutively obtained scalar rewards.
    gamma: float, optional, default=0.9
        The discount factor for future rewards.
    *args, **kwargs: any
        Accepted only for signature compatibility with the other target
        functions; unused.

    Returns
    -------
    returns: array of floats
        The discounted mean reward for every experience tuple.
    """
    T = len(rewards)
    gamma_array = gamma ** np.arange(T)
    means = np.empty(T)
    for i in range(T):
        discounts = gamma_array[:(T - i)]
        means[i] = np.dot(rewards[i:], discounts) / np.sum(discounts)
    return means
# @jit(nopython=True)
def n_step_discounted_mean_reward(rewards, state_values, gamma=0.9, n=3):
    """Calculate the n-step mean discounted reward, instead of summed.

    .. math::
        G_{t:t+n} = \\frac{\\sum_{i=0}^{n-1} \\gamma^i r_{t+i} + \\gamma^n V(s')}{\\sum_{i=0}^n \\gamma^i}

    Parameters
    ----------
    rewards: array of floats
        The consecutively obtained scalar rewards.
    state_values: array of floats
        The estimates of the state values :math:`V(s_t)`, aligned so that
        `rewards[i]` was obtained from the state with value `state_values[i]`.
    gamma: float, optional, default=0.9
        The discount factor for future rewards.
    n: int, optional, default=3
        The number of future rewards incorporated explicitly.

    Returns
    -------
    returns: array of floats
        The n-step forward view discounted mean reward per experience tuple.
    """
    T = len(rewards)
    gamma_array = gamma ** np.arange(n + 1)
    # Append a terminal value of 0 so the bootstrap index never overruns.
    padded_values = list(state_values)
    padded_values.append(0)
    result = np.zeros(T)
    for i in range(T):
        horizon = min(n, T - i)
        discounted = np.dot(rewards[i:(i + horizon)], gamma_array[:horizon])
        bootstrap = gamma_array[horizon] * padded_values[i + horizon]
        result[i] = (discounted + bootstrap) / np.sum(gamma_array[:(horizon + 1)])
    return result
|
import metric_maximin
import metric_maximax
def calc(performance, maximise=True, alpha=0.5):
    """Return the Optimism-Pessimism (Hurwicz) robustness metric per solution.

    Metric obtained from:
    Hurwicz, L. (1953) 'Optimality criterion for decision making under
    ignorance', Uncertainty and Expectations in Economics.

    'performance' is a 2D list (solution index x scenario index) of the
    performance metric of each solution in each scenario.
    'maximise' is True when higher performance is better (e.g. profit),
    False when lower is better (e.g. cost).
    'alpha' (0 < alpha < 1) is the weight given to the optimism rule.

    Returns a 1D list with one robustness value per solution.
    """
    # Best-case and worst-case robustness per solution.
    optimistic = metric_maximax.calc(performance, maximise)
    pessimistic = metric_maximin.calc(performance, maximise)
    # Hurwicz blend: alpha of the optimistic rule, (1 - alpha) pessimistic.
    robustness = []
    for idx, opt_value in enumerate(optimistic):
        robustness.append(alpha * opt_value + (1 - alpha) * pessimistic[idx])
    return robustness
if __name__ == "__main__":
    performance = [[0.7, 0.8, 0.9], [0.8, 0.9, 1.0]]
    # BUG FIX: the second positional parameter of calc() is `maximise`, so
    # the original passed 0.5 as the maximise flag; pass alpha by keyword.
    robustness = calc(performance, alpha=0.5)
    print(robustness)
# PyInstaller hook: fabio discovers its image-format plugin modules
# dynamically, so each one must be declared here for the bundler to
# include it in the frozen application.
hiddenimports = [
    'fabio.edfimage',
    'fabio.adscimage',
    'fabio.tifimage',
    'fabio.marccdimage',
    'fabio.mar345image',
    'fabio.fit2dmaskimage',
    'fabio.brukerimage',
    'fabio.bruker100image',
    'fabio.pnmimage',
    'fabio.GEimage',
    'fabio.OXDimage',
    'fabio.dm3image',
    'fabio.HiPiCimage',
    'fabio.pilatusimage',
    'fabio.fit2dspreadsheetimage',
    'fabio.kcdimage',
    'fabio.cbfimage',
    'fabio.xsdimage',
    'fabio.binaryimage',
    'fabio.pixiimage',
    'fabio.raxisimage',
    'fabio.numpyimage',
    'fabio.eigerimage',
    'fabio.hdf5image',
    'fabio.fit2dimage',
    'fabio.speimage',
    'fabio.jpegimage',
    'fabio.jpeg2kimage',
    'fabio.mpaimage',
    'fabio.mrcimage'
]
"""
CCT modeling and optimization code — beamline definition.
(Translated from Chinese.) Author: Zhao Runxiao
Date: 2021-05-01
"""
import multiprocessing # since v0.1.1 多线程计算
import time # since v0.1.1 统计计算时长
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import matplotlib.pyplot as plt
import math
import random # since v0.1.1 随机数
import sys
import os # since v0.1.1 查看CPU核心数
import numpy
from scipy.integrate import solve_ivp # since v0.1.1 ODE45
import warnings # since v0.1.1 提醒方法过时
from packages.point import *
from packages.constants import *
from packages.base_utils import BaseUtils
from packages.local_coordinate_system import LocalCoordinateSystem
from packages.line2s import *
from packages.trajectory import Trajectory
from packages.particles import *
from packages.magnets import *
from packages.cct import CCT
class Beamline(Line2, Magnet, ApertureObject):
def __init__(self, trajectory: Trajectory) -> None:
    """
    Do not call this constructor directly.
    Use set_start_point() followed by first_drift() instead.
    """
    # all magnets that contribute to the field of this beamline
    self.magnets: List[Magnet] = []
    # the ideal orbit (design trajectory) of the beamline
    self.trajectory: Trajectory = trajectory
    # Added 2021-03-18: the beamline elements. Each list entry is a tuple
    # of three parts: (position along the beamline, the element itself,
    # its length). The element itself is None for a drift section.
    self.elements: List[Tuple[float, Magnet, float]] = []
def magnetic_field_at(self, point: P3) -> P3:
    """
    Return the magnetic field produced by the whole beamline at `point`
    (a P3 in the global coordinate system): the sum of the fields of all
    member magnets.
    """
    total: P3 = P3.zeros()
    for magnet in self.magnets:
        total += magnet.magnetic_field_at(point)
    return total
# from Magnet
def magnetic_field_along(
    self,
    line2: Optional[Line2] = None,
    p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),
    step: float = 1 * MM,
) -> List[ValueWithDistance[P3]]:
    """
    Magnetic-field distribution of this beamline along the 2-D curve
    `line2` (defaults to self.trajectory when None).
    p2_t0_p3 -- lifts a 2-D point of the curve into 3-D (z = 0 by default)
    step -- sampling interval along the curve
    """
    curve = self.trajectory if line2 is None else line2
    return super(Beamline, self).magnetic_field_along(
        line2=curve, p2_t0_p3=p2_t0_p3, step=step
    )
def magnetic_field_bz_along(
    self,
    line2: Optional[Line2] = None,
    p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),
    step: float = 1 * MM,
) -> List[P2]:
    """
    Distribution of the z-component of the magnetic field along `line2`
    (defaults to self.trajectory when None).
    Since the magnets generally lie in the XY plane, Bz can usually be read
    as By of the natural coordinate system, i.e. the dipole field strength.
    p2_t0_p3 -- lifts a 2-D point of the curve into 3-D (z = 0 by default)
    step -- sampling interval along the curve
    Returns a list of P2 where x is the arc length s along line2 and
    y is the field bz at that point.
    """
    curve = self.trajectory if line2 is None else line2
    return super(Beamline, self).magnetic_field_bz_along(
        line2=curve, p2_t0_p3=p2_t0_p3, step=step
    )
def graident_field_along(
    self,
    line2: Optional[Line2] = None,
    good_field_area_width: float = 10 * MM,
    step: float = 1 * MM,
    point_number: int = 4,
) -> List[P2]:
    """
    Distribution of the field gradient along `line2` (defaults to
    self.trajectory when None).
    The gradient at each point is obtained by a polynomial fit of Bz over
    the horizontal perpendicular at that point.
    good_field_area_width -- length of the perpendicular; keep it within
        the good-field region
    step -- spacing of sample points along line2
    point_number -- sample points on the perpendicular; more is more accurate
    (Note: "graident" is a historical typo kept for API compatibility.)
    """
    curve = self.trajectory if line2 is None else line2
    return super(Beamline, self).graident_field_along(
        line2=curve, good_field_area_width=good_field_area_width, step=step, point_number=point_number
    )
def second_graident_field_along(
    self,
    line2: Optional[Line2] = None,
    good_field_area_width: float = 10 * MM,
    step: float = 1 * MM,
    point_number: int = 4,
) -> List[P2]:
    """
    Distribution of the second-order field gradient (sextupole component)
    along `line2` (defaults to self.trajectory when None).
    The value at each point is obtained by a polynomial fit of Bz over the
    horizontal perpendicular at that point.
    good_field_area_width -- length of the perpendicular; keep it within
        the good-field region
    step -- spacing of sample points along line2
    point_number -- sample points on the perpendicular; more is more accurate
    """
    curve = self.trajectory if line2 is None else line2
    return super(Beamline, self).second_graident_field_along(
        line2=curve, good_field_area_width=good_field_area_width, step=step, point_number=point_number
    )
def track_ideal_particle(
    self,
    kinetic_MeV: float,
    s: float = 0.0,
    length: Optional[float] = None,
    footstep: float = 5 * MM,
) -> List[P3]:
    """
    Particle tracking: run one ideal particle and return its trace.
    kinetic_MeV -- kinetic energy in MeV
    s -- starting position on the beamline
    length -- travel distance; when None, run to the end of the line
    footstep -- integration step size
    """
    run_length = (self.trajectory.get_length() - s) if length is None else length
    ideal = ParticleFactory.create_proton_along(
        self.trajectory, s, kinetic_MeV)
    return ParticleRunner.run_get_trajectory(ideal, self, run_length, footstep)
def track_phase_space_particle(
    self,
    x_mm: float,
    xp_mrad: float,
    y_mm: float,
    yp_mrad,
    delta: float,
    kinetic_MeV: float,
    s: float = 0.0,
    length: Optional[float] = None,
    footstep: float = 10 * MM,
) -> List[ValueWithDistance[PhaseSpaceParticle]]:
    """
    Run a single phase-space particle through the beamline.
    x_mm -- phase-space coordinate x, in mm
    xp_mrad -- phase-space coordinate xp, in mrad
    y_mm -- phase-space coordinate y, in mm
    yp_mrad -- phase-space coordinate yp, in mrad
    delta -- momentum dispersion
    kinetic_MeV -- kinetic energy, in MeV
    s -- starting position on the beamline, default 0.0
    length -- travel distance; when None, run to the end of the line
    footstep -- integration step, default 10*MM
    Returns a List[ValueWithDistance[PhaseSpaceParticle]]: the particle's
    phase-space coordinates at each position along the run.
    """
    if length is None:
        length = self.trajectory.get_length() - s
    pp = PhaseSpaceParticle(
        x=x_mm * MM,
        # BUGFIX: the angle coordinates were scaled by MM instead of MRAD.
        # MRAD is the correct unit constant here (track_phase_ellipse uses
        # it for the same conversion); assuming MM == MRAD == 1e-3 as is
        # conventional in this codebase, the numeric result is unchanged --
        # verify in packages.constants.
        xp=xp_mrad * MRAD,
        y=y_mm * MM,
        yp=yp_mrad * MRAD,
        z=0.0,
        delta=delta
    )
    # ideal particle at distance s: the reference frame for pp
    ip = ParticleFactory.create_proton_along(
        self.trajectory, s, kinetic_MeV)
    # real (running) particle built from the phase-space coordinates
    rp = ParticleFactory.create_from_phase_space_particle(
        ideal_particle=ip,
        coordinate_system=ip.get_natural_coordinate_system(),
        phase_space_particle=pp
    )
    # full track; distances in all_info are measured from the start point s
    all_info = ParticleRunner.run_get_all_info(
        p=rp,
        m=self,
        length=length,
        footstep=footstep
    )
    # convert each tracked point back to phase-space coordinates relative
    # to the ideal particle at the same position
    ret: List[ValueWithDistance[PhaseSpaceParticle]] = []
    for cp in all_info:
        d = cp.distance
        cip = ParticleFactory.create_proton_along(
            self.trajectory, d + s, kinetic_MeV)  # d is relative to s, hence d + s
        cpp = PhaseSpaceParticle.create_from_running_particle(
            ideal_particle=cip,
            coordinate_system=cip.get_natural_coordinate_system(),
            running_particle=cp
        )
        ret.append(ValueWithDistance(
            value=cpp, distance=d
        ))
    return ret
def track_phase_ellipse(
    self,
    x_sigma_mm: float,
    xp_sigma_mrad: float,
    y_sigma_mm: float,
    yp_sigma_mrad,
    delta: float,
    particle_number: int,
    kinetic_MeV: float,
    s: float = 0.0,
    length: Optional[float] = None,
    footstep: float = 10 * MM,
    concurrency_level: int = 1,
    report: bool = True
) -> Tuple[List[P2], List[P2]]:
    """
    Track particles lying on two phase-space ellipse boundaries and return
    a 2-tuple with their projections onto the x-xp and y-yp planes
    (units mm / mrad) at the end of the run.
    One ellipse lies in the x-xp plane (parameters sigma-x and sigma-xp),
    the other in the y-yp plane (parameters sigma-y and sigma-yp); both use
    momentum dispersion delta.
    x_sigma_mm -- sigma-x, mm
    xp_sigma_mrad -- sigma-xp, mrad
    y_sigma_mm -- sigma-y, mm
    yp_sigma_mrad -- sigma-yp, mrad
    delta -- momentum dispersion, dimensionless
    particle_number -- number of particles per ellipse
    kinetic_MeV -- kinetic energy, MeV
    s -- starting position
    length -- travel distance; when None, run to the end of the line
    footstep -- integration step
    concurrency_level -- number of CPU cores used for the tracking
    report -- whether to print the parallel task plan
    """
    if length is None:
        length = self.trajectory.get_length() - s
    ip_start = ParticleFactory.create_proton_along(
        self.trajectory, s, kinetic_MeV)
    ip_end = ParticleFactory.create_proton_along(
        self.trajectory, s + length, kinetic_MeV
    )
    pp_x = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
        xMax=x_sigma_mm * MM,
        xpMax=xp_sigma_mrad * MRAD,
        delta=delta,
        number=particle_number,
    )
    pp_y = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(
        yMax=y_sigma_mm * MM,
        ypMax=yp_sigma_mrad * MRAD,
        delta=delta,
        number=particle_number,
    )
    rp_x = ParticleFactory.create_from_phase_space_particles(
        ideal_particle=ip_start,
        coordinate_system=ip_start.get_natural_coordinate_system(),
        phase_space_particles=pp_x,
    )
    rp_y = ParticleFactory.create_from_phase_space_particles(
        ideal_particle=ip_start,
        coordinate_system=ip_start.get_natural_coordinate_system(),
        phase_space_particles=pp_y,
    )
    # run both particle sets in one call (merged computation since v0.1.1)
    ParticleRunner.run_only(
        p=rp_x + rp_y, m=self, length=length, footstep=footstep, concurrency_level=concurrency_level,
        report=report
    )
    pp_x_end = PhaseSpaceParticle.create_from_running_particles(
        ideal_particle=ip_end,
        coordinate_system=ip_end.get_natural_coordinate_system(),
        running_particles=rp_x,
    )
    pp_y_end = PhaseSpaceParticle.create_from_running_particles(
        ideal_particle=ip_end,
        coordinate_system=ip_end.get_natural_coordinate_system(),
        running_particles=rp_y,
    )
    xs = [pp.project_to_xxp_plane() / MM for pp in pp_x_end]
    ys = [pp.project_to_yyp_plane() / MM for pp in pp_y_end]
    # BUGFIX (shadowing): this local was previously named `s`, clobbering
    # the `s` parameter; renamed to `stat`. Behavior is unchanged because
    # the parameter was no longer read past this point.
    stat: BaseUtils.Statistic = BaseUtils.Statistic()
    print(
        f"delta={delta}," +
        f"avg_size_x={stat.clear().add_all(P2.extract(xs)[0]).half_width()}mm," +
        f"avg_size_y={stat.clear().add_all(P2.extract(ys)[0]).half_width()}mm"
    )
    return (xs, ys)
# from ApertureObject
def is_out_of_aperture(self, point: P3) -> bool:
    """
    Return True if `point` is outside the aperture of any element of this
    beamline.
    The check only fires for elements that are ApertureObjects; a point far
    from the axis but not inside any element is NOT reported, because it is
    most likely inside another, larger-aperture element, and flagging it
    would give false positives.
    Note: this function is extremely slow!
    """
    for m in self.magnets:
        if isinstance(m, ApertureObject) and m.is_out_of_aperture(point):
            print(f"beamline在{m}位置超出孔径")
            return True
    return False
def trace_is_out_of_aperture(
    self, trace_with_distance: List[ValueWithDistance[P3]]
) -> bool:
    """
    Return True if any point of the particle trace falls outside an
    element aperture.
    Note: this function is extremely slow!
    """
    return any(
        self.is_out_of_aperture(point.value) for point in trace_with_distance
    )
def get_length(self) -> float:
    """
    Return the total length of this Beamline (delegates to the trajectory).
    """
    return self.trajectory.get_length()
def point_at(self, s: float) -> P2:
    """
    Return the point (x, y) of the Beamline at arc length s.
    """
    return self.trajectory.point_at(s)
def direct_at(self, s: float) -> P2:
    """
    Return the direction of the Beamline at arc length s.
    """
    return self.trajectory.direct_at(s)
class __BeamlineBuilder:
    """
    Intermediate product used while constructing a Beamline.
    """

    def __init__(self, start_point: P2) -> None:
        # the 2-D starting point of the beamline to build
        self.start_point = start_point

    def first_drift(self, direct: P2 = P2.x_direct(), length: float = 1.0) -> "Beamline":
        """
        Add the first drift section and produce the actual Beamline.
        Just as the first segment of a Trajectory must be a straight line,
        the first element of a Beamline must be a drift.
        """
        bl = Beamline(
            Trajectory.set_start_point(self.start_point).first_line(
                direct=direct, length=length
            )
        )
        # drift elements are recorded with None as the element object
        bl.elements.append((0, None, length))
        return bl
@staticmethod
# -> "Beamline.__BeamlineBuilder"
def set_start_point(start_point: P2 = P2.origin()):
    """
    Set the starting point of the beamline.
    Returns a builder; call first_drift() on it to obtain the Beamline.
    """
    return Beamline.__BeamlineBuilder(start_point)
def append_drift(self, length: float) -> "Beamline":
    """
    Append a drift section of the given length to the end of the beamline.
    """
    start_s = self.trajectory.get_length()
    self.trajectory.add_strait_line(length=length)
    # drift elements are recorded with None as the element object
    self.elements.append((start_s, None, length))
    return self
def append_straight_dipole_magnet(
    self,
    magnetic_field: float,
    length: float,
    aperture_radius: float,
    # field_direct: P2 = P2.y_direct()
) -> "Beamline":
    """
    Append a straight dipole magnet to the end of the beamline.
    magnetic_field -- dipole field strength
    length -- magnet length
    aperture_radius -- aperture radius
    """
    old_length = self.trajectory.get_length()
    self.trajectory.add_strait_line(length=length)
    # build a local uniform magnet along the newly added straight segment
    lum = LocalUniformMagnet.create_local_uniform_magnet_along(
        trajectory=self.trajectory,
        s=old_length,
        length=length,
        magnetic_field=magnetic_field,
        aperture_radius=aperture_radius,
    )
    self.magnets.append(lum)
    self.elements.append((old_length, lum, length))
    return self
def append_qs(
    self,
    length: float,
    gradient: float,
    second_gradient: float,
    aperture_radius: float,
) -> "Beamline":
    """
    Append a QS magnet (combined quadrupole + sextupole) to the beamline.
    length -- QS magnet length
    gradient -- quadrupole gradient, T/m
    second_gradient -- second-order (sextupole) gradient, T/m^2
    aperture_radius -- aperture radius, m
    """
    old_length = self.trajectory.get_length()
    self.trajectory.add_strait_line(length=length)
    # build the QS magnet along the newly appended straight segment
    qs = QS.create_qs_along(
        trajectory=self.trajectory,
        s=old_length,
        length=length,
        gradient=gradient,
        second_gradient=second_gradient,
        aperture_radius=aperture_radius,
    )
    self.magnets.append(qs)
    self.elements.append((old_length, qs, length))
    return self
def append_q(
    self,
    length: float,
    gradient: float,
    aperture_radius: float,
) -> "Beamline":
    """
    Append a Q (quadrupole) magnet to the beamline.
    length -- Q magnet length
    gradient -- quadrupole gradient, T/m
    aperture_radius -- aperture radius, m
    """
    old_length = self.trajectory.get_length()
    self.trajectory.add_strait_line(length=length)
    # build the Q magnet along the newly appended straight segment
    q = Q.create_q_along(
        trajectory=self.trajectory,
        s=old_length,
        length=length,
        gradient=gradient,
        aperture_radius=aperture_radius,
    )
    self.magnets.append(q)
    self.elements.append((old_length, q, length))
    return self
def append_dipole_cct(
    self,
    big_r: float,
    small_r_inner: float,
    small_r_outer: float,
    bending_angle: float,
    tilt_angles: List[float],
    winding_number: int,
    current: float,
    disperse_number_per_winding: int = 120,
) -> "Beamline":
    """
    Append a dipole CCT (two coaxial layers) to the beamline.
    big_r -- bending radius
    small_r_inner -- inner-layer aperture radius
    small_r_outer -- outer-layer aperture radius
    bending_angle -- bending angle in degrees (positive = counter-clockwise,
        negative = clockwise)
    tilt_angles -- tilt angle of each multipole component
    winding_number -- number of windings
    current -- coil current
    disperse_number_per_winding -- segments per winding; larger is more accurate
    """
    old_length = self.trajectory.get_length()
    # arc length covered by the CCT along the design orbit
    cct_length = big_r * abs(BaseUtils.angle_to_radian(bending_angle))
    self.trajectory.add_arc_line(
        radius=big_r, clockwise=bending_angle < 0, angle_deg=abs(bending_angle)
    )
    # inner layer: winds in the positive ksi direction
    cct_inner = CCT.create_cct_along(
        trajectory=self.trajectory,
        s=old_length,
        big_r=big_r,
        small_r=small_r_inner,
        bending_angle=abs(bending_angle),
        tilt_angles=tilt_angles,
        winding_number=winding_number,
        current=current,
        starting_point_in_ksi_phi_coordinate=P2.origin(),
        end_point_in_ksi_phi_coordinate=P2(
            2 * math.pi * winding_number,
            BaseUtils.angle_to_radian(bending_angle),
        ),
        disperse_number_per_winding=disperse_number_per_winding,
    )
    self.magnets.append(cct_inner)
    self.elements.append((old_length, cct_inner, cct_length))
    # outer layer: tilt angles negated and ksi reversed, so the winding
    # runs the opposite way around the torus
    cct_outer = CCT.create_cct_along(
        trajectory=self.trajectory,
        s=old_length,
        big_r=big_r,
        small_r=small_r_outer,
        bending_angle=abs(bending_angle),
        tilt_angles=BaseUtils.list_multiply(tilt_angles, -1),
        winding_number=winding_number,
        current=current,
        starting_point_in_ksi_phi_coordinate=P2.origin(),
        end_point_in_ksi_phi_coordinate=P2(
            -2 * math.pi * winding_number,
            BaseUtils.angle_to_radian(bending_angle),
        ),
        disperse_number_per_winding=disperse_number_per_winding,
    )
    self.magnets.append(cct_outer)
    self.elements.append((old_length, cct_outer, cct_length))
    return self
def append_agcct(
    self,
    big_r: float,
    small_rs: List[float],
    bending_angles: List[float],
    tilt_angles: List[List[float]],
    winding_numbers: List[List[int]],
    currents: List[float],
    disperse_number_per_winding: int = 120,
) -> "Beamline":
    """
    Append an AGCCT (alternating-gradient CCT).
    Essentially two dipole CCT layers plus two alternating quadrupole CCT
    layers.
    big_r -- bending radius, m
    small_rs -- aperture radii of the four layers, sorted largest first:
        dipole outer, dipole inner, quadrupole outer, quadrupole inner
    bending_angles -- bending angle of each part of the alternating
        quadrupole CCT (positive = counter-clockwise, negative = clockwise),
        all of the same sign. The dipole CCT angle is not passed in: it is
        sum(bending_angles).
    tilt_angles -- tilt angles of the dipole and quadrupole CCTs, e.g.
        [[30], [90, 30]]; a two-element nested list
    winding_numbers -- windings of the dipole and quadrupole CCTs, e.g.
        [[128], [21, 50, 50]]: dipole CCT 128 windings, alternating
        quadrupole CCT parts of 21, 50 and 50 windings
    currents -- currents of the dipole and quadrupole CCTs, e.g. [8000, 9000]
    disperse_number_per_winding -- segments per winding; larger is more accurate
    CCTs are appended in this order:
        outer dipole CCT
        inner dipole CCT
        part 1 quadrupole CCT inner
        part 1 quadrupole CCT outer
        part 2 quadrupole CCT inner
        part 2 quadrupole CCT outer
        ... ...
    """
    if len(small_rs) != 4:
        raise ValueError(
            f"small_rs({small_rs}),长度应为4,分别是二极CCT外层、内层,四极CCT外层、内层")
    if not BaseUtils.is_sorted(small_rs[::-1]):
        raise ValueError(
            f"small_rs({small_rs}),应从大到小排列,分别是二极CCT外层、内层,四极CCT外层、内层")
    total_bending_angle = sum(bending_angles)
    old_length = self.trajectory.get_length()
    # arc length of the whole AGCCT along the design orbit
    cct_length = big_r * abs(BaseUtils.angle_to_radian(total_bending_angle))
    self.trajectory.add_arc_line(
        radius=big_r,
        clockwise=total_bending_angle < 0,
        angle_deg=abs(total_bending_angle),
    )
    # build the outer dipole CCT layer (ksi reversed, tilt angles negated)
    cct2_outer = CCT.create_cct_along(
        trajectory=self.trajectory,
        s=old_length,
        big_r=big_r,
        small_r=small_rs[0],
        bending_angle=abs(total_bending_angle),
        tilt_angles=BaseUtils.list_multiply(tilt_angles[0], -1),
        winding_number=winding_numbers[0][0],
        current=currents[0],
        starting_point_in_ksi_phi_coordinate=P2.origin(),
        end_point_in_ksi_phi_coordinate=P2(
            -2 * math.pi * winding_numbers[0][0],
            BaseUtils.angle_to_radian(total_bending_angle),
        ),
        disperse_number_per_winding=disperse_number_per_winding,
    )
    self.magnets.append(cct2_outer)
    self.elements.append((old_length, cct2_outer, cct_length))
    # build the inner dipole CCT layer
    cct2_innter = CCT.create_cct_along(
        trajectory=self.trajectory,
        s=old_length,
        big_r=big_r,
        small_r=small_rs[1],
        bending_angle=abs(total_bending_angle),
        tilt_angles=tilt_angles[0],
        winding_number=winding_numbers[0][0],
        current=currents[0],
        starting_point_in_ksi_phi_coordinate=P2.origin(),
        end_point_in_ksi_phi_coordinate=P2(
            2 * math.pi * winding_numbers[0][0],
            BaseUtils.angle_to_radian(total_bending_angle),
        ),
        disperse_number_per_winding=disperse_number_per_winding,
    )
    self.magnets.append(cct2_innter)
    self.elements.append((old_length, cct2_innter, cct_length))
    # build the inner and outer alternating quadrupole CCTs
    # extract parameters
    agcct_small_r_out = small_rs[2]
    agcct_small_r_in = small_rs[3]
    agcct_winding_nums: List[int] = winding_numbers[1]
    agcct_bending_angles: List[float] = bending_angles
    agcct_bending_angles_rad: List[float] = BaseUtils.angle_to_radian(
        agcct_bending_angles
    )
    agcct_tilt_angles: List[float] = tilt_angles[1]
    agcct_current: float = currents[1]
    # build part 1; the sign of ksi alternates between inner and outer
    # layer (and flips again for every subsequent part)
    agcct_index = 0
    agcct_start_in = P2.origin()
    agcct_start_out = P2.origin()
    agcct_end_in = P2(
        ((-1.0) ** agcct_index) * 2 * math.pi *
        agcct_winding_nums[agcct_index],
        agcct_bending_angles_rad[agcct_index],
    )
    agcct_end_out = P2(
        ((-1.0) ** (agcct_index + 1))
        * 2
        * math.pi
        * agcct_winding_nums[agcct_index],
        agcct_bending_angles_rad[agcct_index],
    )
    agcct_part1_inner = CCT.create_cct_along(
        trajectory=self.trajectory,
        s=old_length,
        big_r=big_r,
        small_r=agcct_small_r_in,
        bending_angle=abs(agcct_bending_angles[agcct_index]),
        tilt_angles=BaseUtils.list_multiply(agcct_tilt_angles, -1),
        winding_number=agcct_winding_nums[agcct_index],
        current=agcct_current,
        starting_point_in_ksi_phi_coordinate=agcct_start_in,
        end_point_in_ksi_phi_coordinate=agcct_end_in,
        disperse_number_per_winding=disperse_number_per_winding,
    )
    agcct_part1_length = big_r * \
        BaseUtils.angle_to_radian(abs(agcct_bending_angles[agcct_index]))
    self.magnets.append(agcct_part1_inner)
    self.elements.append(
        (old_length, agcct_part1_inner, agcct_part1_length))
    agcct_part1_outer = CCT.create_cct_along(
        trajectory=self.trajectory,
        s=old_length,
        big_r=big_r,
        small_r=agcct_small_r_out,
        bending_angle=abs(agcct_bending_angles[agcct_index]),
        tilt_angles=agcct_tilt_angles,
        winding_number=agcct_winding_nums[agcct_index],
        current=agcct_current,
        starting_point_in_ksi_phi_coordinate=agcct_start_out,
        end_point_in_ksi_phi_coordinate=agcct_end_out,
        disperse_number_per_winding=disperse_number_per_winding,
    )
    self.magnets.append(agcct_part1_outer)
    self.elements.append(
        (old_length, agcct_part1_outer, agcct_part1_length))
    # running start position of the next part along the beamline
    old_length_i = old_length + agcct_part1_length
    # build part 2 and the following parts
    for ignore in range(len(agcct_bending_angles) - 1):
        agcct_index += 1
        # each part starts one winding pitch (in phi) after the previous
        # part's end point
        agcct_start_in = agcct_end_in + P2(
            0,
            agcct_bending_angles_rad[agcct_index - 1]
            / agcct_winding_nums[agcct_index - 1],
        )
        agcct_start_out = agcct_end_out + P2(
            0,
            agcct_bending_angles_rad[agcct_index - 1]
            / agcct_winding_nums[agcct_index - 1],
        )
        agcct_end_in = agcct_start_in + P2(
            ((-1) ** agcct_index) * 2 * math.pi *
            agcct_winding_nums[agcct_index],
            agcct_bending_angles_rad[agcct_index],
        )
        agcct_end_out = agcct_start_out + P2(
            ((-1) ** (agcct_index + 1))
            * 2
            * math.pi
            * agcct_winding_nums[agcct_index],
            agcct_bending_angles_rad[agcct_index],
        )
        agcct_parti_inner = CCT.create_cct_along(
            trajectory=self.trajectory,
            s=old_length,
            big_r=big_r,
            small_r=agcct_small_r_in,
            bending_angle=abs(agcct_bending_angles[agcct_index]),
            tilt_angles=BaseUtils.list_multiply(agcct_tilt_angles, -1),
            winding_number=agcct_winding_nums[agcct_index],
            current=agcct_current,
            starting_point_in_ksi_phi_coordinate=agcct_start_in,
            end_point_in_ksi_phi_coordinate=agcct_end_in,
            disperse_number_per_winding=disperse_number_per_winding,
        )
        agcct_parti_length = big_r * \
            BaseUtils.angle_to_radian(
                abs(agcct_bending_angles[agcct_index]))
        self.magnets.append(agcct_parti_inner)
        self.elements.append(
            (old_length_i, agcct_parti_inner, agcct_parti_length))
        agcct_parti_outer = CCT.create_cct_along(
            trajectory=self.trajectory,
            s=old_length,
            big_r=big_r,
            small_r=agcct_small_r_out,
            bending_angle=abs(agcct_bending_angles[agcct_index]),
            tilt_angles=agcct_tilt_angles,
            winding_number=agcct_winding_nums[agcct_index],
            current=agcct_current,
            starting_point_in_ksi_phi_coordinate=agcct_start_out,
            end_point_in_ksi_phi_coordinate=agcct_end_out,
            disperse_number_per_winding=disperse_number_per_winding,
        )
        self.magnets.append(agcct_parti_outer)
        self.elements.append(
            (old_length_i, agcct_parti_outer, agcct_parti_length))
        old_length_i += agcct_parti_length
    return self
def get_magnets(self) -> List[Magnet]:
    """Return the list of magnets that make up this beamline."""
    return self.magnets
def get_trajectory(self) -> Trajectory:
    """Return the ideal-orbit trajectory of this beamline."""
    return self.trajectory
def __str__(self) -> str:
    """Short human-readable summary: magnet count and trajectory length."""
    return f"beamline(magnet_size={len(self.magnets)}, traj_len={self.trajectory.get_length()})"
def __repr__(self) -> str:
    """repr mirrors str for this class."""
    return str(self)
from string import punctuation
from nltk.corpus import wordnet as wn
def preprocess_text(sentence):
    """
    Normalize user input: lower-case the sentence and strip all
    punctuation characters.
    Uses a single str.translate pass instead of one replace() call per
    punctuation character.
    """
    return sentence.lower().translate(str.maketrans('', '', punctuation))
def indicators():
    """
    Map every recognized trigger word/phrase to its question category.
    The category string is what respond() dispatches on.
    (Duplicate keys 'nn' and 'often' that appeared twice in the original
    lists have been removed; dict.fromkeys deduplicated them anyway, so
    behavior is unchanged.)
    """
    return {
        **dict.fromkeys(['noun', 'nn', 'thing', 'object', 'verb', 'action', 'occurence',
                         'adjective', 'describe', 'describes', 'description', 'numeral', 'preposition',
                         'adverb', 'pronoun', 'conjunction', 'determiner', 'exclamation'], 'spec_pos'),
        **dict.fromkeys(['class', 'part of speech', 'pos', 'category', 'type', 'kind of'], 'pos_type'),
        **dict.fromkeys(['clue', 'random', 'hint'], 'clue'),
        **dict.fromkeys(['context', 'situation', 'situations', 'example', 'use', 'used', 'usage', 'next to'],
                        'context'),
        **dict.fromkeys(['similar', 'relate', 'relationship', 'correlate', 'remind', 'synonym'], 'similar'),
        **dict.fromkeys(['common', 'often', 'occur', 'frequency'], 'frequency'),
        **dict.fromkeys(['length', 'amount', 'long', 'consist', 'many', 'characters', 'letters'], 'length'),
        **dict.fromkeys(['first', 'start', 'starts', '1st'], 'first'),
        **dict.fromkeys(['second', '2nd'], 'second'),
        **dict.fromkeys(['last', 'end', 'ends', 'ending', 'final', 'finish', 'conclude'], 'last'),
        **dict.fromkeys(['third', 'fourth', 'fifth', 'sixth', '3rd', '4th', '5th'], 'illegal'),
        **dict.fromkeys(['i think', 'guess'], 'guess'),
        **dict.fromkeys(['stop', 'quit', 'give up', 'done', 'exit'], 'stop'),
    }
def pos_indicators():
    """
    Map part-of-speech trigger words to the canonical POS name.
    BUGFIX: the verb list contained `'you do' 'action'` (a missing comma),
    which Python concatenated into the single bogus key 'you doaction';
    'you do' and 'action' are now separate keys.
    """
    return {
        **dict.fromkeys(['noun', 'thing', 'object'], 'noun'),
        **dict.fromkeys(['verb', 'you do', 'action', 'occurence'], 'verb'),
        **dict.fromkeys(['adjective', 'describe', 'describes', 'description'], 'adjective'),
        **dict.fromkeys(['preposition'], 'preposition'),
        **dict.fromkeys(['numeral'], 'numeral'),
        **dict.fromkeys(['adverb'], 'adverb'),
        **dict.fromkeys(['conjunction'], 'conjunction'),
        **dict.fromkeys(['determiner'], 'determiner'),
        **dict.fromkeys(['exclamation'], 'exclamation'),
    }
def match_input(sentence, game_word):
    """
    Find the first word of the (preprocessed) sentence that is a known
    indicator and dispatch to respond().
    Returns respond()'s reply, or None when no indicator word is present.
    """
    cleaned = preprocess_text(sentence)
    indicator_map = indicators()  # build the dict once, not twice per word
    # NOTE(review): multi-word keys in indicators() ('give up', 'i think',
    # 'part of speech', ...) can never match a single token here -- confirm
    # whether phrase matching was intended.
    for word in cleaned.split():
        if word in indicator_map:
            return respond(indicator_map[word], word, game_word, cleaned)
    return None
def respond(indicating_word, base_word, game_word, sentence):  # TODO: Refactor variable names
    """
    Produce a reply for the question category `indicating_word`.
    indicating_word -- category string from indicators() (e.g. 'spec_pos')
    base_word -- the trigger word that matched in the user's sentence
    game_word -- the secret word object being guessed
    sentence -- the full preprocessed user sentence
    BUGFIXES: category comparisons previously used `is` on strings, which
    only works by the accident of CPython constant interning -- replaced
    with `==`; a frequency of exactly 150 fell through to the final
    "not that common" branch -- the middle band is now `frequency < 300`.
    """
    ind = indicators()  # build once; every branch compares against it
    if indicating_word == ind.get('noun'):
        pos = pos_indicators().get(base_word)
        article = get_indefinite_article(base_word)  # TODO: Check if works...
        if game_word.is_pos(pos):
            return "Yes, it's " + article + " " + pos  # TODO: a or an!
        else:
            return "No, it's not " + article + " " + pos
    elif indicating_word == ind.get('class'):
        pos = game_word.get_pos()
        if pos:
            return "It's a " + pos
        else:
            return "Damn"
    elif indicating_word == ind.get('context'):
        context = game_word.example()
        if context:
            return context
        else:
            return "Oh, I can't come up with any examples :("
    elif indicating_word == ind.get('similar'):
        sentence = sentence.split()
        comparison_word = sentence[-1]  # Heuristic solution, other solution possible?
        if not comparison_word == "to":
            similarity = game_word.similarity_to(comparison_word)
            if similarity is not None:
                if similarity == 1:
                    return "They're not similar, they are the same :)"
                elif 0.5 < similarity < 1:
                    closest_hypernym = game_word.closest_hypernym(comparison_word)
                    return "Yeah, they are both a type of " + closest_hypernym
                elif 0.2 <= similarity <= 0.5:
                    closest_hypernym = game_word.closest_hypernym(comparison_word)
                    return "There's some similarities, they are both a type of " + closest_hypernym
                else:
                    return "I can't see any similarities with " + comparison_word
            else:
                return "Dunno"
    elif indicating_word == ind.get('first'):
        return "The first letter is " + game_word.first_letter()
    elif indicating_word == ind.get('second'):
        return "The second letter is " + game_word.second_letter()
    elif indicating_word == ind.get('last'):
        return "The last letter is " + game_word.last_letter()
    elif indicating_word == ind.get('third'):
        return "Huh, stop asking questions like that!"
    elif indicating_word == ind.get('long'):
        return "The word has " + str(game_word.length()) + " letters"
    elif indicating_word == ind.get('common'):
        frequency = game_word.get_frequency()
        if frequency < 150:
            return "It's very common"
        elif frequency < 300:  # BUGFIX: exactly 150 used to skip this band
            return "It's fairly common"
        else:
            return "It's not that common"
    elif indicating_word == ind.get('clue'):
        return "Might rhyme with " + game_word.rhymes()
    elif indicating_word == ind.get('stop'):
        exit()  # player gave up: terminate the whole game process
    elif indicating_word == ind.get('guess'):
        sentence = sentence.split()
        comparison_word = wn.synsets(sentence[-1])
        if comparison_word:
            comparison_word = comparison_word[0].lemma_names()[0]
            if comparison_word == game_word.get_lemma():
                return "correct"
        return "Nope!"
# Simple solution that works in this case
def get_indefinite_article(word):
    """Return 'an' when the word starts with a vowel letter, else 'a'."""
    return "an" if word[0] in "aeiou" else "a"
|
from template.page import *
from template.config import *
from template.index import Index
import time
import copy
from math import floor
import threading
import concurrent.futures
import os
import json
# Offsets of the metadata columns stored at the front of every physical record.
INDIRECTION_COLUMN = 0  # RID of the latest tail record for a base record (see update())
RID_COLUMN = 1  # the record's own RID; set to INVALID when the record is deleted
TIMESTAMP_COLUMN = 2  # presumably the record's creation/update time -- not used in this chunk
SCHEMA_ENCODING_COLUMN = 3  # 1 when the record has (unmerged) tail updates (see update())
BASE_RID = 4  # presumably a tail record's back-pointer to its base record -- confirm
class Record:
    """Lightweight record container: RID, primary key, and column values."""

    def __init__(self, rid, key, columns):
        self.rid, self.key, self.columns = rid, key, columns
class Table:
"""
:param name: string #Table name
:param num_columns: int #Number of Columns: all columns are integer
:param key: int #Index of table key in columns
:variable keyToRID #
:variable baseRID #The current RID used to store a new base record
:variable tailRIDs #The current RID used for updating a base record, used for tail record
"""
def __init__(self, name, num_columns, key, path="./", baseRID=-1, tailRIDs=None, keyToRID=None, numMerges=0):
    """
    Create a table.
    name -- table name
    num_columns -- number of user columns (all integers)
    key -- index of the table key within the columns
    path -- directory holding the table's page files
    baseRID -- last RID used for a base record
    tailRIDs -- per-page-range last tail RID (fresh list when omitted)
    keyToRID -- key -> base RID map for query operations (fresh dict when omitted)
    numMerges -- number of merges performed so far
    BUGFIX: tailRIDs/keyToRID previously used mutable default arguments
    ([] and {}), which are created once at function definition and shared
    by every Table constructed without explicit values; replaced with None
    sentinels that build fresh containers per instance.
    """
    self.name = name
    self.key = key
    self.num_columns = num_columns
    self.path = path
    self.baseRID = baseRID
    # map key to RID for query operations
    self.keyToRID = {} if keyToRID is None else keyToRID
    self.index = Index(self)
    self.numMerges = numMerges
    # new tailRID array, each element holds the tailRID of each Page Range.
    self.tailRIDs = [] if tailRIDs is None else tailRIDs
# Calls insert on the correct page range
# 1. Check if page is already in bufferpool (getBasePagePath(self, baseRID) compared to BP.pages dictionary {page_path: page_object})
#    a. If not, then recreate the page and call BP.handleReplacement(getBasePagePath)
#    b. Else get page object from bufferpool queue
# 2. Handle IsPageFull Logic: Check meta file or recreate base page and have it manually check to determine if full
# 3. Then call recreatedPage.insert(RID, recordData)
def insert(self, record):
    """
    Insert a new base record; returns [table, baseRID, key].
    The first element of `record` is taken as the primary key.
    """
    BP.latch.acquire()  # serialize all bufferpool access
    key = record[0]  # first column is the primary key
    self.baseRID += 1
    currentBaseRID = self.baseRID
    self.keyToRID[key] = self.baseRID
    selectedPageRange = self.getPageRange(self.baseRID)
    # NOTE(review): PageRangePath is computed but never used in this method
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
    BasePagePath = self.getBasePagePath(self.baseRID)
    BPindex = BP.pathInBP(BasePagePath)
    # Page not in bufferpool
    if BPindex is None:
        # recreate page
        page = BasePage(self.num_columns, selectedPageRange, BasePagePath)
        # load from disk when the page file already exists
        if os.path.exists(BasePagePath):
            page.readPageFromDisk(BasePagePath)
        # add to bufferpool
        BPindex = BP.add(page)
    # Get page location in bufferpool
    else:
        BPindex = BP.refresh(BPindex)
    BP.bufferpool[BPindex].insert(self.baseRID, record)
    self.finishedModifyingRecord(BPindex)  # mark dirty + unpin
    # PD unlatch
    BP.latch.release()
    # keep the secondary index in sync, under its own latch
    if self.index:
        self.index.latch.acquire()
        self.indexInsert(record)
        self.index.latch.release()
    return [self, currentBaseRID, key]
# m1_tester expects a list of record objects, but we should only be passing back certain columns
def select(self, key, column, query_columns):
    """
    Return the most recent version of the record with the given key.
    key -- primary-key value to look up
    column -- unused here (kept for the query API signature)
    query_columns -- 1/0 mask of which columns to include in the result
    Returns a one-element list [Record] on success, or False when the key
    is unknown or the record has been invalidated.
    (The unused local PageRangePath was removed; no behavior change.)
    """
    BP.latch.acquire()
    if key not in self.keyToRID:
        BP.latch.release()
        return False
    baseRID = self.keyToRID[key]
    selectedPageRange = self.getPageRange(baseRID)
    BasePagePath = self.getBasePagePath(baseRID)
    basePageOffset = self.calculatePageOffset(baseRID)
    BPindex = self.getBasePageBPIndex(BasePagePath, selectedPageRange)
    baseRecord = BP.bufferpool[BPindex].getRecord(basePageOffset)
    if baseRecord[RID_COLUMN] == INVALID:  # record was deleted
        BP.bufferpool[BPindex].pinned -= 1
        BP.latch.release()
        return False
    mostUpdatedRecord = self.getMostUpdatedRecord(baseRecord, BPindex, selectedPageRange, key)
    BP.bufferpool[BPindex].pinned -= 1
    # BP unlatch
    BP.latch.release()
    returned_record_columns = self.setupReturnedRecord(mostUpdatedRecord, query_columns)
    return [Record(mostUpdatedRecord.rid, mostUpdatedRecord.key, returned_record_columns)]
# 1. Pull base record into BP if needed so we can get the record and update base record data/bp status
# 2. Get the most updated tail record into BP so that we can create cumulative record
# 3. Add tail page to BP if needed and insert the cumulative tail record into latest tail page
# 4/5. Check if a merge should occur and udpate index
def update(self, key, record, isTransaction = False):
    """
    Append a cumulative tail record for the base record with this key.
    Returns [table, tailRID, pageRange, baseRID] on success, or False when
    the key is unknown or the base record has been invalidated.
    isTransaction -- when True, the base record's indirection column is
    left pointing at its previous value (the transaction commits it later
    via updateBaseIndirection()).
    """
    BP.latch.acquire()
    if key not in self.keyToRID:
        BP.latch.release()
        return False
    # 1.
    baseRID = self.keyToRID[key]
    selectedPageRange = self.getPageRange(baseRID)
    # NOTE(review): PageRangePath is computed but never used in this method
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
    BasePagePath = self.getBasePagePath(baseRID)
    baseBPindex = self.getBasePageBPIndex(BasePagePath, selectedPageRange)
    basePageOffset = self.calculatePageOffset(baseRID)
    baseRecord = BP.bufferpool[baseBPindex].getRecord(basePageOffset)
    if baseRecord[RID_COLUMN] == INVALID:  # record was deleted
        BP.bufferpool[baseBPindex].pinned -= 1
        BP.latch.release()
        return False
    # allocate the next tail RID of this page range
    self.tailRIDs[selectedPageRange] += 1
    tailRID = self.tailRIDs[selectedPageRange]
    # if transaction, don't update indirection column yet
    if isTransaction:
        BP.bufferpool[baseBPindex].newRecordAppended(baseRecord[INDIRECTION_COLUMN], basePageOffset)
    else:
        BP.bufferpool[baseBPindex].newRecordAppended(tailRID, basePageOffset)
    self.finishedModifyingRecord(baseBPindex)
    # 2. build the cumulative record from the latest version: the previous
    # tail record when unmerged updates exist, otherwise the base record
    if baseRecord[SCHEMA_ENCODING_COLUMN] == 1 and not self.recordHasBeenMerged(baseRecord, BP.bufferpool[baseBPindex].TPS):
        previousTailRecord = self.getPreviousTailRecord(baseRecord, selectedPageRange)
        cumulativeRecord = self.createCumulativeRecord(previousTailRecord, record, previousTailRecord[RID_COLUMN], baseRecord[RID_COLUMN], selectedPageRange, MetaElements + 1)
    else:
        cumulativeRecord = self.createCumulativeRecord(baseRecord, record, baseRecord[RID_COLUMN], baseRecord[RID_COLUMN], selectedPageRange, MetaElements)
    # 3.
    TailPagePath = self.getTailPagePath(tailRID, selectedPageRange)
    tailBPindex = self.getTailPageBufferIndex(selectedPageRange, TailPagePath)
    BP.bufferpool[tailBPindex].insert(cumulativeRecord)
    self.finishedModifyingRecord(tailBPindex)
    # 4. trigger a merge once the tail pages outgrow the merge policy
    if self.numMerges == 0 and self.calculateTailPageIndex(tailRID) >= MergePolicy:
        self.initiateMerge(selectedPageRange)
    elif self.numMerges > 0 and self.calculateTailPageIndex(tailRID) >= self.numMerges * MergePolicy + MergePolicy:
        self.initiateMerge(selectedPageRange)
    BP.latch.release()
    # 5. keep the secondary index in sync, under its own latch
    if self.index:
        self.index.latch.acquire()
        self.indexUpdate(cumulativeRecord)
        self.index.latch.release()
    return [self, tailRID, selectedPageRange, baseRID]
def deleteBaseRecord(self, baseRID):
    """
    Invalidate (logically delete) the base record with the given RID.
    """
    pageOffset = self.calculatePageOffset(baseRID)
    BasePagePath = self.getBasePagePath(baseRID)
    baseBPindex = BP.pathInBP(BasePagePath)
    if baseBPindex is None:
        # page only exists on disk: rebuild it and pull it into the bufferpool
        # NOTE(review): the page range is hard-coded to 0 here, unlike
        # insert() which passes the computed range -- confirm intentional
        page = BasePage(self.num_columns, 0, BasePagePath)
        page.readPageFromDisk(BasePagePath)
        baseBPindex = BP.add(page)
    else:
        # page already cached: refresh its position in the replacement queue
        baseBPindex = BP.refresh(baseBPindex)
    BP.bufferpool[baseBPindex].invalidateRecord(pageOffset)
    self.finishedModifyingRecord(baseBPindex)
def deleteTailRecord(self, tailRID, selectedPageRange):
    """
    Invalidate (logically delete) the tail record with the given RID in
    the given page range.
    (The return value of invalidateRecord was previously captured in an
    unused local `nextRID`; the dead store has been removed.)
    """
    pageOffset = self.calculatePageOffset(tailRID)
    TailPagePath = self.getTailPagePath(tailRID, selectedPageRange)
    tailBPindex = BP.pathInBP(TailPagePath)
    if tailBPindex is None:
        # here we know that the page is not in the bufferpool (So the page exists only on disk)
        page = TailPage(self.num_columns, selectedPageRange, TailPagePath)
        page.readPageFromDisk(TailPagePath)
        tailBPindex = BP.add(page)
    else:
        # here the page is in the bufferpool, so we will refresh it.
        tailBPindex = BP.refresh(tailBPindex)
    BP.bufferpool[tailBPindex].invalidateRecord(pageOffset)
    self.finishedModifyingRecord(tailBPindex)
def updateBaseIndirection(self, baseRID, tailRID):
    """
    Point the base record's indirection column at tailRID, but only when
    tailRID is newer than the indirection value currently recorded
    (used to commit transactional updates; see update()).
    """
    selectedPageRange = self.getPageRange(baseRID)
    # NOTE(review): PageRangePath is computed but never used in this method
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
    BasePagePath = self.getBasePagePath(baseRID)
    # BP latch
    BP.latch.acquire()
    baseBPindex = self.getBasePageBPIndex(BasePagePath, selectedPageRange)
    basePageOffset = self.calculatePageOffset(baseRID)
    baseRecord = BP.bufferpool[baseBPindex].getRecord(basePageOffset)
    if baseRecord[INDIRECTION_COLUMN] < tailRID:
        BP.bufferpool[baseBPindex].newRecordAppended(tailRID, basePageOffset)
    self.finishedModifyingRecord(baseBPindex)
    BP.latch.release()
def finishedModifyingRecord(self, BPindex):
    """Mark the bufferpool frame at BPindex dirty and release one pin."""
    frame = BP.bufferpool[BPindex]
    frame.pinned -= 1
    frame.dirty = True
def sum(self, start_range, end_range, aggregate_column_index):
    """Sum aggregate_column_index over the newest version of each record
    whose key lies in [start_range, end_range].

    Returns the sum, or False when no valid record falls in the range.
    (Name shadows the builtin `sum`; kept for interface compatibility.)

    Changes from the original: removed dead code -- a hard-coded
    query_columns list and a setupReturnedRecord call whose result was
    never used, plus an unused PageRangePath local.
    """
    summation = 0
    none_in_range = True
    for key in range(start_range, end_range + 1):
        if key not in self.keyToRID:
            continue
        baseRID = self.keyToRID[key]
        selectedPageRange = self.getPageRange(baseRID)
        BasePagePath = self.getBasePagePath(baseRID)
        basePageOffset = self.calculatePageOffset(baseRID)
        # BP latch
        BP.latch.acquire()
        BPindex = self.getBasePageBPIndex(BasePagePath, selectedPageRange)
        baseRecord = BP.bufferpool[BPindex].getRecord(basePageOffset)
        if baseRecord[RID_COLUMN] == INVALID:
            # deleted record: unpin, unlatch, skip
            BP.bufferpool[BPindex].pinned -= 1
            BP.latch.release()
            continue
        mostUpdatedRecord = self.getMostUpdatedRecord(baseRecord, BPindex, selectedPageRange, key)
        BP.bufferpool[BPindex].pinned -= 1
        # BP unlatch
        BP.latch.release()
        none_in_range = False
        summation += mostUpdatedRecord.columns[aggregate_column_index]
    if none_in_range:
        return False
    return summation
def getMostUpdatedRecord(self, baseRecord, BPindex, selectedPageRange, key):
    """Return a Record holding the newest version of baseRecord's row.

    If the base record is flagged as updated (schema encoding == 1) and
    that update has not already been folded in by a merge (checked against
    the page's TPS), follow the indirection to the latest tail record;
    otherwise the base record itself is the newest version.
    """
    if baseRecord[SCHEMA_ENCODING_COLUMN] == 1 and not self.recordHasBeenMerged(baseRecord, BP.bufferpool[BPindex].TPS):
        previousTailRecord = self.getPreviousTailRecord(baseRecord, selectedPageRange)
        # NOTE(review): the tail path slices data from MetaElements + 1 while
        # the base path slices from MetaElements -- confirm tail records
        # carry one extra meta column (BASE_RID).
        record = Record(previousTailRecord[RID_COLUMN], key, previousTailRecord[MetaElements + 1:])
    else:
        record = Record(baseRecord[RID_COLUMN], key, baseRecord[MetaElements:])
    return record
def setupReturnedRecord(self, record, query_columns):
    """Project record.columns through a 0/1 selection mask.

    query_columns: list of flags; positions with flag 1 keep the column
    value, every other position becomes None. Returns the projected list.
    """
    return [record.columns[position] if flag == 1 else None
            for position, flag in enumerate(query_columns)]
def delete(self, key):
    """Delete the record with the given primary key.

    Returns False when the key is unknown; otherwise invalidates the base
    record, walks its tail-record chain invalidating each version, and
    removes the key from the secondary indexes. The bufferpool latch is
    held across the whole invalidation; the index latch only around the
    index mutation.
    """
    if key not in self.keyToRID:
        return False
    baseRID = self.keyToRID[key]
    selectedPageRange = self.getPageRange(baseRID)
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)  # unused, kept as-is
    BasePagePath = self.getBasePagePath(baseRID)
    basePageOffset = self.calculatePageOffset(baseRID)
    # BP latch
    BP.latch.acquire()
    baseBPindex = self.getBasePageBPIndex(BasePagePath, selectedPageRange)
    baseRecord = BP.bufferpool[baseBPindex].getRecord(basePageOffset)
    # Invalidate base record
    BP.bufferpool[baseBPindex].invalidateRecord(basePageOffset)
    self.finishedModifyingRecord(baseBPindex)
    # Recurse through tail record indirections, invalidating each tail record until invalidated base record reached
    if baseRecord[SCHEMA_ENCODING_COLUMN] == 1:
        self.invalidateTailRecords(baseRecord[INDIRECTION_COLUMN], baseRID, selectedPageRange)
    # BP unlatch
    BP.latch.release()
    if self.index:
        # Index latch
        self.index.latch.acquire()
        self.indexDelete(baseRID)
        # Index unlatch
        self.index.latch.release()
def invalidateTailRecords(self, indirectionRID, baseRID, selectedPageRange):
    """Invalidate every tail record along an update chain.

    Follows the indirection chain starting at indirectionRID, invalidating
    each tail record, until the chain loops back to baseRID.

    Rewritten iteratively: the original recursed once per chain link and
    could exhaust Python's call stack on heavily-updated records.
    """
    while indirectionRID != baseRID:
        pageOffset = self.calculatePageOffset(indirectionRID)
        TailPagePath = self.getTailPagePath(indirectionRID, selectedPageRange)
        tailBPindex = BP.pathInBP(TailPagePath)
        if tailBPindex is None:
            # here we know that the page is not in the bufferpool (so the page exists only on disk)
            page = TailPage(self.num_columns, selectedPageRange, TailPagePath)
            page.readPageFromDisk(TailPagePath)
            tailBPindex = BP.add(page)
        else:
            # here the page is in the bufferpool, so we will refresh it.
            tailBPindex = BP.refresh(tailBPindex)
        # invalidateRecord returns the next RID in the chain
        indirectionRID = BP.bufferpool[tailBPindex].invalidateRecord(pageOffset)
        self.finishedModifyingRecord(tailBPindex)
def getBasePageBPIndex(self, BasePagePath, selectedPageRange):
    """Return the bufferpool frame index for the base page at BasePagePath,
    loading it from disk when it is not cached."""
    frameIndex = BP.pathInBP(BasePagePath)
    if frameIndex is not None:
        # cached: refresh its replacement priority
        return BP.refresh(frameIndex)
    # not cached: read the base page in from disk
    page = BasePage(self.num_columns, selectedPageRange, BasePagePath)
    page.readPageFromDisk(BasePagePath)
    return BP.add(page)
def getTailPageBufferIndex(self, selectedPageRange, TailPagePath):
    """Return the bufferpool frame index for the tail page at TailPagePath.

    When the page is cached, refresh and return its frame. Otherwise build
    a frame: load from disk when the page was previously committed, or
    start a brand-new empty tail page when the path does not exist yet.
    """
    frameIndex = BP.pathInBP(TailPagePath)
    if frameIndex is not None:
        # cached: refresh its replacement priority
        return BP.refresh(frameIndex)
    page = TailPage(self.num_columns, selectedPageRange, TailPagePath)
    if os.path.exists(TailPagePath):
        # committed previously: populate the frame from disk
        page.readPageFromDisk(TailPagePath)
    return BP.add(page)
def getTailPagePath(self, tailRID, selectedPageRange):
    """Return the on-disk path of the tail page that holds tailRID."""
    tailPageIndex = self.calculateTailPageIndex(tailRID)
    return "{}/pageRange_{}/tailPage_{}".format(self.path, selectedPageRange, tailPageIndex)
def getPreviousTailRecord(self, baseRecord, selectedPageRange):
    """Fetch the tail record that baseRecord's indirection points to."""
    tailRID = baseRecord[INDIRECTION_COLUMN]
    tailPath = self.getTailPagePath(tailRID, selectedPageRange)
    frameIndex = self.getTailPageBufferIndex(selectedPageRange, tailPath)
    tailRecord = BP.bufferpool[frameIndex].getRecord(self.calculatePageOffset(tailRID))
    # release the pin taken for this read
    BP.bufferpool[frameIndex].pinned -= 1
    return tailRecord
def writeMetaJsonToDisk(self, path):
    """Serialize the table's metadata to <path>/Meta.json.

    Persists name, key column, column count, RID counters, the key->RID
    map and the merge counter. Uses a `with` block so the file handle is
    closed even if json.dump raises (the original used open/close and left
    the handle open on error, plus a stray trailing `pass`).
    """
    MetaJsonPath = path + "/Meta.json"
    metaDictionary = {
        "name": self.name,
        "key": self.key,
        "num_columns": self.num_columns,
        "baseRID": self.baseRID,
        "keyToRID": self.keyToRID,
        "tailRIDs": self.tailRIDs,
        "numMerges": self.numMerges
    }
    with open(MetaJsonPath, "w") as f:
        json.dump(metaDictionary, f, indent=4)
def close(self, path):
    """Shut the table down: write Meta.json to `path`, then evict every
    bufferpool frame (kickAll) so all dirty pages reach disk."""
    self.writeMetaJsonToDisk(path)
    BP.kickAll()
def calculateBasePageIndex(self, baseRID):
    """Return the base-page index (within its page range) holding baseRID.

    The original repeatedly subtracted RecordsPerPageRange and then counted
    ElementsPerPhysicalPage-sized subtractions -- i.e. a modulo followed by
    a floor division. Computed directly in O(1) here; results are identical
    for non-negative RIDs.
    """
    return (baseRID % RecordsPerPageRange) // ElementsPerPhysicalPage
def calculateTailPageIndex(self, tailRID):
    """Return the tail-page index for tailRID.

    Each full page range contributes PagesPerPageRange pages, plus the
    page offset of the remainder within its range. The original computed
    this with two repeated-subtraction loops; this is the same arithmetic
    (floor division and modulo) in O(1).
    """
    fullRanges = tailRID // RecordsPerPageRange
    remainder = tailRID % RecordsPerPageRange
    return fullRanges * PagesPerPageRange + remainder // ElementsPerPhysicalPage
# 1. Call perform merge on background thread
# 2. Have BP only write metaData pages for any pages currently being merged which are also in the BP still
# 3. Replace all returned consolidated base page data pages and Page_Meta at the path
def initiateMerge(self, pageRange):
    """Run a merge for pageRange on a worker thread and commit the results.

    Blocks until the merge finishes (the executor context joins on exit).
    Changed `== None` / `!= None` comparisons to `is None` / `is not None`
    (identity is the correct test for the None singleton).
    """
    # 1.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        backgroundThread = executor.submit(self.performMerge, pageRange)
        mergedBasePages = backgroundThread.result()
    if mergedBasePages is None:
        # merge was skipped (tail pages of the window not all committed yet)
        return
    for mergedBasePage in mergedBasePages:
        BPIndex = BP.pathInBP(mergedBasePage.path)
        # 2.
        if BPIndex is not None:
            BP.bufferpool[BPIndex].consolidated = True
        # 3.
        mergedBasePage.writeDataToDisk(mergedBasePage.path)
    self.numMerges += 1
# 1. Recreate all full base pages and tail pages
#    a. if tail pages haven't been written out yet, don't perform (see function for more in-depth explanation)
# 2. Map base pages to their path and keep track of updatedBaseRecords
# 3. Iterate through reversed tail records
#    a. Keep track of seen base records so they only get updated once
# 4. Get base record by matching paths with tail records baseRID and update with tail page data
def performMerge(self, pageRange):
    """Merge the newest tail-record versions into copies of the full base
    pages of `pageRange`; returns the merged base pages, or None when the
    merge window's tail pages are not all on disk yet."""
    # 1.
    basePages = self.getAllFullBasePages(pageRange)
    tailPages = self.getAllFullTailPagesReversed(pageRange)
    if tailPages == None:
        return None
    # 2.
    updatedBaseRecords = set()
    mappedBasePages = {}
    for basePage in basePages:
        mappedBasePages[basePage.path] = basePage
    # 3. tail pages and records are iterated newest-first, so the first
    # version seen per base RID is the most recent one.
    for tailPage in tailPages:
        allTailRecords = tailPage.getAllRecordsReversed()
        for tailRecord in allTailRecords:
            # 3a. only the newest update per base record is applied
            if tailRecord[BASE_RID] in updatedBaseRecords:
                continue
            else:
                updatedBaseRecords.add(tailRecord[BASE_RID])
            # 4. splice this tail record's data columns into the base page
            basePagePath = self.getBasePagePath(tailRecord[BASE_RID])
            if basePagePath in mappedBasePages:
                basePage = mappedBasePages[basePagePath]
                pageOffset = self.calculatePageOffset(tailRecord[BASE_RID])
                basePage.mergeTailRecord(pageOffset, tailRecord[RID_COLUMN], tailRecord[tailPage.numMetaElements():])
    return basePages
def getBasePagePath(self, baseRID):
    """Return the on-disk directory path of the base page holding baseRID.

    Despite the "get" name this has side effects: it creates the page-range
    directory when missing and appends a -1 sentinel to self.tailRIDs for a
    newly-seen page range.
    """
    selectedPageRange = self.getPageRange(baseRID)
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
    if not os.path.exists(PageRangePath):
        os.mkdir(PageRangePath)
    if len(self.tailRIDs) <= selectedPageRange:
        # -1 means "no tail record allocated yet" for this range
        self.tailRIDs.append(-1)
    selectedBasePage = self.calculateBasePageIndex(baseRID)
    BasePagePath = PageRangePath + "/basePage_" + str(selectedBasePage)
    return BasePagePath
# translate RID to actual pageOffset
def calculatePageOffset(self, RID):
    """Return the record slot offset of RID within its physical page.

    The original reduced RID by repeated subtraction (mod RecordsPerPageRange,
    then mod ElementsPerPhysicalPage); this computes the same value in O(1).
    """
    return (RID % RecordsPerPageRange) % ElementsPerPhysicalPage
# 1. If base page not committed yet, don't add to merge queue (dir path doesn't exist)
# 2. Check if base page is full and therefore eligible for merge
# 3. Return list of full base pages
def getAllFullBasePages(self, selectedPageRange):
    """Return every committed, completely-full base page in selectedPageRange.

    Pages are read straight from disk (bypassing the bufferpool) because
    the merge works on private copies. The metadata file is now opened
    with a context manager; the original used open/close and would leak
    the handle if json.load raised.
    """
    allFullBasePages = []
    # iterate from 0 to most recently updated pageRange (handle case for only 1 pageRange)
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
    for selectedBasePage in range(0, self.calculateBasePageIndex(self.baseRID) + 1):
        BasePagePath = PageRangePath + "/basePage_" + str(selectedBasePage)
        # 1.
        if not os.path.exists(BasePagePath):
            continue
        # 2.
        MetaPagePath = BasePagePath + "/Page_Meta.json"
        with open(MetaPagePath, "r") as f:
            metaDictionary = json.load(f)
        # 3. only completely-full pages are merge candidates
        if ElementsPerPhysicalPage == metaDictionary["num_records"]:
            allFullBasePages.append(self.getBasePage(selectedPageRange, BasePagePath))
    return allFullBasePages
# 1. Iterate in reverse from self.numMerges * MergePolicy + MergePolicy - 1 through self.numMerges * MergePolicy (9-0, 19-10, etc.)
# 2. If tail page in range not committed yet (dir path doesn't exist), don't perform merge
def getAllFullTailPagesReversed(self, selectedPageRange):
    """Collect the tail pages of the current merge window, newest first.

    Returns None when any page in the window has not been committed to
    disk yet, which tells the caller to skip this merge entirely.
    """
    allFullTailPages = []
    # 1.
    # NOTE(review): the range starts at numMerges*MergePolicy + MergePolicy,
    # one page beyond the "9-0" window described in the comment above --
    # confirm whether including that extra (newest) page is intentional.
    PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
    for selectedTailPage in range(self.numMerges * MergePolicy + MergePolicy, self.numMerges * MergePolicy - 1, -1):
        TailPagePath = PageRangePath + "/tailPage_" + str(selectedTailPage)
        # 2. Tail page in range not committed yet, don't perform merge
        if not os.path.isdir(TailPagePath):
            return None
        allFullTailPages.append(self.getTailPage(selectedPageRange, TailPagePath))
    return allFullTailPages
# Getting pages outside of bufferpool for merge
def getTailPage(self, selectedPageRange, TailPagePath):
    """Load a tail page directly from disk, bypassing the bufferpool."""
    tailPage = TailPage(self.num_columns, selectedPageRange, TailPagePath)
    tailPage.readPageFromDisk(TailPagePath)
    return tailPage
# Getting pages outside of bufferpool for merge
def getBasePage(self, selectedPageRange, BasePagePath):
    """Load a base page directly from disk, bypassing the bufferpool."""
    basePage = BasePage(self.num_columns, selectedPageRange, BasePagePath)
    basePage.readPageFromDisk(BasePagePath)
    return basePage
def getPageRange(self, baseRID):
    """Return the page-range number that contains baseRID.

    Uses integer floor division instead of the original
    floor(baseRID / RecordsPerPageRange): true division goes through a
    float and can round incorrectly once baseRID exceeds 2**53; `//` is
    exact for all ints and identical for every smaller value.
    """
    return baseRID // RecordsPerPageRange
# Base record's indirection is pointing to a record that's already been merged
def recordHasBeenMerged(self, baseRecord, TPS):
    """True when the base record's indirection is at or before the page's
    tail-page sequence number TPS, i.e. its update is already merged in."""
    return baseRecord[INDIRECTION_COLUMN] <= TPS
# Cumulative splicing
def createCumulativeRecord(self, oldRecord, updatedRecord, indirectionColumn, baseRID, selectedPageRange, NumMetaElements):
    """Build a full tail record by overlaying updatedRecord onto oldRecord.

    oldRecord: previous full record (meta columns then data columns).
    updatedRecord: data columns only; a None entry means "keep old value".
    indirectionColumn: RID this tail record's indirection should point to.
    NumMetaElements: number of meta columns in oldRecord's layout.
    Returns the new record as a flat list: meta columns then data columns.
    """
    createdRecord = []
    # Reserve the meta slots.
    # NOTE(review): uses the module-level MetaElements constant rather than
    # the NumMetaElements parameter -- confirm these always agree.
    for metaIndex in range(0, MetaElements + 1):
        createdRecord.append(0)
    createdRecord[INDIRECTION_COLUMN] = indirectionColumn
    createdRecord[RID_COLUMN] = self.tailRIDs[selectedPageRange]  # this tail record's own RID
    createdRecord[TIMESTAMP_COLUMN] = round(time.time() * 1000)  # milliseconds since epoch
    createdRecord[SCHEMA_ENCODING_COLUMN] = 1  # flag: record carries an update
    createdRecord[BASE_RID] = baseRID
    for columnIndex in range(0, len(updatedRecord)):
        # use data from the oldRecord
        if updatedRecord[columnIndex] == None:
            createdRecord.append(oldRecord[columnIndex + NumMetaElements])
        else:
            createdRecord.append(updatedRecord[columnIndex])
    return createdRecord
def getAllUpdatedRecords(self):
    """Return the latest version of every valid record in the table.

    For each base record: when it has been updated (indirection != 0) the
    matching tail record is collected instead of the base record.
    """
    allRecords = []
    for selectedPageRange in range(0, self.getPageRange(self.baseRID) + 1):
        PageRangePath = self.path + "/pageRange_" + str(selectedPageRange)
        for selectedBasePage in range(0, self.calculateBasePageIndex(self.baseRID) + 1):
            BasePagePath = PageRangePath + "/basePage_" + str(selectedBasePage)
            BPindex = self.getBasePageBPIndex(BasePagePath, selectedPageRange)
            for baseRecord in BP.bufferpool[BPindex].getAllRecords():
                if baseRecord[RID_COLUMN] == INVALID:
                    continue
                # check for tail page
                if baseRecord[INDIRECTION_COLUMN] != 0:
                    tailIndex = self.calculateTailPageIndex(baseRecord[INDIRECTION_COLUMN])
                    TailPagePath = PageRangePath + "/tailPage_" + str(tailIndex)
                    tailBPindex = BP.pathInBP(TailPagePath)
                    if tailBPindex is None:
                        # not cached: read the tail page from disk
                        page = TailPage(self.num_columns, selectedPageRange, TailPagePath)
                        page.readPageFromDisk(TailPagePath)
                        tailBPindex = BP.add(page)
                    else:
                        # here the page is in the bufferpool, so we will refresh it.
                        tailBPindex = BP.refresh(tailBPindex)
                    for tailRecord in BP.bufferpool[tailBPindex].getAllRecords():
                        if (tailRecord[RID_COLUMN] == baseRecord[INDIRECTION_COLUMN]):
                            allRecords.append(tailRecord)
                elif baseRecord[INDIRECTION_COLUMN] == 0:
                    allRecords.append(baseRecord)
                # NOTE(review): tailBPindex is unbound until the first updated
                # record is seen (NameError risk when the first record has no
                # update), and this unpins once per base record rather than
                # once per tail-page access -- verify the pin accounting.
                BP.bufferpool[tailBPindex].pinned -= 1
            BP.bufferpool[BPindex].pinned -= 1
    return allRecords
def indexInsert(self, record):
    """Insert a new row into every populated secondary index.

    record: the row's first five data values; self.baseRID (the row's RID)
    is appended as the final element. Index positions are 1-based, matching
    the original counter that was incremented before the None check.
    """
    entry = [record[0], record[1], record[2], record[3], record[4], self.baseRID]
    for position, index in enumerate(self.index.indices, start=1):
        if index is not None:
            index.insert(entry, position)
def indexUpdate(self, record):
    """Propagate an updated row (its five data columns plus RID) into every
    populated secondary index via findAndChange.

    record: a cumulative tail record; columns 5-9 are the data values and
    record[RID_COLUMN] identifies the row. Removed the `incrementer`
    counter the original maintained but never used.
    """
    entry = [record[5], record[6], record[7], record[8], record[9], record[RID_COLUMN]]
    for index in self.index.indices:
        if index is not None:
            index.findAndChange(entry, record[RID_COLUMN])
def indexDelete(self, RID):
    """Remove the row identified by RID from every populated secondary
    index by overwriting its entry with -1 sentinels.

    Removed the `incrementer` counter the original maintained but never used.
    """
    tombstone = [-1, -1, -1, -1, -1, -1]
    for index in self.index.indices:
        if index is not None:
            index.findAndChange(tombstone, RID)
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = 'Man Li'
import os
import re
import sys
import time
import json
import random
import requests
from requests.exceptions import ReadTimeout, ConnectionError, RequestException
import csv
from lxml import etree
from multiprocessing import Process
from itertools import chain
# Force the default string encoding to UTF-8.
# NOTE(review): reload(sys)/setdefaultencoding only exists on Python 2; on
# Python 3 this branch never runs because the default is already 'utf-8'.
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
    reload(sys)
    sys.setdefaultencoding(defaultencoding)
# USER_AGENTS: pool of browser User-Agent strings picked at random to vary requests
USER_AGENTS = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
# Build the request headers; the User-Agent is chosen once at import time.
HEADER = {
    'User-Agent': random.choice(USER_AGENTS),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, deflate',
}
# Hard-coded HTTP/HTTPS proxy endpoints.
# NOTE(review): these public proxies are likely stale -- verify before use.
proxies = {
    "http": "http://119.101.115.209:9999",
    "https": "https://1.192.244.90:9999",
}
# GET a page and return its body text.
def get_html(url_path, payload='', cookies='', proxies='', _retries=3):
    """GET url_path and return the decoded response body, or None on failure.

    payload: query parameters (dict or json); cookies: requests cookie jar;
    proxies: requests-style proxy map; _retries: remaining retry budget
    (new keyword with a default, so existing callers are unaffected).

    Fixes two defects in the original error path: retries re-called
    get_html(url_path) only, silently dropping payload/cookies/proxies,
    and recursed without any bound, so a persistently failing URL would
    eventually hit the recursion limit.
    """
    try:
        session = requests.Session()
        response = session.get(
            url_path,
            headers=HEADER,       # randomized browser headers
            params=payload,
            cookies=cookies,
            verify=False,         # SSL verification off, as in the original
            proxies=proxies,
            timeout=30)           # seconds
        response.raise_for_status()
        # the site serves GB2312; set the encoding to avoid mojibake
        response.encoding = 'gb2312'
        return response.text
    except ReadTimeout:
        print('Timeout')
    except ConnectionError:
        print('Connection error')
    except RequestException:
        print('RequestException')
    if _retries > 0:
        time.sleep(5)
        return get_html(url_path, payload, cookies, proxies, _retries - 1)
    return None
def get_headers(url_path, payload='', cookies='', proxies=''):
    """GET url_path and return the response headers; None on any request error."""
    try:
        resp = requests.Session().get(
            url_path,
            headers=HEADER,   # randomized browser headers
            params=payload,   # query parameters (dict or json)
            cookies=cookies,
            verify=True,      # keep SSL verification enabled
            proxies=proxies,
            timeout=30)       # seconds
        resp.raise_for_status()
        # resp.cookies is also available here if the cookie jar is needed
        return resp.headers
    except ReadTimeout:
        print('Timeout')
    except ConnectionError:
        print('Connection error')
    except RequestException:
        print('RequestException')
def get_now_Location(url_path, payload='', cookies='', proxies=''):
    """GET url_path and return the final URL after redirects; None on error."""
    try:
        resp = requests.Session().get(
            url_path,
            headers=HEADER,   # randomized browser headers
            params=payload,   # query parameters (dict or json)
            cookies=cookies,
            verify=True,      # keep SSL verification enabled
            proxies=proxies,
            timeout=30)       # seconds
        resp.raise_for_status()
        # resp.headers / resp.cookies are available here for debugging
        return resp.url
    except ReadTimeout:
        print('Timeout')
    except ConnectionError:
        print('Connection error')
    except RequestException:
        print('RequestException')
# POST a form/body and return the response text.
def post_html(url_path, datas, payload='', cookies='', proxies=''):
    """POST `datas` to url_path and return the response body; None on error.

    Note: raise_for_status is intentionally not called (matching the
    original), so non-2xx bodies are still returned.
    """
    try:
        resp = requests.Session().post(
            url_path,
            headers=HEADER,   # randomized browser headers
            data=datas,       # request body
            params=payload,   # query parameters (dict or json)
            cookies=cookies,
            verify=True,      # keep SSL verification enabled
            proxies=proxies,
            timeout=30)       # seconds
        return resp.text
    except ReadTimeout:
        print('Timeout')
    except ConnectionError:
        print('Connection error')
    except RequestException:
        print('RequestException')
# Extract every '<h1 ... </ul>' span from the markup.
def h1h1(html):
    """Return all non-greedy matches of '<h1' up to the next '</ul>'."""
    return re.findall(r"<h1.+?</ul>", str(html))
def filter_td(html):
    """Return the inner content of every <td ...> cell (dot matches newlines)."""
    pattern = re.compile(r"<td.+?>(.+?)</td>", re.S)
    return pattern.findall(str(html))
def get_a(html):
    """Return the inner text of every <a ...> anchor in the markup."""
    return re.findall(r"<a.+?>(.+?)</a>", str(html))
def filter_href(html):
    """Return every href value, handling both double- and single-quoted attributes."""
    pattern = r"(?<=href=\").+?(?=\")|(?<=href=\').+?(?=\')"
    return re.findall(pattern, str(html))
def filter_href_2(html):
    """Return the raw href attribute of every anchor (quotes included)."""
    return re.findall(r"<a.+?href=(.+?)>", str(html))
def filter_href_3(html):
    """Return the double-quoted href value of every anchor (quotes stripped)."""
    return re.findall(r"<a.+?href=\"(.+?)\">", str(html))
def get_span_title(html):
    """Return the title attribute of every <span ... title="...">...</span>."""
    return re.findall(r"<span.+?title=\"(.+?)\">.+?</span>", str(html))
def get_txt(html):
    """Strip every markup tag from html and return the remaining text."""
    return re.sub(r"<[^>]+>", "", str(html))
# Extract the postal code from a detail page.
def get_youbian(html):
    """Return the postal-code heading text of a yb21.cn detail page."""
    tree = etree.HTML(str(html))
    headings = tree.xpath('/html/body/table[2]/tbody/tr[1]/td/h1/text()')
    return headings[0]
# Extract the city cell from a detail page.
def get_chengshi(html):
    """Return the "province - city - county" table cell as an HTML string."""
    tree = etree.HTML(str(html))
    cells = tree.xpath('/html/body/table[2]/tbody/tr[2]/td[2]')
    markup = etree.tostring(cells[0], encoding="utf-8", pretty_print=True)
    return markup.decode("utf-8")
# Extract the district table from a detail page.
def get_diqu(html):
    """Return the district table body as an HTML string."""
    tree = etree.HTML(str(html))
    tables = tree.xpath('/html/body/table[2]/tbody/tr[3]/td/table/tbody')
    markup = etree.tostring(tables[0], encoding="utf-8", pretty_print=True)
    return markup.decode("utf-8")
'''
# Level 1: walk the index page through to each city page
print()
aaa = filter_href_2(datas)
for aa in aaa:
    hrefs = "http://www.yb21.cn"+aa
    print(hrefs,"\n")
'''
'''
# Level 2: walk a city page through to each postal-code page
urltest2 = "http://www.yb21.cn/post/city/1101.html"
datas2 = get_html(urltest2)
bbb = filter_href_3(datas2)
for bb in bbb:
    if "code" in bb:
        hrefs = "http://www.yb21.cn"+bb
        print(hrefs,"\n")
'''
urltest3 = "http://www.yb21.cn/post/code/838200.html"
# Main entry point for scraping one postal-code page.
def get_datas_info(urls):
    """Scrape one yb21.cn /post/code/ detail page and append rows to the CSV.

    urls: absolute URL of the detail page. Writes one row
    [postcode, province, city, county, district] per district entry.
    The row emission is nested under the filter condition so diqu_val can
    never be referenced before assignment (the original risked a NameError
    when the page contained no matching district cell).
    """
    page = get_html(urls)
    youbiannumber = get_youbian(page)  # postal code
    print("[邮政编码] = " + youbiannumber)
    chengshi = get_txt(get_chengshi(page))
    print("[城市] = " + chengshi)
    # format is "province - city - county"; an IndexError here means the
    # page layout changed -- TODO confirm every page carries three parts
    parts = chengshi.split("-")
    shen = parts[0].strip()
    shi = parts[1].strip()
    xian = parts[2].strip()
    diqu = filter_td(get_diqu(page))
    print("[地区] = " + str(diqu))
    for adddatas in diqu:
        # district cells contain a non-breaking space and no nested <td>
        if ('\xa0' in adddatas) and ('<td>' not in adddatas):
            diqu_val = adddatas.strip()
            print("[邮政编码] = " + youbiannumber)
            print("[省] = " + shen)
            print("[市] = " + shi)
            print("[县] = " + xian)
            print("[地区] = " + diqu_val)
            # append the row to the CSV (hard-coded Windows path, as before)
            addlist = [youbiannumber, shen, shi, xian, diqu_val]
            print(addlist)
            with open("D:/youbian_2.csv", 'a', newline='', encoding='utf-8') as f:
                print(" ===> add ok !!!")
                csv.writer(f, dialect='excel').writerow(addlist)
            print("__________________________________\n")
#get_datas_info(urltest3)
# Level-2 crawler.
def run2(urls):
    """Visit a city page and scrape every /post/code/ detail link on it."""
    city_html = get_html(urls)
    for link in filter_href_3(city_html):
        if "code" in link:
            detail_url = "http://www.yb21.cn" + link
            print(detail_url, "\n")
            get_datas_info(detail_url)
#run2("http://www.yb21.cn/post/city/1101.html")
# Level-1 (main) crawler.
def run1():
    """Start at the site index and crawl every city link it contains."""
    index_url = "http://www.yb21.cn/post/"
    index_html = get_html(index_url)
    for link in filter_href_2(index_html):
        city_url = "http://www.yb21.cn" + link
        print(city_url, "\n")
        run2(city_url)
# Start the crawl only when executed as a script, not when imported as a
# module (the original ran unconditionally on import).
if __name__ == "__main__":
    run1()
|
import time
import VehiclePWMModule
# Construct PWM controllers for the steering servo and the ESC (motor driver).
vehicle_servo = VehiclePWMModule.vehiclePWM("servo")
vehicle_esc = VehiclePWMModule.vehiclePWM("esc")
# Alternate the ESC between two throttle settings once per second, forever.
while(True):
    #vehicle_esc.stop()
    vehicle_esc.accel(1)#Forward -- assumes positive values drive forward; TODO confirm accel() units/range
    time.sleep(1)
    vehicle_esc.accel(-10)# presumably reverse/brake at higher magnitude -- verify against VehiclePWMModule
    time.sleep(1)
|
#converted for ue4 use from
#https://github.com/tensorflow/docs/blob/master/site/en/tutorials/_index.ipynb
import tensorflow as tf
import unreal_engine as ue
from TFPluginAPI import TFPluginAPI
#additional includes
from tensorflow.python.keras import backend as K #to ensure things work well with multi-threading
import numpy as np #for reshaping input
import operator #used for getting max prediction from 1x10 output array
import random
class MnistTutorial(TFPluginAPI):
    """UE4 TensorFlow plugin API object: trains a small dense network on
    MNIST and answers 28x28 digit-prediction requests from the engine."""

    # keras stop callback
    class StopCallback(tf.keras.callbacks.Callback):
        """Keras callback bridging to UE4: honors early-stop requests from
        the engine and streams training progress back as plugin events."""

        def __init__(self, outer):
            # outer: the owning MnistTutorial instance (provides shouldStop,
            # callEvent, batch_size, jsonPixels, x_train)
            self.outer = outer

        def on_train_begin(self, logs={}):
            self.losses = []

        def on_batch_end(self, batch, logs={}):
            if(self.outer.shouldStop):
                # notify on first call
                if not (self.model.stop_training):
                    ue.log('Early stop called!')
                self.model.stop_training = True
            else:
                # throttle progress events to every 5th batch
                if(batch % 5 == 0):
                    # json convertible types are float64 not float32
                    logs['acc'] = np.float64(logs['acc'])
                    logs['loss'] = np.float64(logs['loss'])
                    self.outer.callEvent('TrainingUpdateEvent', logs, True)
                # callback an example image from batch to see the actual data we're training on
                if((batch*self.outer.batch_size) % 100 == 0):
                    # NOTE(review): index can reach batch_size * batch, which may
                    # exceed len(x_train) for late batches -- verify bounds.
                    index = random.randint(0,self.outer.batch_size)*batch
                    self.outer.jsonPixels['pixels'] = self.outer.x_train[index].ravel().tolist()
                    self.outer.callEvent('PixelEvent', self.outer.jsonPixels, True)

    # Called when TensorflowComponent sends Json input
    def onJsonInput(self, jsonInput):
        """Predict a digit from jsonInput['pixels'] (flat 784 grayscale array).

        Returns {'prediction': index} with the argmax class index, or
        {'prediction': -1} when no trained model exists yet.
        """
        # build the result object
        result = {'prediction':-1}
        # If we try to predict before training is complete
        if not hasattr(self, 'model'):
            ue.log_warning("Warning! No 'model' found, prediction invalid. Did training complete?")
            return result
        # prepare the input, reshape 784 array to a 1x28x28 array
        x_raw = jsonInput['pixels']
        x = np.reshape(x_raw, (1, 28, 28))
        # run the input through our network using stored model and graph
        with self.graph.as_default():
            output = self.model.predict(x)
        # convert output array to max value prediction index (0-10)
        index, value = max(enumerate(output[0]), key=operator.itemgetter(1))
        # Optionally log the output so you can see the weights for each value and final prediction
        ue.log('Output array: ' + str(output) + ',\nPrediction: ' + str(index))
        result['prediction'] = index
        return result

    # Called when TensorflowComponent signals begin training (default: begin play)
    def onBeginTraining(self):
        """Train the MNIST classifier, streaming progress to UE4, then store
        the model and TF graph on self for later prediction."""
        ue.log("starting MnistTutorial training")
        # training parameters
        self.batch_size = 128
        num_classes = 10
        epochs = 3
        # reset the session each time we get training calls
        self.kerasCallback = self.StopCallback(self)
        K.clear_session()
        # load mnist data set
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        # rescale 0-255 -> 0-1.0
        x_train, x_test = x_train / 255.0, x_test / 255.0
        # define model: flatten -> dense 512 relu -> dropout -> softmax head
        model = tf.keras.models.Sequential([
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(512, activation=tf.nn.relu),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax)
        ])
        model.compile( optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        # pre-fill our callEvent data to optimize callbacks
        jsonPixels = {}
        size = {'x':28, 'y':28}
        jsonPixels['size'] = size
        self.jsonPixels = jsonPixels
        self.x_train = x_train
        # this will do the actual training
        model.fit(x_train, y_train,
            batch_size=self.batch_size,
            epochs=epochs,
            callbacks=[self.kerasCallback])
        model.evaluate(x_test, y_test)
        ue.log("Training complete.")
        # store our model and graph for prediction
        self.graph = tf.get_default_graph()
        self.model = model
# required function to get our api
def getApi():
    """Module entry point used by the TF plugin loader: return the
    MnistTutorial singleton instance."""
    # return CLASSNAME.getInstance()
    return MnistTutorial.getInstance()
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.text import slugify
from django.db.models.signals import post_save
from django.dispatch import receiver
from ckeditor.fields import RichTextField
from django.utils.html import mark_safe
# Create your models here.
class PublishedManager(models.Manager):
    """Model manager that yields only posts whose status is "publicado"."""

    def get_queryset(self):
        base = super(PublishedManager, self).get_queryset()
        return base.filter(status="publicado")
class Category(models.Model):
    """Blog post category, ordered newest-first by creation time."""
    # display name of the category
    name = models.CharField(max_length=100)
    # editable publication timestamp (defaults to now)
    published = models.DateTimeField(default=timezone.now)
    # set automatically on first save
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = "Categoria"
        verbose_name_plural = "Categorias"
        ordering= ['-created']

    def __str__(self):
        return self.name
class Post(models.Model):
    """Blog post: slugged title, rich-text body, optional image, categories,
    and a draft/published workflow status."""
    # (stored value, human label) pairs for the status field
    STATUS = (
        ('rascunho', 'Rascunho'),
        ('publicado','Publicado')
    )
    title = models.CharField(max_length=250, verbose_name="Título")
    # populated from title by the insert_slug post_save handler when empty
    slug = models.SlugField(max_length=250)
    author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="Autor")
    #content = models.TextField(verbose_name="Conteúdo")
    content = RichTextField(verbose_name="Conteúdo")
    published = models.DateTimeField(default=timezone.now)
    created = models.DateTimeField(auto_now_add=True)
    changed = models.DateTimeField(auto_now=True)
    status =models.CharField(max_length=10,choices=STATUS, default='rascunho')
    image = models.ImageField(upload_to="blog",blank=True, null=True)
    category = models.ManyToManyField(Category,related_name="get_posts" )

    @property
    def view_image(self):
        # Admin thumbnail helper; assumes self.image is set -- TODO confirm
        # callers guard against posts without an image.
        return mark_safe('<img src="%s" width = "400px" /> ' %self.image.url)

    class Meta:
        ordering = ('-published',)

    # default manager plus a manager restricted to published posts
    objects = models.Manager()
    publishedManager = PublishedManager()

    # url absoluta
    def get_absolute_url(self):
        #return reverse('detail',args=[self.pk])
        return reverse('detail',args=[self.slug])

    def get_absolute_update(self):
        return reverse('post_edit',args=[self.slug])
        #return reverse('post_edit',args=[self.pk])

    def get_absolute_delete(self):
        #return reverse('post_edits',args=[self.slug])
        return reverse('post_delete',args=[self.pk])

    def __str__(self):
        return self.title
@receiver(post_save,sender=Post)
def insert_slug(sender,instance,**kwargs):
    """post_save hook: fill in the slug from the title on first save.

    The nested save() re-fires this signal, but the `if not instance.slug`
    guard stops the recursion after one extra pass.
    """
    if not instance.slug:
        instance.slug = slugify(instance.title)
        return instance.save()
# Demonstrate string slicing: drop the first two characters.
b = "Hello, how are you?"
tail = b[2:]
print(tail)
# Generated by Django 2.2.6 on 2019-10-16 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add first_name, last_name and phone to ShippingAddress.

    Auto-generated; the temporary default 't' was supplied interactively
    for existing rows and is dropped again via preserve_default=False.
    """

    dependencies = [
        ('checkout', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='shippingaddress',
            name='first_name',
            field=models.CharField(default='t', max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='shippingaddress',
            name='last_name',
            field=models.CharField(default='t', max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='shippingaddress',
            name='phone',
            field=models.CharField(default='t', max_length=30),
            preserve_default=False,
        ),
    ]
|
# Read an n x n integer matrix and report sums related to its diagonals.
n = int(input('Numarul de linii:'))
X = [[int(input()) for a in range(n)] for b in range(n)]

print(' Matricea:')
for row in X:
    print(row)

# Sum of the main diagonal and of the secondary (anti-) diagonal.
s1 = sum(X[i][i] for i in range(n))
s2 = sum(X[n - 1 - i][i] for i in range(n))
# FIX: the original prints were missing the f-prefix, so the literal
# text "{s1}" was printed instead of the computed value.
print(f'Suma componentelor diagonalei principale{s1}, Suma diagonalei secundare{s2}')

# FIX: the original located positions with X.index(row) / row.index(value),
# which return the FIRST occurrence and give wrong sums whenever rows or
# values repeat; enumerate() yields the true indices.
# Above the main diagonal: column index > row index.
s3 = sum(v for i, row in enumerate(X) for j, v in enumerate(row) if i < j)
print(f'Suma componentelor aflate mai sus de diagonala principala{s3}')

# Below the main diagonal: column index < row index.
s4 = sum(v for i, row in enumerate(X) for j, v in enumerate(row) if i > j)
print(f'Suma componentelor aflate mai jos de diagonala principala{s4}')

# Above the secondary diagonal: i + j < n - 1.
s5 = sum(v for i, row in enumerate(X) for j, v in enumerate(row) if i + j < n - 1)
print(f'Suma componentelor aflate mai sus de diagonala secundara{s5}')

# Below the secondary diagonal: i + j > n - 1.
# FIX: the original condition compared against "n - a" where a is a list,
# which raises TypeError at runtime; the correct bound is n - 1.
s6 = sum(v for i, row in enumerate(X) for j, v in enumerate(row) if i + j > n - 1)
print(f'Suma componentelor aflate mai jos de diagonala secundara{s6}')
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 22:35:01 2020

@author: sumant

Simple console to-do list persisted in mytask.txt.
"""

# (Re)create the task file with a header every time the program starts.
with open("mytask.txt",'w') as f:
    f.write("Task Name : \n")
    f.write("------------\n")

while True:
    print("My To Do App")
    print("============")
    print("1. Add Task")
    print("2. View All Tasks")
    print("0. Exit")
    # FIX: int(input(...)) raised an uncaught ValueError on non-numeric
    # input; treat it as an invalid choice instead of crashing.
    try:
        opt = int(input("Please Choose option: "))
    except ValueError:
        print("Invalid Choice Bye..!")
        break
    if opt == 1:
        task = input("Enter task name: ")
        with open("mytask.txt",'a') as f:
            f.write(f"{task}\n")
        print("Task added")
    elif opt == 2:
        with open("mytask.txt",'r') as f:
            task_list=f.read()
        print(task_list)
    elif opt == 0:
        print("Bye")
        break
    else:
        print("Invalid Choice Bye..!")
        break
|
def result(ai, k):
    """Greedy change-making.

    ai is an ascending list of denominations; returns the number of coins
    used to pay amount k, taking the largest denomination first.
    """
    coins_used = 0
    for denom in reversed(ai):
        # divmod covers the denom > k case too: quotient 0, k unchanged.
        used, k = divmod(k, denom)
        coins_used += used
    return coins_used
# Driver: first line holds N (denomination count) and K (target amount);
# the next N lines hold the denominations in ascending order.
_ = list(map(int, input().split()))
N, K = _[0], _[1]
ai = list()
for n in range(N):
    ai.append(int(input()))
print(result(ai, K))
|
class Solution(object):
    """LeetCode 448: find all numbers disappeared in an array."""

    def findDisappearedNumbers(self, nums):
        """Return, in ascending order, every value in 1..len(nums) missing from nums.

        https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/
        """
        present = set(nums)
        return [v for v in range(1, len(nums) + 1) if v not in present]

    # Alternative O(1)-extra-space idea kept from the original notes:
    # negate the entry at index |v|-1 for every value v; indices whose
    # entry stays positive correspond to missing numbers.
    # def findDisappearedNumbers(self, nums):
    #     for i in range(len(nums)):
    #         m = abs(nums[i]) - 1
    #         nums[m] = -nums[m] if nums[m] > 0 else nums[m]
    #     li = []
    #     for i in range(len(nums)):
    #         if nums[i] > 0:
    #             li.append(i + 1)
#!/usr/bin/env python
"""
v0.1 Scipt used with PDB, to simulate what the ptf_master.py ipython tasks
do, when given a new diff-object.
- Useful for debugging source clustering, classification, ...
NOTE: 20090615: typically break around:
break ingest_tools.py:4528
NOTE: Before running with a test object/source, make sure it's IDs are not in RDB tables:
mysql> delete from ptf_events where id=1;
delete from ptf_events where id=1;
Query OK, 1 row affected (0.00 sec)
mysql> delete from obj_srcid_lookup where obj_id=1 and survey_id=3;
delete from obj_srcid_lookup where obj_id=1 and survey_id=3;
Query OK, 1 row affected (0.00 sec)
"""
import sys
import os
import MySQLdb
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + 'Software/ingest_tools'))
import ptf_master
import ingest_tools # just needed to set PDB breakpoints
# Connection parameters for the object_test_db MySQL test database.
pars = { \
    'mysql_user':"pteluser", \
    'mysql_hostname':"192.168.1.25", \
    'mysql_database':'object_test_db', \
    'mysql_port':3306}

# Open the connection/cursor used by the (currently disabled) manual INSERTs below.
db = MySQLdb.connect(host=pars['mysql_hostname'], \
                     user=pars['mysql_user'], \
                     db=pars['mysql_database'], \
                     port=pars['mysql_port'])
cursor = db.cursor()

# Populator that ingests PTF diff-objects into sources.
DiffObjSourcePopulator = ptf_master.Diff_Obj_Source_Populator(use_postgre_ptf=True)
print "DONE: DiffObjSourcePopulator = ptf_master.Diff_Obj_Source_Populator()"
# NOTE: the list order corresponds to order of INSERT into MySQL RDB:
#"""
# Single test diff-object to push through the ingest pipeline.
test_objs = [ \
    {'obj_id': 0,
     'ra': 224.55255677,
     'dec': 18.44210702,
     'realbogus': 0.9,
     'flux': 1803.0,
     't_val': 0.0},
    ]

for dict_elem in test_objs:
    ra = dict_elem['ra']
    dec = dict_elem['dec']
    obj_id = dict_elem['obj_id']
    realbogus = dict_elem['realbogus']
    t_val = dict_elem['t_val']
    flux = dict_elem['flux']
    # Full diff-object record: most fields are fixed fiducial values; only the
    # coordinates, ids, realbogus, flux and time vary per test object.
    diff_obj = {'decl': dec, 'ub1_zp_new': 27.5, 'flux_aper_err': 100.331, 'filt': 9, 'obj_ids': [obj_id], 'unclear': 0.0050000000000000001, 'lmt_mg_new': 20.608000000000001, 'mag_ref': 14.8698, 'sub_m': 19.296199999999999, 'id': [obj_id], 'sub_m_err': 0.0717, 'src_id': 0, 'f_aper_err': 85.051199999999994, 'flags2': 0, 'mag_ref_err': 0.0063, 'objc_type': 10, 'flux_err': 119.136, 'ub1_zp_ref': 25.600000000000001, 'suspect': 0.002, 'ra': ra, 'b_image': [1.0069999999999999], 'a_image': [1.2529999999999999], 'filts': [9], 'mag_err': [0.0717], 'dec_rms': 1.0, 'mag': [19.296199999999999], 'f_aper': 3228.6500000000001, 'lmt_mag_ref': 21.876899999999999, 'dec': dec, 'ra_rms': 1.0, 'sub_id': [13695L], 'maybe': 0.0, 'bogus': 0.0, 'm': 14.894467501872207, 'flux_aper': 15923.299999999999, 'filter': ['R'], 'm_err': 0.0717, 'lmt_mg_ref': [21.876899999999999], 'flags': 0, 't': t_val, 'flux': flux, 'ujd': [t_val], 'realish': 0.0, 'realbogus':realbogus}
    # Columns excluded from the (disabled) hand-built INSERT below.
    k_list = diff_obj.keys()
    k_list.remove('filt')
    k_list.remove('filts')
    k_list.remove('obj_ids')
    k_list.remove('sub_m')
    k_list.remove('sub_m_err')
    k_list.remove('src_id')
    k_list.remove('flags')
    k_list.remove('flags2')
    k_list.remove('objc_type')
    k_list.remove('lmt_mag_ref')
    k_list.remove('m')
    k_list.remove('m_err')
    k_list.remove('t')
    k_list.remove('dec')
    #k_list.remove('id')
    if False:
        # Dead code: manual INSERT construction, kept for reference/debugging.
        #v_str = str(map(lambda x: str(diff_obj[x]), k_list))[1:-1].replace("'","")
        v_str = ""
        for k in k_list:
            v = diff_obj[k]
            if k == 'filter':
                v_str += '"%s", ' % (str(v[0]))
            elif type(v) == type([]):
                v_str += str(v[0]) + ", "
            else:
                v_str += str(v) + ", "
        insert_str = "INSERT INTO object_test_db.ptf_events (%s) VALUES (%s)" % ( \
            str(k_list)[1:-1].replace("'",""),
            v_str[:-2])
        # # #cursor.execute(insert_str)
    print " (new) INSERT id=", obj_id
    insert_str_2 = "INSERT INTO obj_srcid_lookup (src_id, obj_id, survey_id) VALUES (0, %d, 3)" % (obj_id)
    # # #cursor.execute(insert_str_2)
    print
    # Ingest the diff-object; returns per-source VOSource XML tuples.
    (srcid_xml_tuple_list, n_objs) = DiffObjSourcePopulator.ingest_diffobj(diff_obj, feat_db=DiffObjSourcePopulator.feat_db)
    # NOTE: enable the following if you want to TEST / DEBUG the classification code:
    DiffObjSourcePopulator.class_interface.classify_and_insert_using_vosource_list(srcid_xml_tuple_list)
    print 'done'
# The triple-quoted block below is an INACTIVE string literal: an alternative,
# larger set of test objects kept for reference.  It is never executed.
"""
test_objs = [ \
    {'obj_id': 0,
     'ra': 169.383670165,
     'dec': 53.303472271,
     'realbogus': 0.9,
     'flux': 1803.0,
     't_val': 0.0},
    {'obj_id': 0,
     'ra': 80.00,
     'dec': -80.0,
     'realbogus': 0.001,
     'flux': 1803.0,
     't_val': 2454972.0},
    {'obj_id': 1,
     'ra': 80.00,
     'dec': -80.0,
     'realbogus': 0.001,
     'flux': 1803.1,
     't_val': 2454972.1},
    {'obj_id': 2,
     'ra': 80.00,
     'dec': -80.0,
     'realbogus': 0.3,
     'flux': 1803.2,
     't_val': 2454972.2},
    {'obj_id': 3,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.3,
     'flux': 1803.3,
     't_val': 2454972.3},
    {'obj_id': 4,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.4,
     'flux': 1803.4,
     't_val': 2454972.4},
    {'obj_id': 5,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.5,
     'flux': 1803.5,
     't_val': 2454972.5},
    {'obj_id': 6,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.6,
     'flux': 1803.6,
     't_val': 2454972.6},
    {'obj_id': 7,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.7,
     'flux': 1803.7,
     't_val': 2454972.7},
    {'obj_id': 8,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.8,
     'flux': 1803.8,
     't_val': 2454972.8},
    {'obj_id': 9,
     'ra': 80.00,
     'dec': (-80.00000),
     'realbogus': 0.9,
     'flux': 1803.9,
     't_val': 2454972.9},
    ]
"""
|
# Generated by Django 3.1.1 on 2020-09-27 12:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax Parliament.date_born to a nullable CharField and reset the table name.

    Auto-generated migration.
    """

    dependencies = [
        ('scrapingApp', '0003_auto_20200927_1555'),
    ]

    operations = [
        migrations.AlterField(
            model_name='parliament',
            name='date_born',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
        migrations.AlterModelTable(
            name='parliament',
            table=None,
        ),
    ]
|
import unittest
import numpy as np
import hpgeom as hpg
import healsparse
class FracdetTestCase(unittest.TestCase):
    """Tests for HealSparseMap.fracdet_map across value types.

    Each test builds a map where the first non_masked_px coverage pixels
    are valid, then checks the fracdet map against (a) the coverage map at
    the same nside and (b) an independently computed fraction at a finer
    target nside (see compute_fracdet_map).
    """
    def test_fracdet_map_float(self):
        """
        Test fracdet_map functionality for floats
        """
        nside_coverage = 16
        nside_fracdet = 32
        nside_map = 512
        non_masked_px = 10.5      # fractional => last coverage pixel partially filled
        nfine = (nside_map//nside_coverage)**2

        full_map = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
        full_map[0: int(non_masked_px*nfine)] = 1 + np.random.random(size=int(non_masked_px*nfine))
        sparse_map = healsparse.HealSparseMap(healpix_map=full_map, nside_coverage=nside_coverage)

        # Test that the fracdet map is equal to the coverage map with same nside_coverage
        fracdet_map1 = sparse_map.fracdet_map(nside_coverage)
        np.testing.assert_array_almost_equal(fracdet_map1[:], sparse_map.coverage_map)

        # Test that the fracdet map is good for target nside
        fracdet_map2 = sparse_map.fracdet_map(nside_fracdet)
        fracdet_map_orig = self.compute_fracdet_map(nside_map, nside_fracdet,
                                                    non_masked_px, nfine)
        np.testing.assert_array_almost_equal(fracdet_map2[:], fracdet_map_orig)

    def test_fracdet_map_int(self):
        """
        Test fracdet_map functionality for ints
        """
        nside_coverage = 16
        nside_fracdet = 32
        nside_map = 512
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2

        sentinel = healsparse.utils.check_sentinel(np.int32, None)
        full_map = np.zeros(hpg.nside_to_npixel(nside_map), dtype=np.int32) + sentinel
        full_map[0: int(non_masked_px*nfine)] = 1
        sparse_map = healsparse.HealSparseMap(healpix_map=full_map,
                                              nside_coverage=nside_coverage,
                                              sentinel=sentinel)

        # Test that the fracdet map is equal to the coverage map with same nside_coverage
        fracdet_map1 = sparse_map.fracdet_map(nside_coverage)
        np.testing.assert_array_almost_equal(fracdet_map1[:], sparse_map.coverage_map)

        # Test that the fracdet map is good for target nside
        fracdet_map2 = sparse_map.fracdet_map(nside_fracdet)
        fracdet_map_orig = self.compute_fracdet_map(nside_map, nside_fracdet,
                                                    non_masked_px, nfine)
        np.testing.assert_array_almost_equal(fracdet_map2[:], fracdet_map_orig)

    def test_fracdet_map_recarray(self):
        """
        Test fracdet_map functionality for recarrays
        """
        nside_coverage = 16
        nside_fracdet = 32
        nside_map = 512
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2

        dtype = [('a', np.float64),
                 ('b', np.int32)]
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                         dtype, primary='a')
        sparse_map.update_values_pix(np.arange(int(non_masked_px*nfine)),
                                     np.ones(1, dtype=dtype))

        # Test that the fracdet map is equal to the coverage map with same nside_coverage
        fracdet_map1 = sparse_map.fracdet_map(nside_coverage)
        np.testing.assert_array_almost_equal(fracdet_map1[:], sparse_map.coverage_map)

        # Test that the fracdet map is good for target nside
        fracdet_map2 = sparse_map.fracdet_map(nside_fracdet)
        fracdet_map_orig = self.compute_fracdet_map(nside_map, nside_fracdet,
                                                    non_masked_px, nfine)
        np.testing.assert_array_almost_equal(fracdet_map2[:], fracdet_map_orig)

    def test_fracdet_map_widemask(self):
        """
        Test fracdet_map functionality for wide masks
        """
        nside_coverage = 16
        nside_fracdet = 32
        nside_map = 512
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2

        # Do a 1-byte wide
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                         healsparse.WIDE_MASK,
                                                         wide_mask_maxbits=2)
        # Set bits in different columns
        sparse_map.set_bits_pix(np.arange(int(non_masked_px*nfine)), [1])

        # Test that the fracdet map is equal to the coverage map with same nside_coverage
        fracdet_map1 = sparse_map.fracdet_map(nside_coverage)
        np.testing.assert_array_almost_equal(fracdet_map1[:], sparse_map.coverage_map)

        # Test that the fracdet map is good for target nside
        fracdet_map2 = sparse_map.fracdet_map(nside_fracdet)
        fracdet_map_orig = self.compute_fracdet_map(nside_map, nside_fracdet,
                                                    non_masked_px, nfine)
        np.testing.assert_array_almost_equal(fracdet_map2[:], fracdet_map_orig)

        # Do a 3-byte wide
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                         healsparse.WIDE_MASK,
                                                         wide_mask_maxbits=24)
        # Set bits in different columns
        sparse_map.set_bits_pix(np.arange(int(2*nfine)), [2])
        sparse_map.set_bits_pix(np.arange(int(non_masked_px*nfine)), [20])

        # Test that the fracdet map is equal to the coverage map with same nside_coverage
        fracdet_map1 = sparse_map.fracdet_map(nside_coverage)
        np.testing.assert_array_almost_equal(fracdet_map1[:], sparse_map.coverage_map)

        # Test that the fracdet map is good for target nside
        fracdet_map2 = sparse_map.fracdet_map(nside_fracdet)
        fracdet_map_orig = self.compute_fracdet_map(nside_map, nside_fracdet,
                                                    non_masked_px, nfine)
        np.testing.assert_array_almost_equal(fracdet_map2[:], fracdet_map_orig)

    def test_fracdet_map_raises(self):
        """
        Test limitations of fracdet_map
        """
        nside_coverage = 16
        nside_map = 512
        non_masked_px = 10.5
        nfine = (nside_map//nside_coverage)**2

        full_map = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
        full_map[0: int(non_masked_px*nfine)] = 1 + np.random.random(size=int(non_masked_px*nfine))
        sparse_map = healsparse.HealSparseMap(healpix_map=full_map, nside_coverage=nside_coverage)

        # fracdet nside must lie between nside_coverage and nside_map.
        for nside_fracdet in [8, 1024]:
            self.assertRaises(ValueError, sparse_map.fracdet_map, nside_fracdet)

    def compute_fracdet_map(self, nside_map, nside_fracdet, non_masked_px, nfine):
        """Independent reference computation of the expected fracdet map."""
        bit_shift = healsparse.utils._compute_bitshift(nside_fracdet, nside_map)
        fracdet_map_orig = np.zeros(hpg.nside_to_npixel(nside_fracdet), dtype=np.float64)
        # Map each valid fine pixel to its fracdet pixel and count occupancy.
        idx_frac = np.right_shift(np.arange(int(non_masked_px*nfine)), bit_shift)
        unique_idx_frac = np.unique(idx_frac)
        idx_counts = np.bincount(idx_frac, minlength=hpg.nside_to_npixel(nside_fracdet)).astype(np.float64)
        nfine_frac = (nside_map//nside_fracdet)**2
        fracdet_map_orig[unique_idx_frac] = idx_counts[unique_idx_frac]/nfine_frac
        return fracdet_map_orig
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
"""
RainbowDash admin bot, includes code from autohelp.py and others.
Togglesmg from GreaseMonkey's whitelist script,
togglespade based off nogunall from hacktools.py
Adds /masterrefresh which will refresh the master connection every 30 min.
/togglesmg
/toggleshotty
/togglesemi
/togglespade
/noscope
/togglenade
/togglecaps
Modified by GreaseMonkey for cleanliness.
Also, soft tabs suck.
"""
from twisted.internet import reactor
import re
from commands import add, admin, name, get_player, alias
import commands
from blockinfo import grief_check
from aimbot2 import hackinfo
from pyspades.constants import *
from pyspades import contained as loaders
# Feature switches.
REFRESH_MASTER = True          # enable the /masterrefresh periodic reconnect loop
WEAPON_TOGGLE = True           # register the /toggle<weapon> commands
GRIEFCHECK_ON_VOTEKICK = True  # run grief_check when a votekick reason mentions griefing
HACKCHECK_ON_VOTEKICK = True   # run hackinfo when a votekick reason mentions hacking/aimbot
RACISM_BAN_LENGTH = 1440       # ban length in minutes for a repeat racism offence

# Display names / command-name suffixes per weapon constant.
WPN_NAME_LIST = {
    RIFLE_WEAPON: "Rifle",
    SMG_WEAPON: "SMG",
    SHOTGUN_WEAPON: "Shotgun",
}
WPN_SHORTCUT_LIST = {
    RIFLE_WEAPON: "semi",
    SMG_WEAPON: "smg",
    SHOTGUN_WEAPON: "shotty",
}

offenders = []
masterisrunning = 1  # state flag for the master refresh loop (1 = running)
# Shared packet instance reused by the fill/empty weapon helpers below.
weapon_reload = loaders.WeaponReload()

# Chat / name filters.
badwords = re.compile(".*(fuck|shit|bitch|cunt|cocksucker|nigg(er|a)|penis|admin).*", re.IGNORECASE)
slurpat = re.compile(".*(nigg(er|a|3r)|niqq(er|a|3r)).*", re.IGNORECASE)
griefpat = re.compile(".*(gr.*f.*(ing|er)|grief|destroy|gief|geif|giraf).*", re.IGNORECASE)
aimbotpat = re.compile(".*(aim|bot|ha(ck|x)|cheat|h4x|hex|hacer).*", re.IGNORECASE)
def slur_match(player, msg):
    """Return True when the chat message matches the slur pattern."""
    return slurpat.match(msg) is not None
def name_match(player):
    """Return True when the player's name matches the banned-words pattern."""
    return badwords.match(player.name) is not None
def name_empty(player):
    """Return True when the player's name is empty or a single space."""
    return player.name in ("", " ")
def grief_match(reason):
    """Return True when a votekick reason looks like a griefing complaint."""
    return griefpat.match(reason) is not None
def hack_match(reason):
    """Return True when a votekick reason looks like a hacking complaint."""
    return aimbotpat.match(reason) is not None
def fill_weapon(player):
    # Re-enable firing, restore the clip saved by empty_weapon(), and push
    # the new ammo state to the client.
    # NOTE(review): mutates the shared module-level weapon_reload packet.
    weapon = player.weapon_object
    weapon.set_shoot(True)
    weapon.current_ammo = player.bulletcount
    weapon_reload.player_id = player.player_id
    weapon_reload.clip_ammo = weapon.current_ammo
    weapon_reload.reserve_ammo = weapon.current_stock
    player.send_contained(weapon_reload)
def empty_weapon(player):
    # Disable firing and zero the clip, saving the current clip so
    # fill_weapon() can restore it later; notify the client.
    weapon = player.weapon_object
    weapon.set_shoot(False)
    player.bulletcount = weapon.current_ammo
    weapon.current_ammo = 0
    weapon_reload.player_id = player.player_id
    weapon_reload.clip_ammo = weapon.current_ammo
    weapon_reload.reserve_ammo = weapon.current_stock
    player.send_contained(weapon_reload)
def empty_weapon_full(player):
    # Disable firing and zero both clip and reserve (no saved state —
    # used by spade-only mode); notify the client.
    weapon = player.weapon_object
    weapon.set_shoot(False)
    weapon.current_ammo = 0
    weapon.current_stock = 0
    weapon_reload.player_id = player.player_id
    weapon_reload.clip_ammo = weapon.current_ammo
    weapon_reload.reserve_ammo = weapon.current_stock
    player.send_contained(weapon_reload)
@admin
def status(self):
    # Report all server toggles; hasattr distinguishes an in-game caller
    # (send_chat) from an IRC caller (irc_say) — presumably; confirm.
    if hasattr(self, 'send_chat'):
        self.send_chat("--Server status--\n Rifle: %s\n SMG: %s\n Shotty: %s\n Grenades: %s\n Noscope: %s\n Spade War: %s" % (self.protocol.wpn_banned[RIFLE_WEAPON],self.protocol.wpn_banned[SMG_WEAPON],self.protocol.wpn_banned[SHOTGUN_WEAPON],self.protocol.nade_banned,self.protocol.noscope,self.protocol.spade_only))
    else:
        self.protocol.irc_say("--Server status--\n Rifle: %s\n SMG: %s\n Shotty: %s\n Grenades: %s\n Noscope: %s\n Spade War: %s" % (self.protocol.wpn_banned[RIFLE_WEAPON],self.protocol.wpn_banned[SMG_WEAPON],self.protocol.wpn_banned[SHOTGUN_WEAPON],self.protocol.nade_banned,self.protocol.noscope,self.protocol.spade_only))
add(status)
@admin
def noscope(connection):
    """Toggle noscope-only mode and announce the change in chat and IRC."""
    protocol = connection.protocol
    protocol.noscope = not protocol.noscope
    announcement = (
        "Server is now noscopes only! If you use your scope, you will run out of ammo!"
        if protocol.noscope
        else "Server is back to normal, snipe away!")
    protocol.send_chat(announcement, irc = True)
add(noscope)
@admin
def masterrefresh(self):
    # Toggle the 30-minute master-server refresh loop (see masterloop below).
    global masterisrunning
    if not REFRESH_MASTER:
        if hasattr(self, 'send_chat'):
            self.send_chat("Command not enabled")
        else:
            self.protocol.irc_say("Command not enabled")
        return
    elif not masterisrunning:
        # Was off: turn it on and start the loop immediately.
        masterisrunning = 1
        if hasattr(self, 'send_chat'):
            self.send_chat("Master will now be refreshed every 30 min.")
        else:
            self.protocol.irc_say("Master will now be refreshed every 30 min.")
        masterloop(self)
    else:
        # Was on: turn it off; the scheduled loop becomes a no-op.
        masterisrunning = 0
        if hasattr(self, 'send_chat'):
            self.send_chat("Master refresh is now off.")
        else:
            self.protocol.irc_say("Master refresh is now off.")
        return
add(masterrefresh)
def masterloop(self):
    # While enabled, toggle the master connection state twice (i.e. force a
    # reconnect) and reschedule itself every 1800 s (30 min).
    if REFRESH_MASTER and masterisrunning:
        protocol = self.protocol
        protocol.set_master_state(not protocol.master)
        protocol.set_master_state(not protocol.master)
        protocol.irc_say("Master was toggled")
        reactor.callLater(1800,masterloop,self)
@admin
def togglecaps(connection, player = None):
    """Toggle forcing a player's chat to lowercase (self by default)."""
    protocol = connection.protocol
    if player is not None:
        target = get_player(protocol, player)
    elif connection in protocol.players:
        target = connection
    else:
        # Neither an explicit target nor an in-game caller.
        raise ValueError()
    target.no_caps = not target.no_caps
    verdict = "no longer" if target.no_caps else "now"
    protocol.irc_say("%s can %s use caps" % (target.name, verdict))
add(togglecaps)
@admin
def togglenade(connection):
    """Enable/disable grenades server-wide, zeroing or restoring everyone's supply."""
    protocol = connection.protocol
    protocol.nade_banned = not protocol.nade_banned
    banned = protocol.nade_banned
    if banned:
        announcement = "%s disabled grenades!" % (connection.name)
    else:
        announcement = "Grenades are enabled"
    for player in protocol.players.itervalues():
        player.grenades = 0 if banned else 3
    protocol.send_chat(announcement, irc = True)
add(togglenade)
@admin
def togglespade(connection):
    """Toggle spade-only (melee) mode: strip all weapons or refill everyone."""
    protocol = connection.protocol
    protocol.spade_only = not protocol.spade_only
    if protocol.spade_only:
        announcement = "%s incited a melee rampage!" % (connection.name)
        for combatant in protocol.players.itervalues():
            empty_weapon_full(combatant)
    else:
        announcement = "Melee rampage is over, snipe away!"
        for combatant in protocol.players.itervalues():
            combatant.refill()
    protocol.send_chat(announcement, irc = True)
add(togglespade)
def add_toggle_wpn(weapon_token):
    # Factory: build a /toggle<weapon> admin command for one weapon type.
    banned_name = WPN_SHORTCUT_LIST[weapon_token]
    weapon_name = WPN_NAME_LIST[weapon_token]
    @admin
    def _f1(connection):
        protocol = connection.protocol
        protocol.wpn_banned[weapon_token] = not protocol.wpn_banned[weapon_token]
        # Refuse to disable the last remaining weapon; revert the toggle.
        if protocol.wpn_banned[RIFLE_WEAPON] and protocol.wpn_banned[SMG_WEAPON] and protocol.wpn_banned[SHOTGUN_WEAPON]:
            if hasattr(connection, 'send_chat'):
                connection.send_chat("Cannot disable all weapons")
            protocol.irc_say("Cannot disable all weapons")
            protocol.wpn_banned[weapon_token] = False
            return
        status = "disabled" if protocol.wpn_banned[weapon_token] else "enabled"
        if protocol.wpn_banned[weapon_token]:
            # Move every player holding the newly banned weapon to an allowed one.
            for pv in protocol.players:
                p = protocol.players[pv]
                if p.weapon == weapon_token:
                    if not protocol.wpn_banned[RIFLE_WEAPON]:
                        weapon = RIFLE_WEAPON
                    elif not protocol.wpn_banned[SHOTGUN_WEAPON]:
                        weapon = SHOTGUN_WEAPON
                    else:
                        weapon = SMG_WEAPON
                    p.send_chat("%s disabled - weapon changed" % weapon_name)
                    p.set_weapon(weapon, False, False)
        if hasattr(connection, 'send_chat'):
            connection.send_chat("%s is now %s" % (weapon_name, status))
        protocol.irc_say("%s %s by %s" % (weapon_name, status, connection.name))
    # Register under the command name "toggle<shortcut>", e.g. /togglesmg.
    return name("toggle%s" % banned_name)(_f1)

# Register one toggle command per weapon, if the feature is enabled.
for wpn in [RIFLE_WEAPON, SMG_WEAPON, SHOTGUN_WEAPON]:
    if WEAPON_TOGGLE:
        add(add_toggle_wpn(wpn))
def apply_script(protocol, connection, config):
    # pyspades script entry point: returns subclassed protocol/connection.
    # Server-wide toggles, all off by default.
    protocol.wpn_banned = {
        RIFLE_WEAPON: False,
        SMG_WEAPON: False,
        SHOTGUN_WEAPON: False,
    }
    protocol.noscope = False
    protocol.spade_only = False
    protocol.nade_banned = False
    protocol.racist = []  # IPs already warned once for racism
    protocol.muted = []   # IPs currently muted
    connection.no_caps = False

    def unmuteracist(connection):
        # Lift a racism mute after the 5-minute timeout.
        # NOTE(review): pops the most recently muted IP, which may not be
        # this connection's IP if several players were muted — confirm.
        connection.mute = False
        message = '%s has been unmuted.' % (connection.name)
        protocol.muted.pop()
        connection.protocol.send_chat(message, irc = True)

    def slurpunish(connection, reason = "Racist"):
        # First offence: mute for 300 s; repeat offence: timed ban.
        if not connection.address[0] in protocol.racist:
            protocol.racist[:0] = [connection.address[0]]
            protocol.muted[:0] = [connection.address[0]]
            connection.mute = True
            message = '%s has been muted for Racism. Next offence will result in a ban' % (connection.name)
            connection.protocol.send_chat(message, irc = True)
            reactor.callLater(300.0,unmuteracist,connection)
        else:
            connection.ban("Autoban: Repeat Racist", RACISM_BAN_LENGTH)

    def checkname(self):
        # Nag a player with a banned/empty name, then kick shortly after.
        if name_match(self) or name_empty(self):
            for i in range(10):
                self.send_chat("Please change your name")
            reactor.callLater(5.0,namepunish,self)

    def namepunish(connection):
        connection.kick("Please get a new name")

    class AdminbotConnection(connection):
        def on_secondary_fire_set(self, secondary):
            # Noscope mode: scoping in empties the weapon; scoping out
            # refills it one second later.
            if self.tool == WEAPON_TOOL:
                if secondary:
                    if self.protocol.noscope:
                        self.send_chat("You can't kill people while scoped in!")
                        empty_weapon(self)
                else:
                    if self.protocol.noscope:
                        reactor.callLater(1,fill_weapon,self)
            return connection.on_secondary_fire_set(self, secondary)

        def on_grenade(self, time_left):
            # Swallow grenade throws while grenades are banned.
            if self.protocol.nade_banned:
                pass
            else:
                return connection.on_grenade(self,time_left)

        def on_refill(self):
            if self.protocol.nade_banned:
                self.grenades = 0
            if self.protocol.spade_only:
                reactor.callLater(.1,empty_weapon_full,self)
            return connection.on_refill(self)

        def on_shoot_set(self, fire):
            if self.protocol.spade_only:
                reactor.callLater(.1,empty_weapon_full,self)
            return connection.on_shoot_set(self, fire)

        def on_spawn(self, pos):
            if self.protocol.nade_banned:
                self.grenades = 0
            if self.protocol.spade_only:
                reactor.callLater(.1,empty_weapon_full,self)
            return connection.on_spawn(self, pos)

        def on_chat(self, value, global_message):
            if self.no_caps:
                value = value.lower()
            if slur_match(self, value):
                reactor.callLater(0.0, slurpunish, self)
            return connection.on_chat(self, value, global_message)

        def on_team_join(self, team):
            # Re-check name on join and re-apply an outstanding mute.
            reactor.callLater(0.5,checkname,self)
            if self.address[0] in self.protocol.muted:
                self.mute = True

        def on_weapon_set(self, wpnid):
            if self.protocol.wpn_banned[wpnid]:
                self.send_chat("%s is disabled" % WPN_NAME_LIST[wpnid])
                return False
            return connection.on_weapon_set(self, wpnid)

        def set_weapon(self, weapon, local = False, no_kill = False, *args, **kwargs):
            # Redirect attempts to pick a banned weapon to an allowed one.
            if self.protocol.wpn_banned[weapon]:
                self.send_chat("%s is disabled" % WPN_NAME_LIST[weapon])
                if not self.protocol.wpn_banned[RIFLE_WEAPON]:
                    weapon = RIFLE_WEAPON
                elif not self.protocol.wpn_banned[SHOTGUN_WEAPON]:
                    weapon = SHOTGUN_WEAPON
                else:
                    weapon = SMG_WEAPON
                if local:
                    no_kill = True
                    local = False
            return connection.set_weapon(self, weapon, local, no_kill, *args, **kwargs)

    class AdminbotProtocol(protocol):
        def on_votekick_start(self, instigator, victim, reason):
            result = protocol.on_votekick_start(self, instigator, victim, reason)
            # On a grief-flavoured reason, run the block-log check (IRC relay
            # of the results is currently disabled).
            if result is None and GRIEFCHECK_ON_VOTEKICK and grief_match(reason):
                message = grief_check(instigator, victim.name)
                message2 = grief_check(instigator, victim.name,5)
                #irc_relay = instigator.protocol.irc_relay
                #if irc_relay.factory.bot and irc_relay.factory.bot.colors:
                #    message = '\x02* ' + message + '\x02'
                #    message2 = '\x02* ' + message2 + '\x02'
                #irc_relay.send(message)
                #irc_relay.send(message2)
                #self.irc_say(grief_check(instigator, victim.name))
                #self.irc_say(grief_check(instigator, victim.name,5))
            # On a hack-flavoured reason, relay the aimbot report to IRC.
            if result is None and HACKCHECK_ON_VOTEKICK and hack_match(reason):
                message = hackinfo(instigator, victim.name)
                irc_relay = instigator.protocol.irc_relay
                if irc_relay.factory.bot and irc_relay.factory.bot.colors:
                    message = '\x0304* ' + message + '\x0f'
                irc_relay.send(message)
            return result

    return AdminbotProtocol, AdminbotConnection
|
#test.py
#!/usr/bin/python
import sys
import os
import os.path
import getopt
def usage():
    """Print command-line help for the fc (file count/size) utility."""
    print("Syntax: fc [OPTIONS] [PATH]")
    print("Options:")
    print("-h/--help: Prints out this help section")
    print("-f/--files: Prints name and number of files in the directory. Also prints total number of files")
    print("-s/--sizes: prints name and size of files in the directory. Also prints tota file size")
    print("-q/--quiet: Only prints the total number of files (if -f is present) or total size (if -s is present). No per-file/directory info will be printed")
def main(argv):
    """Walk a directory tree and report file counts and/or sizes.

    argv: the argument list excluding the program name (sys.argv[1:]).

    Fixes over the original:
    - getopt spec was "hq:fs": the ':' wrongly made -q consume the next
      argument (often swallowing -s/-f) instead of being a bare flag.
    - the -q branch re-parsed argv positionally (argv[1]/argv[2]), so flag
      order mattered and duplicated the walk logic.
    - quiet mode still printed per-directory lines, contradicting usage().
    - a missing PATH raised IndexError; default to the current directory.
    """
    try:
        opts, args = getopt.getopt(argv, "hqfs", ["help", "files", "sizes", "quiet"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    flags = {opt for opt, _ in opts}
    if flags & {"-h", "--help"}:
        usage()
        sys.exit(2)

    quiet = bool(flags & {"-q", "--quiet"})
    path = args[0] if args else "."

    if flags & {"-f", "--files"}:
        totfiles = 0
        for (dirpath, dirnames, filenames) in os.walk(path):
            if not quiet:
                print("Directory Name: {}".format(dirpath))
                print("Files in Directory: {}".format(len(filenames)))
            totfiles += len(filenames)
        print("Total number of files={}".format(totfiles))

    if flags & {"-s", "--sizes"}:
        totsize = 0
        for (dirpath, dirnames, filenames) in os.walk(path):
            if not quiet:
                print("Directory name: {}".format(dirpath))
            for fname in filenames:
                fsize = os.path.getsize(os.path.join(dirpath, fname))
                if not quiet:
                    print("File name: {} File size: {} bytes".format(fname, fsize))
                totsize += fsize
        print("Total Size={} bytes".format(totsize))
# Run only when executed as a script; forward everything after the program name.
if __name__=="__main__":
    main(sys.argv[1:])
|
#late fusion
import itertools
import random
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import time
import argparse
import torch.utils.data as utils_data
from sklearn.model_selection import GridSearchCV,KFold
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
# Command-line interface: recurrent-cell type and checkpoint path.
parser = argparse.ArgumentParser(description='PyTorch RNN/LSTM/GRU Language Model')
parser.add_argument('--model', type=str, default='lstm',
                    help='type of recurrent net (rnn, gru, lstm)')
parser.add_argument('--save', type=str, default='model.pt',
                    help='path to save the final model')
args = parser.parse_args()
batch_size = 1

#training
# le - a
# li - b
# we - c
# wi - d
# je - e
# ji - f
# de - g
# di - h
#testing
# ba - i
# po -j
# ko - k
# ga - l
# Disjoint symbol alphabets for training and testing.
alp1 = ['c','d','e','f', 'i', 'j']
alp2 = ['g', 'a', 'b','h', 'k', 'l']
# All length-3 sequences over each alphabet.
p1 = list(itertools.product(alp1,repeat=3))
p2 = list(itertools.product(alp2,repeat=3))
data1 = [list(i) for i in p1]
data2 = [list(i) for i in p2]
train_aba_patterns, train_abb_patterns, train_abc_patterns, train_aaa_patterns,train_aab_patterns = [], [], [], [],[]
test_aba_patterns, test_abb_patterns, test_abc_patterns, test_aaa_patterns, test_aab_patterns = [], [], [], [], []
# Keep only ABA patterns: first == last, middle different.
for i in data1:
    if(i[0]==i[2] and i[0]!= i[1] and i[1]!=i[2]):
        train_aba_patterns.append(i)
random.shuffle(train_aba_patterns)
for i in data2:
    if (i[0]==i[2] and i[0]!= i[1] and i[1]!=i[2]):
        test_aba_patterns.append(i)
random.shuffle(test_aba_patterns)
aba_train_patterns = train_aba_patterns[:30]
aba_valid_patterns = test_aba_patterns[:15]
aba_test_patterns = test_aba_patterns[16:30]
alphabet_size = 12
sample_space = ['a','b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l']
sample_space_len = len(sample_space)
# decoding char from int
char_from_int = dict((i, c) for i, c in enumerate(sample_space))
char_to_int = dict((c, i) for i, c in enumerate(sample_space))
training_data, validation_data, testing_data = [], [], []
tr_data, ts_data, vs_data, vs_tar_data, tr_tar_data, ts_tar_data = [], [], [], [], [], []
# Integer-encode every pattern.
for i in aba_train_patterns:
    training_data.append([char_to_int[char] for char in i])
for i in aba_valid_patterns:
    validation_data.append([char_to_int[char] for char in i])
for i in aba_test_patterns:
    testing_data.append([char_to_int[char] for char in i])
# Split each encoded triple into inputs (first two symbols) and target (last).
for i in training_data:
    tr_data.append(i[:-1])
for i in training_data:
    tr_tar_data.append(i[-1:])
for i in validation_data:
    vs_data.append(i[:-1])
for i in validation_data:
    vs_tar_data.append(i[-1:])
for i in testing_data:
    ts_data.append(i[:-1])
for i in testing_data:
    ts_tar_data.append(i[-1:])
out, inp1, inp2 = [], [], []
diff1, diff2, diff3 = [], [], []
# Extra "difference" feature: |first symbol - second symbol| per input pair.
for i in tr_data:
    diff1.append(abs(i[0]-i[1]))
for i in vs_data:
    diff2.append(abs(i[0]-i[1]))
for i in ts_data:
    diff3.append(abs(i[0]-i[1]))
# x1 = Variable(torch.cat(inp1), requires_grad=True)
# x2 = Variable(torch.cat(inp2), requires_grad=True)
# y = Variable(torch.cat(out), requires_grad=False)
#
#
# x_1 = Variable(torch.cat(inp3), requires_grad=True)
# x_2 = Variable(torch.cat(inp4), requires_grad=True)
# y_y = Variable(torch.cat(out_t), requires_grad=False)
DR_train_list, DR_valid_list, DR_test_list = [],[],[]
diff1 = [[i] for i in diff1]
diff2 = [[i] for i in diff2]
diff3 = [[i] for i in diff3]
# Append the difference feature to each input pair.
for i,j in zip(tr_data, diff1):
    DR_train_list.append(i+j)
for i,j in zip(vs_data, diff2):
    DR_valid_list.append(i+j)
for i,j in zip(ts_data, diff3):
    DR_test_list.append(i+j)
# Tensor conversion; commented alternatives use the plain (no-diff) inputs.
#t1 = [torch.LongTensor(np.array(i)) for i in tr_data]
t1 = [torch.LongTensor(np.array(i)) for i in DR_train_list]
f1 = [torch.LongTensor(np.array(i)) for i in tr_tar_data]
t2 = [torch.LongTensor(np.array(i)) for i in DR_valid_list]
#t2 = [torch.LongTensor(np.array(i)) for i in vs_data]
f2 = [torch.LongTensor(np.array(i)) for i in vs_tar_data]
t3 = [torch.LongTensor(np.array(i)) for i in DR_test_list]
#t3 = [torch.LongTensor(np.array(i)) for i in ts_data]
f3 = [torch.LongTensor(np.array(i)) for i in ts_tar_data]
dr_units = [[0,0,0,0,0,0,0,0.8,0.1,0,0,0], [0,0,0,0,0,0,0,0,0,0.9,0.1,0]]
dr_data1 = [torch.FloatTensor(i) for i in dr_units]
# Wrap as datasets / single-sample loaders.
training_samples = utils_data.TensorDataset(torch.stack(t1),torch.stack(f1))
validation_samples = utils_data.TensorDataset(torch.stack(t2),torch.stack(f2))
testing_samples = utils_data.TensorDataset(torch.stack(t3),torch.stack(f3))
dataloader1 = utils_data.DataLoader(training_samples,1)
dataloader2 = utils_data.DataLoader(validation_samples,1)
dataloader3 = utils_data.DataLoader(testing_samples,1)
class CharRNN(torch.nn.Module):
    """Character-level recurrent net: embedding -> RNN/GRU/LSTM -> linear -> softmax.

    Args:
        input_size: vocabulary size (number of distinct symbols).
        hidden_size: embedding and recurrent hidden dimension.
        output_size: number of output classes.
        model: one of "rnn", "gru", "lstm" (case-insensitive).
        n_layers: number of stacked recurrent layers.
    """
    def __init__(self, input_size, hidden_size, output_size, model, n_layers):
        super(CharRNN, self).__init__()
        self.model = model.lower()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.embed = torch.nn.Embedding(input_size, hidden_size)
        if self.model == "gru":
            self.rnn = torch.nn.GRU(hidden_size, hidden_size, n_layers)
        elif self.model == "lstm":
            self.rnn = torch.nn.LSTM(hidden_size, hidden_size, n_layers)
        elif self.model == "rnn":
            self.rnn = torch.nn.RNN(hidden_size, hidden_size, n_layers)
        self.h2o = torch.nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        """One step: returns (softmax probabilities of shape (batch, output_size), new hidden)."""
        batch_size = input.size(0)
        encoded = self.embed(input.view(1,-1))
        output, hidden = self.rnn(encoded.view(1, batch_size, -1), hidden)
        out = self.h2o(output.view(batch_size, -1))
        # FIX: F.softmax without dim is deprecated (implicit-dim warning);
        # dim=-1 is the dimension it implicitly used for this 2-D tensor,
        # so the result is unchanged.
        out1 = F.softmax(out, dim=-1)
        return out1, hidden

    def init_hidden(self, batch_size):
        """Zero initial hidden state ((h, c) tuple for LSTM, single tensor otherwise)."""
        if self.model == "lstm":
            return (torch.autograd.Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)),
                    torch.autograd.Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)))
        return torch.autograd.Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size))
class CharRNN1(torch.nn.Module):
    """Variant of CharRNN that additionally receives a "DR unit" activation
    vector in forward() and defines a dropout layer plus an extra linear map.
    """
    def __init__(self, input_size, hidden_size, output_size, model, n_layers):
        super(CharRNN1, self).__init__()
        self.model = model.lower()  # one of "gru" | "lstm" | "rnn"
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = torch.nn.Dropout(0.2)  # NOTE(review): defined but never applied in forward()
        self.embed = torch.nn.Embedding(input_size, hidden_size)
        if self.model == "gru":
            self.rnn = torch.nn.GRU(hidden_size, hidden_size, n_layers)
        elif self.model == "lstm":
            self.rnn = torch.nn.LSTM(hidden_size, hidden_size, n_layers)
        elif self.model == "rnn":
            self.rnn = torch.nn.RNN(hidden_size, hidden_size, n_layers)
        self.h2h = torch.nn.Linear(output_size, output_size)
        self.h2o = torch.nn.Linear(hidden_size, output_size)
    def forward(self, input, hidden, dr_data):
        # Embed the character ids and advance the recurrent state.
        batch_size = input.size(0)
        encoded = self.embed(input.view(1,-1))
        output, hidden = self.rnn(encoded.view(1, batch_size, -1), hidden)
        out = self.h2o(output.view(batch_size, -1))
        # NOTE(review): the assignment below OVERWRITES the h2o projection
        # computed just above, so the RNN output never reaches the return
        # value — only dr_data does.  Possibly `out = out + self.h2h(...)`
        # was intended; confirm before relying on this model.
        out = self.h2h(dr_data.view(batch_size,-1))
        return out, hidden
    def init_hidden(self, batch_size):
        # LSTMs carry a (hidden, cell) pair; GRU/RNN use a single tensor.
        if self.model == "lstm":
            return (torch.autograd.Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)),
                    torch.autograd.Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)))
        return torch.autograd.Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size))
# NOTE(review): CharRNN's last positional parameter is n_layers, but
# batch_size is passed here — confirm the intended stack depth.
model = CharRNN(alphabet_size, 20, alphabet_size, args.model, batch_size)
model1 = CharRNN1(alphabet_size, 20, alphabet_size, args.model, batch_size)
# Optimizer/criterion for the primary model; CrossEntropyLoss expects raw logits.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
def repackage_hidden(h):
    """Detach hidden-state tensors from the autograd graph.

    Accepts a single Tensor or an arbitrarily nested tuple of Tensors
    (e.g. an LSTM's (h, c) pair) and returns the same structure with every
    tensor detached from its history, so backprop stops at this boundary.
    """
    if not isinstance(h, torch.Tensor):
        return tuple(repackage_hidden(part) for part in h)
    return h.detach()
def accu(y_true, y_pred):
    """Fraction of positions where predictions equal labels.

    y_pred: iterable of numpy arrays of predicted labels.
    y_true: iterable of label sequences.
    Both are flattened (and y_true reshaped to match) before comparison.
    """
    predictions = np.concatenate(tuple(y_pred))
    flattened_true = tuple([[label for label in seq] for seq in y_true])
    labels = np.concatenate(flattened_true).reshape(predictions.shape)
    return (labels == predictions).sum() / float(len(labels))
def evaluate():
    """Run the test loader through model1 once per DR-unit pattern.

    Returns:
        (average test loss as a float, number of correct predictions).
    """
    model1.eval()  # disable dropout for evaluation
    total_loss = 0.0
    correct = 0
    total = 0
    hidden = model1.init_hidden(batch_size)
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, targets in dataloader3:
            for pattern in dr_data1:
                hidden = repackage_hidden(hidden)
                # Feed the sequence one character at a time.
                for c in range(inputs.size()[0]):
                    output, hidden = model1(Variable(inputs)[:, c], hidden,
                                            Variable(pattern).view(-1, 12))
                output = output.view(1, -1)
                # BUG FIX: accumulate the per-example loss; the original used
                # `=`, keeping only the last example's loss.  .item() also
                # replaces the deprecated `total_loss[0]` indexing.
                total_loss += criterion(output, Variable(targets).view(-1)).item()
                values, predicted = torch.max(output, 1)
                total += targets.size(0)
                correct += (predicted.view(-1, 1) == Variable(targets)).sum()
    # BUG FIX: normalise by the test loader's length (was dataloader1).
    return total_loss / len(dataloader3), correct
def train():
    """One optimisation pass over the training loader.

    Returns:
        (average training loss as a float, number of correct predictions).
    """
    model.train()
    total_loss = 0.0
    correct = 0
    hidden = model.init_hidden(batch_size)
    for inputs, targets in dataloader1:
        # BUG FIX: detach the hidden state at each batch boundary.  The
        # original carried the graph across batches and called backward()
        # on an ever-growing loss sum, which backpropagates through freed
        # graphs and raises at runtime.
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        # Feed the sequence one character at a time; loss on the final output.
        for c in range(inputs.size()[0]):
            output, hidden = model(Variable(inputs)[:, c], hidden)
        loss = criterion(output, Variable(targets).view(-1))
        loss.backward()
        optimizer.step()
        # BUG FIX: accumulate a plain float (.item()) instead of a graph
        # tensor; also replaces the deprecated `.data[0]` access.
        total_loss += loss.item()
        values, predicted = torch.max(output, 1)
        correct += (predicted.view(-1, 1) == Variable(targets)).sum()
    return total_loss / len(dataloader1), correct
def validate():
    """One pass over the validation loader (no parameter updates).

    Returns:
        (average validation loss as a float, number of correct predictions).
    """
    model.eval()
    total_loss = 0.0
    correct = 0
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():  # validation never needs gradients
        for inputs, targets in dataloader2:
            hidden = repackage_hidden(hidden)
            for c in range(inputs.size()[0]):
                output, hidden = model(Variable(inputs)[:, c], hidden)
            # BUG FIX: .item() replaces the deprecated `.data[0]` access and
            # avoids accumulating a graph tensor.
            total_loss += criterion(output, Variable(targets).view(-1)).item()
            values, predicted = torch.max(output, 1)
            correct += (predicted.view(-1, 1) == Variable(targets)).sum()
    return total_loss / len(dataloader2), correct
# Loop over epochs: run `nsim` independent simulations, each training a fresh
# model for 9 epochs, checkpointing on the best validation loss, then
# evaluating the best checkpoint on the test set.
lr = 0.1
best_val_loss = None
acc = 0
nsim = 10
for sim in range(nsim):
    # NOTE(review): batch_size is passed where CharRNN expects n_layers —
    # confirm the intended stack depth.
    model = CharRNN(alphabet_size, 20, alphabet_size, 'lstm', batch_size)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()
    try:
        for epoch in range(1, 10):
            epoch_start_time = time.time()
            loss, cr = train()
            val_loss, corr = validate()
            if not best_val_loss or val_loss < best_val_loss:
                # Checkpoint the best model seen so far.
                with open(args.save, 'wb') as f:
                    torch.save(model, f)
                best_val_loss = val_loss
                print('best val loss after ', best_val_loss)
                print('training loss', loss)
            else:
                # BUG FIX: anneal the learning rate *down* when validation
                # stops improving; the original `lr /= 0.001` increased it a
                # thousand-fold.  NOTE(review): the new lr only takes effect
                # when the optimizer is recreated for the next simulation.
                lr /= 4.0
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early')
    # Load the best saved model.
    with open(args.save, 'rb') as f:
        model = torch.load(f)
    test_loss, correct = evaluate()
    print('Simulation: ', sim, 'test loss', test_loss)
    # BUG FIX: scale by the scalar 100, not the one-element list [100]
    # (multiplying a numpy array by a list broadcasts instead of scaling).
    accuracy = (correct.data.numpy() * 100) / len(dataloader3)
    print('Accuracy of the network {} %'.format(accuracy))
    acc += accuracy
print('Avg Accuracy: ', acc / nsim)
|
# -*- coding: utf-8 -*-
"""
Application Values
"""
TEMP_CHAUDIERE_FAILURE_DEFAULT = 56 # Min water temp before alerting of a potential boiler ("chaudiere") failure
CHAUDIERE_DB_ROTATE_HOURS_DEFAULT = 5
CHAUDIERE_MINUTE_DB_ROTATE_DAYS_DEFAULT = 35
ALERTS_ENABLE_DEFAULT = False # Boolean to disable alerts (SMS/Mail) unless enabled by user
"""
Phases
"""
# Phase identifiers reported for the boiler's operating state.
PHASE_UNDEFINED = 0
PHASE_COMBUSTION = 6
PHASE_ALLUMAGE = 7
PHASE_MAINTIEN = 8
PHASE_ARRET = 9
PHASE_RISQUE_BOURAGE = 10
PHASE_SURVEILLANCE = 11 # same as COMBUSTION, but operation should be monitored
# Background colour used to render each phase in charts.
PhaseColor = {
PHASE_UNDEFINED : '#f2f2f2', # grey
PHASE_COMBUSTION : '#e6f2ff', # blue
PHASE_ALLUMAGE : '#ffff99', # yellow
PHASE_MAINTIEN : '#e6ffe6', # green
PHASE_ARRET : '#ff4d4d', # red
PHASE_RISQUE_BOURAGE : '#b32400', # dark red
PHASE_SURVEILLANCE : '#ff80b3', # pink
}
# Human-readable labels shown in the UI (French by design — runtime strings).
PhaseName = {
PHASE_UNDEFINED : 'Non defini',
PHASE_COMBUSTION : 'Combustion',
PHASE_ALLUMAGE : 'Allumage',
PHASE_MAINTIEN : 'Maintien de feu',
PHASE_ARRET : 'Arret',
PHASE_RISQUE_BOURAGE : 'Silo vide ou Risque Bourrage',
PHASE_SURVEILLANCE : 'Combustion a surveiller',
}
"""
Events
"""
# Event identifiers logged by the application.
EVENT_ALERT = 0
EVENT_ALLUMAGE = 1
EVENT_ARRET = 2
EventColor = {
EVENT_ALERT : '#f2f2f2', # grey
EVENT_ALLUMAGE : '#e6f2ff', # blue
EVENT_ARRET : '#ffff99', # yellow
}
EventName = {
EVENT_ALERT : 'Alert mail/sms',
EVENT_ALLUMAGE : 'Allumage',
EVENT_ARRET : 'Arret',
}
"""
Physical inputs / database fields
"""
# Sensor/actuator channel indices.
TEMP_CHAUDIERE = 0
TEMP_FUMEE = 1
TEMP_BOITIER = 2
TEMP_CONSIGNE = 8
VENT_SECONDAIRE = 3
ALLUMAGE = 4
VENT_PRIMAIRE = 5
ALIMENTATION = 6
PHASE = 7
# Maps each channel index to its database column name.
InputDb = {
TEMP_CHAUDIERE : 'temp0',
TEMP_FUMEE : 'temp1',
TEMP_BOITIER : 'temp2',
TEMP_CONSIGNE : 'temp3',
VENT_SECONDAIRE : 'watt0',
ALLUMAGE : 'watt1',
VENT_PRIMAIRE : 'watt2',
ALIMENTATION : 'watt3',
PHASE : 'phase'
}
# Maps each channel index to its display name (French runtime strings).
InputName = {
TEMP_CHAUDIERE : 'Temp chaudiere' ,
TEMP_FUMEE : 'Temp fumee' ,
TEMP_BOITIER : 'Temp boitier' ,
TEMP_CONSIGNE : 'Temp consigne' ,
VENT_SECONDAIRE : 'Vent secondaire' ,
ALLUMAGE : 'Allumage' ,
VENT_PRIMAIRE : 'Vent primaire' ,
ALIMENTATION : 'Alimentation' ,
PHASE : 'Phase'
}
""" ChartLabel """
# Template for a Highcharts "flags" series pinned to the data series.
ChartLabel = {
"type": 'flags',
"name": '',
"data": [],
"onSeries": 'dataseries',
"shape": 'squarepin',
"showInLegend": False
}
""" ChartLegend """
# Chart legend title; the loop below appends one coloured chip per phase.
ChartLegend = {
"text": '<b>Phases</b> : ',
"useHTML": True,
"verticalAlign": 'top',
"y": 55,
}
# Build the legend HTML: one colour-coded <span> per phase name.
for key in PhaseName.keys():
    ChartLegend['text'] += '<span style="background-color: '+PhaseColor[key]+\
        '; border-radius: 3px; padding: 2px 6px; margin: 4px 5px;">' +\
        PhaseName[key] + '</span>'
|
from rich import box
from rich.layout import Layout
from rich.prompt import Prompt
from rich.table import Table
from .prettify_ldma import Header, make_sponsor_message
class MenuLayout:
    """Renders the AUTOBOT main menu as a rich layout.

    The table contents and the layout skeleton are built exactly once in
    ``__init__``; ``__rich__`` simply returns the prepared layout.
    """

    def __init__(self):
        self._layout = Layout()
        self._table = Table(title="AUTOBOT MENU", expand=True,
                            show_lines=True, box=box.SQUARE_DOUBLE_HEAD,
                            title_style="#0000ff italic")
        # BUG FIX: columns and rows are added here, once.  The original added
        # them inside __rich__, which rich calls on every render, so each
        # refresh appended a duplicate set of columns and rows to the table.
        self._table.add_column("Action Button", justify="center", header_style="#3be13b", no_wrap=True, style="#3be13b")
        self._table.add_column("Action Description", justify="center", header_style="bold cyan", no_wrap=True,
                               style="cyan")
        self._table.add_row("1", "CREATE NCR 🧩")
        self._table.add_row("2", "CLOSE NCR 🎯")
        self._table.add_row("3", "CANCEL NCR 🧨")
        self._table.add_row("4", "LDMA PARSER 📆")
        self._table.add_row("0", "EXIT AUTOBOT ⚔")
        # Static layout skeleton: header strip on top, body below, body split
        # into the sponsor message and the menu table.
        self._layout.split(
            Layout(name="head", size=3),
            Layout(name="body", ratio=1),
        )
        self._layout["body"].split_column(
            Layout(name="mid_section", ratio=2),
            Layout(name="table", ratio=3)
        )
        self._layout["head"].update(Header("WELCOME TO AUTOBOT"))
        self._layout["mid_section"].update(make_sponsor_message())
        self._layout["table"].update(self._table)

    def __rich__(self):
        # Everything is prepared up front; just hand the layout to rich.
        return self._layout
def get_menu_choice() -> int:
    """Prompt the user for a menu option and return it as an integer."""
    valid_options = ["1", "2", "3", "4", "0"]
    return int(Prompt.ask("Enter choice", choices=valid_options))
|
from django.apps import AppConfig
class WsocketConfig(AppConfig):
    """Django application configuration for the WSocket app."""
    name = 'WSocket'
|
import copy
def testData():
    """Run the solver on the sample input and compare with the known answer.

    Reads the sample grid from ``test.txt`` and the expected result from
    ``answer.txt``; returns True when ``runCode`` reproduces it.
    """
    # BUG FIX: use context managers so both files are closed even on error;
    # the original leaked the two file handles.
    with open('test.txt', 'r') as otest:
        test = otest.readlines()
    with open('answer.txt', 'r') as oanswer:
        answer = oanswer.readline()
    status = False
    print("Runs test data")
    result = runCode(test)
    if result == int(answer): #not always int
        status = True
    print("Correct answer: " + answer + "My answer: " + str(result))
    return status
def runCode(data):
    """Seating simulation: apply the seat rules to the whole grid repeatedly
    until it stops changing, then return (and print) the number of occupied
    ('#') seats in the stable configuration.

    `data` is a list of strings, one row of the grid per string.
    """
    print("Runs code")
    #a list of numbers
    new_list = []
    similar = False
    while not similar:
        occupied = 0
        #print(data)
        new_list = []
        for ri in range(0, len(data)):
            row = data[ri]
            new_row = ''
            for si in range(0, len(row)):
                seat = row[si]
                # Flags saying which directions stay inside the grid for
                # this cell; passed to find_adjs to avoid out-of-range reads.
                up = False
                down = False
                right = False
                left = False
                if ri != 0: #If it's not on row 1, it's ok to search on the row above
                    up = True
                if ri != len(data)-1: #if it's not on the last row, it's ok to search on the row below
                    down = True
                if si != 0: #if it's not the first position on the row, it's ok to search to the left
                    left = True
                if si != len(row)-1: #if it's not the last position on the row, it's ok to search to the right
                    right = True
                adjacents = find_adjs(up, down, right, left, data, ri, si)
                new_seat = check_seat(seat, adjacents)
                new_row += new_seat
            # Count occupied seats once per completed row (row-loop level).
            occupied += new_row.count('#')
            new_list.append(new_row)
        #print("\n")
        #for line in new_list:
            #print(line)
        #if input("Continue?") == "\n":
            #continue
        if data != new_list:
            #print("Not similar, go on")
            # Grid changed: iterate again from the freshly built grid.
            data = copy.deepcopy(new_list)
        else:
            print("Similar, stop")
            similar = True
    print(occupied)
    return occupied
def find_adjs(up, down, right, left, data, ri, si):
    """Collect the neighbouring grid cells of position (ri, si).

    The four boolean flags say which directions stay inside the grid;
    diagonals are included only when both component directions are allowed.
    Neighbours are returned in a fixed scan order:
    up, up-right, up-left, down, down-right, down-left, right, left.
    """
    candidates = (
        (up,             ri - 1, si),
        (up and right,   ri - 1, si + 1),
        (up and left,    ri - 1, si - 1),
        (down,           ri + 1, si),
        (down and right, ri + 1, si + 1),
        (down and left,  ri + 1, si - 1),
        (right,          ri,     si + 1),
        (left,           ri,     si - 1),
    )
    return [data[r][c] for allowed, r, c in candidates if allowed]
def check_seat(seat, adjs):
    """Apply the seating rules to a single cell.

    An empty seat ('L') with no occupied neighbours becomes occupied ('#');
    an occupied seat with four or more occupied neighbours is vacated.
    Floor ('.') and every other case stay unchanged.
    """
    occupied_neighbours = adjs.count('#')
    if seat == 'L' and occupied_neighbours == 0:
        return '#'
    if seat == '#' and occupied_neighbours > 3:
        return 'L'
    return seat
# Run the solver on the sample data first; only attack the real puzzle input
# when the sample reproduces the known answer.
testResult = testData()
if testResult:  # idiomatic truth test instead of `== True`
    print("Test data parsed. Tries to run puzzle.")
    # BUG FIX: context manager closes the puzzle input; the original leaked
    # the file handle.
    with open('input.txt', 'r') as opuzzle:
        puzzle = opuzzle.readlines()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-02-23 12:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated Django migration: renames the Scores model to Score,
    swaps its default manager, and rebuilds the category/created_at indexes
    under the names Django generated for the renamed model.

    NOTE: generated code — index names must match what exists in the DB;
    edit with care.
    """

    dependencies = [
        ('leaderboard', '0003_auto_20180223_0848'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Scores',
            new_name='Score',
        ),
        migrations.AlterModelManagers(
            name='score',
            managers=[
                ('leaderboard', django.db.models.manager.Manager()),
            ],
        ),
        migrations.RemoveIndex(
            model_name='score',
            name='leaderboard_categor_112d0a_idx',
        ),
        migrations.RemoveIndex(
            model_name='score',
            name='leaderboard_created_8bf441_idx',
        ),
        migrations.AddIndex(
            model_name='score',
            index=models.Index(fields=['category', '-created_at'], name='leaderboard_categor_d00e0b_idx'),
        ),
        migrations.AddIndex(
            model_name='score',
            index=models.Index(fields=['created_at'], name='leaderboard_created_8a81ad_idx'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.