from random import randint
from flask import Flask, render_template, request, redirect
from events import get_events
from search_parse import parse
app = Flask(__name__)
@app.route("/")
def main():
image = "bg/"+str(randint(0,59)+1)+".jpg"
calendar = get_events()
return render_template('index.html', events=calendar,image=image)
@app.route('/', methods=['POST'])
def search():
    text = request.form['search']
    url = parse(text)
return redirect(url)
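
if __name__ == '__main__':
    # Run the Flask development server (a sketch: debug mode is an
    # assumption, not part of the original app).
    app.run(debug=True)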
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import sys
from textwrap import dedent
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals import export
from pants.backend.python.goals.export import ExportVenvsRequest, PythonResolveExportFormat
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import (
PythonDistribution,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.util_rules import local_dists_pep660, pex_from_targets
from pants.base.specs import RawSpecs
from pants.core.goals.export import ExportResults
from pants.core.util_rules import distdir
from pants.engine.internals.parametrize import Parametrize
from pants.engine.rules import QueryRule
from pants.engine.target import Targets
from pants.testutil.rule_runner import RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*export.rules(),
*pex_from_targets.rules(),
*target_types_rules.rules(),
*distdir.rules(),
*local_dists_pep660.rules(),
QueryRule(Targets, [RawSpecs]),
QueryRule(ExportResults, [ExportVenvsRequest]),
],
target_types=[PythonRequirementTarget, PythonSourcesGeneratorTarget, PythonDistribution],
objects={"python_artifact": PythonArtifact, "parametrize": Parametrize},
)
@pytest.mark.parametrize(
"py_resolve_format",
[
PythonResolveExportFormat.symlinked_immutable_virtualenv,
PythonResolveExportFormat.mutable_virtualenv,
],
)
def test_export_venv_new_codepath(
rule_runner: RuleRunner,
py_resolve_format: PythonResolveExportFormat,
) -> None:
# We know that the current interpreter exists on the system.
vinfo = sys.version_info
current_interpreter = f"{vinfo.major}.{vinfo.minor}.{vinfo.micro}"
rule_runner.write_files(
{
"src/foo/__init__.py": "from colors import *",
"src/foo/BUILD": dedent(
"""\
python_sources(name='foo', resolve=parametrize('a', 'b'))
python_distribution(
name='dist',
provides=python_artifact(name='foo', version='1.2.3'),
dependencies=[':foo@resolve=a'],
)
python_requirement(name='req1', requirements=['ansicolors==1.1.8'], resolve='a')
python_requirement(name='req2', requirements=['ansicolors==1.1.8'], resolve='b')
"""
),
"lock.txt": "ansicolors==1.1.8",
}
)
format_flag = f"--export-py-resolve-format={py_resolve_format.value}"
rule_runner.set_options(
[
f"--python-interpreter-constraints=['=={current_interpreter}']",
"--python-enable-resolves=True",
"--python-resolves={'a': 'lock.txt', 'b': 'lock.txt'}",
"--export-resolve=a",
"--export-resolve=b",
# Turn off lockfile validation to make the test simpler.
"--python-invalid-lockfile-behavior=ignore",
# Turn off python synthetic lockfile targets to make the test simpler.
"--no-python-enable-lockfile-targets",
"--export-py-editable-in-resolve=['a', 'b']",
format_flag,
],
env_inherit={"PATH", "PYENV_ROOT"},
)
all_results = rule_runner.request(ExportResults, [ExportVenvsRequest(targets=())])
for result, resolve in zip(all_results, ["a", "b"]):
if py_resolve_format == PythonResolveExportFormat.symlinked_immutable_virtualenv:
assert len(result.post_processing_cmds) == 2
ppc0, ppc1 = result.post_processing_cmds
assert ppc0.argv == ("rmdir", "{digest_root}")
assert ppc0.extra_env == FrozenDict()
assert ppc1.argv[0:2] == ("ln", "-s")
# The third arg is the full path to the venv under the pex_root, which we
# don't easily know here, so we ignore it in this comparison.
assert ppc1.argv[3] == "{digest_root}"
assert ppc1.extra_env == FrozenDict()
else:
if resolve == "a":
# editable wheels are installed for a user resolve that has dists
assert len(result.post_processing_cmds) == 5
else:
# tool resolves (flake8) and user resolves w/o dists (b)
# do not run the commands to do editable installs
assert len(result.post_processing_cmds) == 2
ppc0 = result.post_processing_cmds[0]
# The first arg is the full path to the python interpreter, which we
# don't easily know here, so we ignore it in this comparison.
# The second arg is expected to be tmpdir/./pex.
tmpdir, pex_pex_name = os.path.split(os.path.normpath(ppc0.argv[1]))
assert pex_pex_name == "pex"
assert re.match(r"\{digest_root\}/\.[0-9a-f]{32}\.tmp", tmpdir)
# The third arg is expected to be tmpdir/{resolve}.pex.
req_pex_dir, req_pex_name = os.path.split(ppc0.argv[2])
assert req_pex_dir == tmpdir
assert req_pex_name == f"{resolve}.pex"
assert ppc0.argv[3:] == (
"venv",
"--pip",
"--collisions-ok",
"{digest_root}",
)
assert ppc0.extra_env["PEX_MODULE"] == "pex.tools"
assert ppc0.extra_env.get("PEX_ROOT") is not None
ppc1 = result.post_processing_cmds[-1]
assert ppc1.argv == ("rm", "-rf", tmpdir)
assert ppc1.extra_env == FrozenDict()
reldirs = [result.reldir for result in all_results]
assert reldirs == [
f"python/virtualenvs/a/{current_interpreter}",
f"python/virtualenvs/b/{current_interpreter}",
]
|
"""
Setup for simstream module.
Author: Jeff Kinnison (jkinniso@nd.edu)
"""
from setuptools import setup, find_packages
setup(
name="simstream",
version="0.1dev",
author="Jeff Kinnison",
author_email="jkinniso@nd.edu",
packages=find_packages(),
description="",
install_requires=[
"pika >= 0.10.0"
],
)
|
import os
import sys
import subprocess
import shutil
import fam
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/raxml/')
import experiments as exp
import time
import saved_metrics
import run_raxml_supportvalues as raxml
import sequence_model
def run_pargenes(datadir, pargenes_dir, subst_model, samples, cores):
raxml_command = ""
run_modeltest = (subst_model == "bestAA" or subst_model == "bestNT")
if (not run_modeltest):
raxml_command +="--model " + sequence_model.get_raxml_model(subst_model) + " --blopt nr_safe"
command = []
command.append(exp.python())
command.append(exp.pargenes_script_debug)
command.append("-a")
command.append(os.path.join(datadir, "alignments"))
command.append("-b")
command.append(str(samples))
command.append("-o")
command.append(pargenes_dir)
command.append("-c")
command.append(str(cores))
command.append("-s")
command.append("0")
command.append("-p")
command.append("0")
if (len(raxml_command) > 0):
command.append("-R")
command.append(raxml_command)
if (run_modeltest):
command.append("-m")
if (subst_model == "bestAA"):
command.append("-d")
command.append("aa")
command.append("--continue")
    try:
        subprocess.check_call(command, stdout=sys.stdout)
    except Exception:
        # On failure, print the command for debugging and retry once.
        command[0] = exp.python()
        print(" ".join(command))
        subprocess.check_call(command, stdout=sys.stdout)
def export_pargenes_trees(pargenes_dir, subst_model, samples, datadir):
families_dir = os.path.join(datadir, "families")
# tca scores
concatenated_dir = os.path.join(pargenes_dir, "concatenated_bootstraps")
if (os.path.isdir(concatenated_dir)):
for concatenation in os.listdir(concatenated_dir):
family = "_".join(concatenation.split("_")[:-1]) # remove everything after the last
src = os.path.join(concatenated_dir, concatenation)
dest = fam.get_bootstrap_trees(datadir, samples, subst_model, family)
shutil.copyfile(src, dest)
def run_pargenes_and_extract_trees(datadir, subst_model, samples, cores, pargenes_dir = "bootstrap", extract_trees = True, restart = False):
saved_metrics_key = "bootstrap" + str(samples)
if (pargenes_dir != "pargenes"):
saved_metrics_key = pargenes_dir
print(datadir)
print(subst_model)
print(pargenes_dir)
pargenes_dir = fam.get_run_dir(datadir, subst_model, pargenes_dir)
if (not restart):
shutil.rmtree(pargenes_dir, True)
start = time.time()
run_pargenes(datadir, pargenes_dir, subst_model, samples, cores)
saved_metrics.save_metrics(datadir, fam.get_run_name(saved_metrics_key, subst_model), (time.time() - start), "runtimes")
lb = fam.get_lb_from_run(os.path.join(pargenes_dir, "mlsearch_run"))
saved_metrics.save_metrics(datadir, fam.get_run_name(saved_metrics_key, subst_model), (time.time() - start) * lb, "seqtimes")
if (extract_trees):
export_pargenes_trees(pargenes_dir, subst_model, samples, datadir)
cleanup = True
if (cleanup):
shutil.rmtree(pargenes_dir, True)
if __name__ == "__main__":
if (len(sys.argv) < 6):
print("syntax: python run_raxml_supportvalues.py datadir subst_model samples cores restart")
sys.exit(1)
dataset = sys.argv[1]
subst_model = sys.argv[2]
samples = int(sys.argv[3])
cores = int(sys.argv[4])
restart = int(sys.argv[5]) == 1
run_pargenes_and_extract_trees(dataset, subst_model, samples, cores, restart = restart)
|
import datetime
import random
import json
from typing import Callable, Iterable, TypeVar
T = TypeVar('T')
RANDOM_BASE = [
'0123456789',
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
]
def to_seconds(*, hours=0, minutes=0, seconds=0) -> int:
"""
>>> to_seconds(hours=1, minutes=1)
3660
>>> to_seconds(minutes=1, seconds=10)
70
"""
assert isinstance(hours, int), TypeError
assert isinstance(minutes, int), TypeError
assert isinstance(seconds, int), TypeError
return hours * 3600 + minutes * 60 + seconds
def indexof(fn: Callable[[T], bool], iterable: Iterable[T]) -> int:
""" 根据处理函数查找是否存在元素
如果存在满足条件的元素则返回下标,否则返回-1
>>> indexof(lambda n: n == 2, range(10))
2
>>> indexof(lambda n: n > 10, range(10))
-1
:param fn: 处理函数
:param iterable: 可迭代的数据
:return: 结果下标
"""
for idx, el in enumerate(iterable):
rst = fn(el)
if rst:
return idx
return -1
def addattr(obj, attr, value):
""" 为对象添加属性并返回对象
:param obj: 对象
:param attr: 属性名
:param value: 值
:return: 对象
"""
setattr(obj, attr, value)
return obj
def randomstr(length: int = 6, base: str = 'number') -> str:
""" 生成随机字符串
:param length:
:param base: 随机范围,枚举(number, letter, both)
:return:
"""
_base = {
'number': RANDOM_BASE[0],
'letter': RANDOM_BASE[1],
'both': RANDOM_BASE[1] + RANDOM_BASE[0]
}[base]
return ''.join([str(random.choice(_base)) for _ in range(length)])
def jsonformat(**kwargs):
return json.dumps(kwargs, indent=2)
def timedelta_to_zero() -> datetime.timedelta:
now = datetime.datetime.now()
next_zero = datetime.datetime(
year=now.year, month=now.month, day=now.day) + datetime.timedelta(
days=1)
return next_zero - now
def datetimestring(dt: datetime.datetime = None) -> str:
if not dt:
dt = datetime.datetime.now()
return f'{dt.year}-{dt.month}-{dt.day}'
|
"""
multiple thread, multiple connections
"""
import threading
import mysql.connector
from random import uniform
from time import sleep
def read_user_from_db():
"""
read user info from db
"""
sleep(uniform(0, 1))
user_db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="users"
)
user_cursor = user_db.cursor()
user_cursor.execute("select * from userinfo")
return user_cursor.fetchall()
if __name__ == "__main__":
threads = []
    for i in range(1000):
t = threading.Thread(target=read_user_from_db)
t.daemon = True
t.start()
threads.append(t)
for thread in threads:
thread.join()
|
celsius = float(input("Please enter the temperature in Celsius: "))
fahrenheit = (celsius * 1.8) + 32
kelvin = celsius + 273.15
print('''\n%0.1f Celsius is equal to %0.2f degrees Fahrenheit.\n'''%(celsius, fahrenheit))
print('''\n%0.1f Celsius is equal to %0.2f Kelvin.\n'''%(celsius, kelvin))
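# Sample run: entering 25 prints "25.0 Celsius is equal to 77.00 degrees Fahrenheit."
# and "25.0 Celsius is equal to 298.15 Kelvin."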
|
EOF = 3
digits = set(list("0123456789"))
lettersdigitsunderscore = set(
list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789")
)
letters = set(list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"))
ws = set(list(" \t\n\r"))
badidentifiertoken = 1
class StreamReader:
def __init__(self, instream):
self.instream = instream
self.nextChars = ""
self.EOF = False
self.line = 1
self.column = 0
self.charsRead = 0
def readChar(self):
if len(self.nextChars) > 0:
nextChar = self.nextChars[0]
self.nextChars = self.nextChars[1:]
else:
nextChar = self.instream.read(1)
if nextChar == "":
nextChar = chr(EOF)
elif nextChar == "\n":
self.line += 1
self.column = 0
else:
self.column += 1
if nextChar == chr(EOF):
self.EOF = True
self.charsRead += 1
return nextChar
def unreadChar(self, ch):
self.EOF = False
self.nextChars = ch + self.nextChars
if ch == "\n":
self.line -= 1
else:
self.column -= 1
self.charsRead -= 1
def numCharsRead(self):
# return the number of characters read. This is useful when backtracking is performed
# in case no progress is being made in reading the stream.
return self.charsRead
def eof(self):
return self.EOF
def readUpTo(self, delimiter):
result = ""
done = False
while not done and not self.eof():
c = self.readChar()
if not self.eof():
result += c
if result[-len(delimiter) :] == delimiter:
done = True
return result
def readInt(self):
number = ""
self.skipWhiteSpace()
digit = self.readChar()
while digit in digits:
number += digit
digit = self.readChar()
self.unreadChar(digit)
return int(number)
def readIdentifier(self):
id = ""
self.skipWhiteSpace()
c = self.readChar()
if not c in letters:
print(
"Bad identifier token found in source file starting with",
c,
"at line",
self.line,
"and column",
self.column,
)
raise Exception(badidentifiertoken)
while c in lettersdigitsunderscore:
id += c
c = self.readChar()
self.unreadChar(c)
return id
def skipWhiteSpace(self):
c = self.readChar()
while c in ws:
c = self.readChar()
self.unreadChar(c)
def peek(self, value):
# Skip white space, then look for the value as the next characters in the input file.
# Remember the read characters, but return true if they are found and false otherwise.
readChars = ""
self.skipWhiteSpace()
done = False
while len(readChars) < len(value) and not done:
c = self.readChar()
            if c == chr(EOF):  # readChar returns the EOF sentinel as a character
done = True
else:
readChars += c
for i in range(len(readChars) - 1, -1, -1):
self.unreadChar(readChars[i])
if readChars == value:
return True
return False
def skipComments(self):
# skip comments
while self.peek("(*"):
self.readUpTo("*)")
def getLineNumber(self):
return self.line
def getColNumber(self):
return self.column
def getToken(self):
self.skipWhiteSpace()
c = self.readChar()
if c in digits:
self.unreadChar(c)
return self.readInt()
if c in letters:
self.unreadChar(c)
return self.readIdentifier()
return c
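
if __name__ == "__main__":
    # Minimal usage sketch: drive the reader from an in-memory stream.
    # (io.StringIO stands in for a real source file here.)
    import io

    reader = StreamReader(io.StringIO("(* a comment *) count 42 ;"))
    reader.skipComments()
    print(reader.getToken())  # count
    print(reader.getToken())  # 42
    print(reader.getToken())  # ;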
|
import Jetson.GPIO as GPIO
import time
from threading import Thread
def checkSuccess(target_time, output_pin):
global success
start=time.time()
while True:
current_time=time.time()
if (current_time-start>=target_time):
GPIO.output(output_pin,GPIO.LOW)
success = True
# time.sleep(1)
return 0
# print(success)
################# Initialize Various ###############
RT_pin=18 #turn right pin
LT_pin=13 #turn left pin
RB_pin=0 #vibration right pin
LB_pin=0 #vibration left pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(RT_pin,GPIO.OUT)
GPIO.setup(LT_pin,GPIO.OUT)
GPIO.setup(RB_pin,GPIO.OUT)
GPIO.setup(LB_pin,GPIO.OUT)
TA=0 #target_angle
PA=0 #previous_angle
PTA=0 #previous_target_angle
p_t = 0 #previous_time
success = False
direction = 0
# TT #target_time
# PT #previous_time
#################### Main Loop ####################
theta = 73
for frame in range(10):
TA = theta*9
print(success)
if frame==0: pass
else:
t.do_run = False
c_t=time.time()
if success==False: CA=(c_t-p_t)*216*direction+PA
else: CA=PTA
if TA == CA: pass
else:
direction = (TA-CA)/abs(TA-CA)
TT = abs(TA-CA)/216
if direction<0:
GPIO.output(RT_pin,GPIO.HIGH)
output_pin=RT_pin
else:
GPIO.output(LT_pin,GPIO.HIGH)
output_pin=LT_pin
t=Thread(target=checkSuccess, args=(TT,output_pin))
t.start()
p_t=time.time()
PTA=TA
PA=CA
time.sleep(0.5)
|
#!/usr/bin/env python
# encoding: utf-8
"""
pageobj.py
Created by yang.zhou on 2012-09-17.
Copyright (c) 2012 zhouyang.me. All rights reserved.
"""
import logging
import time
import hashlib
import urllib
from datetime import datetime
from dateutil import parser
from dateutil import tz
def getMd5(st=''):
md5_st = hashlib.md5(st)
return md5_st.hexdigest()
def getSha1(st=''):
sha1_st = hashlib.sha1(st)
return sha1_st.hexdigest()
def formatDatetime(aDatetime):
return aDatetime.strftime("%Y/%m/%d %X")
def formatDate(aDatetime):
return aDatetime.strftime("%Y/%m/%d")
def getWeiboTime(t):
utc_zone = tz.gettz('UTC')
weibo_zone = tz.gettz('Asia/Shanghai')
return parser.parse(t).replace(tzinfo=utc_zone).astimezone(weibo_zone)
def _encode_params(**kw):
args = []
for k, v in kw.iteritems():
qv = v.encode('utf-8') if isinstance(v, unicode) else str(v)
args.append('%s=%s' % (k, urllib.quote(qv)))
return '&'.join(args)
# Get stripped string
def striping(content, start, length, ignore_white=True):
if len(content.decode("utf-8")) > length:
content = content.decode("utf-8")[start: length].encode("utf-8") + "..."
if ignore_white:
content = content.strip()
return content
|
def substring(string):
if not string:
return ''
length = len(string)
longest_sub = 0
sub_strings = []
    for a in range(length):
        unique = set()
        for b in range(a, length):
unique.add(string[b])
if len(unique) > 2:
break
if b + 1 - a == longest_sub:
sub_strings.append(string[a:b + 1])
if b + 1 - a > longest_sub:
longest_sub = b + 1 - a
sub_strings = [string[a:b + 1]]
return sub_strings[0]
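
if __name__ == '__main__':
    # Quick check (sketch): the longest substring with at most two distinct
    # characters in "abcbbbbcccbdddadacb" is "bcbbbbcccb".
    print(substring("abcbbbbcccbdddadacb"))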
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from opts import opts
from detectors.detector_factory import detector_factory
if __name__ == '__main__':
opt = opts().init()
image_name = opt.image_name_path
Detector = detector_factory[opt.task]
detector = Detector(opt)
ret = detector.run(image_name)
print(ret)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple test for validating that the Atari env initializes."""
import datetime
import os
import shutil
from absl import flags
from batch_rl.baselines import train
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class AtariInitTest(tf.test.TestCase):
def setUp(self):
super(AtariInitTest, self).setUp()
FLAGS.base_dir = os.path.join(
'/tmp/batch_rl_tests',
datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S'))
FLAGS.gin_files = ['batch_rl/baselines/configs/dqn.gin']
# `num_iterations` set to zero to prevent runner execution.
FLAGS.gin_bindings = [
'Runner.num_iterations=0',
'WrappedReplayBuffer.replay_capacity = 100' # To prevent OOM.
]
FLAGS.alsologtostderr = True
def test_atari_init(self):
"""Tests that a DQN agent is initialized."""
train.main([])
shutil.rmtree(FLAGS.base_dir)
if __name__ == '__main__':
tf.test.main()
|
from multiprocessing import Pool
import signal, os, time
def f(x):
print('hello',x)
return x*2
if __name__ == '__main__':
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
p = Pool(5)
signal.signal(signal.SIGINT, original_sigint_handler)
try:
res = p.map_async(f, range(10000000))
print("Waiting for results")
res.get(60)
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
p.terminate()
else:
print("Normal termination")
p.close()
p.join()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 17:40:49 2018
@author: Cole Thompson
"""
import matplotlib.pyplot
from matplotlib.pyplot import *
import numpy
from numpy import *
x=arange(0,200.1,0.1)
y0=arange(0,200.1,0.1)
y1= 125 - x
y2= (200/1.3)-((1.2/1.3)*x)
xB= 20 + 0.0*y0
yB = 20 + 0.0*x
#y= x + y0
# Plot limits must be set for the graph.
xlim(100,110)
ylim(15,25)
# Plot axes need to be labeled, title specified and legend shown.
xlabel('Donuts')
ylabel('Bagels')
title('Optimizing Breakfast')
plot(x,y1,'b', label='x + y >= 125')
plot(x,y2,'r', label='1.20x + 1.3y <= 250')
plot(xB,y0,'g', label='x >= 2')
plot(x,yB,'g', label='y >= 2')
#plot(x,y,'k--', label='z = x + y') # The dashed black line represents the objective function.
legend()
x= [0, 0, .8, 1.5, 1.5]
y= [0, 2.0, 2.4, 1.0, 0]
# Matplotlib will fill irregular polygons if the corner points are given.
# Different colors are possible. Alpha controls the level of darkness.
fill(x,y, color='grey', alpha=0.2)
grid()
show()
obj= matrix([3.0,4.0])
obj= transpose(obj)
corners= matrix([x,y])
corners= transpose(corners)
result= dot(corners,obj)
print ('Value of Objective Function at Each Corner Point:\n', result)
|
from flask_pagedown.fields import PageDownField
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField, BooleanField, TextAreaField, SelectField
from wtforms import ValidationError
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from app.models import User, Role
#====================================================================================================
class LoginForm(FlaskForm):
name = StringField('Name', validators=[Length(0, 64)])
email = StringField('Email',validators=[Length(0,64)])
password = PasswordField('Password',validators=[DataRequired()])
remember_me = BooleanField('Keep Logged in')
submit = SubmitField('Login')
class RegistrationForm(FlaskForm):
email = StringField('Email',validators=[DataRequired(),Length(1,64),Email()])
name = StringField('Name',validators=[DataRequired(),Length(1,64),
Regexp('^[A-Za-z][A-Za-z0-9._]*$',0,
'Usernames must have only letters,numbers,dots or underscore.')
])
location = StringField('Location',validators=[Length(0,64)])
password = PasswordField('Password',validators=[DataRequired(),
EqualTo('password2',message='Passwords must match.')])
    password2 = PasswordField('Confirm Password',validators=[DataRequired()])
submit = SubmitField('Register')
    def validate_email(self,field): # custom validator: methods named validate_<fieldname> run automatically
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_name(self,field):
if User.query.filter_by(name=field.data).first():
raise ValidationError('Username already in use.')
#====================================================================================================
class EditProfileForm(FlaskForm):
name = StringField('Name',validators=[DataRequired(),Length(0,64)])
email = StringField('Email', validators=[
DataRequired(), Length(1, 64), Email()])
role = StringField('Role', render_kw={'readonly': True})
location = StringField('Location',validators=[Length(0,64)])
about_me = TextAreaField('About me')
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Submit')
class EditProfileForAdminForm(FlaskForm):
name = StringField('Name', validators=[DataRequired(), Length(1, 64),
Regexp('^[A-Za-z][A-Za-z0-9._]*$', 0,
'Usernames must have only letters,numbers,dots or underscore.')
])
email = StringField('Email', validators=[
DataRequired(), Length(1, 64), Email()])
password = PasswordField('Password', validators=[DataRequired()])
location = StringField('Location',validators=[Length(0,64)])
about_me = TextAreaField('About me')
role = SelectField('Role',coerce=int,choices='',render_kw={'class':'form-control'},
                       validators=[DataRequired('Please choose a role.')])
confirmed = BooleanField('Confirmed')
submit = SubmitField('Submit')
def __init__(self, user,*args,**kwargs):
super(EditProfileForAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id,role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self,field):
if field.data!=self.user.email and User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_name(self,field):
if field.data!=self.user.name and User.query.filter_by(name=field.data).first():
raise ValidationError('Username already in use.')
#====================================================================================================
class PostForm(FlaskForm):
# body = TextAreaField("What's on your mind?",validators=[DataRequired()])
body = PageDownField("What's on your mind?",validators=[DataRequired()])
submit = SubmitField('Submit')
#====================================================================================================
class CommentForm(FlaskForm):
body = StringField('Comment:',validators=[DataRequired()])
submit = SubmitField('Submit')
|
def chess(tr, tc, pr, pc, size):  # args: top-left corner (tr, tc), special square (pr, pc), and board size
global mark
global table
mark += 1
count = mark
if size == 1:
return
half = size // 2
    # Determine which quadrant holds the special square and the subproblem size; cover the other three quadrants and recurse.
if pr < tr + half and pc < tc + half:
chess(tr, tc, pr, pc, half)
else:
table[tr + half - 1][tc + half - 1] = count
chess(tr, tc, tr + half - 1, tc + half - 1, half)
if pr < tr + half and pc >= tc + half:
chess(tr, tc + half, pr, pc, half)
else:
table[tr + half - 1][tc + half] = count
chess(tr, tc + half, tr + half - 1, tc + half, half)
if pr >= tr + half and pc < tc + half:
chess(tr + half, tc, pr, pc, half)
else:
table[tr + half][tc + half - 1] = count
chess(tr + half, tc, tr + half, tc + half - 1, half)
if pr >= tr + half and pc >= tc + half:
chess(tr + half, tc + half, pr, pc, half)
else:
table[tr + half][tc + half] = count
chess(tr + half, tc + half, tr + half, tc + half, half)
def show(table):
n = len(table)
for i in range(n):
for j in range(n):
print(table[i][j], end='\t')
print('')
if __name__ == "__main__":
mark = 0
n = 8
table = [["*" for x in range(n)] for y in range(n)]
show(table)
chess(0, 0, 4, 2, n)
show(table)
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# from .home import views
urlpatterns = [
url(r'^$', include('home.urls', namespace="home")),
url(r'^stories/', 'home.views.verhalen'),
url(r'^overons/', 'home.views.aboutus'),
url(r'^colofon/', 'home.views.colofon'),
url(r'^durfjij/', include('polls.urls', namespace="polls")),
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', include('blog.urls', namespace="blog")),
url(r'^contact/', include('contact.urls', namespace="contact")),
url(r'^iamgrey/', include('iamgrey.urls', namespace="iamgrey")),
url(r'^partners/', include('partners.urls', namespace="partners")),
# url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT,}),
]
# + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import matplotlib.pyplot as plt
mes = ["enero","febrero","marzo","abril","mayo","junio","julio","agosto","septiembre","octubre","noviembre","diciembre"]
ingresos = ["350.000","780.00","230.000","650.000","500.00","800.00","150.000","450.000","900.000","750.000","970.00","450.000"]
plt.bar(mes,ingresos, width = 0.8, color = "m")
plt.title("Ingresos durante el 2020")
plt.xlabel("Mes")
plt.ylabel("Ingresos")
plt.savefig("GraficoIngresos.png")
plt.show()
pieLabels = ["medellin","bogota","cali","pereira","barranquilla"]
sizes = [23,25,17,14,21]
pieExplode = [0,0.3,0,0,0]
plt.pie(sizes,labels=pieLabels, explode = pieExplode)
plt.title("Ciudades de colombia")
plt.savefig("TortaCiudades.png")
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 3 14:17:35 2018
@author: xingxf03
"""
import numpy as np
from sklearn import datasets,model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
mnist = datasets.fetch_mldata('MNIST original')
data,target = mnist.data,mnist.target
print('data.shape:{},target.shape:{}'.format(data.shape,target.shape))
index = np.random.choice(len(target), 70000, replace=False)
# Build a training set of the given size
def mk_dataset(size):
train_img = [data[i] for i in index[:size]]
train_img = np.array(train_img)
train_target = [target[i] for i in index[:size]]
    train_target = np.array(train_target)
return train_img,train_target
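
# Example (sketch): build a 1,000-sample training split and report its shape.
train_img, train_target = mk_dataset(1000)
print('train_img.shape:{}, train_target.shape:{}'.format(train_img.shape, train_target.shape))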
|
"""Main module for testing DecisionTreeClassifier, KNeighborsClassifier,
RandomForestClassifier and GaussianNB on LeafClassification problem from kaggle.
Usage:
    python3 <script_name> <n_samples>
"""
import sys
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from Constants.constants import *
from Common.common import *
from Algorithms.algorithms import *
from Data.data import *
def execute_algorithms(train_data, target_data, n_samples, data_scrubbing_description):
"""Executes Decision Tree, Random Forest, KNeighbors and GaussianNB algorithms on
raw or standardized training data.
Prints results of classification.
Plots results of Random Forest algorithm as a function of number of estimators.
Args:
train_data: Available data attributes with values.
target_data: Class attribute values.
n_samples: Number of times that classification will be executed.
data_scrubbing_description: Description on what data is being used.
"""
dt_clf = DecisionTreeClassifier()
kn_clf = KNeighborsClassifier()
nb_clf = GaussianNB()
n_estimators_array = np.array([1, 5, 10, 50, 100, 200])
dt_score, dt_score_std = run_algorithm(dt_clf, train_data, target_data, n_samples)
print_single_section("Decision Tree Classifier", data_scrubbing_description, dt_score)
rf_score_array = np.zeros(len(n_estimators_array))
rf_score_std_array = np.zeros(len(n_estimators_array))
for i in range(len(n_estimators_array)):
rf_clf = RandomForestClassifier(n_estimators = n_estimators_array[i])
rf_score_array[i], rf_score_std_array[i] = run_algorithm(rf_clf, train_data, target_data, n_samples)
print_multiple_section("Random Forest Classifier", data_scrubbing_description,
"For n_estimators = {0:0=3d} mean accuracy is {1:.6f}", n_estimators_array, rf_score_array)
plot_chart("Number of estimators", "accuracy", n_estimators_array, rf_score_array,
title_text='Random Forest Classifier ' + data_scrubbing_description, sigma_component=rf_score_std_array)
kn_score, kn_std = run_algorithm(kn_clf, train_data, target_data, 5)
print_single_section("KNeighbours Classifier", data_scrubbing_description, kn_score)
nb_score, nb_std = run_algorithm(nb_clf, train_data, target_data, 5)
print_single_section("Naive Bayes Classifier", data_scrubbing_description, nb_score)
def execute_pca_variance_calculation(train_data):
"""Split training set with skicit learn train_test_split function.
Train classifier on training set and evaluate it on test set.
Args:
clf: Used Classifier.
data: Available data attributes.
target: Class attribute values.
split_ratio: split ratio of data to be used for traning/testing.
Returns:
clf.score(testX, testY): Accuracy of evaluation on test data.
"""
n_components_array = ([1, 5, 10, 20, 50, 100, 150, 180])
vr = calculate_data_variance_ratio(n_components_array, train_data)
plot_pca_chart("Number of PCA components", "variance ratio", n_components_array, vr)
def execute_algorithms_with_pca(train_data, target_data, n_samples, data_scrubbing_description):
"""Executes Decision Tree, Random Forest, KNeighbors and GaussianNB algorithms on
raw or standardized training data as a function of number of PCA components.
Prints results of classification.
Plots results of all algorithms as a function of number of PCA components.
Args:
train_data: Available data attributes with values.
target_data: Class attribute values.
n_samples: Number of times that classification will be executed.
data_scrubbing_description: Description on what data is being used.
"""
dt_clf = DecisionTreeClassifier()
rf_clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
kn_clf = KNeighborsClassifier()
nb_clf = GaussianNB()
n_components_array = ([1, 5, 10, 20, 50, 100, 150, 180])
dt_score_array, dt_score_std_array = run_algorithm_with_pca(dt_clf, train_data, target_data, n_components_array, n_samples)
print_multiple_section("Decision Tree Classifier + PCA Decomposition", data_scrubbing_description,
"For {0:0=3d} PCA components mean accuracy is {1:.6f}", n_components_array, dt_score_array)
plot_chart('number of PCA components', 'accuracy', n_components_array, dt_score_array,
title_text='Decision Tree Classifier ' + data_scrubbing_description, sigma_component=dt_score_std_array)
rf_score_array, rf_score_std_array = run_algorithm_with_pca(rf_clf, train_data, target_data, n_components_array, n_samples)
print_multiple_section("Random Forest Classifier + PCA Decomposition", data_scrubbing_description,
"For n_estimators = {0:0=3d} mean accuracy is {1:.6f}", n_components_array, rf_score_array)
plot_chart('number of PCA components', 'accuracy', n_components_array, rf_score_array,
title_text='Random Forest Classifier ' + data_scrubbing_description, sigma_component=rf_score_std_array)
kn_score_array, kn_score_std_array = run_algorithm_with_pca(kn_clf, train_data, target_data, n_components_array, n_samples)
print_multiple_section("KNeigbors Classifier + PCA Decomposition", data_scrubbing_description,
"For {0:0=3d} PCA components mean accuracy is {1:.6f}", n_components_array, kn_score_array)
plot_chart('number of PCA components', 'accuracy', n_components_array, kn_score_array,
title_text='KNeigbors Classifier ' + data_scrubbing_description, sigma_component=kn_score_std_array)
nb_score_array, nb_score_std_array = run_algorithm_with_pca(nb_clf, train_data, target_data, n_components_array, n_samples)
print_multiple_section("Naive Bayes Classifier + PCA Decomposition", data_scrubbing_description,
"For {0:0=3d} PCA components mean accuracy is {1:.6f}", n_components_array, nb_score_array)
plot_chart('number of PCA components', 'accuracy', n_components_array, nb_score_array,
title_text='Naive Bayes Classifier ' + data_scrubbing_description, sigma_component=nb_score_std_array)
plot_multiple_charts(x_axis_array=n_components_array,
errorbar_two_dim_mean_array=[dt_score_array,
rf_score_array,
kn_score_array,
nb_score_array],
errorbar_two_dim_std_array=[dt_score_std_array,
rf_score_std_array,
kn_score_std_array,
nb_score_std_array],
x_label="num PCA components",
y_label="validation accuracy",
title_text="Algorithms " + data_scrubbing_description,
legend=['Decision Tree', 'Random Forest', 'k Nearest Neighbor', 'Naive Bayes'])
def main(n_samples):
"""Main function.
Loads train and test data and invokes execution of classification algorithms.
Args:
n_samples: Number of times that classification will be executed.
"""
train = read_csv(TRAIN_FILE_PATH)
#test = read_csv(TEST_FILE_PATH)
target = train['species']
    train = train.drop(['id', 'species'], axis=1)
scaler = StandardScaler().fit(train)
train_standardized = scaler.transform(train)
print_header("Execute classification without data processing")
execute_algorithms(train, target, n_samples, "without any data preprocessing")
print_header("Execute classification with data standardization")
execute_algorithms(train_standardized, target, n_samples, "with data standardization")
print_header("Capture training data variance with PCA")
execute_pca_variance_calculation(train)
print_header("Execute classification after data decomposition")
execute_algorithms_with_pca(train, target, n_samples, "with data decomposition")
print_header("Execute classification after data decomposition and standardization")
execute_algorithms_with_pca(train_standardized, target, n_samples, "with data decomposition and standardization")
if __name__ == '__main__':
    try:
        main(int(sys.argv[1]))
    except IndexError:
        print("Using default number of samples = 2")
        main(2)
|
'''
---------------------------------------------------------------------------
arrayUtilities.py
Kirk D Evans 07/2018 kdevans@fs.fed.us
TetraTech EC for:
USDA Forest Service
Region 5 Remote Sensing Lab
script to: misc array and list functions
known limitations: python 3.x
---------------------------------------------------------------------------
'''
import sys, os
import numpy as np
import general as g
sys.path.append(os.path.abspath('.'))
def indexCuts(intLen, intBreaks):
    ''' Return a list of list/array indices describing the
cut points of an iterable of length intLen
into intBreaks slices of approximately equal length.
'''
if type(intLen) != int:
raise Exception('arg: intLen, must be integer')
    if type(intBreaks) != int:
raise Exception('arg: intBreaks, must be integer')
if intBreaks < 1:
raise Exception('arg: intBreaks, must be greater than 0')
intwidth = float(intLen)/intBreaks
lstCuts = [0]
for i in range(1,intBreaks):
lstCuts.append(int(round(i * intwidth)))
lstCuts.append(intLen)
return lstCuts
def tupCuts(lstC):
''' Return list of tuples given lstC of length n:
        [(lstC[0], lstC[1]), (lstC[1], lstC[2]), ..., (lstC[n-2], lstC[n-1])]
'''
return [(lstC[i], lstC[i+1]) for i in range(len(lstC) - 1)]
def splitSample(lst, tupBreak, bolMakeArray = True, fTransform = g.Return):
''' Given tupBreak (i,j), return two lists:
lstSubset = lst[i:j]
lstRest = lst[:i] + lst[j:], i.e. arr without lstIn
Optionally convert lstIn and lstOut to numpy arrays
Optionally apply function fTransform to elements of lstIn and lstOut
'''
if type(lst) not in (list, tuple):
raise Exception('arg: lst, must be list or tuple')
i, j = tupBreak
lstSubset = lst[i:j]
lstRest = lst[:i] + lst[j:]
if bolMakeArray:
return fTransform(np.array(lstSubset)), fTransform(np.array(lstRest))
else:
return [fTransform(k) for k in lstSubset], [fTransform(k) for k in lstRest]
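
# Example (sketch, assuming general.Return is the identity function):
#   cuts = indexCuts(10, 3)    # [0, 3, 7, 10]
#   pairs = tupCuts(cuts)      # [(0, 3), (3, 7), (7, 10)]
#   sub, rest = splitSample(list(range(10)), pairs[1], bolMakeArray=False)
#   # sub == [3, 4, 5, 6], rest == [0, 1, 2, 7, 8, 9]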
if __name__ == "__main__":
pass
|
import os
import exputils
import shutil
def test_experimentstarter(tmpdir):
dir_path = os.path.dirname(os.path.realpath(__file__))
# change working directory to this path
os.chdir(dir_path)
############################################################################
## test 01 - serial
# copy the scripts in the temporary folder
directory = os.path.join(tmpdir.strpath, 'test_experimentstarter_01')
shutil.copytree('./start_scripts', directory)
# run scripts
exputils.start_experiments(directory=directory, is_parallel=False)
# check if the required files have been generated
assert os.path.isfile(os.path.join(directory, 'job04.txt'))
assert os.path.isfile(os.path.join(directory, 'job01/job01.txt'))
assert os.path.isfile(os.path.join(directory, 'job02/job02.txt'))
assert not os.path.isfile(os.path.join(directory, 'job03/job03.txt'))
############################################################################
## test 02 - parallel
# copy the scripts in the temporary folder
directory = os.path.join(tmpdir.strpath, 'test_experimentstarter_02')
shutil.copytree('./start_scripts', directory)
# run scripts
exputils.start_experiments(directory=directory, is_parallel=True)
# check if the required files have been generated
assert os.path.isfile(os.path.join(directory, 'job04.txt'))
assert os.path.isfile(os.path.join(directory, 'job01/job01.txt'))
assert os.path.isfile(os.path.join(directory, 'job02/job02.txt'))
assert not os.path.isfile(os.path.join(directory, 'job03/job03.txt'))
############################################################################
## test 03 - is_chdir=True
# copy the scripts in the temporary folder
directory = os.path.join(tmpdir.strpath, 'test_experimentstarter_03')
shutil.copytree('./start_scripts', directory)
# run scripts
exputils.start_experiments(directory=directory, is_parallel=True, is_chdir=True)
# check if the required files have been generated
assert os.path.isfile(os.path.join(directory, 'job04.txt'))
assert os.path.isfile(os.path.join(directory, 'job01/job01.txt'))
assert os.path.isfile(os.path.join(directory, 'job02/job02.txt'))
assert not os.path.isfile(os.path.join(directory, 'job03/job03.txt'))
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/12 19:37
# @Author : Yunhao Cao
# @File : storage.py
from sqlalchemy import Column, Integer, String, DateTime, orm, create_engine
from sqlalchemy.ext.declarative import declarative_base
__author__ = 'Yunhao Cao'
__all__ = [
'Item',
'Column',
'Integer',
'String',
'DateTime',
'Storage',
]
# Base class for database entities:
Item = declarative_base()
class Storage(object):
def __init__(self, config):
engine = create_engine(config)
self.session = orm.sessionmaker(bind=engine)()
def save(self, item):
self.session.add(item)
self.session.commit()
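
if __name__ == '__main__':
    # Minimal usage sketch: declare an Item subclass and persist one row.
    # The table/column names here are illustrative, not from the original code.
    class Page(Item):
        __tablename__ = 'page'
        id = Column(Integer, primary_key=True)
        title = Column(String(128))

    storage = Storage('sqlite:///:memory:')
    Item.metadata.create_all(storage.session.get_bind())
    storage.save(Page(title='hello'))
    print(storage.session.query(Page).count())  # 1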
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" Contain all StoreBuilder errors
"""
__all__ = [
'UninitializedStore',
'CanNotInitializeStore',
'FailToSendStoreRecord',
]
class UninitializedStore(RuntimeError):
"""UninitializedStore
    This error is raised when the store is not initialized
"""
class CanNotInitializeStore(RuntimeError):
"""CanNotInitializeStore
    This error is raised when StoreBuilder can't initialize the store
"""
class FailToSendStoreRecord(Exception):
"""FailToSendStoreRecord
    This error is raised when StoreBuilder fails to send a StoreRecord
"""
|
#!/usr/bin/env python3
#
# Auxiliary script for obtaining all inaugural speeches of all U.S.
# presidents from Wikipedia.
import re
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def getName(title):
i = title.index("'")
n = title[:i]
n = n.replace(" ", "_")
return n
def getSpeech(name, url):
page = requests.get(url)
content = page.content
soup = BeautifulSoup(content, "html.parser")
header = soup.find("div", class_="gen_header_title")
div = soup.find(id="mw-content-text")
year = re.search(r'\((\d+)\)', header.text).group(1)
# Remove all licence containers
licences = soup.find_all("div", class_="licenseContainer licenseBanner")
for licence in licences:
licence.decompose()
speech = ""
for p in div.find_all("p", recursive=True):
speech += p.text + "\n"
return year,speech
overviewURL = "https://en.wikisource.org/wiki/Category:U.S._Presidential_Inaugural_Addresses"
baseURL = urljoin(overviewURL, '/')
overviewPage = requests.get(overviewURL)
overviewContent = overviewPage.content
soup = BeautifulSoup(overviewContent, "html.parser")
for category in soup.find_all("div", class_="mw-category-group"):
ul = category.find("ul")
for li in ul.find_all("li"):
a = li.find("a")
page = a['href']
name = getName(a.text)
print("Processing %s..." % name)
url = urljoin(baseURL, page)
year, speech = getSpeech(name, url)
with open("%s.txt" % (year + "_" + name), "w") as f:
f.write(speech)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
import ConfigParser
def read_config():
try:
config_inst = ConfigParser.ConfigParser()
config_inst.read('mushroom.conf')
####################################################
db_conn_info['HOST'] = config_inst.get('DB', 'host')
db_conn_info['USER'] = config_inst.get('DB', 'user')
db_conn_info['PASSWORD'] = config_inst.get('DB', 'password')
db_conn_info['DATABASE'] = config_inst.get('DB', 'database')
####################################################
arm_server_addr = config_inst.get('ARMServer', 'address')
arm_server_port = config_inst.getint('ARMServer', 'port')
ARM_SERVER_ADDR[0] = arm_server_addr
ARM_SERVER_ADDR[1] = arm_server_port
django_server_addr = config_inst.get('DjangoServer', 'address')
django_server_port = config_inst.getint('DjangoServer', 'port')
DJANGO_SERVER_ADDR[0] = django_server_addr
DJANGO_SERVER_ADDR[1] = django_server_port
####################################################
log_file['ERROR'] = config_inst.get('Log', 'error_path')
log_file['COMMUNICATION'] = config_inst.get('Log', 'communication_path')
log_file['DEBUG'] = config_inst.get('Log', 'debug_path')
log_file['WORK'] = config_inst.get('Log', 'work_path')
if config_inst.getint('Log', 'error_open') == 1:
log_handler.enable_error()
else:
log_handler.disable_error()
if config_inst.getint('Log', 'communication_open') == 1:
log_handler.enable_communication()
else:
log_handler.disable_communication()
if config_inst.getint('Log', 'debug_open') == 1:
log_handler.enable_debug()
else:
log_handler.disable_debug()
if config_inst.getint('Log', 'work_open') == 1:
log_handler.enable_work()
else:
log_handler.disable_work()
####################################################
MAX_TASK_ID = config_inst.getint('Task', 'max_session_id')
if MAX_TASK_ID > 16777215:
MAX_TASK_ID = 16777215
####################################################
return SUC
except ConfigParser.Error, e:
log_msg = str(e)
log_handler.error(log_msg)
return FAI
if __name__ == '__main__':
read_config()
print db_conn_info
print 'ARM_SERVER_ADDR: %s' %str(ARM_SERVER_ADDR)
print 'DJANGO_SERVER_ADDR: %s' %str(DJANGO_SERVER_ADDR)
print 'log_file: %s' %str(log_file)
print 'log_conf: %s' %str(log_conf)
print 'MAX_TASK_ID: %d' %MAX_TASK_ID
|
# Object orientation
class Carro():
    # Constructor
    def __init__(self, modelo='', cor='', velocidade=0, ano=''):  # self is the only required parameter
        self.modelo = modelo
        self.cor = cor
        self.velocidade = velocidade
        self.ano = ano
    def acelerar(self):
        self.velocidade += 10
    def frear(self):
        self.velocidade -= 10
        if self.velocidade == 0:
            print('The car is stopped')
# create objects
fusca = Carro('Fusca', 'Vermelho', 0, 1975)
verona = Carro()
print(fusca.modelo)
print(fusca.cor)
print(fusca.velocidade)
print(fusca.ano)
fusca.acelerar()
fusca.acelerar()
print(fusca.velocidade)
|
# Generated by Django 2.2.2 on 2019-06-25 18:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20190625_2016'),
]
operations = [
migrations.AlterModelOptions(
name='history',
options={'get_latest_by': ['time'], 'ordering': ['-time']},
),
]
|
import pytest
import numpy as np
from numba import cuda
from libgdf_cffi import ffi, libgdf, GDFError
from .utils import new_column, unwrap_devary, get_dtype, gen_rand, fix_zeros
def test_cuda_error():
dtype = np.float32
col = new_column()
gdf_dtype = get_dtype(dtype)
libgdf.gdf_column_view(col, ffi.NULL, ffi.NULL, 0, gdf_dtype)
#with pytest.raises(GDFError) as raises:
# libgdf.gdf_add_generic(col, col, col)
#raises.match("CUDA ERROR.")
|
import knn
class Process:
def __init__(self,trainDataPath,testDataPath):
self.trainPath=trainDataPath
self.testPath=testDataPath
self.trainData=[]
self.trainDataPredict=[]
self.testData=[]
self.testDataPredict=[]
def process(self,path):
data=[]
mdict={}
dict0={'a':0,'b':1}
mdict[0]=dict0
dict3={'u':4,'y':5,'l':6,'t':7}
mdict[3]=dict3
dict4={'g':8,'p':9,'gg':10}
mdict[4]=dict4
dict5={'c':11, 'd':12, 'cc':13, 'i':14, 'j':15, 'k':16,
'm':17,'r':18, 'q':19, 'w':20, 'x':21, 'e':22, 'aa':23, 'ff':24}
mdict[5]=dict5
dict6={'v':25,'h':26,'bb':27,'j':28,'n':29,'z':30,'dd':31,'ff':32,'o':33}
mdict[6]=dict6
dict8={'t':35,'f':36}
mdict[8]=dict8
dict9={'t':37,'f':38}
mdict[9]=dict9
dict11={'t':40,'f':41}
mdict[11]=dict11
dict12={'g':42,'p':43,'s':44}
mdict[12]=dict12
dict14={1:2,2:3,7:34,10:39,13:45,14:46}
mdict[14]=dict14
f= open(path)
for line in f:
k=line.split(',')
q=[0]*47
for i in range(0,len(k)-1):
if not((i==1) or (i==2) or (i==7) or (i==10) or
(i==13) or (i==14)):
temp=mdict[i]
q[temp[k[i]]]=1
else:
temp=mdict[14]
q[temp[i]]=float(k[i])
data.append(q)
return data
def trainPredictValues(self,path):
data=[]
f= open(path)
for line in f:
k=line.split(',')
k[-1]=k[-1].strip()
data.append(k[-1])
return data
def getData(self):
self.trainData=self.process(self.trainPath)
self.trainDataPredict = self.trainPredictValues(self.trainPath)
self.testData=self.process(self.testPath)
self.testDataPredict = self.trainPredictValues(self.testPath)
#self.testData=self.process(self.trainPath)
def predict(self,n):
k= knn.kNearestNeighbours()
k.train(self.trainData,self.trainDataPredict)
sol = k.test(self.testData,n)
cor=0
wrng=0
print sol
print self.testDataPredict
for i in range(0,len(sol)):
if sol[i]== self.testDataPredict[i]:
cor=cor+1
else:
wrng = wrng +1
print "for k=",n
print 100*cor/len(sol)
|
import pickle, cv2, math, timeit, random, time, os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from pathlib import Path
from sklearn.neighbors import KNeighborsClassifier
def nothing(x):
pass
# - Find CONTOURS function
def find_contours(filename):
picture = cv2.imread(filename) # picture to read
# - color converstion
picture_gray = cv2.cvtColor(picture,cv2.COLOR_BGR2GRAY)
# - THRESHOLD
ret, picture_thresh = cv2.threshold(picture_gray,127,255,cv2.THRESH_OTSU)
##picture_thresh2 = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)#9,6
# - find CONTOURS
contour_list, hierarchy = cv2.findContours(picture_thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
return contour_list
def feature_extraction(input_mod,features,filename):
curvature_threshold = 0.08 # constant values for features calculations
polygon_tolerance = 0.05 # constant values for features calculations
k = 4
for contour in input_mod:
# - FIND VERTICES
arc = cv2.arcLength(contour, True)
contour_vertices = cv2.approxPolyDP(contour, 0.01*arc, True)
vertices__contour_area = cv2.contourArea(contour_vertices)
# - LIMIT SIZE
if vertices__contour_area > 18000 and vertices__contour_area < 55000:
curvature_chain = []
cont_ar = np.asarray(contour)
vertices = len(contour_vertices)
##FEATURE_extraction_algorithms
ellipse_feature = cv2.fitEllipse(contour)
(center,axes,orientation) = ellipse_feature
majoraxis_length_feature = max(axes)
minoraxis_length_feature = min(axes)
axes_ratio_feature = minoraxis_length_feature/majoraxis_length_feature
area_feature = cv2.contourArea(contour)
perimeter_feature = cv2.arcLength(contour,True)
area_ratio_feature = perimeter_feature / area_feature
perimeter_ratio_feature = minoraxis_length_feature / perimeter_feature
epsilon_feature = polygon_tolerance*perimeter_feature
vertex_approx_feature = 1.0 / len(cv2.approxPolyDP(contour,epsilon_feature,True))
length_feature = len(input_mod)
##### begin of Eris Chintelli code
# CURVATURE & CONVEXITY
for i in range(cont_ar.shape[0]-k):
num = cont_ar[i][0][1]-cont_ar[i-k][0][1] # y
den = cont_ar[i][0][0]-cont_ar[i-k][0][0] # x
angle_prev = -np.arctan2(num,den)/np.pi
num = cont_ar[i+k][0][1]-cont_ar[i][0][1] # y
den = cont_ar[i+k][0][0]-cont_ar[i][0][0] # x
angle_next = -np.arctan2(num,den)/np.pi
new_curvature = angle_next-angle_prev
curvature_chain.append(new_curvature)
convexity = 0
concavity = 0
for i in range(len(curvature_chain)):
if curvature_chain[i] > curvature_threshold:
convexity += 1
if curvature_chain[i] < -curvature_threshold:
concavity += 1
convexity_ratio = convexity / float(i+1)
concavity_ratio = concavity / float(i+1)
##### end of Eris Chinchilli code
'''
crn_f = cv2.imread('/home/alf/Desktop/Major1/Code/Test1/Final/Gestures/'+filename)
crn_f = cv2.cvtColor(crn_f, cv2.COLOR_BGR2GRAY)
crn = np.float32(crn_f)
crn = cv2.cornerHarris(crn,2,3,0.04)
#hull = cv2.convexHull(contour)
#hull_area = cv2.contourArea(hull)
#solidity = area_feature / float(hull_area)
'''
# - DRAW CONTOURS (TRAINING)
##img_draw = cv2.imread('/home/alf/Desktop/Major1/Code/Test1/Final/Gestures/'+filename)
#img_resize = cv2.resize(img_draw,(0,0),fx=0.3 , fy=0.3)
##cv2.drawContours(img_draw,[contour],-1,(0,255,0),4)
##cv2.imshow('contours',img_draw)
#cv2.waitKey()
feature_values=[]
    counting_error = 0  # counts features missing from this scope
for ft in nr_features:
# - CHECK FOR MISSING FEATURE, COLLECT & APPEND DATA
if features_list[ft] in locals():
feature=eval(features_list[ft])
feature_values.append(feature)
print '%s' %(features_list[ft]), feature
# - SET FEATURE VARIABLE=FALSE,ERROR & APPEND DATA
else:
counting_error+=1
feature = False # DIRECT APPROACH
feature_values.append(feature)
if counting_error==len(features_list_array):
feed_r = 0
else:
feed_r = 1
return feature_values,feed_r
# - Start --------------------------------------------------------------------------------------------------------------------------------
# - CLASSIFIER
classifier = KNeighborsClassifier(3)
# - INIT
pth='/home/alf/Desktop/Major1/Code/Test1/Final/Gestures/'
shape_names=['cls','opn','two']
# - Features
features_list = ['axes_ratio_feature','concavity_ratio','convexity_ratio','area_ratio_feature','vertex_approx_feature','length_feature','perimeter_ratio_feature','vertices']
nr_features = [0, 1, 2, 3, 4, 5, 6, 7] # ideally should be nr_features = len(features_list)
features_list_array = [features_list[ft] for ft in nr_features]
# - World - nr_pic_folder examples for each object
#nr_pic_folder = 115 # number of training picture per folder
n1 = 0 # Loop Count
feature_space_values = []
labels = []
model_name = '/home/alf/Desktop/Major1/Code/Test1/Final/test_new_model08.sav'
my_file = Path(model_name)
tots_loop = 0
last_cnt = 0
# - CHECK FOR & TRAIN MODEL
print 'Training has begun'
# - TRAIN LOOP
for folder in range(len(shape_names)):# Loop shape_names Times ######
nr_pic_folder=115
last_cnt = 0
while last_cnt < nr_pic_folder:
#for s in range(nr_pic_folder):# Loop nr_pic_folder Times (Total = shape_names x nr_pic_folder) # Do not use
tots_loop+=1
files_frm_fldrs = [str(filename)for filename in os.listdir(pth+shape_names[folder])]#
random_folder = random.randint(1, nr_pic_folder)
filename_update = shape_names[folder]+'/'+files_frm_fldrs[random_folder]
#print ' - random_folder :',random_folder
print ' - SHAPE NAME :',shape_names[folder]
print ' - Train Count : ',n1
print ' - last_cnt : ',last_cnt
print ' - tots : ',tots_loop
print ' - Folder : ',folder
print ' - filename_update :',filename_update
current_contour = find_contours(pth+filename_update)
train_feature_values,feedd = feature_extraction(current_contour,features_list_array,filename_update) # chain = contours(i)
if feedd == 1:
n1+=1
last_cnt+=1
feature_space_values.append(train_feature_values)
labels.append(folder)
else:
nr_pic_folder+=1
print ' - Labels : ',labels
print ' - Space : ',feature_space_values
# - TRAIN MODEL
classifier.fit(np.asarray(feature_space_values), np.asarray(labels))# - X_train
# - SAVE MODEL TO FILE
pickle.dump(classifier, open(model_name, 'wb'))
if my_file.is_file():
    print 'MODEL SAVED : no errors occurred'
else:
    print 'MODEL NOT SAVED : something went wrong'
|
from flask import Flask, render_template, request
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map, icons
import json
import pandas as pd
import requests
import geopandas as gpd
app = Flask(__name__)
markers = [{
"coords":{'lat': 38.694862, 'lng': -122.772130},
'iconImage':'http://maps.google.com/mapfiles/ms/icons/firedept.png',
'content':'<h3>Kincade Fire</h3> <p>74% contained as of 11/02. For fire status updates visit <a href=\\"https:\/\/www.fire.ca.gov/incidents/2019/10/23/kincade-fire/\\" target=\\"_blank\\">Cal-fire website</a>.</p>'
},
{
'coords':{'lat':38.434535, 'lng':-122.701085},
'iconImage':'http://maps.google.com/mapfiles/ms/icons/homegardenbusiness.png',
'content':'<h3>Santa Rosa Veterans Memorial Building (evacuation center)</h3> <p>If you are trying to locate your large animal contact Animal Services at <a href=\\"tel:17075657100\\">(707) 565-7100</a>.</p>'
},
{
'coords':{'lat':38.610939, 'lng':-122.868083},
'iconImage':'http://maps.google.com/mapfiles/ms/icons/homegardenbusiness.png',
'content':'<h3>St. Paul’s Church Healdsburg CA(warming center)</h3>'
}
]
# reading the road_closures csv
road = pd.read_csv('road_closures.csv')
#getting only the closed roads related to fire
road = road[road['status'].str.contains('Fire')]
# loop over each closed road, geocode it via the Places API,
# and format the result as a map marker
road_geo = []
for i in road.road:
i = i.replace(' ', '%20')
url = f'https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input={i}&inputtype=textquery&fields=geometry&key={YOUR_API_KEY}'
res = requests.get(url)
go = res.json()
mo =go['candidates'][0]['geometry']['location']
road_geo.append({"coords":mo, "iconImage" : 'http://maps.google.com/mapfiles/kml/shapes/caution.png'})
fire_map = gpd.read_file('MODIS_C6_USA_contiguous_and_Hawaii_24h/')
fire_map_loc = []
for i,k in zip(fire_map['LATITUDE'],fire_map['LONGITUDE']):
fire_map_loc.append({"coords" : {'lat' : i,'lng' : k }, 'iconImage' : 'http://maps.google.com/mapfiles/ms/icons/firedept.png'})
@app.route('/')
def hello_world():
return render_template('dir.html', markers=json.dumps(markers), road_geo=json.dumps(road_geo), fire_map_loc=json.dumps(fire_map_loc))
if __name__ == '__main__':
app.run(host="localhost", port=8000, debug=True)
url = f'https://maps.googleapis.com/maps/api/geocode/json?address=Walmart+Falls+Church&key={YOUR_API_KEY}'
|
def fourSum(nums, target):
    # Fix k (left end) and m (right end) with two outer loops; i and j are
    # two pointers that close in from both ends between them.
    nums.sort()  # sort so duplicates sit next to each other, making de-duplication easy
    n = len(nums)
    res = []
    for k in range(n - 3):  # sweep k from the left
        if k > 0 and nums[k] == nums[k - 1]: continue  # de-dup: only take the first of equal elements
        for m in range(n - 1, k + 2, -1):  # sweep m from the right
            if m < n - 1 and nums[m] == nums[m + 1]: continue  # de-dup
            i = k + 1
            j = m - 1
            while i < j:
                sum = nums[i] + nums[j] + nums[k] + nums[m]
                cha = sum - target
                if cha < 0:  # sum too small: move the left pointer up
                    i += 1
                    while i < j and nums[i] == nums[i - 1]: i += 1  # de-dup: never take the same value twice
                elif cha > 0:  # sum too big: move the right pointer down
                    j -= 1
                    while i < j and nums[j] == nums[j + 1]: j -= 1  # de-dup
                else:
                    res.append([nums[k], nums[i], nums[j], nums[m]])  # found a matching quadruple
                    i += 1  # both pointers must move here, otherwise the loop never ends
                    j -= 1
                    while i < j and nums[i] == nums[i - 1]: i += 1  # de-dup, as above
                    while i < j and nums[j] == nums[j + 1]: j -= 1  # de-dup
    return res
nums=[-3,-1,0,2,4,5]
target=0
print(fourSum(nums, target))
|
import albumentations
class DataTransformManager:
def __init__(self, used_img_size, final_img_size, transform_params, custom_additional_targets=None):
if custom_additional_targets is None:
custom_additional_targets = {"image2": "image", "image3": "image", "image4": "image"}
self._custom_additional_targets = custom_additional_targets
self._ratio = max(float(final_img_size[0]) / used_img_size[0],
float(final_img_size[1]) / used_img_size[1])
self._final_img_size = final_img_size
self._scale_compose = [
albumentations.Resize(
height=int(used_img_size[0] * self._ratio),
width=int(used_img_size[1] * self._ratio),
always_apply=True
),
albumentations.CenterCrop(
height=self._final_img_size[0],
width=self._final_img_size[1],
always_apply=True,
p=1
)
]
self._normalize_transform = albumentations.Normalize()
self._normalize_no_transform = albumentations.Normalize(mean=(0, 0, 0), std=(1, 1, 1))
        self._train_compose = list(self._scale_compose)  # copy: appends below must not mutate the shared scale list
if "flip" in transform_params and transform_params["flip"]:
flip_compose = [albumentations.HorizontalFlip()]
self._train_compose = flip_compose + self._train_compose
if "filters" in transform_params and transform_params["filters"]:
random_compose = [
albumentations.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
contrast_limit=(-0.2, 0.2), p=0.5),
albumentations.RandomGamma(gamma_limit=(90, 110), p=0.5),
albumentations.ChannelShuffle(p=0.5),
]
self._train_compose = random_compose + self._train_compose
if "normalize" in transform_params and transform_params["normalize"]:
self._train_compose.append(albumentations.Normalize())
else:
self._train_compose.append(albumentations.Normalize(mean=(0, 0, 0), std=(1, 1, 1)))
def get_train_transform(self):
return albumentations.Compose(self._train_compose, additional_targets=self._custom_additional_targets)
def get_validation_transform(self, with_resize=True, with_normalize=True):
scale_compose = self._scale_compose if with_resize else []
return albumentations.Compose(scale_compose + self.get_normalize(with_normalize),
additional_targets=self._custom_additional_targets)
def get_test_transform(self, with_normalize=True):
return albumentations.Compose(self._scale_compose + self.get_normalize(with_normalize),
additional_targets=self._custom_additional_targets)
def get_normalize(self, with_normalize=True):
if with_normalize:
return [self._normalize_transform]
return [self._normalize_no_transform]
def get_normalize_transform(self, with_normalize=True):
return albumentations.Compose(self.get_normalize(with_normalize))
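# Hedged usage sketch: the sizes and flags below are illustrative, not taken
# from any project config. 256x256 inputs are resized and centre-cropped to
# 224x224, with flips, photometric filters, and normalization for training.
if __name__ == '__main__':
    manager = DataTransformManager(
        used_img_size=(256, 256),
        final_img_size=(224, 224),
        transform_params={"flip": True, "filters": True, "normalize": True},
    )
    print(manager.get_train_transform())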
|
"""
Drunken Python
Python got drunk and the built-in functions str() and int() are acting odd:
str(4) ➞ 4
str("4") ➞ 4
int("4") ➞ "4"
int(4) ➞ "4"
You need to create two functions to substitute
str() and int(). A function called int_to_str()
that converts integers into strings and a
function called str_to_int() that converts strings
into integers.
Examples:
int_to_str(4) ➞ "4"
str_to_int("4") ➞ 4
int_to_str(29348) ➞ "29348"
Notes
This is meant to illustrate the dangers of
using already-existing function names.
Extra points if you can de-drunk Python.
"""
# int, str = str, int
def int_to_str(n):
return str(n)
def str_to_int(s):
return int(s)
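# A hedged sketch of "de-drunking" (the extra-points note above): if the
# built-in names were shadowed at module level, e.g. via `int, str = str, int`,
# rebinding them from the builtins module restores the originals (Python 3).
def de_drunk():
    import builtins
    global int, str
    int = builtins.int
    str = builtins.str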
|
from collections import defaultdict
# A problem where you build the graph data needed for DFS yourself, then run the DFS
def solution(begin, target, words):
    # build adjacency lists between the words
Ldic = defaultdict(list)
words.append(begin)
for i in words:
for j in words:
check = 0
for n in range(len(j)):
if i[n] == j[n]:
check += 1
if check == len(j) - 1:
Ldic[i].append(j)
# DFS
answer = - 1
visitor = []
stack = [begin]
while stack:
check = 0
answer += 1
q = stack.pop()
if q == target:
return answer
if q not in visitor:
visitor.append(q)
for i in Ldic[q]:
if i not in visitor:
check += 1
stack.append(i)
if check == 0:
answer -= 1
return 0
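# Hedged check of the solution above: on the classic word-ladder instance
# (hit -> cog) the expected answer is 4 transformations.
print(solution("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))  # 4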
# Incorrect solution
def solution(begin, target, words):
answer = 1
queue = [begin]
print(queue)
while queue:
if queue[0] == target:
return answer
for i in range(len(queue[0])):
if queue[0][i] != target[i]:
sub = queue[0][:i] + target[i] + queue[0][i + 1:]
print(sub)
if sub in words:
print(sub)
queue.append(sub)
print(queue)
queue.pop(0)
answer += 1
print(queue)
return 0
# Incorrect solution
answer = 0
def solution(begin, target, words):
global answer
try:
words.index(target)
answer = 4
return answer
except:
return answer
def dfs(start, begin_list, target, words):
global answer
begin_list[start] = 'c'
answer = 3
print(begin_list)
print('daw', words.index(''.join(begin_list)))
# try:
# idx = words.index(''.join(begin_list))
# del words[idx]
# answer += 1
# print(answer)
# dfs(start+1, begin_list, target, words)
# except:
# return answer
# print('d')
|
from __future__ import absolute_import
import glob
import logging
from dialog.configs import DialogConfiguration
from dialog.run_pipeline import create_dialog_agent, load_parsers
from log_analysis.argument_parser import model_on_logs_arguments_parser
from log_analysis.training_data_from_dialog import build_log_summaries
def parse_arguments():
"""Parses and logs command-line arguments.
Returns:
Namespace: Namespace containing parsed arguments.
"""
args = model_on_logs_arguments_parser().parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='%(levelname)s: %(asctime)s: %(message)s')
assert (args.log_directories != "")
assert (args.alpha >= args.beta)
DialogConfiguration.alpha = args.alpha
DialogConfiguration.beta = args.beta
logging.info("Log Level: %s", args.log_level)
logging.info("Log directories: %s", args.log_directories)
return args
class Interface(object):
def __init__(self, conversation):
self.conversation = conversation
self.error = False
self._sys_utterance = self._system_utterance_from_log()
self._user_utterance = self._user_utterance_from_log()
def get(self):
if self.error:
return "stop"
utterance = self._user_utterance.next()
return utterance
def put(self, actual_sys_utterance):
log_sys_utterance = self._sys_utterance.next()
if log_sys_utterance != actual_sys_utterance:
self.error = True
def _system_utterance_from_log(self):
for utterance in self.conversation.sys_utterances:
yield utterance
def _user_utterance_from_log(self):
for utterance in self.conversation.user_utterances:
yield utterance
def main():
args = parse_arguments()
log_files = glob.glob(args.log_directories + "/*.log")
log_summaries = build_log_summaries(log_files)
t_channel_parser, a_channel_parser, t_fn_parser, a_fn_parser, keyword_parser = load_parsers()
for log_summary in log_summaries:
n = len(log_summary.goals)
for i in xrange(n):
goal, conv = log_summary.goals[i], log_summary.conversations[i]
# logging.info("Processing log with recipe: %s", goal.recipe_url)
interface = Interface(conv)
dialog_agent = create_dialog_agent(
trigger_channel_parser=t_channel_parser,
trigger_fn_parser=t_fn_parser,
action_channel_parser=a_channel_parser,
action_fn_parser=a_fn_parser,
keyword_parser=keyword_parser, istream=interface,
ostream=interface)
dialog_agent.start_session()
if interface.error:
logging.info("Error in log with recipe: %s", goal.recipe_url)
if __name__ == '__main__':
main()
|
import pygal
from data_visualization.die import Die
die1 = Die(8)
die2 = Die(8)
die3 = Die(8)
# Make some rolls and store the results in a list
rolls = [die1.roll() + die2.roll() + die3.roll() for roll_num in range(100000)]
# Analyze the results
frequencies = [rolls.count(value) for value in range(3, (die1.num_sides + die2.num_sides + die3.num_sides + 1))]
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling three D8 dice 100,000 times."
hist.x_labels = [str(i) for i in range(3, 25)]
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D8 + D8 + D8', frequencies)
hist.render_to_file('three_d8_visual.svg')
|
""" Calc code example """
class Calc(object):
""" Calculator Class """
def __init__(self, first, second):
self._first = first
self._second = second
def sum_call(self):
""" Sum Def """
return self._first+self._second
def div_call(self):
""" Div Def """
return self._first/self._second
def mult_call(self):
""" Mult Def """
return self._first*self._second
def sub_call(self):
""" Subs Def """
return self._first-self._second
def main():
""" Initiate the Calc """
first_value = 500
second_value = 39
# Object creation
calc_run = Calc(first_value, second_value)
# Sum
print("Sum of {} + {} is: {}".format(first_value, second_value,
calc_run.sum_call()))
# Division
print("Division of {} / {} is: {}".format(first_value, second_value,
calc_run.div_call()))
# Multiplication
print("Multiplication of {} * {} is: {}".format(first_value, second_value,
calc_run.mult_call()))
# Subtraction
print("Subtraction of {} - {} is: {}".format(first_value, second_value,
calc_run.sub_call()))
if __name__ == '__main__':
main()
|
import sys
import pandas as pd
import util
import numpy as np
def answer(x):
if x is np.nan:
return 0
return 1
def loadorder(f, vali):
order = pd.read_csv(f)
v = set()
a = open(vali).read().split('\n')[:-1]
for line in a:
v.add(line)
order['ts'] = order['time'].apply(util.convert_ts)
order['call'] = order['passenger_id'].apply(lambda x:1)
order['answer'] = order['driver_id'].apply(answer)
    order = order.drop(columns=['time', 'passenger_id', 'driver_id'])
    return order
if __name__ == '__main__':
loadorder(sys.argv[1], sys.argv[2])
|
import logging
from schedule_matcher_bot import ScheduleMatchingBot
def main():
logging.debug('[start] Schedule-matching bot.')
schedule_matching_bot = ScheduleMatchingBot()
schedule_matching_bot.start()
if __name__ == '__main__':
main()
|
num = str(int(input()))
reverse = int(num[::-1])
print(reverse)
|
# -*- CODING: PYTHON V2 -*-
from carizy.items import CarizyItem
from scrapy import Request
import scrapy
class CarizyspiderSpider(scrapy.Spider):
name = 'carizyspider'
#start_urls = ['http://www.carizy.com/voiture-occasion?page={i}'
allowed_domains = ['carizy.com']
custom_settings = {
'LOG_FILE': 'logs/carizy.log',
'LOG_LEVEL':'ERROR'
}
def start_requests(self):
start_url = "http://www.carizy.com/voiture-occasion?page={i}"
for i in range(0,66):
yield Request(start_url.format(i=i), self.parse)
def parse(self, response):
print('PROCESSING...' + response.url)
for annonce in response.xpath("//div[contains(@class,'col-lg-8 col-md-8 col-sm-8 col-xs-7')]"):
item = CarizyItem()
try:
item['TITLE'] = annonce.css('h2.title-model::text').extract_first()
except:
print('ERROR TITLE PARSE...' + response.url)
try:
item['ANNONCE_LINK'] = response.urljoin(annonce.css('a::attr(href)').extract_first())
except:
print('ERROR ANNONCE LINK PARSE...' + response.url)
yield item
|
# Watch for the carry that can remain after both lists have been traversed:
# an extra node must be appended for it.
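# LeetCode normally supplies ListNode; this minimal stand-in (hedged, not part
# of the original snippet) lets the annotations below resolve so the file runs
# on its own.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next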
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
h1, h2 = l1, l2
b = 0
head = ListNode(0)
pre = head
while h1 and h2:
node = ListNode(0)
node.val = (h1.val + h2.val + b) % 10
b = (h1.val + h2.val + b) // 10
pre.next = node
pre = node
h1, h2 = h1.next, h2.next
if h1:
while h1:
node = ListNode(0)
node.val = (h1.val + b) % 10
b = (h1.val + b) // 10
pre.next = node
pre = node
h1 = h1.next
else:
while h2:
node = ListNode(0)
node.val = (h2.val + b) % 10
b = (h2.val + b) // 10
pre.next = node
pre = node
h2 = h2.next
if b:
node = ListNode(0)
node.val = b
pre.next = node
return head.next
# A streamlined version: fewer lines of code, but it re-checks every condition
# on each iteration, so it actually takes longer.
class Solution:
def addTwoNumbers(self, h1: ListNode, h2: ListNode) -> ListNode:
b = 0
head = ListNode(0)
pre = head
while h1 or h2 or b:
val1 = h1.val if h1 else 0
val2 = h2.val if h2 else 0
s = val1 + val2 + b
node = ListNode(0)
node.val = s % 10
b = s // 10
pre.next = node
pre = node
if h1: h1 = h1.next
if h2: h2 = h2.next
return head.next
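# Hedged demo helpers, not part of the original snippet: build a
# least-significant-digit-first list, read one back, and exercise the compact
# solution above.
def _build(digits):
    head = ListNode(0)
    cur = head
    for d in digits:
        cur.next = ListNode(d)
        cur = cur.next
    return head.next

def _collect(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

# 342 + 465 = 807, stored digit-reversed: [2,4,3] + [5,6,4] -> [7,0,8]
print(_collect(Solution().addTwoNumbers(_build([2, 4, 3]), _build([5, 6, 4]))))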
|
from rest_framework.response import Response
from rest_framework import status
def judger_account_required():
def decorator(func):
def _wrapped_view(request, *args, **kwargs):
if not request.user.is_authenticated:
return Response(
{'detail': 'Login required.'}, status=status.HTTP_401_UNAUTHORIZED
)
if not request.user.is_judger:
return Response(
{'detail': 'Judger account required.'}, status=status.HTTP_403_FORBIDDEN
)
return func(request, *args, **kwargs)
return _wrapped_view
return decorator
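# Hedged usage sketch: the factory wraps a DRF function-based view; the view
# and the stubbed request below are hypothetical, not part of this project.
@judger_account_required()
def submit_verdict(request):
    return Response({'detail': 'ok'})

# class _StubUser:
#     is_authenticated = True
#     is_judger = False
# class _StubRequest:
#     user = _StubUser()
# submit_verdict(_StubRequest())  # returns the 403 "Judger account required" Response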
|
import collections
import os
import subprocess
import _pyterminalsize
_sources = ('environment', 'stdin', 'stdout', 'stderr', 'tput', 'fallback')
SizeSource = collections.namedtuple('SizeSource', _sources)(*_sources)
Size = collections.namedtuple('Size', ('columns', 'lines', 'source'))
def _from_tput():
# tput doesn't respond when stderr is piped.
# But, if we don't have TERM, tput will spew:
# $ env -i tput cols
# tput: No value for $TERM and no -T specified
if not os.environ.get('TERM'):
raise OSError('Cannot determine cols / lines without TERM')
proc = subprocess.Popen(
('tput', '-S'), stdout=subprocess.PIPE, stdin=subprocess.PIPE,
)
output = proc.communicate(b'cols\nlines\n')[0]
if proc.returncode:
raise OSError('tput returned ' + str(proc.returncode))
columns, lines = map(int, output.splitlines())
return columns, lines
def get_terminal_size(fallback=(80, 24)):
# First try from the environment (I'm not even sure if this is possible?)
try:
return Size(
int(os.environ['COLUMNS']), int(os.environ['LINES']),
SizeSource.environment,
)
except (ValueError, KeyError):
pass
# Then try from file descriptors
for fd, source in (
(0, SizeSource.stdin),
(1, SizeSource.stdout),
(2, SizeSource.stderr),
):
try:
return Size(*(_pyterminalsize.get_terminal_size(fd) + (source,)))
except OSError:
pass
# Then try from tput (this makes cygwin work)
try:
return Size(*(_from_tput() + (SizeSource.tput,)))
except OSError:
pass
return Size(*(fallback + (SizeSource.fallback,)))
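# Hedged demo of the fallback chain above: print the detected size and which
# source supplied it.
if __name__ == '__main__':
    size = get_terminal_size()
    print('{0}x{1} (source: {2})'.format(size.columns, size.lines, size.source))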
|
from math import pi
import numpy
#INPUTS: list of pairable vectors. Obj1 should be a list of vectors
#that you want to match to the corresponding obj2.
def vector_angle(v1, v2):
v1_u = v1 / numpy.linalg.norm(v1)
v2_u = v2 / numpy.linalg.norm(v2)
return numpy.arccos(numpy.clip(numpy.dot(v1_u, v2_u), -1.0, 1.0))
def zero_vector_pair(v_pair):
trans = [0. - v_pair[0][0], 0. - v_pair[0][1], 0. - v_pair[0][2]]
new_pair = []
for point in v_pair:
new_point = numpy.array(point) + numpy.array(trans)
new_pair.append(new_point)
return new_pair
def calc_rmsa(obj1, obj2, ratio=(pi/6.0)):
compare_vectors = zip(obj1, obj2)
vector_ang_sum = 0.0
for vector_pairs in compare_vectors:
vector1 = zero_vector_pair(vector_pairs[0])
vector2 = zero_vector_pair(vector_pairs[1])
vector_ang_sum += vector_angle(numpy.array(vector1[1]), numpy.array(vector2[1]))
rmsa = ((vector_ang_sum/ratio)**2 / len(compare_vectors))**0.5
return rmsa
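# Hedged demo: two pairs that are identical after translation to the origin
# should give an RMSA of 0.
if __name__ == '__main__':
    pair_a = [[0., 0., 0.], [1., 0., 0.]]
    pair_b = [[1., 1., 1.], [2., 1., 1.]]
    print(calc_rmsa([pair_a], [pair_b]))  # 0.0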
|
#!/usr/bin/env python2
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Fam Zheng <fam@euphon.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from message import Message
import series
import patch
import pymongo
import datetime
import pickle
import bson.binary
import search
class MessageDuplicated(Exception):
pass
class MessageNotFound(Exception):
pass
def _list_add(l, *add):
return l + [x for x in add if x not in l]
def _sensible_cmp(x, y):
"""Compare two patches by message-id sequence, otherwise date"""
if not patch.is_patch(x) or not patch.is_patch(y):
return -cmp(x.get_date(), y.get_date())
a = x.get_message_id()
b = y.get_message_id()
while b and a.startswith(b[0]):
a = a[1:]
b = b[1:]
while b and a.endswith(b[-1]):
a = a[:-1]
b = b[:-1]
try:
an = int(a)
bn = int(b)
return cmp(an, bn)
except:
return cmp(a, b)
class DB(object):
_status_prefix = "s-"
def __init__(self, server, port, dbname):
self._db_name = dbname + "-default"
self._client = pymongo.MongoClient(server, port)
self._db = self._client[self._db_name]
self._messages = self._db.messages
self._identities = self._db.identities
def reset(self):
self._messages.remove()
self._messages.create_index([('message-id', pymongo.DESCENDING),
('in-reply-to', pymongo.DESCENDING),
('date', pymongo.DESCENDING),
('untagged-subject', pymongo.DESCENDING)])
def _init_status(self, m, d):
status = {}
for k, v in d.iteritems():
if k.startswith(self._status_prefix):
m.set_status(k[len(self._status_prefix):], v)
def _series_from_dict(self, d):
if 'mbox' not in d:
return None
ret = series.Series(d['mbox'])
self._init_status(ret, d)
return ret
def _message_from_dict(self, d):
if 'mbox' not in d:
return None
ret = Message(d['mbox'])
self._init_status(ret, d)
return ret
def get_message(self, msg_id):
r = self._messages.find_one({"message-id": msg_id})
if not r:
return None
return self._message_from_dict(r)
def get_series(self, msg_id):
r = self._messages.find_one({"message-id": msg_id})
if r and series.is_series(self._message_from_dict(r)):
return self._series_from_dict(r)
def _status_list_add(self, msg_id, field, new):
if isinstance(new, tuple):
new = list(new)
l = self.get_status(msg_id, field, [])
l = _list_add(l, new)
self.set_status(msg_id, field, l)
def _add_patch(self, msg_id, patch_msg_id):
return self._status_list_add(msg_id, "patches", patch_msg_id)
def _add_reply(self, msg_id, reply_msg_id):
return self._status_list_add(msg_id, "replies", reply_msg_id)
def _obsolete_previous_series(self, m):
name = m.get_subject(strip_tags=True)
version = m.get_version()
prev = self._messages.find({"untagged-subject": name, "is-series": True})
for p in prev:
pm = self._message_from_dict(p)
if version <= max(pm.get_status("obsoleted-by-version", 0), pm.get_version()):
continue
print "obsolete '%s' %d => %d" % (name, pm.get_version(), version)
self.set_statuses(p['message-id'], {'obsoleted-by': m.get_message_id(),
'obsoleted-by-version': m.get_version()})
def _get_top_message(self, msg_id, check):
seen = set([msg_id])
while True:
m = self.get_message(msg_id)
if not m:
return None
if check(m):
return m
msg_id = m.get_in_reply_to()
if msg_id in seen or not msg_id:
return None
seen.add(msg_id)
def _get_top_series_or_patch(self, msg_id):
return self._get_top_message(msg_id, lambda x: series.is_series(x) or patch.is_patch(x))
def _get_top_series(self, msg_id):
return self._get_top_message(msg_id, series.is_series)
def process_message(self, msg_id):
"""Process a new seen msg and update db"""
m = self.get_message(msg_id)
assert m
irt = m.get_in_reply_to()
revby = m.get_reviewed_by()
p = self._get_top_series_or_patch(msg_id)
s = self._get_top_series(msg_id)
if irt:
# A reply to some other message
self._add_reply(irt, msg_id)
if patch.is_patch(m):
self._add_patch(irt, msg_id)
elif m.is_reply():
if s:
self._status_list_add(s.get_message_id(), "repliers", m.get_from())
if revby:
if p:
# Mark the target of review, either a patch or a series, reviewed
self._status_list_add(p.get_message_id(), "reviewed-by", revby)
if s:
self._status_list_add(s.get_message_id(), "reviewers", revby)
if patch.is_patch(p):
# This is a review on patch, book it in series
self._status_list_add(s.get_message_id(),
"reviewed-patches",
p.get_message_id())
else:
# This is a review on series, mark all patches reviewed
for i in self.get_patches(s):
self._status_list_add(s.get_message_id(),
"reviewed-patches",
i.get_message_id())
else:
# A top message
if series.is_series(m):
self._obsolete_previous_series(m)
def add_message(self, m):
"""Add a new message to DB"""
e = self._messages.find_one({'message-id': m.get_message_id()})
if e and e.get('from'):
raise MessageDuplicated(e)
d = {
'message-id': m.get_message_id(),
'mbox': bson.binary.Binary(m.mbox()),
'in-reply-to': m.get_in_reply_to(),
'date': m.get_date(),
'from': m.get_from(),
'subject': m.get_subject(),
'untagged-subject': m.get_subject(strip_tags=True),
'tags': list(m.get_tags()),
'is-series': series.is_series(m),
}
if e:
for k, v in e.iteritems():
d[k] = d.get(k, v)
self._messages.save(d)
return m.get_message_id()
def get_statuses(self, msg_id):
r = {"message-id": msg_id}
m = self._messages.find_one(r)
if m:
for k, v in m.iteritems():
if k.startswith(self._status_prefix):
r[k[len(self._status_prefix):]] = v
return r
def get_status(self, msg_id, st, default=None):
s = self.get_statuses(msg_id)
return s.get(st, default)
def set_statuses(self, msg_id, args):
m = self._messages.find_one({"message-id": msg_id})
if not m:
m = {"message-id": msg_id}
for k, v in args.iteritems():
key = self._status_prefix + k
if v is None and key in m:
del m[key]
continue
m[key] = v
self._messages.save(m)
def set_status(self, msg_id, name, value):
return self.set_statuses(msg_id, {name: value})
def _find_series_iter(self, query="", skip=0, limit=0, sort_keys=['date']):
q = {'is-series': True}
sort = [(s, pymongo.DESCENDING) for s in sort_keys]
if query:
filter0 = search.Filter(query)
else:
filter0 = None
n = 0
for i in self._messages.find(q, sort=sort):
s = self._series_from_dict(i)
if not series.is_series(s):
continue
if not query or filter0.match(s):
n += 1
if n > skip:
yield s
if limit and n > limit + skip:
break
def find_series_count(self, query=""):
num = 0
for i in self._find_series_iter(query=query):
num += 1
return num
def find_series(self, query="", skip=0, limit=0, sort_keys=['date']):
"""query all the series with tags and status with pagination, but skip
and limit are applied before tags and status filtering"""
for m in self._find_series_iter(query=query, skip=skip, limit=limit, sort_keys=sort_keys):
yield m
def find_messages(self):
for i in self._messages.find():
if not i.get('mbox'):
continue
yield self._message_from_dict(i)
def get_patches(self, s):
r = [self.get_message(x) for x in s.get_status("patches", [])]
r.sort(_sensible_cmp)
if not r:
r = [s]
return r
def get_replies(self, m):
r = [self.get_message(x) for x in m.get_status("replies", [])]
r.sort(_sensible_cmp)
return r
def save_identity_pair(self, i, key):
self._identities.remove({'identity': i})
self._identities.insert({'identity': i, 'key': key})
def get_key(self, i):
a = self._identities.find_one({'identity': i})
if a:
return str(a['key'])
|
# @created 25-8-2015
# @author MCS
# @description comms.py module for high level communications between ESTR and PC
import serial # for UART hardware abstraction.
class Comms(object):
def __init__(self, COM_port, baudrate): # initialise the COMs port on the PC.
"""initialise the COMs port on the PC, and opens it."""
self.ser = serial.Serial()
self.ser.port = COM_port
self.ser.baudrate = baudrate
self.ser.timeout = 0
self.ser.parity = serial.PARITY_EVEN
self.ser.open()
    def sendStr(self, text):
        """Write a string to the COM port."""
        self.ser.write(text)
def readChar(self):
"""Read a character from the COM port."""
return self.ser.read()
def openCOMPort(self):
"""Open the COM port."""
self.ser.open()
def closeCOMPort(self):
"""Close the COM port."""
self.ser.close()
def inWaiting(self):
"""return the number of bytes waiting in the COM port."""
return self.ser.inWaiting()
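# Hedged usage sketch (needs real hardware; the port name and baud rate are
# illustrative):
# link = Comms('COM3', 9600)
# link.sendStr('STATUS\r\n')
# if link.inWaiting():
#     print(link.readChar())
# link.closeCOMPort()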
|
#!/usr/bin/env python
import pygame
import mimo
from .BaseScene import SceneBase
from utils import utils
from utils import neopixelmatrix as graphics
from utils.NeoSprite import NeoSprite, AnimatedNeoSprite, TextNeoSprite, SpriteFromFrames
from utils import constants
# Boot Scene
# should reset all button and light states,
# clear the led matrix and led ring
# all inputs are locked
# set colors for all leds, turn all leds and increase the brightness
# this boot scene should take some seconds, max 10? 8-6?
# some aditional test display that specific modules are loading.
# after that change to the next scene - tutorial
class BootScene(SceneBase):
def __init__(self):
SceneBase.__init__(self)
self.logo = utils.Sprite(
constants.SPRITES_INTRO + 'logo_MCorp.png',
constants.VIEWPORT_CENTER_X,
constants.VIEWPORT_CENTER_Y
)
self.logo.SetOpacity(0)
self.sfx_mimo_logo = utils.get_sound('assets/audio/SFX/M_OS/UI_Booth.ogg')
self.AddTrigger(0.1, self.sfx_mimo_logo, 'play')
self.AddTrigger(9.2, self, 'SwitchToScene', "Edit")
mimo.set_led_brightness(150)
font = pygame.font.Font("assets/fonts/VCR_OSD_MONO_1.001.ttf", 24)
self.title = utils.Text("M-OS STARTING", font)
self.title.SetOpacity(0)
self.title.SetColor(constants.PALETTE_TEXT_RED)
self.title.SetPosition(constants.VIEWPORT_CENTER_X, 500)
self.text_updater_counter = 0
self.text_updater_frequency = 0.06
self.text_updater_values = ['|', '\\', '-', '/']
self.text_updater_index = 0
resolution = 6
self.AddTween("easeInOutSine", 1.5, self.title, "opacity", 0, 255, 0)
self.AddTween("easeInOutSine", 1.5, self.logo, "opacity", 0, 255, 0, resolution)
self.AddTween("easeInOutSine", 1.5, self.logo, "opacity", 255, 0, 1.5, resolution)
self.AddTween("easeInOutSine", 1.5, self.logo, "opacity", 0, 255, 3, resolution)
self.AddTween("easeInOutSine", 1.5, self.logo, "opacity", 255, 0, 4.5, resolution)
self.AddTween("easeInOutSine", 1.5, self.logo, "opacity", 0, 255, 6, resolution)
self.AddTween("easeInOutSine", 1.5, self.logo, "opacity", 255, 0, 7.5, resolution)
self.AddTween("easeInOutSine", 1.5, self.title, "opacity", 255, 0, 7.5, resolution)
self.brightness = 1
self.cache_brightness = 1
self.scheduleTextLoader('es')
self.whiteSprite = NeoSprite('assets/white.png')
self.reset_mimo()
def ProcessInput(self, events, pressed_keys):
pass
def Update(self, dt):
SceneBase.Update(self, dt)
self.text_updater_counter += dt
if self.text_updater_counter > self.text_updater_frequency:
if self.cache_brightness != int(self.brightness):
mimo.set_led_brightness(int(self.brightness))
self.cache_brightness = self.brightness
self.text_updater_index += 1
self.text_updater_counter = 0
if self.text_updater_index >= len(self.text_updater_values):
self.text_updater_index = 0
self.title.DecorateText(self.text_updater_values[self.text_updater_index] + ' ', ' '+self.text_updater_values[-self.text_updater_index])
def Render(self, screen):
screen.fill(constants.PALETTE_TEXT_BLACK)
self.logo.RenderWithAlpha(screen)
self.title.RenderWithAlpha(screen)
graphics.setColor(0xfff)
self.whiteSprite.render()
graphics.render()
def reset_mimo(self):
mimo.set_led_brightness(1)
max_brightness = 150
mat_all_lights = []
for index in range(0, 28):
mat_all_lights += [index, 255, 255, 255]
mimo.set_material_leds_color(mat_all_lights)
opt_all_lights = []
for index in range(0, 5):
opt_all_lights += [index, 255, 255, 255]
mimo.set_optimization_leds_color(opt_all_lights)
mat_lights_on = []
for index in range(0, 28):
mat_lights_on += [index, 0, 0, 0]
opt_lights_on = []
for index in range(0, 5):
opt_lights_on += [index, 0, 0, 0]
self.brightness = 1
self.AddTween("easeInOutSine", 1, self, "brightness", 1, max_brightness, 0)
self.AddTween("easeInOutSine", 1, self, "brightness", max_brightness, 1, 1.5)
self.AddTween("easeInOutSine", 1, self, "brightness", 1, max_brightness, 3)
self.AddTween("easeInOutSine", 1, self, "brightness", max_brightness, 1, 4.5)
self.AddTween("easeInOutSine", 1, self, "brightness", 1, max_brightness, 6)
self.AddTween("easeInOutSine", 1, self, "brightness", max_brightness, 1, 7.5)
self.AddTrigger(9.1, mimo, 'set_material_leds_color', mat_lights_on)
self.AddTrigger(9.1, mimo, 'set_optimization_leds_color', opt_lights_on)
self.AddTrigger(9.1, mimo, 'clean_matrix')
def scheduleTextLoader(self, lang):
if lang == 'en':
self.AddTrigger(1.9, self.title, 'SetText', '')
self.AddTrigger(2, self.title, 'SetText', 'LOADING')
self.AddTrigger(2.1, self.title, 'SetText', 'LOADING EMOSENSE')
self.AddTrigger(2.2, self.title, 'SetText', 'LOADING EMOSENSE PREDICTOR')
self.AddTrigger(2.3, self.title, 'SetText', 'LOADING EMOSENSE PREDICTOR.')
self.AddTrigger(2.4, self.title, 'SetText', 'LOADING EMOSENSE PREDICTOR..')
self.AddTrigger(2.5, self.title, 'SetText', 'LOADING EMOSENSE PREDICTOR...')
self.AddTrigger(3.5, self.title, 'SetText', '')
self.AddTrigger(3.6, self.title, 'SetText', 'PROCESSING')
self.AddTrigger(3.7, self.title, 'SetText', 'PROCESSING EMOTIONAL')
self.AddTrigger(3.8, self.title, 'SetText', 'PROCESSING EMOTIONAL OPTIMIZATION')
self.AddTrigger(3.9, self.title, 'SetText', 'PROCESSING EMOTIONAL OPTIMIZATION MODULES')
self.AddTrigger(4.0, self.title, 'SetText', 'PROCESSING EMOTIONAL OPTIMIZATION MODULES.')
self.AddTrigger(4.1, self.title, 'SetText', 'PROCESSING EMOTIONAL OPTIMIZATION MODULES..')
self.AddTrigger(4.2, self.title, 'SetText', 'PROCESSING EMOTIONAL OPTIMIZATION MODULES...')
self.AddTrigger(5.0, self.title, 'SetText', '')
self.AddTrigger(5.1, self.title, 'SetText', 'INITIALIZING')
self.AddTrigger(5.2, self.title, 'SetText', 'INITIALIZING PUCHINTZKY')
self.AddTrigger(5.3, self.title, 'SetText', 'INITIALIZING PUCHINTZKY ALGORITHM')
self.AddTrigger(5.4, self.title, 'SetText', 'INITIALIZING PUCHINTZKY ALGORITHM ENGINE')
self.AddTrigger(5.5, self.title, 'SetText', 'INITIALIZING PUCHINTZKY ALGORITHM ENGINE.')
self.AddTrigger(5.6, self.title, 'SetText', 'INITIALIZING PUCHINTZKY ALGORITHM ENGINE..')
self.AddTrigger(5.7, self.title, 'SetText', 'INITIALIZING PUCHINTZKY ALGORITHM ENGINE...')
self.AddTrigger(6.0, self.title, 'SetText', 'M')
self.AddTrigger(6.1, self.title, 'SetText', 'M-')
self.AddTrigger(6.2, self.title, 'SetText', 'M-O')
self.AddTrigger(6.3, self.title, 'SetText', 'M-OS')
self.AddTrigger(6.4, self.title, 'SetText', 'M-OS ')
self.AddTrigger(6.5, self.title, 'SetText', 'M-OS I')
self.AddTrigger(6.6, self.title, 'SetText', 'M-OS IS')
self.AddTrigger(6.7, self.title, 'SetText', 'M-OS IS ')
self.AddTrigger(6.8, self.title, 'SetText', 'M-OS IS R')
self.AddTrigger(6.9, self.title, 'SetText', 'M-OS IS RE')
self.AddTrigger(7.0, self.title, 'SetText', 'M-OS IS REA')
self.AddTrigger(7.1, self.title, 'SetText', 'M-OS IS READ')
self.AddTrigger(7.2, self.title, 'SetText', 'M-OS IS READY')
elif lang == 'es':
self.AddTrigger(1.9, self.title, 'SetText', '')
self.AddTrigger(2, self.title, 'SetText', 'CARGANDO')
self.AddTrigger(2.1, self.title, 'SetText', 'CARGANDO EMOSENSE')
self.AddTrigger(2.2, self.title, 'SetText', 'CARGANDO EMOSENSE PREDICTOR')
self.AddTrigger(2.3, self.title, 'SetText', 'CARGANDO EMOSENSE PREDICTOR.')
self.AddTrigger(2.4, self.title, 'SetText', 'CARGANDO EMOSENSE PREDICTOR..')
self.AddTrigger(2.5, self.title, 'SetText', 'CARGANDO EMOSENSE PREDICTOR...')
self.AddTrigger(3.5, self.title, 'SetText', '')
self.AddTrigger(3.6, self.title, 'SetText', 'PROCESANDO')
self.AddTrigger(3.7, self.title, 'SetText', 'PROCESANDO MÓDULOS')
self.AddTrigger(3.8, self.title, 'SetText', 'PROCESANDO MÓDULOS DE OPTIMIZACIÓN')
self.AddTrigger(3.9, self.title, 'SetText', 'PROCESANDO MÓDULOS DE OPTIMIZACIÓN EMOCIONAL')
self.AddTrigger(4.0, self.title, 'SetText', 'PROCESANDO MÓDULOS DE OPTIMIZACIÓN EMOCIONAL.')
self.AddTrigger(4.1, self.title, 'SetText', 'PROCESANDO MÓDULOS DE OPTIMIZACIÓN EMOCIONAL..')
self.AddTrigger(4.2, self.title, 'SetText', 'PROCESANDO MÓDULOS DE OPTIMIZACIÓN EMOCIONAL...')
self.AddTrigger(5.0, self.title, 'SetText', '')
self.AddTrigger(5.1, self.title, 'SetText', 'INICIALIZANDO')
self.AddTrigger(5.2, self.title, 'SetText', 'INICIALIZANDO ALGORITMO')
self.AddTrigger(5.3, self.title, 'SetText', 'INICIALIZANDO ALGORITMO COMPLEJO')
self.AddTrigger(5.4, self.title, 'SetText', 'INICIALIZANDO ALGORITMO COMPLEJO DE PUCHINTZKY')
self.AddTrigger(5.5, self.title, 'SetText', 'INICIALIZANDO ALGORITMO COMPLEJO DE PUCHINTZKY.')
self.AddTrigger(5.6, self.title, 'SetText', 'INICIALIZANDO ALGORITMO COMPLEJO DE PUCHINTZKY..')
self.AddTrigger(5.7, self.title, 'SetText', 'INICIALIZANDO ALGORITMO COMPLEJO DE PUCHINTZKY...')
self.AddTrigger(6.0, self.title, 'SetText', 'M')
self.AddTrigger(6.1, self.title, 'SetText', 'M-')
self.AddTrigger(6.2, self.title, 'SetText', 'M-O')
self.AddTrigger(6.3, self.title, 'SetText', 'M-OS')
self.AddTrigger(6.4, self.title, 'SetText', 'M-OS ')
self.AddTrigger(6.5, self.title, 'SetText', 'M-OS E')
self.AddTrigger(6.6, self.title, 'SetText', 'M-OS ES')
self.AddTrigger(6.7, self.title, 'SetText', 'M-OS EST')
self.AddTrigger(6.8, self.title, 'SetText', 'M-OS ESTÁ')
self.AddTrigger(6.9, self.title, 'SetText', 'M-OS ESTÁ ')
self.AddTrigger(7.0, self.title, 'SetText', 'M-OS ESTÁ L')
self.AddTrigger(7.1, self.title, 'SetText', 'M-OS ESTÁ LI')
self.AddTrigger(7.2, self.title, 'SetText', 'M-OS ESTÁ LIS')
self.AddTrigger(7.3, self.title, 'SetText', 'M-OS ESTÁ LIST')
self.AddTrigger(7.4, self.title, 'SetText', 'M-OS ESTÁ LISTO')
|
# -*- coding: utf-8 -*-
import babel.dates
import re
import werkzeug
import math
from werkzeug.datastructures import OrderedMultiDict
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from odoo import fields, http, _
from odoo.addons.http_routing.models.ir_http import slug
from odoo.addons.website.controllers.main import QueryURL
from odoo.http import request
from odoo.tools.misc import get_lang
from odoo.osv import expression
class WebsiteServicesController(http.Controller):
_data_per_page = 30
_pager_max_pages = 5
@http.route(['''/serviskami/<model("services.website.mnc", "[('website_id', 'in', (False, current_website_id))]"):services>'''], type='http', auth="public", website=True, sitemap=False)
def services_detail(self, services, **post):
if not services.can_access_from_current_website():
raise werkzeug.exceptions.NotFound()
values = {
'services': services,
}
return request.render("mnc_x_gjs_website.services_detail", values)
class WebsiteNewsController(http.Controller):
_data_per_page = 30
_pager_max_pages = 5
@http.route(['''/berita/<model("news.website.mnc", "[('website_id', 'in', (False, current_website_id))]"):news>'''], type='http', auth="public", website=True, sitemap=False)
def news_detail(self, news, **post):
if not news.can_access_from_current_website():
raise werkzeug.exceptions.NotFound()
values = {
'news': news,
}
return request.render("mnc_x_gjs_website.news_detail", values)
class WebsiteCarrierController(http.Controller):
_data_per_page = 30
_pager_max_pages = 5
@http.route(['''/karir/<model("carrier.website.mnc", "[('website_id', 'in', (False, current_website_id))]"):carrier>'''], type='http', auth="public", website=True, sitemap=False)
def services_detail(self, carrier, **post):
if not carrier.can_access_from_current_website():
raise werkzeug.exceptions.NotFound()
values = {
'carrier': carrier,
}
return request.render("mnc_x_gjs_website.carrier_detail", values)
|
"""
DATASET GENERATION
"""
# IMPORTING LIBRARIES
# * General libraries
import cv2
import os
import glob
import pandas as pd
import numpy as np
import shutil
from shutil import copyfile
import argparse
# * ML specific libraries
import torch
import torchvision
from torch.utils.data import DataLoader
from sklearn import preprocessing, model_selection
# Testing function for convert_dataset function
def test_convert_dataset(path):
# setup
df = pd.DataFrame({
"Unnamed: 0": ["11.jpg", "11.jpg", "11.jpg", "11.jpg", 1],
"0": [0, 0, 224, 224, "a"],
"1": [0, 0, 224, 224, 2],
"2": [224, 0, 0, 224, "random"],
"3": [224, 0, 0, 224, -1.00],
})
df_exp = pd.DataFrame({
"file_name": ["11.jpg", "11.jpg", "11.jpg", "11.jpg"],
"x_center_norm": [0.5, 0, 0.5, 1.0],
"y_center_norm": [0.5, 0, 0.5, 1.0],
"width_norm": [1.0, 0, -1, 0],
"height_norm": [1.0, 0, -1, 0],
})
# call function
actual = convert_dataset(path, df)
# set expectations
expected = df_exp
# assertion
pd.testing.assert_frame_equal(actual, expected)
return 0
# Function to split training and test set
def dataset_split(df, folder, train_img_path, train_label_path):
"""
Split dataset into training and test set
and store in a new directory structure
Args:
df: Data Frame of the split dataset
folder : Path of the original dataset
train_img_path : Path of the training images
train_label_path : Path of the training labels
"""
filenames = []
for name in df.file_name:
filenames.append(name)
"""
Directory Structure :
--Dataset_yolo
--Images
--Train
--Val
--Dataset_yolo
--Labels
--Train
--Val
Image format .jpg, Label format .txt
(Separate .txt file label for each image)
Inside label.txt : x_center_norm, y_center_norm, width_norm, height_norm
"""
for filename in filenames:
yolo_list = []
for i, row in df[df.file_name == filename].iterrows():
yolo_list.append([0, row.x_center_norm, row.y_center_norm,
row.width_norm, row.height_norm])
yolo_list = np.array(yolo_list)
print("\n", yolo_list)
txt_filename = os.path.join(train_label_path,
str(row.file_name.split('.')[0])+".txt")
print("\n", txt_filename)
np.savetxt(txt_filename, yolo_list, fmt=["%d", "%f", "%f", "%f", "%f"])
shutil.copyfile(os.path.join(folder, row.file_name),
os.path.join(train_img_path, row.file_name))
return(0)
# Function to convert Dataset into YoloV5 compatible format
def convert_dataset(path, table):
"""
Convert dataset into Yolo V5 compatible format
Args:
path: Global path
table : Data Frame of the original dataset
"""
img_width = 224
img_height = 224
width = []
height = []
x_center = []
y_center = []
# YoloV5 compatible dataset has x_center_norm, y_center_norm,
# width_norm, height_norm as its columns
df = pd.DataFrame(columns=['file_name', 'x_center_norm',
'y_center_norm', 'width_norm', 'height_norm'])
table=table[table["0"].apply(lambda x: isinstance(x, (int, np.int64)))]
print(table)
df["file_name"] = table['Unnamed: 0'].astype(str)
df["width_norm"] = (table["2"]-table["0"]) / img_width
df["height_norm"] = (table["3"]-table["1"]) / img_height
df["x_center_norm"] = (table["0"]/img_width) + (df["width_norm"]/2)
df["y_center_norm"] = (table["1"]/img_height) + (df["height_norm"]/2)
df["width_norm"] = df["width_norm"].astype(float)
df["height_norm"] = df["height_norm"].astype(float)
df["x_center_norm"] = df["x_center_norm"].astype(float)
df["y_center_norm"] = df["y_center_norm"].astype(float)
print(df.dtypes)
print(df)
df.to_csv(os.path.join(path, 'Dataset/Dataset_yolo/BB_labels_yolo.txt'))
return(df)
# Function to load dataset
def display_dataset_images(folder, table):
"""
Display dataset images initially
Args:
folder: Path of the original dataset
table : Data Frame of the original dataset
"""
print(table)
images = []
image_path = []
filename = []
x1 = []
y1 = []
x2 = []
y2 = []
start_point = []
end_point = []
i = 0
print("Displaying dataset images ... \n")
for i in range(len(table.index)):
print("Image", i)
image_path.append(table.iloc[i, 0])
image_path[i] = os.path.join(folder, image_path[i])
print(image_path[i])
# Gets xmin, ymin, xmax, ymax when
# Table (dataset_raw) is passed as argument
# Gets x_center_norm, y_center_norm,
# width_norm, height_norm when DF (dataset_yolo) is passed as argument
x1.append(table.iloc[i, 1])
y1.append(table.iloc[i, 2])
x2.append(table.iloc[i, 3])
y2.append(table.iloc[i, 4])
# De-normalizes in the case when DF is passed as argument
if "x_center_norm" in table:
image_path[i] = os.path.join(folder, table.iloc[i, 0])
print(image_path[i])
x1[i] = int(224*(x1[i] - x2[i]/2)) # 224(x_center-width/2)
y1[i] = int(224*(y1[i] - y2[i]/2)) # 224(x_center-height/2)
x2[i] = int(224*x2[i] + x1[i] - x2[i]/2)
# 224(width+x_center-width/2)
y2[i] = int(224*y2[i] + y1[i] - y2[i]/2)
# 224(width+x_center-height/2)
start_point.append((x1[i], y1[i]))
print("Bounding box \n(xmin,ymin)", start_point[i])
end_point.append((x2[i], y2[i]))
print("(xmax,ymax)", end_point[i])
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.imread(image_path[i])
img = cv2.rectangle(img, start_point[i], end_point[i],
color=(255, 0, 0), thickness=2)
img = cv2.putText(img, table.name, (10, 20), font, 0.5,
(255, 255, 255), 2, cv2.LINE_AA)
cv2.waitKey(100)
# Displays images
cv2.imshow('image', img)
if img is not None:
images.append(img)
i = i + 1
cv2.waitKey(1000)
return(images, start_point, end_point)
# Define main function
def main():
# Define path of the dataset
path = os.path.dirname(os.path.abspath(__file__))
print("Current directory : ")
print(path)
folder = os.path.join(path, "Dataset/Dataset_raw")
os.chdir(path)
# Arguments parser
parser = argparse.ArgumentParser()
parser.add_argument('-p', "--path", default=folder, type=str,
help="Path to the raw dataset directory")
args = parser.parse_args()
print(args.path)
# Load dataset
csv_path = os.path.join(args.path, "BB_labels.csv")
print(path)
table = pd.read_csv(csv_path)
table.name = 'Raw'
print(table)
# Display dataset
print("Raw dataset ... \n")
display_dataset_images(args.path, table)
# Basic unit testing for convert_dataset function
print("Test-case \n")
test_convert_dataset(path)
# Convert dataset to Yolo Compatible
df = convert_dataset(path, table)
# Train-test split
df_train, df_valid = model_selection.train_test_split(
df, test_size=0.2,
random_state=13,
shuffle=True)
train_img_path = os.path.join(path, 'Dataset/Dataset_yolo/images/train')
train_label_path = os.path.join(path, 'Dataset/Dataset_yolo/labels/train')
valid_img_path = os.path.join(path, "Dataset/Dataset_yolo/images/val")
valid_label_path = os.path.join(path, "Dataset/Dataset_yolo/labels/val")
dataset_split(df_train, args.path, train_img_path, train_label_path)
dataset_split(df_valid, args.path, valid_img_path, valid_label_path)
print("No. of Training images", len(os.listdir(train_img_path)))
print("No. of Training labels", len(os.listdir(train_label_path)))
print("No. of valid images", len(os.listdir(valid_img_path)))
print("No. of valid labels", len(os.listdir(valid_label_path)))
# Display converted dataset for verification
print("Training dataset ... \n")
df_train.name = 'Train'
display_dataset_images(train_img_path, df_train)
print("Validation dataset ... \n")
df_valid.name = 'Test'
display_dataset_images(valid_img_path, df_valid)
if __name__ == '__main__':
main()
|
# Generated by Django 3.1.7 on 2021-03-19 21:53
import django.db.models.deletion
from django.db import migrations, models
import delivery.validators
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Courier',
fields=[
('courier_id',
models.PositiveIntegerField(primary_key=True, serialize=False,
verbose_name='Идентификатор курьера')),
('courier_type', models.CharField(
choices=[('foot', 'Пеший'), ('bike', 'Велокурьер'),
('car', 'Курьер на автомобиле')], max_length=4,
verbose_name='Тип курьера')),
],
),
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('assign_time', models.DateTimeField(auto_now_add=True,
verbose_name='Время выдачи курьеру')),
('expected_reward', models.PositiveIntegerField(
verbose_name='Ожидаемое вознаграждение')),
('courier',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='invoices',
to='delivery.courier',
verbose_name='Назначенный курьер')),
],
),
migrations.CreateModel(
name='Region',
fields=[
('code',
models.PositiveIntegerField(primary_key=True, serialize=False,
verbose_name='Код района')),
],
),
migrations.CreateModel(
name='TimeInterval',
fields=[
('name', models.CharField(max_length=11, primary_key=True,
serialize=False, validators=[
delivery.validators.interval_validator],
verbose_name='Интервал(HH:MM-HH:MM)')),
('begin', models.PositiveIntegerField(
verbose_name='Начало интервала в минутах')),
('end', models.PositiveIntegerField(
verbose_name='Конец интервала в минутах')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('order_id',
models.PositiveIntegerField(primary_key=True, serialize=False,
verbose_name='Идентификатор заказа')),
('weight', models.DecimalField(decimal_places=2, max_digits=4,
validators=[
delivery.validators.weight_validator])),
('delivery_hours',
models.ManyToManyField(db_index=True, related_name='orders',
to='delivery.TimeInterval',
verbose_name='Часы работы')),
('region',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
related_name='orders', to='delivery.region',
verbose_name='Район заказа')),
],
),
migrations.CreateModel(
name='InvoiceOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('complete_time', models.DateTimeField(null=True,
verbose_name='Время завершения заказа')),
('delivery_time', models.PositiveIntegerField(null=True,
verbose_name='Время доставки в секундах')),
('invoice',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='invoice_orders',
to='delivery.invoice')),
('order',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='invoice_orders',
to='delivery.order')),
],
),
migrations.AddField(
model_name='invoice',
name='orders',
field=models.ManyToManyField(db_index=True,
related_name='invoices',
through='delivery.InvoiceOrder',
to='delivery.Order',
verbose_name='Заказы'),
),
migrations.AddField(
model_name='courier',
name='regions',
field=models.ManyToManyField(db_index=True,
related_name='couriers',
to='delivery.Region',
verbose_name='Районы доставки'),
),
migrations.AddField(
model_name='courier',
name='working_hours',
field=models.ManyToManyField(db_index=True,
related_name='couriers',
to='delivery.TimeInterval',
verbose_name='Часы работы'),
),
]
|
#coding=UTF-8
'''
Created on 2017-03-13
@author: admin
'''
import os, os.path, datetime, locale
locale.setlocale(locale.LC_CTYPE, 'chinese')
base_dir = "D:\\apache-tomcat-7.0.6\\webapps\\"
l=os.listdir(base_dir)
l.sort(key=lambda fn: os.path.getmtime(base_dir+fn) if not os.path.isdir(base_dir+fn) else 0)
d=datetime.datetime.fromtimestamp(os.path.getmtime(base_dir+l[-1]))
print('最后改动的文件是'+l[-1]+",时间:"+d.strftime("%Y年%m月%d日 %H时%M分%S秒"))
|
import collections
class TreeNode(object):
""" Definition of a binary tree node."""
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
levels = []
if not root:
return levels
level = 0
queue = collections.deque([root,])
while queue:
# start the current level
levels.append([])
# number of elements in the current level
level_length = len(queue)
for _ in range(level_length):
node = queue.popleft()
# fulfill the current level
levels[level].append(node.val)
# add child nodes of the current level
# in the queue for the next level
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
# go to next level
level += 1
return levels
# Tree Node
# 3
# / \
# 9 20
# / \ / \
# 7 6 15 17
root = TreeNode(3)
root.left = TreeNode(9)
root.left.left = TreeNode(7)
root.left.right = TreeNode(6)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(17)
result = Solution().levelOrder(root)
print(result)
|
import tensorflow as tf
import numpy as np
import os
import scipy.misc
# output_img = graph.get_tensor_by_name("output_img:0")
# x = graph.get_tensor_by_name("x:0")
# batch_size = graph.get_tensor_by_name("batch_size:0")
batch_size_now = 2
values = np.load('celeba_variables.npz')
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name="x")
batch_size = tf.placeholder(tf.int32, None, name="batch_size")
def random_filelist(batch_size):
index = np.random.uniform(1, 202599.99, batch_size)
index = index.astype(int)
filelist = np.array(['%06i.png' % i for i in index])
return filelist
def nums_to_filelist(index):
filelist = np.array(['%06i.png' % i for i in index])
return filelist
# def weight_variable(shape):
# initial = tf.truncated_normal(shape, stddev=0.1)
# return tf.Variable(initial)
#
# def bias_variable(shape):
# initial = tf.constant(0.1, shape=shape)
# return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def deconv2d(x, W, output_shape):
return tf.nn.conv2d_transpose(x, W,output_shape, strides=[1, 2, 2, 1], padding='VALID')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# num_convs = 2
num_filters1 = 16
num_filters2 = 32
num_fc1 = 2048
num_fc2 = 512
# Rueckweg
channels = 48 # 8*3RGB also: MUSS DURCH 3 TEILBAR SEIN!
W_conv1 = tf.Variable(values['W_conv1_v'])
b_conv1 = tf.Variable(values['b_conv1_v'])
# x_flat = tf.reshape(x, [-1])
x_image = tf.reshape(x, [-1, 64, 64, 3])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = tf.Variable(values['W_conv2_v'])
b_conv2 = tf.Variable(values['b_conv2_v'])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2) # output last convlayer
W_fc1 = tf.Variable(values['W_fc1_v'])
b_fc1 = tf.Variable(values['b_fc1_v'])
h_pool2_flat = tf.reshape(h_pool2, [-1, 16*16*num_filters2])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
W_fc2 = tf.Variable(values['W_fc2_v'])
b_fc2 = tf.Variable(values['b_fc2_v'])
y_conv_ = tf.matmul(h_fc1, W_fc2)
y_conv = tf.reduce_mean(tf.add(y_conv_, b_fc2), axis=0, keep_dims=True)
W_fc1_r = tf.Variable(values['W_fc1_r_v'])
b_fc1_r = tf.Variable(values['b_fc1_r_v'])
h_fc1_r_ = tf.matmul(y_conv, W_fc1_r)
h_fc1_r = tf.add(h_fc1_r_, b_fc1_r)
h_fc1_r_flat = tf.reshape(h_fc1_r, [-1, 16, 16, channels])
W_conv2_r = tf.Variable(values['W_conv2_r_v'])
b_conv2_r = tf.Variable(values['b_conv2_r_v'])
output_shape_conv2r = [1, 32, 32, channels]
h_conv2_r = tf.nn.relu(deconv2d(h_fc1_r_flat, W_conv2_r, output_shape_conv2r) + b_conv2_r) # deconvolution1
W_conv1_r = tf.Variable(values['W_conv1_r_v'])
b_conv1_r = tf.Variable(values['b_conv1_r_v'])
output_shape_conv1r = [1, 64, 64, channels]
h_conv1_r = deconv2d(h_conv2_r, W_conv1_r, output_shape_conv1r) + b_conv1_r # deconvolution 2
# output_img = tf.nn.softmax(tf.reshape(tf.reduce_mean(h_conv1_r, axis=3, keep_dims=True), [-1]), name='output_img')
# output_img = tf.reshape(tf.reduce_mean(h_conv1_r, axis=3, keep_dims=True), [-1], name='output_img')
output_img = tf.reshape(h_conv1_r, [1, 64, 64, 3, channels//3])
output_img = tf.reduce_mean(output_img, axis=4, name='output_img')
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
filelist = nums_to_filelist([1, 2])
batch = np.array([scipy.misc.imread('./Datasets/img_align_celeba_resized/'+bild) for bild in filelist])
img = sess.run(output_img, feed_dict={x: batch, batch_size: batch_size_now})
scipy.misc.imsave('imagesCeleba64x64_mean/image.png', img[0])
|
import numpy as np
import sys
sys.path.append('..')
from chap11.dubins_params import dubins_params
from message_types.msg_path import msg_path
class path_manager:
def __init__(self):
# message sent to path follower
self.path = msg_path()
# pointers to previous, current, and next waypoints
self.ptr_previous = 0
self.ptr_current = 1
self.ptr_next = 2
self.ptrs_updated = True
# flag that request new waypoints from path planner
self.flag_need_new_waypoints = True
self.num_waypoints = 0
self.halfspace_n = np.inf * np.ones((3,1))
self.halfspace_r = np.inf * np.ones((3,1))
# state of the manager state machine
self.manager_state = 1
# dubins path parameters
self.dubins_path = dubins_params()
self.state_changed = True
def update(self, waypoints, radius, state):
if waypoints.flag_waypoints_changed:
waypoints.flag_waypoints_changed = False
self.num_waypoints = waypoints.num_waypoints
self.initialize_pointers()
self.manager_state = 1
self.flag_need_new_waypoints = False
if self.path.flag_path_changed:
self.path.flag_path_changed = False
if waypoints.num_waypoints == 0:
waypoints.flag_manager_requests_waypoints = True
else:
if waypoints.type == 'straight_line':
self.line_manager(waypoints, state)
elif waypoints.type == 'fillet':
self.fillet_manager(waypoints, radius, state)
elif waypoints.type == 'dubins':
self.dubins_manager(waypoints, radius, state)
else:
print('Error in Path Manager: Undefined waypoint type.')
return self.path
def line_manager(self, waypoints, state):
p = np.array([[state.pn, state.pe, -state.h]]).T
w_im1 = waypoints.ned[:,self.ptr_previous].reshape(3,1)
w_i = waypoints.ned[:,self.ptr_current].reshape(3,1)
w_ip1 = waypoints.ned[:,self.ptr_next].reshape(3,1)
q_im1 = w_i -w_im1
q_im1 /= np.linalg.norm(q_im1)
q_i = w_ip1 - w_i
q_i /= np.linalg.norm(q_i)
n_i = q_im1 + q_i
n_i /= np.linalg.norm(n_i)
self.halfspace_r = w_i
self.halfspace_n = n_i
self.path.airspeed = waypoints.airspeed.item(self.ptr_current)
self.path.flag = 'line'
if self.inHalfSpace(p):
self.increment_pointers()
self.path.flag_path_changed = True
self.path.line_origin = w_i
self.path.line_direction = q_i
else:
self.path.flag_path_changed = False
self.path.line_origin = w_im1
self.path.line_direction = q_im1
def fillet_manager(self, waypoints, radius, state):
p = np.array([[state.pn, state.pe, -state.h]]).T
w_im1 = waypoints.ned[:,self.ptr_previous].reshape(3,1)
w_i = waypoints.ned[:,self.ptr_current].reshape(3,1)
w_ip1 = waypoints.ned[:,self.ptr_next].reshape(3,1)
q_im1 = w_i -w_im1
q_im1 /= np.linalg.norm(q_im1)
q_i = w_ip1 - w_i
q_i /= np.linalg.norm(q_i)
var_phi = np.arccos(-q_im1.T @ q_i)
if self.manager_state == 1:
self.path.flag_path_changed = self.state_changed
self.state_changed = False
self.path.flag = 'line'
self.path.line_origin = w_im1
self.path.line_direction = q_im1
self.path.airspeed = waypoints.airspeed.item(self.ptr_current)
z = w_i - (radius/np.tan(var_phi/2.0))*q_im1
self.halfspace_r = z
self.halfspace_n = q_im1
if self.inHalfSpace(p):
self.manager_state = 2
self.state_changed = True
else:
self.path.flag_path_changed = self.state_changed
self.state_changed = False
direction = (q_im1-q_i)
direction /= np.linalg.norm(direction)
c = w_i - (radius/np.sin(var_phi/2.0))*direction
lam = np.sign(q_im1.item(0)*q_i.item(1)-q_im1.item(1)*q_i.item(0))
self.path.flag = 'orbit'
self.path.airspeed = waypoints.airspeed.item(self.ptr_current)
self.path.orbit_center = c
self.path.orbit_radius = radius
if lam > 0:
self.path.orbit_direction = 'CW'
else:
self.path.orbit_direction = 'CCW'
z = w_i + (radius/np.tan(var_phi/2.0))*q_i
self.halfspace_r = z
self.halfspace_n = q_i
if self.inHalfSpace(p):
self.increment_pointers()
self.manager_state = 1
self.state_changed = True
def dubins_manager(self, waypoints, radius, state):
p = np.array([[state.pn, state.pe, -state.h]]).T
self.path.airspeed = waypoints.airspeed.item(self.ptr_current)
if self.ptrs_updated:
self.ptrs_updated = False
ps = waypoints.ned[:,self.ptr_previous].reshape(3,1)
pe = waypoints.ned[:,self.ptr_current].reshape(3,1)
chis = waypoints.course.item(self.ptr_previous)
chie = waypoints.course.item(self.ptr_current)
self.dubins_path.update(ps,chis,pe,chie,radius)
if self.manager_state == 1:
self.path.flag_path_changed = self.state_changed
self.state_changed = False
self.path.flag = 'orbit'
self.path.orbit_center = self.dubins_path.center_s
self.path.orbit_radius = radius
if self.dubins_path.dir_s > 0:
self.path.orbit_direction = 'CW'
else:
self.path.orbit_direction = 'CCW'
self.halfspace_n = self.dubins_path.n1
self.halfspace_r = self.dubins_path.r1
if self.inHalfSpace(p):
self.manager_state = 2
self.state_changed = True
elif self.manager_state == 2:
self.halfspace_n = self.dubins_path.n1
self.halfspace_r = self.dubins_path.r1
if self.inHalfSpace(p):
self.manager_state = 3
self.state_changed = True
elif self.manager_state == 3:
self.path.flag_path_changed = self.state_changed
self.state_changed = False
self.path.flag = 'line'
self.path.line_origin = self.dubins_path.r1
self.path.line_direction = self.dubins_path.n1
self.halfspace_n = self.dubins_path.n1
self.halfspace_r = self.dubins_path.r2
if self.inHalfSpace(p):
self.manager_state = 4
self.state_changed = True
elif self.manager_state == 4:
self.path.flag_path_changed = self.state_changed
self.state_changed = False
self.path.flag = 'orbit'
self.path.orbit_center = self.dubins_path.center_e
if self.dubins_path.dir_e > 0:
self.path.orbit_direction = 'CW'
else:
self.path.orbit_direction = 'CCW'
self.halfspace_n = self.dubins_path.n3
self.halfspace_r = self.dubins_path.r3
if self.inHalfSpace(p):
self.manager_state = 5
self.path.dubins_state_changed = True
else:
self.path.flag_path_changed = self.state_changed
self.state_changed = False
self.halfspace_n = self.dubins_path.n3
self.halfspace_r = self.dubins_path.r3
if self.inHalfSpace(p):
self.manager_state = 1
self.path.dubins_state_changed = True
self.increment_pointers()
def initialize_pointers(self):
self.ptr_previous = 0
self.ptr_current = 1
self.ptr_next = 2
self.ptrs_updated = True
def increment_pointers(self):
self.ptr_previous += 1
if self.ptr_previous >= self.num_waypoints:
self.ptr_previous = 0
self.ptr_current += 1
if self.ptr_current >= self.num_waypoints:
self.ptr_current = 0
self.ptr_next += 1
if self.ptr_next >= self.num_waypoints:
self.ptr_next = 0
self.ptrs_updated = True
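    # Halfspace switching test shared by the line, fillet, and Dubins managers:
    # the point `pos` has crossed the plane through `halfspace_r` with normal
    # `halfspace_n` exactly when (pos - r) . n >= 0.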
def inHalfSpace(self, pos):
if (pos-self.halfspace_r).T @ self.halfspace_n >= 0:
return True
else:
return False
|
import yaml
import os
import git
import logging
from .i_repository_parser import IRepositoryParser
class RosdistroRepositoryParser(IRepositoryParser):
"""
Pulls the rosdistro-package and gets all urls from the rosdistro files.
"""
def __init__(self, settings: dict):
"""
Creates a new instance of the RosdistroRepositoryParser class
:param settings: Settings containing information about rosdistro_workspace and rosdistro_url
"""
self.__settings = settings
def __get_rosdistro_repository(self) -> None:
"""
Clones the repository from rosdistro_url into rosdistro_workspace (defined in settings)
:return: None
"""
if not os.path.exists(self.__settings["rosdistro_workspace"]):
os.makedirs(self.__settings["rosdistro_workspace"])
try:
logging.info("[RosdistroRepositoryParser]: Cloning rosdistro repository...")
git.Repo.clone_from(self.__settings["rosdistro_url"], self.__settings["rosdistro_workspace"])
except git.exc.GitCommandError:
logging.warning("[RosdistroRepositoryParser]: Repository already exists, pulling changes...")
repo = git.Repo(self.__settings["rosdistro_workspace"])
repo.remotes.origin.pull()
logging.info("[RosdistroRepositoryParser]: Rosdistro up-to-date...")
def __get_urls_from_file(self, file_path: str, repository_dict: dict) -> None:
"""
Gets the URLs from a distribution.yaml that adheres to rosdistro-specs.
:param file_path: path to a distribution.yaml file
:param repository_dict: dictionary with repository-type (git, svn, hg, ...) as key and the repo-url as value
:return: None
"""
        # Load the file; safe_load avoids executing arbitrary YAML tags.
        with open(file_path, 'r') as file:
            rosdistro = yaml.safe_load(file)
# Iterate repositories and add them to the repository_dict.
for repository in rosdistro["repositories"]:
try:
vcs_type = str(rosdistro["repositories"][repository]["doc"]["type"])
url = str(rosdistro["repositories"][repository]["doc"]["url"])
repository_dict[vcs_type].add(url)
except KeyError:
pass
            try:
                vcs_type = str(rosdistro["repositories"][repository]["source"]["type"])
                url = str(rosdistro["repositories"][repository]["source"]["url"])
repository_dict[vcs_type].add(url)
except KeyError:
pass
try:
# This has to be a git repository (required by bloom)
repository_dict["git"].add(rosdistro["repositories"][repository]["release"]["url"])
except KeyError:
pass
def parse_repositories(self, repository_dict: dict) -> None:
# Actually get the repository
self.__get_rosdistro_repository()
# Parse index.yaml
        with open(self.__settings["rosdistro_workspace"] + "index.yaml", "r") as index_file:
            index_yaml = yaml.safe_load(index_file)
# Get all urls from all distribution.yaml files
for distribution in index_yaml["distributions"]:
logging.info("Parsing distribution " + index_yaml["distributions"][distribution]["distribution"][0])
self.__get_urls_from_file(self.__settings["rosdistro_workspace"]
+ index_yaml["distributions"][distribution]["distribution"][0],
repository_dict)
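# A minimal usage sketch (the settings keys mirror the accesses above; the
# URL and workspace path are placeholders, not documented defaults):
# parser = RosdistroRepositoryParser({
#     "rosdistro_workspace": "/tmp/rosdistro/",
#     "rosdistro_url": "https://github.com/ros/rosdistro.git",
# })
# repositories = {"git": set(), "svn": set(), "hg": set()}
# parser.parse_repositories(repositories)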
|
'''
Another Lottery
Even in times of an economic crisis, people in Byteland still like
to participate in lotteries. With a bit of luck, they might get rid
of all their sorrows and become rich.
The most popular lottery in Byteland consists of m rounds. In each round,
everyone can purchase as many tickets as he wishes, and among all tickets
sold in this round, one
ticket is chosen randomly, each one with the same probability. The owner
of that ticket wins the prize money of this round. Since people in Byteland
like powers of 2, the prize money for the winner of round i amounts to 2^i
Bytelandian Dollars.
Can you determine for each participant in the lottery the probability that
he will win more money than anybody else?
Input
The input consists of several test cases. Each test case starts with a line
containing two integers n and m, the number of participants in the lottery
and the number of rounds in the lottery. You may assume that 1 ≤ n ≤ 10000
and 1 ≤ m ≤ 30.
The following n lines contain the description of the tickets bought by the
participant. The ith such line contains m non-negative integers c1, ..., cm,
where cj (1 ≤ j ≤ m) is the amount of tickets of round j bought by participant i.
The total number of tickets sold in each round is between 1 and 10^9.
The input ends with a line containing 2 zeros.
Output
For each test case, print n lines of output, where line i contains the
probability as a reduced fraction that participant i wins the most money.
See the sample output for details.
'''
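# Key observation behind the solution below: the round-m prize 2^m exceeds all
# earlier prizes combined (2^1 + ... + 2^(m-1) = 2^m - 2), so whoever wins the
# last round wins the most money. The answer for participant i is therefore
# (tickets of i in round m) / (total tickets in round m), as a reduced fraction.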
def reducefraction(numerator, denominator):
    # Euclid's algorithm for the greatest common divisor.
    def gcd(a, b):
        while b != 0:
            a, b = b, a % b
        return a
    greatest = gcd(numerator, denominator)
    return numerator // greatest, denominator // greatest
while True:
participants, rounds = [int(x) for x in input().split()]
if participants == 0 and rounds == 0:
break
listLastTicket = []
for x in range(1, participants + 1):
ticket = input().split()
listLastTicket.append(ticket[rounds - 1])
Sum = sum(int(i) for i in listLastTicket)
for x in listLastTicket:
numerator, denominator = reducefraction(int(x), Sum)
print('{} / {}'.format(numerator, denominator))
|
from selenium import webdriver
import pytest
from selenium.webdriver.common.by import By
sticker_new = "//*[@id='box-%s']//*[@class='product column shadow hover-light']//*[@title='%s']//*[@title='New']"
sticker_sale = "//*[@id='box-%s']//*[@class='product column shadow hover-light']//*[@title='%s']//*[@title='On Sale']"
products = [("most-popular", "Blue Duck"), ("most-popular", "Yellow Duck"), ("most-popular", "Purple Duck"),
("most-popular", "Red Duck"), ("most-popular", "Green Duck"), ("campaigns", "Yellow Duck"),
("latest-products", "Blue Duck"), ("latest-products", "Yellow Duck"), ("latest-products", "Purple Duck"),
("latest-products", "Red Duck"), ("latest-products", "Green Duck")]
@pytest.fixture()
def driver():
_driver = webdriver.Chrome()
yield _driver
_driver.quit()
def login(driver, username, password):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys(username)
driver.find_element_by_name("password").send_keys(password)
driver.find_element_by_name("login").click()
def sum_of_stickers(driver, group, product):
amount_of_new_stickers = len(driver.find_elements(By.XPATH, sticker_new % (group, product)))
amount_of_sale_stickers = len(driver.find_elements(By.XPATH, sticker_sale % (group, product)))
return amount_of_new_stickers + amount_of_sale_stickers
def test_home_task_8(driver):
login(driver, username="admin", password="admin")
driver.find_element(By.XPATH, "//*[@title='Catalog']").click()
for (group, product) in products:
assert sum_of_stickers(driver, group, product) == 1
|
"""
A simple Point class.
NOTE: This is NOT rosegraphics -- it is your OWN Point class.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and SOLUTION by Muqing Zheng. September 2015.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the TEST functions in this module. """
test_init()
test_repr()
test_clone()
test_move_to()
test_move_by()
test_number_of_moves_made()
test_distance_from()
test_distance_from_start()
test_distance_traveled()
test_closer_to()
test_halfway_to()
# ----------------------------------------------------------------------
# DONE: 2. With your instructor, READ THE INSTRUCTIONS
# in file m0_INSTRUCTIONS.txt, asking questions as needed.
#
# Then write a class called Point that knows nothing
# and has no data yet. Check it for syntax (notational) errors.
# ----------------------------------------------------------------------
class Point:
    def __init__(self, x, y):  # Done __init__
self.x = x
self.y = y
self.initial_x = x
self.initial_y = y
self.count = 0
self.distance = 0
self.travel = 0
def __repr__(self): # Done __repr__
return('Point(' + str(self.x) + ', ' + str(self.y) + ')')
def clone(self): # Done clone()
return Point(self.x, self.y)
def move_to(self, dex, dey): # Done move_to()
self.travel += self.distance_from(Point(dex, dey))
self.x = dex
self.y = dey
self.count += 1
self.distance = self.distance_from(Point(self.initial_x, self.initial_y))
def move_by(self, dx, dy): # Done move_by()
self.travel += self.distance_from(Point(self.x + dx, self.y + dy))
self.x = self.x + dx
self.y = self.y + dy
self.count += 1
self.distance = self.distance_from(Point(self.initial_x, self.initial_y))
def number_of_moves_made(self): # Done number_of_moves_made()
return self.count
    def distance_from(self, other):  # Done distance_from()
        return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5
def distance_from_start(self): # Done distance_from_start()
return self.distance
def distance_traveled(self): # Done distance_traveled()
return self.travel
    def closer_to(self, point1, point2):  # Done closer_to()
        # Returns point1 on a tie, per the specification.
        if self.distance_from(point2) < self.distance_from(point1):
            return point2
        return point1
    def halfway_to(self, other):  # Done halfway_to()
        newx = (self.x + other.x) / 2
        newy = (self.y + other.y) / 2
        return Point(newx, newy)
def test_init():
"""
Tests the __init__ method of the Point class.
The __init__ method:
-- Has two arguments: x and y, both numbers.
-- It sets instance variables:
x
y
to the given coordinates. Other methods should maintain
these variables as needed so that they always indicate
the CURRENT position of the Point.
-- The __init__ method runs when one constructs a Point.
-- There are TWO underscores on each side.
-- For example, the following invokes the __init__ method,
as part of the construction of the Point object:
p = Point(30, 18)
print(p.x) # Should print 30
print(p.y) # Should print 18
"""
# ------------------------------------------------------------------
# DONE: 3.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the __init__ method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the __init__ method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(30, 18) # "Done test for __init__"
expected_x = 30
expected_y = 18
print('Expected x:', expected_x, 'Actual:', p1.x)
print('Expected y:', expected_y, 'Actual:', p1.y)
p2 = Point(0, 0) # "Done more tests for __init__"
expected_x = 0
expected_y = 0
print('Expected x:', expected_x, 'Actual:', p2.x)
print('Expected y:', expected_y, 'Actual:', p2.y)
expected_x = 30 # "Done more tests for __init__"
expected_y = 18
print('Expected x:', expected_x, 'Actual:', p1.x)
print('Expected y:', expected_y, 'Actual:', p1.y)
# Instructor's tests
p1 = Point(30, 18)
print('p1: Should print 30, 18:', p1.x, p1.y)
p2 = Point(100, -40)
print('p1: Should still print 30, 18:', p1.x, p1.y)
print('p2: Should print 100, -40:', p2.x, p2.y)
p1.y = 500
print('p1: Should now print 30, 500:', p1.x, p1.y)
print('p2: Should still print 100, -40:', p2.x, p2.y)
def test_repr():
"""
Tests the __repr__ method of the Point class.
The __repr__ method:
-- Has no arguments.
-- Returns a string representation of a Point like this:
'Point(x, y)'
where x and y are replaced by the Point's x and y coordinates.
    -- The __repr__ method is called by print and other functions
       when the Point must be displayed.
-- There are TWO underscores on each side.
-- For example, the following invokes the __repr__ method,
as part of what the print function does.
p = Point(30, 18)
print(p) # Should display Point(30, 18)
"""
# ------------------------------------------------------------------
# DONE: 4.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the __repr__ method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the __repr__ method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(30, 18) # "Done test for __repr__"
    expectedp1 = 'Point(30, 18)'
print('Expected:', expectedp1, 'Actual:', p1)
p2 = Point(1.1, 1.8) # "Done more tests for __repr__"
    expectedp2 = 'Point(1.1, 1.8)'
print('Expected:', expectedp2, 'Actual:', p2)
p3 = Point(16, 22) # "Done more tests for __repr__"
    expectedp3 = 'Point(16, 22)'
    print('Expected:', expectedp3, 'Actual:', p3)
# Instructor's tests
p1 = Point(30, 18)
print('p1: Should print Point(30, 18):', p1)
p2 = Point(100, -40)
print('p1: Should still print Point(30, 18):', p1)
print('p2: Should print Point(100, -40):', p2)
p1.y = 500
print('p1: Should now print Point(30, 500):', p1)
print('p2: Should still print Point(100, -40):', p2)
p1 = Point(555, 444)
print('p1: Should now print Point(555, 444):', p1)
print('p2: Should still print Point(100, -40):', p2)
def test_clone():
"""
Tests the clone method of the Point class.
The clone method:
-- Has no arguments.
-- Returns a new Point whose x and y coordinates are the
same as the x and y coordinates of this Point.
-- For example, if a Point p is at (10, 8) and clone
is applied to it, then a new Point at (10, 8)
should be returned.
"""
# ------------------------------------------------------------------
# DONE: 5.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the clone method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the clone method of the Point class.')
print('-----------------------------------------------------------')
p = Point(10, 18) # "Done test for clone()"
p2 = p.clone()
p.x = -30
expected = '10'
print('Expected value:', expected, 'Actual:', p2.x)
p = Point(16, 20) # "Done more tests for clone()"
p2 = p.clone()
p.y = -30
expected = '20'
print('Expected value:', expected, 'Actual:', p2.y)
p = Point(10, 18) # "Done more tests for clone()"
p2 = p.clone()
p.y = -30
expected = '18'
print('Expected value:', expected, 'Actual:', p2.y)
# Instructor's tests
p1 = Point(10, 8)
print('p1: Should print Point(10, 8):', p1)
p2 = p1.clone()
p3 = p2.clone()
print('p1: Should print Point(10, 8):', p1)
print('p2: Should print Point(10, 8):', p2)
print('p3: Should print Point(10, 8):', p3)
p1.x = 999
print('p1: Should now print Point(999, 8):', p1)
print('p2: Should still print Point(10, 8):', p2)
print('p3: Should still print Point(10, 8):', p3)
p1.y = 333
p2 = Point(11, 22)
p3.x = 777
p3.y = 555
print('p1: Should now print Point(999, 333):', p1)
print('p2: Should still print Point(11, 22):', p2)
print('p3: Should now print Point(777, 555):', p3)
def test_move_to():
"""
Tests the move_to method of the Point class.
The move_to method:
-- Has two arguments, x and y, both numbers.
-- Moves the Point to the given x and y coordinates.
That is, changes the Point's coordinates to the given ones.
-- For example, if a Point p is at (10, 8) and move_to
is applied to it with arguments 5 and -1,
then Point p's new position should be (5, -1).
"""
# ------------------------------------------------------------------
# DONE: 6.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the move_to method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the move_to method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 8) # "Done test for move_to()"
p1.move_to(5, -1)
    expected = 'Point(5, -1)'
print('Expected:', expected, 'Actual:', p1)
p1 = Point(10, 8) # "Done more tests for move_to()"
p1.move_to(7, -1)
    expected = 'Point(7, -1)'
print('Expected:', expected, 'Actual:', p1)
p1 = Point(10, 8) # "Done more tests for move_to()"
p1.move_to(15, -11)
    expected = 'Point(15, -11)'
print('Expected:', expected, 'Actual:', p1)
# Instructor's tests
p1 = Point(10, 8)
p2 = Point(50, 20)
print('p1: Should print Point(10, 8):', p1)
print('p2: Should print Point(50, 20):', p2)
p1.move_to(5, -1)
p2.move_to(0, 0)
print('p1: Should now print Point(5, -1):', p1)
print('p2: Should now print Point(0, 0):', p2)
p2.y = 99
print('p1: Should still print Point(5, -1):', p1)
print('p2: Should now print Point(0, 99):', p2)
p2.move_to(0, 222)
print('p1: Should still print Point(5, -1):', p1)
print('p2: Should now print Point(0, 222):', p2)
def test_move_by():
"""
Tests the move_by method of the Point class.
The move_by method:
-- Has two arguments, dx and dy, both numbers.
-- Translates the Point by the given dx and dy amounts.
That is, changes the Point's x-coordinate to what-it-was + dx,
and changes its y-coordinate to what-it-was + dy.
-- For example, if a Point p is at (10, 8) and move_by
is applied to it with arguments 5 and -1,
then Point p's new position should be (15, 7).
"""
# ------------------------------------------------------------------
# DONE: 7.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the move_by method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the move_by method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 8) # "Done test for move_by()"
p1.move_by(5, -1)
    expected = 'Point(15, 7)'
print('Expected:', expected, 'Actual:', p1)
p1 = Point(10, 8) # "Done more tests for move_by()"
p1.move_by(15, -1)
    expected = 'Point(25, 7)'
print('Expected:', expected, 'Actual:', p1)
p1 = Point(10, 8) # "Done test for move_by()"
p1.move_by(-5, 1)
    expected = 'Point(5, 9)'
print('Expected:', expected, 'Actual:', p1)
# Instructor's tests
p1 = Point(10, 8)
p2 = Point(50, 20)
print('p1: Should print Point(10, 8):', p1)
print('p2: Should print Point(50, 20):', p2)
p1.move_by(5, -1)
p2.move_by(0, 0)
print('p1: Should now print Point(15, 7):', p1)
print('p2: Should now print Point(50, 20):', p2)
p2.move_by(200, 0)
print('p1: Should still print Point(15, 7):', p1)
print('p2: Should now print Point(250, 20):', p2)
p2.move_by(-100, 300)
print('p1: Should still print Point(15, 7):', p1)
print('p2: Should now print Point(150, 320):', p2)
def test_number_of_moves_made():
"""
Tests the number_of_moves_made method of the Point class.
The number_of_moves_made method:
-- Has no arguments.
-- Returns the number of times the Point has moved.
That is, returns the number of times move_to and move_by
have been called on this Point.
-- For example, if a Point p is constructed and later:
-- is moved somewhere by move_to [or move_by]
-- is moved somewhere by move_to [or move_by]
-- is moved somewhere by move_by [or move_to]
-- is moved somewhere by move_to [or move_by]
then this method should return 4.
If thereafter, Point p:
-- is moved somewhere by move_by [or move_to]
then this method should (then) return 5.
"""
# ------------------------------------------------------------------
# DONE: 8.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the number_of_moves_made method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the number_of_moves_made method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(10, 8) # "Done test for number_of_moves_made()"
p1.move_by(5, -1)
p1.move_to(5, -1)
expected = '2'
print('Expected:', expected, 'Actual:', p1.number_of_moves_made())
p2 = Point(10, 8) # "Done more tests for number_of_moves_made()"
p2.move_to(5, -1)
expected = '1'
print('Expected:', expected, 'Actual:', p2.number_of_moves_made())
p3 = Point(10, 8) # "Done more tests for number_of_moves_made()"
p3.move_by(5, -1)
p3.move_to(15, -1)
p3.move_to(5, -11)
p3.move_to(25, -1)
expected = '4'
print('Expected:', expected, 'Actual:', p3.number_of_moves_made())
# Instructor's tests
p1 = Point(10, 8)
p2 = Point(50, 20)
print('p1: Should print Point(10, 8):', p1)
print('p2: Should print Point(50, 20):', p2)
p1.move_by(5, -1)
p2.move_by(0, 0)
print('p1: Moves made should be 1:', p1.number_of_moves_made())
print('p2: Moves made should be 1:', p2.number_of_moves_made())
p2.move_by(200, 0)
p2.move_by(-100, 300)
p2.move_to(-100, 300)
p1.move_to(3, 3)
print('p1: Moves made should be 2:', p1.number_of_moves_made())
print('p2: Moves made should be 4:', p2.number_of_moves_made())
p1.move_by(200, 0)
p1.move_by(-100, 300)
p1.move_to(-100, 300)
p1.move_to(3, 3)
print('p1: Moves made should be 6:', p1.number_of_moves_made())
print('p2: Moves made should be 4:', p2.number_of_moves_made())
p1.x = 400
print('p1: Moves made should be 6:', p1.number_of_moves_made())
print('p2: Moves made should be 4:', p2.number_of_moves_made())
p1.move_to(3, 3)
p2.move_by(0, 0)
print('p1: Moves made should be 7:', p1.number_of_moves_made())
print('p2: Moves made should be 5:', p2.number_of_moves_made())
def test_distance_from():
"""
Tests the distance_from method of the Point class.
The distance_from method:
-- Has one argument, another Point object.
-- Returns the distance the Point is from that given Point.
-- For example, if the Point is at (1, 5) and the given Point
(i.e., the argument) is at (10, 5),
then this method should return 9.
"""
# ------------------------------------------------------------------
# DONE: 9.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the distance_from method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the distance_from method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(1, 5) # "Done test for distance_from()"
p2 = Point(10, 5)
expected = '9.0'
print('Expected:', expected, 'Actual:', p1.distance_from(p2))
p1 = Point(1, 5) # "Done more tests for distance_from()"
p2 = Point(4, 9)
expected = '5.0'
print('Expected:', expected, 'Actual:', p1.distance_from(p2))
p1 = Point(1, 5) # "Done more tests for distance_from()"
p2 = Point(13, 10)
expected = '13.0'
print('Expected:', expected, 'Actual:', p1.distance_from(p2))
# Instructor's tests
p1 = Point(1, 5)
p2 = Point(10, 5)
p3 = Point(13, 9)
print('p1 to p2: Should be 9.0', p1.distance_from(p2))
print('p2 to p1: Should be 9.0', p2.distance_from(p1))
print('p2 to p3: Should be 5.0', p2.distance_from(p3))
print('p3 to p2: Should be 5.0', p3.distance_from(p2))
print('p1 to p3: Should be about 12.65', p1.distance_from(p3))
print('p3 to p1: Should be about 12.65', p3.distance_from(p1))
print('p1 to p1: Should be 0.0', p1.distance_from(p1))
print('p2 to p2: Should be 0.0', p2.distance_from(p2))
print('p3 to p3: Should be 0.0', p3.distance_from(p3))
p4 = p1.clone()
print('p1 to p4: Should be 0.0', p1.distance_from(p4))
    print('p4 to p1: Should be 0.0', p4.distance_from(p1))
print('p4 to p2: Should be 9.0', p4.distance_from(p2))
print('p2 to p4: Should be 9.0', p2.distance_from(p4))
def test_distance_from_start():
"""
Tests the distance_from_start method of the Point class.
The distance_from_start method:
-- Has no arguments.
-- Returns the distance from the Point's current position
to the position at which the Point began, that is,
its position when it was constructed.
-- For example, if a Point p is constructed at (20, 30) and later:
-- is moved somewhere by move_to [or move_by]
-- is moved somewhere by move_to [or move_by]
-- is moved somewhere by move_by [or move_to]
-- is moved to (21, 31) by move_to [or move_by]
then this method should return (approximately) 1.414
since 1.414 is the distance from (20, 30) to (21, 31).
If thereafter, Point p:
-- is moved to (50, 70) by move_by with arguments 29 and 39
this method should (then) return 50
since 50 is the distance from (20, 30) to (50, 70).
"""
# ------------------------------------------------------------------
# DONE: 10.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the distance_from_start method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the distance_from_start method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(20, 30) # "Done test for distance_from()"
p1.move_to(50, 70)
p1.move_to(70, 150)
expected = '130.0'
print('Expected:', expected, 'Actual:', p1.distance_from_start())
p1 = Point(30, 30) # "Done more tests for distance_from()"
p1.move_by(50, 70)
p1.move_to(60, 70)
expected = '50.0'
print('Expected:', expected, 'Actual:', p1.distance_from_start())
p1 = Point(10, 10) # "Done more tests for distance_from()"
p1.move_to(50, 70)
p1.move_by(60, 70)
expected = '164.0'
print('Expected:', expected, 'Actual:', p1.distance_from_start())
# Instructor's tests
p1 = Point(20, 30)
p1.move_to(111, 222)
p1.move_by(10, 20)
p1.move_to(0, 0)
p1.move_to(21, 31)
print('p1 from start to (21, 31), should be about 1.414',
p1.distance_from_start())
p1.move_by(29, 39)
print('p1 from start to (50, 70), should be about 50.0',
p1.distance_from_start())
p2 = Point(1, 1)
print('p2 from start to (1, 1), should be about 0.0',
p2.distance_from_start())
p2.move_to(11, 1)
print('p2 from start to (11, 1), should be about 10.0',
p2.distance_from_start())
p2.move_to(999, 999)
p2.move_to(1, 1)
print('p2 from start to (1, 1), should be about 0.0',
p2.distance_from_start())
def test_distance_traveled():
"""
Tests the distance_traveled method of the Point class.
The distance_traveled method:
-- Has no arguments.
-- Returns the distance that the Point has traveled via calls
to move_to and move_by.
-- For example, if a Point p is constructed at (20, 30) and later:
-- is moved to (21, 30) by move_to [or move_by]
         -- is moved to (21, 38) by move_to [or move_by]
then this method should return 9
since it moved 1 unit on the first move
and 8 units on the second move, for a total of 9 units.
If thereafter, Point p:
-- is moved to (22, 39) by move_by [or move_to]
then this method should (then) return 10.414
since it has now moved 1 unit, then 8 units, then 1.414 units,
for a total of 10.414 units.
"""
# ------------------------------------------------------------------
# DONE: 11.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the distance_traveled method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the distance_traveled method of the Point class.')
print('-----------------------------------------------------------')
p1 = Point(20, 30) # "Done test for distance_traveled()"
p1.move_to(21, 30)
expected = '1'
print('Expected:', expected, 'Actual:', p1.distance_traveled())
p1 = Point(20, 30) # "Done more tests for distance_traveled()"
p1.move_to(21, 30)
p1.move_by(10, 20)
expected = '23.4'
print('Expected:', expected, 'Actual:', p1.distance_traveled())
p1 = Point(20, 30) # "Done more tests for distance_traveled()"
p1.move_by(21, 30)
p1.move_to(10, 20)
expected = '87.2'
print('Expected:', expected, 'Actual:', p1.distance_traveled())
# Instructor's tests
p1 = Point(20, 30)
p1.move_to(21, 30)
p1.move_to(21, 38)
print('p1 has traveled 9.0', p1.distance_traveled())
p1.move_by(1, 1)
print('p1 has now traveled about 10.414', p1.distance_traveled())
p2 = Point(0, 0)
p3 = Point(100, 22)
p4 = Point(0, 555)
for k in range(100):
p2.move_by(0, k + 1)
p3.move_by(k + 1, 0)
p4.move_to(k + 1, 555)
print('p2 has now traveled', 101 * 50.0, p2.distance_traveled())
print('p3 has now traveled', 101 * 50.0, p3.distance_traveled())
print('p4 has now traveled 100.0', p4.distance_traveled())
def test_closer_to():
"""
Tests the closer_to method of the Point class.
The closer_to method:
-- Has two arguments p2 and p3, both Point objects.
-- Returns whichever of p2 and p3 the Point is closer to.
(Just to be specific, it should return p2 in the case of a tie.)
    -- For example, if the Point is at (10, 20)
       and p1 is at (15, 20) and p2 is at (14, 24),
       then p1 should be returned since the distance from
       the Point to p1 is 5 and the distance from the Point
       to p2 is the square root of 32, which is more than 5.
"""
# ------------------------------------------------------------------
# DONE: 12.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the closer_to method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the closer_to method of the Point class.')
print('-----------------------------------------------------------')
p = Point(10, 20) # "Done test for closer_to()"
p1 = Point(15, 20)
p2 = Point(14, 24)
expected = 'Point(15, 20)'
print('Expected:', expected, 'Actual:', p.closer_to(p1, p2))
p = Point(10, 20) # "Done more tests for closer_to()"
p1 = Point(11, 20)
p2 = Point(12, 24)
expected = 'Point(11, 20)'
print('Expected:', expected, 'Actual:', p.closer_to(p1, p2))
p = Point(10, 20) # "Done more tests for closer_to()"
p1 = Point(111, 20)
p2 = Point(12, 24)
    expected = 'Point(12, 24)'
print('Expected:', expected, 'Actual:', p.closer_to(p1, p2))
# Instructor's tests
p1 = Point(10, 20)
p2 = Point(15, 20)
p3 = Point(14, 24)
print('Should be', p2, p1.closer_to(p2, p3))
print('Should be', p2, p1.closer_to(p3, p2))
print('Should be', p1, p1.closer_to(p1, p3))
print('Should be', p2, p2.closer_to(p3, p2))
print('Should be', p3, p3.closer_to(p3, p3))
p4 = p1.clone()
p5 = p1.clone()
print('Should be', p4, p1.closer_to(p4, p5))
print('Should be True:', p1.closer_to(p4, p5) is p4)
print('Should be False:', p1.closer_to(p4, p5) is p5)
def test_halfway_to():
"""
Tests the halfway_to method of the Point class.
The halfway_to method:
-- Has one argument p2, a Point.
-- Returns a new Point that is halfway between the Point and p2.
That is, the x coordinate of the new Point is the average of the
x coordinate of the Point and the x coordinate of p2,
and likewise for the new Point's y coordinate.
"""
# ------------------------------------------------------------------
# DONE: 13.
# a. Implement this TEST function. COMMIT YOUR WORK.
# b. CHECK this TEST function, correcting it as needed. COMMIT.
# c. Implement and test the halfway_to method. COMMIT.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the halfway_to method of the Point class.')
print('-----------------------------------------------------------')
p = Point(10, 20) # "Done test for halfway_to()"
p1 = Point(14, 20)
expected = 'Point(12, 20)'
print('Expected:', expected, 'Actual:', p.halfway_to(p1))
p = Point(15, 20) # "Done test for halfway_to()"
p1 = Point(13, 20)
expected = 'Point(14, 20)'
print('Expected:', expected, 'Actual:', p.halfway_to(p1))
p = Point(10, 10) # "Done test for halfway_to()"
p1 = Point(20, 20)
expected = 'Point(15, 15)'
print('Expected:', expected, 'Actual:', p.halfway_to(p1))
# Instructor's tests
p1 = Point(10, 20)
p2 = Point(30, 100)
print('Should be Point(20.0, 60.0)', p1.halfway_to(p2))
print('Should be Point(20.0, 60.0)', p2.halfway_to(p1))
print('Should be Point(10.0, 20.0)', p1.halfway_to(p1))
p3 = Point(-10, 20)
p4 = Point(30, -100)
print('Should be Point(10.0, -40.0)', p3.halfway_to(p4))
    print('Should be Point(10.0, -40.0)', p4.halfway_to(p3))
print('Should be Point(-10.0, 20.0)', p3.halfway_to(p3))
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
class User:
    def __init__(self, name, email):
        self.name = name
        self.email = email
        self.account_balance = 0
    def make_deposit(self, amount):
        self.account_balance += amount
        return self
    def make_withdrawal(self, amount):
        self.account_balance -= amount
        return self
    def display_user(self):
        print(self.name, self.account_balance)
        return self
    def transfer_money(self, other_user, amount):
        # Withdraw from this user and deposit into the other user's balance.
        self.account_balance -= amount
        other_user.account_balance += amount
        return self
Hoda=User("Hoda","hoda@lgmail.com")
Laila=User("Laila","laila@gmail.com")
Ahmad=User("Ahmad","Ahmadd@gmail.com")
Hoda.make_deposit(200)
Hoda.make_deposit(200)
Hoda.make_withdrawal(300)
Hoda.display_user()
Laila.make_deposit(200)
Laila.make_deposit(1200)
Laila.make_withdrawal(400)
Laila.make_withdrawal(100)
Laila.display_user()
Ahmad.make_deposit(900)
Ahmad.make_withdrawal(100)
Ahmad.make_withdrawal(50)
Ahmad.make_withdrawal(50)
Ahmad.display_user()
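# Methods return self, so calls can chain; a small sketch of the (otherwise
# unexercised) transfer_money method:
# Hoda.make_deposit(100).display_user()
# Hoda.transfer_money(Laila, 50)   # 50 moves from Hoda to Laila
# Laila.display_user()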
|
import unittest
from greeting.greeting import hello
class TestGreeting(unittest.TestCase):
def test_hello(self):
testparams = [
["Lilla", "Hello Lilla"],
["Béla", "Hello Béla"],
]
for name, greeting in testparams:
with self.subTest("Testing with data", input=name, expected=greeting):
self.assertEqual(hello(name), greeting, "The parameter is incorrect")
if __name__ == "__main__":
unittest.main()
|
from django.contrib.auth import get_user_model
from rest_framework import authentication, exceptions
from rest_framework.generics import *
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from post_app.serializers import *
class PostCreateAPIView(ListCreateAPIView):
queryset = Post.objects.all()
serializer_class = PostCreateUpdateSerializer
def perform_create(self, serializer):
        serializer.save(user=self.request.user)
class PostListViewAPI(ListAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
def get_queryset(self):
#do stuff with queryset
return self.queryset.all()
class PostDetailAPIView(RetrieveAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
class PostUpdateAPIView(RetrieveUpdateAPIView):
queryset = Post.objects.all()
serializer_class = PostCreateUpdateSerializer
'''
def perform_create(self, serializer):
serializer.save(user= self.request.user)
'''
class PostDeleteAPIView(DestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
class UserDetailsList(RetrieveAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class CommentCreateAPIView(ListCreateAPIView):
queryset = Comment.objects.all()
serializer_class = CommentCreateSerializer
def perform_create(self, serializer):
post = self.kwargs['post']
name = self.request.user.username
        user = Profile.objects.get(user__username=name)
serializer.save(user=user, post_id=post)
class LikeCreateAPIView(APIView):
#queryset = Like.objects.all()
#serializer_class = LikeCreateSerializer
    def post(self, request, post):
        user = Poster.objects.get(id=request.user.id)
        post = Post.objects.get(id=post)
        try:
            Like.objects.get(user=user, post=post)
            return Response(data={'error': 'already liked'}, status=status.HTTP_400_BAD_REQUEST)
        except Like.DoesNotExist:
            Like.objects.create(user=user, post=post)
            post.nr_likes = Like.objects.filter(post=post).count()
            post.save()
            return Response(status=status.HTTP_200_OK)
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given n, how many structurally unique BST's (binary search trees) that store values 1...n?
# For example,
# Given n = 3, there are a total of 5 unique BST's.
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
# G(n) is number of unique BST for a sequence of length n.
# F(i, n), 1 <= i <= n: the number of unique BST, where the number i is the root of BST,
# and the sequence ranges from 1 to n.
# G(n) = F(1, n) + F(2, n) + F(3, n) + ... + F(n, n)
# F(i, n) = G(i - 1) * G(n - i)
# G(n) = G(0) * G(n - 1) + G(1) * G(n - 2) + G(2) * G(n - 3) + ... + G(n - 1) * G(0)
# 19 / 19 test cases passed.
# Status: Accepted
# Runtime: 29 ms
# Your runtime beats 81.28 % of python submissions.
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
if not n:
return 0
combine_list = [0] * (n + 1)
combine_list[0] = combine_list[1] = 1
for i in range(2, n+1):
for j in range(1, i+1):
combine_list[i] += combine_list[j-1] * combine_list[i-j]
        return combine_list[n]
# Catalan number!
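# C(0) = 1 and C(n) = (2n)! / (n! * (n + 1)!); the code below evaluates it as
# prod(n+1 .. 2n) // (n! * (n + 1)) to stay entirely in integer arithmetic.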
# 19 / 19 test cases passed.
# Status: Accepted
# Runtime: 35 ms
# Your runtime beats 39.76 % of python submissions.
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
from functools import reduce
mul = lambda x, y: x * y
# reduce(mul, iterator, 1) to handle the 0.
# Otherwise TypeError: reduce() of empty sequence with no initial value.
return reduce(mul, range(n + 1, (2 * n) + 1), 1) // (reduce(mul, range(1, n + 1), 1) * (n+1))
if __name__ == '__main__':
print(Solution().numTrees(0))
print(Solution().numTrees(3))
print(Solution().numTrees(4))
print(Solution().numTrees(5))
|
x = memoryview(bytes(5))
print(x)
print(type(x))
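# A memoryview is a zero-copy view over the buffer; slicing yields another
# view, and .tolist() materializes the underlying bytes as integers.
print(x[1:3].tolist())  # [0, 0] -- bytes(5) is five zero bytes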
|
import os
from bsm.util import ensure_list
from bsm.util import safe_rmdir
from bsm.util import safe_mkdir
from bsm.util import call_and_log
def run(param):
source_dir = param['config_package'].get('path', {}).get('source')
if not source_dir:
return {'success': False, 'message': 'Path "source" is not specified'}
build_dir = param['config_package'].get('path', {}).get('build')
if not build_dir:
return {'success': False, 'message': 'Path "build" is not specified'}
install_dir = param['config_package'].get('path', {}).get('install')
if not install_dir:
return {'success': False, 'message': 'Path "install" is not specified'}
if source_dir != build_dir:
safe_rmdir(build_dir)
safe_mkdir(build_dir)
configure_args = param['config_package'].get('configure', {}).get('args', [])
configure_args = ensure_list(configure_args)
configure_args = [p.format(**param['config_package_install_path']) for p in configure_args]
if not param['config_package'].get('configure', {}).get('ignore_install_prefix', False):
configure_args.insert(0, '--prefix='+install_dir)
env = param.get('env')
env_configure = env.copy()
for k, v in param['config_package'].get('configure', {}).get('env', {}).items():
env_configure[k] = v.format(**param['config_package_install_path'])
configure_path = os.path.join(source_dir, 'configure')
with open(param['log_file'], 'w') as f:
cmd = [configure_path] + configure_args
ret = call_and_log(cmd, log=f, cwd=build_dir, env=env_configure)
return {'success': ret==0, 'message': 'Configure exit code: {0}'.format(ret)}
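# A sketch of the `param` dict this hook expects, inferred from the accesses
# above (an assumption, not a documented contract):
# param = {
#     'config_package': {
#         'path': {'source': ..., 'build': ..., 'install': ...},
#         'configure': {'args': [...], 'env': {...}, 'ignore_install_prefix': False},
#     },
#     'config_package_install_path': {...},  # used for str.format substitution
#     'env': dict(os.environ),
#     'log_file': '/path/to/configure.log',
# }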
|
import random
def score(_goal, _user_input):
bulls_counter = 0
cows_counter = 0
for i in range(0, 4):
if _user_input[i] == _goal[i]:
cows_counter += 1
bulls_counter -= 1
if _user_input[i] in set(_goal):
bulls_counter += 1
return cows_counter, bulls_counter
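# Example: score("1234", "1243") -> (2, 2): two exact matches ('1', '2') and
# two digits present but misplaced ('4', '3'). Note the naming here is
# inverted relative to the classic game, where "bulls" are the exact matches.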
if __name__ == "__main__":
goal = str(random.randint(1000, 9999))
won = False
iterations = 1
temp_score = (0, 0)
    # print(goal)  # debug aid: reveals the secret number
while not won:
print("Please input number: ")
        user_input = input()  # input() already returns a str
tmp_score = score(goal, user_input)
# +ifs for language things
if tmp_score == (4, 0):
print("You won in " + str(iterations) + " turns")
won = True
else:
print(str(tmp_score[0]) + " cows, " + str(tmp_score[1]) + " bulls")
iterations += 1
|
#!/usr/bin/python3
'''
contain teardown method
'''
from flask import Flask, jsonify
from models import storage
from api.v1.views import app_views
from flask_cors import CORS
import os
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": ["0.0.0.0"]}})
app.register_blueprint(app_views)
@app.errorhandler(404)
def handle_404(error):
''' custom JSON 404 error'''
return (jsonify({"error": "Not found"}), 404)
@app.teardown_appcontext
def teardown(exception):
'''
Teardown method for storage session
'''
storage.close()
if __name__ == "__main__":
app.run("0.0.0.0", 5000)
|
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
m = MultipartEncoder(
fields={
'file': ("test.cfg", open('test.cfg', 'rb'), 'text/plain')}
)
# print(m.to_string())
r = requests.post('http://localhost:9000/upload', data=m,
headers={'Content-Type': m.content_type})
print(r.text)
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.backend.python.subsystems.python_tool_base import PythonToolBase, get_lockfile_metadata
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.lockfile_metadata import PythonLockfileMetadataV3
from pants.backend.python.util_rules.pex_requirements import (
LoadedLockfile,
LoadedLockfileRequest,
Lockfile,
PexRequirements,
Resolve,
)
from pants.engine.internals.native_engine import EMPTY_DIGEST
from pants.testutil.option_util import create_subsystem
from pants.testutil.rule_runner import MockGet, run_rule_with_mocks
from pants.util.ordered_set import FrozenOrderedSet
class _DummyTool(PythonToolBase):
options_scope = "dummy"
default_lockfile_resource = ("dummy", "dummy")
def test_install_from_resolve_default() -> None:
tool = create_subsystem(
_DummyTool,
lockfile="dummy.lock",
install_from_resolve="dummy_resolve",
requirements=["foo", "bar", "baz"],
)
pex_reqs = tool.pex_requirements()
assert isinstance(pex_reqs, PexRequirements)
assert pex_reqs.from_superset == Resolve("dummy_resolve", False)
assert pex_reqs.req_strings_or_addrs == FrozenOrderedSet(["bar", "baz", "foo"])
def test_get_lockfile_metadata() -> None:
tool = create_subsystem(
_DummyTool,
lockfile="dummy.lock",
install_from_resolve="dummy_resolve",
requirements=["foo", "bar", "baz"],
)
metadata = PythonLockfileMetadataV3(
valid_for_interpreter_constraints=InterpreterConstraints(),
requirements=set(),
manylinux=None,
requirement_constraints=set(),
only_binary=set(),
no_binary=set(),
)
lockfile = Lockfile("dummy_url", "dummy_description_of_origin", "dummy_resolve")
loaded_lockfile = LoadedLockfile(EMPTY_DIGEST, "", metadata, 0, True, None, lockfile)
assert (
run_rule_with_mocks(
get_lockfile_metadata,
rule_args=[tool],
mock_gets=[
MockGet(Lockfile, (Resolve,), lambda x: lockfile),
MockGet(
LoadedLockfile,
(LoadedLockfileRequest,),
lambda x: loaded_lockfile if x.lockfile == lockfile else None,
),
],
)
== metadata
)
|
# -*- coding: utf-8 -*-
import yaml
from django import forms
import msrest
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.subscription import SubscriptionClient
from azure.mgmt.containerservice import ContainerServiceClient
from architect.manager.client import BaseClient
from architect.manager.validators import validate_manager_name
from architect.manager.models import Manager
from celery.utils.log import get_logger
import json
logger = get_logger(__name__)
DEFAULT_RESOURCES = [
'az_subscription',
# 'az_load_balancer',
'az_location',
'az_resource_group',
'az_managed_cluster',
'az_virtual_machine_size',
'az_virtual_machine',
'az_network',
'az_subnet',
# 'az_network_interface',
# '__all__'
]
RESOURCE_MAP = {
'Microsoft.ContainerRegistry/registries': 'az_registry',
'Microsoft.ContainerRegistry/registries/replications': 'az_registry_replication',
'Microsoft.Compute/availabilitySets': 'az_availability_set',
'Microsoft.Compute/disks': 'az_disk',
'Microsoft.Compute/images': 'az_image',
'Microsoft.Compute/virtualMachineScaleSets': 'az_virtual_machine_scale_set',
'Microsoft.Compute/virtualMachines': 'az_virtual_machine',
'Microsoft.Compute/virtualMachines/extensions': 'az_virtual_machine_extension',
'Microsoft.ContainerService/managedClusters': 'az_kubernetes_cluster',
'Microsoft.Network/dnszones': 'az_dns_zone',
'Microsoft.Network/virtualNetworks': 'az_network',
'Microsoft.Network/routeTables': 'az_route_table',
'Microsoft.Network/loadBalancers': 'az_load_balancer',
'Microsoft.Network/networkInterfaces': 'az_network_interface',
'Microsoft.Network/networkSecurityGroups': 'az_security_group',
'Microsoft.Network/publicIPAddresses': 'az_public_ip_address',
'Microsoft.OperationalInsights/workspaces': 'az_workspace',
'Microsoft.OperationsManagement/solutions': 'az_solution',
'Microsoft.Storage/storageAccounts': 'az_storage_account',
}
class MicrosoftAzureClient(BaseClient):
credentials = None
resource_group = {}
size_location = {}
network = []
def __init__(self, **kwargs):
super(MicrosoftAzureClient, self).__init__(**kwargs)
def auth(self):
if self.credentials is None:
self.credentials = ServicePrincipalCredentials(
client_id=self.metadata['client_id'],
secret=self.metadata['client_secret'],
tenant=self.metadata['tenant_id']
)
self.resource_api = ResourceManagementClient(self.credentials, self.metadata['subscription_id'])
self.compute_api = ComputeManagementClient(self.credentials, self.metadata['subscription_id'])
self.network_api = NetworkManagementClient(self.credentials, self.metadata['subscription_id'])
self.container_service_api = ContainerServiceClient(self.credentials, self.metadata['subscription_id'])
self.subscription_api = SubscriptionClient(self.credentials)
return True
def update_resources(self, resources=None):
if self.auth():
if resources is None:
resources = DEFAULT_RESOURCES
for resource in resources:
metadata = self.get_resource_metadata(resource)
self.process_resource_metadata(resource, metadata)
count = len(self.resources.get(resource, {}))
logger.info("Processed {} {} resources".format(count,
resource))
self.process_relation_metadata()
def get_resource_status(self, kind, metadata):
if kind == 'az_resource_group':
state = metadata.get('properties', {}).get('provisioning_state', '')
if state == 'Succeeded':
return 'active'
elif kind in ['az_virtual_machine_size', 'az_location']:
return 'active'
elif kind in ['az_virtual_machine', 'az_managed_cluster', 'az_network', 'az_subnet']:
state = metadata.get('provisioning_state', '')
if state == 'Succeeded':
return 'active'
elif state == 'Creating':
return 'build'
elif kind == 'az_subscription':
if metadata.get('state', '') == 'Enabled':
return 'active'
return 'unknown'
def process_relation_metadata(self):
for resource_id, resource in self.resources.get('az_managed_cluster', {}).items():
self._create_relation(
'in_resource_group',
resource_id,
self.get_group_id_from_resource_id(resource_id))
self._create_relation(
'at_location',
resource_id,
resource['metadata']['location'])
for resource_id, resource in self.resources.get('az_subnet', {}).items():
self._create_relation(
'in_resource_group',
resource_id,
self.get_group_id_from_resource_id(resource_id))
for resource_id, resource in self.resources.get('az_network', {}).items():
self._create_relation(
'in_resource_group',
resource_id,
self.get_group_id_from_resource_id(resource_id))
for subnet in resource['metadata'].get('subnets', []):
self._create_relation(
'in_network',
subnet['id'],
resource_id)
for resource_id, resource in self.resources.get('az_virtual_machine', {}).items():
self._create_relation(
'in_resource_group',
resource_id,
self.resource_group[self.get_group_name_from_resource_id(resource_id).lower()])
self._create_relation(
'has_size',
resource_id,
resource['metadata']['hardware_profile']['vm_size'])
self._create_relation(
'at_location',
resource_id,
resource['metadata']['location'])
for location, sizes in self.size_location.items():
for size in sizes:
self._create_relation(
'at_location',
size,
location)
for resource_id, resource in self.resources.get('az_resource_group', {}).items():
self._create_relation(
'at_location',
resource_id,
resource['metadata']['location'])
self._create_relation(
'in_subscription',
resource_id,
self.get_subscription_id_from_resource_id(resource_id))
def get_resource_metadata(self, kind):
logger.info("Getting {} resources".format(kind))
response = []
if kind == 'az_subscription':
response = self.subscription_api.subscriptions.list(raw=True)
elif kind == 'az_managed_cluster':
for subscription in self.subscription_api.subscriptions.list(raw=True):
for item in self.container_service_api.managed_clusters.list(subscription.subscription_id, raw=True):
response.append(item)
elif kind == 'az_location':
for subscription in self.subscription_api.subscriptions.list(raw=True):
for item in self.subscription_api.subscriptions.list_locations(subscription.subscription_id, raw=True):
response.append(item)
elif kind == 'az_resource_group':
for subscription in self.subscription_api.subscriptions.list(raw=True):
for item in self.resource_api.resource_groups.list(raw=True):
response.append(item)
elif kind == 'az_subnet':
for network in self.network:
for subnet in self.network_api.subnets.list(resource_group_name=network[0], virtual_network_name=network[1], raw=True):
response.append(subnet)
elif kind == 'az_load_balancer':
for subscription in self.subscription_api.subscriptions.list(raw=True):
for load_balancer in self.network_api.load_balancers.list_all(raw=True):
response.append(load_balancer)
elif kind == 'az_network_interface':
for network_interface in self.network_api.network_interfaces.list(raw=True):
response.append(network_interface)
logger.info(network_interface)
elif kind == 'az_network':
for subscription in self.subscription_api.subscriptions.list(raw=True):
for virtual_network in self.network_api.virtual_networks.list_all(raw=True):
response.append(virtual_network)
self.network.append((virtual_network.id.split('/')[4], virtual_network.name,))
elif kind == 'az_virtual_machine':
for subscription in self.subscription_api.subscriptions.list(raw=True):
for virtual_machine in self.compute_api.virtual_machines.list_all(raw=True):
response.append(virtual_machine)
elif kind == 'az_virtual_machine_size':
for subscription in self.subscription_api.subscriptions.list(raw=True):
size_names = {}
for location in self.subscription_api.subscriptions.list_locations(subscription.subscription_id, raw=True):
try:
for size in self.compute_api.virtual_machine_sizes.list(location.name):
if not location.name in self.size_location:
self.size_location[location.name] = []
self.size_location[location.name].append(size.name)
size_names[size.name] = size
except msrest.exceptions.ClientException as error:
logger.error(error)
for size_name, size in size_names.items():
response.append(size)
elif kind == '__all__':
response = self.resource_api.resources.list(raw=True)
return response
def process_resource_metadata(self, kind, metadata):
if kind == 'az_resource_group':
for item in metadata:
resource = item.__dict__
resource['properties'] = resource['properties'].__dict__
self._create_resource(resource['id'],
resource['name'],
kind,
metadata=resource)
self.resource_group[resource['name'].lower()] = resource['id']
elif kind == 'az_virtual_machine_size':
for item in metadata:
resource = item.__dict__
self._create_resource(resource['name'],
resource['name'],
kind,
metadata=resource)
elif kind == 'az_load_balancer':
for item in metadata:
resource = item.__dict__
resource['sku'] = resource['sku'].__dict__
if resource['inbound_nat_pools'] is not None:
inbound_nat_pools = []
for res in resource.pop('inbound_nat_pools'):
if not isinstance(res, str):
res = res.__dict__
inbound_nat_pools.append(res)
resource['inbound_nat_pools'] = inbound_nat_pools
if resource['outbound_rules'] is not None:
outbound_rules = []
for res in resource.pop('outbound_rules'):
if not isinstance(res, str):
res = res.__dict__
outbound_rules.append(res)
resource['outbound_rules'] = outbound_rules
if resource['inbound_nat_rules'] is not None:
inbound_nat_rules = []
for res in resource.pop('inbound_nat_rules'):
if not isinstance(res, str):
res = res.__dict__
inbound_nat_rules.append(res)
resource['inbound_nat_rules'] = inbound_nat_rules
if resource['probes'] is not None:
probes = []
for res in resource.pop('probes'):
if not isinstance(res, str):
res = res.__dict__
                        if res['load_balancing_rules'] is not None:
                            load_balancing_rules = []
                            for subres in res.pop('load_balancing_rules'):
                                if not isinstance(subres, str):
                                    subres = subres.__dict__
                                load_balancing_rules.append(subres)
                            res['load_balancing_rules'] = load_balancing_rules
probes.append(res)
resource['probes'] = probes
if resource['frontend_ip_configurations'] is not None:
frontend_ip_configurations = []
for res in resource.pop('frontend_ip_configurations'):
if not isinstance(res, str):
res = res.__dict__
frontend_ip_configurations.append(res)
resource['frontend_ip_configurations'] = frontend_ip_configurations
if resource['backend_address_pools'] is not None:
backend_address_pools = []
for res in resource.pop('backend_address_pools'):
if not isinstance(res, str):
res = res.__dict__
backend_address_pools.append(res)
resource['backend_address_pools'] = backend_address_pools
if resource['load_balancing_rules'] is not None:
load_balancing_rules = []
for res in resource.pop('load_balancing_rules'):
if not isinstance(res, str):
res = res.__dict__
load_balancing_rules.append(res)
resource['load_balancing_rules'] = load_balancing_rules
logger.info(resource)
self._create_resource(resource['id'],
resource['name'],
kind,
metadata=resource)
elif kind == 'az_subscription':
for item in metadata:
resource = item.__dict__
resource['subscription_policies'] = resource['subscription_policies'].__dict__
self._create_resource(resource['id'],
resource['display_name'],
kind,
metadata=resource)
elif kind == 'az_managed_cluster':
for item in metadata:
resource = item.__dict__
resource['network_profile'] = resource.pop('network_profile').__dict__
if resource['aad_profile'] is not None:
resource['aad_profile'] = resource.pop('aad_profile').__dict__
if resource['linux_profile'] is not None:
resource['linux_profile'] = resource.pop('linux_profile').__dict__
resource['linux_profile']['ssh'] = resource['linux_profile'].pop('ssh').__dict__
public_keys = []
for public_key in resource['linux_profile']['ssh'].pop('public_keys'):
public_keys.append(public_key.__dict__)
resource['linux_profile']['ssh']['public_keys'] = public_keys
resource['service_principal_profile'] = resource.pop('service_principal_profile').__dict__
if resource['addon_profiles'] is not None:
addon_profiles = []
for res in resource.pop('addon_profiles'):
if not isinstance(res, str):
res = res.__dict__
addon_profiles.append(res)
resource['addon_profiles'] = addon_profiles
if resource['agent_pool_profiles'] is not None:
agent_pool_profiles = []
for res in resource.pop('agent_pool_profiles'):
res = res.__dict__
agent_pool_profiles.append(res)
resource['agent_pool_profiles'] = agent_pool_profiles
self._create_resource(resource['id'],
resource['name'],
kind,
metadata=resource)
elif kind == 'az_location':
for item in metadata:
resource = item.__dict__
self._create_resource(resource['name'],
resource['display_name'],
kind,
metadata=resource)
elif kind == 'az_virtual_machine':
for item in metadata:
resource = item.__dict__
resource['hardware_profile'] = resource.pop('hardware_profile').__dict__
resource['storage_profile'] = resource.pop('storage_profile').__dict__
if resource['storage_profile']['image_reference'] is not None:
resource['storage_profile']['image_reference'] = resource['storage_profile'].pop('image_reference').__dict__
data_disks = []
if 'data_disks' in resource['storage_profile']:
for data_disk in resource['storage_profile'].pop('data_disks'):
data_disk = data_disk.__dict__
if data_disk['managed_disk'] is not None:
data_disk['managed_disk'] = data_disk.pop('managed_disk').__dict__
if data_disk['vhd'] is not None:
data_disk['vhd'] = data_disk.pop('vhd').__dict__
data_disks.append(data_disk)
resource['storage_profile']['data_disks'] = data_disks
resource['storage_profile']['os_disk'] = resource['storage_profile'].pop('os_disk').__dict__
if resource['storage_profile']['os_disk']['managed_disk'] is not None:
resource['storage_profile']['os_disk']['managed_disk'] = resource['storage_profile']['os_disk'].pop('managed_disk').__dict__
if resource['storage_profile']['os_disk']['image'] is not None:
resource['storage_profile']['os_disk']['image'] = resource['storage_profile']['os_disk'].pop('image').__dict__
if resource['storage_profile']['os_disk']['vhd'] is not None:
resource['storage_profile']['os_disk']['vhd'] = resource['storage_profile']['os_disk'].pop('vhd').__dict__
network_interfaces = []
resource['network_profile'] = resource.pop('network_profile').__dict__
for network_interface in resource['network_profile'].pop('network_interfaces'):
network_interfaces.append(network_interface.__dict__)
resource['network_profile']['network_interfaces'] = network_interfaces
if resource['diagnostics_profile'] is not None:
resource['diagnostics_profile'] = resource.pop('diagnostics_profile').__dict__
resource['diagnostics_profile']['boot_diagnostics'] = resource['diagnostics_profile'].pop('boot_diagnostics').__dict__
resource['os_profile'] = resource.pop('os_profile').__dict__
if 'linux_configuration' in resource['os_profile']:
resource['os_profile']['linux_configuration'] = resource['os_profile'].pop('linux_configuration').__dict__
resource['os_profile']['linux_configuration']['ssh'] = resource['os_profile']['linux_configuration'].pop('ssh').__dict__
public_keys = []
for public_key in resource['os_profile']['linux_configuration']['ssh'].pop('public_keys'):
public_keys.append(public_key.__dict__)
resource['os_profile']['linux_configuration']['ssh']['public_keys'] = public_keys
if resource['resources'] is not None:
resources = []
for res in resource.pop('resources'):
resources.append(res.__dict__)
resource['resources'] = resources
if resource['availability_set'] is not None:
resource['availability_set'] = resource.pop('availability_set').__dict__
self._create_resource(resource['id'],
resource['name'],
kind,
metadata=resource)
elif kind == 'az_network':
for item in metadata:
resource = item.__dict__
resource['address_space'] = resource.pop('address_space').__dict__
if 'network_security_group' in resource:
resource['network_security_group'] = resource.pop('network_security_group').__dict__
if resource['dhcp_options'] is not None:
resource['dhcp_options'] = resource.pop('dhcp_options').__dict__
if len(resource['virtual_network_peerings']) > 0:
virtual_network_peerings = []
for res in resource.pop('virtual_network_peerings'):
res = res.__dict__
res['remote_virtual_network'] = res.pop('remote_virtual_network').__dict__
res['remote_address_space'] = res.pop('remote_address_space').__dict__
virtual_network_peerings.append(res)
resource['virtual_network_peerings'] = virtual_network_peerings
if resource['subnets'] is not None:
subnets = []
for res in resource.pop('subnets'):
res = res.__dict__
if res['route_table'] is not None:
res['route_table'] = res.pop('route_table').__dict__
if 'network_security_group' in res and res['network_security_group'] is not None:
res['network_security_group'] = res.pop('network_security_group').__dict__
if res['ip_configurations'] is not None:
ip_configurations = []
for ress in res.pop('ip_configurations'):
ip_configurations.append(ress.__dict__)
res['ip_configurations'] = ip_configurations
subnets.append(res)
resource['subnets'] = subnets
self._create_resource(resource['id'],
resource['name'],
kind,
metadata=resource)
elif kind == 'az_subnet':
for item in metadata:
resource = item.__dict__
if resource['route_table'] is not None:
resource['route_table'] = resource.pop('route_table').__dict__
if resource['network_security_group'] is not None:
resource['network_security_group'] = resource.pop('network_security_group').__dict__
if resource['ip_configurations'] is not None:
ip_configurations = []
for res in resource.pop('ip_configurations'):
ip_configurations.append(res.__dict__)
resource['ip_configurations'] = ip_configurations
self._create_resource(resource['id'],
resource['name'],
kind,
metadata=resource)
elif kind == '__all__':
for item in metadata:
resource = item.__dict__
                if resource.get('sku') is not None:
                    resource['sku'] = resource['sku'].__dict__
                if resource.get('identity') is not None:
                    identity = resource['identity'].__dict__
                    identity['type'] = identity['type'].__dict__
                    resource['identity'] = identity
                if resource['type'] not in RESOURCE_MAP:
                    logger.info('Skipping unmapped resource type: %s', resource['type'])
                    continue
                self._create_resource(resource['id'],
                                      resource['name'],
                                      RESOURCE_MAP[resource['type']],
                                      metadata=resource)
def get_subscription_id_from_resource_id(self, id):
parts = id.split('/')
return '/'.join(parts[:3])
def get_group_id_from_resource_id(self, id):
parts = id.split('/')
return '/'.join(parts[:5]).replace('/resourcegroups/', '/resourceGroups/')
def get_group_name_from_resource_id(self, id):
parts = id.split('/')
return parts[4]
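    # Example (hypothetical ID, standard Azure layout):
    #   '/subscriptions/<sub-id>/resourceGroups/<rg-name>/providers/...'
    # splits to ['', 'subscriptions', '<sub-id>', 'resourceGroups', '<rg-name>', ...],
    # so parts[:3] re-joins to '/subscriptions/<sub-id>' and parts[4] is the group name.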
def get_resource_action_fields(self, resource, action):
fields = {}
if resource.kind == 'az_managed_cluster':
if action == 'create_manager':
initial_name = resource.metadata['node_resource_group'].replace('MC_', '')
fields['name'] = forms.CharField(label='New manager name',
validators=[validate_manager_name],
help_text='Managed cluster <strong>{}</strong> from resource group <strong>{}</strong> will be imported.'.format(
resource.name, self.get_group_name_from_resource_id(resource.uid)),
initial=initial_name)
return fields
def process_resource_action(self, resource, action, data):
if resource.kind == 'az_managed_cluster':
if action == 'create_manager':
if self.auth():
raw_data = self.container_service_api.managed_clusters.list_cluster_admin_credentials(self.get_group_name_from_resource_id(resource.uid),
resource.metadata['name'])
for raw_kubeconfig in raw_data.kubeconfigs:
kubeconfig_yaml = raw_kubeconfig.value.decode()
                        kubeconfig = yaml.safe_load(kubeconfig_yaml)
cluster = kubeconfig['clusters'][0]['cluster']
user = kubeconfig['users'][0]['user']
manager = Manager.objects.create(
name=data['name'],
engine="kubernetes",
metadata={
'user': user,
'cluster': cluster,
'engine': "kubernetes",
'scope': "global"
})
manager.save()
if manager.client().check_status():
manager.status = 'active'
else:
manager.status = 'error'
manager.save()
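
# The repeated __dict__ flattening above could be factored into one recursive helper.
# A minimal sketch under that assumption (hypothetical; not part of the original class):
def _to_plain(value):
    """Recursively convert SDK model objects into JSON-friendly dicts/lists."""
    if isinstance(value, (list, tuple)):
        return [_to_plain(v) for v in value]
    if isinstance(value, dict):
        return {k: _to_plain(v) for k, v in value.items()}
    if hasattr(value, '__dict__'):
        return {k: _to_plain(v) for k, v in vars(value).items()}
    return value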
|
import psycopg2
import win32com.client
import pythoncom
import time
#================== Database Connection ===================
conn_string = "host='localhost' dbname ='Anthouse' user='blue1028' password='ehdghks57'"
try:
    conn = psycopg2.connect(conn_string)
except psycopg2.Error as e:
    print("Database connection error:", e)
    raise
curs = conn.cursor()
class XASessionEvents:
logInState = 0
def OnLogin(self, code, msg):
print("OnLogin method is called")
print(str(code))
print(str(msg))
if str(code) == '0000':
XASessionEvents.logInState = 1
def OnLogout(self):
print("OnLogout method is called")
def OnDisconnect(self):
print("OnDisconnect method is called")
class XAQueryEvents:
queryState = 0
def OnReceiveData(self,szTrCode):
print("ReceiveData")
XAQueryEvents.queryState = 1
def OnReceiveMessage(self, systemError, messageCode, message):
print("ReceiveMessage")
if __name__ == "__main__":
server_addr = "hts.ebestsec.co.kr"
server_port = 20001
server_type = 0
user_id = "songdh10"
user_pass ="gusdl57"
user_certificate_pass="gusdlsla57"
inXASession = win32com.client.DispatchWithEvents("XA_Session.XASession", XASessionEvents)
inXASession.ConnectServer(server_addr, server_port)
inXASession.Login(user_id, user_pass, user_certificate_pass, server_type, 0)
while XASessionEvents.logInState == 0:
pythoncom.PumpWaitingMessages()
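    # DispatchWithEvents binds the XASessionEvents handlers to the COM session object;
    # pumping waiting Windows messages is what lets OnLogin fire and set logInState.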
inXAQuery2 = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery", XAQueryEvents)
inXAQuery2.LoadFromResFile("C:\\eBEST\\xingAPI\\Res\\t1901.res")
curs.execute("SELECT s_code FROM sdata_stock2")
result = curs.fetchall()
print(result)
for i in result:
s_codelist = list(i)
print(s_codelist[0])
        inXAQuery2.SetFieldData('t1901InBlock', 'shcode', 0, s_codelist[0])
        # send the request once and print its return code
        print(inXAQuery2.Request(0))
while XAQueryEvents.queryState == 0:
pythoncom.PumpWaitingMessages()
hname = inXAQuery2.GetFieldData('t1901OutBlock','hname',0)
price = inXAQuery2.GetFieldData('t1901OutBlock','price',0)
sign = inXAQuery2.GetFieldData('t1901OutBlock','sign',0)
change = inXAQuery2.GetFieldData('t1901OutBlock','change',0)
diff = inXAQuery2.GetFieldData('t1901OutBlock','diff',0)
volume = inXAQuery2.GetFieldData('t1901OutBlock','volume',0)
recprice = inXAQuery2.GetFieldData('t1901OutBlock','recprice',0)
avgp = inXAQuery2.GetFieldData('t1901OutBlock','avg',0)
uplmtprice = inXAQuery2.GetFieldData('t1901OutBlock','uplmtprice',0)
dnlmtprice = inXAQuery2.GetFieldData('t1901OutBlock','dnlmtprice',0)
jnilvolume = inXAQuery2.GetFieldData('t1901OutBlock','jnilvolume',0)
volumediff = inXAQuery2.GetFieldData('t1901OutBlock','volumediff',0)
openp = inXAQuery2.GetFieldData('t1901OutBlock','open',0)
opentime = inXAQuery2.GetFieldData('t1901OutBlock','opentime',0)
high = inXAQuery2.GetFieldData('t1901OutBlock','high',0)
hightime = inXAQuery2.GetFieldData('t1901OutBlock','hightime',0)
low = inXAQuery2.GetFieldData('t1901OutBlock','low',0)
lowtime = inXAQuery2.GetFieldData('t1901OutBlock','lowtime',0)
high52w = inXAQuery2.GetFieldData('t1901OutBlock','high52w',0)
high52wdate = inXAQuery2.GetFieldData('t1901OutBlock','high52wdate',0)
low52w = inXAQuery2.GetFieldData('t1901OutBlock','low52w',0)
low52wdate = inXAQuery2.GetFieldData('t1901OutBlock','low52wdate',0)
exhratio = inXAQuery2.GetFieldData('t1901OutBlock','exhratio',0)
flmtvol = inXAQuery2.GetFieldData('t1901OutBlock','flmtvol',0)
per = inXAQuery2.GetFieldData('t1901OutBlock','per',0)
listing = inXAQuery2.GetFieldData('t1901OutBlock','listing',0)
jkrate = inXAQuery2.GetFieldData('t1901OutBlock','jkrate',0)
vol = inXAQuery2.GetFieldData('t1901OutBlock','vol',0)
shcode = inXAQuery2.GetFieldData('t1901OutBlock','shcode',0)
valuep = inXAQuery2.GetFieldData('t1901OutBlock','value',0)
highyear = inXAQuery2.GetFieldData('t1901OutBlock','highyear',0)
highyeardate = inXAQuery2.GetFieldData('t1901OutBlock','highyeardate',0)
lowyear = inXAQuery2.GetFieldData('t1901OutBlock','lowyear',0)
lowyeardate = inXAQuery2.GetFieldData('t1901OutBlock','lowyeardate',0)
upname = inXAQuery2.GetFieldData('t1901OutBlock','upname',0)
upcode = inXAQuery2.GetFieldData('t1901OutBlock','upcode',0)
upprice = inXAQuery2.GetFieldData('t1901OutBlock','upprice',0)
upsign = inXAQuery2.GetFieldData('t1901OutBlock','upsign',0)
upchange = inXAQuery2.GetFieldData('t1901OutBlock','upchange',0)
updiff = inXAQuery2.GetFieldData('t1901OutBlock','updiff',0)
curs.execute("INSERT INTO sdata_stock_current (hname,price,sign,change,diff,volume,recprice,avgp,uplmtprice,dnlmtprice,jnilvolume,volumediff,openp,opentime,high,hightime,low,lowtime,high52w,high52wdate,low52w,low52wdate,exhratio,flmtvol,per,listing,jkrate,vol,shcode,valuep,highyear,highyeardate,lowyear,lowyeardate,upname,upcode,upprice,upsign,upchange,updiff) VALUES ('%s','%s', '%s', '%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s','%s', '%s', '%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s','%s', '%s','%s')"%(hname,price,sign,change,diff,volume,recprice,avgp,uplmtprice,dnlmtprice,jnilvolume,volumediff,openp,opentime,high,hightime,low,lowtime,high52w,high52wdate,low52w,low52wdate,exhratio,flmtvol,per,listing,jkrate,vol,shcode,valuep,highyear,highyeardate,lowyear,lowyeardate,upname,upcode,upprice,upsign,upchange,updiff))
print(XAQueryEvents.queryState)
XAQueryEvents.queryState = 0
conn.commit()
time.sleep(1.1)
|
import openpyxl
from openpyxl.chart import PieChart, Reference
wb = openpyxl.load_workbook(r"..\data\pie_chart.xlsx")
sh = wb.active
#print(sh.max_row)
data = Reference(sh, min_col=2, min_row=1, max_row=sh.max_row)
labels = Reference(sh, min_col=1, min_row=2, max_row=sh.max_row)
chart = PieChart()
chart.title = "各部門業績"  # "performance by department"
chart.add_data(data, titles_from_data=True)
chart.set_categories(labels)
sh.add_chart(chart, "D3")
wb.save(r"..\data\pie_chart.xlsx")
|
import matcom.tools.edge_calculators as edg
import numpy as np
from matcom.pipelines.generate_structure_collection import FRAMEWORK_FEATURIZER
from collections import defaultdict
from dataspace.base import Pipe, in_batches
from dataspace.workspaces.remote_db import MongoFrame
from pymatgen.core import Structure
from pymatgen.analysis.defects.generators import VacancyGenerator
from pandas import DataFrame
'''
this module implements a pipeline for generating a mongo database containing
a graph structure from a database of structural feature vectors. pipeline
operations are implemented as instance methods
'''
class GenerateGraphCollection(Pipe):
'''
    structures (vertices) within a similarity threshold are connected by edges
    to form a graph of the structure space. additional edges connect structures
    that are similar to one another when a defect (vacancy/interstitial) is
    induced in one of the structures (e.g. rocksalt + interstitial = BCC). the
    graph structure is stored as an adjacency list to conserve storage/memory.
    Notes: document schema for the graph collection
        "material_id" (str) the source vertex
        "edges" (list of str) the destination vertices
        "vacancy_edges" (dict) the destination vertices for each symmetrically
            inequivalent site (keys are site indices, values are lists of str)
Attributes:
source (MongoFrame) a workspace which retrieves structural features
destination (MongoFrame) a workspace which stores graph structure
'''
def __init__(self, host='localhost', port=27017,
database='structure_graphs',
structure_collection='structure', graph_collection='graph'):
'''
Args:
host (str) hostname or IP address or Unix domain socket path
port (int) port number on which to connect
database (str) name of a pymongo database
structure_collection (str) name of a pymongo collection that holds
data on the structures being analyzed
graph_collection (str) name of a pymongo collection that holds data
on the graph representation of the structures
'''
structure_space = MongoFrame(
host=host, port=port, database=database,
collection=structure_collection)
graph_space = MongoFrame(
host=host, port=port, database=database,
collection=graph_collection)
Pipe.__init__(self, source=structure_space, destination=graph_space)
def _load_structure_features(self):
'''
loads feature vectors into self.source.memory
'''
self.source.from_storage(filter={'structure_features':
{'$exists': True}},
projection={'material_id': 1,
'structure_features': 1,
'_id': 0})
self.source.compress_memory(column='structure_features',
decompress=True)
self.source.memory.set_index('material_id', inplace=True)
def _load_structures(self, material_ids):
'''
loads structures into self.source.memory
'''
self.source.from_storage(filter={'material_id':
{'$in': material_ids}},
projection={'material_id': 1,
'structure': 1,
'_id': 0})
self.source.memory.set_index('material_id', inplace=True)
self.source.memory = self.source.memory.loc[material_ids]
def update_verticies(self,
criteria={'structure_features': {'$exists': True}}):
'''
        populate vertices in graph space with vertices from structure space
Notes:
IO limited method
'''
self.source.from_storage(filter=criteria,
projection={'material_id': 1})
self.transfer(to='destination')
self.destination.to_storage(identifier='material_id', upsert=True)
@in_batches
def update_edges(self, threshold=0.5, batch_size=10000,
edge_calculator=edg.pairwise_squared_similarity):
'''
solve for undirected, boolean edges based on exact similarity
Notes:
            Transformation limited method
Args:
threshold (float) distance threshold to connect an edge
batch_size (int) batch size for computing pairwise distances when
generating graph edges. subject to memory constraints
edge_calculator (func) a pairwise edge calculator that returns an
N x M adjacency matrix
'''
# load material ids without defined edges
self.destination.from_storage(filter={'edges':
{'$exists': False}},
projection={'material_id': 1},
limit=batch_size)
if len(self.destination.memory.index) == 0:
return 0 # returns False when update is complete
else:
# saves ids of source verticies from batch
source_ids = self.destination.memory['material_id'].values
self.destination.memory = None # cleanup memory
# saves the potential destination verticies and clean-up memory
self._load_structure_features()
all_ids = self.source.memory.index.values
all_vectors = self.source.memory.values
source_vectors = self.source.memory.loc[source_ids].values
self.source.memory = None # cleanup memory
            # determines the edge matrix and corresponding adjacency list
edge_matrix = edge_calculator(
all_vectors, source_vectors, threshold)
adjacency_list = {}
for j in range(edge_matrix.shape[1]):
adjacency_list[source_ids[j]] = {
'edges': list(all_ids[edge_matrix[:, j]])}
# stores edges in the graph collection
self.destination.memory = DataFrame.from_dict(
adjacency_list, orient='index').reset_index().rename(
columns={'index': 'material_id'})
self.destination.to_storage(identifier='material_id')
return 1 # returns True to continue the update
@in_batches
def update_vacancy_edges(self, threshold=0.5, batch_size=100,
edge_calculator=edg.pairwise_squared_similarity,
featurizer=FRAMEWORK_FEATURIZER):
'''
solve for directed, boolean edges based on similarity with a vacancy
Notes:
Transformation limited method (featurization of vacancy structures)
Args:
threshold (float) distance threshold to connect an edge
batch_size (int) batch size for computing pairwise distances when
generating graph edges. subject to memory constraints
edge_calculator (func) a sub-pairwise distance calculator that
returns an N x M adjacency matrix
featurizer (BaseFeaturizer) an instance of a structural featurizer
'''
        # loads a batch of vertices without defined edges
self.destination.from_storage(
filter={'vacancy_edges':
{'$exists': False}},
projection={'material_id': 1},
limit=batch_size)
if len(self.destination.memory.index) == 0:
return 0 # returns False when update is complete
else:
# gets the source vertex ids for the current batch
source_ids = self.destination.memory['material_id'].values
self.destination.memory = None # cleanup memory
# gets the potential destination vertex ids and their features
self._load_structure_features()
all_ids = self.source.memory.index.values
all_vectors = self.source.memory.values
vector_labels = np.array(
[s.split('.')[1] for s in self.source.memory.columns.values])
self.source.memory = None # cleanup memory
# calculates feature vectors for each (source) vacancy structure
self._load_structures(list(source_ids))
source_structures = self.source.memory['structure'].values
self.source.memory = None # cleanup memory
vacancy_structures = []
for material_id, structure in zip(source_ids, source_structures):
structure = Structure.from_dict(structure)
for site_i, vacancy in enumerate(VacancyGenerator(structure)):
vacancies = [
material_id,
str(site_i),
vacancy.generate_defect_structure(supercell=(1, 1, 1))
]
vacancy_structures.append(vacancies)
vacancy_structures = DataFrame(
data=vacancy_structures,
columns=['source_id', 'site_index', 'structure'])
vacancy_vectors = featurizer.featurize_dataframe(
vacancy_structures, 'structure', ignore_errors=True,
pbar=False, inplace=False)[vector_labels].values
            # determine the edge matrix and corresponding adjacency list
edge_matrix = edge_calculator(
all_vectors, vacancy_vectors, threshold)
adjacency_list = defaultdict(dict)
for j in range(edge_matrix.shape[1]):
source_id = vacancy_structures['source_id'][j]
site_index = vacancy_structures['site_index'][j]
adjacency_list[source_id][site_index] = list(
all_ids[edge_matrix[:, j]])
# store edges in graph space
self.destination.memory = DataFrame.from_records(
list(adjacency_list.items()),
columns=['material_id', 'vacancy_edges'])
self.destination.to_storage(identifier='material_id')
return 1 # return True to continue the update
if __name__ == '__main__':
gen = GenerateGraphCollection()
# gen.destination.delete_storage(clear_collection=True)
# gen.update_verticies()
# gen.update_edges()
# gen.update_vacancy_edges()
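
# A minimal sketch (assumed semantics, not the matcom edge calculator) of the
# thresholded pairwise-edge idea from the class docstring: connect vertex pairs
# whose squared feature distance falls below the threshold.
def pairwise_threshold_edges(all_vectors, source_vectors, threshold):
    # (N, d) and (M, d) feature matrices -> (N, M) boolean adjacency matrix
    diff = all_vectors[:, None, :] - source_vectors[None, :, :]
    return (diff ** 2).sum(axis=2) < threshold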
|
if __name__=="__main__":
import pymysql
pymysql.install_as_MySQLdb()
from blog import db
from blog import User
db.create_all()
user1=User(username='Corey',email='c@gmail.com',password='1234')
db.session.add(user1)
db.session.commit()
print(User.query.all())
|
#Challenge: Implement a queue with two stacks.
class stack:
def __init__(self):
self.container = []
def __repr__(self):
return str(self.container)
def push(self, elem):
self.container.append(elem)
def pull(self):
return self.container.pop()
    def peek(self):
        return self.container[-1]
    def isempty(self):
        return len(self.container) == 0
class twostackqueue:
def __init__(self):
self.instack = stack()
self.outstack = stack()
def __repr__(self):
return str(self.instack) + '<->' + str(self.outstack)
def enqueue(self, elem):
self.instack.push(elem)
    def dequeue(self):
        # refill the out-stack from the in-stack only when it is empty
        if self.outstack.isempty():
            while not self.instack.isempty():
                self.outstack.push(self.instack.pull())
        return self.outstack.pull()
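
# Quick usage check (illustrative): FIFO order is preserved across the two stacks.
if __name__ == '__main__':
    q = twostackqueue()
    for n in (1, 2, 3):
        q.enqueue(n)
    print(q.dequeue(), q.dequeue(), q.dequeue())  # 1 2 3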
|
from multiprocessing import Process
import time
import traceback
from logging import Logger
from core.schema import S1
from services import poller_worker
from services.service_base import ServiceBase
from utils import config
from core import Data
class Poller(ServiceBase):
def __init__(self, logger, name, data, providers, config_path, dummy=False):
"""
@type logger: Logger
@type data: Data
"""
super(Poller, self).__init__(logger, name, data, providers, config_path)
self.kwargs = None
# default poller driver period
self.period_s = 2
# how many gids are allowed to expire in period_s before new worker is launched
self.gid_set_threshold = 100
# number of worker processes
self.workers_min = 3
# max number of worker process
self.workers_max = 4
# default gid poll period, 10 min
self.gid_poll_s = 600
# default no poll period, 30 min
self.gid_no_poll_s = 1800
self.started_at = time.time()
self.stats = {
'hour': (self.started_at, 0),
'day': (self.started_at, 0),
}
self.channel_handler = {
S1.poller_channel_name('all-out'): self.on_all_out,
S1.poller_channel_name(self.name): self.on_my_name
}
    def get_worker(self, sub_name):
        # copy so each worker gets its own kwargs and the shared dict is not mutated
        kwargs = dict(self.kwargs)
        kwargs['name'] = sub_name
        return Process(target=poller_worker.run_poller_worker, name=sub_name, kwargs=kwargs)
def on_terminate(self, *args, **kwargs):
"""
Called by signal handlers from ServiceBase
WARNING: This can be called multiple times during process termination!
"""
self.logger.warning('Poller master is terminating...')
# stop workers
while self.stop_worker():
self.logger.warning('One worker stopped')
# stop self
self.send_exit(S1.poller_channel_name(self.name), self_target=True)
self.logger.warning('Poller master terminate sequence complete!')
def on_exit(self, channel):
self.logger.warning('Poller master terminating listener...')
self.terminate()
def on_raw(self, channel, raw):
try:
# channel_handler is one of the two routines below
self.channel_handler[channel](raw)
except Exception as e:
self.logger.error('ERROR: Exception in on_raw: {0}, \r\n{1}'.format(e, traceback.format_exc()))
def on_all_out(self, gid):
"""
Reschedules the gid for next poll based on gid activity, other factors may be added later
@param gid: assuming raw data is gid
"""
at_time = time.time()
# default poll period for each gid is 10 * 60 sec
next_time = at_time + self.gid_poll_s
try:
            # check recent update activity for this gid
            if not self.data.cache.get_num_minute_updates(gid, int(at_time), 90):
                # no recent updates --> push the next poll out by the no-poll period
                next_time += self.gid_no_poll_s
except Exception as e:
msg = 'Exception while processing stats [{0}], [{1}], {2}'
self.logger.error(msg.format(gid, e, traceback.format_exc()))
# store just polled gid in sorted gid set
self.data.balancer.add_gid_set(gid, next_time)
def on_my_name(self, raw):
self.schedule_next_batch(allow_worker_start=False)
def on_timeout(self):
self.schedule_next_batch(allow_worker_start=True)
def schedule_next_batch(self, allow_worker_start=False):
try:
self.logger.info('[{0}] wake up!'.format(self.name))
# get the gid set until all processed
while True:
at_time = time.time()
gid_set = self.data.balancer.get_next_poll_set(at_time + self.period_s / 2.0)
gid_set_len = len(gid_set)
if not gid_set_len:
self.logger.warning('[{0}] Empty gid_set...'.format(self.name))
return
elif allow_worker_start and gid_set_len > self.gid_set_threshold:
self.logger.warning('Gid set count [{0}] above threshold, starting worker...'.format(gid_set_len))
self.start_worker()
self.logger.info('[{0}] Invoking poll for [{1}] items...'.format(self.name, gid_set_len))
# clean orphaned gids
update_set = [gid for gid in gid_set if not self.data.check_orphan(gid, at_time)]
# post each gid to poller
for gid in update_set:
# move next poll time for the gid to avoid duplicate polling
self.data.balancer.add_gid_set(gid, at_time + self.gid_poll_s)
# post to pollers
self.broadcast_command(S1.poller_channel_name('all'), S1.msg_update(), gid)
# update stats
self.update_stats(at_time, len(update_set))
except Exception as e:
self.logger.warning('Exception in poller driver: {0}'.format(e))
self.logger.exception(traceback.format_exc())
self.data.unregister_poller(self.name)
def update_stats(self, at_time, count):
s = self.stats['hour']
self.stats['hour'] = (s[0], s[1] + count)
s = self.stats['day']
self.stats['day'] = (s[0], s[1] + count)
# set in DB
self.data.balancer.set_poller_stats(self.name, hour=self.stats['hour'][1], day=self.stats['day'][1])
# clean stats if lapsed
if at_time - self.stats['hour'][0] > 3600:
# reset counters
self.stats['hour'] = (at_time, 0)
if at_time - self.stats['day'][0] > 86400:
self.stats['day'] = (at_time, 0)
def run(self, *args, **kwargs):
self.kwargs = kwargs
cfg = config.load_config(kwargs['config_path'], 'poller.json')
        self.gid_poll_s = cfg.get('gid_poll_s', self.gid_poll_s)
        self.period_s = cfg.get('period_s', self.period_s)
        self.workers_min = cfg.get('workers_min', self.workers_min)
        self.workers_max = cfg.get('workers_max', self.workers_max)
self.logger.info('Poller v[{0}], name=[{1}], poll delay=[{2}]s, period=[{3}]s starting...'.format(config.version, self.name, self.gid_poll_s, self.period_s))
        # give pub/sub some time... not using synchronous notifications...
time.sleep(1)
# register self as poller
self.data.register_poller(self.name)
# start worker processes
for n in range(0, self.workers_min):
self.start_worker()
# drop message to self to do immediate poll round
self.broadcast_data(S1.poller_channel_name(self.name), '#')
# start listening
self.listener([S1.poller_channel_name('all-out'), S1.poller_channel_name(self.name)], None, timeout=self.period_s)
self.logger.warning('Poller master listener exit!')
# un-register self
self.data.unregister_poller(self.name)
# force kill any remaining workers
while self.workers:
p = self.workers.popitem()
self.logger.warning('Terminating remaining poller {0}!'.format(p[0]))
p[1].terminate()
self.logger.warning('Poller master process exit!')
|
#!/usr/bin/env python
#
# ssl_sigs.py
# Create Suricata and Snort signatures to detect an inbound SSL Cert for a single domain.
#
# Mega thanks to Darien Huss[1] and his work on a DNS signature script which is where most of this code was ripped from. Another big thanks to Travis Green for assistance.
# [1]https://github.com/darienhuss/dns_sigs
#
# Example: $ python ssl_sigs.py -d something.bad.com -m "Ursnif Injects" -s 100000000 -r "31d7c3e829be03400641f80b821ef728|0421008445828ceb46f496700a5fa65e"
#
# OUTPUT:
#=========================[Certificate Signatures]=========================
#
#Suricata 3.2.+ SSL Cert Rule:
#alert tls $EXTERNAL_NET any -> $HOME_NET any (msg:"ET TROJAN Observed Malicious SSL Cert (Ursnif Injects)"; flow:established,to_client; tls_cert_subject; content:"CN=something.bad.com"; nocase; isdataat:!1,relative; reference:md5,31d7c3e829be03400641f80b821ef728; reference:md5,0421008445828ceb46f496700a5fa65e; classtype:trojan-activity; sid:100000000; rev:1;)
#
#Suricata 1.3+ SSL Cert Rule:
#alert tls $EXTERNAL_NET any -> $HOME_NET any (msg:"ET TROJAN Observed Malicious SSL Cert (Ursnif Injects)"; flow:established,to_client; content:"|55 04 03|"; content:"|11|something.bad.com"; distance:1; within:18; fast_pattern; reference:md5,31d7c3e829be03400641f80b821ef728; reference:md5,0421008445828ceb46f496700a5fa65e; classtype:trojan-activity; sid:100000000; rev:1;)
#
#Snort 2.9+ SSL Cert Rule:
#alert tcp $EXTERNAL_NET 443 -> $HOME_NET any (msg:"ET TROJAN Observed Malicious SSL Cert (Ursnif Injects)"; flow:established,to_client; content:"|55 04 03|"; content:"|11|something.bad.com"; distance:1; within:18; fast_pattern; reference:md5,31d7c3e829be03400641f80b821ef728; reference:md5,0421008445828ceb46f496700a5fa65e; classtype:trojan-activity; sid:100000000; rev:1;)
#
# You can also use -t/--sni to also print the equivalent TLS SNI signatures (useful for detecting the cert via the outbound request in case the domain is down/cert is gone)
#
# $ python ssl_sigs.py -d something.bad.com -m "ET TROJAN Observed Malicious SSL Cert (Ursnif Injects)" -s 100000000 -r "31d7c3e829be03400641f80b821ef728|0421008445828ceb46f496700a5fa65e" -t
#
# <snip>
#
#=========================[SNI Signatures]=========================
#
#Suricata 3.2+ TLS SNI Cert Rule:
#alert tls $HOME_NET any -> $EXTERNAL_NET any (msg:"ET TROJAN Observed Ursnif Injects Domain (something .bad .com in TLS SNI)"; flow:established,to_server; tls_sni; content:"something.bad.com"; isdataat:!1,relative; reference:md5,31d7c3e829be03400641f80b821ef728; reference:md5,0421008445828ceb46f496700a5fa65e; classtype:trojan-activity; sid:100000001; rev:1;)
#
#Suricata 1.3+ TLS SNI Cert Rule:
#alert tls $HOME_NET any -> $EXTERNAL_NET any (msg:"ET TROJAN Observed Ursnif Injects Domain (something .bad .com in TLS SNI)"; flow:established,to_server; content:"|00 00 11|something.bad.com|00|"; fast_pattern; reference:md5,31d7c3e829be03400641f80b821ef728; reference:md5,0421008445828ceb46f496700a5fa65e; classtype:trojan-activity; sid:100000001; rev:1;)
#
#Snort 2.9+ TLS SNI Cert Rule:
#alert tcp $HOME_NET any -> $EXTERNAL_NET 443 (msg:"ET TROJAN Observed Ursnif Injects Domain (something .bad .com in TLS SNI)"; flow:established,to_server; content:"|00 00 11|something.bad.com|00|"; fast_pattern; reference:md5,31d7c3e829be03400641f80b821ef728; reference:md5,0421008445828ceb46f496700a5fa65e; classtype:trojan-activity; sid:100000001; rev:1;)
#
import argparse,re
def main():
parser = argparse.ArgumentParser(description='Create Suricata/Snort SSL Certificate Signatures')
parser.add_argument('-d','--domain', help='Domain name',required=True,default="")
    parser.add_argument('-m','--message', help='Malware name and/or activity (e.g. "Ursnif Injects")',required=True,default="")
parser.add_argument('-r','--reference', help='Provide a md5 or url reference, or list of references separated by a |',required=False,default="")
parser.add_argument('-c','--classtype', help='Provide signature classtype (default: trojan-activity)',required=False,default="trojan-activity")
parser.add_argument('-s','--sid', help='Provide starting sid number (default: 10000000)',required=False,default="10000000")
    parser.add_argument('-t','--sni', help='Include TLS SNI signatures also',action="store_true",required=False)
    parser.add_argument('-C','--category', help='Add a category for this rule (default: TROJAN)',required=False,default="TROJAN")
    parser.add_argument('-n','--rulesetname', help='Add a custom ruleset name (default: ET)', required=False,default="ET")
args = parser.parse_args()
domain = args.domain
message = args.message
references = args.reference
classtype = args.classtype
sid = int(args.sid)
sni = args.sni
category = args.category
rulesetname = args.rulesetname
reference = ''
if references:
md5_re = re.compile('^[a-f0-9]{32}$')
references = references.split('|')
for ref in references :
if md5_re.search(ref):
reference += 'reference:md5,%s; ' % ref
else:
reference += 'reference:url,%s; ' % ref
domain_len = '|{:02x}|'.format(len(domain))
within = len(domain_len + domain) - 3
domain_len_tlssni = '|00 00 {:02x}|'.format(len(domain))
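    # Worked example: len('something.bad.com') == 17 == 0x11, so domain_len is
    # '|11|' and within = len('|11|' + domain) - 3 = 21 - 3 = 18, matching the
    # 'within:18' in the sample rules above.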
tls_sid = sid + 1
domain_defang = re.sub(r"\.", " .", domain)
rule_stub_start_suri = 'alert tls $EXTERNAL_NET any -> $HOME_NET any (msg:"%s %s Observed Malicious SSL Cert (%s)"; flow:established,to_client; content:"|55 04 03|"; ' % (rulesetname,category,message)
rule_stub_start_suri_current = 'alert tls $EXTERNAL_NET any -> $HOME_NET any (msg:"%s %s Observed Malicious SSL Cert (%s)"; flow:established,to_client; tls_cert_subject; ' % (rulesetname,category,message)
rule_stub_start_snort = 'alert tcp $EXTERNAL_NET 443 -> $HOME_NET any (msg:"%s %s Observed Malicious SSL Cert (%s)"; flow:established,to_client; content:"|55 04 03|"; ' % (rulesetname,category,message)
rule_stub_content_suri_current = 'content:"CN=%s"; nocase; isdataat:!1,relative; ' % domain
rule_stub_len = 'content:"%s%s"; distance:1; ' % (domain_len,domain)
rule_stub_within = 'within:%s; fast_pattern; ' % within
rule_stub_end = '%sclasstype:%s; sid:%s; rev:1;)' % (reference,classtype,sid)
sid += 1
#SSL Cert stuff
    print('\r\n#=========================[Certificate Signatures]=========================\r\n')
    print('#Suricata 3.2.+ SSL Cert Rule:\r\n' + rule_stub_start_suri_current + rule_stub_content_suri_current + rule_stub_end + '\r\n')
    print('#Suricata 1.3+ SSL Cert Rule:\r\n' + rule_stub_start_suri + rule_stub_len + rule_stub_within + rule_stub_end + '\r\n')
    print('#Snort 2.9+ SSL Cert Rule:\r\n' + rule_stub_start_snort + rule_stub_len + rule_stub_within + rule_stub_end + '\r\n')
#TLSSNI stuff
if sni:
tls_sni_rule_stub_start_suri = 'alert tls $HOME_NET any -> $EXTERNAL_NET any (msg:"%s %s Observed %s Domain (%s in TLS SNI)"; flow:established,to_server; ' % (rulesetname,category,message,domain_defang)
tls_sni_rule_stub_start_snort = 'alert tcp $HOME_NET any -> $EXTERNAL_NET 443 (msg:"%s %s Observed %s Domain (%s in TLS SNI)"; flow:established,to_server; ' % (rulesetname,category,message,domain_defang)
rule_stub_content_suri_4 = 'tls_sni; content:"%s"; isdataat:!1,relative; ' % domain
rule_stub_content_snort_suri2 = 'content:"%s%s|00|"; fast_pattern; ' % (domain_len_tlssni,domain)
rule_stub_end_tlssni = '%sclasstype:%s; sid:%s; rev:1;)' % (reference,classtype,tls_sid)
        print('\r\n#=========================[SNI Signatures]=========================\r\n')
        print('#Suricata 3.2+ TLS SNI Cert Rule:\r\n' + tls_sni_rule_stub_start_suri + rule_stub_content_suri_4 + rule_stub_end_tlssni + '\r\n')
        print('#Suricata 1.3+ TLS SNI Cert Rule:\r\n' + tls_sni_rule_stub_start_suri + rule_stub_content_snort_suri2 + rule_stub_end_tlssni + '\r\n')
        print('#Snort 2.9+ TLS SNI Cert Rule:\r\n' + tls_sni_rule_stub_start_snort + rule_stub_content_snort_suri2 + rule_stub_end_tlssni + '\r\n')
else:
        print('\r\n')
if __name__ == '__main__':
main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pytest
import re
import time
from tests.common.environ import specific_build_type_timeout
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal, SkipIfIsilon
WAIT_TIME_MS = specific_build_type_timeout(60000, slow_build_timeout=100000)
# Skipping Isilon due to IMPALA-6998. TODO: Remove when there's a holistic revamp of
# what tests to run for non-HDFS platforms
@SkipIfLocal.multiple_impalad
@SkipIfIsilon.jira(reason="IMPALA-6998")
class TestRuntimeFilters(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestRuntimeFilters, cls).add_test_dimensions()
# Runtime filters are disabled on HBase
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format not in ['hbase'])
def test_basic_filters(self, vector):
self.run_test_case('QueryTest/runtime_filters', vector,
test_file_vars={'$RUNTIME_FILTER_WAIT_TIME_MS' : str(WAIT_TIME_MS)})
def test_wait_time(self, vector):
"""Test that a query that has global filters does not wait for them if run in LOCAL
mode"""
now = time.time()
self.run_test_case('QueryTest/runtime_filters_wait', vector)
duration_s = time.time() - now
    assert duration_s < (WAIT_TIME_MS / 1000), \
        "Query took too long (%ss, possibly waiting for missing filters?)" % str(duration_s)
def test_file_filtering(self, vector):
if 'kudu' in str(vector.get_value('table_format')):
return
self.change_database(self.client, vector.get_value('table_format'))
self.execute_query("SET RUNTIME_FILTER_MODE=GLOBAL")
self.execute_query("SET RUNTIME_FILTER_WAIT_TIME_MS=10000")
result = self.execute_query("""select STRAIGHT_JOIN * from alltypes inner join
(select * from alltypessmall where smallint_col=-1) v
on v.year = alltypes.year""")
assert re.search("Files rejected: 8 \(8\)", result.runtime_profile) is not None
assert re.search("Splits rejected: [^0] \([^0]\)", result.runtime_profile) is None
@SkipIfLocal.multiple_impalad
class TestBloomFilters(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestBloomFilters, cls).add_test_dimensions()
# Bloom filters are disabled on HBase, Kudu
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format not in ['hbase', 'kudu'])
def test_bloom_filters(self, vector):
self.run_test_case('QueryTest/bloom_filters', vector)
def test_bloom_wait_time(self, vector):
"""Test that a query that has global filters does not wait for them if run in LOCAL
mode"""
now = time.time()
self.run_test_case('QueryTest/bloom_filters_wait', vector)
duration_s = time.time() - now
    assert duration_s < (WAIT_TIME_MS / 1000), \
        "Query took too long (%ss, possibly waiting for missing filters?)" % str(duration_s)
@SkipIfLocal.multiple_impalad
class TestMinMaxFilters(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestMinMaxFilters, cls).add_test_dimensions()
# Min-max filters are only implemented for Kudu.
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format in ['kudu'])
def test_min_max_filters(self, vector):
self.run_test_case('QueryTest/min_max_filters', vector)
def test_large_strings(self, cursor, unique_database):
"""Tests that truncation of large strings by min-max filters still gives correct
results"""
table1 = "%s.min_max_filter_large_strings1" % unique_database
cursor.execute(
"create table %s (string_col string primary key) stored as kudu" % table1)
# Min-max bounds are truncated at 1024 characters, so construct some strings that are
# longer than that, as well as some that are very close to the min/max bounds.
matching_vals =\
('b' * 1100, 'b' * 1099 + 'c', 'd' * 1100, 'f'* 1099 + 'e', 'f' * 1100)
cursor.execute("insert into %s values ('%s'), ('%s'), ('%s'), ('%s'), ('%s')"
% ((table1,) + matching_vals))
non_matching_vals = ('b' * 1099 + 'a', 'c', 'f' * 1099 + 'g')
cursor.execute("insert into %s values ('%s'), ('%s'), ('%s')"
% ((table1,) + non_matching_vals))
table2 = "%s.min_max_filter_large_strings2" % unique_database
cursor.execute(
"create table %s (string_col string primary key) stored as kudu" % table2)
cursor.execute("insert into %s values ('%s'), ('%s'), ('%s'), ('%s'), ('%s')"
% ((table2,) + matching_vals))
cursor.execute("select count(*) from %s a, %s b where a.string_col = b.string_col"
% (table1, table2))
assert cursor.fetchall() == [(len(matching_vals),)]
# Insert a string that will have the max char (255) trailing after truncation, to
# test the path where adding 1 to the max bound after trunc overflows.
max_trail_str = "concat(repeat('h', 1000), repeat(chr(255), 50))"
cursor.execute("insert into %s values (%s)" % (table1, max_trail_str))
cursor.execute("insert into %s values (%s)" % (table2, max_trail_str))
cursor.execute("select count(*) from %s a, %s b where a.string_col = b.string_col"
% (table1, table2))
assert cursor.fetchall() == [(len(matching_vals) + 1,)]
# Insert a string that is entirely the max char to test the path where the max can't
# have 1 added to it after truncation and the filter is disabled.
all_max_str = "repeat(chr(255), 1030)"
cursor.execute("insert into %s values (%s)" % (table1, all_max_str))
cursor.execute("insert into %s values (%s)" % (table2, all_max_str))
cursor.execute("select count(*) from %s a, %s b where a.string_col = b.string_col"
% (table1, table2))
assert cursor.fetchall() == [(len(matching_vals) + 2,)]
@SkipIfLocal.multiple_impalad
class TestRuntimeRowFilters(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestRuntimeRowFilters, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format in ['parquet'])
def test_row_filters(self, vector):
self.run_test_case('QueryTest/runtime_row_filters', vector,
test_file_vars={'$RUNTIME_FILTER_WAIT_TIME_MS' : str(WAIT_TIME_MS)})
|
import requests
import bs4
#
# url = 'http://github.com'
# r = requests.get(url)
#
# r_html = r.text #r_html contain HTML
#
# soup = bs4.BeautifulSoup(r_html,features="lxml")
#
# title = soup.find('summary').text
#
# # print(r_html)
# print(title)
#
#
sauce = requests.get("https://niebezpiecznik.pl")
soup = bs4.BeautifulSoup(sauce.text, features="html.parser")
titles = soup.find_all("h2")
# print(soup.find("h2").text)
for title in titles:
    # .string is None when a tag has nested children; fall back to get_text()
    print(title.string or title.get_text())
|
import pygame, sys, time, random
from pygame.locals import *
import numpy as np
class Particle:
"""
@summary: Data class to store particle details i.e. Position, Direction and speed of movement, radius, etc
"""
def __init__(self):
self.__version = 0
"""@type: int"""
self.__position = []
self.__movement = []
self.__radius = 0
# Python overrides -------------------------------------------------------------------------------------------------
def __str__(self):
printStr = ''
printStr += 'Position: (' + str(self.__position[0]) + ',' + str(self.__position[1]) + ') '
printStr += 'Direction and Speed: (' + str(self.__movement[0]) + ',' + str(self.__movement[1]) + ') '
printStr += 'Radius: ' + str(self.__radius)
return printStr
    def set_all(self, position, movement, rad):
        # set position, movement and radius in one call
        print(position, movement, rad)
        # TODO: Check inputs
        self.__position = position
        self.__movement = movement
        self.__radius = rad
# Properties -------------------------------------------------------------------------------------------------------
@property
def Position(self):
return self.__position
@property
def Movement(self):
return self.__movement
@property
def Radius(self):
return self.__radius
# Methods ----------------------------------------------------------------------------------------------------------
def SetPosition(self, pos):
self.__position = pos
def SetMovement(self, move):
self.__movement = move
def SetRadius(self, rad):
self.__radius = rad
def CalculateGrid(screenWidth, screenHeight, resolution):
x_size = resolution + divmod(screenWidth, resolution)[1]
y_size = resolution + divmod(screenHeight, resolution)[1]
    print(x_size, y_size)
grid = []
for y in range(0, y_size):
temp_list = []
for x in range(0, x_size):
temp_list += [[x * (screenWidth / x_size), y * (screenHeight / y_size)]]
grid += [temp_list]
    print(np.array(grid).shape)
return grid
pygame.init()
windowSurface = pygame.display.set_mode((500, 400), 0, 32)
pygame.display.set_caption("Paint")
# get screen size
info = pygame.display.Info()
sw = info.current_w
sh = info.current_h
grid = CalculateGrid(sw, sh, 50) # NEED TO CALCULATE OCCUPIED VALUE FOR ALL GRID CELLS!!!!!!!!!!!!!!!!
y_size = len(grid[:])
x_size = len(grid[0])
cell_size_x = sw / x_size
cell_size_y = sh / y_size
print(x_size, y_size)
# for celly in range(0, y_size):
# for cellx in range(0, x_size):
# print grid[celly][cellx][0]
max_dx = 5
max_dy = 5
min_radius = 15
max_radius = 60
circle_objs = []
num_circles = 10
for i in range(0, num_circles):
p = Particle()
p.SetRadius(random.randrange(min_radius, max_radius))
p.SetPosition([random.randrange(p.Radius, sw - p.Radius), random.randrange(p.Radius, sh - p.Radius)])
p.SetMovement([random.random() * max_dx + 1, random.random() * max_dy + 1])
circle_objs += [p]
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
windowSurface.fill(BLACK)
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
windowSurface.fill(BLACK)
for particle in circle_objs:
dx = particle.Movement[0]
dy = particle.Movement[1]
radius = particle.Radius
# update position with direction
particle.SetPosition([particle.Position[0] + dx, particle.Position[1] + dy])
pos = particle.Position
# check bounds
if (pos[0] - radius) + dx < 0 or (pos[0] + radius) + dx > sw:
dx = -dx
particle.SetMovement([dx, dy])
if (pos[1] - radius) + dy < 0 or (pos[1] + radius) + dy > sh:
dy = -dy
particle.SetMovement([dx, dy])
# pygame.draw.circle(windowSurface, GREEN, (int(pos[0]), int(pos[1])), radius, 1)
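    # Metaball-style implicit field: each particle contributes r_i^2 / d_i^2 at a
    # grid point, so cells where the summed field exceeds 1 lie inside the blended
    # surface and are drawn below.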
for cellx in range(0, x_size):
for celly in range(0, y_size):
sum_cell = 0
for p in circle_objs:
sum_cell += pow(p.Radius, 2) / (pow((grid[celly][cellx][0]) - p.Position[0], 2) + pow((grid[celly][cellx][1]) - p.Position[1], 2))
if sum_cell > 1:
pygame.draw.rect(windowSurface, GREEN, [grid[celly][cellx][0], grid[celly][cellx][1], 3, 3], 0)
pygame.time.Clock().tick(20)
pygame.display.update()
|
import numpy as np
def is_pos_def(x):
return np.all(np.linalg.eigvals(x) > 0)
# Function that builds the H matrix used by quadprog
# Q is a square (NxN) matrix
# P is a square (NxN) matrix
# R_i is the weight associated with u
# N is the prediction horizon
def Hqp(Q, P, R_1, R_2, N):
    # Get the sizes of the matrices
size_Q = np.shape(Q)[0]
size_P = np.shape(P)[0]
    # (NxN) zero matrix
zeros_matriz = np.zeros((size_Q, size_Q))
zeros_columna = np.zeros((size_Q, 1))
zeros_fila = np.zeros((1 ,size_Q))
    # Build the Q part of the H matrix
H_q = 0
for i in range(N):
zeros_izq = np.tile(zeros_matriz, i)
zeros_der = np.tile(zeros_matriz, N-i)
fila = np.concatenate((zeros_izq, Q, zeros_der), axis=1)
if (np.isscalar(H_q)):
H_q = fila
else:
H_q = np.concatenate((H_q, fila), axis=0)
    # Extend H_q to include matrix P
H_p = np.concatenate((np.tile(zeros_matriz, N), P), axis=1)
H_qp = np.concatenate((H_q, H_p), axis=0)
    # Extend H_qp to include R_1 and R_2
# R_1
for i in range(N):
columna = np.tile(np.array([[0]]), (np.shape(H_qp)[0], 1))
H_qp = np.concatenate((H_qp, columna), axis=1)
fila_zeros = np.tile(np.array([[0]]), (1, np.shape(H_qp)[0]))
fila = np.concatenate((fila_zeros, np.array([[R_1]])), axis=1)
H_qp = np.concatenate((H_qp, fila), axis=0)
# R_2
for i in range(N):
columna = np.tile(np.array([[0]]), (np.shape(H_qp)[0], 1))
H_qp = np.concatenate((H_qp, columna), axis=1)
fila_zeros = np.tile(np.array([[0]]), (1, np.shape(H_qp)[0]))
fila = np.concatenate((fila_zeros, np.array([[R_2]])), axis=1)
H_qp = np.concatenate((H_qp, fila), axis=0)
H_qp = 2*H_qp
return H_qp
# Function that builds the f vector used by quadprog
# Hqp is the H matrix passed to quadprog
def f(Hqp):
tamano = np.shape(Hqp)[0]
f = np.tile(np.array([[0]]), (tamano, 1))
return f
# Function that builds the A matrix passed to quadprog
def Aqp(F, G, H, N):
    # Get the sizes of the matrices
size_F = np.shape(F)[0]
    size_G = np.shape(G)[0]
size_H = np.shape(H)
    # (NxN) zero matrices
zeros_matriz = np.zeros((size_F, size_F))
zeros_matriz_H = np.zeros(size_H)
zeros_columna = np.zeros((size_F, 1))
zeros_fila = np.zeros((1 ,size_F))
    # Build the F part of the Aqp matrix
A_f = 0
for i in range(N):
zeros_izq = np.tile(zeros_matriz, i)
zeros_der = np.tile(zeros_matriz, N-i)
fila = np.concatenate((zeros_izq, F, zeros_der), axis=1)
if (np.isscalar(A_f)):
A_f = fila
else:
A_f = np.concatenate((A_f, fila), axis=0)
    # Extend A_f to include matrix H
A_h = np.concatenate((np.tile(zeros_matriz_H, N), H), axis=1)
A_fh = np.concatenate((A_f, A_h), axis=0)
    # Extend A_fh to include G
for i in range(N):
columna = np.tile(np.array([[0]]), (np.shape(A_fh)[0], 1))
A_fh = np.concatenate((A_fh, columna), axis=1)
fila_zeros = np.tile(zeros_columna, (1, np.shape(A_fh)[1]-1))
fila = np.concatenate((fila_zeros, G), axis=1)
A_fh = np.concatenate((A_fh, fila), axis=0)
columna_zeros = np.tile(np.array([[0]]), (np.shape(A_fh)[0], 1))
A_fh = np.concatenate((A_fh, columna_zeros), axis=1)
    # Add zeros for the duk conditions
# for i in range(N):
# columna = np.tile(np.array([[0]]), (np.shape(A_fh)[0], 1))
# A_fh = np.concatenate((A_fh, columna), axis=1)
return A_fh
# Function that builds the b vector used by quadprog
# f, g, h are column vectors
def bqp(f, g, h, N):
f = np.tile(f, (N, 1))
g = np.tile(g, (N, 1))
bqp = np.concatenate((f, h, g), axis=0)
return bqp
# Function that builds the Aeq matrix used by quadprog
# A is a square (NxN) matrix
# B is an (Nx1) vector
# N is the prediction horizon
def Aeq(A, B, N):
    # Get the sizes of the matrices
size_A = np.shape(A)[0]
size_B = np.shape(B)[0]
    # Create the building-block matrices
I = np.identity(size_A)
zeros_matrix = np.zeros((size_A, size_A))
zeros_vector = np.zeros((size_B, 1))
    # First half of the Aeq matrix
Aeq_izquierda = np.concatenate((I, zeros_matrix), axis=1)
Aeq_izquierda = np.concatenate((Aeq_izquierda, np.concatenate((A, -I), axis=1)), axis=0)
for i in range(N - 1):
fila = np.concatenate((np.tile(zeros_matrix, i + 1), A), axis=1)
columna = np.concatenate((np.tile(zeros_matrix, (i + 2, 1)), -I))
Aeq_izquierda = np.concatenate((Aeq_izquierda, fila), axis=0)
Aeq_izquierda = np.concatenate((Aeq_izquierda, columna), axis=1)
    # Second half of the Aeq matrix
Aeq_derecha = np.concatenate((zeros_vector, B), axis=0)
for i in range(N - 1):
fila = np.tile(zeros_vector, i + 1)
columna = np.concatenate((np.tile(zeros_vector, (i + 2, 1)), B), axis=0)
Aeq_derecha = np.concatenate((Aeq_derecha, fila), axis=0)
Aeq_derecha = np.concatenate((Aeq_derecha, columna), axis=1)
    # Assemble the full matrix
Aeq = np.concatenate((Aeq_izquierda, Aeq_derecha), axis=1)
    # Extend Aeq to account for duk
columna = np.tile(zeros_vector, (N+1, N))
Aeq = np.concatenate((Aeq, columna), axis=1)
fila_izq = np.tile(np.transpose(zeros_vector), N+1)
fila_cent = np.array([[-1, 1]])
fila_der = np.tile(np.array([[0]]), 2*N-2)
fila = np.concatenate((fila_izq, fila_cent, fila_der), axis=1)
Aeq = np.concatenate((Aeq, fila), axis=0)
    # Iterate over the horizon N
for i in range(N-1):
zeros_izq = np.tile(np.transpose(zeros_vector), N+1)
zeros_cent = np.tile(np.array([[0]]), 2*i)
seq = np.array([[1, 0, -1, 1]])
pre_fila = np.concatenate((zeros_izq, zeros_cent, seq), axis=1)
zeros_der = np.tile(np.array([[0]]), np.shape(Aeq)[1] - np.shape(pre_fila)[1])
fila = np.concatenate((pre_fila, zeros_der), axis=1)
Aeq = np.concatenate((Aeq, fila), axis=0)
return Aeq
# Function that builds the beq vector used by quadprog
def beq(x_eq, Aeq):
columna_zeros = np.tile(np.array([[0]]), (np.shape(Aeq)[0]-np.shape(x_eq)[0], 1))
beq = np.concatenate((x_eq, columna_zeros), axis=0)
return beq
# Function that builds lb used by quadprog
def lb(lb_xk, lb_uk, lb_duk, N):
xk = np.tile(lb_xk, (N+1, 1))
uk = np.tile(lb_uk, (N, 1))
duk = np.tile(lb_duk, (N, 1))
lb = np.concatenate((xk, uk, duk), axis=0)
return lb
# Function that builds ub used by quadprog
def ub(ub_xk, ub_uk, ub_duk, N):
xk = np.tile(ub_xk, (N+1, 1))
uk = np.tile(ub_uk, (N, 1))
duk = np.tile(ub_duk, (N, 1))
ub = np.concatenate((xk, uk, duk), axis=0)
return ub
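
# Minimal shape check (illustrative, with assumed toy weights): for 1x1 state
# weights Q = P = [[1.]], scalar input weights R_1 = R_2 = 0.1 and horizon N = 2,
# Hqp stacks N copies of Q, one P, then the R_1 and R_2 entries on the diagonal.
if __name__ == '__main__':
    Q = np.array([[1.0]])
    P = np.array([[1.0]])
    H = Hqp(Q, P, 0.1, 0.1, 2)
    print(np.shape(H))    # (N + 1) states + N u-entries + N du-entries -> (7, 7)
    print(is_pos_def(H))  # True: H = 2*diag(1, 1, 1, 0.1, 0.1, 0.1, 0.1)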
|
import os
import sys
path = '/var/django-apps/Mycompanytv'
if path not in sys.path:
sys.path.append(path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'media_server.settings'
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
from django.db import models
from django.contrib.auth.models import User
import datetime
from django.core.validators import MaxValueValidator, MinValueValidator
class Student(models.Model):
student_id = models.IntegerField(verbose_name='شماره دانش آموزی')
user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='کاربر')
courses = models.ManyToManyField('Course', through='StudentCourse', related_name='students')
classrooms = models.ManyToManyField('Classroom', through='Register', related_name='students')
last_modified_date = models.DateTimeField(null=True)
def __str__(self):
return self.user.first_name + ' ' + self.user.last_name
class Teacher(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='کاربر')
hire_date = models.DateField(null=True, blank=True, verbose_name='تاریخ استخدام')
@property
def get_experience(self):
return datetime.datetime.now().year - self.hire_date.year
DIPLOMA, ASSOCIATE, BACHELOR, MASTER, PHD = 'DP', 'AS', 'BA', 'MA', 'PHD'
degree_choices = (
(DIPLOMA, 'دیپلم'),
(ASSOCIATE, 'فوق دیپلم'),
(BACHELOR, 'لیسانس'),
(MASTER, 'فوق لیسانس'),
(PHD, 'دکتری')
)
education_degree = models.CharField(max_length=2, choices=degree_choices, verbose_name='مدرک تحصیلی')
profession = models.ManyToManyField('Course', verbose_name='تخصص')
def __str__(self):
return self.user.first_name + ' ' + self.user.last_name
class LevelField(models.Model):
FIRST, SECOND, THIRD = 'first', 'second', 'third'
level_choices = (
(FIRST, 'اول'),
(SECOND, 'دوم'),
(THIRD, 'سوم')
)
level = models.CharField(max_length=10, choices=level_choices, default='first', verbose_name='پایه')
MATH, NATURAL, HUMANITY = 'math', 'natural', 'humanity'
field_choices = (
(MATH, 'ریاضی'),
(NATURAL, 'تجربی'),
(HUMANITY, 'انسانی')
)
field = models.CharField(max_length=10, choices=field_choices, verbose_name='رشته', blank=True)
def __str__(self):
return self.get_level_display() + ' ' + self.get_field_display()
class Classroom(models.Model):
level_field = models.ForeignKey('LevelField', on_delete=models.SET_NULL, null=True)
A, B, C = 'a', 'b', 'c'
branch_choices = (
(A, 'الف'),
(B, 'ب'),
(C, 'ج')
)
branch = models.CharField(max_length=2, choices=branch_choices, default='a', null=True, verbose_name='گروه',
blank=True)
education_year = models.CharField(max_length=20, null=True)
courses = models.ManyToManyField('Course', through='TeacherClassCourse', related_name='classrooms')
teachers = models.ManyToManyField('Teacher', through='TeacherClassCourse', related_name='classrooms')
is_active = models.BooleanField(verbose_name='فعال')
def __str__(self):
return str(self.level_field) + ' ' + self.get_branch_display()
class Course(models.Model):
name = models.CharField(max_length=20)
level_field = models.ForeignKey('LevelField', on_delete=models.SET_NULL, null=True)
unit = models.IntegerField()
def __str__(self):
return self.name
class StudentCourse(models.Model):
student = models.ForeignKey('Student', related_name='student_courses', on_delete=models.SET_NULL, null=True)
course = models.ForeignKey('Course', related_name='student_courses', on_delete=models.SET_NULL, null=True)
final_grade = models.FloatField(blank=True, null=True, validators=[
MaxValueValidator(20), MinValueValidator(0)
])
mid_grade = models.FloatField(blank=True, null=True, validators=[
MaxValueValidator(20), MinValueValidator(0)
])
class Register(models.Model):
student = models.ForeignKey('Student', related_name='registers', on_delete=models.SET_NULL, null=True,
verbose_name='دانش آموز')
classroom = models.ForeignKey('Classroom', related_name='registers', on_delete=models.SET_NULL, null=True,
verbose_name='کلاس')
is_active = models.BooleanField(verbose_name='فعال')
class TeacherClassCourse(models.Model):
teacher = models.ForeignKey('Teacher', related_name='teacher_class_courses', on_delete=models.SET_NULL, null=True,
verbose_name='معلم')
classroom = models.ForeignKey('Classroom', related_name='teacher_class_courses', on_delete=models.SET_NULL,
null=True, verbose_name='کلاس')
course = models.ForeignKey('Course', related_name='teacher_class_courses', on_delete=models.SET_NULL, null=True,
                               verbose_name='درس')
class_time = models.ManyToManyField('ClassTime', related_name='teacher_class_course', verbose_name='زمان کلاس')
def __str__(self):
return str(self.classroom) + ' ' + str(self.course) + ' ' + str(self.teacher)
class ClassTime(models.Model):
A, B, C, D = '1', '2', '3', '4'
part_choices = (
(A, 'زنگ اول'),
(B, 'زنگ دوم'),
(C, 'زنگ سوم'),
(D, 'زنگ چهارم')
)
part = models.CharField(max_length=20, choices=part_choices, default='1', null=True, verbose_name='زنگ',
blank=True)
Saturday, Sunday, Monday, Tuesday, Wednesday = 'Sa', 'Su', 'Mo', 'Tu', 'We'
day_choices = (
(Saturday, 'شنبه'),
(Sunday, 'یکشنبه'),
(Monday, 'دوشنبه'),
(Tuesday, 'سه شنبه'),
(Wednesday, 'چهارشنبه')
)
day = models.CharField(max_length=20, choices=day_choices, default='Sa', null=True, verbose_name='روز',
blank=True)
def __str__(self):
return self.get_day_display() + ' ' + self.get_part_display()
class Assignment(models.Model):
file = models.FileField(upload_to='assignments')
sent_time = models.DateField()
deadline_time = models.DateField()
teacher_class_course = models.ForeignKey(TeacherClassCourse, on_delete=models.SET_NULL, null=True)
grade = models.IntegerField(null=True, blank=True, validators=[
MaxValueValidator(20), MinValueValidator(0)
])
description = models.TextField()
class StudentPresence(models.Model):
student_course = models.ForeignKey(StudentCourse, on_delete=models.SET_NULL, null=True)
date = models.DateField()
presence = models.BooleanField(verbose_name='حضور')
    POSITIVE, NEGATIVE = 'pos', 'neg'
    activity_choices = (
        (POSITIVE, '+'),
        (NEGATIVE, '-')
    )
activity = models.CharField(max_length=20, choices=activity_choices, null=True, blank=True, verbose_name='فعالیت')
class TeacherPresence(models.Model):
teacher_class_course = models.ForeignKey(TeacherClassCourse, on_delete=models.SET_NULL, null=True)
date = models.DateField()
presence = models.BooleanField(verbose_name='حضور')
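# Hypothetical ORM usage sketch for the many-to-many through model above
# (the objects t, c and crs are assumed to exist):
# TeacherClassCourse.objects.create(teacher=t, classroom=c, course=crs)
# c.courses.all()    # courses taught in classroom c
# c.teachers.all()   # teachers assigned to classroom c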
|
import random
from fxengine.event.event import SignalEvent
class TestRandomStrategy(object):
def __init__(self, events):
self.events = events
self.ticks = 0
random.seed(5)
def calculate_signals(self, event):
if event.type == 'TICK':
self.ticks += 1
if self.ticks % 5 == 0:
side = random.choice(["buy", "sell"])
order = SignalEvent(
event.instrument, "market", side
)
self.events.put(order)
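# Hypothetical wiring sketch; the tick_event here is assumed to expose the
# interface used above (.type == 'TICK' and an .instrument attribute):
# import queue
# events = queue.Queue()
# strategy = TestRandomStrategy(events)
# strategy.calculate_signals(tick_event)  # every 5th tick enqueues a SignalEvent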
|
import subprocess as sp
import os
def get_test(id, rank):
    # Download the input and expected output files for one testcase.
    sp.call("wget --load-cookies cookies.txt 'http://gpe2.acm-icpc.tw/domjudge2/pctjury/testcase.php?probid=%s&rank=%s&fetch=input' -O '%s/%s.in'" % (id, rank, id, rank), shell=True)
    sp.call("wget --load-cookies cookies.txt 'http://gpe2.acm-icpc.tw/domjudge2/pctjury/testcase.php?probid=%s&rank=%s&fetch=output' -O '%s/%s.out'" % (id, rank, id, rank), shell=True)
    try:
        x = open("%s/%s.in" % (id, rank)).read()
        y = open("%s/%s.out" % (id, rank)).read()
    except (IOError, OSError):
        # Could not read the downloaded files; keep the original behavior
        # of treating this as "continue fetching".
        return True
    # "error" holds the page the judge serves for a missing testcase;
    # if either download matches it, discard both files and stop.
    error = open("error").read()
    if x == error or y == error:
        os.remove("%s/%s.in" % (id, rank))
        os.remove("%s/%s.out" % (id, rank))
        return False
    return True
if __name__ == "__main__":
get_test(24941, 10)
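    # Possible driver loop (an assumption about intent): fetch successive
    # ranks until the judge serves the error page and get_test() returns False.
    # rank = 1
    # while get_test(24941, rank):
    #     rank += 1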
|
def convert_hash_to_array(hash):
    '''
    Convert a hash into an array. Nothing more, nothing less.
    {name: 'Jeremy', age: 24, role: 'Software Engineer'}
    should be converted into
    [["age", 24], ["name", "Jeremy"], ["role", "Software Engineer"]]
    Note: The output array should be sorted alphabetically by key.
    '''
    return sorted([[k, v] for k, v in hash.items()])
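if __name__ == '__main__':
    # Minimal usage sketch: keys come back alphabetically sorted.
    print(convert_hash_to_array({'name': 'Jeremy', 'age': 24, 'role': 'Software Engineer'}))
    # -> [['age', 24], ['name', 'Jeremy'], ['role', 'Software Engineer']]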
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#Loading the dataset
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
n_nodes_hl1 = 100
n_nodes_hl2 = 100
n_nodes_hl3 = 100
n_nodes_hl4 = 100
n_classes = 10
batch_size = 100
# PLACEHOLDERS
x = tf.placeholder(tf.float32,shape=[None,784])
y_true = tf.placeholder(tf.float32,[None,10])
def neural_network(data):
hidden_layer_1 = {'weights':tf.Variable(tf.random_normal([784,n_nodes_hl1])),
'bias':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_layer_2 = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_layer_3 = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])),
'bias':tf.Variable(tf.random_normal([n_nodes_hl3]))}
hidden_layer_4 = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_nodes_hl4])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl4]))}
output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl4, n_classes])),
'bias': tf.Variable(tf.random_normal([n_classes]))}
#y=xW+b
l1 = tf.add(tf.matmul(data,hidden_layer_1['weights']),hidden_layer_1['bias'])
l1 = tf.nn.sigmoid(l1)
l2 = tf.add(tf.matmul(l1, hidden_layer_2['weights']), hidden_layer_2['bias'])
l2 = tf.nn.sigmoid(l2)
l3 = tf.add(tf.matmul(l2, hidden_layer_3['weights']), hidden_layer_3['bias'])
l3 = tf.nn.sigmoid(l3)
l4 = tf.add(tf.matmul(l3, hidden_layer_4['weights']), hidden_layer_4['bias'])
l4 = tf.nn.sigmoid(l4)
output = tf.matmul(l4,output_layer['weights']) + output_layer['bias']
return output
"""
#VARIABLES
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
#GRAPH OPERATIONS
y = tf.matmul(x,W)+b
#LOSS FUNCTION
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y_true,logits=y))
#OPTIMIZER
optimizer = tf.train.AdamOptimizer(learning_rate=0.5)
train = optimizer.minimize(cross_entropy)
"""
def train_neural_network(x):
y_pred = neural_network(x)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred,labels=y_true))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train = optimizer.minimize(cross_entropy)
#CREATE SESSION
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(4000):
batch_x,batch_y = mnist.train.next_batch(100)
sess.run(train,feed_dict={x:batch_x,y_true:batch_y})
            if step % 100 == 0:
                # EVALUATE MODEL on the test set (note: building these ops
                # inside the loop grows the graph; hoisting them out once
                # would be cleaner)
                pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
                acc = tf.reduce_mean(tf.cast(pred, tf.float32))
                print("Accuracy after", step, "steps:")
                print(sess.run(acc, feed_dict={x: mnist.test.images, y_true: mnist.test.labels}))
                print('\n')
train_neural_network(x)
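# Note: the code above uses the TensorFlow 1.x graph API (tf.placeholder,
# tf.Session). Under TensorFlow 2.x a common shim is (untested sketch):
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()
# tensorflow.examples.tutorials was removed in 2.x, so the MNIST loader
# would also need replacing (e.g. tf.keras.datasets.mnist.load_data()).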
|
from kivy.network.urlrequest import UrlRequest
def got_weather(req, results):
for key, value in results['weather'][0].items():
print(key, ': ', value)
if __name__ == '__main__':
ID = 5391811
URL = 'http://api.openweathermap.org/data/2.5/weather?q=San_Diego,CA&APPID='
req = UrlRequest(URL, got_weather, debug=True)
req.wait()
    print('Done')
'''
#!/bin/sh
export PYTHONOPTIMIZE=2
export ANDROID_ROOT=/system
export ANDROID_CACHE=/cache
export ANDROID_DATA=/data
export ANDROID_ASSETS=/system/app
export ANDROID_PRIVATE=/data/data/com.hipipal.qpyplus/files
export ANDROID_STORAGE=/storage
export ANDROID_PROPERTY_WORKSPACE=8,65536
export ANDROID_PUBLIC=/storage/sdcard1/com.hipipal.qpyplus
export PYLOC=/data/data/com.hipipal.qpyplus/files
export SDLOC=/storage/sdcard1/com.hipipal.qpyplus/lib/python2.7/
export PATH=$PYLOC/bin:$PATH
export PYTHONHOME=$PYLOC:$PYTHONHOME
export PYTHONPATH=$PYLOC/lib/python2.7/:$PYTHONPATH
export PYTHONPATH=$PYLOC/lib/python2.7/lib-dynload/:$PYTHONPATH
export PYTHONPATH=$PYLOC/lib/python2.7/site-package/:$PYTHONPATH
export PYTHONPATH=$SDLOC/site-packages/:$PYTHONPATH
export PYTHONSTARTUP=$SDLOC/site-packages/qpythoninit.py
export LD_LIBRARY_PATH=/data/data/com.hipipal.qpyplus/files/lib:/data/data/com.hipipal.qpyplus/files:/data/data/com.hipipal.qpyplus/lib
export TMPDIR=/storage/sdcard1/com.hipipal.qpyplus/cache
'''
|
from django import forms
class QuestionForm(forms.Form):
    id = forms.IntegerField()
    question = forms.CharField(required=True, max_length=100)
    answer = forms.BooleanField(required=False)
    comment = forms.CharField(required=True, max_length=100)
class DeleteForm(forms.Form):
    id = forms.IntegerField()
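# Hypothetical view-side usage sketch (not part of the original file):
# form = QuestionForm(request.POST)
# if form.is_valid():
#     question = form.cleaned_data['question']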
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import operator
import os
# Start Functions
def clear():
'''
Clear console
'''
os.system('cls' if os.name=='nt' else 'clear')
def print_words(pos=False):
    '''
    Print the current state of the word being solved.
    Arguments:
        pos - also print 1-based position numbers above the letters
    '''
print_pos = ' '
print_pos_bar = ' '
print_pos_letters = ' '
for i in range(num_letters):
if pos:
print_pos = print_pos + str(i + 1) + ' '
print_pos_bar = print_pos_bar + '| '
print_pos_letters = print_pos_letters + word_resolve[i] + ' '
print(print_pos)
print(print_pos_bar)
print(print_pos_letters)
print('')
print('')
def best_letter(word_resolve):
    '''
    Search for the most promising next letter.
    Arguments:
        word_resolve - letters solved so far, with '_' for unknown positions
    '''
# Read all words and count letters
letters = dict()
ignore = ('\xb1', '\xc3', '\xb6', '.', '\xae')
words = open('words.txt')
    # Read all words from the dictionary file
for word in words.readlines():
        # Only consider words with the same length
if len(word_resolve) == len(word.strip()):
l_word = list(word.strip().lower())
            # Check that the known letters match this word
fit = True
for i in range(len(l_word)):
if word_resolve[i] != l_word[i] and word_resolve[i] != '_':
fit = False
# Count the letters
if fit:
for letter in l_word:
if letter.lower() not in ignore:
if letter in letters:
letters[letter] = letters[letter] + 1
                        elif letter not in letters_used:
letters[letter] = 1
# Sort
sorted_letters = sorted(
letters.items(), key=operator.itemgetter(1), reverse=True
)
# Best letter
    if len(sorted_letters) > 0:
        return sorted_letters[0][0]
    else:
        # No candidate letters remain; the caller treats None as "stop"
        print('No more possibilities')
        return None
# End Functions
# Start
clear()
print('Hangman Bot 1.0v')
print(' _________ ')
print('| | ')
print('| 0 ')
print('| /|\\ ')
print('| / \\ ')
print('| ')
print('| ')
# Get num letters and make list resolve
num_letters = int(input('Number of letters: '))
clear()
play = True
word_resolve = list()
letters_used = list()
for pos in range(num_letters):
word_resolve.append('_')
print('')
print('Okay, come on!')
print('')
# Logic
while play:
    # Get best letter
    best_option = best_letter(word_resolve)
    if best_option:
        # Guard so the same letter is never suggested twice
        letters_used.append(best_option)
        print_words()
        # Print best letter
        print('Test with the letter> {letter}'.format(
            letter=best_option.upper()
        ))
        # Save successes
        print('')
        question_success = input('Was I successful? (yes or no): ').lower()
        clear()
        if question_success == 'no':
            clear()
            print('')
            print('Oops!')
        elif question_success == 'yes':
            print('')
            print_words(True)
            good_pos = input('Tell me the positions (Example> 2 4 7): ').split(' ')
            clear()
            for pos in good_pos:
                word_resolve[int(pos) - 1] = best_option
    else:
        # No letters left to try: stop instead of looping forever
        play = False
    # Game over
    end = False
    if '_' not in word_resolve:
        end = True
    if end:
        play = False
        print('Game over :)')
|
#!/usr/bin/env python
from assignment1.srv import *
import rospy
def handle_task2(req):
    # Dispatch on the op code in req.c: 1=add, 2=subtract, 3=multiply, 4=divide.
    if req.c == 1:
        return task2Response(req.a + req.b)
    elif req.c == 2:
        return task2Response(req.a - req.b)
    elif req.c == 3:
        return task2Response(req.a * req.b)
    elif req.c == 4:
        return task2Response(req.a / req.b)
    else:
        # Unknown op code; respond with 0 (assumed fallback).
        return task2Response(0)
def server_task2():
    rospy.init_node('server_task2')
    s = rospy.Service('task2', task2, handle_task2)
    print("Ready.")
    rospy.spin()
if __name__ == "__main__":
server_task2()
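# Hypothetical client-side sketch using the standard rospy pattern:
# rospy.wait_for_service('task2')
# task2_proxy = rospy.ServiceProxy('task2', task2)
# result = task2_proxy(6, 3, 4)  # op code 4 -> 6 / 3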
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
# safe print of possible UTF-8 character strings on ISO-8859 terminal
def cprint(s, end=None):
    # The characters being substituted were garbled in the source; U+2002
    # EN SPACE is implied by the "-ENSP-" tag, and U+00A0 NO-BREAK SPACE is
    # an assumption for the second substitution.
    s = re.sub("\u2002", "-ENSP-", s)
    s = re.sub("\u00a0", " ", s)
    t = "".join([x if ord(x) < 128 else '?' for x in s])
    if end is not None:
        print(t, end=end)
    else:
        print(t)
# Emit the UTF-8 chars as \uXXXX hex strings
def uprint(s):
    #s = re.sub("\u00a0", " ", s)
    t = "".join([x if ord(x) < 128 else ("\\u" + hex(ord(x))) for x in s])
    print(t)
def dprint(level, msg):
from config import debug
if int(debug) >= level:
cprint("{}".format(msg))
def fatal(message):
sys.stderr.write("fatal: " + message + "\n")
exit(1)
warningTag = {}
def wprint(tag, message, end=None):
if giveWarning(tag):
cprint(message, end=end)
def setWarnings(tagList):
tags = tagList.split()
for tag in tags:
warningTag[tag] = False
# Give a warning if tag is **not** in the list of suppressed warnings
def giveWarning(tag):
return tag not in warningTag
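# Minimal usage sketch (tag names are hypothetical):
# setWarnings("deprecated legacy")      # suppress these two warning tags
# wprint("deprecated", "old API used")  # silenced
# wprint("other", "something odd")      # printed via cprint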
|
from django.urls import path
from .views import *
urlpatterns = [
    path('', home, name='dashboard'),
    path('test', test),
    path('help', help)
]
|
import sys
print(sys.path)
name = "zhan"
age =19
job = "iT"
msg = '''
=============user name %s
yourname is: %s
your age is: %s
your job js: %s
''' % (name,name,age,job)
print(msg)
resArr = ['zhang','chaofu','age']
print(resArr)
print(resArr[0])
print(resArr[2])
print(resArr[1:3])
resArr.append("lai")
print(resArr)
resArr_copy = resArr.copy()
print(resArr_copy)
print(resArr_copy.count("lai"))
info = {
'stu1101': "TengLan Wu",
'stu1102': "LongZe Luola",
'stu1103': "XiaoZe Maliya",
}
info['stu1104'] ="zhouenlai"
print(info)
print(info.get("stu1101"))
print("hello world !")
#for
for key in info:
print(key,info[key])
#file
# f =open("test.py")
# first_line = f.readline()
# print(first_line)
# print("line-------".center(50,'+'))
# # data = f.read()
# # print(data)
# f.close()
# set: duplicate elements are removed automatically
jihe = {1,2,3,4,5,4,3}
print(jihe)
# tuple: cannot be modified
# a read-only list with only two methods: count and index
# use case: data that must never change, e.g. a list of ID numbers, can be stored as a tuple
# function
a, b = 5, 8
def calc(x, y):
    res = x ** y
    return res
sumd = calc(a, b)
print(sumd)
|
ARR = [3, 34, 4, 12, 5, 2]
S = 9
# For each element: include it or exclude it
# Subset(arr[5], 9)
# ---
# include: Subset(arr[4], 7)    exclude: Subset(arr[4], 9)
def rec_subset(arr, i, s):
if s == 0:
return True
elif i == 0:
return arr[0] == s
elif arr[i] > s:
return rec_subset(arr, i-1, s)
else:
A = rec_subset(arr, i-1, s-arr[i])
B = rec_subset(arr, i-1, s)
return A or B
print(rec_subset(ARR, len(ARR)-1, 9))
print(rec_subset(ARR, len(ARR)-1, 11))
print(rec_subset(ARR, len(ARR)-1, 12))
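# A bottom-up DP sketch of the same include/exclude idea (an addition, not
# part of the original): dp[i][s] is True iff a subset of arr[:i+1] sums to s.
def dp_subset(arr, total):
    dp = [[False] * (total + 1) for _ in range(len(arr))]
    for i in range(len(arr)):
        dp[i][0] = True              # the empty subset always sums to 0
    if arr[0] <= total:
        dp[0][arr[0]] = True         # first row: only arr[0] by itself
    for i in range(1, len(arr)):
        for s in range(1, total + 1):
            if arr[i] > s:
                dp[i][s] = dp[i-1][s]                         # must exclude
            else:
                dp[i][s] = dp[i-1][s - arr[i]] or dp[i-1][s]  # include or exclude
    return dp[-1][total]
print(dp_subset(ARR, S))  # should agree with rec_subset(ARR, len(ARR)-1, 9)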
|
import pickle
class PickleData:
    def __init__(self, fil):
        self.file = fil
    def dump_object(self, obj):
        # Serialize obj into self.file
        with open(self.file, 'wb') as destination:
            pickle.dump(obj, destination)
    def depickle(self):
        # Load and return the previously pickled object
        with open(self.file, 'rb') as f:
            pikl = pickle.load(f)
        return pikl
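# Round-trip usage sketch (the file name is hypothetical):
# store = PickleData('data.pkl')
# store.dump_object({'a': 1})
# assert store.depickle() == {'a': 1}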
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from drp_python.model_layer.subnet_config_model import SubnetConfigModel
from drp_python.translation_layer.subnets_translation import \
SubnetTranslation
from mock_session import MockHttpSession
import logging
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] '
'%(message)s',
datefmt='%d-%m-%Y:%H:%M:%S',
level=logging.WARNING)
logger = logging.getLogger('drp-python')
class SubnetTranslationTest(unittest.TestCase):
def setUp(self):
        self.session = MockHttpSession('http://127.0.0.1:9999',
                                       'username',
                                       'password')
self.subnet_translation = SubnetTranslation(self.session)
subnet_object = {
'address': '10.197.111.0',
'broadcast_address': '10.197.111.255',
'default_lease': 7200,
'dn': 'cablelabs.com',
'dns': '8.8.8.8',
'listen_iface': 'eno1',
'max_lease': 7200,
'name': 'TestSubnet',
'netmask': '255.255.255.0',
'range': '10.197.111.12 10.197.111.16',
'router': '10.197.111.1',
'type': 'management',
'next_server': '10.191.111.131'
}
subnet_object2 = {
'address': '10.197.111.0',
'broadcast_address': '10.197.111.255',
'default_lease': 7600,
'dn': 'cablelabs.com',
'dns': '8.8.8.8',
'listen_iface': 'eno1',
'max_lease': 7600,
'name': 'TestSubnet',
'netmask': '255.255.255.0',
'range': '10.197.111.12 10.197.111.26',
'router': '10.197.111.2',
'type': 'management',
'next_server': '10.191.111.131'
}
self.subnet_config_model = SubnetConfigModel(**subnet_object)
self.subnet_config_model2 = SubnetConfigModel(**subnet_object2)
def tearDown(self):
pass
def test_create_subnet(self):
model = self.subnet_translation.create_subnet(self.subnet_config_model)
self.assertEqual(model.name, self.subnet_config_model.name)
self.assertEqual(model.address, self.subnet_config_model.address)
self.assertEqual(model.broadcast_address,
self.subnet_config_model.broadcast_address)
self.assertEqual(model.default_lease,
self.subnet_config_model.default_lease)
self.assertEqual(model.dn, self.subnet_config_model.dn)
self.assertEqual(model.dns, self.subnet_config_model.dns)
self.assertEqual(model.listen_iface,
self.subnet_config_model.listen_iface)
self.assertEqual(model.max_lease, self.subnet_config_model.max_lease)
self.assertEqual(model.netmask, self.subnet_config_model.netmask)
self.assertEqual(model.range, self.subnet_config_model.range)
self.assertEqual(model.router, self.subnet_config_model.router)
self.assertEqual(model.next_server, self.subnet_config_model.next_server)
self.assertEqual(model.type, self.subnet_config_model.type)
        self.assertEqual(model.extension, {})
self.assertEqual(model.available, True)
self.assertEqual(model.errors, [])
self.assertEqual(model.validated, True)
self.assertEqual(model.options, [{'Code': 6, 'Value': '8.8.8.8'},
{'Code': 15,
'Value': 'cablelabs.com'},
{'Code': 1,
'Value': '255.255.255.0'},
{'Code': 3,
'Value': '10.197.111.1'},
{'Code': 28,
'Value': '10.197.111.255'}])
self.assertEqual(model.pickers, ['hint', 'nextFree', 'mostExpired'])
self.assertEqual(model.strategy, 'MAC')
model = self.subnet_translation.get_subnet(
self.subnet_config_model.name)
self.assertEqual(model.name, self.subnet_config_model.name)
self.assertEqual(model.address, self.subnet_config_model.address)
self.assertEqual(model.broadcast_address,
self.subnet_config_model.broadcast_address)
self.assertEqual(model.default_lease,
self.subnet_config_model.default_lease)
self.assertEqual(model.dn, self.subnet_config_model.dn)
self.assertEqual(model.dns, self.subnet_config_model.dns)
self.assertEqual(model.listen_iface,
self.subnet_config_model.listen_iface)
self.assertEqual(model.max_lease, self.subnet_config_model.max_lease)
self.assertEqual(model.netmask, self.subnet_config_model.netmask)
self.assertEqual(model.range, self.subnet_config_model.range)
self.assertEqual(model.router, self.subnet_config_model.router)
self.assertEqual(model.next_server, self.subnet_config_model.next_server)
self.assertEqual(model.type, self.subnet_config_model.type)
        self.assertEqual(model.extension, {})
self.assertEqual(model.available, True)
self.assertEqual(model.errors, [])
self.assertEqual(model.validated, True)
self.assertEqual(model.options, [{'Code': 6, 'Value': '8.8.8.8'},
{'Code': 15,
'Value': 'cablelabs.com'},
{'Code': 1,
'Value': '255.255.255.0'},
{'Code': 3,
'Value': '10.197.111.1'},
{'Code': 28,
'Value': '10.197.111.255'}])
self.assertEqual(model.pickers, ['hint', 'nextFree', 'mostExpired'])
self.assertEqual(model.strategy, 'MAC')
def test_update_subnet(self):
model = self.subnet_translation.update_subnet(
self.subnet_config_model2, self.subnet_config_model.name)
self.assertEqual(model.name, self.subnet_config_model2.name)
self.assertEqual(model.address, self.subnet_config_model2.address)
self.assertEqual(model.broadcast_address,
self.subnet_config_model2.broadcast_address)
self.assertEqual(model.default_lease,
self.subnet_config_model2.default_lease)
self.assertEqual(model.dn, self.subnet_config_model2.dn)
self.assertEqual(model.dns, self.subnet_config_model2.dns)
self.assertEqual(model.listen_iface,
self.subnet_config_model2.listen_iface)
self.assertEqual(model.max_lease, self.subnet_config_model2.max_lease)
self.assertEqual(model.netmask, self.subnet_config_model2.netmask)
self.assertEqual(model.range, self.subnet_config_model2.range)
self.assertEqual(model.router, self.subnet_config_model2.router)
self.assertEqual(model.next_server, self.subnet_config_model2.next_server)
self.assertEqual(model.type, self.subnet_config_model2.type)
        self.assertEqual(model.extension, {})
self.assertEqual(model.available, True)
self.assertEqual(model.errors, [])
self.assertEqual(model.validated, True)
self.assertEqual(model.options, [
{'Code': 6,
'Value': '8.8.8.8'},
{'Code': 15,
'Value': 'cablelabs.com'},
{'Code': 1,
'Value': '255.255.255.0'},
{'Code': 3, 'Value': '10.197.111.2'},
{'Code': 28,
'Value': '10.197.111.255'}])
self.assertEqual(model.pickers, ['hint', 'nextFree', 'mostExpired'])
self.assertEqual(model.strategy, 'MAC')
def test_delete_subnet(self):
self.subnet_translation.delete_subnet(
self.subnet_config_model.name)
|