text
stringlengths 8
6.05M
|
|---|
# Python Imports
from xml.dom import minidom
import httplib
# Local Imports
import globals
from helpers import *
def do_xml(self, xml, **kwargs):
    """
    Base function to send/receive xml using either GET or POST.

    POSTs `xml` to the receiver's /YamahaRemoteControl/ctrl endpoint
    (Python 2: httplib, print statements).

    Optional keyword parameters:
    timeout, ip, port, return_result, print_error, close_xml, print_xml,
    retry_count, print_response

    Returns the raw response body when return_result is True, True on a
    successful command otherwise, or None after exhausting retries.
    """
    timeout = float(kwargs.get('timeout', self.default_timeout))
    ip = kwargs.get('ip', self.ip_address)
    port = kwargs.get('port', self.port)
    return_result = kwargs.get('return_result', False)
    print_error = kwargs.get('print_error', True)
    close_xml = kwargs.get('close_xml', False)
    print_xml = kwargs.get('print_xml', False)
    retry_count = kwargs.get('retry_count', 0) #used for retry errors
    print_response = kwargs.get('print_response', False) #used for returning raw xml response
    if close_xml:
        xml = close_xml_tags(xml)
    if print_xml:
        print xml
    conn = httplib.HTTPConnection('{0}:{1}'.format(ip, port), timeout=float(timeout))
    headers = { "Content-type": "text/xml" }
    try:
        # Body sent separately via conn.send() after the request line/headers.
        conn.request("POST", "/YamahaRemoteControl/ctrl", "", headers)
        conn.send(xml)
        if return_result:
            response = conn.getresponse()
            rval = response.read()
            if print_response:
                print rval
            conn.close()
            return rval
        else:
            response = conn.getresponse()
            rval = response.read()
            conn.close()
            if rval != "":
                # NOTE(review): assumes the status code sits at a fixed offset (25)
                # in the response body — verify against the receiver's reply format.
                if str(rval[25]) == "0":
                    return True
                else:
                    print "Command did not go to Yamaha Receiver, error code " + str(rval[25])
            else:
                print "Command did not go to Yamaha Receiver, error NOT possible to set on this model."
    # NOTE(review): `socket` is not imported in this file's visible imports;
    # presumably provided by `from helpers import *` — confirm. Same for `eg`.
    except socket.error:
        if print_error:
            #eg.PrintError("Unable to communicate with Yamaha Receiver. Will try again for 10 times.")
            kwargs['retry_count'] = retry_count + 1
            if retry_count < 10:
                # xml was already closed on the first attempt; avoid re-closing tags.
                kwargs['close_xml'] = False #could have potential for further errors if not done.
                return do_xml(self, xml, **kwargs)
            else:
                eg.PrintError("Need to check communication with Yamaha Receiver.")
                return None
        else:
            raise
def send_xml(self, xml, **kwargs):
    """
    Fire-and-forget: send xml to the receiver without returning the response.
    """
    kwargs.setdefault('return_result', False)
    do_xml(self, xml, **kwargs)
def put_xml(self, xml, **kwargs):
    """Wrap xml in a YAMAHA_AV PUT envelope and send it."""
    envelope = '<YAMAHA_AV cmd="PUT">{0}</YAMAHA_AV>'.format(xml)
    send_xml(self, envelope, **kwargs)
def zone_put_xml(self, zone, xml, **kwargs):
    """
    Send a PUT command scoped to a zone.

    zone == -1 uses the active zone; zones < -1 encode a letter zone
    (e.g. -66 -> Zone_B via chr(66)); zone < 2 targets Main_Zone;
    otherwise Zone_<n>.

    Fix: the letter-zone branch was unreachable in the original because
    `zone < 2` was tested first and already captured every zone < -1;
    the checks are now ordered most-specific first.
    """
    if zone == -1:
        zone = self.active_zone
    if zone < -1:
        put_xml(self, '<Zone_{1}>{0}</Zone_{1}>'.format(xml, chr(-1 * zone)), **kwargs)
    elif zone < 2:
        put_xml(self, '<Main_Zone>{0}</Main_Zone>'.format(xml), **kwargs)
    else:
        put_xml(self, '<Zone_{1}>{0}</Zone_{1}>'.format(xml, zone), **kwargs)
def receive_xml(self, xml, **kwargs):
    """Send xml and return the receiver's raw response body."""
    kwargs.update(return_result=True)
    return do_xml(self, xml, **kwargs)
def get_xml(self, xml, **kwargs):
    """Wrap xml in a YAMAHA_AV GET envelope and return the response."""
    envelope = '<YAMAHA_AV cmd="GET">{0}</YAMAHA_AV>'.format(xml)
    return receive_xml(self, envelope, **kwargs)
def zone_get_xml(self, zone, xml, **kwargs):
    """
    Send a GET query scoped to a zone and return the response.

    zone == -1 uses the active zone; zones < -1 encode a letter zone
    (chr(-zone)); zone < 2 targets Main_Zone; otherwise Zone_<n>.

    Fix: as in zone_put_xml, the letter-zone branch was dead code because
    `zone < 2` was evaluated first; the branch order is corrected.
    """
    if zone == -1:
        zone = self.active_zone
    if zone < -1:
        return get_xml(self, '<Zone_{1}>{0}</Zone_{1}>'.format(xml, chr(-1 * zone)), **kwargs)
    elif zone < 2:
        return get_xml(self, '<Main_Zone>{0}</Main_Zone>'.format(xml), **kwargs)
    else:
        return get_xml(self, '<Zone_{1}>{0}</Zone_{1}>'.format(xml, zone), **kwargs)
def get_sound_video(self, zone=-1, **kwargs):
    """Fetch the zone's full Sound_Video parameter report."""
    query = '<Sound_Video>GetParam</Sound_Video>'
    return zone_get_xml(self, zone, query, **kwargs)
def get_basic_status(self, zone=-1, **kwargs):
    """Fetch the zone's Basic_Status report."""
    query = '<Basic_Status>GetParam</Basic_Status>'
    return zone_get_xml(self, zone, query, **kwargs)
def get_tuner_status(self, **kwargs):
    """Fetch the tuner's Play_Info report."""
    query = '<Tuner><Play_Info>GetParam</Play_Info></Tuner>'
    return get_xml(self, query, **kwargs)
def get_device_status(self, input, section, **kwargs):
    """Fetch one <section> report from the named <input> device."""
    query = '<{0}><{1}>GetParam</{1}></{0}>'.format(input, section)
    return get_xml(self, query, **kwargs)
def get_tuner_presets(self, **kwargs):
    """Fetch the tuner's stored preset data."""
    query = '<Tuner><Play_Control><Preset><Data>GetParam</Data></Preset></Play_Control></Tuner>'
    return get_xml(self, query, **kwargs)
def get_config(self, **kwargs):
    """Fetch the receiver's System Config report."""
    query = '<System><Config>GetParam</Config></System>'
    return get_xml(self, query, **kwargs)
def get_sound_video_string(self, param, zone=-1, elem=None, **kwargs):
    """
    Return the text of tag `param` from a Sound_Video report.

    elem "Treble"/"Bass" issues a targeted tone query; anything else pulls
    the full Sound_Video report.
    """
    tone_queries = {
        "Treble": '<Sound_Video><Tone><Treble>GetParam</Treble></Tone></Sound_Video>',
        "Bass": '<Sound_Video><Tone><Bass>GetParam</Bass></Tone></Sound_Video>',
    }
    if elem in tone_queries:
        xml = zone_get_xml(self, zone, tone_queries[elem], **kwargs)
    else:
        xml = get_sound_video(self, zone, **kwargs)
    doc = minidom.parseString(xml)
    return doc.getElementsByTagName(param)[0].firstChild.data
def get_volume_string(self, param, zone=-1, elem=None, **kwargs):
    """Query <Volume><elem> for the zone and return the text of tag `param`."""
    query = '<Volume><{0}>GetParam</{0}></Volume>'.format(elem)
    xml = zone_get_xml(self, zone, query, **kwargs)
    return minidom.parseString(xml).getElementsByTagName(param)[0].firstChild.data
def get_status_string(self, param, zone=-1, **kwargs):
xml = get_basic_status(self, zone, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
value = xmldoc.getElementsByTagName(param)[0].firstChild.data
return value
def get_status_strings(self, params, zone=-1, **kwargs):
"""
Return multiple values as to to not query the receiver over the network more than once
"""
xml = get_basic_status(self, zone, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
values = []
for param in params:
values.append(xmldoc.getElementsByTagName(param)[0].firstChild.data)
return tuple(values)
def get_status_param_is_on(self, param, zone=-1, **kwargs):
    """True when the named Basic_Status switch reads "On"."""
    value = get_status_string(self, param, zone, **kwargs)
    return value == "On"
def get_status_int(self, param, zone=-1, **kwargs):
    """Return the named Basic_Status value parsed as an integer."""
    value = get_status_string(self, param, zone, **kwargs)
    return int(value)
def get_config_string(self, param, **kwargs):
#print "in get config string"
#print self.FOUND_IP
#print "value in self.active_zone " + str(self.active_zone)
xml = get_config(self, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
value = xmldoc.getElementsByTagName(param)[0].firstChild.data
return value
"""
def get_config_param_is_on(param, **kwargs):
return get_config_string(param, **kwargs) == "On"
def get_config_int(param, **kwargs):
return int(get_config_string(param, **kwargs))
"""
def get_tuner_string(self, param, **kwargs):
xml = get_tuner_status(self, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
value = xmldoc.getElementsByTagName(param)[0].firstChild.data
return value
"""
def get_tuner_param_is_on(param, **kwargs):
return get_tuner_string(param, **kwargs) == "On"
def get_tuner_int(param, **kwargs):
return int(get_tuner_string(param, **kwargs))
"""
def get_device_string(self, param, input, section, **kwargs):
xml = get_device_status(self, input, section, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
if param[:4] == "Line":
value = xmldoc.getElementsByTagName('Txt')[int(param[5])-1].firstChild.data
else:
value = xmldoc.getElementsByTagName(param)[0].firstChild.data
return value
def get_device_strings(self, params, input, section, **kwargs):
"""
Return multiple values as to to not query the receiver over the network more than once
"""
xml = get_device_status(self, input, section, **kwargs)
if kwargs.get('print_xml', False):
print xml
xmldoc = minidom.parseString(xml)
values = []
for param in params:
if param.startswith("Line"):
values.append(xmldoc.getElementsByTagName('Txt')[int(param[5])-1].firstChild.data)
else:
values.append(xmldoc.getElementsByTagName(param)[0].firstChild.data)
return tuple(values)
def get_system_pattern_1(self, param=None, **kwargs):
    """
    Inspect the Speaker_Preout Pattern_1 configuration.

    param == "Active Speakers": return the list of configured speaker names.
    Otherwise: return [speaker, level] pairs; the receiver reports levels in
    tenths of dB, hence the /10.
    """
    types = ['Front', 'Center', 'Sur', 'Sur_Back', 'Subwoofer']
    speakers = []
    levels = []
    for type in types:  # NOTE(review): `type` shadows the builtin
        xml = get_xml(self, '<System><Speaker_Preout><Pattern_1><Config><{0}>GetParam</{0}></Config></Pattern_1></Speaker_Preout></System>'.format(type), **kwargs)
        xmldoc = minidom.parseString(xml)
        value = xmldoc.getElementsByTagName("Type")[0].firstChild.data
        if value != "None":
            if value == "Use":
                # "Use" marks a configured subwoofer.
                speakers.append("Subwoofer_1")
                try:
                    # A second <Type> element marks a second subwoofer.
                    if xmldoc.getElementsByTagName("Type")[1].firstChild.data == "Use":
                        speakers.append("Subwoofer_2")
                except:  # NOTE(review): bare except — IndexError is the expected case here
                    pass
            elif value[-2:] == "x2":
                # "...x2" presumably means a paired surround-back set — confirm.
                speakers.append("Sur_Back_R")
                speakers.append("Sur_Back_L")
            if type == "Sur":
                speakers.append("Sur_R")
                speakers.append("Sur_L")
            if type == "Front":
                speakers.append("Front_R")
                speakers.append("Front_L")
            if type == "Center":
                speakers.append("Center")
    if param == "Active Speakers":
        return speakers
    #This is then also done only if levels are requested
    else:
        for speaker in speakers:
            # NOTE(review): the same Lvl report is re-fetched for every speaker;
            # looks hoistable out of the loop, but left as-is to preserve behavior.
            xml = get_xml(self, '<System><Speaker_Preout><Pattern_1><Lvl>GetParam</Lvl></Pattern_1></Speaker_Preout></System>', **kwargs)
            xmldoc = minidom.parseString(xml)
            levels.append([speaker, float(xmldoc.getElementsByTagName(speaker)[0].firstChild.firstChild.data) /10])
        return levels
|
import sys

# Print the interpreter version, then route the user by age.
print(sys.version_info)
age = int(input("Enter age:\n"))
if age < 5:
    print("Too young for school")  # fix: original said "To young"
elif age == 5:
    print("Go to Kindergarten")
elif 5 < age <= 17:
    # grades are offset from age by the 5 pre-school years
    print("Go to {} grade".format(age - 5))
else:
    print("Go to college")
|
{
"targets": [
{
"target_name": "collective_jl",
"sources": [
"src/server/lib/collective_jl.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"/opt/julia/include/julia"
],
"dependencies": [
"<!(node -p \"require('node-addon-api').gyp\")"
],
"defines": [
"NAPI_DISABLE_CPP_EXCEPTIONS"
]
},
{
"target_name": "julia_loader",
"sources": [
"src/server/lib/julia_loader.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"/opt/julia/include/julia"
],
"dependencies": [
"<!(node -p \"require('node-addon-api').gyp\")"
],
"defines": [
"NAPI_DISABLE_CPP_EXCEPTIONS",
"JULIA_LIB_PATH=\"/opt/julia/lib/libjulia.so\""
]
},
{
"target_name": "nutrimatic",
"copies": [
{
"destination": "build/nutrimatic_src",
"files": [
"/opt/nutrimatic/expr-anagram.cpp",
"/opt/nutrimatic/expr-filter.cpp",
"/opt/nutrimatic/expr-intersect.cpp",
"/opt/nutrimatic/expr-optimize.cpp",
"/opt/nutrimatic/expr-parse.cpp",
"/opt/nutrimatic/index-reader.cpp",
"/opt/nutrimatic/index-walker.cpp",
"/opt/nutrimatic/search-driver.cpp"
]
}
],
"sources": [
"build/nutrimatic_src/expr-anagram.cpp",
"build/nutrimatic_src/expr-filter.cpp",
"build/nutrimatic_src/expr-intersect.cpp",
"build/nutrimatic_src/expr-optimize.cpp",
"build/nutrimatic_src/expr-parse.cpp",
"build/nutrimatic_src/index-reader.cpp",
"build/nutrimatic_src/index-walker.cpp",
"build/nutrimatic_src/search-driver.cpp",
"src/server/lib/nutrimatic.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"/opt/nutrimatic"
],
"dependencies": [
"<!(node -p \"require('node-addon-api').gyp\")"
],
"cflags": [
"-std=c++11",
"-g",
"-O6",
"-Wall",
"-Werror",
"-Wno-unused-local-typedefs",
"-Wno-uninitialized",
"-Wno-sign-compare",
"-fexceptions"
],
"cflags_cc": [
"-std=c++11",
"-g",
"-O6",
"-Wall",
"-Werror",
"-Wno-unused-local-typedefs",
"-Wno-uninitialized",
"-Wno-sign-compare",
"-fexceptions"
],
"libraries": [
"-lfst",
"-lpthread",
"-ldl"
],
"defines": [
"NAPI_DISABLE_CPP_EXCEPTIONS"
]
}
]
}
|
'''
given a folder of disk image, calculate the cdr of each images and save cdr to a file.
'''
from MNet_CDR_Seg import *
from utils import *
from config import get_config
from joint import resize_and_joint
import tensorflow as tf
import sys
from glob import glob
import os
import argparse
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default='0', help='')
parser.add_argument('--pre_model_MNetSeg', type=str, default='Model_MNet_ORIGA_pretrain.h5', help='')
parser.add_argument('--input_dir', type=str, default='img_data_glaucoma_crop_train', help='')
parser.add_argument('--save_path', type=str, default='./train_cdr.txt', help='')
args = parser.parse_args()
def main():
    """Compute the cup-to-disc ratio (CDR) for every image in --input_dir and
    append img_path,cdr lines to --save_path for CDRs above 0.6.

    Fixes: typo in the error message ('exsit'), and the output file is now
    managed by a context manager so it is closed even if processing raises.
    """
    input_dir = args.input_dir
    if not os.path.exists(input_dir):
        print('[*]input dir not exist!')
        sys.exit()
    data_list = glob(os.path.join(input_dir, '*'))
    CDRSeg_model = load_model(args.pre_model_MNetSeg)
    total = len(data_list)
    with open(args.save_path, 'w') as f:
        for count, img_path in enumerate(data_list, start=1):
            # NOTE(review): scipy.misc.imread is removed in modern scipy;
            # presumably this project pins an old scipy — confirm.
            img = scipy.misc.imread(img_path)
            cal_cdr = round(get_image_cdr(img, CDRSeg_model), 4)
            #temp: only record images above the suspect threshold
            if cal_cdr > 0.6:
                f.write(img_path + ',' + str(cal_cdr) + '\n')
            # f.write(str(cal_cdr) + '\n')
            if count % 10 == 0:
                print('progress:[%d/%d]' % (count, total))
if __name__ == '__main__':
    #set gpu device
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # TF1-style session setup: grow GPU memory on demand and register the
    # session with Keras before running inference.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    KTF.set_session(sess)
    main()
|
# Mortgage affordability check; user-facing prompts/messages are Portuguese
# and left untouched (comments translated to English).
vc = float (input ('Digite o valor da casa que você deseja comprar.'))  # house price
vs = float (input ('Digite o seu salario mensal.'))  # monthly salary
a = float (input ('Digite um numero em anos que você ira pagar tudo '))  # years to pay
a1 = a * 12  # number of monthly installments
a2 = vc / a1  # installment value per month
salario70 = (vs * 70 / 100)  # 70% of the salary — NOTE(review): computed but never used
salario30 = (vs * 30 / 100)  # 30% of the salary
valor = a2 - vs  # NOTE(review): installment minus full salary; the approval rule below looks off — confirm intent
valorf = a2 - salario30  # shortfall vs 30% of salary
if salario30 > valor :
    print (f'Para o o Sr. comprar uma casa no valor de {a2} o seu salario tinha que ser de 30% no valor do aluguel \n 30porcento do seu salario é de {salario30} portanto não aprovamos seu emprestimo. \n ficou faltando {valorf} reais para completar sua compra')
else: print (f'Parabéns voce foi aprovado, sua mensalidade sera de {a2} reais por mês.')
print (salario30)
# Dead code kept from an earlier draft (vs1/vvs never defined):
# if vs1 < vvs:
#     print ('Emprestimo negado.')
# else:
#     print ('parabens foi aprovado.')
|
def gcdIter(a, b):
    '''
    a, b: positive integers
    returns: a positive integer, the greatest common divisor of a & b.
    (Euclid's algorithm, written iteratively.)
    '''
    while a % b:
        a, b = b, a % b
    return b
print gcdIter(6,12)
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
import builtins as _builtins, sys, typing as _typing
from google.protobuf.descriptor import EnumDescriptor
from google.protobuf.internal.containers import RepeatedScalarFieldContainer
from google.protobuf.message import Message as _Message
__all__ = [
"ListPackagesRequest",
"ListPackagesResponse",
"GetPackageRequest",
"GetPackageResponse",
"GetPackageStatusRequest",
"GetPackageStatusResponse",
]
class PackageStatus:
    """Generated enum stub: registration state of a package on the ledger."""
    DESCRIPTOR: _typing.ClassVar[EnumDescriptor] = ...
    UNKNOWN: _typing.ClassVar[_typing.Literal[0]] = ...
    REGISTERED: _typing.ClassVar[_typing.Literal[1]] = ...
    # Generated quirk: the names are rebound to the Literal types themselves,
    # shadowing the ClassVar declarations above.
    UNKNOWN = _typing.Literal[0]
    REGISTERED = _typing.Literal[1]
class HashFunction:
    """Generated enum stub: hash function used for package archive hashes."""
    DESCRIPTOR: _typing.ClassVar[EnumDescriptor] = ...
    SHA256: _typing.ClassVar[_typing.Literal[0]] = ...
    # Generated quirk: rebinds the name to the Literal type itself.
    SHA256 = _typing.Literal[0]
class ListPackagesRequest(_Message):
    """Generated stub: request listing all packages on a ledger."""
    ledger_id: _builtins.str
    def __init__(self, *, ledger_id: _typing.Optional[_builtins.str] = ...): ...
    def HasField(self, field_name: _typing.Literal["ledger_id"]) -> _builtins.bool: ...
    def ClearField(self, field_name: _typing.Literal["ledger_id"]) -> None: ...
    def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class ListPackagesResponse(_Message):
    """Generated stub: response carrying the ids of all known packages."""
    @property
    def package_ids(self) -> RepeatedScalarFieldContainer[_builtins.str]: ...
    def __init__(self, *, package_ids: _typing.Optional[_typing.Iterable[_builtins.str]] = ...): ...
    def HasField(self, field_name: _typing.Literal["package_ids"]) -> _builtins.bool: ...
    def ClearField(self, field_name: _typing.Literal["package_ids"]) -> None: ...
    def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class GetPackageRequest(_Message):
    """Generated stub: request a single package's archive by id."""
    ledger_id: _builtins.str
    package_id: _builtins.str
    def __init__(self, *, ledger_id: _typing.Optional[_builtins.str] = ..., package_id: _typing.Optional[_builtins.str] = ...): ...
    def HasField(self, field_name: _typing.Literal["ledger_id", "package_id"]) -> _builtins.bool: ...
    def ClearField(self, field_name: _typing.Literal["ledger_id", "package_id"]) -> None: ...
    def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class GetPackageResponse(_Message):
    """Generated stub: package archive payload plus its hash metadata."""
    hash_function: _typing.Literal[0]
    archive_payload: _builtins.bytes
    hash: _builtins.str
    def __init__(self, *, hash_function: _typing.Optional[_typing.Literal['SHA256', 0]] = ..., archive_payload: _typing.Optional[_builtins.bytes] = ..., hash: _typing.Optional[_builtins.str] = ...): ...
    def HasField(self, field_name: _typing.Literal["hash_function", "archive_payload", "hash"]) -> _builtins.bool: ...
    def ClearField(self, field_name: _typing.Literal["hash_function", "archive_payload", "hash"]) -> None: ...
    def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class GetPackageStatusRequest(_Message):
    """Generated stub: request a package's registration status."""
    ledger_id: _builtins.str
    package_id: _builtins.str
    def __init__(self, *, ledger_id: _typing.Optional[_builtins.str] = ..., package_id: _typing.Optional[_builtins.str] = ...): ...
    def HasField(self, field_name: _typing.Literal["ledger_id", "package_id"]) -> _builtins.bool: ...
    def ClearField(self, field_name: _typing.Literal["ledger_id", "package_id"]) -> None: ...
    def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
class GetPackageStatusResponse(_Message):
    """Generated stub: a package's registration status (see PackageStatus)."""
    package_status: _typing.Literal[0, 1]
    def __init__(self, *, package_status: _typing.Optional[_typing.Literal['UNKNOWN', 0, 'REGISTERED', 1]] = ...): ...
    def HasField(self, field_name: _typing.Literal["package_status"]) -> _builtins.bool: ...
    def ClearField(self, field_name: _typing.Literal["package_status"]) -> None: ...
    def WhichOneof(self, oneof_group: _typing.NoReturn) -> _typing.NoReturn: ...
|
'''
Check if glyphs are missing outlines or composites.
Only works on glyphs which have unicodes
'''
import unicodedata as uni
IGNORE_GLYPHS_OUTLINE = [
'uni0000'
]
def check(font):
print '***Check Glyphs have outlines or components***'
masters = font.masters
for i, master in enumerate(masters):
bad_glyphs = []
for glyph in font.glyphs:
if str(glyph.category) != 'Separator' and glyph.name not in IGNORE_GLYPHS_OUTLINE:
if len(glyph.layers[i].paths) == 0:
if len(glyph.layers[i].components) == 0:
bad_glyphs.append(glyph.name)
if bad_glyphs:
for glyph in bad_glyphs:
print "ERROR: %s master's %s should have outlines or components\n" % (master.name, glyph.name)
else:
print "PASS: %s master's glyphs have components or outlines\n" % master.name
if __name__ == '__main__':
font = Glyphs.font
check(font)
|
# -*- coding: utf-8 -*-
import base64
import hashlib
import logging
import re
from typing import Iterable
from .base import Fetcher, Item
# noinspection PyProtectedMember
logger = logging.getLogger(__name__)
class JandanFetcher(Fetcher):
    """Scraper for jandan.net's ooxx image board.

    Walks comment pages backwards from URL, yielding one Item per post that
    contains at least one decodable image.
    """

    URL = 'http://jandan.net/ooxx'

    def __init__(self, count=5, browser='random'):
        super().__init__()
        self.count = count  # default number of pages per fetch()
        self.fetcher.browser = browser
        self.fetcher.every_time = True

    def fetch(self, count=None) -> Iterable[Item]:
        """Yield Items from up to `count` pages (defaults to self.count)."""
        if count is None:
            count = self.count
        key = None  # image-hash decode key; currently unused (see commented code)
        next_url = self.URL
        for i in range(count):
            soup = self.fetcher.soup(next_url)
            # The "previous-comment-page" link points at the next-older page.
            next_url = normalize(soup.find('a', 'previous-comment-page')['href'].split('#')[0])
            # if i == 0:
            #     key = self.get_key(soup)
            #     logger.debug("Get key [%s]", key)
            ol = soup.find('ol', 'commentlist')
            for item in self.generate(ol, key, i == 0):
                yield item

    def get_key(self, soup) -> str:
        """Extract the image-decode key from the site's minified JS, caching by URL.

        Raises RuntimeError when the script URL or the key cannot be found.
        """
        scripts = soup.find('head').find_all('script')
        key_url = None
        for script in scripts:
            src = script.get('src')
            if src and re.match(r'//cdn.jandan.net/static/min/\w+\.\d+\.js', src, re.ASCII):
                key_url = src
        if not key_url:
            raise RuntimeError("Cannot get key url")
        key_url = normalize(key_url)
        key = self.cache.get(key_url)
        if key is not None:
            return key
        js = self.fetcher.fetch(key_url)
        # The key is the second argument of a helper call inside jandan_load_img().
        match = re.search(r'jandan_load_img\(.*?\){.*?var [a-z]=\w+\([a-z],"(.*?)"\);', js, re.ASCII)
        key = None
        if match:
            key = match.group(1)
        if not key:
            raise RuntimeError("Cannot get key from [%s]" % key_url)
        self.cache.set(key_url, key)
        return key

    def generate(self, ol, key, test) -> Iterable[Item]:
        """Yield an Item per comment <li>; when `test` is True, probe the first
        decoded image URL once to validate the decode key still works."""
        for item in ol.find_all('li'):
            item_id = item.get('id')
            if not item_id or item_id == 'adsense':  # skip ads / malformed entries
                continue
            text = item.find('div', 'text')
            a = text.find('span', 'righttext').a
            imgs = []
            for img in text.find_all('span', 'img-hash'):
                src = normalize(decode(img.text, key))
                if test:
                    test = False
                    self.fetcher.open(src).close()  # probe only the very first image
                imgs.append('<div><img src="%s"/></div>' % src)
            if imgs:
                yield Item(a.text, a.text, None, normalize(a['href']), ''.join(imgs))
def normalize(url):
    """Give protocol-relative URLs (//host/...) an explicit http: scheme."""
    return "http:" + url if url.startswith('//') else url
def md5(text):
    """Hex MD5 digest of text; str input is encoded to bytes first."""
    data = text.encode() if isinstance(text, str) else text
    return hashlib.md5(data).hexdigest()
def base64decode(text):
    """Decode possibly-unpadded base64 text, restoring '=' padding as needed."""
    padding = -len(text) % 4
    return base64.b64decode(text + '=' * padding).decode()
def decode(cipher, key):
    """Decode an img-hash into an image URL.

    The site's old RC4-style scheme (keyed by `key`) is gone; hashes are now
    plain base64, so `key` is accepted but unused. sinaimg thumbnail paths are
    upgraded to their "large" variant.
    """
    plain = base64decode(cipher)
    return re.sub(r'(//\w+\.sinaimg\.cn/)(\w+)(/.+\.(gif|jpg|jpeg))', r'\1large\3', plain)
|
from django import forms
from forum.models import Topic, Post
from django.forms import ModelForm, Textarea
class PostForm(ModelForm):
    """Form for creating/editing a forum Post; exposes only its text field."""

    class Meta:
        model = Post
        fields = (
            'text',
        )
        widgets = {
            'text': Textarea(attrs={'rows': 4, 'cols': 60}),
        }
class TopicForm(ModelForm):
    """Form for creating a forum Topic; title only (extra info disabled)."""

    class Meta:
        model = Topic
        fields = (
            'title',
            # 'additional_info'
        )
        widgets = {
            # Single-row textarea keeps the title compact but resizable.
            'title': Textarea(attrs={'rows': 1}),
            # 'additional_info': Textarea(attrs={'rows': 4, 'cols': 80}),
        }
class EmailPostForm (forms.Form):
    """Plain form for emailing a post: sender name, address, and a comment."""
    name = forms.CharField(max_length=48)
    email = forms.EmailField()
    comments = forms.CharField(required=True, widget=forms.Textarea(attrs={'rows': 5}))
|
import os.path
from os import path
file_path = "E:\\temp\\names.txt"
def write_to_file():
    """Append one name (read from stdin) to file_path, creating it if needed.

    Fixes: the original never closed the handle (leak) and special-cased a
    missing file with mode 'x' — redundant, since mode 'a' already creates a
    missing file. A with-statement handles both.
    """
    name = input("Please write your name: ")
    with open(file_path, "a", encoding='utf-8') as f:
        f.write(name + '\n')
def print_names_From_file():
    """Print a greeting for each non-empty line stored in file_path.

    Fixes: the original's `finally: f.close()` raised NameError when open()
    itself failed (f was never bound); a with-statement inside the try scopes
    the cleanup correctly.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            # split('\n') instead of readlines() keeps the trailing blank entry
            # out of the greetings (guarded by `if line`).
            for line in f.read().split('\n'):
                if line:
                    print("hello dear %s" % line)
    except FileNotFoundError:
        print("Given file does not exist!")
# Demo driver: collect three names, then echo the whole file back.
for i in range(3):
    write_to_file()
print_names_From_file()
|
import numpy as np
import clify
import argparse
from config import cnn_config as config
# Hyper-parameter grid over the final curriculum stage: one no-training probe
# plus training-set sizes 1, 4, 16, ... 2**16.
grid = (
    [{'curriculum:-1:n_train': 1, 'curriculum:-1:do_train': False}] +
    [{'curriculum:-1:n_train': n} for n in 2**np.arange(0, 18, 2)]
)
config = config.copy(
    n_controller_units=512,
)
parser = argparse.ArgumentParser()
# NOTE(review): default='' is not among the choices, so running without
# --task falls through to the bare `raise Exception()` below — presumably
# intentional to force an explicit task; confirm.
parser.add_argument("--task", choices="A B C D".split(), default='')
args, _ = parser.parse_known_args()
if args.task == "A":
    config.curriculum = [dict(reductions="sum", n_train=2**17), dict(reductions='prod')]
elif args.task == "B":
    config.curriculum = [dict(reductions="max", n_train=2**17), dict(reductions='prod')]
elif args.task == "C":
    config.curriculum = [dict(reductions="min", n_train=2**17), dict(reductions='prod')]
elif args.task == "D":
    config.curriculum = [dict(reductions='prod')]
else:
    raise Exception()
from dps.parallel.hyper import build_and_submit, default_host_pool
clify.wrap_function(build_and_submit)(
    config=config, distributions=grid, n_param_settings=None, host_pool=default_host_pool)
|
"""
Examples of decorated functions to use for testing.
- Function with decorator with no parameters
- Function with decorator with ordered parameters
- Function with decorator with keyword parameters
"""
from .decorators import no_params, with_params, add_one, add_two
@no_params
def dictionary(*args, **kwargs):
    """Fixture: function wrapped by a bare (parameterless) decorator."""
    return {}
@with_params("hello!")
def world(*args, **kwargs):
    """Fixture: function wrapped by a decorator taking a positional argument."""
    return "world"
@with_params(text="foo!")
def bar(*args, **kwargs):
    """Fixture: function wrapped by a decorator taking a keyword argument."""
    return "bar"
@add_one
@add_two
def no_addition(x=0):
    """Fixture: function under two stacked decorators (applied bottom-up)."""
    return x
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:distributiion-example.py
# @Author: Michael.liu
# @Date:2019/2/12
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
#####################
#二项分布
#####################
def binom_pmf_test():
    '''
    Discrete distribution example.
    Binomial: toss a coin 100 times — what is the probability of exactly
    two heads? (Translated from the original Chinese comments.)
    '''
    n = 100  # number of independent trials
    p = 0.5  # probability of heads on each toss
    # NOTE(review): arange(0, 100) yields 0..99, not 0..100 as the original
    # comment claimed — confirm whether k=100 should be included.
    k = np.arange(0,100)
    binomial = stats.binom.pmf(k,n,p)
    print( binomial)  # per-count probabilities
    print(sum(binomial))  # should be ~1 (mass over 0..99)
    print( binomial[2])  # P(exactly 2 heads)
    plt.plot(k, binomial,'o-')
    plt.title('Binomial: n=%i , p=%.2f' % (n,p),fontsize=15)
    plt.xlabel('Number of successes')
    plt.ylabel('Probability of success',fontsize=15)
    plt.show()
def normal_distribution():
    '''
    The normal distribution is continuous and defined over the whole real
    line; it is described by two parameters, the mean mu and the variance
    sigma^2. (Translated from the original Chinese docstring.)
    '''
    mu = 0 # mean
    sigma = 1 # standard deviation
    x = np.arange(-10, 10, 0.1)
    # NOTE(review): hard-codes (0, 1) instead of passing mu/sigma — same values
    # today, but the plot title would silently lie if mu/sigma were changed.
    y = stats.norm.pdf(x, 0, 1)
    print(y)
    plt.plot(x, y)
    plt.title('Normal: $\mu$=%.1f, $\sigma^2$=%.1f' % (mu, sigma))
    plt.xlabel('x')
    plt.ylabel('Probability density', fontsize=15)
    plt.show()
if __name__ =="__main__":
#binom_pmf_test() # 二项分布
normal_distribution() # 正态分布
|
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
def error(driver,message):
    """Log a test failure and optionally capture a screenshot of the driver.

    The imports from `main` are deferred to call time — presumably to avoid a
    circular import, since main appears to import this module; confirm.
    """
    from main import testLog,test_id,enableScreenshots,detail,FolderLocation
    if enableScreenshots:
        driver.screenshot(FolderLocation+'/screenshots/test%s.png' % (test_id))
    testLog(message)
# Custom Made By seth
def find(self,method,selector,forceList=False):
content = self
elements = []
if method == 'class':
elements = content.find_elements_by_class_name(selector)
elif method == 'text':
elements = content.find_elements_by_xpath("//*[text()='"+selector+"']")
elif method == 'text*':
elements = content.find_elements_by_partial_link_text(selector)
elif method == 'tag':
elements = content.find_elements_by_tag_name(selector)
elif method == 'name':
elements = content.find_elements_by_name(selector)
elif method == 'id':
elements = content.find_elements_by_id(selector)
else:
elements = content.find_elements_by_xpath("//*[@"+method+"='"+selector+"']")
if len(elements) == 0:
if forceList:
return elements
else:
error(content,"No element matching criteria: %s = '%s'" % (method,selector))
return None
elif len(elements) == 1:
if forceList:
return elements
else:
return elements[0]
else:
return elements
# Monkey-patch the helper onto selenium's classes so callers can write
# driver.find(...) and element.find(...) directly.
WebDriver.find = find
WebElement.find = find
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from pygments.lexer import inherit
from pygments.lexers.haskell import HaskellLexer
from pygments.token import Keyword
class DAMLLexer(HaskellLexer):
    """Pygments lexer for DAML: Haskell highlighting plus DAML keywords.

    Fix: "controller" appeared twice in the reserved-word tuple; the duplicate
    was harmless in the regex alternation but redundant, and is removed.
    """

    name = "DAML"
    aliases = ["daml"]
    filenames = ["*.daml"]

    # DAML-specific keywords layered on top of Haskell's own.
    daml_reserved = (
        "template",
        "with",
        "controller",
        "can",
        "ensure",
        "daml",
        "observer",
        "signatory",
        "agreement",
        "nonconsuming",
        "return",
        "this",
    )

    tokens = {
        "root": [
            # (?!\') avoids matching a keyword-prefixed identifier like this'
            (r"\b(%s)(?!\')\b" % "|".join(daml_reserved), Keyword.Reserved),
            (r"\b(True|False)\b", Keyword.Constant),
            inherit,
        ]
    }
|
def countX(lst, x):
    """Return how many elements of lst compare equal to x.

    Idiom: replaces the manual counter loop with a generator sum; works for
    any iterable, exactly like the original loop.
    """
    return sum(1 for ele in lst if ele == x)
lst = [7, 5, 6, 1, 9, 10, 6, 88, 6]
x = 6
print('{} has occurred {} times'.format(x, countX(lst, x)))
|
'''import flask, os, sys
import requests
app = flask.Flask(__name__)
counter = 12345672
@app.route('/<path:page>')
def custome_page(page):
if page == 'favicon.ico': return ''
global counter
counter += 1
try:
template = open(page).read()
except Exception as e:
template = str(e)
template += "\n<!-- page: %s, src: %s -->\n" % (page, __file__)
return flask.render_template_string(template, name='test', counter=counter)
@app.route('/')
def home():
return flask.redirect('/index.template')
if __name__ == '__main__':
app.debug=True
flag1 = "flag1:this_is_flag1"
with open('/flag','w+') as f:
flag2 = f.read()
app.run(host='0.0.0.0')'''
from flask import Flask, request
from jinja2 import Environment
app = Flask(__name__)
Jinja2 = Environment()
@app.route("/page")
def page():
    # Deliberately vulnerable endpoint — this file is an SSTI demo/CTF target.
    name = request.values.get('name')
    # SSTI VULNERABILITY
    # The vulnerability is introduced concatenating the
    # user-provided `name` variable to the template string.
    output = Jinja2.from_string('Hello ' + name + '!').render()
    # Instead, the variable should be passed to the template context.
    # Jinja2.from_string('Hello {{name}}!').render(name = name)
    # NOTE(review): `name` is None when the parameter is absent, so the
    # concatenation raises TypeError — acceptable for a demo, worth knowing.
    return output
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
|
import json
cached_contact_data = {}
def search(query):
    """Return matching contacts as a JSON string, or '' when the query is
    empty or no contact data is available."""
    contacts = get_contact_data()
    if not (contacts and query):
        return ''
    return json.dumps(search_by_all_fields(query, contacts))
def search_by_name(query, contact_data):
    """Case-sensitive substring match against each contact's 'name' field."""
    return [contact for contact in contact_data if query in contact['name']]
def search_by_all_fields(query, contact_data):
    """Case-insensitive match of query against every field of every contact."""
    needle = query.lower()
    return [contact for contact in contact_data if contact_matches_query(needle, contact)]
def contact_matches_query(query, contact):
    """True if query occurs in any field value of contact.

    List-valued fields are matched element by element; each candidate string
    is lowercased before comparison (the caller lowercases the query).
    """
    for value in contact.values():
        candidates = value if isinstance(value, list) else [value]
        if any(query in text.lower() for text in candidates):
            return True
    return False
def get_contact_data():
    """Return the contact data, loading it from disk on first use (memoized
    in the module-level cached_contact_data)."""
    global cached_contact_data
    if not cached_contact_data:
        cached_contact_data = load_contact_data()
    return cached_contact_data
def load_contact_data():
    """Read and parse data.json from the current working directory."""
    with open('data.json') as json_data:
        return json.load(json_data)
def get_mock_result():
    """Return a canned JSON array of three contacts — an offline test fixture."""
    return """
[
{
"city": "Gelbressee",
"name": "Nero Acosta",
"country": "Panama",
"company": "Lacus Cras Associates",
"job_history": [
""
],
"email": "tempus.non.lacinia@ultricesposuerecubilia.com"
},
{
"city": "Westmount",
"name": "Ferris Yates",
"country": "Peru",
"company": "Eu Euismod Ac Corp.",
"job_history": [
""
],
"email": "scelerisque.scelerisque.dui@Nullamvitaediam.org"
},
{
"city": "Cache Creek",
"name": "Germaine Griffin",
"country": "Oman",
"company": "Diam Sed Industries",
"job_history": [
"Finale",
"Cakewalk"
],
"email": "dolor.Fusce@consectetueradipiscingelit.net"
}]
"""
|
import numpy as np
def main():
    """AoC 2019 day 1 part 2: total recursive fuel over the masses in ./input."""
    input_np = np.loadtxt(fname = "input")
    fuels = [calc_fuel(x) for x in input_np]
    print(calc_fuel(12))    # worked example — expect 2
    print(calc_fuel(1969))  # worked example — expect 966
    print(np.sum(fuels))    # puzzle answer
def calc_fuel(x, fuels=None):
    """Total fuel for mass x, counting fuel needed for the fuel itself
    (recursively, until the increment is non-positive).

    Fix: the default accumulator was a shared ndarray (mutable default
    argument antipattern). It was never mutated in place, so behavior was
    accidentally correct, but the default is now the standard None sentinel.
    Callers may still pass their own array — the interface is unchanged.
    """
    if fuels is None:
        fuels = np.array([])
    fuel = x // 3 - 2
    if fuel > 0:
        fuels = np.append(fuels, fuel)
        return calc_fuel(fuel, fuels)
    return np.sum(fuels)
if __name__ == '__main__':
main()
|
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
def is_there_nan_values(df):
    """
    Check if there are NaN values in Dataframe
    :param df: Dataframe to analyze
    :return: True if NaN values encountered else False
    """
    # df.isnull().sum() counts NaNs per column; a nonzero grand total means at
    # least one NaN. bool() normalizes numpy's bool_ to a plain Python bool
    # (the original's `False if ... else True` returned Python bools too).
    return bool(np.sum(df.isnull().sum()) != 0)
def getKeysByValue(dictOfElements, valueToFind):
    """
    Return the keys in a dictionary corresponding the values given in parameters
    :param dictOfElements: dictionary to analyze
    :param valueToFind: value to find corresponding IDs
    :return: list of corresponding IDs
    """
    return [key for key, value in dictOfElements.items() if value == valueToFind]
def transform_genres(df, remove_genres_column=False):
    """
    One-hot encode the space-separated ``genres`` column of *df*.

    :param df: DataFrame with a string ``genres`` column and no NaN values
    :param remove_genres_column: if True, drop the original ``genres`` column
    :return: *df* merged with one 0/1 column per distinct genre word
    :raises Exception: if *df* still contains NaN values
    """
    if is_there_nan_values(df):
        raise Exception("Sorry, please remove NaN values before transforming Dataframe.")
    # We have to remove this genre string '[]' because the string encoder considers it as an empty array
    # and it raises an Exception so we drop the line to avoid issues
    df = df[df.genres != '[]']
    # columns = df.columns.values
    # features = np.delete(columns, np.argwhere(columns == target))
    genres = np.array(df.genres)
    # lexicon will contain each words in "genres" column
    lexicon = list()
    for genre in genres:
        for mot in genre.split(sep=' '):
            lexicon.append(mot)
    lexicon = np.array(lexicon)
    # we keep one time each unique word
    unique = np.unique(lexicon)
    vectorizer = CountVectorizer(strip_accents='unicode')
    # tokenize and build vocab
    vectorizer.fit(unique)
    # Count occurrences of each vocabulary word per row of ``genres``.
    transformation = vectorizer.transform(genres).toarray()
    # Recover column names ordered by the vectorizer's vocabulary index.
    col_names = list()
    for i in range(len(vectorizer.vocabulary_)):
        col_names.append(getKeysByValue(vectorizer.vocabulary_, i)[0])
    # Creation of a new dataframe with genre encoded in columns
    df_genres = pd.DataFrame(transformation, columns=col_names)
    # Adding column "genres" to merge after with the dataframe df
    df_genres.insert(0, "genres", genres)
    # Merging the original datafram and the new one created
    # NOTE(review): merging on the raw "genres" string duplicates rows when
    # several input rows share the same genres value — confirm intended.
    df_final = df.merge(df_genres, on="genres", how='inner')
    if remove_genres_column:
        df_final = df_final.drop('genres', axis=1)
    return df_final
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Validators for labman2 data model and labman2 data form fields"""
from django.utils.translation import ugettext as _
from django.core.validators import RegexValidator, slug_re
import re
#from django.core.exceptions import ValidationError
# pylint: disable=C0103
validate_newltype = RegexValidator(slug_re, _(u'Link type should '
                    'consist of only letters, numbers, underscores '
                    'or hyphens'), 'newltype')
# pylint: disable=C0103
# BUG FIX: the original pattern started with the character class [doi|DOI],
# which matches a SINGLE character from {d, o, i, |, D, O, I} rather than the
# literal prefix "doi"/"DOI"; (?:doi|DOI) is the intended alternation.
doi_re = re.compile(r'(?:doi|DOI)[\s\.\:]{0,2}(10\.\d{4}[\d\:\.\-\/a-z]+)[A-Z\s]')
#doi_re = re.compile('\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])\S)+)\b')
validate_doi = RegexValidator(doi_re, _(u"Invalid DOI"))  # pylint: disable=C0103
|
# Merge a pruned rule file into an existing rule file, rescaling the support
# counts of the new rules from the new dataset size to the old one.
oldsize = 3024
newsize = 442
combined_rule = "/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/14/combinedrules/11"
new_rule = "/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/14/prunedrules3/11"
# NOTE(review): old_rule and combined_rule are the same path, so the file is
# read and then rewritten in place — confirm this is intended.
old_rule = "/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/14/combinedrules/11"
# Read the existing rules; `with` guarantees the handle is closed.
oldrules = []
with open(old_rule, "r") as dest:
    for line in dest:
        oldrules.append(line.split())
print(len(oldrules))
# Read the new (pruned) rules.
newrules = []
with open(new_rule, "r") as new:
    for line in new:
        newrules.append(line.split())
print(len(newrules))
# Rescale each new rule's support count (second-to-last field) and merge.
for rule in newrules:
    rule[len(rule)-2] = str(float(rule[len(rule)-2]) * newsize / oldsize)
    oldrules.append(rule)
print(len(oldrules))
# BUG FIX: the original opened the output file and never closed it, risking
# lost buffered output; `with` closes (and flushes) it deterministically.
with open(combined_rule, 'w') as f:
    for item in oldrules:
        f.write(" ".join(item) + "\n")
|
from selenium.common.exceptions import NoSuchElementException
from .base import FunctionalTest
class LoginTest(FunctionalTest):
    """Functional tests for the login/logout flows of both user roles."""
    def setUp(self):
        super(LoginTest, self).setUp()
        # Create users to log in with
        self.assistant_data = {'username': 'assistant', 'password': 'demo'}
        self.operator_data = {'username': 'operator', 'password': 'demo'}
        self.operator = self.create_user_with_permission(**self.operator_data)
        self.assistant = self.create_user_with_permission(**self.assistant_data)
    def test_wrong_credentials(self):
        # A visitor tries to log in with a wrong password
        self.user_login(self.operator_data['username'], 'wrong-pass')
        # Wrong-credentials message is shown
        self.check_notification_message(
            "Please enter a correct username and password. Note that both fields may be case-sensitive.", 'danger'
        )
        # Check user isn't logged in
        self.check_user_logged_out()
    def test_login(self):
        # A visitor logs in with operator credentials
        self.user_login(self.operator_data['username'], self.operator_data['password'])
        # Operator notices he is logged in
        self.check_user_logged_in(self.operator.username)
        # Operator logs out
        self.user_logout()
        # Another visitor logs in with assistant credentials
        self.user_login(self.assistant_data['username'], self.assistant_data['password'])
        # Assistant sees that he is logged in
        self.check_user_logged_in(self.assistant.username)
    def test_logout_page(self):
        # User logs in
        self.user_login(self.assistant_data['username'], self.assistant_data['password'])
        # User logs out
        self.user_logout()
        # Logout page is shown
        self.check_page_title_and_header(title="Logout", header="Logout")
        # He notices breadcrumbs (Logout)
        self.check_breadcrumbs((("Logout",),))
        # A logout notification message is shown
        text_in_page = self.browser.find_element_by_tag_name('body').text
        self.assertIn("You has been logged out successfully.", text_in_page)
        # User notices he is logged out.
        # BUG FIX: this test logged in as the assistant, so the logged-out
        # check must use the assistant's username (was: self.operator.username,
        # which passed trivially without verifying anything).
        self.check_user_logged_out(self.assistant.username)
    def get_logout_button(self):
        # TODO: Move logout button to user popup menu
        return self.browser.find_element_by_link_text('Logout')
    def user_logout(self):
        btn_logout = self.get_logout_button()
        btn_logout.click()
    def check_user_logged_in(self, username):
        # Logged-in user sees he is logged in
        navbar = self.browser.find_element_by_css_selector('.navbar')
        self.assertIn(username, navbar.text)  # TODO: Display his name if exist?
        # Sign-in button disappears
        self.assertRaises(NoSuchElementException, self.get_signin_button)
        # Logout button is shown
        btn_logout = self.get_logout_button()
        self.assertTrue(btn_logout.is_displayed())
    def check_user_logged_out(self, username=None):
        text_in_page = self.browser.find_element_by_tag_name('body').text
        # User can't see himself as logged in
        if username is not None:
            self.assertNotIn(username, text_in_page)
        # Logout button disappears
        self.assertRaises(NoSuchElementException, self.get_logout_button)
        self.assertNotIn('Log out', text_in_page)
        # Sign-in button is displayed
        btn_signin = self.get_signin_button()
        self.assertTrue(btn_signin.is_displayed())
        self.assertIn('Sign In', text_in_page)
|
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="uddup",
    version="0.9.3",
    author="Rotem Reiss",
    author_email="reiss.r@gmail.com",
    description="URLs Deduplication Tool.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/rotemreiss/uddup",
    packages=find_packages(exclude=['tests*']),
    install_requires=[],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Installs a console command `uddup` pointing at uddup.main:interactive.
    entry_points={
        'console_scripts': [
            'uddup=uddup.main:interactive',
        ],
    },
)
|
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from builtins import str
import csv
import sys, getopt
import xml.etree.ElementTree as ET
from xml.dom.minidom import parse, Node
import xml.dom.minidom
from numpy import double
from random import seed
from random import randint
seed(1)  # fix the RNG so the random perturbations are reproducible
#--------------------------------------------------------------------------------------------------------------------
def return_random_number(begin, end):
    """Return a random integer N with begin <= N <= end (inclusive)."""
    value = randint(begin, end)
    return value
class Abbreviation:
    """Pairs an abbreviation with its expansion, as read from the TSV table."""
    def __init__(self, input_abbreviation, input_expansion):
        self.abbreviation = input_abbreviation  # the short form token
        self.expansion = input_expansion        # the replacement text
        self.start_position = -1                # -1 = not located in any text yet
#--------------------------------------------------------------------------------------------------------------------
# Load the abbreviation -> expansion table from a tab-separated file.
input_address = 'Abbreviation\\AbbreviationsList.tsv'
abbreviation_list = []
with open(input_address) as input_file:
    input_data = csv.reader(input_file, delimiter='\t')
    for row in input_data:
        abbreviation = row[0]
        expansion = row[1]
        abbreviation_pair = Abbreviation(abbreviation, expansion)
        abbreviation_list.append(abbreviation_pair)
max_replace = 3  # NOTE(review): defined but never used below — confirm intent
input_address = 'Dataset\\i2b2.tsv'
output_text = 'text' + '\t' + 'label' + '\n'  # header row of the output TSV
num_perturbed_samples = 0
# Walk the dataset; for every sample, replace each known abbreviation token
# with its expansion and collect the perturbed samples.
with open(input_address) as input_file:
    input_data = csv.reader(input_file, delimiter='\t')
    line_num = 0
    for row in input_data:
        if (line_num > 0):  # skip the header row
            print(row[0], '\t', row[1])
            is_sample_perturbed = False
            sample_text = row[0]
            sample_label = row[1]
            sample_tokenized = nltk.word_tokenize(sample_text)  # NOTE(review): unused; tokenization is redone below
            word_replaced = False
            perturbed_sample = sample_text
            expanded_abbreviation = []
            num_expanded_abbreviations = 0
            for i in range(0, len(abbreviation_list)):
                # Re-tokenize on every pass because earlier expansions change the text.
                perturbed_sample_tokenized = nltk.word_tokenize(perturbed_sample)
                index = []
                for j in range(0, len(perturbed_sample_tokenized)):
                    if (perturbed_sample_tokenized[j] == abbreviation_list[i].abbreviation):
                        index.append(j)
                if (len(index) > 0):
                    # Replace every occurrence of this abbreviation.
                    for j in index:
                        perturbed_sample_tokenized[j] = abbreviation_list[i].expansion
                    temp_abbreviation = Abbreviation(abbreviation_list[i].abbreviation, abbreviation_list[i].expansion)
                    expanded_abbreviation.append(temp_abbreviation)
                    num_expanded_abbreviations += 1
                    word_replaced = True
                # Rebuild the sentence from tokens, single-space separated.
                perturbed_sample = ''
                for j in range(0, len(perturbed_sample_tokenized)):
                    if (j > 0):
                        perturbed_sample += ' '
                    perturbed_sample += perturbed_sample_tokenized[j]
            if (len(expanded_abbreviation) > 0):
                print('-----Expanded abbreviations:')
                for i in range(0, len(expanded_abbreviation)):
                    print(expanded_abbreviation[i].abbreviation, 'was expanded by', expanded_abbreviation[i].expansion)
            elif (len(expanded_abbreviation) == 0):
                print('No abbreviation was expanded.')
            if (word_replaced == True):
                is_sample_perturbed = True
                num_perturbed_samples += 1
                print('Perturbed sample:', perturbed_sample)
            if (is_sample_perturbed == True):
                output_text += perturbed_sample + '\t' + sample_label + '\n'
            print('----------------------------------------------------------')
        line_num += 1
print('\nPerturbed Samples:', num_perturbed_samples)
# Write all perturbed samples to the output TSV.
output_file = open('Dataset\\i2b2-perturbed-word-abbreviation-expansion.tsv', 'w')
output_file.write(output_text)
output_file.close()
if __name__ == '__main__':
    pass  # all work happens at import time above; this guard is a no-op
|
from django.conf.urls import url
from info import views
urlpatterns = [
    url(r'^$', views.info, name='info'),
    # BUG FIX: anchored each pattern with '^'. Django matches URL regexes from
    # the start of the remaining path, but without the anchor lint tools flag
    # these and the intent is ambiguous; '^inj/...' matches only at the start.
    url(r'^inj/(?P<slug>[\w\-]+)/$', views.info_inj, name='info_inj'),
    url(r'^cri/(?P<slug>[\w\-]+)/$', views.info_cri, name='info_cri'),
    url(r'^pre/(?P<slug>[\w\-]+)/$', views.info_pre, name='info_pre')
]
|
from django.contrib import admin
from .models import Book, Author, BookOrder, Cart, Review
# Admin list views: columns shown on each model's change list page.
class BookAdmin(admin.ModelAdmin):
    list_display = ('title', 'author', 'price', 'stock')
class AuthorAdmin(admin.ModelAdmin):
    list_display = ('last_name', 'first_name')
class BookOrderAdmin(admin.ModelAdmin):
    list_display = ('book', 'cart', 'quantity')
class CartAdmin(admin.ModelAdmin):
    list_display = ('id', 'user', 'active', 'order_date')
class ReviewAdmin(admin.ModelAdmin):
    list_display = ('id', 'book', 'user', 'publish_date')
# Register every model with its customized admin class.
admin.site.register(Book, BookAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(BookOrder, BookOrderAdmin)
admin.site.register(Cart, CartAdmin)
admin.site.register(Review, ReviewAdmin)
|
import pymysql.cursors
from fixture.db import DbFixture
#from fixture.orm import ORMFixture
#from model.group import Group
# Smoke-check: list all projects from the bugtracker DB via DbFixture.
connection = pymysql.connect(host="127.0.0.1", database="bugtracker", user="root", password="")
db = DbFixture(_host="127.0.0.1", _database="bugtracker", _user="root", _password="")
try:
    # Renamed from `list` to avoid shadowing the builtin.
    projects = db.get_project_list()
    for project in projects:
        print(project)
    print(len(projects))
finally:
    # NOTE(review): only the raw pymysql connection is closed here; DbFixture
    # presumably holds its own connection — confirm whether it needs explicit
    # teardown (e.g. db.destroy()).
    connection.close()
|
import json
import urllib
# Python 2 script: sum the "count" field of every comment in a JSON feed.
serviceurl = raw_input('Enter location: ')
# Default to the course sample feed when the user enters nothing.
if not serviceurl: serviceurl = 'https://python-data.dr-chuck.net/comments_42.json'
#if not serviceurl: serviceurl = 'https://python-data.dr-chuck.net/comments_283750.json'
print 'Retrieving', serviceurl
uh = urllib.urlopen(serviceurl)
data = uh.read()
js = json.loads(data)
#TODO
#Find sum of comments
count = 0
suma = 0
# Accumulate the total of all counts and the number of comments.
for i in js['comments']:
    suma += i['count']
    count += 1
print suma,count
|
from src import get_timed_loop_from_config, currencies_list_from_config, get_exchange_data, convert_dic_to_list
from module import CurrencyExchangeRepository
import threading
LOOP_TIME = get_timed_loop_from_config()
def fetch_and_upload_to_db():
    """Fetch configured exchange rates, store them, and re-arm the timer.

    The next run is scheduled *before* the work so the loop keeps its
    LOOP_TIME cadence regardless of how long the fetch/upload takes.
    """
    threading.Timer(LOOP_TIME, fetch_and_upload_to_db).start()
    wanted = currencies_list_from_config()
    rates = dict(get_exchange_data())
    filtered = {code: rate for code, rate in rates.items() if code in wanted}
    CurrencyExchangeRepository.upload_to_db(convert_dic_to_list(filtered))
    print("DB has been updated.")
if __name__ == "__main__":
    # Kick off the periodic fetch/upload loop.
    fetch_and_upload_to_db()
|
#!/usr/bin/python3
"""Demonstrate reading and rebinding a module-level name via ``global``."""
x = 9


def example():
    """Print x, increment the global x by 9, then print a derived local."""
    global x
    print(x)
    x += 9
    print(x)
    # Local variable derived from the (now-updated) global.
    s = x + 90
    print(s)


example()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-04-30 15:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen yes/no CharField choices on three fields.

    NOTE(review): the choices contain two entries storing the same value ''
    (('', '') and ('', '-')); only one label can round-trip for that value —
    confirm this matches the current model definition.
    """
    dependencies = [
        ('user_input', '0057_dailyactivity_start_time_in_seconds'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='prescription_or_non_prescription_medication_yesterday',
            field=models.CharField(blank=True, choices=[('', ''), ('', '-'), ('yes', 'Yes'), ('no', 'No')], max_length=4),
        ),
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='prescription_or_non_prescription_sleep_aids_last_night',
            field=models.CharField(blank=True, choices=[('', ''), ('', '-'), ('yes', 'Yes'), ('no', 'No')], max_length=4),
        ),
        migrations.AlterField(
            model_name='dailyuserinputstrong',
            name='smoke_any_substances_whatsoever',
            field=models.CharField(blank=True, choices=[('', ''), ('', '-'), ('yes', 'Yes'), ('no', 'No')], max_length=4),
        ),
    ]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from wadebug import results
from wadebug.wa_actions.base import WAAction
from wadebug.wa_actions.mysql_utils import MySQLUtil
class CheckMySQLConnection(WAAction):
    """Action that verifies the configured MySQL database accepts connections."""
    user_facing_name = "check_mysql_connection"
    short_description = "Test if MySQL database can be connected"
    config_dependencies = ("db.host", "db.port", "db.user", "db.password")
    @classmethod
    def _run(cls, config, *args, **kwargs):
        """Attempt one connection; report OK on success, Problem otherwise."""
        try:
            MySQLUtil(**config.get("db")).try_connect()
        except Exception as e:
            return results.Problem(cls, "Unable to connect to db.", e, "")
        return results.OK(cls)
|
import cv2
import matplotlib.pyplot as plt
# Load the image and convert OpenCV's BGR ordering to RGB for matplotlib.
img = cv2.imread('bird.png', 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
R, G, B = cv2.split(img)
# Plain global histogram equalization, applied independently per channel.
output1_R = cv2.equalizeHist(R)
output1_G = cv2.equalizeHist(G)
output1_B = cv2.equalizeHist(B)
output1 = cv2.merge((output1_R, output1_G, output1_B))
# clahe = cv2.createCLAHE()
# Contrast-Limited Adaptive Histogram Equalization, per channel.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8 ,8))
output2_R = clahe.apply(R)
output2_G = clahe.apply(G)
output2_B = clahe.apply(B)
output2 = cv2.merge((output2_R, output2_G, output2_B))
# Show the original and both equalized variants side by side.
output = [img, output1, output2]
titles = ['Original Image', 'Adjusted Histogram', 'CLAHE']
for i in range(3):
    plt.subplot(1, 3, i+ 1)
    plt.imshow(output[i])
    plt.title(titles[i])
    plt.xticks([])
    plt.yticks([])
plt.show()
|
import csv
from termcolor import colored
from collections import defaultdict
import datetime
from taxutils import *
# Open tax lots, keyed by product. For index i of product p, the parallel
# lists basis_prices[p][i] / basis_dates[p][i] / basis_sizes[p][i] /
# basis_fees[p][i] describe one lot's cost-basis price, acquisition date,
# size and fee.
basis_prices = defaultdict(list)
basis_dates = defaultdict(list)
basis_sizes = defaultdict(list)
basis_fees = defaultdict(list)
def buy(product, price, tran_date, size, fee = 0):
    """Record a purchase lot; purchases on an existing date merge into that lot."""
    # print("BUY {} tran_date : {} size {} price {}".format( product, tran_date, size, price ))
    if not tran_date in basis_dates[product]:
        # first encounter date
        basis_dates[product].append(tran_date)
        basis_prices[product].append(price)
        basis_sizes[product].append(size)
        basis_fees[product].append(fee)
    else:
        # second encounter date merge: sizes add up, price keeps the maximum
        indx = basis_dates[product].index(tran_date)
        basis_sizes[product][indx] = basis_sizes[product][indx] + size
        basis_prices[product][indx] = max((basis_prices[product][indx], price))
        # NOTE(review): the fee is NOT accumulated on a merge — confirm intended.
# Scratch state for one sell() call: candidate lots as (price, size, lot index)
# tuples plus a parallel list of their prices, consumed highest-price-first.
helps = list()
maxes = list()
def sellHelper(salesPrice, saleAmount, i):
    # Queue lot i (cost basis salesPrice, size saleAmount) as a sale candidate.
    helps.append((salesPrice, saleAmount, i))
    maxes.append(salesPrice)
# Accumulates one dict per recorded sale; written out by printfile().
alldics = list()
def record(dateAcquired, dateSold, salesPrice, costBasis, amount):
    """Append one Form-8949-style short-term sale row to alldics and log it."""
    global alldics
    # Description encodes amount, product, cost basis and original sell size.
    description = "{}_{}_c{}_os{}".format(round(amount,4), original_product, round(costBasis,3), round(original_sell_size,3))
    print ("SELLING", " ".join([description, str(dateAcquired), str(dateSold), str(salesPrice), str(costBasis)]))
    #short,3,5_ETCUSD_c10_5,20210306,20210306,100,100
    ret = dict()
    ret['holdingType'] = "short"
    ret['reportingCategory'] = "3"
    ret['description'] = description
    ret['dateAcquired'] = dateAcquired
    ret['dateSold'] = dateSold
    # Prices are per unit; totals are price * amount, rounded to cents.
    ret['salesPrice'] = round(salesPrice * amount,2)
    ret['costBasis'] = round(costBasis * amount,2)
    alldics.append(ret)
# Indexes of lots fully consumed by the current sell(); popped afterwards.
removes = list()
def getSell(product, remaining, dateSold, salesPrice):
    """Apply `remaining` units against the candidate lot with the highest
    cost basis; return the quantity still unmatched afterwards."""
    global helps, removes
    # Pick the candidate lot with the maximum cost basis.
    maxvalue = max(maxes)
    idx = maxes.index(maxvalue)
    costBasis, saleAmount, i = helps[idx]
    need_to_remove = False
    try:
        last_date = basis_dates[product][i]
        last_price = basis_prices[product][i]
    except Exception as e:
        # Inconsistent bookkeeping: dump state and abort.
        print("e: {}".format( e))
        print("helps: {}".format( helps))
        print("maxes: {}".format( maxes))
        exit()
    if saleAmount < remaining:
        # sold all: the lot is smaller than what's left to sell.
        need_to_remove = True
        record(last_date, dateSold, salesPrice, costBasis, saleAmount)
    else:
        # The lot covers the rest of the sale; shrink it in place.
        new_size = saleAmount - remaining
        if new_size <= 0:
            need_to_remove = True
        else:
            # NOTE(review): the candidate's cost basis is replaced by
            # salesPrice here — confirm that is intended.
            helps[idx] = (salesPrice, new_size, i)
            basis_sizes[product][i] = new_size
        record(last_date, dateSold, salesPrice, costBasis, remaining)
        saleAmount = remaining
    if need_to_remove:
        maxes.pop(idx)
        helps.pop(idx)
        removes.append(i)
    remaining = remaining - saleAmount
    if not maxes and remaining:
        # No candidates left but units remain: book the remainder against the
        # last lot seen. NOTE(review): may double-count — confirm.
        record(last_date, dateSold, salesPrice, costBasis, remaining)
    return remaining
# Context of the sale currently being processed; read by record() to build
# each row's description string.
original_sell_size = None
original_product = None
def sell(product, salesPrice, dateSold, size, fee = 0):
    """Match a sale against open lots, highest cost basis first, recording
    one output row per consumed lot."""
    global helps, original_sell_size, original_product
    global maxes, removes
    original_sell_size = size
    original_product = product
    if salesPrice == 0:
        # A zero sale price is treated as bad input data; bail out loudly.
        print("SELL tran_date : {} size : {} salesPrice : {}".format( dateSold, size, salesPrice))
        exit()
    removes = list()
    helps = list()
    maxes = list()
    # Only lots acquired on or before the sale date are eligible.
    for i, date in enumerate(basis_dates[product]):
        if dateSold < date:
            continue
        sellHelper( basis_prices[product][i], basis_sizes[product][i], i)
    remaining = size
    while helps and remaining > 0:
        remaining = getSell(product, remaining, dateSold, salesPrice)
    # Drop fully-consumed lots, highest index first so positions stay valid.
    removes = sorted(removes, reverse=True)
    for i in removes:
        basis_prices[product].pop(i)
        basis_sizes[product].pop(i)
        basis_dates[product].pop(i)
    if not helps and remaining:
        # Sold more than we ever bought: flag it, nothing else to match.
        print("TROUBLE {} {} dateSold : {} size : {} salesPrice : {}".format( colored("SELL", "red"), product,
            dateSold, size , salesPrice))
    return
def status():
    """Dump the open-lot bookkeeping state for debugging."""
    dumps = (
        ("basis_sizes : {}", basis_sizes),
        ("basis_dates : {}", basis_dates),
        ("basis_prices: {}", basis_prices),
        ("basis_prices: {}", len(basis_prices)),
    )
    for fmt, value in dumps:
        print(fmt.format(value))
def test_collecting():
    """Ad-hoc smoke test of buy/sell bookkeeping.

    NOTE(review): exit() below terminates the process, so everything after it
    is unreachable dead code; `total` is also unused.
    """
    product = "ETCUSD"
    size = 10
    price = 10
    total = 289.3654
    date = 20210306
    buy(product, price, date, size)
    size = 15
    # Oversell on purpose (15 sold vs 10 bought) to exercise the TROUBLE path.
    sell(product, price, date, size)
    status()
    printfile()
    exit()
    # --- unreachable from here on ---
    price = 20
    size = 5
    date = 20210307
    buy(product, price, date, size)
    size = 16
    price = 15
    date = 20210308
    sell(product, price, date, size)
def printfile():
    """Write every recorded sale row in alldics to outputnew.csv."""
    print("alldics: {}".format( len(alldics)))
    # Column order matches the tax-form import format.
    keys = ["holdingType","reportingCategory","description","dateAcquired","dateSold","salesPrice","costBasis"]
    #long,1,Some Stock,12/02/2007,03/04/2017,1234.50,325.55
    outputfile = "outputnew.csv"
    with open(outputfile, 'wt') as output_file:
        dict_writer = csv.DictWriter(output_file, keys, lineterminator='\n')
        dict_writer.writeheader()
        dict_writer.writerows(alldics)
    # print("total : {}".format( round(total,2) ))  # NOTE(review): `total` is undefined here
# print("total : {}".format( round(total,2) ))
if __name__ == '__main__':
    # Exercise the lot bookkeeping, then emit outputnew.csv.
    test_collecting()
    printfile()
|
import RPi.GPIO as GPIO
from time import sleep
# Python 2 script driving an H-bridge motor (A/B = direction, E = enable).
GPIO.setmode(GPIO.BOARD)  # pin numbers refer to physical board positions
Motor1A = 22
Motor1B = 21
Motor1E = 18
LED1 = 8
GPIO.setup(LED1,GPIO.OUT)
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1E,GPIO.OUT)
print "LED"
GPIO.output(LED1,GPIO.LOW)
# Forward: A high, B low, enable on for 15 s.
print "Turning motor on"
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1E,GPIO.HIGH)
sleep(15)
GPIO.output(Motor1E,GPIO.LOW)
sleep(2)
# Reverse: A low, B high, enable on for 10 s.
print "Turning motor on opposite"
GPIO.output(Motor1A,GPIO.LOW)
GPIO.output(Motor1B,GPIO.HIGH)
GPIO.output(Motor1E,GPIO.HIGH)
sleep(10)
print "Stopping motor"
GPIO.output(Motor1E,GPIO.LOW)
sleep(2)
# NOTE(review): no GPIO.cleanup() before exit — pins stay configured.
|
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from skimage.util import random_noise
# path of images
img_path = '.\\Images\\Gaussian.jpeg'
# read image form file
img = cv.imread(filename=img_path, flags=cv.IMREAD_GRAYSCALE)
# different standard deviations for the added Gaussian noise
stds = [0.01, 0.05, 0.1, 0.12, 0.15, 0.2, 0.6, 1.5]
# histograms (input image first, then one per noise level)
hists = []
# display image
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(3, 3, 1)
ax.set_title("Input Image")
plt.imshow(img, cmap='gray')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
# histogram
hists.append(cv.calcHist([img], [0], None, [256], [0, 256]))
for idx, std in enumerate(stds):
    # add gaussian noise (random_noise works in [0, 1] floats, hence var=std^2)
    new_img = random_noise(image=img, mode='gaussian', clip=True, mean=0, var=std**2)
    # convert back to 8-bit range for display/saving
    new_img = np.uint8(new_img * 255)
    new_ax = plt.subplot(3, 3, idx+2)
    ax.set_title("STD ({})".format(stds[idx]))
    plt.imshow(new_img, cmap='gray')
    plt.gca().axes.get_xaxis().set_visible(False)
    plt.gca().axes.get_yaxis().set_visible(False)
    # histogram
    #print(new_img)
    hists.append(cv.calcHist([new_img], [0], None, [256], [0, 256]))
    cv.imwrite('.\\noisy_images\\gaussain-{}.jpg'.format(idx), new_img)
# plot histograms
fig = plt.figure(figsize=(10, 10))
for idx, hist in enumerate(hists):
    ax = plt.subplot(3, 3, idx + 1)
    if idx != 0:
        ax.set_title("STD ({})".format(stds[idx-1]))
    else:
        ax.set_title("Input")
    plt.plot(hist)
    #plt.gca().axes.get_xaxis().set_visible(False)
    #plt.gca().axes.get_yaxis().set_visible(False)
plt.show()
|
#####################################################
## librealsense T265 to MAVLink ##
#####################################################
# This script assumes pyrealsense2.[].so file is found under the same directory as this script
# Install required packages:
# pip install pyrealsense2
# pip install transformations
# pip install dronekit
# First import the libraries
import pyrealsense2 as rs
import numpy as np
import transformations as tf
import math as m
import time
from dronekit import connect, VehicleMode
# Enable printing debug messages
ENABLE_DEBUG_MSG = 0
# Connection to the FCU (flight-controller serial port)
connection_string = '/dev/ttyUSB0'
# Global position of the origin, as scaled integers
# NOTE(review): presumably lat/lon in degE7 and alt in mm per the MAVLink
# SET_GPS_GLOBAL_ORIGIN convention — confirm against the FCU setup.
lat = 13669820 # Terni 425633500
lon = 1036634300 # Terni
alt = 163000
# For forward-facing camera (with X to the right): H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
# For down-facing camera (with X to the right): H_aeroRef_T265Ref = np.array([[0,1, 0,0],[1,0,0,0],[0,0,-1,0],[0,0,0,1]])
# TODO: Explain this transformation with visual example
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
#######################################
# Functions
#######################################
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
def send_vision_position_message(x,y,z,roll,pitch,yaw):
    """Send a VISION_POSITION_ESTIMATE MAVLink message to the FCU.

    Uses the module-level `vehicle` connection and `current_time` timestamp.
    """
    msg = vehicle.message_factory.vision_position_estimate_encode(
        current_time,	#us	Timestamp (UNIX time or time since system boot)
        x,	        #Global X position
        y,          #Global Y position
        z,	        #Global Z position
        roll,	    #Roll angle
        pitch,	    #Pitch angle
        yaw	        #Yaw angle
        #0          #covariance :upper right triangle (states: x, y, z, roll, pitch, ya
        #0          #reset_counter:Estimate reset counter.
    )
    vehicle.send_mavlink(msg)
    vehicle.flush()
# Send a mavlink SET_GPS_GLOBAL_ORIGIN message (http://mavlink.org/messages/common#SET_GPS_GLOBAL_ORIGIN), which allows us to use local position information without a GPS.
def set_fake_global_origin():
    """Tell the FCU where the local-frame origin sits on the globe
    (module-level lat/lon/alt), enabling GPS-less local positioning."""
    msg = vehicle.message_factory.set_gps_global_origin_encode(
        int(vehicle._master.source_system),
        lat,
        lon,
        alt
    )
    vehicle.send_mavlink(msg)
    vehicle.flush()
# Send a mavlink SET_HOME_POSITION message (http://mavlink.org/messages/common#SET_HOME_POSITION), which should allow us to use local position information without a GPS
def set_fake_home_position():
    """Set the FCU's home position to the configured lat/lon/alt with an
    identity orientation and a straight-down approach vector."""
    x = 0
    y = 0
    z = 0
    q = [1, 0, 0, 0]   # w x y z (identity quaternion)
    approach_x = 0
    approach_y = 0
    approach_z = 1
    msg = vehicle.message_factory.set_home_position_encode(
        int(vehicle._master.source_system),
        lat,
        lon,
        alt,
        x,
        y,
        z,
        q,
        approach_x,
        approach_y,
        approach_z
    )
    vehicle.send_mavlink(msg)
    vehicle.flush()
# Request a timesync update from the flight controller
# TODO: Inspect the usage of timesync_update
def update_timesync(ts=0, tc=0):
    """Send a TIMESYNC message; ts defaults to the current time in ms."""
    if ts == 0:
        ts = int(round(time.time() * 1000))
    msg = vehicle.message_factory.timesync_encode(
        tc,     # tc1
        ts      # ts1
    )
    vehicle.send_mavlink(msg)
    vehicle.flush()
#######################################
# Main code starts here
#######################################
# Connect to the Vehicle.
print("\nConnecting to vehicle")
vehicle = connect(connection_string, wait_ready=True, baud=921600)
print("\nFCU Connected")
# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
print("\nRealsense connected")
# Build config object and request pose data
cfg = rs.config()
# Enable the stream we are interested in
cfg.enable_stream(rs.stream.pose) # Positional data
# Start streaming with requested config
pipe.start(cfg)
print("\nSending vision pose messages to FCU through MAVLink NOW\n")
set_home_repeat = 0
try:
    while True:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()
        # Fetch pose frame
        pose = frames.get_pose_frame()
        if pose:
            # Store the timestamp for MAVLink messages (microseconds)
            current_time = int(round(time.time() * 1000000))
            # Print some of the pose data to the terminal
            data = pose.get_pose_data()
            # In transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]!
            H_T265Ref_T265body = tf.quaternion_matrix([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])
            # Transform to aeronautic coordinates (body AND reference frame!)
            H_aeroRef_aeroBody = H_aeroRef_T265Ref.dot( H_T265Ref_T265body.dot( H_T265body_aeroBody ))
            # 'sxyz': Rz(yaw)*Ry(pitch)*Rx(roll) body w.r.t. reference frame
            rpy_rad = np.array( tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz'))
            # Send MAVLINK VISION_POSITION_MESSAGE to FUC
            # (axes remapped from T265 frame to the aeronautic NED convention)
            send_vision_position_message(-data.translation.z, data.translation.x, -data.translation.y, rpy_rad[0], rpy_rad[1], rpy_rad[2])
            if ENABLE_DEBUG_MSG:
                rpy_deg = rpy_rad * 180 / m.pi
                print("Frame #{}".format(pose.frame_number), "RPY [deg]: {}".format(rpy_deg))
            # Skip some messages (throttle the send rate to ~20 Hz)
            time.sleep(0.05)
            # Set fake home position through MAVLink
            # TODO: For sure there can be a more methodological approach
            # Retry the home/origin setup at the 100th and 200th iterations.
            if set_home_repeat == 100 or set_home_repeat == 200:
                print("\nAttempt to set home origin")
                set_fake_global_origin()
                set_fake_home_position()
            set_home_repeat = set_home_repeat + 1
finally:
    pipe.stop()
    #Close vehicle object before exiting script
    print("\nClose vehicle object")
    vehicle.close()
|
import io
import requests
import zipfile
# StringIO: an in-memory text buffer.
f = io.StringIO()
#f = io.BytesIO()
f.write('string io')
# f.write(b'string io')
f.seek(0)  # rewind to the start
print(f.read())  # string io
# read zip file
# NOTE(review): despite the comment, nothing unzips this content — `zipfile`
# is imported but unused, and a Google Drive "view" URL returns an HTML page
# rather than the raw file, so `.decode()` below prints that HTML.
f = io.BytesIO()
url = 'https://drive.google.com/file/d/1qRibiuruk3u3dv531N5iHazkdzsmoxi6/view?usp=sharing'
res = requests.get(url)
f.write(res.content)
f.seek(0)
print(f.read().decode())
|
from key_expansion import subWord, rotWord
from utils import combine_byte, separate_bytes, mix_matrix, get_rounds, sub_byte, shift_row, create_state_block, print_state_block, convert_block_to_stream, block_to_text
from ff_algorithm import ff_multiply, ff_add
import logger
def subBytes(state):
    """Apply the AES S-box substitution to every state byte, in place."""
    for row in state:
        for idx, byte in enumerate(row):
            row[idx] = sub_byte(byte)
def shiftRows(state):
    """Rotate row ``i`` of the state left by ``i`` positions, in place."""
    for offset, row in enumerate(state):
        state[offset] = shift_row(row, offset)
def mixColumns(state):
    """AES MixColumns: multiply each column by the fixed GF(2^8) matrix, in place."""
    for col in range(4):
        a0, a1, a2, a3 = (state[row][col] for row in range(4))
        # New column bytes are computed from the saved locals, so the
        # in-place assignments below cannot interfere with each other.
        state[0][col] = ff_add(ff_multiply(a0, 2), ff_multiply(a1, 3), a2, a3)
        state[1][col] = ff_add(a0, ff_multiply(a1, 2), ff_multiply(a2, 3), a3)
        state[2][col] = ff_add(a0, a1, ff_multiply(a2, 2), ff_multiply(a3, 3))
        state[3][col] = ff_add(ff_multiply(a0, 3), a1, a2, ff_multiply(a3, 2))
def addRoundKey(state, round_key):
    """XOR each state column with the matching round-key word, in place."""
    for col in range(len(state)):
        key_bytes = separate_bytes(round_key[col])
        for row in range(len(state[0])):
            state[row][col] = ff_add(state[row][col], key_bytes[row])
def cipher(data, key, N_k):
    """AES-encrypt one block.

    data -- flat stream of input bytes (converted to a 4x4 state block)
    key  -- expanded key schedule (list of words, consumed 4 words per round)
    N_k  -- key length in 32-bit words; determines the round count
    Returns the encrypted bytes as a flat stream.
    """
    r = 0
    rounds = get_rounds(N_k)
    data = create_state_block(data)
    logger.input(r, data)
    # Initial round: AddRoundKey with the first four key-schedule words.
    key_sch = key[0:4]
    logger.k_sch(r, key_sch)
    addRoundKey(data, key_sch)
    # Middle rounds: SubBytes, ShiftRows, MixColumns, AddRoundKey.
    for r in range(1, rounds-1):
        logger.start(r, data)
        subBytes(data)
        logger.sub_byte(r, data)
        shiftRows(data)
        logger.shift_row(r, data)
        mixColumns(data)
        logger.mix_column(r, data)
        key_sch = key[4*r:4*(r+1)]
        logger.k_sch(r, key_sch)
        addRoundKey(data, key_sch)
    r += 1
    # Final round: same as above but without MixColumns, per the AES spec.
    logger.start(r, data)
    subBytes(data)
    logger.sub_byte(r, data)
    shiftRows(data)
    logger.shift_row(r, data)
    key_sch = key[-4:]
    logger.k_sch(r, key_sch)
    addRoundKey(data, key_sch)
    logger.output(r, data)
    return convert_block_to_stream(data)
|
from ..extensions import cache
from .models import InvCategory, InvMarketGroups, DgmTypeEffects
from .consts import dogma_effects_slots, market_groups_filter
class EVEStaticDataService(object):
    """Read-only accessors over EVE static-data tables, memoized via the
    application cache extension."""
    @cache.memoize()
    def get_types_by_category(self, category_id):
        """Return every inventory type in every group of the given category."""
        category = InvCategory.query.filter_by(categoryID=category_id).first()
        types = []
        for group in category.groups:
            for t in group.types:
                types.append(t)
        return types
    @cache.memoize()
    def get_market_groups(self):
        """Return market groups restricted to the configured whitelist subtrees."""
        market_groups = InvMarketGroups.query.all()
        filtered_groups = self._filter_market_groups(market_groups)
        return filtered_groups
    @staticmethod
    def _filter_market_groups(market_groups):
        """Keep whitelisted root groups plus all of their descendants.

        Repeatedly sweeps the list, adding any group whose parent was already
        accepted, until a sweep adds nothing (a transitive closure).
        """
        good_groups = []
        good_group_ids = []
        while True:
            new_good_groups = False
            for group in market_groups:
                if group.marketGroupID not in good_group_ids:
                    # Roots must be whitelisted; children follow their parent.
                    if group.parentGroupID is None and group.marketGroupID in market_groups_filter:
                        good_groups.append(group)
                        good_group_ids.append(group.marketGroupID)
                        new_good_groups = True
                    elif group.parentGroupID in good_group_ids:
                        good_groups.append(group)
                        good_group_ids.append(group.marketGroupID)
                        new_good_groups = True
            if new_good_groups is False:
                break
        return good_groups
    @cache.memoize()
    def get_market_group(self, group_id):
        """Return a single market group by ID, or None when absent."""
        return InvMarketGroups.query.filter_by(marketGroupID=group_id).first()
    @cache.memoize()
    def get_type_fitting_slot(self, type_id):
        """Return the dogma effect ID marking the type's fitting slot, if any."""
        effect = DgmTypeEffects\
            .query\
            .filter_by(typeID=type_id)\
            .filter(DgmTypeEffects.effectID.in_(dogma_effects_slots))\
            .first()
        if effect is not None:
            return effect.effectID
        return None
# Module-level singleton used by the rest of the application.
eve_static_data_service = EVEStaticDataService()
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
# import numpy as np
import numpy as np
from scipy.optimize import minimize, linprog
def slice(lowbound=1, upbound=10):
    '''
    Draw three uniform random weights and normalise them to sum to 1.
    :param lowbound: lower bound of the raw draw
    :param upbound: upper bound of the raw draw
    :return: three ratios (rho1, rho2, rho3) summing to 1
    '''
    rho1, rho2, rho3 = np.random.uniform(lowbound, upbound, 3)
    # `total` instead of `sum`: don't shadow the builtin.
    total = rho1 + rho2 + rho3
    # Normalise so the three ratios form a partition of 1.
    return rho1 / total, rho2 / total, rho3 / total
def BSs(J, down=10, up=10, compute=10):
    """Build a (J, 3) array of per-base-station capacities.

    Columns are [down, up, compute].

    Bug fix: the original filled ``bss`` but never returned it, so every
    call silently produced None.
    """
    bss = np.zeros((J, 3))
    for i in range(J):
        bss[i][0] = down
        bss[i][1] = up
        bss[i][2] = compute
    return bss
def slices(S=18, lowbound=1, upbound=10):
    """Generate S random slices; each row holds (rho1, rho2, rho3)."""
    scs = np.zeros((S, 3))
    for row in range(S):
        scs[row] = slice(lowbound, upbound)
    return scs
def ZtoX(i, X_map, S, J_num):
    """Map flat variable index ``i`` back to its (row, col) in X_map.

    Free variables are the zero entries of X_map, counted row by row.
    ``i == -1`` maps to (-1, -1); an out-of-range ``i`` yields None,
    matching the original behavior.
    """
    if i == -1:
        return -1, -1
    count = -1
    for row in range(S):
        for col in range(J_num + 1):
            if X_map[row][col] != 0:
                continue
            count += 1
            if count == i:
                return row, col
def XtoZ(l_x, l_y, X_map, S, J_num):
    """Map position (l_x, l_y) of X_map to its flat variable index.

    Returns -1 when the entry is not a free (zero) variable.  The original
    had a separate ``l_x == 0`` branch; it is equivalent to the general path
    (the full-rows loop is empty for row 0), so the branch was merged.
    """
    if X_map[l_x][l_y] != 0:
        return -1
    idx = -1
    # count all free variables in the rows above l_x ...
    for row in range(l_x):
        for col in range(J_num + 1):
            if X_map[row][col] == 0:
                idx += 1
    # ... plus those in row l_x up to and including column l_y
    for col in range(l_y + 1):
        if X_map[l_x][col] == 0:
            idx += 1
    return idx
def ValCount(X_map, S, J_num):
    """Count the free (zero) entries of X_map, i.e. the number of variables."""
    return sum(1
               for row in range(S)
               for col in range(J_num + 1)
               if X_map[row][col] == 0)
def UnSelectBS(X_map, S, J_num):
    """Return the slices that already have a pinned base station.

    A slice counts as pinned when some x_sj in its first J_num columns
    equals 1 (the ys column is excluded).  # todo(*risk noted in original)
    """
    return [s for s in range(S) if np.max(X_map[s][0:J_num]) == 1]
def ResourceConstraint(resorce_type, z, j, X_map, I, ROH, S, J_num, load):
    """Remaining capacity of one resource of base station ``j`` at point ``z``.

    ``resorce_type`` is 'down', 'up' or 'compute' (columns 0/1/2 of both
    ``ROH`` and ``load``).  For every slice s partially mapped to j
    (X_map[s][j] == 0) the consumption x_sj * y_s * ROH[s][r] is subtracted;
    for every slice already pinned to j (X_map[s][j] == 1) y_s * ROH[s][r]
    is subtracted.  A non-negative result means the constraint holds.

    The original triplicated this logic per resource type and raised
    UnboundLocalError for an unknown type; it is now table-driven and fails
    fast with ValueError instead.
    """
    resource_column = {'down': 0, 'up': 1, 'compute': 2}
    if resorce_type not in resource_column:
        raise ValueError("unknown resource type: %r" % (resorce_type,))
    r = resource_column[resorce_type]
    remaining = load[j][r]
    # slices only partially mapped to j: weighted by both x_sj and y_s
    # (same subtraction order as the original: partial first, then pinned)
    for s in range(S):
        if X_map[s][j] == 0:
            x_idx = XtoZ(s, j, X_map, S, J_num)
            y_idx = XtoZ(s, J_num, X_map, S, J_num)
            remaining -= z[x_idx] * z[y_idx] * ROH[s][r]
    # slices already pinned to j: weighted by y_s only
    for s in range(S):
        if X_map[s][j] == 1:
            remaining -= z[XtoZ(s, J_num, X_map, S, J_num)] * ROH[s][r]
    return remaining
# for slice s: sum_j x_sj == 1
def EqConstraint(z, s, X_map, I, ROH, S, J_num, load):
    """Equality constraint for slice ``s``.

    The fractional assignments over the free candidate base stations must
    sum to one; returns 1 minus that sum (zero when satisfied).
    """
    remaining = 1
    for bs in range(J_num):
        if X_map[s][bs] != 0:
            continue
        remaining -= z[XtoZ(s, bs, X_map, S, J_num)]
    return remaining
def cost(type, z, X_map, I, ROH, S, J_num, load, alpha, beta):
    """Objective of the relaxed placement problem at point ``z``.

    ``type`` 0 returns degradation + migration cost, 1 only degradation,
    2 only migration; any other value yields None (as before).
    """
    # degradation term: alpha * sum_s (1 - y_s)
    degr = 0
    for s in range(S):
        degr += (1 - z[XtoZ(s, J_num, X_map, S, J_num)])
    degr *= alpha
    # migration term: beta * (possibly fractional) mass mapped away from
    # each slice's previous base station
    migr = 0
    for s in range(S):
        prev_bs = I[s]
        if prev_bs == -1:  # first-time mapping: no migration cost
            continue
        for j in range(J_num):
            # NOTE(review): when X_map[s][j] == 1, XtoZ returns -1 and z[-1]
            # is read (the original did the same) -- confirm intended.
            if X_map[s][j] != -1 and j != prev_bs:
                migr += z[XtoZ(s, j, X_map, S, J_num)]
    migr *= beta
    return {0: degr + migr, 1: degr, 2: migr}.get(type)
def num_of_migration(X_map, I):
    """Count slices whose pinned base station differs from their previous one.

    Bug fix: the original read the globals ``S`` and ``J_num`` (defined only
    inside ``__main__``), raising NameError when imported; both are now
    derived from the arguments (S = len(I); J_num = columns of X_map minus
    the trailing ys column).
    """
    S = len(I)
    J_num = len(X_map[0]) - 1
    o = 0
    for s in range(S):
        i = I[s]
        if i == -1:  # first-time mapping: no migration cost
            continue
        for j in range(J_num):
            if X_map[s][j] == 1 and j != i:
                o += 1
    return o
def generate_k(S, multiple):
    """Draw S random scaling factors whose sum equals multiple * S."""
    weights = np.random.uniform(1, 1000, S)  # todo(*tunable)
    total = np.sum(weights)
    return multiple * weights * S / total
def generate_K(S, iter):
    """Build an (iter, S) matrix of scaling factors.

    Row i uses a total multiple drawn evenly from [1, 3] over the rows.
    """
    K = np.zeros((iter, S))
    for row, multiple in enumerate(np.linspace(1, 3, iter)):
        K[row] = generate_k(S, multiple)
    return K
def opt(X_map, I, RHO, S, J_num, load, alpha, beta, type):
    """Solve the relaxed (fractional) placement problem with SLSQP.

    The free variables z are the zero entries of X_map read row by row:
    the fractional x_sj plus one y_s per slice (last column).  Returns the
    solution vector z together with the total, degradation-only and
    migration-only costs evaluated at z.
    """
    # bounds: every free variable lies in [0, 1]
    bnd = (0, 1)
    bnds = []
    for s in range(S):
        for j in range(J_num + 1):
            if X_map[s][j] == 0:
                bnds.append(bnd)
    # constraints: per-BS resource inequalities plus per-slice assignment equalities
    cons = []
    for j in range(J_num):
        cons.append(
            {'type': 'ineq', 'fun': lambda z, j=j: ResourceConstraint('down', z, j, X_map, I, RHO, S, J_num, load)})
        cons.append(
            {'type': 'ineq', 'fun': lambda z, j=j: ResourceConstraint('up', z, j, X_map, I, RHO, S, J_num, load)})
        cons.append(
            {'type': 'ineq', 'fun': lambda z, j=j: ResourceConstraint('compute', z, j, X_map, I, RHO, S, J_num, load)})
    for s in range(S):
        if np.max(X_map[s][0:J_num]) == 1:  # base station already pinned  todo(*risk noted in original)
            continue
        cons.append(
            {'type': 'eq', 'fun': lambda z, s=s: EqConstraint(z, s, X_map, I, RHO, S, J_num, load)})
    # objective
    objective = lambda z: cost(type, z, X_map, I, RHO, S, J_num, load, alpha, beta)
    # initial value z0
    z0 = np.zeros(ValCount(X_map, S, J_num))
    # start from each slice's previous base station with ys = 0: always a feasible point
    X_map_for_z0 = np.copy(X_map)
    for s in range(S):
        if XtoZ(s, I[s], X_map, S, J_num) != -1:
            z0[XtoZ(s, I[s], X_map, S, J_num)] = 1
            for j in range(J_num):
                X_map_for_z0[s][j] = -1
            X_map_for_z0[s][I[s]] = 1
    # todo(*initial resource allocation, i.e. ys temporarily fixed to 0)
    ys = Simplex(X_map_for_z0, RHO, S, J_num, load)
    for s in range(S):
        z0[XtoZ(s, J_num, X_map, S, J_num)] = ys[s]
    solution = minimize(objective, z0, method='SLSQP', bounds=bnds, constraints=cons)
    # todo(*check whether this line is correct)
    z = solution.x
    return z, cost(0, z, X_map, I, RHO, S, J_num, load, alpha, beta), cost(1, z, X_map, I, RHO, S, J_num, load, alpha,
                                                                           beta), cost(2, z, X_map, I, RHO, S, J_num,
                                                                                       load, alpha, beta)
def Simplex(X_map, RHO, S, J_num, load):
    """Solve for the ys once all base stations are fixed.

    Linear program: min c'Y  s.t.  A Y <= b,  0 <= Y <= 1, where each BS
    contributes three capacity rows (down / up / compute) and c = -1 so the
    total served fraction is maximized.
    """
    A = np.zeros((J_num * 3, S))
    b = np.zeros(J_num * 3)
    c = np.zeros(S)
    c += -1
    for j in range(J_num):
        base_row = j * 3
        for s in range(S):
            if X_map[s][j] == 1:
                A[base_row][s] = RHO[s][0]
                A[base_row + 1][s] = RHO[s][1]
                A[base_row + 2][s] = RHO[s][2]
        b[base_row] = load[j][0]
        b[base_row + 1] = load[j][1]
        b[base_row + 2] = load[j][2]
    # every ys bounded to [0, 1]
    bnds = [(0, 1)] * S
    solution = linprog(c, A_ub=A, b_ub=b, bounds=bnds)
    return solution.x
def solve(X_map, I, RHO, S, J_num, load, alpha, beta, type):
    """Round the relaxed solution: repeatedly run ``opt``, pin the largest
    x_sj to 1 (preferring x_ss, i.e. staying on the previous base station),
    until every slice is pinned, then recompute the ys exactly via ``Simplex``.

    Returns (X_map, J, ys, cost_all, cost_d, cost_m, degradation,
    num_migration).
    """
    J = np.zeros(S, dtype=int)  # mapping result: J[s] = chosen BS of slice s
    J -= 1
    X_map_init = np.copy(X_map)
    slice_has_select_bs = []
    # NOTE(review): the inner `for s in range(S)` below shadows this loop
    # variable; the outer loop only bounds the number of rounding rounds.
    for s in range(S):
        X_map_this_loop = np.copy(X_map)
        z, cost_all, cost_d, cost_m = opt(np.copy(X_map_this_loop), I, RHO, S, J_num, load, alpha, beta, type)
        # todo(*could be improved, e.g. pin x_ij once it exceeds a threshold)
        # find the largest x_ij and pin it to 1
        zz = z
        for s in range(S):
            zz[XtoZ(s, J_num, X_map, S, J_num)] = 0  # zero the ys entries of the result
        max_z = np.max(zz)
        max_z_index = np.where(zz == max_z)  # there may be several simultaneous maxima
        max_z_index = max_z_index[0]
        max_z_xii_index = []
        for z_index in max_z_index:
            l_x, l_y = ZtoX(z_index, np.copy(X_map_this_loop), S, J_num)
            if l_y == I[l_x]:
                max_z_xii_index.append(z_index)
        if len(max_z_xii_index) > 0:
            for z_index in max_z_xii_index:
                l_x, l_y = ZtoX(z_index, np.copy(X_map_this_loop), S, J_num)
                for j in range(J_num):
                    X_map[l_x][j] = -1
                X_map[l_x][l_y] = 1
                J[l_x] = l_y
        else:  # no maximum is an x_ss: among the maxima, pin the one whose ys is largest
            max_ys = -1
            max_ys_index = -1
            for z_index in max_z_index:
                l_x, l_y = ZtoX(z_index, X_map, S, J_num)
                # NOTE(review): this reads the ys of slice `s` (the zeroing
                # loop's final value), not of `l_x` -- looks like a bug;
                # confirm the intended index.
                if z[XtoZ(s, J_num, X_map, S, J_num)] > max_ys:
                    max_ys = z[XtoZ(s, J_num, X_map, S, J_num)]
                    max_ys_index = z_index
            l_x, l_y = ZtoX(max_ys_index, X_map, S, J_num)
            for j in range(J_num):
                X_map[l_x][j] = -1
            X_map[l_x][l_y] = 1
            J[l_x] = l_y
        if ValCount(X_map, S, J_num) == S:
            break
    # all base stations fixed: solve for ys via LP: min c'Y s.t. AY<=b, 0<=Y<=b
    ys = Simplex(np.copy(X_map), np.copy(RHO), S, J_num, load)
    for s in range(S):
        X_map[s][J_num] = ys[s]
    # degradation part, derived from ys
    degradation = 0
    for s in range(S):
        degradation += (1 - ys[s])
    cost_d = degradation * alpha
    # migration part
    num_migration = num_of_migration(X_map, I)
    cost_m = beta * num_migration
    # total of the two cost terms
    cost_all = cost_d + cost_m
    return X_map, J, ys, cost_all, cost_d, cost_m, degradation, num_migration
def solve1(X_map, I, RHO, S, J_num, load, alpha, beta, type):
    """Variant of ``solve`` that pins every maximal x_sj directly, with no
    preference for keeping a slice on its previous base station.

    Returns (X_map, J, ys, cost_all, cost_d, cost_m, degradation,
    num_migration).
    """
    J = np.zeros(S, dtype=int)  # mapping result: J[s] = chosen BS of slice s
    J -= 1
    # NOTE(review): the inner `for s in range(S)` below shadows this loop
    # variable; the outer loop only bounds the number of rounding rounds.
    for s in range(S):
        X_map_this_loop = np.copy(X_map)
        z, cost_all, cost_d, cost_m = opt(np.copy(X_map_this_loop), I, RHO, S, J_num, load, alpha, beta, type)
        # todo(*could be improved, e.g. pin x_ij once it exceeds a threshold)
        # find the largest x_ij and pin it to 1
        zz = z
        for s in range(S):
            zz[XtoZ(s, J_num, X_map, S, J_num)] = 0  # zero the ys entries of the result
        max_z = np.max(zz)
        max_z_index = np.where(zz == max_z)  # there may be several simultaneous maxima
        max_z_index = max_z_index[0]
        for z_index in max_z_index:
            l_x, l_y = ZtoX(z_index, np.copy(X_map_this_loop), S, J_num)
            for j in range(J_num):
                X_map[l_x][j] = -1
            X_map[l_x][l_y] = 1
            J[l_x] = l_y
        if ValCount(X_map, S, J_num) == S:
            break
    # all base stations fixed: solve for ys via LP: min c'Y s.t. AY<=b, 0<=Y<=b
    ys = Simplex(np.copy(X_map), np.copy(RHO), S, J_num, load)
    for s in range(S):
        X_map[s][J_num] = ys[s]
    # degradation part, derived from ys
    degradation = 0
    for s in range(S):
        degradation += (1 - ys[s])
    cost_d = degradation * alpha
    # migration part
    num_migration = num_of_migration(X_map, I)
    cost_m = beta * num_migration
    # total of the two cost terms
    cost_all = cost_d + cost_m
    return X_map, J, ys, cost_all, cost_d, cost_m, degradation, num_migration
# Method 1: convex optimization over both cost terms
def alg_optimize(S, J_num, X_map, load, RHO, I, ys, iter, K, mu):
    """Run ``solve`` for every simulation point, scaling the slice demands by
    K[i] each round, and collect [cost_d, cost_m, cost_all, degradation,
    num_migration] per point.

    Bug fix: ``RHO = RHO_init`` only aliased the array, so the in-place
    ``RHO[s] *= K[i][s]`` corrupted RHO_init and the scaling compounded
    across iterations (the commented-out ``RHO = RHO_init * K[i]`` shows the
    intent); demands are now scaled on a fresh copy every round.
    """
    o = np.zeros((iter, 5))
    RHO_init = np.copy(RHO)
    for i in range(iter):
        print(i)
        RHO = np.copy(RHO_init)  # fresh copy: keep RHO_init pristine
        for s in range(S):
            RHO[s] *= K[i][s]
        # upper bound of the degradation function (0.0001 avoids d == 0)
        # NOTE(review): `d` is computed but unused -- possibly alpha was
        # meant to be normalized by it; confirm.
        d = 0.0001
        for s in range(S):
            if ys[s] * (1 / K[i][s]) < 1:
                d += 1 - ys[s] * (1 / K[i][s])
        alpha = mu / S
        # migration upper bound (original todo: over-counted, S times)
        beta = (1 - mu) / S
        X_map_o, J, ys, cost_all, cost_d, cost_m, degradation, num_migration = solve(np.copy(X_map), I, RHO, S, J_num,
                                                                                     load, alpha, beta, 0)
        I = J  # update each slice's current base station
        o[i][0] = cost_d
        o[i][1] = cost_m
        o[i][2] = cost_all
        o[i][3] = degradation
        o[i][4] = num_migration
    return o
# Method 2: optimize the degradation cost only (migration cost ignored)
def alg_without_migration_cost(S, J_num, X_map, load, RHO, I, ys, iter, K, mu):
    """Like ``alg_optimize`` but runs ``solve1`` with objective type 1
    (degradation only), ignoring the migration term during optimization.

    Bug fix: same RHO aliasing defect as ``alg_optimize`` -- the in-place
    ``RHO[s] *= K[i][s]`` mutated RHO_init and compounded across iterations;
    demands are now scaled on a fresh copy every round.
    """
    o = np.zeros((iter, 5))
    RHO_init = np.copy(RHO)
    for i in range(iter):
        print(i)
        RHO = np.copy(RHO_init)  # fresh copy: keep RHO_init pristine
        for s in range(S):
            RHO[s] *= K[i][s]
        # upper bound of the degradation function (0.0001 avoids d == 0)
        # NOTE(review): `d` is computed but unused -- confirm.
        d = 0.0001
        for s in range(S):
            if ys[s] * (1 / K[i][s]) < 1:
                d += 1 - ys[s] * (1 / K[i][s])
        alpha = mu / S
        # migration upper bound (original todo: over-counted, S times)
        beta = (1 - mu) / S
        X_map_o, J, ys, cost_all, cost_d, cost_m, degradation, num_migration = solve1(np.copy(X_map), I, RHO, S, J_num,
                                                                                      load, alpha, beta, 1)
        I = J  # update each slice's current base station
        o[i][0] = cost_d
        o[i][1] = cost_m
        o[i][2] = cost_all
        o[i][3] = degradation
        o[i][4] = num_migration
    return o
# Method 3: fully static baseline -- never re-provision, never migrate
def static_fix_prov(S, J_num, X_map, load, RHO, I, ys, iter, K, mu):
    """Baseline that keeps the initial provisioning ``ys`` fixed.

    Degradation of each slice is the served fraction it loses once its
    demand grows by K[i][s]; migration is always zero.  Returns an
    (iter, 5) array of [cost_d, cost_m, cost_all, degradation,
    num_migration] rows.
    """
    o = np.zeros((iter, 5))
    for i in range(iter):
        print(i)
        # upper bound of the degradation function (0.0001 avoids d == 0);
        # kept from the original even though unused afterwards
        d = 0.0001
        for s in range(S):
            served = ys[s] * (1 / K[i][s])
            if served < 1:
                d += 1 - served
        alpha = mu / S
        beta = (1 - mu) / S
        degradation = 0
        for s in range(S):
            served = ys[s] * (1 / K[i][s])
            if served < 1:
                degradation += 1 - served
        cost_d = alpha * degradation
        num_migration = 0
        cost_m = beta * num_migration
        cost_all = cost_d + cost_m
        o[i] = (cost_d, cost_m, cost_all, degradation, num_migration)
    return o
# Method 4: keep the initial placement but re-provision ys optimally
def static_opt_prov(S, J_num, X_map, load, RHO, I, ys, iter, K, mu):
    """Semi-static baseline: the placement is frozen to the initial mapping
    ``I`` while the ys are recomputed optimally (via ``Simplex``) for each
    demand scaling K[i].  Migration is always zero.

    Bug fix: same RHO aliasing defect as ``alg_optimize`` -- the in-place
    ``RHO[s] *= K[i][s]`` mutated RHO_init and compounded across iterations;
    demands are now scaled on a fresh copy every round.
    """
    o = np.zeros((iter, 5))
    RHO_init = np.copy(RHO)
    # placement frozen to the initial mapping I
    X_map_init = np.zeros_like(X_map)
    X_map_init -= 1
    for s in range(S):
        X_map_init[s][I[s]] = 1
    for i in range(iter):
        print(i)
        RHO = np.copy(RHO_init)  # fresh copy: keep RHO_init pristine
        for s in range(S):
            RHO[s] *= K[i][s]
        # upper bound of the degradation function (0.0001 avoids d == 0)
        # NOTE(review): `d` is computed but unused -- confirm.
        d = 0.0001
        for s in range(S):
            if ys[s] * (1 / K[i][s]) < 1:
                d += 1 - ys[s] * (1 / K[i][s])
        alpha = mu / S
        # migration upper bound (original todo: over-counted, S times)
        beta = (1 - mu) / S
        ys = Simplex(np.copy(X_map_init), np.copy(RHO), S, J_num, load)
        # degradation part, derived from ys
        degradation = 0
        for s in range(S):
            degradation += (1 - ys[s])
        cost_d = degradation * alpha
        # migration part: placement never changes
        num_migration = 0
        cost_m = beta * num_migration
        # total of the two cost terms
        cost_all = cost_d + cost_m
        o[i][0] = cost_d
        o[i][1] = cost_m
        o[i][2] = cost_all
        o[i][3] = degradation
        o[i][4] = num_migration
    return o
if __name__ == '__main__':
    ############## initial parameters
    # Parameter 1: number of slices
    S = 18
    print("切片数量")
    print(S)
    # Parameter 2: number of base stations
    J_num = 6
    print("MEC数量")
    print(J_num)
    # Parameter 3: candidate base-station sets
    X_map = np.random.binomial(1, 0.8, [S, J_num])
    candidate_bs_num = np.sum(X_map, 1)
    slices_of_candidate_bs_num_equalTo_0 = np.where(candidate_bs_num == 0)
    # a slice with no candidate base station at all gets one picked at random
    for s in slices_of_candidate_bs_num_equalTo_0[0]:
        j = np.random.randint(0, J_num, 1)
        X_map[s][j] = 1
    X_map -= 1
    X_map = np.c_[X_map, np.zeros(S)]  # in X_map, 0 marks a free variable, 1 means s is mapped to j (or ys = 1), -1 marks a non-candidate BS
    # Parameter 4: base-station resources
    load = np.zeros((J_num, 3))  # column 0: per-BS downlink resource; column 1: uplink; column 2: compute
    load += 3
    # Parameter 5: slice demands C_req_s_down, C_req_s_up, C_req_s_compute, randomly generated
    RHO = slices(S)
    # Parameter 6: weight factors  todo(*parameters to be tuned)
    alpha = 1 / S  # to be tuned
    beta = 1 / S  # to be tuned
    # Parameter 7: initial placement and initial ys
    I = np.zeros(S, dtype=int)
    I -= 1  # -1 means first-time mapping: no initial base station
    X_map_o, J, ys, cost_all, cost_d, cost_m, degradation, num_migration = solve1(np.copy(X_map), I, RHO, S, J_num,
                                                                                  load, alpha,
                                                                                  beta,
                                                                                  0)  # first mapping pass  todo(*alpha, beta not passed through)
    for s in range(S):
        I[s] = J[s]
    # Parameter 8: number of simulation points
    iter = 3  # todo(*tunable)
    # Parameter 9: slice scaling factors K, RHO = K[i] * RHO
    K = generate_K(S, iter)
    print(K)
    mu = 0.5
    # NOTE(review): the printed labels below do not match the "Method N"
    # comments above the function definitions (e.g. alg_without_migration_cost
    # is defined as Method 2 but printed as Method 4) -- confirm intended.
    print("方法1:凸优化")
    o1 = alg_optimize(S, J_num, X_map, load, RHO, I, ys, iter, K, mu)
    print("方法2:静态")
    o2 = static_fix_prov(S, J_num, X_map, load, RHO, I, ys, iter, K, mu)
    print("方法3:半静态")
    o3 = static_opt_prov(S, J_num, X_map, load, RHO, I, ys, iter, K, mu)
    print("方法4:不带迁移代价")
    o4 = alg_without_migration_cost(S, J_num, X_map, load, RHO, I, ys, iter, K, mu)
    print("方法1:凸优化")
    print(o1)
    print("方法2:静态")
    print(o2)
    print("方法3:半静态")
    print(o3)
    print("方法4:不带迁移代价")
    print(o4)
|
# Thanks to ChristianECooper's comments about the regex and findall problems
from itertools import groupby
from re import split
VOWELS = set('aeiouAEIOU')


def flesch_kincaid(text):
    """Return the Flesch-Kincaid grade level of *text*, rounded to 2 places.

    Sentences are delimited by '.' or '!'; a syllable is approximated as a
    maximal run of vowels within a word.

    Bug fix: empty fragments produced by re.split (e.g. after "!!" or any
    mid-text terminator) previously *break*-ed out of the loop, silently
    dropping every later sentence; they are now skipped with *continue*.
    Raises ZeroDivisionError for text with no sentences/words, as before.
    """
    sentences = syllables = words = 0.0
    for sentence in split(r'[!.]', text):
        if not sentence:
            continue
        for word in sentence.split():
            # groupby yields one group per run of vowels (1) / non-vowels (0);
            # summing the keys counts the vowel runs, i.e. the syllables.
            syllables += sum(
                k for k, _ in groupby(1 if a in VOWELS else 0 for a in word))
            words += 1
        sentences += 1
    return round(
        0.39 * (words / sentences) + 11.8 * (syllables / words) - 15.59, 2)
|
year = int(input("请输入年份:"))
month = int(input("请输入月份:"))
day = int(input("请输入日:"))
# Cumulative day counts before each month in a common (non-leap) year.
# Bug fix: the entry before September was 244; Jan..Jul sum to
# 31+28+31+30+31+30+31 = 212, so with August it must be 212+31 = 243.
months = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)
if 0 < month <= 12:
    total = months[month - 1] + day
    # Gregorian leap-year rule: divisible by 4 but not 100, or by 400.
    # Bug fix: the original test classified ordinary leap years such as 2024
    # as common years and century years such as 1900 as leap years.
    if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
        if month > 2:
            total += 1
    print(total)
else:
    # Invalid month: report and stop (the original went on to crash with
    # NameError because the running total was never assigned).
    print("输入错误!")
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from helloworld.runoob1 import runoob1
from helloworld.runoob2 import runoob2
# Demo of importing from a package: call order is runoob2 first, then runoob1.
runoob2()
runoob1()
|
from django.db import models
from django.conf import settings
from django.template.defaultfilters import slugify
from django.utils.html import strip_tags
from tinymce import HTMLField
from filebrowser.fields import FileBrowseField
# Create your models here.
class Category(models.Model):
    """Content category; its slug is auto-generated from the name on save."""
    category_name = models.CharField(max_length=50, blank=False, unique=True)
    is_published = models.BooleanField(default=True, help_text='Check the box to publish category.')
    category_slug = models.SlugField(max_length=255, blank=True, unique=True, help_text='Text that shows in URL. Will automatically populate when object is saved.')
    # image picked via django-filebrowser; restricted to raster formats
    category_image = FileBrowseField('Category image', max_length=500, extensions=['.jpg',
                                                                                   '.jpeg',
                                                                                   '.gif',
                                                                                   '.png',
                                                                                   '.tif',
                                                                                   '.tiff'], blank=True)
    category_content = HTMLField('Category content', blank=True)
    class Meta:
        verbose_name_plural = 'categories'
    def save(self, *args, **kwargs):
        """Populate the slug from the name when it is blank, then save."""
        if not self.category_slug:
            self.category_slug = slugify(self.category_name)
        super(Category, self).save(*args, **kwargs)
    def __str__(self):
        return self.category_name
class Tag(models.Model):
    """Short free-form label for posts; slug auto-generated from the name."""
    tag_name = models.CharField(max_length=50, blank=False, unique=True, help_text='Tag should be short. 1 or 2 words and less than 50 characters.')
    tag_slug = models.SlugField(max_length=255, blank=True, unique=True, help_text='Text that shows in URL. Will automatically populate when object is saved.')
    def save(self, *args, **kwargs):
        """Populate the slug from the name when it is blank, then save."""
        if not self.tag_slug:
            self.tag_slug = slugify(self.tag_name)
        super(Tag, self).save(*args, **kwargs)
    def __str__(self):
        return self.tag_name
class Post(models.Model):
    """Blog post with SEO metadata; slug, meta description and header title
    are derived automatically on save when left blank."""
    author = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, blank=True, null=True)
    is_published = models.BooleanField(default=False, help_text='Check the box to publish post.')
    meta_title = models.CharField(max_length=200, unique=True, blank=False, help_text='Title that shows up in Google search.')
    header_title = models.CharField(max_length=200, unique=True, blank=True, help_text='Title that shows on page. Should typically match meta title.')
    meta_description = models.CharField(blank=True, max_length=250, help_text='Brief description that shows up in Google search. Approx. 160 characters.')
    publication_date = models.DateTimeField(help_text='Original publication date.')
    modification_date = models.DateTimeField(auto_now=True, help_text='Date the post was modified from original version.')
    featured_image = FileBrowseField('Featured image', max_length=500, extensions=['.jpg',
                                                                                   '.jpeg',
                                                                                   '.gif',
                                                                                   '.png',
                                                                                   '.tif',
                                                                                   '.tiff'], blank=True, help_text='Image featured in post. Must be at least 1,000px X 1,000px')
    thumbnail_image = FileBrowseField('Thumbnail image', max_length=500, extensions=['.jpg',
                                                                                     '.jpeg',
                                                                                     '.gif',
                                                                                     '.png',
                                                                                     '.tif',
                                                                                     '.tiff'], blank=True, help_text='Thumbnail image used across site. Must be at least 1,000px X 1,000px')
    post_content = HTMLField('Content', blank=True)
    post_category = models.ManyToManyField(Category, blank=True)
    post_tag = models.ManyToManyField(Tag, blank=True)
    post_slug = models.SlugField(max_length=255, blank=True, unique=True, help_text='Text that shows in URL. Will automatically populate when object is saved.')
    custom_js = models.TextField(blank=True)
    custom_css = models.TextField(blank=True)
    def save(self, *args, **kwargs):
        """Populate slug, meta description and header title when blank."""
        if not self.post_slug:
            self.post_slug = slugify(self.meta_title)
        if not self.meta_description:
            # NOTE(review): the content is sliced *before* tags are stripped,
            # so the excerpt may be shorter than 230 visible characters --
            # confirm this is acceptable.
            self.meta_description = strip_tags(self.post_content[:230]) + '...'
        if not self.header_title:
            self.header_title = self.meta_title
        super(Post, self).save(*args, **kwargs)
    def __str__(self):
        return self.meta_title
|
"""Entry point for TwitOff Flask application."""
from twistoff.app import create_app
APP = create_app()
|
from xml.etree import ElementTree
import csv
import io
import os
import mechanize
from bs4 import BeautifulSoup
# Python 2 script: scans BDTD_USP XML records, follows each document URL and
# tallies the document formats scraped from the USP page.
# directory containing the XML files
diretorio = 'BDTD_USP/'
# list of every .xml file found in the directory
files = []
for file in os.listdir(diretorio):
    if file.endswith(".xml"):
        files.append(file)
# URLs whose document page could not be processed
errors = []
# dictionary counting each distinct document format found
formats = dict()
for filexml in files:
    with open(diretorio + filexml, 'rt') as f:
        tree = ElementTree.parse(f)
        root = tree.getroot()
        for arquivo in root.findall('{http://oai.ibict.br/mtd2-br/}Arquivo'):
            info = arquivo.getchildren()
            for i in info:
                if not i.getchildren():
                    if i.text != None:
                        if i.tag == '{http://oai.ibict.br/mtd2-br/}URL':
                            url = i.text
                            try:
                                br = mechanize.Browser()
                                page = br.open(url)
                                html = page.read()
                                soup = BeautifulSoup(html)
                                # parse the USP page HTML to locate the document format:
                                # it sits in the 3rd (or 4th, when present) div of this class
                                div = soup.find('div', {'class': 'DocumentoTituloTexto2'})
                                div = div.findNext('div' , {'class' : 'DocumentoTituloTexto2'})
                                div = div.findNext('div' , {'class' : 'DocumentoTituloTexto2'})
                                if (div.findNext('div' , {'class' : 'DocumentoTituloTexto2'}) != None):
                                    div = div.findNext('div' , {'class' : 'DocumentoTituloTexto2'})
                                # format is the text between the first '.' and the '('
                                texto = div.text.split('.')
                                texto = texto[1].split('(')
                                texto = texto[0]
                                # if the format is already in the dictionary, add one
                                if formats.has_key(texto):
                                    formats[texto] += 1
                                # otherwise register the new format
                                else:
                                    formats[texto] = 1
                            # NOTE(review): bare except swallows every error
                            # (network, parsing, missing divs) as "no file" --
                            # deliberate best-effort, but consider narrowing.
                            except:
                                print 'Documento sem arquivo associado' , url , "---" , filexml
                                errors.append(url)
print formats
print 'Sem documento: ' , len(errors)
|
import math
from utils import *
import numpy as np
import random
from numpy.core.numeric import normalize_axis_tuple
class Bird:
    """A point agent with a position and a unit heading vector.

    NOTE(review): relies on ``normalize`` from utils (not shown here);
    assumed to normalize the 2-vector in place -- confirm.
    """

    def __init__(self, x, y):
        self.pos_x, self.pos_y = x, y
        # random initial heading in [0, 2*pi)
        self.angle = random.random() * math.pi * 2
        self.dir = np.array([math.cos(self.angle), math.sin(self.angle)])

    def step(self, mul=1):
        """Advance the position along the heading by ``mul`` units."""
        self.pos_x += self.dir[0] * mul
        self.pos_y += self.dir[1] * mul

    def turn(self, angle):
        """Rotate the heading by ``angle`` degrees, wrapping into [0, 2*pi]."""
        angle = math.radians(angle)
        normalize(self.dir)
        self.angle += angle
        if self.angle < 0:
            self.angle = self.angle + math.pi * 2
        if self.angle > math.pi * 2:
            self.angle = self.angle - math.pi * 2
        self.dir[0], self.dir[1] = math.cos(self.angle), math.sin(self.angle)
        normalize(self.dir)

    def turnByVec(self, vec):
        """Steer the heading toward ``vec`` and resync the cached angle.

        Bug fix: the angle was recovered with asin(dir[1]), which only spans
        [-pi/2, pi/2] and collapses the left quadrants onto the right ones;
        atan2 keeps the full quadrant information.
        """
        self.dir[0] += vec[0]
        self.dir[1] += vec[1]
        normalize(self.dir)
        self.angle = math.atan2(self.dir[1], self.dir[0])

    def getPos(self):
        """Return the exact position as [x, y]."""
        return [self.pos_x, self.pos_y]

    def getPixelPos(self):
        """Return the position floored to integer pixel coordinates."""
        return [math.floor(self.pos_x), math.floor(self.pos_y)]
|
from selenium import webdriver

link = "http://suninjuly.github.io/find_xpath_form"

# Pre-bind the names used in `finally` so a failure early in the `try` block
# doesn't raise NameError there and mask the original exception (the original
# referenced alert_text/browser unconditionally).
browser = None
alert_text = None
try:
    browser = webdriver.Chrome()
    browser.get(link)
    browser.find_element_by_tag_name('input').send_keys("Ivan")
    browser.find_element_by_name('last_name').send_keys("Petrov")
    browser.find_element_by_class_name('city').send_keys("Smolensk")
    browser.find_element_by_id('country').send_keys("Russia")
    browser.find_element_by_xpath("//button[@type='submit']").click()
    alert = browser.switch_to.alert
    alert_text = alert.text
    alert.accept()
finally:
    # print the last token of the alert (the exercise's answer code)
    if alert_text is not None:
        print(alert_text.split()[-1])
    if browser is not None:
        browser.quit()
|
# 변수의 선언과 할당
# a = 1
# b = 1.1
#변수를 선언하고 변수에 들어 있는 데이터 확인하는 방법
# print(a + b)
# 다양한 형태로 선언과 할당
# c, d = 1, 1.1
# print(c + d)
# x, y = 12, 30
# print(x, y)
# x = y
# print(x, y)
# x, y = 12, 30
# x, y = y, x
# print(x, y)
# 영어, 숫자로 구성(한글도 가능)
# a123 = 1
# 한글가능 = 2
# print(a123 + 한글가능)
# 띄어쓰기 금지. 언더바(_)로 표시
#한글 가능 = 2
#print(한글 가능) # SyntaxError: invalid syntax
# 한글_가능 = 2
# print(한글_가능)
# 대문자, 소문자 구분 가능
# q = 1
# Q = 2
# print('q', q)
# print('Q', Q)
# 도형
# ★ = 3
# print(★) # SyntaxError: invalid character in identifier
# 숫자로 시작하는 변수명
# a123 = 1
# 123a = 1 #SyntaxError: invalid syntax
# 중복된 변수명
# a = 10
# print('첫번째a', a)
# a = 100
# print('두번째a', a)
# 예약어(파이썬에서 문법적인 용도로 사용되지고 있는 단어)
# print('안녕하세요.')
# and = 10 #SyntaxError: invalid syntax
# print(and)
|
from django.db import models
class PageCounter(models.Model):
    """Tracks the total number of site visits (verbose_name: 'visitor count')."""
    objects = models.Manager()
    count = models.IntegerField(verbose_name="방문자수")
# Per-nation search and total-likes ranking
class Nation(models.Model):
    """A country with aggregate like / post / point counters.

    ``name`` is restricted to ``nation_list`` (English stored value,
    Korean display label).
    """
    nation_list = (
        ('Afghanistan', '아프가니스탄'),
        ('Albania', '알바니아'),
        ('Algeria', '알제리아'),
        ('Andorra', '안도라'),
        ('Angola', '앙골라'),
        ('Antigua and Barbuda', '안티구아 앤 발부다'),
        ('Argentina', '아르헨티나'),
        ('Armenia', '알메니아'),
        ('Australia', '호주'),
        ('Azerbaijan', '아제르바이젠'),
        ('Bahamas', '바하마스'),
        ('Bahrain', '바레인'),
        ('Bangladesh', '방글라데시'),
        ('Barbados', '바르바도'),
        ('Belarus', '벨라루스'),
        ('Belgium', '벨기에'),
        ('Belize', '베리즈'),
        ('Benin', '베닌'),
        ('Bhutan', '부탄'),
        ('Bolivia', '볼리비아'),
        ('Bosnia and Herzegovina', '보스니아 헤르체코비나'),
        ('Botswana', '보츠와나'),
        ('Brazil', '브라질'),
        ('Brunei', '브루네이'),
        ('Bulgaria', '불가리아'),
        ('Burkina Faso', '브루키나 파소'),
        ('Burundi', '부룬다이'),
        ('Cabo Verde', '카보 베르드'),
        ('Cambodia', '캄보디아'),
        ('Cameroon', '카메룬'),
        ('Canada', '캐나다'),
        ('Central African Republic (CAR)', '중앙 아프리카 공화국'),
        ('Chad', '차드'),
        ('Chile', '칠레'),
        ('China', '중국'),
        ('Colombia', '콜롬비아'),
        ('Comoros', '코모로스'),
        ('Congo, Democratic Republic of the', '콩고민주공화국'),
        ('Costa Rica', '코스타리카'),
        ("Cote d'Ivoire", '코트디부아르'),
        ('Croatia', '크로아티아'),
        ('Cuba', '쿠바'),
        ('Cyprus', '싸이프러스'),
        ('Czechia', '체코'),
        ('Denmark', '덴마크'),
        ('Djibouti', '지부티'),
        ('Dominica', '도미니카'),
        ('Dominican Republic', '도미니카 공화국'),
        ('Ecuador', '에콰도르'),
        ('Egypt', '이집트'),
        ('El Salvador', '엘 살바도르'),
        ('Equatorial Guinea', '적도 기니'),
        ('Eritrea', '에르트레아'),
        ('Estonia', '에스토니아'),
        ('Eswatini', '에스와티니'),
        ('Ethiopia', '에티오피아'),
        ('Fiji', '피지'),
        ('Finland', '핀란드'),
        ('France', '프랑스'),
        ('Gabon', '가봉'),
        ('Gambia', '감비아'),
        ('Georgia', '조지아'),
        ('Germany', '독일'),
        ('Ghana', '가나'),
        ('Greece', '그리스'),
        ('Grenada', '그레나다'),
        ('Guatemala', '과테말라'),
        ('Guinea', '기니'),
        ('Guinea-Bissau', '기니비사우'),
        ('Guyana', '구야나'),
        ('Haiti', '하이티'),
        ('Honduras', '온두라스'),
        ('Hungary', '헝가리'),
        ('Iceland', '아이스란드'),
        ('India', '인도'),
        ('Indonesia', '인도네시아'),
        ('Iran', '이란'),
        ('Iraq', '이라크'),
        ('Ireland', '아일랜드'),
        ('Israel', '이스라엘'),
        ('Italy', '이탈리아'),
        ('Jamaica', '자메이카'),
        ('Japan', '일본'),
        ('Jordan', '요르단'),
        ('Kazakhstan', '카자흐스탄'),
        ('Kenya', '케냐'),
        ('Kiribati', '키리바티'),
        ('Kosovo', '코소보'),
        ('Kuwait', '쿠웨이트'),
        ('Kyrgyzstan', '키르기스탄'),
        ('Laos', '라오스'),
        ('Latvia', '라트비아'),
        ('Lebanon', '레바논'),
        ('Lesotho', '레소토'),
        ('Liberia', '라이베리아'),
        ('Libya', '리비야'),
        ('Liechtenstein', '리히텐슈타인'),
        ('Lithuania', '리투아니아'),
        ('Luxembourg', '룩셈부르그'),
        ('Madagascar', '마다가스카르'),
        ('Malawi', '말라위'),
        ('Malaysia', '말레이시아'),
        ('Maldives', '말디브'),
        ('Mali', '말리'),
        ('Malta', '말타'),
        ('Marshall Islands', '마쉘 섬'),
        ('Mauritania', '마리타니아'),
        ('Mauritius', '마리티우스'),
        ('Mexico', '멕시코'),
        ('Micronesia', '미크로네시아'),
        ('Moldova', '몰도바'),
        ('Monaco', '모나코'),
        ('Mongolia', '몽골'),
        ('Montenegro', '몬테네그로'),
        ('Morocco', '모로코'),
        ('Mozambique', '모잠비크'),
        ('Myanmar', '마이안마르'),
        ('Namibia', '나미비아'),
        ('Nauru', '나우루'),
        ('Nepal', '네팔'),
        ('Netherlands', '네덜란드'),
        ('New Zealand', '뉴질랜드'),
        ('Nicaragua', '니카라구아'),
        ('Niger', '니게르'),
        ('Nigeria', '나이지리아'),
        ('North Korea', '북한'),
        ('North Macedonia', '북마케도니아'),
        ('Norway', '노르웨이'),
        ('Oman', '오만'),
        ('Pakistan', '파키스탄'),
        ('Palau', '팔라우'),
        ('Palestine', '팔레스타인'),
        ('Panama', '파나마'),
        ('Papua New Guinea', '파푸아 뉴기니'),
        ('Paraguay', '파라과이'),
        ('Peru', '페루'),
        ('Philippines', '필리핀'),
        ('Poland', '폴란드'),
        ('Portugal', '포르투갈'),
        ('Qatar', '카타르'),
        ('Romania', '루마니아'),
        ('Russia', '러시아'),
        ('Rwanda', '르완다'),
        ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'),
        ('Saint Lucia', '세인트 루시아'),
        ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'),
        ('Samoa', '사모아'),
        ('San Marino', '산 마리노'),
        ('Sao Tome and Principe', 'Sao Tome and Principe'),
        ('Saudi Arabia', '사우디 아라비아'),
        ('Senegal', '세네갈'),
        ('Serbia', '세르비아'),
        ('Seychelles', 'Seychelles'),
        ('Sierra Leone', 'Sierra Leone'),
        ('Singapore', '싱가포르'),
        ('Slovakia', '슬로바키아'),
        ('Slovenia', '슬로베니아'),
        ('Solomon Islands', '솔로몬 섬'),
        ('Somalia', '소말리아'),
        ('South Africa', '남아프리카'),
        ('South Korea', '한국'),
        ('South Sudan', '남수단'),
        ('Spain', '스페인'),
        ('Sri Lanka', '스리랑카'),
        ('Sudan', '수단'),
        ('Suriname', '수리남'),
        ('Sweden', '스웨덴'),
        ('Switzerland', '스위스랜드'),
        ('Syria', '시리아'),
        ('Taiwan', '대만'),
        ('Tajikistan', '타지키스탄'),
        ('Tanzania', '탄자니아'),
        ('Thailand', '태국'),
        ('Timor-Leste', '티모르 레스트'),
        ('Togo', '토고'),
        ('Tonga', '통가'),
        ('Trinidad and Tobago', '트리니다드 토바고'),
        ('Tunisia', '튀니시아'),
        ('Turkey', '터키'),
        ('Turkmenistan', '투르크메니스탄'),
        ('Tuvalu', '투발루'),
        ('Uganda', '우간다'),
        ('Ukraine', '우크라이나'),
        ('United Arab Emirates (UAE)', '아랍에미리트'),
        ('United Kingdom (UK)', '영국'),
        ('United States of America', '미국'),
        ('Uruguay', '우루과이'),
        ('Uzbekistan', '우즈베키스탄'),
        ('Vanuatu', '바누아투'),
        ('Vatican City', '바티칸'),
        ('Venezuela', '베네수엘라'),
        ('Vietnam', '베트남'),
        ('Yemen', '예맨'),
        ('Zambia', '잠비아'),
        ('Zimbabwe', '짐바브웨')
    )
    objects = models.Manager()
    name = models.CharField(max_length=100, choices=nation_list)
    # national flag image
    image = models.ImageField()
    # total number of likes across this nation's posts
    sum_of_like = models.IntegerField(default=0)
    # total number of posts
    total_post = models.IntegerField(default=0)
    # total points
    nation_point = models.IntegerField(default=0)
    def __str__(self):
        return self.name
# Korean prompt word used for study recommendations.
class Keyword(models.Model):
    """A Korean prompt with translation, pronunciation, display flags and
    the top-three nation ranking for that prompt."""
    objects = models.Manager()
    name = models.CharField(max_length=50)
    translation = models.CharField(max_length=50)
    pronunciation = models.CharField(max_length=50)
    # description of the prompt shown when writing
    explanation = models.CharField(max_length=100)
    # total likes across this prompt's posts
    sum_of_like = models.IntegerField(default=0)
    # flag controlling display among the other prompts (verbose_name warns not to touch)
    is_show = models.BooleanField(verbose_name="다른 제시어 표시 변수, 건드리지 말자", null=False, default=False)
    # pin to the home screen for today only (check to enable)
    today = models.BooleanField(verbose_name="오늘만 보이게 하려면 체크", null=False, default=False)
    # date the prompt was released
    release_date = models.DateTimeField()
    # nations ranked 1st, 2nd and 3rd (related_names are Korean ordinals)
    first_nation = models.ForeignKey(Nation, on_delete=models.CASCADE, related_name="첫번째")
    second_nation = models.ForeignKey(Nation, on_delete=models.CASCADE, related_name="두번째")
    third_nation = models.ForeignKey(Nation, on_delete=models.CASCADE, related_name="세번째")
    def __str__(self):
        return self.name
class Post(models.Model):
    """A user submission for a prompt: three sentences (with optional
    translations), attributed to a nation and nickname."""
    objects = models.Manager()
    keyword = models.ForeignKey(Keyword, on_delete=models.CASCADE)
    nation = models.ForeignKey(Nation, on_delete=models.CASCADE)
    nation_name = models.CharField(max_length=100, null=False)
    first = models.CharField(max_length=100, null=False)
    second = models.CharField(max_length=100, null=False)
    third = models.CharField(max_length=100, null=False)
    trans_1 = models.CharField(max_length=100, default="")
    trans_2 = models.CharField(max_length=100, default="")
    trans_3 = models.CharField(max_length=100, default="")
    nickname = models.CharField(max_length=100, null=False)
    # creation timestamp (date and time)
    release_date = models.DateTimeField(auto_now_add=True)
    # number of likes on this post
    num_of_like = models.IntegerField(default=0)
    # whether the three sentences have been translated
    is_trans = models.BooleanField(default=False)
    def __str__(self):
        return self.keyword.name
|
import time, pytest,sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
from localSettings import *
import localSettings
from utilityTestFunc import *
class Test:
    """KMS UI test 742: moderated-channel flow.

    Uploads five entries as an end user, publishes three of them to a
    moderated channel, then verifies the channel owner's pending tab
    (sort/filter, approve/reject) and the resulting per-entry privacy
    states back on the end user's My Media page.
    """
    #==============================================================================================================
    # Test Description
    # Channel page - channel Moderated
    #==============================================================================================================
    testNum = "742"
    enableProxy = False
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    timeout_accured = "False"
    driver = None
    common = None
    # Test variables
    entryName1 = None
    entryName2 = None
    entryName3 = None
    entryName4 = None
    entryName5 = None
    newUserId = None
    newUserPass = None
    entryDescription = "Entry description"
    entryTags = "entrytags1,entrytags2,"
    filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\AutomatedBenefits.jpg'
    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        return request.param

    def test_01(self,driverFix,env):
        #write to log we started the test
        logStartTest(self,driverFix)
        try:
            ########################### TEST SETUP ###########################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            # NOTE(review): rebinding 'self' here follows the framework's
            # initialize() contract (it returns the test object and driver).
            self,self.driver = clsTestService.initialize(self, driverFix)
            self.common = Common(self.driver)
            # Unique entry names per run (GUID suffix avoids collisions).
            self.entryName1 = clsTestService.addGuidToString('entryName1')
            self.entryName2 = clsTestService.addGuidToString('entryName2')
            self.entryName3 = clsTestService.addGuidToString('entryName3')
            self.entryName4 = clsTestService.addGuidToString('entryName4')
            self.entryName5 = clsTestService.addGuidToString('entryName5')
            # Credentials of the moderated channel's owner.
            self.newUserId = "pythonautomation1@mailinator.com"
            self.newUserPass = "Kaltura1!"
            ##################### TEST STEPS - MAIN FLOW #####################
            writeToLog("INFO","Step 1: Going to perform login to KMS site as End-user")
            if self.common.loginAsUser() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 1: FAILED to login as End-user")
                return
            self.entriesToUpload = {
                self.entryName1: self.filePath,
                self.entryName2: self.filePath,
                self.entryName3: self.filePath,
                self.entryName4: self.filePath,
                self.entryName5: self.filePath }
            writeToLog("INFO","Step 2: Going to upload 5 entries")
            if self.common.upload.uploadEntries(self.entriesToUpload, self.entryDescription, self.entryTags) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 2: FAILED to upload 5 entries")
                return
            writeToLog("INFO","Step 6: Going to set entry #4 as Unlisted")
            if self.common.myMedia.publishSingleEntryPrivacyToUnlistedInMyMedia(self.entryName4) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED to set entry #4 as Unlisted")
                return
            writeToLog("INFO","Step 7: Going to publish entries 1-3 to Moderated channel")
            if self.common.channel.addContentToChannel("KMS-Automation_Moderate_Channel", [self.entryName1, self.entryName2, self.entryName3], isChannelModerate=True, publishFrom = enums.Location.CHANNELS_PAGE) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 7: FAILED to publish entries 1-3 to Moderated channel")
                return
            writeToLog("INFO","Step 8: Going to logout from End-user")
            if self.common.login.logOutOfKMS() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 8: FAILED failed to logout from End-user")
                return
            writeToLog("INFO","Step 9: Going to login to KMS with channel's owner")
            if self.common.login.loginToKMS(self.newUserId, self.newUserPass) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9: FAILED to login with channel's owner")
                return
            # Pending entries are expected in alphabetical order after sorting.
            expectedEntriesList = [self.entryName1, self.entryName2, self.entryName3]
            writeToLog("INFO","Step 6: Going to sort entries by Alphabetical & Image type")
            if self.common.channel.sortAndFilterInPendingTab(enums.SortBy.ALPHABETICAL, enums.MediaType.IMAGE, "KMS-Automation_Moderate_Channel") == False:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED to sort entries by Alphabetical & Image type")
                return
            writeToLog("INFO","Step 7: Going to verify entries order - by Alphabetical & Image type")
            if self.common.myMedia.verifyEntriesOrder(expectedEntriesList, enums.Location.PENDING_TAB) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 7: FAILED to verify entries order - by Alphabetical & Image type")
                return
            writeToLog("INFO","Step 10: Going to handle entries in Pending tab: rejecting entry #1, Approving entry #2")
            if self.common.channel.handlePendingEntriesInChannel("KMS-Automation_Moderate_Channel", self.entryName1, self.entryName2, False) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 10: FAILED to handle entries in Pending tab")
                return
            writeToLog("INFO","Step 11: Going to logout ")
            if self.common.login.logOutOfKMS() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 11: FAILED failed to logout")
                return
            writeToLog("INFO","Step 12: Going to perform login to KMS site End-user")
            if self.common.loginAsUser() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 12: FAILED to login as End-user")
                return
            # Expected privacy per entry after moderation decisions above.
            self.entries = {self.entryName1: enums.EntryPrivacyType.REJECTED,
                            self.entryName2: enums.EntryPrivacyType.PUBLISHED,
                            self.entryName3: enums.EntryPrivacyType.PENDING,
                            self.entryName4: enums.EntryPrivacyType.UNLISTED,
                            self.entryName5: enums.EntryPrivacyType.PRIVATE }
            writeToLog("INFO","Step 13: Going to verify the entries' privacy on my-media")
            try:
                for entry in self.entries:
                    if self.common.myMedia.verifyEntryPrivacyInMyMedia(entry, self.entries.get(entry)) == False:
                        writeToLog("INFO","Step 13: FAILED verify privacy for entry: " + str(entry))
                        self.status = "Fail"
                        return
            except:
                writeToLog("INFO","Step 13: FAILED verify privacy for entry: " + str(entry))
                self.status = "Fail"
                return
            ##################################################################
        # if an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)

    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO","**************** Starting: teardown_method ****************")
            # On failure the session may be logged in as the channel owner;
            # switch back to the end-user before cleaning up.
            if self.status == "Fail" :
                self.common.login.logOutOfKMS()
                self.common.loginAsUser()
            self.common.myMedia.deleteEntriesFromMyMedia([self.entryName1, self.entryName2, self.entryName3, self.entryName4, self.entryName5])
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")

    pytest.main('test_' + testNum + '.py --tb=line')
|
import math, random, sys, fileinput
inputs = fileinput.input()  # lines from the files named on argv, or stdin if none
# Main Class
class Main():
    """Streaming JSON-ish linter.

    Scans the input line by line and prints each line that passes the
    checks; lines with a detected defect are reported to stderr and
    suppressed.  Checks: floating-point literals missing a lead zero,
    mismatched {} / [] nesting, and suspicious quotation usage.
    """
    def __init__(self, inputs):
        stack = []          # open-brace stack, shared across lines
        quote_queue = []    # history of quote/colon/comma chars seen
        open_braces = ["{","["]
        close_braces = ["}","]"]
        quotes = ["\'","\"",":",","]
        # 3-char sliding window; seeded with letters so the first two
        # comparisons can never look like ".<digit>".
        floating_point = ['a','b','c']
        for line in inputs:
            print_line = True
            for char in line:
                # Detects missing lead zeros
                floating_point[0] = floating_point[1]
                floating_point[1] = floating_point[2]
                floating_point[2] = char
                if floating_point[1] == '.' and floating_point[2].isdigit():
                    if not floating_point[0].isdigit():
                        self.print_error(line, "Missing lead zero")
                        # NOTE(review): pops the brace stack on this error
                        # path too - raises IndexError if the stack is empty.
                        del stack[len(stack) - 1]
                        print_line = False
                        break
                if char in open_braces:
                    stack.append(char)
                elif char in close_braces:
                    # Incorrect Closing parentesis
                    # NOTE(review): a closing brace with an empty stack is
                    # silently ignored - confirm that is intended.
                    if not(len(stack) == 0):
                        if (stack[len(stack) - 1] == "[" and char == "]") or (stack[len(stack) - 1] == "{" and char == "}"):
                            del stack[len(stack) - 1]
                        else:
                            self.print_error(line, "Incorrect closing parentesis")
                            del stack[len(stack) - 1]
                            print_line = False
                            break
                elif char in quotes:
                    # Incorrect Quotations ' instead of "
                    if char == "\'" and len(quote_queue) > 0:
                        if quote_queue[len(quote_queue) - 1] == "\'":
                            self.print_error(line, "Incorrect Quotation Marks")
                            print_line = False
                            break
                    # Nedted Quotations "string"example""
                    if char == "\"" and len(quote_queue) > 2:
                        if quote_queue[len(quote_queue) - 1] == "\"" and quote_queue[len(quote_queue) - 2] == "\"" and quote_queue[len(quote_queue) - 3] == "\"":
                            quote_queue.append(":")
                            self.print_error(line, "Nested Quotation Marks")
                            print_line = False
                            break
                    quote_queue.append(char)
            # Print valid lines
            if print_line:
                print(line)

    # Print error message to stderr
    def print_error(self, line, error_msg):
        sys.stderr.write("------------------\nErr with line\n")
        sys.stderr.write(line)
        sys.stderr.write("\n" + error_msg + "\n------------------\n")
# Entry point: lint the input stream (work happens in Main.__init__).
Main(inputs)
|
from redis_queue import RedisQueue
from redis_dictionary import RedisDictionary
import redis
from lxml.html import fromstring
from urllib.parse import urlparse
import json
import re
import os
import requests
class BeletagCallback:
    """Scraper callbacks for shop.beletag.com.

    Seeds category listing pages into a Redis queue, fans out pagination
    and product URLs, parses product pages and stores the results in a
    Redis-backed dictionary (db=2).  Product images are cached on disk.
    """
    def __init__(self):
        self.q_pages_name = 'beletag_pages'
        self.q_elements_name = 'beletag_elements'
        # db=1 holds the crawl queues, db=2 the parsed products.
        self.redisClient = redis.StrictRedis(host='localhost', port=6379, db=1)
        self.redisClientElements = redis.StrictRedis(host='localhost', port=6379, db=2)
        self.pages_queue = RedisQueue(client=self.redisClient, queue_name=self.q_pages_name)
        self.elements_queue = RedisQueue(client=self.redisClient, queue_name=self.q_elements_name)
        self.main_url = "https://shop.beletag.com/catalog/"
        # Category id -> name, grouped by department (men / women / common).
        self.categories = {
            "Мужское": {
                "485": "Брюки,бриджи",
                "771": "Спорт",
                "486": "Спорт",
                "559": "Бельевая группа",
                "487": "Шорты",
                "489": "Джемпера",
                "490": "Футболки",
                "634": "Поло",
                "715": "Комплекты"
            },
            "Женское": {
                "734": "Базовый ассортимент",
                "602": "Бельевая группа",
                "498": "Блузки, рубашки",
                "599": "Брюки, бриджи",
                "601": "Водолазки",
                "500": "Джемпера",
                "474": "Майки",
                "496": "Платья, сарафаны",
                "770": "Спорт",
                "598": "Толстовки, куртки",
                "497": "Футболки",
                "501": "Шорты",
                "502": "Юбки"
            },
            "Общее": {
                "510": "Одежда для дома",
                "661": "Термобельё"
            }
        }

    def __call__(self):
        self.set_categories_pages()

    def set_categories_pages(self):
        """Seed the pages queue with page 1 of every known category."""
        for cat in self.categories:
            for catId in self.categories[cat]:
                url = 'https://shop.beletag.com/catalog/{}/?pagecount=90&mode=ajax&PAGEN_1=1'.format(catId)
                self.pages_queue.push(url)

    def page_parse(self, url, html):
        """Parse a category listing page: enqueue all pagination pages of
        the category and all product-detail URLs found on this page."""
        tree = fromstring(html)
        parsed_url = urlparse(url).path.split('/')
        cat_id = parsed_url[-2]
        if not cat_id:
            return
        # parse first_level_urls (pagination links)
        a_count = tree.xpath('//div[@class="pages"][1]/a')
        if len(a_count) > 0:
            pages_count = int(a_count[-1].text.strip())
        else:
            pages_count = 1
        print(url, 'pages_count: {}'.format(pages_count))
        for x in range(1, pages_count + 1):
            ready_url = self.main_url + "{}/?pagecount=90&mode=ajax&PAGEN_1={}".format(cat_id, str(x))
            self.pages_queue.push(ready_url)
        # parse elements_urls (product detail links)
        links = tree.xpath('//a[@class="base-photo"]/@href')
        if len(links) > 0:
            for link in links:
                url_params = urlparse(link).path.split('/')
                category_id = url_params[2]
                element_id = url_params[3]
                ready_url = self.main_url + "{}/{}/".format(category_id, element_id)
                self.elements_queue.push(ready_url)

    def element_parse(self, url, html):
        """Collect all product information from a detail page and store it
        under the element id in the Redis dictionary."""
        rd = RedisDictionary(self.redisClientElements)
        parsed_url = urlparse(url).path.split('/')
        element_id = parsed_url[-2]
        result = {'url': url}
        tree = fromstring(html)
        catalog_element = tree.xpath('//div[@class="catalog-element"]')
        if len(catalog_element) == 0:
            return 0
        catalog_element = catalog_element[0]
        cn = catalog_element.xpath('//div[@class="catalog-element"]/span[@itemprop="category"]/@content')[0]
        cn = cn.split(" > ")[-1:]
        category_name = "".join(cn)
        good_id = catalog_element.attrib['data-id']
        category_id = catalog_element.attrib["data-categoryid"]
        try:
            img = catalog_element.xpath("//div[@class='photo-zoom']/img/@src")[0]
        except Exception:
            img = None
        # FIX: the original called save_image unconditionally, so a missing
        # photo (img is None) raised AttributeError on img.split().
        if img is not None:
            self.save_image(img, good_id + '.' + "".join(img.split('.')[-1:]))
        # Stock levels are embedded in an inline script as JSON.
        scripts = tree.xpath("//script/text()")
        stocks = None
        for script in scripts:
            if script is None:
                continue
            stock = re.search('offersQuantity\[[0-9]+\]=(.*?);', script)
            if stock is None:
                continue
            else:
                stocks = json.loads(stock.groups()[0])
                break
        try:
            info = tree.xpath('//div[@id="item-full-zoom"]')[0]
        except Exception:
            info = ''
        try:
            name = info.xpath('//div[@id="item-full-zoom"]/div[@class="title"]/span[@itemprop="name"]/text()')[0]
        except Exception:
            name = ''
        try:
            articul = info.xpath('//div[@class="article"]/span/@content')[0]
        except Exception:
            articul = ''
        try:
            compositions = info.xpath('//div[@class="composition"]')
            complecte = ''
            season = ''
            for composition in compositions:
                for comp in composition.getchildren():
                    if "Состав" in comp.text:
                        complecte = comp.tail
                    elif "Сезон" in comp.text:
                        season = comp.tail
        except Exception:
            complecte = ''
            season = ''
        try:
            description = info.xpath('//div[@class="description"]/text()')[0]
        except Exception:
            description = ''
        try:
            price = info.xpath('//div[contains(@class, "price")]/div[contains(@class, "price")]/text()')
            if not price[-1]:
                price = price[0]
            else:
                price = price[-1]
        except Exception:
            price = ''
        # FIX: initialize both lists outside the try below - if the colors
        # lookup failed before 'sizes' was assigned, the size loop at the
        # bottom raised NameError.
        colors = []
        sizes = []
        try:
            colors_container = info.xpath('//div[@class="colors"]/div')
            for color in colors_container:
                colors.append({"name": color.text, "id": color.attrib["data-id"]})
            sizes_container = info.xpath('//div[@class="sizes"]/div')
            for size in sizes_container:
                sizes.append({"name": size.text[1:], "id": size.attrib["data-id"]})
        except Exception:
            pass
        result = {
            "category":
                {
                    "name": category_name,
                    "id": category_id
                },
            "item":
                {
                    "name": name,
                    "id": good_id
                },
            "articul": articul,
            "season": season,
            "complecte": complecte,
            "description": description,
            "price": price,
            "colors": []
        }
        for color in colors:
            tmp_color = {
                "id": color["id"],
                "name": color["name"],
                "size": []
            }
            for size in sizes:
                # size count
                c = 0
                # FIX: stocks may still be None when no inline script matched;
                # the original then raised TypeError on the 'in' test.
                if stocks and (color["id"] in stocks) and (size["id"] in stocks[color["id"]]):
                    c = stocks[color["id"]][size["id"]]
                tmp_color["size"].append({
                    "id": size["id"],
                    "name": size["name"],
                    "count": c
                })
            result["colors"].append(tmp_color)
        rd[element_id] = result

    def save_image(self, img_url, name):
        """Download *img_url* into images/beletag/<name>, skipping files
        that already exist."""
        directory = os.getcwd() + "/images/beletag/"
        file = directory + name
        if os.path.exists(file):
            return
        if not os.path.exists(directory):
            os.makedirs(directory)
        url = 'http://shop.beletag.com/' + img_url
        img = requests.get(url, stream=True)
        with open(file, "bw") as f:
            for chunk in img.iter_content(8192):
                f.write(chunk)
|
""" Constants file for Auth0's seed project """
JWT_PAYLOAD = 'jwt_payload'
ACCESS_TOKEN = 'access_token'
USER_EMAIL = 'user_email'
USER_ID = 'user_id'
PERMISSION = 'permission'
ROLE = 'role'
|
# 扑克牌初始化
import abc
import random
class Card:
    """Base playing card.

    Stores rank and suit and derives the (hard, soft) point values from
    the subclass-supplied ``_points()`` hook.
    """
    def __init__(self, rank, suit):
        """
        :param rank: card rank.
        :type rank: str
        :param suit: card suit.
        :type suit: str
        """
        self.rank = rank
        self.suit = suit
        self.hard, self.soft = self._points()
class NumberCard(Card):
    """Pip card (2-10): worth its face value both hard and soft."""
    def _points(self):
        value = int(self.rank)
        return value, value
class AceCard(Card):
    """Ace: 1 point hard, 11 points soft."""
    def _points(self):
        return 1, 11
class FaceCard(Card):
    """Face card (J/Q/K): always worth 10 points."""
    def _points(self):
        return 10, 10
class Suit:
    """A card suit: a human-readable name plus its display symbol.

    >> Club, Diamond, Heart, Spade = Suit('Club', '♣'), Suit('Diamond', '♦'), Suit('Heart', '♥'), Suit('Spade', '♠')
    """
    def __init__(self, name, symbol):
        """
        :param name: suit name, e.g. ``'Club'``.
        :param symbol: one-character suit glyph.
        """
        self.name, self.symbol = name, symbol
class CardFactory(object):
    """Fluent two-step card builder: ``factory.rank(n).suit(s)``.

    >> card8 = CardFactory()
    >> deck8 = [card8.rank(r+1).suit(s) for r in range(13) for s in (Club, Diamond, Heart, Spade)]
    """
    def rank(self, rank):
        """Record the card class and display string for *rank* (1-13).

        :param rank: numeric rank, 1 = ace, 11-13 = J/Q/K.
        """
        special = {
            1: (AceCard, 'A'),
            11: (FaceCard, 'J'),
            12: (FaceCard, 'Q'),
            13: (FaceCard, 'K'),
        }
        self.class_, self.rank_str = special.get(rank, (NumberCard, str(rank)))
        return self

    def suit(self, suit):
        """Finish the build: create the card with the stored rank and *suit*."""
        return self.class_(self.rank_str, suit)
def card(rank, suit):
    """Deal one card: pick the right Card subclass for *rank*.

    :param rank: numeric rank, 1-13.
    :param suit: suit object.
    :return: an AceCard, NumberCard or FaceCard.
    :raises Exception: when rank is outside 1-13.
    """
    if not 1 <= rank < 14:
        raise Exception("Rank out of range")
    if rank == 1:
        return AceCard(rank, suit)
    if rank >= 11:
        return FaceCard(rank, suit)
    return NumberCard(rank, suit)
# class Deck:
# """洗牌, 发牌.
# """
# def __init__(self):
# self._cards = [card(r+1, s) for r in range(13) for s in (Club, Diamond, Heart, Spade)]
# random.shuffle(self._cards)
#
# def pop(self):
# return self._cards.pop()
# class Deck(list):
# def __init__(self):
# super().__init__(card(r+1, s) for r in range(13) for s in (Club, Diamond, Heart, Spade))
# random.shuffle(self)
class Deck(list):
    def __init__(self, decks=1):
        """A shuffled shoe built from *decks* 52-card decks, with a random
        number of cards burned off the top.

        :param decks: how many decks are mixed together.
        :type decks: int

        Fixes: ``ragne`` -> ``range`` and ``born`` -> ``burn`` (both were
        NameErrors in the original).
        """
        super().__init__()
        for _ in range(decks):
            self.extend(card(r + 1, s) for r in range(13) for s in (Club, Diamond, Heart, Spade))
        random.shuffle(self)
        # Burn 1-52 cards so the shoe start is unpredictable.
        burn = random.randint(1, 52)
        for _ in range(burn):
            self.pop()
# class Hand:
# def __init__(self, dealer_card):
# self.dealer_card = dealer_card
# self.cards = []
#
# def hard_total(self):
# return sum(c.hard for c in self.cards)
#
# def soft_total(self):
# return sum(c.soft for c in self.cards)
# class Hand:
# """打牌策略.
#
# :param dealer_card: 要处理的牌.
# :type dealer_card: Card
# :param *cards: 手中的牌.
# :type *cards: Card
# >> d = Deck()
# >> h = Hand(d.pop(), d.pop(), d.pop())
# """
#
# def __init__(self, dealer_card, *cards):
# self.dealer_card = dealer_card
# self.cards = list(cards)
#
# def hard_total(self):
# return sum(c.hard for c in self.cards)
#
# def soft_total(self):
# return sum(c.soft for c in self.cards)
#
#
# class Hand:
# def __init__(self, *args, **kw):
# if len(args) == 1 and isinstance(args[0], Hand):
# other = args[0]
# self.deal_card = other.deal_card
# self.cards = other.cards
# elif len(args) == 2 and isinstance(args[0], Hand) and 'split' in kw:
# other, card = args
# self.deal_card = other.deal_card
# self.cards = list(cards)
# elif len(args) == 3:
# deal_card, *cards = args
# self.cards = list(cards)
# else:
# raise TypeError("Invalid constructor args={0!r} kw={1!r}".format(args, kw))
#
# def __str__(self):
# return ", ".join(map(str, self.cards))
class Hand:
    """A dealt hand: the dealer's up card plus the player's cards.

    :param dealer_card: the dealer's visible card.
    :param *cards: the player's cards.
    >> d = Deck()
    >> h = Hand(d.pop(), d.pop(), d.pop())
    """
    def __init__(self, dealer_card, *cards):
        self.dealer_card = dealer_card
        self.cards = cards

    @staticmethod
    def freeze(other):
        """Return a shallow copy of *other*."""
        return Hand(other.dealer_card, *other.cards)

    @staticmethod
    def split(other, card0, card1):
        """Split a two-card hand: each original card starts a new hand and
        receives one freshly dealt card."""
        first = Hand(other.dealer_card, other.cards[0], card0)
        second = Hand(other.dealer_card, other.cards[1], card1)
        return first, second

    def __str__(self):
        return ", ".join(map(str, self.cards))
class GameStragegy:
    """Null playing strategy: declines every optional action.

    >> dumb = GameStratege()
    """
    def insurance(self, hand): return False

    def split(self, hand): return False

    def double(self, hand): return False

    def hit(self, hand): return False
class Table:
    """Table simulator: owns the shoe and deals hands.

    Fixes: ``get_hand`` drew from an undefined name ``d`` (now
    ``self.deck``), and the final ``print``/``return`` pair was unreachable
    and referenced an undefined ``send``.
    """
    def __init__(self):
        self.deck = Deck()

    def place_bet(self, amount):
        """Record a bet (stub: just prints it)."""
        print("Bet", amount)

    def get_hand(self):
        """Deal a three-card hand plus a hole card; on shoe exhaustion
        start a fresh deck and return the last dealt hand."""
        try:
            self.hand = Hand(self.deck.pop(), self.deck.pop(), self.deck.pop())
            self.hole_card = self.deck.pop()
        except IndexError:
            # Out of cards: reshuffle a new shoe.
            self.deck = Deck()
            return self.hand
        print("Deal", self.hand)
        return self.hand

    def can_insure(self, hand):
        """Insurance is offered when the dealer's up card allows it."""
        return hand.dealer_card.insure
class BettingStragegy(abc.ABC):
    """Abstract betting strategy.

    Fix: the original subclassed ``abc.ABCMeta``, which turned this class
    into a *metaclass* rather than an abstract base class; ``abc.ABC`` is
    the intended base for abstract classes.
    """
    @abc.abstractmethod
    def bet(self):
        raise NotImplementedError("No bet method")

    def record_win(self):
        pass

    def record_loss(self):
        pass
class Flat(BettingStragegy):
    """Flat betting: always stake a single unit."""
    def bet(self): return 1
class Player:
    """A player at the table.

    Fix: the original was declared ``class Player(Player)`` - a class
    cannot inherit from itself (NameError at class-creation time).
    """
    def __init__(self, table, bet_strategy, game_strategy, **extras):
        """
        :param table: the Table this player sits at.
        :param bet_strategy: object with a ``bet()`` method.
        :param game_strategy: object deciding hit/split/double/insurance.
        :param extras: ad-hoc attributes (e.g. stake, rounds) set directly
            on the instance.
        """
        self.bet_strategy = bet_strategy
        self.game_strategy = game_strategy
        self.table = table
        self.__dict__.update(extras)
|
import random
import numpy as np
# Genetic-algorithm hyper-parameters.
p_n = 50  # population size retained after each generation
n_dimensions = 30  # number of binary genes per individual
n_iter = 1000  # number of generations to evolve
def population(size=None, dims=None):
    """Create the initial population of random bit vectors.

    Generalized (backward compatible): the module-level constants are now
    only defaults, so callers can request other population shapes.

    :param size: number of individuals; defaults to the module-level p_n.
    :param dims: genes per individual; defaults to n_dimensions.
    :return: list of numpy int arrays of 0/1 values.
    """
    if size is None:
        size = p_n
    if dims is None:
        dims = n_dimensions
    return [np.random.choice([0, 1], size=(dims)) for _ in range(size)]
def fitness(pop):
    """Sort the population in place by fitness (count of ones, see
    sort_key), fittest first, and return the same list."""
    pop.sort(key=sort_key, reverse=True)
    return pop
def selection(pop):
    """Pick two parents uniformly at random (with replacement).

    Fix: np.random.randint's upper bound is *exclusive*, so the original
    ``randint(0, len(pop) - 1)`` could never select the last individual.

    :param pop: current population.
    :return: tuple of two individuals.
    """
    r1 = np.random.randint(0, len(pop))
    r2 = np.random.randint(0, len(pop))
    return pop[r1], pop[r2]
def crossover(mates):
    """Single-point crossover: swap the parents' tails at a random cut.

    Fixes: ``np.int`` was removed in NumPy 1.24 (use builtin ``int``), and
    the offspring length now follows the parents instead of the module
    global ``n_dimensions``.

    :param mates: pair (parent1, parent2) of equal-length bit arrays.
    :return: two new offspring arrays.
    """
    parent1, parent2 = mates
    length = len(parent1)
    offspring1 = np.zeros(length, dtype=int)
    offspring2 = np.zeros(length, dtype=int)
    cut = np.random.randint(0, length)
    offspring1[0:cut] = parent1[0:cut]
    offspring1[cut:length] = parent2[cut:length]
    offspring2[0:cut] = parent2[0:cut]
    offspring2[cut:length] = parent1[cut:length]
    return offspring1, offspring2
def mutation(pop):
    """With 2% probability, flip one random bit of one random individual.

    Fix: indices are drawn from the actual population/individual sizes
    instead of the module constants ``p_n``/``n_dimensions`` - the
    population grows past p_n once offspring are appended, and a smaller
    population would have raised IndexError.

    :param pop: population (mutated in place).
    :return: the same population list.
    """
    if np.random.randint(100) < 2:
        who = np.random.randint(len(pop))
        gene = np.random.randint(len(pop[who]))
        # Flip the selected 0/1 gene.
        pop[who][gene] = 1 - pop[who][gene]
    return pop
def sort_key(item):
    """Fitness of an individual: how many of its genes equal 1."""
    ones = 0
    for gene in item:
        ones += 1 if gene == 1 else 0
    return ones
def main():
    """Evolve a OneMax population: print the best individual before and
    after n_iter generations of selection, crossover and mutation."""
    pop = fitness(population())
    print(pop[0])
    for _generation in range(n_iter):
        # Produce 100 offspring (50 crossover pairs) per generation.
        for _ in range(50):
            parents = selection(pop)
            child_a, child_b = crossover(parents)
            if child_a is not None:
                pop.append(child_a)
            if child_b is not None:
                pop.append(child_b)
        pop = mutation(pop)
        # Keep only the fittest p_n individuals.
        pop = fitness(pop)
        pop = pop[0:p_n]
    print(pop[0])
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.contrib.auth.decorators import login_required
import datetime
from PIL import Image,ImageEnhance
from PIL import ImageFilter
from PIL.ImageFilter import (
RankFilter, MedianFilter, MinFilter, MaxFilter
)
from .models import User,Images,Category,Profile,Likes,Collection
# Create your views here.
def index(request):
    """Gallery home page: newest images first, with the categories and the
    ids of the images the current user has liked."""
    images = Images.objects.all().order_by('-datetime').all()
    categories = Category.objects.all()
    if request.user.is_authenticated:
        liked_ids = [like.image.id for like in Likes.objects.filter(user=request.user)]
    else:
        liked_ids = []
    return render(request, "gallery/index.html", {
        "Image": images, "Category": categories, "lists": liked_ids
    })
def login_view(request):
    """Sign a user in; on GET just show the login form."""
    if request.method != "POST":
        return render(request, "gallery/login.html")
    # Attempt to sign user in.
    username = request.POST["username"]
    password = request.POST["password"]
    user = authenticate(request, username=username, password=password)
    # Check if authentication was successful.
    if user is None:
        return render(request, "gallery/login.html", {
            "message": "Invalid username and/or password."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def logout_view(request):
    """End the session and return to the gallery home page."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Create a new account with a profile picture and log the user in."""
    if request.method != "POST":
        return render(request, "gallery/login.html")
    username = request.POST.get("username")
    email = request.POST["email"]
    pic = request.FILES["profile"]
    password = request.POST["password"]
    confirmation = request.POST["confirmation"]
    # Ensure password matches confirmation.
    if password != confirmation:
        return render(request, "gallery/login.html", {
            "message": "Passwords must match."
        })
    # Attempt to create new user.
    try:
        user = User.objects.create_user(username, email, password)
        user.save()
    except IntegrityError:
        return render(request, "gallery/login.html", {
            "message": "Username already taken."
        })
    login(request, user)
    Profile(photo_of=user, pic=pic).save()
    return HttpResponseRedirect(reverse("index"))
def Form_page(request):
    """Display the image-upload form."""
    return render(request, "gallery/form.html")
def Form(request):
    """Persist an uploaded image and its categories, then go home.

    Fix: a non-POST request used to fall off the end and return None,
    which Django turns into a 500; it now re-renders the upload form.
    """
    if request.method == "POST":
        instance = Images(
            user=request.user,
            title=request.POST["title"],
            image=request.FILES["image"],
            description=request.POST["description"],
            datetime=datetime.datetime.now(),
        )
        instance.save()
        # One Category row per checked box.
        for c in request.POST.getlist("checks[]"):
            Category(image=instance, category=c).save()
        return HttpResponseRedirect(reverse("index"))
    return render(request, "gallery/form.html")
def Search(request):
    """Find categories matching the query and render the result page.

    Fixes: anonymous users no longer crash the Likes lookup (same guard as
    index uses), and a non-POST request no longer returns None (a 500 in
    Django) - it redirects home instead.
    """
    if request.method == "POST":
        to_search = request.POST["search"]
        result = Category.objects.filter(category__contains=to_search)
        likes = []
        if request.user.is_authenticated:
            for posts in Likes.objects.filter(user=request.user):
                likes.append(posts.image.id)
        return render(request, "gallery/search_result.html", {
            "result": result, "lists": likes
        })
    return HttpResponseRedirect(reverse("index"))
def api(request):
    """Render the API documentation page."""
    return render(request, "gallery/Api.html")
def profile_pic(request, user_id):
    """Return the URL of a user's profile picture as plain text."""
    owner = User.objects.get(pk=user_id)
    profile = Profile.objects.get(photo_of=owner)
    return HttpResponse(profile.pic.url)
def add_like(request, image_id):
    """Toggle the current user's like on an image; reply with the new state."""
    image = Images.objects.get(pk=image_id)
    existing = Likes.objects.filter(image=image).filter(user=request.user).first()
    if existing is not None:
        # Second click removes the like.
        existing.delete()
        return HttpResponse("dislike")
    Likes(image=image, user=request.user).save()
    return HttpResponse("liked")
def collection(request):
    """Show the images the current user has saved to their collection."""
    user = request.user
    saved_images = [entry.collected_images for entry in user.my_collection.all()]
    liked_ids = [like.image.id for like in Likes.objects.filter(user=user)]
    return render(request, "gallery/collection.html", {
        "Image": saved_images, "lists": liked_ids
    })
def add_collection(request, image_id):
    """Save an image to the user's collection, refusing duplicates."""
    user = request.user
    item = Images.objects.get(pk=image_id)
    for entry in user.my_collection.all():
        if image_id == entry.collected_images.id:
            return HttpResponse("already in list")
    Collection(collection_of=user, collected_images=item).save()
    return HttpResponse("added")
def remove_collection(request, image_id):
    """Drop an image from the user's collection and return to it."""
    target = Images.objects.get(pk=image_id)
    Collection.objects.filter(collection_of=request.user).filter(collected_images=target).delete()
    return HttpResponseRedirect(reverse("collection"))
def _edited_image_path(image_id, prefix, transform):
    """Open the image for *image_id*, apply *transform*, save the result as
    media/<prefix>_<original name> and return the public path.

    Consolidates the load/slice/save logic that was duplicated across the
    four edit views below.

    NOTE(review): like the original, this assumes item.image.url begins
    with a 6-character '/media' prefix and is openable as a local path -
    confirm against MEDIA_URL/MEDIA_ROOT settings.
    """
    item = Images.objects.get(pk=image_id)
    image_url = item.image.url
    sliced = image_url[6:]
    image = Image.open(image_url)
    transform(image).save("media/" + prefix + "_" + sliced)
    return "/media/" + prefix + "_" + sliced


def edit_flip(request, image_id):
    """Mirror the image left/right and return the edited copy's path."""
    return HttpResponse(_edited_image_path(
        image_id, "transpose", lambda im: im.transpose(Image.FLIP_LEFT_RIGHT)))


def edit_contrast(request, image_id):
    """Boost the image contrast by 50% and return the edited copy's path."""
    return HttpResponse(_edited_image_path(
        image_id, "contrast", lambda im: ImageEnhance.Contrast(im).enhance(1.5)))


def edit_bw(request, image_id):
    """Desaturate the image to black-and-white and return the copy's path."""
    return HttpResponse(_edited_image_path(
        image_id, "bw", lambda im: ImageEnhance.Color(im).enhance(0.0)))


def edit_median(request, image_id):
    """Apply a 9x9 median (noise-reduction) filter and return the copy's path."""
    return HttpResponse(_edited_image_path(
        image_id, "median", lambda im: im.filter(MedianFilter(size=9))))
|
import numpy as np
from copy import deepcopy
''' Some kinds of SGD algorithms
Our target is to decide how to update theta
'''
class SGD:
    ''' Plain Stochastic Gradient Descent:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        θ[t] = θ[t-1] - η * g[t]
    The update step is simply the raw gradient.
    '''
    def update(self, g):
        """Return the step to subtract (scaled by η by the caller)."""
        return g
class Momentum(SGD):
    ''' Momentum:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        m[t] = µ * m[t-1] + g[t]
        θ[t] = θ[t-1] - η * m[t]
    State:
        m: velocity accumulator (array/matrix, lazily initialized)
        µ: momentum coefficient
    '''
    def __init__(self, mu=0.09):
        self.m = None
        self.mu = mu

    def update(self, g):
        """Accumulate the gradient into the velocity and return it."""
        if self.m is None:
            # Multiply by 1.0 to copy/convert rather than alias g.
            self.m = 1.0 * g
        else:
            self.m = self.mu * self.m + g
        return self.m
class NAG(SGD):
    ''' Nesterov Accelerated Gradient:
        g[t] = ∇[θ[t−1]]f(θ[t−1] - η * µ * m[t-1])
        m[t] = µ * m[t-1] + g[t]
        θ[t] = θ[t-1] - η * m[t]
    Reformulated on the plain gradient:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        m[t] = µ * m[t-1] + g[t] + µ * (g[t] - g[t-1])
        θ[t] = θ[t-1] - η * m[t]
    State:
        g_1: the previous gradient g[t-1]
        m: velocity accumulator
        µ: momentum coefficient

    Fix: the original never stored the current gradient into g_1, so the
    look-ahead correction term was always µ * g[t] instead of
    µ * (g[t] - g[t-1]).
    '''
    def __init__(self, mu=0.09):
        self.g_1 = None
        self.m = None
        self.mu = mu

    def update(self, g):
        if self.g_1 is None:
            self.g_1 = np.zeros_like(g)
        if self.m is None:
            self.m = g + self.mu * (g - self.g_1)
        else:
            self.m = self.mu * self.m + g + self.mu * (g - self.g_1)
        # Remember the current gradient for the next step's correction.
        self.g_1 = 1.0 * g
        delta = self.m
        return delta
class AdaGrad(SGD):
    ''' Adaptive Gradient:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        n[t] = n[t-1] + g[t]^2
        θ[t] = θ[t-1] - η * g[t] / sqrt(n[t] + ε)
    State:
        n: running sum of squared gradients
        ε: small constant to avoid division by zero

    Fix: the step used ``self.g``, which does not exist (AttributeError);
    the formula divides the *current* gradient g by sqrt(n + ε).
    '''
    def __init__(self, epsilon=0.001):
        self.n = None
        self.epsilon = epsilon

    def update(self, g):
        if self.n is None:
            self.n = g * g
        else:
            self.n = self.n + g * g
        delta = g / np.sqrt(self.n + self.epsilon)
        delta = np.nan_to_num(delta)
        return delta
class RMSProp(SGD):
    ''' RMSProp:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        n[t] = v * n[t-1] + (1-v) * g[t]^2
        θ[t] = θ[t-1] - η * g[t] / sqrt(n[t] + ε)
    State:
        n: exponential moving average of squared gradients
        v: decay rate of the average
        ε: small constant to avoid division by zero

    Fix: the step used ``self.g``, which does not exist (AttributeError);
    the formula divides the *current* gradient g by sqrt(n + ε).
    '''
    def __init__(self, v=0.5, epsilon=0.001):
        self.n = None
        self.v = v
        self.epsilon = epsilon

    def update(self, g):
        if self.n is None:
            self.n = (1 - self.v) * g * g
        else:
            self.n = self.v * self.n + (1 - self.v) * g * g
        delta = g / np.sqrt(self.n + self.epsilon)
        delta = np.nan_to_num(delta)
        return delta
class AdaDelta(SGD):
    ''' Adaptive Delta (as implemented here):
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        n[t] = v * n[t-1] + (1-v) * g[t]^2
        θ[t] = θ[t-1] - η * g[t] / sqrt(n[t] + ε)
    State:
        n: exponential moving average of squared gradients
        v: decay rate
        ε: small constant to avoid division by zero

    Fix: the step used ``self.g``, which does not exist (AttributeError).
    NOTE(review): this formula is identical to RMSProp above; canonical
    AdaDelta also tracks an average of squared *updates* - confirm intent.
    '''
    def __init__(self, v=0.1, epsilon=0.001):
        self.n = None
        self.v = v
        self.epsilon = epsilon

    def update(self, g):
        if self.n is None:
            self.n = (1 - self.v) * g * g
        else:
            self.n = self.v * self.n + (1 - self.v) * g * g
        delta = g / np.sqrt(self.n + self.epsilon)
        delta = np.nan_to_num(delta)
        return delta
class Adam(SGD):
    ''' Adam:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        m[t] = µ * m[t-1] + (1-µ) * g[t]
        ^m[t] = m[t] / (1-µ^t)
        n[t] = v * n[t-1] + (1-v) * g[t]^2
        ^n[t] = n[t] / (1-v^t)
        θ[t] = θ[t-1] - η * ^m[t] / sqrt(^n[t] + ε)
    State:
        m: first-moment (mean) estimate
        n: second-moment (uncentered variance) estimate
        µ, v: decay rates; ε: numerical-stability constant

    Fixes vs the original:
      - defaults for µ/v/ε (the module instantiates ``Adam()`` with no args);
      - n was initialized with g instead of g*g;
      - the second-moment update overwrote the hyper-parameter self.v
        instead of self.n;
      - the bias-correction accumulators multiplied by the *state* (self.m,
        corrupted self.v) instead of the decay rates µ and v.
    '''
    def __init__(self, mu=0.9, v=0.999, epsilon=1e-8):
        self.m = None
        self.n = None
        self.mu = mu
        self.v = v
        self.epsilon = epsilon
        # Running products µ^t and v^t for bias correction.
        self.mut = 1
        self.vt = 1

    def update(self, g):
        if self.m is None:
            self.m = (1 - self.mu) * g
            self.n = (1 - self.v) * g * g
        else:
            self.m = self.mu * self.m + (1 - self.mu) * g
            self.n = self.v * self.n + (1 - self.v) * g * g
        self.mut *= self.mu
        self.vt *= self.v
        _m = self.m / (1 - self.mut)
        _n = self.n / (1 - self.vt)
        delta = _m / (np.sqrt(_n) + self.epsilon)
        delta = np.nan_to_num(delta)
        return delta
class NAGR(SGD):
    ''' NAG with a warm-up momentum schedule:
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        m[t] = µs[t] * m[t-1] + g[t]
        ^m[t] = g[t] + µs[t+1] * m[t]
        θ[t] = θ[t-1] - η * ^m[t]
    State:
        m: velocity accumulator
        µ: asymptotic momentum; µs[t] = µ * (1 - 0.5 * 0.96^(t/250))

    Fixes: ``np.pow`` does not exist (``np.power``), and the look-ahead
    step referenced ``mut_next`` instead of the local ``mu_t_next``.
    '''
    def __init__(self, mu=0.99):
        self.m = None
        self.mu = mu
        # Per-step schedule factor: et decays geometrically.
        self.e = np.power(0.96, 1.0 / 250)
        self.et = 0.5
        self.mu_t = mu * 0.5

    def update(self, g):
        self.et *= self.e
        self.mu_t = self.mu * (1 - self.et)
        if self.m is None:
            self.m = 1.0 * g
        else:
            self.m = self.mu_t * self.m + g
        mu_t_next = self.mu * (1 - self.et * self.e)
        _m = g + mu_t_next * self.m
        delta = _m
        return delta
class Nadam:
    ''' Nadam (Adam with Nesterov momentum and a momentum schedule):
        g[t] = ∇[θ[t−1]]f(θ[t−1])
        ^g = g[t] / (1 - Mul(i=1->t)µs[i])
        m[t] = µ * m[t-1] + (1-µ) * g[t]
        ^m[t] = m[t] / (1 - Mul(i=1->t+1)µs[i])
        n[t] = v * n[t-1] + (1-v) * g[t]^2
        ^n[t] = n[t] / (1 - v^t)
        _m[t] = (1 - µs[t]) * ^g[t] + µs[t+1] * ^m[t]
        θ[t] = θ[t-1] - η * _m[t] / sqrt(^n[t] + ε)
    State:
        m, n: first/second-moment estimates
        µ: momentum (0.99), µs: schedule derived from µ
        v: second-moment decay (0.999); ε: stability constant (1e-8)

    Fixes vs the original: ``np.pow`` -> ``np.power``; the moment updates
    referenced undefined locals (``m``, ``n``, ``_n``, ``epsilon``) instead
    of the instance state, and self.m/self.n were never initialized on the
    first call (TypeError on None).
    '''
    def __init__(self, mu=0.99, v=0.999, epsilon=1e-8):
        self.m = None
        self.n = None
        self.mu = mu
        self.e = np.power(0.96, 1.0 / 250)
        self.et = 0.5
        self.mu_t = mu * 0.5
        self.mum_t = 1      # running product of the µs schedule
        self.v = v
        self.vt = 1         # running v^t
        self.epsilon = epsilon

    def update(self, g):
        # Advance the momentum schedule and its running product.
        self.et *= self.e
        self.mu_t = self.mu * (1 - self.et)
        self.mum_t *= self.mu_t
        _g = g / (1 - self.mum_t)
        if self.m is None:
            self.m = (1 - self.mu) * g
            self.n = (1 - self.v) * g * g
        else:
            self.m = self.mu * self.m + (1 - self.mu) * g
            self.n = self.v * self.n + (1 - self.v) * g * g
        mu_t_next = self.mu * (1 - self.et * self.e)
        mum_t_next = self.mum_t * mu_t_next
        _m = self.m / (1 - mum_t_next)
        self.vt *= self.v
        _n = self.n / (1 - self.vt)
        __m = (1 - self.mu_t) * _g + mu_t_next * _m
        delta = __m / np.sqrt(_n + self.epsilon)
        delta = np.nan_to_num(delta)
        return delta
# Ready-made optimizer instances with default hyper-parameters.
sgd = SGD()
momentum = Momentum()
nag = NAG()
adagrad = AdaGrad()
rmsprop = RMSProp()
adadelta = AdaDelta()
adam = Adam()  # NOTE(review): relies on Adam.__init__ providing defaults for mu/v/epsilon
nagr = NAGR()
nadam = Nadam()
|
import multiprocessing
import adns
import logging
rr = adns.rr.A
def worker(q, c, f, n):
    """One resolution step (Python 2): pop a hostname from the queue and
    submit it to the shared adns resolver; set the finished event when the
    queue is empty.

    :param q: multiprocessing.Queue of hostnames
    :param c: adns resolver state (from adns.init())
    :param f: multiprocessing.Event - signals "no more work"
    :param n: multiprocessing.Condition - wakes the collector loop
    """
    print q.qsize()
    with n:
        if q.empty():
            print "Nothing in the queue"
            f.set()
        else:
            host = q.get()
            # print host
            c.submit(host, rr)
            n.notify()
class WorkerProcess(multiprocessing.Process):
    """Process that repeatedly runs worker() until the finished event is set."""
    def __init__(self, params):
        multiprocessing.Process.__init__(self)
        self._start_flag = params["start"]   # event: released by main() to start all workers at once
        self._jobs = params["jobs"]          # queue of hostnames to resolve
        print self.name, ":queue length:", self._jobs.qsize()
        self._adns = params["adns"]          # shared adns resolver state
        self._new_job = params["notify"]     # condition used to wake the collector
        self._finished = multiprocessing.Event()

    def run(self):
        # Wait for the synchronized start, then drain the queue.
        self._start_flag.wait()
        while not self._finished.is_set():
            worker(self._jobs, self._adns, self._finished, self._new_job)
def main():
    """Resolve the first `num` hostnames of an Alexa-style csv with adns
    worker processes and collect the answers.  (Python 2 script.)"""
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)
    # NOTE(review): hard-coded absolute path to the hostname list.
    f = open("/home/edward/Projects/test_python_adns/top-1m.csv", "r")
    urls = [line.split(',')[1].strip() for line in f.readlines()]
    f.close()
    num = 500
    # Put urls into queue
    urls = urls[:num]
    q = multiprocessing.Queue()
    count = 0
    for each in urls:
        q.put(each)
        count += 1
    print count
    print q.qsize()
    # other initialization
    resolved_list = []
    start_flag = multiprocessing.Event()
    c = adns.init()
    new_job = multiprocessing.Condition()
    # build process pools
    opts = {"start": start_flag, "jobs": q, "adns": c, "notify": new_job}
    pool = [WorkerProcess(opts) for i in xrange(1)]
    for each in pool:
        each.start()
    start_flag.set()
    # Collector loop: wait for a worker signal, then drain completed queries.
    while True:
        with new_job:
            for query in c.completed():
                answer = query.check()
                resolved_list.append(answer)
                print answer
            new_job.wait()
        if len(resolved_list) == count:
            break
    for each in pool:
        each.join()

if __name__ == "__main__":
    main()
|
from re import compile, match

# Strict HH:MM:SS — hours are any two digits, minutes/seconds 00-59.
REGEX = compile(r'^(?P<h>\d{2}):(?P<m>[0-5]\d):(?P<s>[0-5]\d)\Z')

def to_seconds(time):
    """Convert a zero-padded 'HH:MM:SS' string to total seconds.

    Returns None when the string does not match the expected format.
    """
    parsed = match(REGEX, time)
    if not parsed:
        return None
    parts = parsed.groupdict()
    return (int(parts['h']) * 60 + int(parts['m'])) * 60 + int(parts['s'])
|
import pandas as pd

# Load the orders list and print an amount pivot: one row per product name,
# one column per customer name, with zero fill and grand totals.
df = pd.read_csv("..\data\ordersList.csv", encoding="utf-8", header=0)
pivot = df.pivot_table(index="品名", columns="客戶名稱", values="金額",
                       fill_value=0, margins=True, aggfunc="sum")
print(pivot)
|
"""
Run multiple parameter with multiple GPUs and one python script
Usage: python run_all.py
Author: Xu Zhang
Email: xu.zhang@columbia.edu.cn
"""
#! /usr/bin/env python2
import numpy as np
import scipy.io as sio
import time
import os
import sys
import subprocess
import shlex
import argparse
####################################################################
# Parse command line
####################################################################
def usage():
    """Emit a blank line to stderr and exit with status 1.

    Bug fix: the original used the Python-2-only statement
    `print >> sys.stderr`; under Python 3 that parses as a right-shift on
    the print function and raises TypeError instead of printing.  Writing
    to sys.stderr directly works on both interpreters.
    """
    sys.stderr.write("\n")
    sys.exit(1)
class cd:
    """Context manager that temporarily switches the process working directory."""

    def __init__(self, newPath):
        # Expand '~' so home-relative paths work too.
        self._target = os.path.expanduser(newPath)

    def __enter__(self):
        self._origin = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even when the body raised.
        os.chdir(self._origin)
#define gpu you want to use
gpu_set = ['0']
#gpu_set = ['0', '1', '2', '3'] #if you want to use more
parameter_set = [\
    # one single region test
    #'--region_list=4 --loss=dice --metric=iou-multi-region ',
    '--loss=dice --metric=iou ',
    #'--region_list=4 --loss=dice --metric=iou-multi-region --no_alignment --no_ref_mask ',
    ]
number_gpu = len(gpu_set)
process_set = []
# Launch one training job per parameter set, round-robining jobs over the
# available GPUs; once every GPU has a job, block until the batch finishes.
for run in range(1):
    for idx, parameter in enumerate(parameter_set):
        print('Test Parameter: {}'.format(parameter))
        command = 'python one_shot_training_multi_region.py --data_dir=../data/ --log_dir=../result/unet_log/ \
--output_dir=../result/result/ --model_dir=../result/model/ \
{} --num_epochs=100 \
--gpu-id {} --idx={} '.format(parameter, gpu_set[idx%number_gpu], run)
        print(command)
        p = subprocess.Popen(shlex.split(command))
        process_set.append(p)
        if (idx+1)%number_gpu == 0:
            print('Wait for process end')
            for sub_process in process_set:
                sub_process.wait()
            process_set = []
            # Grace period between batches (GPU memory release etc.).
            time.sleep(10)
# Wait for any stragglers from the final (partial) batch.
for sub_process in process_set:
    sub_process.wait()
|
class Reservation:
    """A reservation slot: a location, a time block and a seat count."""

    def __init__(self, location, time_block, spots_available):
        self.location = location
        self.time_block = time_block
        self.spots_available = spots_available

    def dictify(self):
        """Return the reservation as a plain dict (e.g. for JSON output)."""
        keys = ('location', 'time_block', 'spots_available')
        return {key: getattr(self, key) for key in keys}

    def adapt(self, li):
        """Populate the fields from a sequence: [location, time_block, spots]."""
        self.location, self.time_block, self.spots_available = li[0], li[1], li[2]
|
from classes.ListNode import ListNode
class Solution(object):
    """LeetCode 21: merge two sorted singly linked lists.

    Cleanups vs. the original: the stray class-body annotation
    `ListNode: ListNode` (a no-op that merely re-evaluated the import at
    class-definition time) is removed, and the one-node-at-a-time walk of
    the leftover list is replaced by a single tail splice.
    """

    def mergeTwoLists(self, l1, l2):
        """Merge sorted lists *l1* and *l2* into one sorted list.

        Nodes are spliced from the inputs (no value nodes are allocated
        beyond the dummy head).  Returns the merged head, or None when
        both inputs are empty.
        """
        # Dummy head keeps the splice logic uniform; answer is dummy.next.
        dummy = tail = ListNode(0)
        while l1 and l2:
            if l1.val <= l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # At most one list is non-empty now; attach the remainder in O(1)
        # instead of walking it node by node.
        tail.next = l1 or l2
        return dummy.next
|
#
# author : Omid Sharghi (omid.sharghi@sjsu.edu)
#
from pandas import ExcelWriter
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
def sanitize_and_scale(file_name):
    """Load a loan-applications table, clean/encode it and min-max scale it.

    :param file_name: path to a .csv or .xlsx export of the loan data set
    :return: DataFrame of the cleaned, all-numeric columns scaled to [0, 1]
             (also written to SanitizedData.xlsx as a side effect)
    :raises ValueError: if *file_name* has an unsupported extension
    """
    if file_name.endswith('.csv'):
        df = pd.read_csv(file_name)
    elif file_name.endswith('.xlsx'):
        df = pd.read_excel(file_name)
    else:
        # Bug fix: the original fell through with `df` undefined and died
        # later with a confusing NameError.
        raise ValueError("Unsupported file type: {}".format(file_name))
    # Drop the Gender column entirely; drop rows missing core numeric fields.
    df = df.drop(['Gender'], axis=1)
    df = df.dropna(subset=['ApplicantIncome', 'LoanAmount'])
    # Imputation heuristic: approved loans with an unknown credit history
    # are assumed to have a good one.
    for i in df.index:
        if pd.isnull(df.loc[i, 'Credit_History']) and (df.loc[i, 'Loan_Status'] == 'Y'):
            df.at[i, 'Credit_History'] = '1'
    # Derived ratio features.
    df['CreditHistory_By_ApplicantIncome'] = df['Credit_History'] / df['ApplicantIncome']
    df['CreditHistory_By_LoanAmount'] = df['Credit_History'] / df['LoanAmount']
    # Remaining rows must have every modelling column present.
    df = df.dropna(subset=['Married', 'Dependents', 'Self_Employed', 'Credit_History', 'LoanAmount'])
    # Encode the categorical columns as small integers.
    marriage_mapping = {'No': 0, 'Yes': 1}
    df['Married'] = df['Married'].map(marriage_mapping)
    loan_status_mapping = {'N': 0, 'Y': 1}
    df['Loan_Status'] = df['Loan_Status'].map(loan_status_mapping)
    education_mapping = {'Not Graduate': 0, 'Graduate': 1}
    df['Education'] = df['Education'].map(education_mapping)
    property_mapping = {'Rural': 0, 'Semiurban': 1, 'Urban': 2}
    df['Property_Area'] = df['Property_Area'].map(property_mapping)
    self_employed_mapping = {'No': 0, 'Yes': 1}
    df['Self_Employed'] = df['Self_Employed'].map(self_employed_mapping)
    df['Dependents'] = df['Dependents'].replace('3+', 3)
    cols = ['Married', 'Dependents', 'Education', 'Self_Employed', 'Credit_History', 'Property_Area']
    df[cols] = df[cols].applymap(np.int64)
    df = df.dropna()
    df = df.drop(['Loan_ID'], axis=1)
    # Scale every remaining (numeric) column into [0, 1].
    scaler = MinMaxScaler(feature_range=(0., 1.))
    X_scaled = scaler.fit_transform(df)
    df_scaled = pd.DataFrame(X_scaled, columns=df.columns)
    output_sanitized_file(df_scaled)
    return df_scaled
def output_sanitized_file(df):
    """Write *df* to SanitizedData.xlsx (sheet 'Sheet1') in the working dir.

    Bug fix: `ExcelWriter.save()` was removed in pandas 2.0; the
    context-manager form closes/saves the workbook on every supported
    pandas version.  The sheet name is also passed by keyword because the
    positional form was deprecated.
    """
    with ExcelWriter('SanitizedData.xlsx') as writer:
        df.to_excel(writer, sheet_name='Sheet1')
|
import os
import shutil
import subprocess
import tarfile
# Used this function for compatibility issues (subprocess.run) don't work for all Python versions
def run(*popenargs, **kwargs):
    """Minimal stand-in for subprocess.run() for older Python versions.

    Extra keyword arguments:
      input  -- bytes/str fed to the child's stdin (mutually exclusive
                with an explicit `stdin` kwarg)
      handle -- when truthy, raise CalledProcessError on a non-zero exit
                (equivalent of subprocess.run's `check`)

    Returns a (returncode, stdout, stderr) tuple; stdout/stderr are None
    unless the caller requested PIPEs.
    """
    stdin_data = kwargs.pop("input", None)
    raise_on_error = kwargs.pop("handle", False)
    if stdin_data is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = subprocess.PIPE
    proc = subprocess.Popen(*popenargs, **kwargs)
    try:
        stdout, stderr = proc.communicate(stdin_data)
    except:  # noqa: E722 -- deliberately broad: kill the child on *any* failure
        proc.kill()
        proc.wait()
        raise
    retcode = proc.poll()
    if raise_on_error and retcode:
        raise subprocess.CalledProcessError(
            retcode, proc.args, output=stdout, stderr=stderr)
    return retcode, stdout, stderr
# For later improvements, do functions f("name of the packet")
# Function for extraction
# Function for computing the hashes and putting them in a file (V1 = check only those in md5sums) (V2 = compute all of the files in data.tar.xz)
# Function for checking the hashes
# Script flow: create a scratch folder, copy the sample .deb in, unpack the
# `ar` archive, then pull ./md5sums out of control.tar.xz for later checking.
working_directory = os.getcwd()
print(working_directory)
os.mkdir(working_directory + "/Debian_package_extraction_folder") # Handle exceptions (eg. no more space)
# Copy deb file from Download folder (deb_src) to here for manipulations
deb_src = working_directory + "/../Example_Package/cheese-common_3.34.0-1_all.deb"
dst_folder = working_directory + "/Debian_package_extraction_folder"
shutil.copy(deb_src, dst_folder) # Handle exceptions (eg. no more space)
# Extract deb content (ar archive and other archives after) -> md5sums + paths which interest us
os.chdir(working_directory + "/Debian_package_extraction_folder")
deb_extraction = run(["ar", "-x", "cheese-common_3.34.0-1_all.deb"]) # Handle the exceptions
print("Deb Extraction Terminated")
control_xz_extraction = run(["tar", "-xf", "control.tar.xz", "./md5sums"]) # Handle the exceptions
print("Theoretical md5sums Extraction Terminated")
# Now will compare theoretical md5 with ones in the system
# Delete these temporary files to keep some space
# os.chdir(working_directory)
# shutil.rmtree("./Debian_package_extraction_folder")
|
from typing import Dict
from src.contracts.ethereum.multisig_wallet import MultisigWallet
from src.signer.eth.impl import EthSignerImpl
from src.contracts.ethereum.erc20 import Erc20
from src.util.common import Token
from src.util.config import Config
from src.util.web3 import web3_provider
class _ERC20SignerImpl(EthSignerImpl):
    """
    Verifies Secret swap tx and adds it's confirmation to the ERC-20 contract
    Sends the ERC-20 confirmation tx, after verifying SCRT tx stored in the db
    See EthSignerImpl for more info
    """
    def __init__(self, multisig_wallet: MultisigWallet, token: Token,
                 private_key: bytes, account: str, config: Config):
        # Bind the ERC-20 token contract through the configured web3 endpoint
        # before handing the rest of the setup to the base signer.
        provider = web3_provider(config['eth_node_address'])
        self.token_contract = Erc20(provider, token, multisig_wallet.address)
        super().__init__(config=config, multisig_wallet=multisig_wallet, private_key=private_key, account=account)

    def _validate_tx_data(self, swap_data: Dict, submission_data: Dict) -> bool:
        """
        This used to verify secret-20 <-> erc-20 tx data
        :param swap_data: the data from secret20 contract query
        :param submission_data: the data from the proposed tx on the smart contract
        """
        # An ERC-20 transfer must never carry native ETH value.
        if int(submission_data['value']) != 0:  # sanity check
            self.logger.critical(f"Got an erc-20 transaction with a non-empty amount of ETH sent "
                                 f"{swap_data['ethr_tx_hash']}")
            return False
        try:
            # Decode (recipient, amount) from the raw calldata and compare
            # them with what the secret-20 side recorded.
            addr, amount = self.token_contract.get_params_from_data(submission_data['data'])
            return addr.lower() == swap_data['destination'].lower() \
                and amount == int(swap_data['amount'])
        except ValueError as e:
            self.logger.error(f"Failed to verify transaction with submission data: {submission_data} - {str(e)}")
            return False
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def lee_entero():
    """Prompt for an integer value and return it.

    Keeps re-prompting until the input parses as an integer.
    (Python 2 script: uses raw_input and print statements; the user-facing
    prompts are intentionally in Spanish.)
    """
    while True:
        valor = raw_input("Ingrese un número entero: ")
        try:
            valor = int(valor)
            return valor
        except ValueError:
            print "ATENCIÓN: Debe ingresar un número entero."

# Manual smoke test: read one integer and echo it back.
val=lee_entero()
print val
|
import pandas as pd
import numpy as np
from tqdm import tqdm
"""计算每张图片中的box与其它框的iou"""
def box_area(boxes):
    """
    Compute the area of each axis-aligned bounding box.

    Arguments:
        boxes (ndarray[N, 4]): boxes in (x1, y1, x2, y2) format
    Returns:
        ndarray[N]: area of every box
    """
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights

def box_iou(boxes1, boxes2):
    """
    Pairwise intersection-over-union (Jaccard index) of two box sets.

    Both inputs are expected in (x1, y1, x2, y2) format.

    Arguments:
        boxes1 (ndarray[N, 4])
        boxes2 (ndarray[M, 4])
    Returns:
        ndarray[N, M]: IoU for every boxes1/boxes2 pair
    """
    areas1 = box_area(boxes1)
    areas2 = box_area(boxes2)
    # Broadcast boxes1 against boxes2 to get every corner pair at once.
    top_left = np.maximum(boxes1[:, None, :2], boxes2[:, :2])        # [N, M, 2]
    bottom_right = np.minimum(boxes1[:, None, 2:4], boxes2[:, 2:4])  # [N, M, 2]
    # Negative extents mean no overlap -> clip to zero area.
    extents = (bottom_right - top_left).clip(min=0)                  # [N, M, 2]
    intersection = extents[:, :, 0] * extents[:, :, 1]               # [N, M]
    union = areas1[:, None] + areas2 - intersection
    return intersection / union
# For every image/frame (grouped by a synthesized 'name' key), compute each
# box's IoU against the other boxes of the same frame and write the values
# back into the csv's 'iou' column.
train_data = pd.read_csv(r"E:\kaggle\nfl-impact-detection\train_labels1.csv")
# Build a per-frame grouping key by concatenating several identifier columns.
train_data['gameKey'] = train_data['gameKey'].astype('str')
train_data['playID'] = train_data['playID'].astype('str')
train_data['frame'] = train_data['frame'].astype('str')
train_data['name'] = train_data['gameKey'] + train_data['playID'] + train_data['view'] + train_data['video'] + train_data['frame']
# Accumulator for the per-frame [xmin, ymin, xmax, ymax, iou] matrices.
names_mat = np.empty(shape=(0,5))
names = train_data['name'].unique()
names = tqdm(names)
for i, name in enumerate(names):
    # 5 columns are passed to box_iou; box_area only reads the first 4,
    # so the trailing 'iou' column is carried along untouched.
    name_mat = train_data.loc[train_data['name'] == name][["xmin",'ymin','xmax', 'ymax', 'iou']].values
    name_mat_copy = name_mat.copy()
    iou = box_iou(name_mat, name_mat_copy)
    for j in range(iou.shape[0]):
        # Indices of *other* boxes that overlap box j (0 < IoU < 1 excludes
        # both non-overlapping boxes and the box matched with itself).
        iou_seat = np.where((iou[j,:]>0) & (iou[j,:]<1))
        if len(iou_seat[0]) ==0:
            continue
        iou_value = iou[j,iou_seat]
        # NOTE(review): this writes the row-j IoU values into the iou column
        # of the *overlapping* rows, overwriting each other as j advances —
        # confirm that "last writer wins" is the intended semantics.
        name_mat[iou_seat,4] = iou_value
    names_mat = np.concatenate((names_mat,name_mat), axis=0)
# NOTE(review): assigning by position assumes the concatenated per-name order
# matches the original row order of train_data — confirm.
train_data.loc[:,'iou'] = names_mat[:,4]
train_data.to_csv(r"E:\kaggle\nfl-impact-detection\train_labels1.csv", index=False)
|
from flask import Blueprint,render_template
# Blueprint grouping the user-facing pages of the app.
users = Blueprint('users', __name__)

@users.route('/')
def index():
    """Render the landing page for a hard-coded demo user."""
    demo_user = {'username': 'sarpong'}
    return render_template('index.html', user=demo_user)

@users.route('/profile/')
def profile():
    """Render the static profile page."""
    return render_template('profile.html')
|
from django.conf.urls import url
from progress_analyzer import views
# from quicklook.views import export_users_xls
# URL routes for the progress-analyzer app.
urlpatterns = [
    url(r'^user/report$', views.ProgressReportView.as_view(),
        name="progress_analyzer_report"),
    # Bug fix: the dated pattern previously ended in a doubled '$$' anchor.
    # NOTE(review): this route shares its name with the one above; Django's
    # reverse() will only resolve the last registration — confirm intended.
    url(r'^user/report/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})$', views.ProgressReportView.as_view(),
        name="progress_analyzer_report"),
    url(r'^print/progress/excel$', views.progress_excel_export, name="Exceldata"),
    #url(r'^print/progress/excel/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})$',views.progress_excel_export,name="progress_Exceldata"),
    url(r'^user/report/update_schedule$', views.ProgressReportSyncedDateView.as_view(), name="progress_analyzer_report_date"),
]
|
"""
Josh Rudolph
BIMM185
4/6/17
"""
# Parse the first ~98 lines of the TCDB FASTA file: collect the split header
# fields and the protein sequence belonging to each header.
# (Python 2 script: zip() returns a list, which len()/indexing below rely on.)
with open("TCDB.faa", "r") as rfile:
    headers = []
    protSeq = ""
    proteins = []
    x=0
    for line in rfile:
        if x==98:
            break
        if line[0] == '>':
            splitHeader = line.rstrip('\n').split('|')
            headers.append(splitHeader)
            # First header: nothing accumulated yet, so skip the append.
            # NOTE(review): `continue` also skips the x counter increment for
            # this line — confirm the 98-line cap counts lines as intended.
            if x == 0:
                continue
            proteins.append(protSeq)
            protSeq = ""
        else:
            protSeq += line.rstrip('\n')
        x+=1
    # Flush the sequence accumulated for the final header.
    proteins.append(protSeq)
# Second pipe-separated field of each header (presumably an accession id).
ID_2 = []
for i in headers:
    ID_2.append(i[2])
# First word of the fourth pipe-separated field (presumably the TC id).
ID_1 = []
for i in headers:
    curr = i[3].split(' ')
    currID = curr[0]
    ID_1.append(currID)
# combine the two id's
IDs = zip(ID_1, ID_2)
# Emit one "id1 - id2 - sequence" line per protein.
entries = []
for i in range(len(IDs)):
    currEntry = IDs[i][0] + " - " + IDs[i][1] + " - " + proteins[i]
    print(currEntry)
    entries.append(currEntry)
# print("FULL HEADERS:")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
"""
import facebook
def send_the_post(msg):
    """
    Write a Facebook wall post on the configured page.

    :param msg: the message to post
    :return: None (errors surface as exceptions from the Graph API)
    """
    # Fill in the values noted in previous steps here
    # SECURITY NOTE(review): a real page id and access token are hard-coded
    # below — the token should be revoked and moved out of source control.
    cfg = {
        "page_id": "1536198983059262",
        "access_token": "EAAFAEV2SX8MBAD47SOzllYZCujhRqd1HKZB7UZC71ZCOpKOJ7GI3U251gXv4zTFSnoZAoPsNHAm3jRBsZBJX5OZBGQ2UHkp97tYapvDKyp67SeUdieSUSNvuozsnguaGjklNDi5yRETKChATZAG4QE8OAxllll2mp1wZD"
    }
    api = get_api(cfg)
    # NOTE(review): the returned status dict is unused.
    status = api.put_wall_post(msg)
def get_api(cfg):
    """Return a GraphAPI handle for posting as the configured page.

    :param cfg: dict with the Facebook page id and a user access token
    :return: GraphAPI bound to the page token when the page is found among
             the accounts this token manages, otherwise the user-level API
    """
    graph = facebook.GraphAPI(cfg['access_token'])
    # Get page token to post as the page. You can skip
    # the following if you want to post as yourself.
    accounts = graph.get_object('me/accounts')
    for page in accounts['data']:
        if page['id'] == cfg['page_id']:
            graph = facebook.GraphAPI(page['access_token'])
    return graph
# You can also skip the above if you get a page token:
# http://stackoverflow.com/questions/8231877/facebook-access-token-for-pages
# and make that long-lived token as in Step 3
# Manual smoke test: post a throwaway message when run as a script.
if __name__ == "__main__":
    send_the_post("Hello this is a test ! ┏(:|])┛┗(:))┓┗(:D)┛┏(8|)┓")
|
import pygame
from random import randint
class Snow:
    """Animated snowfall: a fixed set of flakes drifting down-left that
    respawn above the screen once they leave it."""

    def __init__(self):
        # Fixed spawn columns; each flake respawns at its own start column.
        self.xpos = [-200, -75, 0, 150, 300, 400, 550, 700, 800, 950, 1100, 1200]
        self.start_xpos = [-200, -75, 0, 150, 300, 400, 550, 700, 800, 950, 1100, 1200]
        self.ypos = []
        self.image = []
        self.image_rect = []
        for idx in range(len(self.xpos)):
            # Random negative y staggers the flakes above the visible area.
            self.ypos.append(randint(-700, 0))
            self.image.append(pygame.image.load("img/bigsnow.gif"))
            self.image_rect.append((self.xpos[idx], self.ypos[idx], 200, 200))

    def update(self, dt):
        """Advance every flake by *dt* ms; recycle flakes that left the screen."""
        for idx in range(len(self.image)):
            if self.ypos[idx] > 300 or self.xpos[idx] < -400:
                self.ypos[idx] = randint(-700, 0)
                self.xpos[idx] = self.start_xpos[idx]
            # Fall twice as fast as the leftward drift.
            self.ypos[idx] += dt / 2
            self.xpos[idx] -= dt / 4
            self.image_rect[idx] = (self.xpos[idx], self.ypos[idx], 100, 100)

    def render(self, screen):
        """Blit every flake onto *screen*."""
        for idx in range(len(self.xpos)):
            screen.blit(self.image[idx], self.image_rect[idx])
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a project hierarchy created when the --generator-output=
and --depth= options is used to put the build configuration files in a separate
directory tree.
"""
import TestGyp
import os
# This is a regression test for the make generator only.
test = TestGyp.TestGyp(formats=['make'])
# Lock the source tree read-only so the generator must write to gypfiles/.
test.writable(test.workpath('src'), False)
toplevel_dir = os.path.basename(test.workpath())
test.run_gyp(os.path.join(toplevel_dir, 'src', 'prog1.gyp'),
             '-Dset_symroot=1',
             '--generator-output=gypfiles',
             depth=toplevel_dir,
             chdir='..')
# The generated build still needs to write object files under src/.
test.writable(test.workpath('src/build'), True)
test.writable(test.workpath('src/subdir2/build'), True)
test.writable(test.workpath('src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='gypfiles')
chdir = 'gypfiles'
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
# NOTE(review): the format is forced to 'make' above, so these 'xcode'
# branches can never fire — dead code inherited from the test template.
if test.format == 'xcode':
    chdir = 'src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
    chdir = 'src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
    chdir = 'src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
|
#!/usr/bin/env python
#coding:utf-8
'''
用来协助调试发送邮件
Test data
Let's populate the database with the example.org domain, a john@example.org email account and a forwarding of
jack@example.org to john@example.org. Open a MySQL shell and issue the following SQL queries:
INSERT INTO `mailserver`.`virtual_domains` (
`id` ,
`name`
)
VALUES (
'1', 'example.org'
);
INSERT INTO `mailserver`.`virtual_users` (
`id` ,
`domain_id` ,
`password` ,
`email`
)
VALUES (
'1', '1', MD5( 'summersun' ) , 'john@example.org'
);
INSERT INTO `mailserver`.`virtual_aliases` (
`id`,
`domain_id`,
`source`,
`destination`
)
VALUES (
'1', '1', 'jack@example.org', 'john@example.org'
);
Postfix/Database configuratio
'''
import time
import sqlite3
import os.path
import os
import random
from smtplib import SMTP
from poplib import POP3
import threading
import sys
import csv
import web
import getpass
import signal
import sys
import base64
import email
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.Utils import parseaddr, formataddr
from email import Encoders
from email import parser
from email.header import Header
# Send a single authenticated test mail through the local test server.
# (Python 2 script: `except Exception, error` syntax.)
smtp = '192.168.56.101'
email_from = 'john@example.org'
passwd = 'summersun'
email_to = 'zjfhappy@126.com'
msg = time.time()  # body is the current unix timestamp (easy to correlate in logs)
sendSvr = SMTP(smtp, 25)
sendSvr.ehlo()
# Force LOGIN/PLAIN auth even if the server does not advertise AUTH on port 25.
sendSvr.esmtp_features["auth"] = "LOGIN PLAIN"
try:
    sendSvr.login(email_from, passwd)
except Exception, error:
    # NOTE(review): Exception takes no 'message' keyword — this re-raise
    # itself raises TypeError when login fails; likely meant
    # `raise Exception(error)`.
    raise Exception(message = error)
sendSvr.sendmail(email_from, email_to, str(msg))
|
from scapy.all import *
import subprocess
from flux_led import WifiLedBulb, BulbScanner
import datetime
import os
import sys
version = "1.0.3 " # assign version number for debugging
bulb = WifiLedBulb('192.168.XX.XX') # assign bulb IP
#bulb.refreshState() # refresh state to collect accurate staus
logfile = r"/home/pi/SANDBOX/Dash/logfiles.log" # name of my log file
WHITE_MAC_ADDRESS = 'XX:XX:XX:XX:XX:XX' # enter Dash Button's MAC Address here.  NOTE(review): unused — superseded by dashButton below
testMAC = 'XX:XX:XX:XX:XX:XX'  # NOTE(review): unused
# MAC address of each Amazon Dash button, keyed by the bulb mode it toggles.
dashButton = {
    "whiteDash" : 'XX:XX:XX:XX:XX:XX',
    "redDash" : 'XX:XX:XX:XX:XX:XX'
}
whiteBulb = (0,0,0,191)  # getRgbw() tuple observed when warm white is on
redBulb = (15,0,0,0)     # getRgbw() tuple observed when dim red is on
#sniff for packets sent from button press
def detect_button(pkt):
    """Sniff callback: toggle the bulb when a known Dash button sends DHCP.

    A Dash button broadcasts DHCP when pressed; if the packet's source MAC
    matches an entry of `dashButton`, the bulb is switched:
      * pressing the button for the currently-active color turns it off,
      * pressing the other button switches to that button's color,
      * from any other state a press simply sets that button's color.

    :param pkt: scapy packet delivered by sniff()
    """
    # Bug fix: time.sleep() is used below but the module header never
    # imports time, so the original raised NameError on the first press.
    import time

    for mac in dashButton:
        if not (pkt.haslayer(DHCP) and pkt[Ether].src == dashButton[mac]):
            continue
        write_log("Button pressed: ", logfile)
        bulb.refreshState()
        currentColor = bulb.getRgbw()
        if currentColor == whiteBulb:
            if mac == "whiteDash":
                write_log("White bulb shut off: ", logfile)
                bulb.setRgb(0,0,0)
            elif mac == "redDash":
                write_log("Red bulb set: ", logfile)
                bulb.setRgb(15,0,0)
            else:
                continue
        elif currentColor == redBulb:
            if mac == "redDash":
                write_log("Red bulb shut off: ", logfile)
                bulb.setRgb(0,0,0)
            elif mac == "whiteDash":
                write_log("White bulb set: ", logfile)
                bulb.setWarmWhite(75)
            else:
                continue
        else:
            # Bulb is off or in an unknown state: a press sets that
            # button's color directly.
            if mac == "redDash":
                write_log("Red bulb set: ", logfile)
                bulb.setRgb(15,0,0)
            elif mac == "whiteDash":
                write_log("White bulb set: ", logfile)
                bulb.setWarmWhite(75)
            else:
                continue
        # Common tail (was duplicated in every branch): re-sync state and
        # debounce the physical button.
        bulb.refreshState()
        time.sleep(2)
    return
#record actions to logfile for later debugging
def write_log(text, file):
    """Append *text* followed by a timestamp to logfile *file*, one entry per line.

    :param text: message prefix (timestamp is appended directly after it)
    :param file: path of the log file; created on first write
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
    # Bug fix: the original opened the file without ever closing it,
    # leaking a file handle on every call; 'with' guarantees the handle
    # is closed even if the write fails.
    with open(file, 'a') as f:
        f.write("{}\n".format(text + timestamp))
    return
write_log("Running version:"+version, logfile)
#loops forever sniffing traffic and call the detect function
#Sniff can run forever but would hit two packets at a time.
#Using While loop and 1 packet filter to loop forever but only grab first packet
# Filter: DHCP traffic only (UDP ports 67/68), one packet per sniff() call.
while True:
    sniff( count=1, prn=detect_button, filter="(udp and (port 67 or 68))", store=0)
|
import pyglet
from pyglet.gl import *
from gauge import gauge_parent
import common
import variable
import math, time
import text
class gauge_c(gauge_parent):
    """Attitude indicator (artificial horizon) gauge rendered with pyglet/GL.

    Draws a roll/pitch horizon (sky over earth), pitch ladder marks, a bank
    scale with roll pointer and slip indicator, and a fixed center "V" mark.
    Pitch and roll are read from `variable.variables` channels.
    (Python 2 era code: several calls rely on integer division of len()/2.)
    """
    def __init__(self, *args, **kwds):
        super(gauge_c, self).__init__(*args, **kwds)
        # Native drawing size in pixels.
        self.x = 300
        self.y = 300
        self.set_native_size(self.x, self.y)
        self.earth = (0.69, 0.4, 0.0) #The ground color on attitude indicator
        self.sky = (0.0, 0.6, 0.8)
        self.pixel_per_degree = 7.5
        self.load_batch()
        #Init Variables
        self.a = 0.0
        # Pitch/roll data channels; '4F' presumably selects a 4-byte float —
        # TODO confirm against variable.variables.load().
        self.pitch = variable.variables.load(0x126,'4F')
        self.roll = variable.variables.load(0x125, '4F')
        #self.pitch = 8.5
        #pyglet.clock.schedule_interval(self.fade_in, .03)
        #print self.pitch2.hex
        #pyglet.clock.schedule_interval(self.update, 1.0/30.0)
        #pyglet.clock.set_fps_limit(30.0)

    def load_batch(self):
        """Pre-build every static vertex batch used by draw()."""
        self.bV_shape = self.Center_Mark_V()
        self.static_triangle_shape = self.static_triangles()
        self.white_triangle, self.yellow_triangle = self.dynamic_triangle()
        self.white_slip, self.yellow_slip = self.slip_indicator()

    def dynamic_triangle(self):
        """Build the roll pointer triangle in two variants: white outline
        (normal) and solid yellow (excessive bank)."""
        radius = 120.0
        size = 8.0
        triangle = common.vertex.lines()
        #Draw actually traingle
        triangle.add([0.0,radius])
        triangle.add([size, radius - size *2])
        triangle.add([-size, radius - size *2])
        triangle.add([0.0,radius])
        #Create batch white triangle
        w_batch = pyglet.graphics.Batch()
        v1 = w_batch.add(len(triangle.points)//2, GL_LINES, None, ('v2f', triangle.points),('c3f',common.color.white*(len(triangle.points)//2)))
        #Create batch solid yellow triangle
        y_batch = pyglet.graphics.Batch()
        v1 = y_batch.add(len(triangle.points)//2, GL_LINES, None, ('v2f', triangle.points),('c3f',common.color.yellow*(len(triangle.points)//2)))
        v2 = y_batch.add(len(triangle.points)//2, GL_POLYGON, None, ('v2f', triangle.points),('c3f',common.color.yellow*(len(triangle.points)//2)))
        return w_batch, y_batch

    def slip_indicator(self):
        """Build the slip-indicator rectangle just below the roll pointer,
        again as white-outline and solid-yellow batches."""
        radius = 120.0
        size = 8.0
        top = radius - size * 2 -1.0
        rect = common.vertex.lines()
        rect.add([-size, top])
        rect.add([-size, top -7.0])
        rect.add([size, top -7.0])
        rect.add([size, top])
        rect.add([-size, top])
        w_batch = pyglet.graphics.Batch()
        y_batch = pyglet.graphics.Batch()
        v1 = y_batch.add(len(rect.points)//2, GL_LINES, None, ('v2f', rect.points),('c3f',common.color.yellow*(len(rect.points)//2)))
        v3 = y_batch.add(len(rect.points)//2, GL_POLYGON, None, ('v2f', rect.points),('c3f',common.color.yellow*(len(rect.points)//2)))
        v2 = w_batch.add(len(rect.points)//2, GL_LINES, None, ('v2f', rect.points),('c3f',common.color.white*(len(rect.points)//2)))
        return w_batch, y_batch

    def static_triangles(self):
        """Build the fixed bank-angle scale: ticks at 10/20/30/60 degrees,
        triangles at 45 degrees, and the upright reference triangle."""
        radius = 120.0
        #glLineWidth(1.5*)
        def bank_ticks(dir):
            # dir is +1 for the left side, -1 for the right side.
            # NOTE: closes over `triangles`, which is assigned below before
            # bank_ticks() is first called.
            def tick(deg, size):
                #glBegin(GL_LINES)
                l = []
                l.extend(common.xycalc.rotate(0.0, radius + size, deg))
                #glVertex2f(0.0, radius + 12.0)
                l.extend(common.xycalc.rotate(0.0, radius, deg))
                #glVertex2f(0.0, radius)
                #glEnd()
                return l
            def triang(deg):
                size = 5.0
                l = []
                l.extend(common.xycalc.rotate(0.0, radius, deg))
                l.extend(common.xycalc.rotate(size, radius+ size *2, deg))
                l.extend(common.xycalc.rotate(-size, radius+ size *2, deg))
                l.extend(common.xycalc.rotate(0.0, radius, deg))
                return l
            #10 deg shorttick
            triangles.reset()
            theta = dir * 10.0
            triangles.add(tick(theta, 12.0))
            #20 deg shorttick
            triangles.reset()
            theta = dir * 20.0
            triangles.add(tick(theta, 12.0))
            #30 deg long tick
            triangles.reset()
            theta = dir * 30.0
            triangles.add(tick(theta, 25.0))
            #45 dig triangle
            triangles.reset()
            theta = dir * 45.0
            triangles.add(triang(theta))
            #60 dig long tick
            triangles.reset()
            theta = dir * 60.0
            triangles.add(tick(theta, 25.0))
        #Draw Static triangles
        triangles = common.vertex.lines()
        #Center triangle
        size = 8.0
        triangles.add([0.0,radius, size, radius+ size *2, -size, radius + size *2, 0.0, radius])
        #Left side ticks
        bank_ticks(1)
        #Right side ticks
        bank_ticks(-1)
        #Create batch
        batch = pyglet.graphics.Batch()
        v1 = batch.add(len(triangles.points)//2, GL_LINES, None, ('v2f', triangles.points),('c3f',common.color.white*(len(triangles.points)//2)))
        return batch

    def Center_Mark_V(self): #This is one varent of the center mark
        """Build the fixed center mark: a black-filled, white-outlined "V"
        flanked by two small horizontal reference bars."""
        def V_shape(side):
            v = common.vertex.lines()
            v.add([0.0,0.0])
            v.add([side * 40.0, -30.0])
            v.add([side * 80.0, -30.0])
            v.add([0.0,0.0])
            return v.points, v.list
        def Rect(side):
            l = []
            l.extend([-side * 106.0, 2.0])
            l.extend([-side * 106.0, -2.0])
            l.extend([-side * 126.0 , -2.0])
            l.extend([-side * 126.0, 2.0])
            l.extend([-side * 106.0, 2.0])
            return l
        v_points = []
        v_points.extend(V_shape(-1)[0])
        v_points.extend(V_shape(1)[0])
        #print v_points
        v_list = []
        v_list.extend(V_shape(-1)[1])
        v_list.extend(V_shape(1)[1])
        rects = common.vertex.lines()
        rects.add(Rect(-1))
        rects.reset()
        rects.add(Rect(1))
        # Ordered groups: polygon fill first (0), outlines on top (1).
        fg = pyglet.graphics.OrderedGroup(1)
        fg2= pyglet.graphics.OrderedGroup(2)
        poly = pyglet.graphics.OrderedGroup(0)
        batch = pyglet.graphics.Batch()
        v2 = batch.add(8, GL_POLYGON, poly, ('v2f', v_list),('c3B',(0,0,0)*8))
        v1 = batch.add(len(v_points)//2, GL_LINES, fg, ('v2f', v_points),('c3B',(255,255,255)*(len(v_points)//2)))
        v3 = batch.add(len(rects.points)//2, GL_LINES, fg, ('v2f', rects.points),('c3B',(255,255,255)*(len(rects.points)//2)))
        return batch

    def update(self, dt):
        """Per-frame update hook; currently a no-op (test code commented out)."""
        pass
        #self.pitch += 0.01
        #self.a = 45.0
        #if self.a>180.0:
        #    self.a=-180.0
        #if self.a == -3.0:
        #    self.pitch +=1
        #self.a = 0.0
        #self.pitch = -10.0

    def draw(self):
        """Render the gauge: horizon, pitch ladder, then static overlays."""
        slope = self.draw_horizon(-self.roll.value,self.pitch.value)
        self.pitch_marks(-self.roll.value, self.pitch.value, 1.5*self.scale_lw)
        glLineWidth(1.5*self.scale_lw)
        self.bV_shape.draw()
        self.static_triangle_shape.draw()
        #time.sleep(0.03)
        #self.draw_border()
        #self.pitch.value += 0.2
        #self.roll.value += 0.8
        #if self.pitch.value > 30.0:
        #    self.pitch.value = -30.0
        #if self.roll.value >180.0:
        #    self.roll.value = -180.0

    def pitch_marks(self, roll, pitch, line_width):
        """Draw the pitch ladder (rotated by *roll*) plus the roll pointer
        and slip indicator, colored yellow past 30 degrees of bank."""
        def get_width(pitch):
            # Half-width of the ladder line for a given pitch value:
            # every 10 deg -> 30 (numbered), every 5 deg -> 15, else 5.
            x = int(round(pitch / 2.5))
            if x==0:
                w = 115 #Horizon is draw during in draw_horizon()
            elif (x % 4) == 0:
                w = 30
            elif (x % 2) ==0:
                w = 15
            else:
                w = 5
            return w
        glPushMatrix()
        #Rotate with roll
        glRotatef(roll,0,0,1)
        #Draw the pitch marks
        #Uses pitch to determine which pitch lines need to be drawn
        #pixel_per_degree = 7.25 Starts 12.5 degrees down and goes up 11 lines
        start_point = 12.5
        num_lines = 11
        glColor3f(1.0,1.0,1.0)
        glLineWidth(line_width)
        #pitch = pitch * -1
        #Round pitch to nearest 2.5 degrees
        y_center = round(pitch / 2.5) * 2.5
        offset = (y_center -pitch)
        y_center = y_center - start_point # Go down 25 degrees
        y = (offset - start_point) * self.pixel_per_degree
        #glTranslatef(0.0, start * pixel_per_degree, 0.0)
        point_l = []
        #print y_center, y, offset
        for i in range(num_lines):
            w = get_width(y_center)
            if w>0:
                #glBegin(GL_LINES)
                #glVertex2f(-w, 0.0)
                #glVertex2f(w, 0.0)
                #glEnd()
                point_l.extend([-w,y,w,y])
                if (w==30): #Draw number for degrees
                    glPushMatrix()
                    glTranslatef(35.0, y, 0.0) #Move over to right (Numbers only on right side)
                    glPushMatrix()
                    glScalef(0.12, 0.12, 1.0) #Scale down for numbers
                    text.write(str(int(abs(y_center))))
                    glPopMatrix()
                    glPopMatrix()
            y+=2.5 * self.pixel_per_degree
            #glTranslatef(0.0, 2.5 * pixel_per_degree, 0.0)
            y_center += 2.5
        # NOTE(review): len(point_l)/2 relies on Python 2 integer division;
        # under Python 3 this passes a float count to pyglet — confirm.
        pyglet.graphics.draw(len(point_l)/2, pyglet.gl.GL_LINES,
            ('v2f', point_l ))
        if abs(roll)> 30.0:
            # Excessive bank: solid yellow pointer and slip indicator.
            self.yellow_triangle.draw()
            #Translation for slip indicator
            glTranslatef(2,0,0)
            self.yellow_slip.draw()
        else:
            self.white_triangle.draw()
            #Translation for slip indicator
            glTranslatef(0,0,0)
            self.white_slip.draw()
        glPopMatrix()

    def draw_horizon(self, roll, pitch):
        """Fill the gauge with sky color, then draw the earth polygon below
        the horizon line implied by *roll*/*pitch*.  Returns the horizon
        slope (tan of roll) for use by the caller."""
        def check_side(p1, p2, slope, p3):
            #p1, and p2 will be corners of the Aritifical Horizon
            #p3 is point along horizon line
            #slope is slope of horizon line
            #checking if horizon line intersections a side of the artifical horizon
            p1x = p1[0]
            p1y = p1[1]
            p2x = p2[0]
            p2y = p2[1]
            p3x = p3[0]
            p3y = p3[1]
            if p1x == p2x: #Checking a vertical side
                vert = True
            else: #Must be horizontal side
                vert = False
            #If verticle solve horizon line where intersects p1x or p2x
            if vert:
                run = p2x - p3x
                y = (slope * run) + p3y
                #y = round(y,0)
                if ((p1y <= y <p2y) or (p2y < y <=p1y)): #Within check
                    return [p1x,int(y)]
                else:
                    return None
            else: #not vert
                run = p2y - p3y
                if slope ==0: return None
                x = (1.0/slope * run ) + p3x
                #x = round(x,0)
                if ((p1x <= x <p2x) or (p2x < x <=p1x)): #Within check
                    return [int(x),p1y]
                else:
                    return None
        y = int(-pitch * self.pixel_per_degree / 1)
        rad = math.radians(roll)
        slope = math.tan(rad)
        #Calculate "center" point of horizon
        p3 = [math.sin(-rad)*y, math.cos(-rad)*y]
        middle = [int(p3[0])+5,int(p3[1])+5,int(p3[0])-5,int(p3[1])-5]
        w = 150
        h = 150
        l_points = []
        horizon_cord = []
        corners = [[w,-h],[-w,-h],[-w,h],[w,h]]
        #Determine side intersection
        sides = [[corners[0],corners[1]],[corners[1],corners[2]],[corners[2],corners[3]],[corners[3],corners[0]]]
        side_count = 0
        side_hit = [] #List of side, xcord,ycord
        count = 0
        for side in sides:
            r = check_side(side[0],side[1],slope,p3)
            if r!=None:
                side_hit.append([count,r[0],r[1]])
                side_count +=1
            count +=1
        #Take two counts compare cordinates
        reverse = False
        if side_count == 2:
            # Order the two intersection points so the earth polygon winds
            # consistently for the current roll quadrant.
            sides_hit = [side_hit[0], side_hit[1]]
            if -45<=roll<=45: #Take right most point
                if side_hit[0][1] < side_hit[1][1]:
                    reverse=True
            elif 45<=roll<=135: #Take upper most point
                if side_hit[0][2] < side_hit[1][2]:
                    reverse=True
            elif -135<=roll<=-45: #Take lower most point
                if side_hit[0][2] > side_hit[1][2]:
                    reverse=True
            else: #Must be take left most point
                if side_hit[0][1] > side_hit[1][1]:
                    reverse=True
            if reverse:
                sides_hit = [side_hit[1], side_hit[0]]
            #Loope through
            i = sides_hit[0][0] #First side
            l_points.extend([sides_hit[0][1],sides_hit[0][2]])
            while i!=sides_hit[1][0]:
                i+=1
                if i==4: i=0
                l_points.extend(corners[i])
            #Last point
            l_points.extend([sides_hit[1][1],sides_hit[1][2]])
            horizon_cord.extend(l_points[:2])
            horizon_cord.extend(l_points[-2:])
        #All sky color then draw ground over it.
        pyglet.gl.glColor3f(*self.sky)
        #If pitch is negative and no sides intersect horizon, then screen needs to be all earth.
        if side_count == 0:
            if pitch<0:
                pyglet.gl.glColor3f(*self.earth)
        pyglet.graphics.draw(4, pyglet.gl.GL_POLYGON,
            ('v2i', (-w,-h,-w,h,w,h,w,-h)))
        #if (r_side<> l_side):
        #    ps = [lx2,ly2,rx2,ry2]
        #    horz_ps = ps
        #    c_index = r_side
        #    while c_index <> l_side:
        #        ps.append(corners[c_index][0])
        #        ps.append(corners[c_index][1])
        #        c_index+=1
        #        if c_index>3:
        #            c_index = 0
        #    ps.append(lx2)
        #    ps.append(ly2)
        #    #l_side = find_side((lx2,ly2))
        #    #r_side = find_side((rx2,ry2))
        #    #print ps
        pyglet.gl.glColor3f(*self.earth)
        # NOTE(review): len(l_points)/2 relies on Python 2 integer division;
        # when l_points is empty this draws a zero-vertex polygon.
        pyglet.graphics.draw(len(l_points)/2, pyglet.gl.GL_POLYGON,
            ('v2i', l_points))
        #    pyglet.gl.glColor3f(1,1,1)
        #pyglet.graphics.draw(len(ps)/2, pyglet.gl.GL_LINES,
        #    ('v2i', horz_ps ))
        #Draw horizon
        #pyglet.gl.glColor3f(1.0,1.0,1.0)
        #pyglet.graphics.draw(len(horizon_cord)/2, pyglet.gl.GL_LINES,
        #    ('v2i', horizon_cord ))
        return slope
        #self.a+=0.01
#self.a+=0.01
|
import scipy.stats as st
p = 0.2
for i in range(0, 10):
u = st.uniform(0, 1)
if u.rvs() < p:
print "bellow", p
|
#!/usr/bin/python
'''
Usage: jokebot.py [--debug] [-p <port>] [-H <host>]
Options:
-p, --port=<port> The port number on which to run Flask [default: 5000]
-H, --host=<host> The host to listen to [default: 127.0.0.1]
--debug Flag to determine debug mode [default: False]
'''
import json, requests, re, uuid
from collections import defaultdict
from flask import Flask, request, render_template
from docopt import docopt
from schema import Use, Schema
from random import choice, getrandbits, uniform
from redis import StrictRedis
# --- Module setup: Flask app, Redis joke store, canned Slack payloads ---
app = Flask(__name__, static_folder='_static')
# Persistent joke store: Redis keys "jokes:*" hold JSON-encoded joke dicts.
db = StrictRedis("localhost", 6379)
# Build the standard JokeBot Slack payload around a message text.
payload = lambda text: {"channel": "#jokestest", "username": "JokeBot", "text": text, "icon_emoji": ":ghost:"}
rimshot = {"channel": "#jokestest", "username": "RimshotBot", "text": "Ba-dum Tsh!", "icon_emoji": ":rimshot:"}
theJoke = {"channel": "#jokestest", "username": "ThatsTheJokeBot", "text": "That's the joke!!!", "icon_emoji": ":thatsthejoke:"}
# Initial selection weight for every joke; choose_joke() shrinks a joke's
# count each time it is told, so fresher jokes are favoured.
COUNT = 50*10**6
help_message = "*Hi! I'm Jokebot!* To hear a joke, just say my name. I'll also pipe in if I know jokes about the things you're talking about. If you'd like to tell me a joke, I'll add it to my collection: just say `jokebot, add this joke about TAG1, TAG2, TAG3: JOKE`. You may add as many tags as you like! Bye now! HOOOHOOHEEE HEE HAA HAAA lololololololololol"
# Canned laughs occasionally appended to a joke by add_laugh().
laughs = ["AHAHAHAHHAHAHAHAHAHAAA",
    "ROFLCOPTAH <--jokebot is from Boston",
    "OOOHEEE HAAAA HAAA HEEE HEE HOO",
    "tee hee hee",
    "Literally rolling on the floor laughing. Literally.",
    "guffaw guffaw",
    "*chortle chortle*",
    "HAHAHAHAH",
    "They should call me Mr. Funny because that was GREAT!",
    "lol, good one, amirite? >__< :D",
    "Oh ho ho ho ho!",
    "*emits filthy snicker*",
    "lollersk8zz brooo",
    "lololololololololol",
    "hahahahahahha"]
# Lead-in lines used when serving a joke on request.
messages = ["I've got a real knee-slapper for you!",
    "You're in luck, I've got just the thing.",
    "Here's a good one:",
    "I read this one on a candy wrapper!",
    "You better get your suture kit, because this one is so funny you'll be in stitches!",
    "Here's one of my favorites:",
    "I heard this one at the bar last night:",
    "My grandfather used to tell this one all the time. It was so embarassing!",
    "My wife always slaps me when I say this one:",
    "Try this bad-boy on for size:"]
# Users whose joke submissions are rejected by add_joke().
restricted_users = frozenset(["jshaak"])
# Tags that may never be attached to a joke (one per line in blacklist.txt).
with open('blacklist.txt') as f:
    blacklist = frozenset(f.read().split('\n'))
# In-memory tag index: tag -> list of joke dicts; '*' maps to every joke.
key_store = defaultdict(list)
joke_keys = db.keys("jokes:*")
for j in joke_keys:
    joke = json.loads(db.get(j))
    joke['count'] = COUNT #reset the selection weight at startup
    key_store['*'].append(joke)
    for tag in joke['tags']:
        key_store[tag].append(joke)
@app.route('/', methods=['POST'])
def hello_world():
    """Slack outgoing-webhook endpoint.

    Reacts to "i don't get it", occasionally rimshots slackbot, serves a
    joke when addressed as "jokebot" (or when any word matches a known
    tag), and dispatches "add this joke about ..." submissions.
    Returns a JSON payload string, or "" for no response.
    """
    orig = request.form['text']
    string = request.form['text'].lower()
    user = request.form['user_name']
    if string.strip(".!?(:)") == "i don't get it" or string == "i dont get it":
        return post_otherbot("theJoke")
    if user.lower() == "slackbot" and string != "ba-dum tsh!":
        # getrandbits(2) is 0..3, so this fires 1 time in 4 (the old
        # comment claimed 1 in 3).
        if not bool(getrandbits(2)):
            return post_otherbot("rimshot")
    elif user.lower() != "slackbot":
        word_array = [w.strip("!#$%&()*,-.:;?@^`~<>") for w in string.split(" ")]
        for w in word_array:
            if w == "jokebot":
                # Joke submission?
                if re.search(r'add this \w+ about', string):
                    return add_joke(orig, user)
                # Help request?
                for w in word_array:
                    if re.search(r"hel+p+", w):
                        return post_message(help_message)
                # Otherwise serve a random joke.
                return post_message(add_laugh("Did somebody ask for a joke? %s %s" % (choice(messages), choose_joke('*', key_store['*']))))
            elif w in key_store:
                return post_message(add_laugh("Did somebody say *%s*? Here's a joke about it! %s" % (w, choose_joke(w, key_store[w], user.lower()))))
    return ""
def add_laugh(joke):
    """Return *joke*, with a random canned laugh appended half the time."""
    if not bool(getrandbits(1)):
        return joke
    return "%s\n%s" % (joke, choice(laughs))
def post_message(message):
    """Serialise a JokeBot Slack payload carrying *message*."""
    body = payload(message)
    return json.dumps(body)
def post_otherbot(type):
    """Serialise the canned payload for the named helper bot ("rimshot"
    or "theJoke"); returns None for any other name."""
    canned = {"rimshot": rimshot, "theJoke": theJoke}.get(type)
    if canned is not None:
        return json.dumps(canned)
def choose_joke(tag, list_of_jokes, user=None):
    """Pick a joke by weighted random choice and persist its new weight.

    Each joke's 'count' is its selection weight; after being told, the
    weight is divided by 1.653 so recently-told jokes become less likely,
    the per-tag counter in 'log_data' is bumped, and the joke is written
    back to Redis.  Returns the joke text, or None for an empty list.
    (*user* is accepted for interface compatibility but unused.)

    Fixes: removed a leftover profanity debug print, and float round-off
    at the end of the cumulative walk now falls back to the last joke
    instead of silently returning None.
    """
    if not list_of_jokes:
        return None
    total = sum(i['count'] for i in list_of_jokes)
    r = uniform(0, total)
    upto = 0
    joke = list_of_jokes[-1]  # fallback guards against float round-off
    for candidate in list_of_jokes:
        upto += candidate['count']
        if upto >= r:
            joke = candidate
            break
    joke['count'] /= 1.653 #scientifically proven to be the funniest ratio
    joke['log_data'][tag] += 1
    db.set(joke['id'], json.dumps(joke))
    return joke['joke']
def add_joke(jokeString, user):
    """Parse "add this joke about TAG[, TAG...]: JOKE" and store the joke.

    Valid tags are purely alphabetic, at least 3 characters, and not on
    the blacklist.  The joke is persisted to Redis and indexed into
    key_store.  Returns a Slack JSON payload describing the outcome.

    Fixes: a submission without an "about ...:" section used to raise
    AttributeError (re.search returns None); it now gets the same
    failure response as a submission with no valid tags.
    """
    if user.lower() in restricted_users:
        return post_message("Sorry, %s, but I don't like your jokes." % user)
    text = re.search(r"about(.*?):(.*)", jokeString, flags=(re.S | re.I))
    if text is None:
        # Malformed request -- don't crash the route handler.
        return post_message("you dun goofed bro")
    joketext = text.group(2)
    tags = set([s.strip().lower() for s in re.split(r"\s*,\s*", text.group(1))])
    tags_good = set([s for s in tags if s.isalpha()
                     and len(s) >= 3
                     and s not in blacklist])
    tags_bad = tags - tags_good
    if tags_good:
        ident = "jokes:%s" % str(uuid.uuid4())
        joke = {
            'id': ident,
            'joke': joketext,
            'tags': list(tags_good),
            'count': COUNT,
            'log_data': dict([(t, 0) for t in tags_good] + [('*', 0)])
        }
        db.set(ident, json.dumps(joke))
        # key_store is a defaultdict(list), so plain indexing suffices.
        for tag in joke['tags']:
            key_store[tag].append(joke)
        key_store['*'].append(joke)
        if tags_bad:
            return post_message("Joke added successfully! The following tags were ignored: %s" % ", ".join(tags_bad))
        else:
            return post_message("Joke added successfully! that was sooooooooooo funnnnnnyyyyyyy")
    return post_message("you dun goofed bro")
@app.route('/plotdata', methods=['GET', 'POST'])
def plot():
    """Serve the joke-statistics plot page."""
    return render_template('plot.html')
if __name__ == '__main__':
    # Validate/coerce the docopt CLI arguments, then start Flask.
    args = Schema({'--host': Use(str), '--port': Use(int), '--debug': Use(bool)}).validate(docopt(__doc__))
    app.run(host=args['--host'], port=args['--port'], debug=args['--debug'])
|
import sys
sys.path.append('.')
import torch
import torch.nn as nn
from ModelWrapper import *
from functions import get_para
from libs.random_fault import *
def train_test():
    """Train the VGG model on CIFAR-100 per configs/cfg.yaml, then print
    its verification accuracy."""
    Conf.load(filename="configs/cfg.yaml")
    net = ModelWrapper(net_name='VGG', dataset_name='cifar100')
    net.train()
    _, acc = net.verify()
    print(acc)
def vgg_inject():
    """Print baseline accuracy of a VGG/CIFAR-100 model, inject random
    faults into 1/8 of the weights, and print accuracy again."""
    Conf.load(filename="configs/cfg.yaml")
    # train.resume=True so ModelWrapper picks up the existing checkpoint
    # instead of training from scratch -- TODO confirm in ModelWrapper.
    Conf.set("train.resume", True)
    net = ModelWrapper(net_name='VGG',dataset_name='cifar100')
    res1, acc = net.verify()
    print(acc)
    fault_model = RandomFault(frac=1/8)
    net.weight_inject(fault_model)
    res2, acc = net.verify()
    print(acc)
    # print(res1 == res2)
if __name__ == '__main__':
    # Default entry point trains; switch to vgg_inject() for fault injection.
    train_test()
    # vgg_inject()
|
from SignalGenerationPackage.SignalTransformer import SignalTransformer
from SignalGenerationPackage.Point import Point
class ExperimentScheduleTransformer(SignalTransformer):
    """Marks which points of the generated schedule signal should actually
    be transmitted to the device."""

    def __init__(self, SignalData):
        super().__init__(SignalData)

    # overridden
    def TransformSignal(self):
        """Copy SignalData.point_array into transformed_point_array,
        flagging only the start of each plateau (even index) for sending;
        the final point is never sent.

        NOTE(review): the original (Russian) comments said the *first*
        point should also be skipped, but the code sends it (index 0 is
        even) -- behaviour kept as-is; confirm against the device protocol.
        """
        source = self.SignalData.point_array
        target = self.SignalData.transformed_point_array
        last = len(source) - 1
        for idx, pt in enumerate(source):
            send = idx != last and idx % 2 == 0
            target.append(Point(x=pt.x, y=pt.y, to_send=send))
|
import numpy as np
def accuracy_score(y_true: np.ndarray, y_pred: np.ndarray):
    """Fraction of positions where y_true and y_pred agree."""
    matches = int(np.count_nonzero(y_true == y_pred))
    return matches / len(y_true)
def sn_sp(y_true: np.ndarray, y_pred: np.ndarray):
    """Return (sensitivity, specificity).

    Inputs must use 1 for the positive class and any other value (e.g. -1)
    for the negative class, per the original Chinese note.

    Fix: confusion-matrix cells are set INTERSECTIONS, not unions -- e.g.
    a true positive is an index that is positive in y_true AND y_pred.
    The original used set.union, which double-counted everything.
    """
    t, f = set(np.where(y_true == 1)[0]), set(np.where(y_true != 1)[0])
    p, n = set(np.where(y_pred == 1)[0]), set(np.where(y_pred != 1)[0])
    tp, fp, tn, fn = len(t & p), len(f & p), len(f & n), len(t & n)
    return tp / (tp + fn), tn / (tn + fp)
def tpr_fpr(y_true: np.ndarray, y_pred: np.ndarray):
    """Return (true-positive rate, false-positive rate).

    Labels: 1 is positive, anything else negative.

    Fix: the cells are set INTERSECTIONS (tp = positives predicted
    positive, etc.); the original used set.union, giving wrong counts.
    """
    t, f = set(np.where(y_true == 1)[0]), set(np.where(y_true != 1)[0])
    p, n = set(np.where(y_pred == 1)[0]), set(np.where(y_pred != 1)[0])
    tp, fp, tn, fn = len(t & p), len(f & p), len(f & n), len(t & n)
    return tp / (tp + fn), fp / (tn + fp)
|
#!/usr/bin/env python
# -----------------------------------------------------------------------
# runserver.py
# Author: Sophie Li, Connie Xu, Jayson Wu
# -----------------------------------------------------------------------
from sys import argv, exit, stderr
from rebook import app
def main(argv):
    """Start the rebook Flask app on localhost at the port in argv[1].

    Exits with status 1 (after a message on stderr) when the argument
    count is wrong or the port is not an integer.
    """
    if len(argv) != 2:
        print('Usage: ' + argv[0] + ' port', file=stderr)
        exit(1)
    try:
        port = int(argv[1])
    except ValueError:
        # int() raises ValueError for non-numeric input; keep the catch
        # narrow so unrelated bugs are not mislabelled as a bad port.
        print('Port must be an integer.', file=stderr)
        exit(1)
    app.run(host='localhost', port=port, debug=True)
if __name__ == '__main__':
    # Pass the full argv so main() can report argv[0] in its usage line.
    main(argv)
|
import maya.cmds as cmds
class Toolbox():
    """amToolbox main window: a column of buttons that launch the other
    rigging/utility tool windows.  Instantiating the class builds and
    shows the window immediately."""
    def __init__(self):
        # Window name doubles as the Maya UI identifier used by delete().
        self.myWin = 'amToolbox'
        self.createWin()
    def createWin(self):
        """(Re)build the toolbox window with one button per tool."""
        self.delete() #remove any previous instance so UI names don't clash
        self.myWin = cmds.window(self.myWin, title='amToolbox')
        self.myColl = cmds.columnLayout(parent=self.myWin, adjustableColumn=True)
        cmds.button(label="Duplicate Selected", parent=self.myColl, command=lambda x:cmds.duplicate())
        cmds.button(label="Create Locator Window", parent=self.myColl, command=lambda x: self.locatorWind())
        cmds.button(label="Control Creator Window", parent=self.myColl, command=lambda x: self.CtrlWind())
        cmds.button(label="Parent Constraint W/ Options Window", parent=self.myColl, command=lambda x: self.ConstraintWind())
        cmds.button(label="Renaming Window", parent=self.myColl, command=lambda x: self.RenameWind())
        cmds.button(label="Randomizer Window", parent=self.myColl, command=lambda x: self.RandomWind())
        cmds.showWindow(self.myWin)
    def delete(self):
        """Close the toolbox window if it already exists."""
        if cmds.window(self.myWin, q=True, exists=True): cmds.deleteUI(self.myWin)
    def DuplicateWind(self):
        # NOTE(review): cmds.duplicate is referenced but never CALLED here
        # (missing parentheses), and no button is wired to this method --
        # dead code; confirm before removing.
        cmds.duplicate
    # The tool modules below are imported lazily so Maya only loads a
    # tool when its button is actually pressed.
    def locatorWind(self):
        import CreateLocator
        LocatorTool = CreateLocator.Locators()
        LocatorTool.windowMaker()
    def CtrlWind(self):
        import FinalControlCreator
        ControlCreate = FinalControlCreator.ControlsSir()
        ControlCreate.ControlCreation()
    def RenameWind(self):
        import Rename
        Renamer = Rename.RenameUI()
        Renamer.winCreat()
    def RandomWind(self):
        import RandomizerSelf
        Randomize = RandomizerSelf.randoValuez()
        Randomize.RandomValues()
    def ConstraintWind(self):
        import ParentScale
        ParentNScale = ParentScale.PSWindow()
        ParentNScale.Create()
# Build and show the toolbox as soon as this script runs.
Toolbox()
|
def solution(spell, dic):
    """Return 1 when some word in *dic* uses each of its letters exactly
    once and covers the same letter set as *spell*; otherwise 2."""
    target = set(spell)
    for word in dic:
        letters = list(word)
        no_repeats = len(letters) == len(set(letters))
        if no_repeats and set(letters) == target:
            return 1
    return 2
|
# Read one line and print its characters in sorted order, no trailing newline.
s = input()
for ch in sorted(s):
    print(ch, end="")
|
def validar_entrada1( texto ):
    """Return *texto* normalised through long() (integer validation), or
    "" when it is not a valid integer.

    NOTE(review): `long` exists only in Python 2; under Python 3 this
    raises NameError (not caught here) instead of returning "".
    """
    try:
        return str(long(texto))
    except ValueError:
        return ""
def validar_entrada2( texto ):
    """Return str(float(texto)) -- the input normalised as a float --
    or "" when texto is not a valid number."""
    try:
        value = float(texto)
    except ValueError:
        return ""
    return str(value)
def pelas_a_euros( entrada ): # receives the text the user typed
    """Convert a peseta amount (as text) to euros, formatted with two
    decimals (fixed rate: 166.386 pesetas per euro)."""
    euros = float(entrada) / 166.386
    return "%.2f" % euros
def euros_a_pelas( entrada ): # receives the text the user typed
    """Convert a euro amount (as text) to whole pesetas (truncated),
    using the fixed rate of 166.386 pesetas per euro."""
    pesetas = float(entrada) * 166.386
    return "%d" % pesetas
|
import boto3
import sys
# Build a minimal AWS network: VPC + subnet + route table + internet
# gateway, wired together with a default route to the internet.
# CLI args: <region> <access_key> <secret_key>
region = sys.argv[1]
access_key = sys.argv[2]
secret_key= sys.argv[3]
client = boto3.client('ec2',
    region_name = region,
    aws_access_key_id = access_key,
    aws_secret_access_key = secret_key)
# VPC covering 192.168.0.0/16
myvpc = client.create_vpc(
    CidrBlock='192.168.0.0/16',
    InstanceTenancy='default',
    TagSpecifications=[{'ResourceType': 'vpc','Tags': [{'Key': 'Name','Value': 'Ktexperts-VPC'},]},])
print("My VPC ID :")
print(myvpc['Vpc']['VpcId'])
# Subnet in ap-south-1a inside the new VPC
mysubnet = client.create_subnet(
    CidrBlock='192.168.1.0/24',
    VpcId=myvpc['Vpc']['VpcId'],
    AvailabilityZone='ap-south-1a',
    TagSpecifications=[{'ResourceType': 'subnet','Tags': [{'Key': 'Name','Value': 'MySubnet'},]},])
print("My Subnet ID :")
print(mysubnet['Subnet']['SubnetId'])
# Route table for the VPC (tag value 'Ktexpers-RT' kept as-is)
myroutetable=client.create_route_table(
    VpcId=myvpc['Vpc']['VpcId'],
    TagSpecifications=[{'ResourceType': 'route-table','Tags': [{'Key': 'Name','Value': 'Ktexpers-RT'},]},])
print("My Route Table ID :")
print(myroutetable['RouteTable']['RouteTableId'])
myigw=client.create_internet_gateway(
    TagSpecifications=[{'ResourceType': 'internet-gateway', 'Tags': [{'Key': 'Name','Value': 'Ktexperts-IGW'},]},])
print("My Internet Gateway :")
print(myigw['InternetGateway']['InternetGatewayId'])
# Associate the subnet with the route table
Mysubnetassociate=client.associate_route_table(
    RouteTableId=myroutetable['RouteTable']['RouteTableId'],
    SubnetId=mysubnet['Subnet']['SubnetId'])
print("Subnet Associated")
# Attach the internet gateway to the VPC
Myigwattach=client.attach_internet_gateway(
    InternetGatewayId=myigw['InternetGateway']['InternetGatewayId'],
    VpcId=myvpc['Vpc']['VpcId'])
# Default route (0.0.0.0/0) out through the internet gateway
Myrtentry = client.create_route(
    DestinationCidrBlock='0.0.0.0/0',
    RouteTableId=myroutetable['RouteTable']['RouteTableId'],
    GatewayId=myigw['InternetGateway']['InternetGatewayId'])
print("Route entry entered")
|
import unittest
from simulator.schedulers import SJF
from simulator.tests.base import SchedulerTest
from simulator.tests.test_utils import create_processes
class SJFTest(unittest.TestCase, SchedulerTest):
    """Tests for the SJF (shortest job first) scheduler."""
    def setUp(self):
        # Fresh scheduler instance per test.
        self.scheduler = SJF()
    def tearDown(self):
        self.scheduler.reset()
    def test_simple_example(self):
        """Three processes; expects the schedule as (start_time, pid)
        pairs plus the average waiting time.

        NOTE(review): tuple field order for create_processes is assumed
        to be (pid, arrival, burst) -- confirm in test_utils.
        """
        processes = create_processes(
            (1, 0, 2),
            (2, 1, 10),
            (3, 2, 1))
        expected_schedule = [
            (0, 1),
            (2, 2),
            (12, 3)]
        expected_avg_waiting_time = 3.67
        expected = {
            'schedule': expected_schedule,
            'avg_waiting_time': expected_avg_waiting_time
        }
        self.assert_process_schedule(processes, expected)
    def test_assignment_input(self):
        """Runs the test using the given input for the assignment."""
        processes = self.get_assignment_input()
        expected = {}
        expected['schedule'] = [
            (0, 0),
            (9, 1),
            (17, 2),
            (19, 3),
            (30, 3),
            (35, 2),
            (41, 1),
            (43, 0),
            (60, 2),
            (67, 1),
            (70, 3),
            (78, 0),
            (90, 1),
            (100, 0),
            (110, 2),
            (119, 3)]
        expected['avg_waiting_time'] = 7.12
        self.assert_process_schedule(processes, expected)
|
#To find the average of five numbers
a=input("__Enter first number = ")
b=input("__Enter second number = ")
c=input("__Enter third number = ")
d=input("__Enter fourth number = ")
e=input("__Enter fifth number = ")
av=(a+b+c+d+e)/5
print"____________The average is = ",av
|
import urllib2
def download(url):
    """Fetch *url* and return the response body, or None on failure.

    Python 2 / urllib2: URLError (and its HTTPError subclass) carries the
    failure cause in .reason, which is echoed to stdout.
    """
    print 'Downloading: ', url
    try:
        html = urllib2.urlopen(url).read()
    except urllib2.URLError as e:
        print 'Download error: ', e.reason
        html = None
    return html
#download('http://httpstat.us/500');
|
from gmssl import sm3, func
if __name__ == '__main__':
    # Hash "abc" with SM3 (Chinese national hash standard);
    # func.str_to_list converts the string into the input form sm3_hash expects.
    y = sm3.sm3_hash(func.str_to_list("abc"))
    print(y)
|
from django.db import models
from django.core.exceptions import FieldDoesNotExist
from django.http import Http404
from django.db.models import Q
from django.urls import reverse
from .utils import unique_slug_generator, random_string_generator
class ProductQuerySet(models.query.QuerySet):
    """QuerySet adding a free-text search over product fields."""

    def search(self, query):
        """Case-insensitive substring search across title, price,
        description, category and product-class fields.  An empty query
        returns the queryset unchanged."""
        if not query:
            return self
        lookups = (
            Q(title__icontains=query)
            | Q(price__icontains=query)
            | Q(description__icontains=query)
            | Q(category__name__icontains=query)
            | Q(product_class__name__icontains=query)
            | Q(product_class__sex__icontains=query)
        )
        return self.filter(lookups).distinct()
class ProductManager(models.Manager):
    """Manager exposing search and slug lookups for Product."""

    def get_queryset(self):
        return ProductQuerySet(self.model, using=self.db)

    def search(self, query):
        """Delegate free-text search to ProductQuerySet.search()."""
        return self.get_queryset().search(query)

    def get_product_by_slug(self, slug):
        """Return the product with *slug*, or None when it does not exist.

        Fix: QuerySet.get() never returns a falsy value -- it raises
        DoesNotExist -- so the original `if query:` else-branch was
        unreachable and a missing slug propagated an exception instead
        of returning None.  (MultipleObjectsReturned still propagates.)
        """
        try:
            return self.get_queryset().get(slug=slug)
        except self.model.DoesNotExist:
            return None

    def sort_product_by_timestamp(self):
        # Newest products first.
        return self.get_queryset().order_by('-timestamp')
class Product(models.Model):
    """A sellable item, searchable via ProductManager/ProductQuerySet."""
    title = models.CharField(max_length=120)
    price = models.DecimalField(decimal_places=2, default=99.99, max_digits=12)
    description = models.TextField(max_length=500)
    category = models.ManyToManyField('Category', blank=True)
    # Product "class" (type name + sex), see ProductType.__str__.
    product_class = models.ForeignKey(
        'ProductType', related_name='product_class',
        blank=False, null=True, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    # Filled automatically on first save by the pre_save signal below.
    slug = models.SlugField(blank=True, null=True)
    objects = ProductManager()
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """URL of this product's detail page, keyed by slug."""
        return reverse('detail', kwargs={'slug': self.slug})
class Category(models.Model):
    """Named grouping for products and product types."""
    name = models.CharField(max_length=120)
    # Filled automatically on first save by the pre_save signal below.
    slug = models.SlugField(blank=True, null=True)
    class Meta:
        verbose_name_plural = 'categories'
        ordering = ['name']
    def __str__(self):
        return self.name
    @property
    def title(self):
        # Alias so templates/helpers expecting .title also work here.
        return self.name
    def get_absolute_url(self):
        """URL of this category's product listing, keyed by slug."""
        return reverse('products:category', kwargs={'slug': self.slug})
class ProductType(models.Model):
    """A product class (name) qualified by sex; unique per (name, sex)."""
    name = models.CharField(max_length=120)
    sex = models.CharField(max_length=120)
    category = models.ManyToManyField('Category', blank=True)

    class Meta:
        unique_together = (('name', 'sex'),)

    def __str__(self):
        # "name -> sex" when sex is set, otherwise just the name.
        if self.sex:
            return ' -> '.join((self.name, self.sex))
        return self.name
def product_pre_save(sender, instance, *args, **kwargs):
    # Generate a unique slug on first save only, so existing slugs (and
    # the URLs built from them) never change.
    if not instance.slug:
        instance.slug = unique_slug_generator(instance)
# Auto-slug both Product and Category just before they are saved.
models.signals.pre_save.connect(product_pre_save, sender=Product)
models.signals.pre_save.connect(product_pre_save, sender=Category)
|
"""empty message
Revision ID: 1cb5073ef9b1
Revises: fd0fb00bfd08
Create Date: 2019-06-02 23:57:07.332506
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1cb5073ef9b1'
down_revision = 'fd0fb00bfd08'
branch_labels = None
depends_on = None
def upgrade():
    """Forward migration: create message_db, drop the Days table, and
    rename users.forth -> users.fourth (typo fix done as add + drop, so
    existing 'forth' values are NOT carried over)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('message_db',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('first_name', sa.Text(), nullable=True),
    sa.Column('last_name', sa.Text(), nullable=True),
    sa.Column('message', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.drop_table('Days')
    op.add_column('users', sa.Column('fourth', sa.Integer(), nullable=True))
    op.drop_column('users', 'forth')
    # ### end Alembic commands ###
def downgrade():
    """Reverse migration: restore users.forth, recreate the Days table,
    and drop message_db (data in the dropped columns/tables is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('forth', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_column('users', 'fourth')
    op.create_table('Days',
    sa.Column('id', sa.DATE(), autoincrement=False, nullable=False),
    sa.Column('first_AM', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('first_PM', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('second', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('third', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('fourth', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('fith', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('sixth', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('seventh', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('PostCall', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('Is_Weekend', sa.BOOLEAN(), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint('id', name='Days_pkey')
    )
    op.drop_table('message_db')
    # ### end Alembic commands ###
|
from lll.exceptions import (
FormattedError,
)
SOURCE_CODE = """
(seq
(def 'test-const 0xxff)
)
"""[1:-1]
def test_formatted_error_mark_placement():
    """FormattedError renders a "line L:C: msg" header, the offending
    source line, and a caret marker of mark_size characters.

    Positions may be absolute or negative (counted from the end of the
    source).  Expected strings are compared exactly -- their whitespace
    is significant -- and the [1:-1] slice strips the newlines added by
    the triple-quote layout.
    """
    # Absolute position at the very start of the source, single caret.
    assert str(FormattedError(
        'test error',
        SOURCE_CODE,
        0, 0,
        mark_size=1,
        file_name=None,
    )) == """
line 1:1: test error
(seq
^
"""[1:-1]
    # Negative positions: -1,-1 marks the end of the source.
    assert str(FormattedError(
        'test error',
        SOURCE_CODE,
        -1, -1,
        mark_size=1,
        file_name=None,
    )) == """
line 5:1: test error
)
^
"""[1:-1]
    # Negative positions resolving into the middle of the source.
    assert str(FormattedError(
        'test error',
        SOURCE_CODE,
        -3, -2,
        mark_size=5,
        file_name=None,
    )) == """
line 3:26: test error
(def 'test-const 0xxff)
^^^^^
"""[1:-1]
    # Same location given as absolute coordinates must render identically.
    assert str(FormattedError(
        'test error',
        SOURCE_CODE,
        2, 25,
        mark_size=5,
        file_name=None,
    )) == """
line 3:26: test error
(def 'test-const 0xxff)
^^^^^
"""[1:-1]
def test_formatted_error_file_name():
    """When file_name is given, the header uses "file:line:col" instead
    of the "line L:C" prefix."""
    assert str(FormattedError(
        'test error',
        SOURCE_CODE,
        2, 25,
        mark_size=5,
        file_name='test.lll',
    )) == """
test.lll:3:26: test error
(def 'test-const 0xxff)
^^^^^
"""[1:-1]
|
#!/usr/bin/python3
# -----------------------------------------------------------------------------
# Structures.py
# Custom data structures for gadgets and instructions.
# Author: Eval
# GitHub: https://github.com/jcouvy
# -----------------------------------------------------------------------------
class Gadget:
    """A ROP gadget: the address of its first instruction plus the list
    of Instruction objects it is composed of."""

    def __init__(self, address, instructions):
        self.address = address
        self.instructions = instructions

    def __str__(self):
        header = "Gadget <%s>:" % hex(self.address)
        pad = len(header)
        pieces = [header]
        # One "\tgN: <label>" line per instruction, 1-based.
        for idx, insn in enumerate(self.instructions, start=1):
            pieces.append("\tg%s: %s\n" % (idx, insn.simple_print()))
        # Trailing padding aligns a following gadget under the header.
        pieces.append(" " * pad)
        return "".join(pieces)
class Instruction:
    """One decoded instruction.

    label is the raw instruction text, addr its address, and mnemonic a
    short format tag.  dst/src and their offsets default to None and are
    simply omitted from the printout when absent (e.g. a bare `ret`).
    """

    def __init__(self, label, addr, mnemonic,
                 dst=None, src=None, dst_off=None, src_off=None):
        self.label = label
        self.addr = addr
        self.mnemonic = mnemonic
        self.dst = dst
        self.dst_offset = dst_off
        self.src = src
        self.src_offset = src_off

    def __str__(self):
        header = "Instruction found at <%s>\n" % hex(self.addr)
        # The underline length deliberately counts the trailing newline,
        # matching the original output byte-for-byte.
        pieces = [header, "-" * len(header) + "\n"]
        pieces.append("Mnemonic: %s\n" % self.mnemonic)
        pieces.append(" Label: %s\n" % self.label)
        if self.dst is not None:
            pieces.append(" Dest: %s\n" % self.dst)
        if self.dst_offset is not None:
            pieces.append("\twith offset: %s\n" % self.dst_offset)
        if self.src is not None:
            pieces.append(" Src: %s\n" % self.src)
        if self.src_offset is not None:
            pieces.append("\twith offset: %s\n" % self.src_offset)
        return "".join(pieces)

    def simple_print(self):
        # Short form used by Gadget.__str__: just the raw text.
        return self.label
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.