text stringlengths 8 6.05M |
|---|
import flask
import json
from flask import request, jsonify
import movie_suggestion as movie
# Flask application object for the movie-suggestion API.
app = flask.Flask(__name__)
app.config["DEBUG"] = True  # NOTE(review): debug mode must be off in production
# Load the suggestion model/dataset once at import time; shared by all routes.
movie_df = movie.moviesuggestion()
@app.route('/', methods=['GET'])
def home():
    """Landing page: static HTML banner (leftover from the tutorial scaffold)."""
    return '''<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>'''
# Movies whose titles resemble the <movie> path parameter.
@app.route('/api/movie_suggestion/similiar_names/<movie>', methods=['GET'])
def api_similar_name(movie):
    """Return, as a JSON array of records, the movies with titles similar
    to *movie*.

    Note: the path parameter shadows the module-level ``movie`` import
    inside this function, which is harmless here since only ``movie_df``
    is used.  (Removed a leftover debug ``print(df.columns)``.)
    """
    df = movie_df.get_similiar_named_movies(movie)
    return jsonify(json.loads(df.to_json(orient='records')))
@app.route('/api/movie_suggestion/keywords/<movie>',methods=['GET'])
def api_similar_keywords(movie):
    """Return movies sharing keywords with the best title match for *movie*.

    NOTE(review): ``.values[0]`` raises IndexError when no title matches —
    confirm whether a 404 is the intended behaviour for that case.
    """
    # Resolve the fuzzy title to the top-ranked canonical title first.
    name = movie_df.get_similiar_named_movies(movie)['title'].values[0].lower()
    df = movie_df.get_movies_by_keywords(name)
    return jsonify(json.loads(df.to_json(orient='records')))
@app.route('/api/movie_suggestion/content/<movie>',methods=['GET'])
def api_similar_content(movie):
    """Return movies with similar content to the best title match for *movie*.

    NOTE(review): same unguarded ``.values[0]`` as the keywords route —
    IndexError on no match; confirm intended.
    """
    name = movie_df.get_similiar_named_movies(movie)['title'].values[0].lower()
    df = movie_df.get_movies_by_content(name)
    return jsonify(json.loads(df.to_json(orient='records')))
|
import random
import time
import matplotlib.pyplot as plt
from PIL import Image
class slow_one():
    """Random-walk maze/road generator.

    Repeatedly performs a random walk starting from the user-supplied end
    cell, marking visited cells with 1, until the walk happens to step on
    the start cell.  Timing of each generated maze is recorded for the
    box plot produced by plot_dataa().
    """
    RIGHT = [0, 1]  # candidate single-step moves as [row delta, col delta]
    DOWN = [1, 0]
    LEFT = [0, -1]
    UP = [-1, 0]
    l1 = [RIGHT, DOWN, LEFT, UP]  # move set sampled uniformly during the walk
    def __init__(self):
        # Grid dimensions are fixed at 50x50 cells.
        self.numOfCol, self.NumOfRow = 50,50
        # Palette used by photo_outout() to map cell values to pixel colours.
        self.image_color = [(0, 0, 128), (250, 1, 1), (204, 0, 204), (255, 236, 139), (255, 105, 180), (119, 172, 152),
                            (0, 100, 0), (238, 232, 170), (240, 189, 122), (220, 225, 219), (238, 59, 59),
                            (0, 255, 255), (139, 139, 0), (127, 255, 0), (0, 238, 238), (255, 105, 180), (255, 52, 179),
                            (255, 255, 0)]
        # One finished grid is appended here per generated maze.
        self.mainArray1=[]
    def take_inputs(self):
        """Interactively read start/end coordinates ("col,row") and the number
        of roads to generate; also sizes the per-road timing lists."""
        self.int1 = str(input("write the start cordinate in the format num of colume,numer of the row: "))
        self.int1=self.int1.split(',')
        self.int1x = int(self.int1[0])
        self.int1y = int(self.int1[1])
        self.int2 = str(input("write the End cordinate in the format num of colume,numer of the row: "))
        self.int2=self.int2.split(',')
        self.endx = int(self.int2[0])
        self.endy = int(self.int2[1])
        self.roadinput=int(input('please enter the number of the rouds that you want to generat: '))
        # One timing bucket per requested road, for the box plot.
        self.process_time=[[] for one in range(self.roadinput)]
        return self.int1x,self.int1y,self.endx,self.endy, self.roadinput
    def evolve(self):
        """Generate the requested number of roads via random walk, timing each."""
        self.int1x, self.int1y, self.endx,self.endy, self.roadinput= self.take_inputs()  # gather inputs
        for i in range( self.roadinput):  # generate each maze
            # Fresh all-zero grid for this road.
            self.mainArray =[[0 for one in range(self.NumOfRow)] for tow in range(self.numOfCol) ]
            start = time.time()
            break1 = True
            self.l2 = []  # list of [row, col] cells visited by the walk
            self.num_row_end = self.endy
            self.num_col_end = self.endx
            while break1==True:
                try:
                    # Mark the current cell; out-of-range indices are silently
                    # skipped (negative indices wrap via Python semantics).
                    self.mainArray[self.num_row_end][self.num_col_end] = 1
                    self.l2.append([self.num_row_end, self.num_col_end])
                except:
                    pass
                # Take one random step.
                self.end_rand = random.choice(slow_one.l1)
                self.num_col_end=self.num_col_end+self.end_rand[0]
                self.num_row_end = self.num_row_end + self.end_rand[1]
                # Reflect/clamp back into range when the walk leaves the grid.
                # NOTE(review): the column bound uses numOfCol but the row bound
                # also compares against numOfCol (square grid, so equivalent
                # here) — confirm if the grid ever becomes non-square.
                if self.num_col_end <0 :
                    self.num_col_end=self.num_col_end*-1
                elif self.num_col_end>self.numOfCol-1:
                    self.num_col_end = self.num_col_end -1
                if self.num_row_end <0 :
                    self.num_row_end = self.num_row_end * -1
                elif self.num_row_end >self.numOfCol-1:
                    self.num_row_end = self.num_row_end -1
                # Stop once the walk has ever touched the start cell.
                # NOTE(review): compares visited [row, col] against
                # (int1x, int1y) = (col, row) from take_inputs — possible
                # axis swap; confirm intended.
                for item in range( len(self.l2)):
                    if self.l2[item][0]== self.int1x and self.l2[item][1]== self.int1y:
                        print('done',i+1)
                        for t in range (len(self.mainArray)):
                            print(self.mainArray[t])
                        break1=False
                        break
            self.mainArray1.append(self.mainArray)
            end = time.time()
            self.process_time[i].append(end - start)
            print('process time generat',end-start)
    def plot_dataa(self):
        """Box-plot the per-road generation times collected by evolve()."""
        plt.boxplot(self.process_time)
        plt.show()
    def photo_outout (self,drow,location) :
        """Render a 2-D grid *drow* as an RGB image and save it to *location*.

        Non-zero cells are coloured via image_color; zero cells stay black.
        NOTE(review): the module-level driver passes self.mainArray1 (a LIST
        of grids), but this indexes drow like a single 2-D grid — that call
        would fail; confirm the intended argument (e.g. mainArray1[-1]).
        """
        img1 = Image.new('RGB', (len(drow),len(drow[0])), color=0)
        for x in range(len(drow)):
            for y in range(len(drow[0])):
                if drow[x][y]!=0:
                    img1.putpixel((x,y),self.image_color[drow[x][y]])
                else:
                    img1.putpixel((x, y), 0)
        img1.save(location)
        img1.show()
# Interactive driver: generate mazes, plot timings, render the result.
# Fix: removed a stray trailing '|' dataset-residue character that made the
# last line a SyntaxError.
SO= slow_one()
SO.evolve()
SO.plot_dataa()
SO.photo_outout(SO.mainArray1,'C:\\Users\\Abdulrrahman\\GMT203\\maze1234.tif')
from gnuradio import gr
from gnuradio import audio
from gnuradio import analog
#######################################################
# CLASS DESCRIBING THE WHOLE FLOW GRAPH
######################################################
class my_top_block(gr.top_block):
    """Flow graph feeding two sine tones (350 Hz / 440 Hz) into a stereo
    audio sink."""
    def __init__(self):
        gr.top_block.__init__(self)  # initialise the gr.top_block base class
        rate = 32000
        amplitude = 0.1
        tone_left = analog.sig_source_f(rate, analog.GR_SIN_WAVE, 350, amplitude)
        tone_right = analog.sig_source_f(rate, analog.GR_SIN_WAVE, 440, amplitude)
        speaker = audio.sink(rate, "")
        # Channel 0 carries the 350 Hz tone, channel 1 the 440 Hz tone.
        self.connect(tone_left, (speaker, 0))
        self.connect(tone_right, (speaker, 1))
#######################################################
# CODE THAT INSTANTIATES AND RUNS THE FLOW GRAPH "my_top_block"
######################################################
# Runs at import time and blocks until the flow graph terminates.
my_top_block().run()
|
from django.db import models
from django.contrib.auth.models import User
from api_auction.utils import sendTransaction
import hashlib
class Auction(models.Model):
    """An auction lot: vendor, optional winner, pricing, lifecycle status and
    an on-chain anchor (sha256 hash of the auction JSON plus the transaction
    id returned by the chain)."""
    vendor = models.ForeignKey(User, on_delete=models.CASCADE, related_name='auction_vendor')
    # Null while the auction is still open; set when it closes.
    winner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='auction_winner',null=True)
    asset_title = models.CharField(max_length=100, default="default_name")
    description = models.TextField(default="default_description")
    # NOTE(review): default=None without null=True will fail at the database
    # level if entry_price is ever saved unset — confirm intended.
    entry_price = models.FloatField(default=None)
    bid = models.FloatField(default=None, null=True)
    # Lifecycle flag; only "OPEN" is visible in this file.
    status = models.CharField(max_length=20, default="OPEN")
    start_date = models.DateTimeField(auto_now=False, auto_now_add=True)
    end_date = models.DateTimeField(auto_now=False, auto_now_add=False, null=True)
    # 66 chars fits a 0x-prefixed 32-byte hex digest / transaction id.
    hash = models.CharField(max_length=66, default=None, null=True)
    txId = models.CharField(max_length=66, default=None, null=True)
    def writeOnChain(self, jsonAuction):
        """Hash the auction's JSON snapshot, anchor it on chain via
        sendTransaction, and persist the hash and transaction id."""
        # The JSON containing all auction data is used to generate the hash.
        self.hash = hashlib.sha256(jsonAuction.encode('utf-8')).hexdigest()
        self.txId = sendTransaction(self.hash)
        self.save()
|
# coding: utf-8
# In[1]:
#Extract non-rectangular regions of interest (ROI)
import cv2
import numpy as np
import imutils
# In[2]:
# Draw a filled white rectangle (thickness -1 = filled) on a black canvas.
canvas1 = np.zeros((300,300), dtype='uint8')
rectangle = cv2.rectangle(canvas1, (25, 25),(275, 275),255,-1)
cv2.imshow('Rectangle', rectangle) # binary image (0 or 255)
cv2.waitKey(0)
# In[3]:
# Draw a filled white circle centred on the canvas.
canvas2 = np.zeros((300,300), dtype='uint8')
circle = cv2.circle(canvas2, (150,150), 150,255,-1)
cv2.imshow('Circle', circle) # binary image
cv2.waitKey(0)
# In[4]:
# Bitwise AND - pixel is ON (255) only where BOTH inputs are > 0.
bitwiseAnd = cv2.bitwise_and(rectangle, circle)
cv2.imshow('AND', bitwiseAnd)
cv2.waitKey(0)
# In[5]:
# Bitwise OR - pixel is ON where EITHER input is > 0.
bitWiseOR = cv2.bitwise_or(rectangle, circle)
cv2.imshow('OR', bitWiseOR)
cv2.waitKey(0)
# In[6]:
# Bitwise XOR - pixel is ON where exactly one input is > 0.
bitwiseXOR = cv2.bitwise_xor(rectangle, circle)
cv2.imshow('XOR', bitwiseXOR)
cv2.waitKey(0)
# In[7]:
# Bitwise NOT - invert the pixel values of a single image.
bitwiseNOT = cv2.bitwise_not(rectangle)
cv2.imshow('NOT', bitwiseNOT)
cv2.waitKey(0)
bitwiseNOT2 = cv2.bitwise_not(circle)
cv2.imshow('NOT', bitwiseNOT2)
cv2.waitKey(0)
# In[12]:
# In[ ]:
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Load and display the benchmark result tables.
# Fix: removed a stray trailing '|' dataset-residue character that made the
# last line a SyntaxError.
dfTop = pd.read_csv("top3.txt")
dfAvg = pd.read_csv("average.txt")
print(dfTop)
print(dfAvg)
from ftrl import FTRL
from csv import DictReader
from datetime import datetime
from math import log
import argparse
def main(args):
    """Build an FTRL model from the CLI hyper-parameters, then train and
    evaluate it."""
    ftrl_model = FTRL(args.alpha, args.beta, args.L1, args.L2)
    train(ftrl_model)
    test(ftrl_model)
def load_data_enumarator(path):
    """Yield (feature, clicked) pairs from the CSV file at *path*.

    clicked -- 1. if the row's 'target' column equals '1', else 0.
    feature -- list of hashed feature indices built with the hashing trick
               (2**28 buckets) from every column except 'target' and 'ID'.

    Fix: the file is now opened with a ``with`` block so the handle is
    closed when the generator is exhausted or discarded (the original
    leaked it).
    """
    with open(path) as csv_file:
        for row in DictReader(csv_file, delimiter=','):
            clicked = 0
            if 'target' in row:
                if row['target'] == '1':
                    # the click occurred
                    clicked = 1.
            # Build the feature vector using the hashing trick.
            feature = []
            for key in row:
                if key != 'target' and key != 'ID':
                    feature.append(abs(hash(key + '_' + row[key])) % 2 ** 28)
            yield feature, clicked
def train(ftrl_model):
    """Run 10 epochs of online FTRL training over data/train.csv, printing
    the running average logloss every 1000 rows."""
    epoch = 10
    print('alpha:', ftrl_model.alpha, 'beta:', ftrl_model.beta, 'L1:', ftrl_model.L1, 'L2:', ftrl_model.L2)
    print('epoch, count, logloss')
    for epoch_number in range(epoch):
        loss = 0
        count = 0
        # Re-open the file each epoch: the loader is a one-shot generator.
        train_data = load_data_enumarator('data/train.csv')
        for feature, clicked in train_data: # data is a generator
            # Predict first, then update — so the loss is an out-of-sample
            # (progressive validation) estimate.
            probability = ftrl_model.predict(feature)
            loss += logloss(probability, clicked)
            ftrl_model.update_model(feature, probability, clicked)
            count += 1
            if count % 1000 == 0:
                print(epoch_number + 1, count, loss/count)
def test(ftrl_model):
    """Print a predicted click probability for each row of data/test.csv.

    NOTE(review): the header print emits an extra blank line (the literal
    ends with an escaped newline plus print's own), and no IDs are printed
    beside the probabilities despite the 'ID,target' header — confirm the
    intended output format.
    """
    test_data = load_data_enumarator('data/test.csv')
    print ('write result')
    print('ID,target\n')
    for feature, _ in test_data:
        probability = ftrl_model.predict(feature)
        print(probability)
def logloss(p, y):
    """Binary logarithmic loss of predicted probability *p* for label *y*.

    Fix: *p* is clamped into (0, 1) so that saturated predictions
    (p == 0.0 or p == 1.0) yield a large finite penalty instead of raising
    a math domain error.
    """
    p = max(min(p, 1. - 1e-15), 1e-15)
    return -log(p) if y == 1. else -log(1. - p)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='FTRL')
parser.add_argument('--alpha', type=float, required=True, default=0.005)
parser.add_argument('--beta', type=float, required=True, default=1)
parser.add_argument('--L1', type=float, required=True, default=0)
parser.add_argument('--L2', type=float, required=True, default=1)
args = parser.parse_args()
main(args) |
from django.test import TestCase
from .models import Review, AuthorReview, Country, MovieRole, MovieFigure, Genre, Tag, Film
from django.test import Client
from django.urls import reverse
class TestModel(TestCase):
    """Checks that each catalog model's __str__ returns its display field."""
    def setUp(self):
        # One instance of every simple lookup model.
        self.author = AuthorReview.objects.create(fio='Иванов Иван Алексеевич')
        self.country = Country.objects.create(name_county='США')
        self.movie_role = MovieRole.objects.create(role='актер')
        self.genre = Genre.objects.create(genre_name='комедия')
        self.movie_figure = MovieFigure.objects.create(fio='Алексеев Алексей Иванович')
        self.tag = Tag.objects.create(tag_name='лучший фильм 2021')
    def tearDown(self):
        # Explicit cleanup (redundant with TestCase's per-test transaction
        # rollback, but harmless).
        AuthorReview.objects.all().delete()
        Country.objects.all().delete()
        MovieRole.objects.all().delete()
        Genre.objects.all().delete()
        MovieFigure.objects.all().delete()
        Tag.objects.all().delete()
    def test_model_author(self):
        self.assertEqual('Иванов Иван Алексеевич', str(self.author))
    def test_model_country(self):
        self.assertEqual('США', str(self.country))
    def test_model_movie_role(self):
        self.assertEqual('актер', str(self.movie_role))
    def test_model_genre(self):
        self.assertEqual('комедия', str(self.genre))
    def test_model_movie_figure(self):
        self.assertEqual('Алексеев Алексей Иванович', str(self.movie_figure))
    def test_model_tag(self):
        # NOTE(review): unlike the others this asserts on the field, not
        # str(self.tag) — so Tag.__str__ is not actually covered.
        self.assertEqual('лучший фильм 2021', str(self.tag.tag_name))
class TestView(TestCase):
    """Smoke tests for the catalog views: about page, index pagination flag,
    and 404 handling."""
    def setUp(self):
        # Fresh test client per test.
        self.client = Client()
    def tearDown(self):
        pass
    def test_index(self):
        # The named 'about' route resolves and renders.
        url = reverse('catalog:about')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
    def test_context_paginate(self):
        # With no fixture data the index must not paginate.
        response = self.client.get('/')
        self.assertEqual(response.context['is_paginated'], False)
    def test_page_not_found(self):
        # An unknown URL yields a 404.
        response = self.client.get('/test')
        self.assertEqual(404, response.status_code)
|
import sys
import re
def read_file(filename):
    """Return the first line of *filename*, read as UTF-8 text (including
    its trailing newline, if any)."""
    with open(filename, mode='rt', encoding='utf-8') as handle:
        return handle.readline()
def output_result(result):
    """Pretty-print the metrics dict produced by calculate_metrics."""
    print('Paragraph Analysis')
    print('-' * 25)
    labelled_keys = [
        ('Approximate Word Count', 'word_count'),
        ('Approximate Sentence Count', 'sentence_count'),
        ('Average Letter Count', 'average_letter_count'),
        ('Average Sentence Length', 'average_sentence_length'),
    ]
    for label, key in labelled_keys:
        print(f'{label}: {result[key]}')
def calculate_metrics(paragraph):
    """Compute approximate readability metrics for *paragraph*.

    Returns a dict with:
      word_count              -- tokens split on whitespace, '-' and "'"
      sentence_count          -- splits after '.', '!' or '?' followed by spaces
      average_letter_count    -- mean token length (punctuation included)
      average_sentence_length -- mean words per sentence

    Improvements over the original (behaviour unchanged): the dict is built
    directly instead of via parallel column/row lists zipped together, the
    sentence-split pattern is a raw string, dead commented-out code is gone,
    and the letter total uses a generator instead of a temporary list.
    """
    sentences = re.split(r"(?<=[.!?]) +", paragraph)
    words = re.split(r"\s|-|'", paragraph)
    word_count = len(words)
    sentence_count = len(sentences)
    total_letters = sum(len(word) for word in words)
    return {
        "word_count": word_count,
        "sentence_count": sentence_count,
        "average_letter_count": round(total_letters / word_count, 2),
        "average_sentence_length": round(word_count / sentence_count, 2),
    }
def main(filename):
    """Read the first line of *filename*, compute its metrics, print them."""
    paragraph = read_file(filename)
    result = calculate_metrics(paragraph)
    output_result(result)
# CLI entry point: python <script> <filename>
if __name__ == '__main__':
    main(sys.argv[1])
|
# Read three integers from one line of stdin and print the largest.
# (The original if/elif chain always printed the maximum, ties included,
# so the builtin is an exact behavioural match.)
t1, t2, t3 = map(int, input().split())
print(max(t1, t2, t3))
|
from twisted.internet.task import Clock
from twisted.trial.unittest import SynchronousTestCase
import treq
from mimic.resource import MimicRoot
from mimic.test.helpers import request, json_request
from mimic.core import MimicCore
from mimic.test.dummy import ExampleDomainAPI
class TestDomainMock(SynchronousTestCase):
    """
    Test cases to verify the :obj:`IAPIDomainMock`.
    """
    def test_domain_mock(self):
        """
        A GET on ``http://mimic-host.example.com:port/domain`` should return
        the list of all the domains; empty, if no plugins are registered.
        """
        # Core with no API mocks and no domain mocks registered.
        core = MimicCore(Clock(), [])
        root = MimicRoot(core).app.resource()
        response = self.successResultOf(request(
            self, root, b"GET",
            "http://mybase/domain"))
        self.assertEqual(200, response.code)
    def test_domain_mock_with_an_example_mock(self):
        """
        A GET on the ``http://mimic-host.example.com:port/domain`` should
        return the list of all the domains, enumerating all registered plugins.
        """
        example_domain_api = ExampleDomainAPI()
        # Third argument registers the domain mocks.
        core = MimicCore(Clock(), [], [example_domain_api])
        root = MimicRoot(core).app.resource()
        response, content = self.successResultOf(json_request(
            self, root, b"GET",
            "http://mybase/domain"))
        self.assertEqual(200, response.code)
        self.assertEqual(content, [u'api.example.com'])
    def test_domain_mock_child(self):
        """
        Any request to ``http://mimic-host.example.com:port/domain/<a-domain>``
        should be fielded by the :obj:`IAPIDomainMock` which returns
        ``<a-domain>`` from its ``domain()`` method.
        """
        example_domain_api = ExampleDomainAPI()
        # Two mocks registered; the request below must be routed to the
        # default one (api.example.com), not the api2 mock listed first.
        core = MimicCore(Clock(), [], [ExampleDomainAPI(u'api2.example.com',
                                                        b'"other-value"'),
                                       example_domain_api])
        root = MimicRoot(core).app.resource()
        response, content = self.successResultOf(json_request(
            self, root, b"GET",
            "http://mybase/domain/api.example.com/"))
        self.assertEqual(200, response.code)
        self.assertEqual(content, u'test-value')
    def test_domain_mock_no_child(self):
        """
        A GET on
        ``http://mimic-host.example.com:port/domain/non-existent.example.com``
        should return a 404 status assuming that there is no registered domain
        mock with that name.
        """
        example_domain_api = ExampleDomainAPI()
        core = MimicCore(Clock(), [], [example_domain_api])
        root = MimicRoot(core).app.resource()
        response = self.successResultOf(request(
            self, root, b"GET",
            b"http://mybase/domain/nope.example.com"))
        self.assertEqual(404, response.code)
        self.assertEqual(self.successResultOf(treq.content(response)),
                         b"No such domain.")
|
import os
import redis
import random
import string
"""
The following settings deal with debug outputs.
"""
# Enables / disables debug outputs.
DEBUG = os.environ.get('DEBUG', '0') == '1'
# Enables / disables very verbose debug outputs.
DEBUG_LOW = os.environ.get('DEBUG_LOW', '0') == '1'
# Enables / disables the most verbose of verbose debug outputs. Warning: Lots of text
DEBUG_LOWEST = os.environ.get('DEBUG_LOWEST', '0') == '1'
# Enables / disables adding the first X bytes of stdout and stderr to the response. Only shows in the debug log.
# This value should be an integer in the range 0-256 (to prevent huge amounts of output).
PROGRAM_OUTPUT = int(os.environ.get('PROGRAM_OUTPUT', 0))
"""
The following options deal with program compilation. They are used to prevent compiler bombs.
It's probably best to leave these at their default settings.
"""
# Time limit while compiling a program (in seconds).
COMPILE_TIME_LIMIT = round(float(os.environ.get('COMPILE_TIME_LIMIT', 10)), 1)
# Memory limit while compiling a program (in MB). Must be an integer.
COMPILE_MEMORY_LIMIT = int(os.environ.get('COMPILE_MEMORY_LIMIT', 256))
# Outputs the first X bytes of a compile error to the person who submitted the program.
# This value should be an integer in the range 0-512 (to prevent huge amounts of output).
COMPILE_ERROR_OUTPUT = int(os.environ.get('COMPILE_ERROR_OUTPUT', 512))
# Maximum size of the final executable (in MB). Must be an integer.
MAX_COMPILE_SIZE = int(os.environ.get('MAX_COMPILE_SIZE', 16))
"""
These settings deal with the actual running of the compiled program.
Most of these should be left at default, unless the machine you're running this on has a weak CPU or low RAM.
"""
# Maximum possible time limit for programs (in seconds). Rounded to 1 decimal place.
# Note that the actual time limit for a program is set by the info.yml file for each individual problem.
MAX_TIME_LIMIT = round(float(os.environ.get('MAX_TIME_LIMIT', 5)), 1)
# Maximum possible memory limit for programs (in MB). Must be an integer.
# Note that the actual memory limit for a program is set by the info.yml file for each individual problem.
MAX_MEMORY_LIMIT = int(os.environ.get('MAX_MEMORY_LIMIT', 256))
# Time limit adjustment for Java (base time limit will be multiplied by this). Rounded to 1 decimal place.
JAVA_TIME_MULTIPLIER = round(float(os.environ.get('JAVA_TIME_MULTIPLIER', 1.5)), 1)
# Time limit adjustment for Python (base time limit will be multiplied by this). Rounded to 1 decimal place.
PYTHON_TIME_MULTIPLIER = round(float(os.environ.get('PYTHON_TIME_MULTIPLIER', 2)), 1)
# Max file size that the program can create (including stdout and stderr) in MB. Must be an integer.
MAX_OUTPUT_SIZE = int(os.environ.get('MAX_OUTPUT_SIZE', 16))
# Number of seconds to add to the wall time threshold (used to kill a program that runs for too long).
# Rounded to 1 decimal place.
WALL_TIME_EXTENSION = round(float(os.environ.get('WALL_TIME_EXTENSION', 1.5)), 1)
"""
These settings deal with the web server (gunicorn).
"""
# Number of workers for gunicorn to use.
WORKER_COUNT = int(os.environ.get('WORKER_COUNT', 1))
# Number of threads for gunicorn to use.
THREAD_COUNT = int(os.environ.get('THREAD_COUNT', 2))
# Max code file size allowed, in KILOBYTES (KB)!!!!! Must be an integer.
MAX_CODE_SIZE = int(os.environ.get('MAX_CODE_SIZE', 256))
"""
Miscellaneous settings
"""
# The webhook URL (a POST request will be sent here after processing a submission).
WEBHOOK_URL = os.environ.get('WEBHOOK_URL', None)
# How long to keep the results of jobs (in seconds). Applies to both failed and successful jobs.
# Defaults to basically forever.
RESULT_TTL = os.environ.get('RESULT_TTL', 2000000000)
# The path to the problem_info folder.
PROBLEM_INFO_PATH = "./sample_problem_info"
if os.path.isdir("/problem_info"):
PROBLEM_INFO_PATH = "/problem_info"
PROBLEM_INFO_PATH = os.environ.get('PROBLEM_INFO_PATH', PROBLEM_INFO_PATH)
# The secret key used to authenticate to JudgeLite.
default_secret_key = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(24))
SECRET_KEY = os.environ.get('SECRET_KEY', default_secret_key)
# The page size for the submission list API call.
PAGE_SIZE = int(os.environ.get('PAGE_SIZE', 50))
# The redis connection (not changeable). Uses the default localhost:6379/db0.
REDIS_CONN = redis.Redis()
|
"""
Test computing a descriptor on something.
"""
from smqtk.algorithms.descriptor_generator.caffe_descriptor import CaffeDescriptorGenerator
from smqtk.representation.data_element.file_element import DataFileElement
e = DataFileElement("/usr/local/lib/python2.7/dist-packages/smqtk/tests/data/"
"Lenna.png")
gen = CaffeDescriptorGenerator(
"/home/smqtk/caffe/msra_resnet/ResNet-50-deploy.prototxt",
"/home/smqtk/caffe/msra_resnet/ResNet-50-model.caffemodel",
"/home/smqtk/caffe/msra_resnet/ResNet_mean.binaryproto",
return_layer="pool5",
use_gpu=False, load_truncated_images=True
)
# Uses default DescriptorMemoryElement factory.
d = gen.compute_descriptor(e)
assert d.vector() is not None
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from blog.models import *
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
from django.conf import settings
from django.db.models import Count, F
from django.contrib.auth import login,logout,authenticate
from django.contrib.auth.hashers import make_password
from blog.forms import *
from rest_framework.renderers import JSONRenderer
from blog.serializers import *
from rest_framework import generics
from rest_framework import permissions
from blog.permissions import IsOwnerOrReadOnly
from rest_framework import viewsets
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView, DetailView
from django.db.models import Q
import json
# Create your views here.
def global_setting(request):
    """Context processor: expose archive dates, tags, the search form and a
    comment-count ranking to every template.

    NOTE(review): returns locals(), so every local variable name below is
    part of the template-context contract — do not rename them.
    """
    # Distinct publication dates for the archive widget.
    archive_list = Article.objects.distinct_date()
    # Tags
    tag_list = Tag.objects.all()
    search_form = SearchForm()
    # Friendship links
    # Rank articles by comment count (one query per article below — N+1).
    comment_count_list = Comment.objects.values('article').annotate(comment_count=Count('article')).order_by('-comment_count')
    article_comment_list = [Article.objects.get(pk=comment['article']) for comment in comment_count_list]
    return locals()
def reply(request):
    """AJAX endpoint: create a comment on an article and echo it as JSON.

    Expects POST fields 'content', 'username' and 'article_id'.
    NOTE(review): non-AJAX requests or missing fields fall off the end and
    return None (an error in Django) — confirm intended.
    """
    if request.is_ajax():
        content=request.POST.get('content','')
        username=request.POST.get('username','')
        # Anonymous visitors get a placeholder display name.
        if username=='AnonymousUser':username='不愿透露姓名的围观群众'
        article_id=request.POST.get('article_id','')
        if content and article_id:
            Comment.objects.create(content=content,article_id=article_id,username=username)
            return HttpResponse(json.dumps({"content":content,"username":username,"article_id":article_id}))
# Pagination helper
def getpage(request,article_list):
    """Paginate *article_list* at 3 items per page using the ?page= query
    parameter; any pagination error falls back to page 1.

    Fix: the original returned from a ``finally`` block, which silently
    swallowed *every* exception (including KeyboardInterrupt/SystemExit),
    not just the pagination errors it meant to handle.
    """
    paginator = Paginator(article_list,3)
    try:
        page = request.GET.get('page',1)
        return paginator.page(page)
    except (InvalidPage,EmptyPage,PageNotAnInteger) as e:
        print(e)
        return paginator.page(1)
# Home page
def index(request):
    """Show the newest articles, paginated; with ?search_value=, show the
    (unpaginated) full-text search results instead.

    NOTE(review): renders with locals() — local variable names are part of
    the template contract.
    """
    if request.GET.get('search_value',None):
        value = request.GET.get('search_value',None)
        # Match title, body or exact category name.
        articles = Article.objects.filter(Q(title__icontains=value)|Q(content__icontains=value)
                                          |Q(category__name=value)).order_by('-id')
        if not articles:
            return HttpResponse('没有相关文章')
        #articles = getpage(request,article_list)
        return render(request,'blogs.html',locals())
    else:
        article_list = Article.objects.all().order_by('-id')
        articles = getpage(request,article_list)
        return render(request,'blogs.html',locals())
def archive(request):
    """List articles published in a given ?year=&month= pair, paginated.

    NOTE(review): missing query parameters leave year/month as None and the
    string concatenation below raises TypeError — confirm the URL always
    supplies both.
    """
    # Read the user-supplied filter.
    year = request.GET.get('year',None)
    month = request.GET.get('month',None)
    # Substring match on the publish date, e.g. "2021-07".
    article_list = Article.objects.filter(date_publish__icontains=year+'-'+month)
    articles = getpage(request,article_list)
    return render(request,'blogs.html',locals())
# Tag page: list articles carrying the clicked tag.
def tag_to_article(request):
    """Paginate the articles associated with the ?tag=<id> query parameter."""
    tag_id = request.GET.get('tag')
    articles = getpage(request,Article.objects.filter(tag=tag_id))
    return render(request,'blogs.html',locals())
# Article detail page.
def article(request):
    """Render one article (?id=), bump its click counter, and build a
    two-level comment tree for the template.

    NOTE(review): renders with locals() — local variable names are part of
    the template contract.
    """
    id = request.GET.get('id',None)
    try:
        article = Article.objects.get(pk=id)
        ClickCount(request,article,id)
    except Article.DoesNotExist:
        return HttpResponse('没有这个文章')
    # Pre-fill the comment form; anonymous users get a placeholder name.
    comment_form = CommentForm({'username':'不愿透露姓名的围观群众','article':id}if not request.user.is_authenticated()
                               else{'username':request.user.username,'article':id})
    # Fetch comments and thread replies under their parent (comment.pid).
    comments = Comment.objects.filter(article=article).order_by('id')
    comment_list=[]
    for comment in comments:
        for item in comment_list:
            # Lazily attach a children bucket to each top-level comment.
            if not hasattr(item,'children_comment'):
                setattr(item,'children_comment',[])
            if comment.pid == item:
                item.children_comment.append(comment)
                break
        # pid is None => top-level comment.
        if comment.pid is None:
            comment_list.append(comment)
    return render(request,'article.html',locals())
# Click counter.
def ClickCount(request,article,id):
    """Increment the article's click counter.

    Fix: uses an F() expression so the increment happens atomically in the
    database, instead of the previous read-modify-write which lost counts
    under concurrent requests.  (Requires ``F`` from django.db.models.)
    """
    Article.objects.filter(pk=id).update(click_count=F('click_count') + 1)
# Create a comment from the posted CommentForm, then bounce back.
def comment_post(request):
    """Persist a valid comment and redirect to the referring page.

    NOTE(review): an invalid form falls through to the redirect without
    feedback, and a missing Referer header raises KeyError — confirm.
    """
    comment_form = CommentForm(request.POST)
    if comment_form.is_valid():
        Comment.objects.create(content=comment_form.cleaned_data['comment_content'],
                               article_id=comment_form.cleaned_data['article'],
                               username=comment_form.cleaned_data['username'])
    return redirect(request.META['HTTP_REFERER'])
# Log out.
def do_logout(request):
    """Log the user out (best-effort) and return to the referring page."""
    try:
        logout(request)
    except Exception as e:
        # Best-effort: failures are logged to stdout and ignored.
        print(e)
    return redirect(request.META['HTTP_REFERER'])
# Log in.
def do_login(request):
    """Authenticate a LoginForm POST and redirect to its source_url; GET
    renders the login page.

    NOTE(review): renders with locals() — the template relies on the
    login_form local.  Auth failure returns the literal '登陆失败' body.
    """
    try:
        if request.method =='POST':
            login_form = LoginForm(request.POST)
            if login_form.is_valid():
                username = login_form.cleaned_data['username']
                password = login_form.cleaned_data['password']
                user = authenticate(username=username,password=password)
                if user is not None:
                    # Backend must be set when authenticate() chose one
                    # implicitly.
                    user.backend = 'django.contrib.auth.backends.ModelBackend'
                    try:
                        login(request,user)
                    except Exception as e:
                        print(e)
                else:
                    return HttpResponse('登陆失败')
                return redirect(request.POST.get('source_url'))
            else:
                return HttpResponse('登陆失败')
        else:
            login_form = LoginForm()
    except Exception as e:
        print(e)
    return render(request,'login.html',locals())
# Register.
def do_reg(request):
    """Create a user from a RegForm POST, log them in, and redirect to
    source_url; GET renders the registration page.

    NOTE(review): renders with locals() — the template relies on the
    reg_form local.  No duplicate-username handling is visible here.
    """
    try:
        if request.method=='POST':
            reg_form = RegForm(request.POST)
            if reg_form.is_valid():
                # Register: store only the password hash.
                user = User.objects.create(username=reg_form.cleaned_data['username'],
                                           password=make_password(reg_form.cleaned_data['password'],)
                                           )
                # Log in immediately after registration.
                user.backend = 'django.contrib.auth.backends.ModelBackend'
                login(request,user)
                return redirect(request.POST.get('source_url'))
            else:
                return HttpResponse('X')
        else:
            reg_form = RegForm()
    except Exception as e:
        print(e)
    return render(request,'reg.html',locals())
# Serialization (DRF). The triple-quoted blocks below are earlier iterations
# of the API kept as dead code; the live implementation is the ViewSet
# classes that follow them.
'''
class JSONResponse(HttpResponse):
def __init__(self,data,**kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse,self).__init__(content,**kwargs)
#初始的方法
@api_view(['GET','POST'])
@csrf_exempt
def article_list(request):
if request.method == 'GET':
article = Article.objects.all()
ser = ArticleSerializer(article,many=True)
return Response(ser.data)
elif request.method == 'POST':
ser = ArticleSerializer(data=request.data)
if ser.is_valid():
ser.save()
return Response(ser.data,status=status.HTTP_201_CREATED)
return Response(ser.data,status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET','POST'])
@csrf_exempt
def article_detail(request,pk,format=None):
try:
article = Article.objects.get(pk=pk)
except Article.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
ser = ArticleSerializer(article)
return Response(ser.data)
'''
'''
#使用类
class articlelist(APIView):
def get(self,request,format=None):
article = Article.objects.all()
ser = ArticleSerializer(article,many=True)
return Response(ser.data)
def post(self,request,format=None):
ser = ArticleSerializer(data=request.data)
if ser.is_valid():
ser.save()
return Response(ser.data,status=status.HTTP_201_CREATED)
return Response(ser.errors,status=status.HTTP_400_BAD_REQUEST)
class articledetail(APIView):
def get_object(self,pk):
try:
return Article.objects.get(pk=pk)
except Article.DoesNotExit:
return Response(status=status.HTTP_404_NOT_FOUND)
def get(self,request,pk,format=None):
article = self.get_object(pk)
ser = ArticleSerializer(article)
return Response(ser.data)
def put(self,request,pk,format=None):
ser = ArticleSerializer(data=request.data)
if ser.is_valid():
ser.save()
return Response(ser.data,status=status.HTTP_201_CREATED)
return Response(ser.errors,status=status.HTTP_400_BAD_REQUEST)
def delete(self,request,pk,format=None):
article = self.get_object(pk)
article.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
'''
'''
#使用mixins简化代码
class articlelist(mixins.ListModelMixin,
mixins.CreateModelMixin,
generics.GenericAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
def get(self,request,*args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self,request,*args, **kwargs):
return self.create(request, *args, **kwargs)
class articledetail(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
def get(self,request,*args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self,request,*args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self,request,*args, **kwargs):
return self.destroy(request, *args, **kwargs)
'''
'''
#更更更简洁
class articlelist(generics.ListCreateAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
#添加权限
permission_classes = (permissions.IsAuthenticatedOrReadOnly,IsOwnerOrReadOnly)
#将代码段与用户关联
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class articledetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,IsOwnerOrReadOnly)
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
'''
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class ArticleViewSet(viewsets.ModelViewSet):
    """Full CRUD for articles; writes require authentication and ownership."""
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,IsOwnerOrReadOnly)
    def perform_create(self, serializer):
        # Attach the authenticated request user as the article owner.
        serializer.save(user=self.request.user)
class CommentViewSet(viewsets.ModelViewSet):
    """Full CRUD for comments (no permission classes — open by default)."""
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
class TagViewSet(viewsets.ModelViewSet):
    """Full CRUD for tags (no permission classes — open by default)."""
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
class CategoryViewSet(viewsets.ModelViewSet):
    """Full CRUD for categories (no permission classes — open by default)."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
class ArticleList(generics.ListAPIView):
    """Read-only article list with an optional ?value=<substring> title
    filter."""
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
    def get_queryset(self):
        '''
        value = self.kwargs.get('value','')
        queryset = Article.objects.filter(title__icontains=value)
        return queryset
        '''
        # Filter by title substring when ?value= is supplied.
        queryset = Article.objects.all()
        value = self.request.query_params.get('value', None)
        if value is not None:
            queryset = queryset.filter(title__icontains=value)
        return queryset
|
# Rotate a list of n integers right by k positions and print it.
n,k=map(int,input().split())
l=list(map(int,input().split()))
# Fix: the original rotated one step at a time (O(n*k)); a single slice
# rotation by k mod len(l) yields the identical result in O(n).  The
# guards preserve the original's behaviour for empty input and k <= 0.
if l and k > 0:
    shift = k % len(l)
    if shift:
        l = l[-shift:] + l[:-shift]
print(*l)
|
import urllib.request,json
class Webservice :
    """Thin HTTP client for the hurrybunny vendors API."""
    def call_vendors(self,path):
        """GET https://www.hurrybunny.com/<path> and return the 'vendors'
        list from the JSON response.

        Fix: the urlopen response is now used as a context manager so the
        underlying connection is always closed (the original leaked it).
        """
        url="https://www.hurrybunny.com/{}".format(path)
        with urllib.request.urlopen(url) as data:
            response = json.loads(data.read().decode("utf-8"))
        return response['vendors']
def main():
    """Instantiate the client and print the vendor list (network call)."""
    webservice = Webservice()
    print(webservice.call_vendors("api/vendors/"))
# Fix: removed a stray trailing '|' dataset-residue character after main().
if __name__ == '__main__': main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Alexander I. Shaykhrazeev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Used to analyse ISSGPRS data records with 3GPP parser
"""
from subprocess import Popen, PIPE
import argparse
import string
import sys
import os
class GSMMessageParser(object):
    """Decode raw GSM/GPRS message octets via the text2pcap + tshark tools."""

    # Supported protocol names mapped to the wireshark dissector to use.
    gsm_protocols = {
        'RR': 'gsm_a_ccch',
        'RLC_Uplink': 'gsm_rlcmac_ul',
        'RLC_Downlink': 'gsm_rlcmac_dl',
        'LLC': 'llc',
        'GPRS_LLC': 'gprs_llc',
        'SNDCP': 'sndcp',
        'SNDCP_XID': 'sndcpxid'
    }

    def writeTempPcap(self, message):
        """Write *message* octets to msg_text.txt in text2pcap's expected
        input format ('0000 ' offset prefix). Return True on success."""
        try:
            with open('msg_text.txt', 'w') as msgFile:
                msgFile.write('0000 ' + message)
        except IOError as err:
            # BUG FIX: IOError has no .message attribute on Python 3;
            # str(err) works on both Python 2 and 3.
            sys.stderr.write(str(err))
            return False
        return True

    def call_text2pcap(self, message):
        """Convert the octet dump into pcap_temp.pcap; return True on success."""
        if not self.writeTempPcap(message):
            return False
        p = Popen('text2pcap -q -l 147 msg_text.txt pcap_temp.pcap',
                  shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # communicate() waits for exit; the original's wait()-then-communicate()
        # risks a pipe deadlock on large output.
        (out, err) = p.communicate()
        if p.returncode:
            print(err)
            return False
        # BUG FIX: the original returned False on the success branch too,
        # making the cleanup and 'return True' below unreachable.
        os.unlink('msg_text.txt')
        return True

    def call_tshark(self, protocol):
        """Run tshark over pcap_temp.pcap with the dissector for *protocol*.

        Returns (ok, lines): ok is False on failure, and lines is the decoded
        tshark output split into lines (empty on failure).
        """
        cmd = 'tshark -r pcap_temp.pcap -Ttext -V -o "uat:user_dlts:\\\"User 0 (DLT=147)\\\",' \
              '\\\"%s\\\",\\\"0\\\",\\\"\\\",\\\"0\\\",\\\"\\\""' % self.gsm_protocols[protocol]
        p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        (out, err) = p.communicate()
        if p.returncode:
            print(err)
            # BUG FIX: callers unpack two values; returning a bare False
            # raised TypeError at the call site.
            return False, []
        result = out.decode('utf-8', 'strict').splitlines()
        os.unlink('pcap_temp.pcap')
        return True, result

    def checkCorrect(self, message, protocol):
        """Validate that *protocol* is supported and *message* (spaces
        ignored) is a hex string with an even number of digits."""
        if protocol not in self.gsm_protocols:
            return False
        msg = message.replace(' ', '')
        only_hex = all(c in string.hexdigits for c in msg)
        even_len = (len(msg) % 2 == 0)
        return only_hex and even_len

    def parse(self, arguments):
        """Parse arguments.message as arguments.protocol; return the decoded
        text lines, or None on any validation/tool failure."""
        res = False
        text = []
        if self.checkCorrect(arguments.message, arguments.protocol):
            if self.call_text2pcap(message=arguments.message):
                res, text = self.call_tshark(protocol=arguments.protocol)
        if res:
            return text
        return None
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process messages of 3GPP')
    parser.add_argument('protocol', metavar='Proto', type=str,
                        help='Protocol of specified 3GPP message')
    parser.add_argument('message', metavar='Msg', type=str,
                        help='Hexadecimal string of message octets')
    args = parser.parse_args()
    result = GSMMessageParser().parse(arguments=args)
    # BUG FIX: parse() returns None on failure; iterating None raised
    # TypeError. Report the failure and exit non-zero instead.
    if result is None:
        sys.exit('Failed to parse message (bad input or tool error)')
    for line in result:
        print(line)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-04 13:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.3 (2016-11-04); adjusts human-readable
    # model names (verbose_name / verbose_name_plural) only -- no schema
    # changes, so this migration is trivially reversible.

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='catalogentry',
            options={'verbose_name_plural': 'Catalog entries'},
        ),
        migrations.AlterModelOptions(
            name='operationalstatus',
            options={'verbose_name_plural': 'Operational statuses'},
        ),
        migrations.AlterModelOptions(
            name='orbitalstatus',
            options={'verbose_name_plural': 'Orbital statuses'},
        ),
        migrations.AlterModelOptions(
            name='tle',
            options={'verbose_name': 'Two Line Element', 'verbose_name_plural': 'Two Line Elements'},
        ),
    ]
|
# encoding: utf-8
'''
This application does an NVE+Langevin LAMMPS simulation of spherocylinder-like rods
(defined in a .cfg file) using the "lammps_multistate_rods" library, with some rods
preassembled in a fibril (using "tools/prepare_fibril.py).
The initial locations of the rods are at SC lattice points defined by the input params,
excluding the fibril region, and their orientations are randomly determined at each
insertion point.
Created on 16 Mar 2018
@author: Eugen Rožić
'''
import os
import argparse
# Command-line interface; see the module docstring for the overall purpose.
# NOTE: this is a Python 2 script (print statement and execfile below).
parser = argparse.ArgumentParser(description='Program for NVE+Langevin hybrid LAMMPS'\
                                 ' simulation of spherocylinder-like rods, using the'\
                                 ' "lammps_multistate_rods" library, with a preassembled fibril.',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('cfg_file',
                    help='path to the "lammps_multistate_rods" model configuration file')
parser.add_argument('run_file',
                    help='path to the run configuration file')
parser.add_argument('simlen', type=int,
                    help='the length of the simulation')
parser.add_argument('--seed', type=int,
                    help='the seed for random number generators')
parser.add_argument('--out', type=str, default=None,
                    help='name/path for the output folder (defaults to cfg_file path w/o ext)')
parser.add_argument('-o', '--output_freq', type=int,
                    help='configuration output frequency (in MD steps);'\
                    ' default behavior is after every batch of MC moves')
parser.add_argument('-s', '--silent', action='store_true',
                    help="doesn't print anything to stdout")
args = parser.parse_args()

# Fail fast on mismatched configuration file extensions.
if not args.cfg_file.endswith('.cfg'):
    raise Exception('Model configuration file (first arg) has to end with ".cfg"!')
if not args.run_file.endswith('.run'):
    raise Exception('Run configuration file (second arg) has to end with ".run"!')

if args.seed is None:
    import time
    # Derive a seed from the sub-second part of the current time.
    seed = int((time.time() % 1)*1000000)
    print "WARNING: no seed given explicitly; using:", seed
else:
    seed = args.seed

# Output folder defaults to the cfg file path without its extension.
if args.out is None:
    output_folder = os.path.splitext(args.cfg_file)[0]
else:
    output_folder = args.out
#========================================================================================

#from mpi4py import MPI #TODO make MPI work...
from lammps import PyLammps
import lammps_multistate_rods as rods
import lammps_multistate_rods.tools as rods_tools

if not os.path.exists(output_folder):
    os.makedirs(output_folder)

# Output file names are derived from the run file name and the seed so that
# repeated runs with different seeds don't overwrite each other.
run_filename = os.path.splitext(os.path.basename(args.run_file))[0]
sim_ID = '{:s}_{:d}'.format(run_filename, seed)
dump_filename = sim_ID+'.dump'
dump_path = os.path.join(output_folder, dump_filename)
log_filename = '{:d}.lammps'.format(seed)
log_path = os.path.join(output_folder, log_filename)

# Evaluate the run file into run_args' namespace with builtins disabled
# (Python 2 execfile); the run file is trusted local input.
run_args = rods.rod_model.Params()
execfile(args.run_file,
         {'__builtins__' : None, 'True' : True, 'False' : False, 'None' : None},
         vars(run_args))

# Default the output frequency to one batch of MC moves (run_length).
out_freq = args.output_freq if args.output_freq != None else run_args.run_length

py_lmp = PyLammps(cmdargs=['-screen','none'])
py_lmp.log('"'+log_path+'"')
model = rods.Rod_model(args.cfg_file)
simulation = rods.Simulation(py_lmp, model, seed, output_folder)

# Simulation box: SC lattice with the number density implied by cell_size.
py_lmp.units("lj")
py_lmp.dimension(3)
py_lmp.boundary("p p p")
py_lmp.lattice("sc", 1/(run_args.cell_size**3))
py_lmp.region("box", "block", -run_args.num_cells / 2, run_args.num_cells / 2,
              -run_args.num_cells / 2, run_args.num_cells / 2,
              -run_args.num_cells / 2, run_args.num_cells / 2)
simulation.setup("box")

# create fibril (preassembled seed, rods in the highest/last state)
fibril_temp_file = os.path.join(output_folder, '{:d}_fibril.dat'.format(seed))
fibril_edges = rods_tools.prepare_fibril(model, run_args.seed_size, run_args.seed_phi,
                                         run_args.seed_theta, run_args.seed_r0, fibril_temp_file)
simulation.create_rods(state_ID=model.num_states-1, file=[fibril_temp_file])
os.remove(fibril_temp_file)

# create other rods
box_size = run_args.num_cells * run_args.cell_size
# Bounding box of the fibril, padded by half a rod length on every side so
# that freely inserted rods cannot overlap the seed.
xmin = fibril_edges[0][0] - model.rod_length / 2
xmax = fibril_edges[0][1] + model.rod_length / 2
ymin = fibril_edges[1][0] - model.rod_length / 2
ymax = fibril_edges[1][1] + model.rod_length / 2
zmin = fibril_edges[2][0] - model.rod_length / 2
zmax = fibril_edges[2][1] + model.rod_length / 2
py_lmp.region("fibril", "block", xmin, xmax, ymin, ymax, zmin, zmax, "units box")
#TODO py_lmp.region("box_minus_fibril", "subtract", 2, "box", "fibril")
# Six half-space slabs around the fibril; their union approximates
# "box minus fibril" until the subtract-region TODO above is resolved.
py_lmp.region("left", "block",
              'EDGE', xmin,
              'EDGE', 'EDGE',
              'EDGE', 'EDGE',
              "units box")
py_lmp.region("right", "block",
              xmax, 'EDGE',
              'EDGE', 'EDGE',
              'EDGE', 'EDGE',
              "units box")
py_lmp.region("front", "block",
              'EDGE', 'EDGE',
              'EDGE', ymin,
              'EDGE', 'EDGE',
              "units box")
py_lmp.region("back", "block",
              'EDGE', 'EDGE',
              ymax, 'EDGE',
              'EDGE', 'EDGE',
              "units box")
py_lmp.region("down", "block",
              'EDGE', 'EDGE',
              'EDGE', 'EDGE',
              'EDGE', zmin,
              "units box")
py_lmp.region("up", "block",
              'EDGE', 'EDGE',
              'EDGE', 'EDGE',
              zmax, 'EDGE',
              "units box")
py_lmp.region("box_minus_fibril", "union", 6, "up", "down", "front", "back", "left", "right")
simulation.create_rods(region = ["box_minus_fibril"])
# DYNAMICS
py_lmp.fix("thermostat", "all", "langevin",
           run_args.temp, run_args.temp, run_args.damp, seed)#, "zero yes")
simulation.set_rod_dynamics("nve")
py_lmp.neigh_modify("every 1 delay 1")
py_lmp.timestep(run_args.dt)

# RANDOMISE INITIAL CONFIGURATION
# Briefly run with state-0 interactions deactivated to relax overlaps from
# the random insertion, then reactivate and reset the clock.
simulation.deactivate_state(0, vx_eps=5.0)
py_lmp.command('run 10000')
simulation.activate_state(0)
py_lmp.reset_timestep(0)

# GROUPS & COMPUTES
if hasattr(run_args, 'label_fibrils'):
    fibril_group = 'beta_patches'
    # Active patch bead types of the beta (state 1) rods, excluding body beads.
    beta_active_patch_types = sorted(filter(lambda t: (t in model.active_bead_types) and\
                                            (t not in model.body_bead_types),
                                            model.state_bead_types[1]))
    py_lmp.variable(fibril_group, 'atom', '"' +
                    '||'.join(['(type == {:d})'.format(t)
                               for t in beta_active_patch_types]) +
                    '"')
    py_lmp.group(fibril_group, 'dynamic', simulation.rods_group, 'var', fibril_group,
                 'every', out_freq)
    fibril_compute = "fibril_ID"
    if hasattr(run_args, 'fibril_cutoff'):
        fibril_cutoff = run_args.fibril_cutoff
    else:
        fibril_cutoff = 0
    # NOTE(review): indentation reconstructed -- this loop may have been meant
    # to run only in the 'else' branch above; as written it can only raise the
    # cutoff to the largest patch-patch interaction distance.
    i = -1
    for t1 in beta_active_patch_types:
        i += 1
        for t2 in beta_active_patch_types[i:]:
            try:
                int_key = model.eps[(t1,t2)][1]
            except:
                continue  # no interaction defined for this type pair
            int_range = model.int_types[int_key][1]
            cutoff = model.bead_radii[t1] + model.bead_radii[t2] + int_range*2/3
            if cutoff > fibril_cutoff:
                fibril_cutoff = cutoff
    py_lmp.compute(fibril_compute, fibril_group, 'aggregate/atom', fibril_cutoff)

# OUTPUT
py_lmp.thermo_style("custom", "step atoms", "pe temp")
dump_elems = "id x y z type mol"
try:
    # fibril_compute exists only when fibril labelling is enabled above;
    # the NameError is deliberately swallowed otherwise.
    dump_elems += " c_"+fibril_compute
except:
    pass
py_lmp.dump("dump_cmd", "all", "custom", out_freq, dump_path, dump_elems)
py_lmp.dump_modify("dump_cmd", "sort id")
py_lmp.thermo(out_freq)

# RUN...
if model.num_states == 1 or run_args.mc_moves == 0:
    raise Exception("Multiple states need to exist and MC moves need to be made for fibrils to grow!")

mc_moves_per_run = int(run_args.mc_moves * simulation.rods_count())
py_lmp.command('run {:d} post no'.format(run_args.run_length-1)) #so output happens after state changes
remaining = args.simlen - run_args.run_length + 1
while True:
    success = simulation.state_change_MC(mc_moves_per_run)#, replenish=("box", 2*model.rod_radius, 10)) TODO
    if not args.silent:
        base_count = simulation.state_count(0)
        beta_count = simulation.state_count(1)
        # NOTE(review): 'i' is left over from the cutoff loop above, is only
        # defined when fibril labelling is on, and is never advanced in this
        # loop -- a per-iteration step counter was probably intended. Verify.
        print 'step {:d} / {:d} : beta-to-soluble ratio = {:d}/{:d} = {:.5f} (accept rate = {:.5f})'.format(
            (i+1)*run_args.run_length, args.simlen, beta_count, base_count, float(beta_count)/base_count,
            float(success)/mc_moves_per_run)
    # Python 2 integer division: run full batches until less than one whole
    # batch remains, then run the remainder and stop.
    if remaining / run_args.run_length > 0:
        py_lmp.command('run {:d} post no'.format(run_args.run_length))
        remaining -= run_args.run_length
    else:
        py_lmp.command('run {:d} post no'.format(remaining))
        break
|
import pytest
from LayerClient import LayerClient
from test_utils import MockRequestResponse, TestPlatformClient
class TestApiException(TestPlatformClient):
    """LayerPlatformException must be raised for both structured-JSON and
    plain-text error responses, carrying the server-reported details."""

    def test_json_exception(self, layerclient, monkeypatch):
        # Stub the HTTP layer with a structured JSON error payload (401).
        def fake_request(method, url, headers, data, params):
            payload = {
                'message': 'Operation not supported',
                'code': 40,
                'id': 23,
            }
            return MockRequestResponse(False, json=payload, status_code=401)

        monkeypatch.setattr('requests.request', fake_request)
        with pytest.raises(LayerClient.LayerPlatformException) as excinfo:
            layerclient.get_conversation('some_uuid')
        exc = excinfo.value
        assert str(exc) == 'Operation not supported'
        assert exc.http_code == 401
        assert exc.code == 40
        assert exc.error_id == 23

    def test_raw_exception(self, layerclient, monkeypatch):
        # Stub the HTTP layer with a plain-text 500 body.
        def fake_request(method, url, headers, data, params):
            return MockRequestResponse(
                False, text='Internal server error', status_code=500,
            )

        monkeypatch.setattr('requests.request', fake_request)
        with pytest.raises(LayerClient.LayerPlatformException) as excinfo:
            layerclient.get_conversation('some_uuid')
        assert str(excinfo.value) == 'Internal server error'
        assert excinfo.value.http_code == 500
|
# Import the flask library functions
from flask import (
Flask,
session,
g,
render_template,
request,
redirect,
url_for
)
# Other libraries needed
from requests.api import get
from pathlib import Path
import yfinance as yf
import datetime as d
import pandas as pd
import requests
import glob
import json
import time
import os
import io
# Import functions from other folders
from sendmail import send_mail, send_buy, send_sell
from models import users, contactus, stock
from api import getdata
# Path used for all tables in database
path = "app.db"

# App configuration
templates_path = os.path.abspath('./templates')
app = Flask(__name__, template_folder=templates_path)
# NOTE(review): hard-coded secret key is fine for development only; load it
# from the environment in production.
app.secret_key = 'somekey'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0  # disable static-file caching

# Creates all the tables in the database when the application is run
users.create_user(path)
contactus.create_tbl(path)
stock.make_tbl(path)
def get_current_price(symbol):
    """Return the latest closing price for *symbol*.

    Uses yfinance's Ticker.history over the current day and extracts the
    'Close' column. Raises IndexError if no data is returned (e.g. market
    holiday or unknown symbol).
    """
    ticker = yf.Ticker(symbol)
    todays_data = ticker.history(period='1d')
    # BUG FIX: todays_data['Close'][0] performed an integer *label* lookup on
    # a DatetimeIndex series, which modern pandas rejects; .iloc[0] is the
    # intended positional access.
    return todays_data['Close'].iloc[0]
'''
For analysis and trading we need a list of stock symbols to see if the user has entered a valid stock symbol
URL containing NASDAQ listings in csv format
Store all stock symbols in a list
'''
url="https://pkgstore.datahub.io/core/nasdaq-listings/nasdaq-listed_csv/data/7665719fb51081ba0bd834fde71ce822/nasdaq-listed_csv.csv"
# Fetched once at import time; NOTE(review): the app fails to start if this
# host is unreachable -- consider caching the CSV locally.
data = requests.get(url).content
df_data = pd.read_csv(io.StringIO(data.decode('utf-8')))
symbols = df_data['Symbol'].to_list()
'''
Sets the current user - g.user to none and then checks if the user is in session
If the user is in session then their email is fetched and g.user is updated to that email
Otherwise Exception is thrown
'''
@app.before_request
def security():
    """Populate g.user from the session before every request (None if no
    signed-in user is found)."""
    g.user = None
    if 'user_email' not in session:
        return
    known_emails = users.getemail(path)
    try:
        g.user = [row for row in known_emails if row[0] == session['user_email']][0]
    except Exception as e:
        # No matching row -- leave g.user as None.
        print("Failed")
# LOGIN page
@app.route('/', methods=["GET", "POST"])
def home():
    """Login / sign-up / password-reset entry page (one form, branch on
    which fields were filled in)."""
    # The particular user is removed from session
    session.pop("user_email", None)
    # Flag checks if the password entered by the user is correct or not
    flag = True
    """
    If a post request is made on the login page
    Take input from the fields - Name, Email, Password, Confirm Password
    """
    if request.method == "POST":
        name = request.form['name']
        email = request.form['email']
        password = request.form['password']
        repeat_password = request.form['rpassword']
        '''
        If the password field has a password, and the repeat password is empty the user is trying to login
        First the user is verified -> Check if user exists
        Then the password is verified by checking the database for that user
        If the password matches the user is added to the session otherwise the flag variable is set to false
        If the user doesnt exist then render back to login and give error message
        '''
        if password and not repeat_password:
            if users.check_user_exist(path, email):
                print("LOGIN")
                # if users.checkpwd(path, password, email):
                #     session['user_email'] = email
                #     return redirect('/index')
                '''
                If the password field is entered check the password against the hashed password in the db
                If it matches then user is in session and is redirected to the homepage
                Else a flag is set and the user is shown an error message
                '''
                if users.check_hash(path, password, email):
                    session['user_email'] = email
                    return redirect('/index')
                else:
                    #If the flag variable is false -> user has entered the wrong password
                    flag = False
                    #print("WRONG PWD")
                    return render_template('login.html', error="Incorrect Email or Password")
            else:
                #If the user doesnt exist
                return render_template('login.html', error="User Doesnt Exist")
        '''
        If the password and repeat password fields are filled - SIGN UP
        If the user already exists then print an error message and redirect to login page
        If the user doesnt exist then allow the signup to take place
        If they both are the same (password and repeat password)
        Then a new user is added to the USER TABLE in the database with all the data
        The user is then added to the session and the user is redirected to the login page
        If the fields dont match the user is alerted and redirected back to the login page to try again
        '''
        if password and repeat_password:
            print("SIGN UP")
            if not users.check_user_exist(path, email):
                if password == repeat_password:
                    #Hash the users password and store the hashed password
                    password = users.hash_pwd(password)
                    #print("Hashed PWD: ", password)
                    users.insert(path, 'user', (email, name, password, 0))
                    #print("Inserted Hashed Password")
                    session['user_email'] = email
                    return render_template('login.html', error="Sign Up Complete - Login")
                else:
                    return render_template('login.html', error="Password & Retyped Password Not Same")
            else:
                return render_template('login.html', error="This User Already Exists! Try Again")
        '''
        If only the email field is filled it means the user has requested to reset their password
        First the User table is looked up to see if the user exists (if the password can be reset)
        The password is reset if the user exists through the reset process (mail, verification code ...)
        If the user doesnt exist an error message is generated and the user is redirected back to the login page
        '''
        if not name and not password and email:
            if users.check_user_exist(path, email):
                print("RESET PASSWORD:")
                # session['user_email'] = email
                reset_password(path, email)
                return render_template('login.html',
                                       error="We have sent you a link to reset your password. Check your mailbox")
            else:
                print("User Doesnt Exist")
                return render_template('login.html', error="This Email Doesnt Exist - Please Sign Up")
    #If the flag variable is true then the user has entered the correct password and is redirected to the login page
    #FLAG VALUE IS TRUE INITIALLY
    # NOTE(review): a POST matching none of the branches above falls through
    # here and implicitly returns None (HTTP 500) -- verify the form always
    # fills one of the expected field combinations.
    if flag:
        return render_template('login.html')
# HOME page
@app.route('/index', methods=["GET", "POST"])
def index():
    """Render the dashboard for a signed-in user; otherwise bounce to login."""
    if not g.user:
        # No user in session -> back to the login page
        return redirect('/')
    return render_template("index.html")
"""
Function to reset password
Sends the mail for resetting password to user
"""
def reset_password(path:str, email: str):
    """Trigger the reset-password e-mail (with verification code) for *email*."""
    send_mail(path, email)
# RESET PASSWORD page
@app.route('/reset', methods=["GET", "POST"])
def reset():
    """
    Once the user clicks on the reset password link sent to his mail he is taken to the reset password page
    If a post request is generated (when user clicks submit) - all the input fields are fetched (pwd, rpwd, code)
    If all three fields are filled it checks if the password and repeat password match
    If the two passwords match the verification code is checked in the database to verify user
    If code matches the user then the password is updated for the user in the database
    The code is set back to 0 for that user (to avoid repetition of codes)
    Otherwise an error is generated
    """
    if request.method == "POST":
        pwd = request.form['npassword']
        repeat_pwd = request.form['rnpassword']
        ver_code = request.form['vcode']
        # NOTE(review): int() raises ValueError on a non-numeric code --
        # verify the form enforces digits-only input.
        ver_code = int(ver_code)
        #print(ver_code)
        if pwd and repeat_pwd and ver_code:
            print("CHECKING")
            if pwd == repeat_pwd:
                if users.check_code(path, ver_code):
                    #Hash the new password and update db with hashed password
                    pwd = users.hash_pwd(pwd)
                    users.reset_pwd(path, pwd, ver_code)
                    #print("Resetting password & Updating DB")
                    # Invalidate the code so it cannot be replayed.
                    users.reset_code(path, ver_code)
                    return redirect("/")
                    #return render_template('login.html', error="Password Reset Successfully")
                else:
                    #print("Verification Code Doesnt Match")
                    #return redirect("/")
                    return render_template('reset.html', error="Incorrect Verification Code")
            else:
                return render_template('reset.html', error="Password & Retyped Password Not Same")
    return render_template('reset.html')
# ANALYSIS page -> Allows user to get historical stock data for any company and then view it graphically
@app.route('/inv', methods=["GET", "POST"])
def inv():
    """Fetch full price history for a symbol and stage it as JSON for the
    front-end plot (served by /pipe)."""
    # Enters the page only if a user is signed in - g.user represents the current user
    if g.user:
        #If the user clicks on the 'VIEW' Button a POST request is generated
        if request.method == "POST":
            #print("ENTERED POST REQUEST")
            #Get the variable name for the option the the user has entered
            stock_id = request.form['stocksym']
            stock_id = stock_id.upper()
            #print(stock_id)
            #If the stock symbol is valid and exists
            if stock_id in symbols:
                #print(stock_id)
                #Fetch data into another dataframe
                df_stock = yf.download(stock_id, start="1950-01-01", period='1d')
                #print(df_stock)
            #If stock symbol is invalid
            else:
                #Return to page with error
                return render_template('inv.html', error="Incorrect Stock Symbol. Please Enter Valid Symbol")
            #Drop the 'Adj Close' column as we dont need it to plot data
            df_stock.drop('Adj Close', axis='columns', inplace=True)
            #Reset index makes sure the dataframe has indexing of its own and converts the date index to a column
            df_stock.reset_index(inplace=True)
            #Convert the date to a datetime object (gets converted to a specialised type of datetime object)
            df_stock['Date'] = pd.to_datetime(df_stock['Date'])
            #Convert date to epoch datetime format
            df_stock['Date'] = (df_stock['Date'] - d.datetime(1970,1,1)).dt.total_seconds()
            #Format for plotting requires specific size for date so multiply by 1000
            df_stock['Date'] = df_stock['Date']*1000
            #print(df_stock.head())
            #Gets a list of all files ending in _mod.json
            # NOTE(review): hard-coded absolute path -- breaks on any other machine.
            files = glob.glob("/home/nvombat/Desktop/Investment-WebApp/analysis/data/*_mod.json")
            #If there is such a file (list is not empty)
            if len(files)!=0:
                #Extract the file name of that particular file
                file_rem = Path(files[0]).name
                #print("FILE BEING DELETED IS:", file_rem)
                #Get the path of that file
                location = "/home/nvombat/Desktop/Investment-WebApp/analysis/data/"
                # NOTE(review): this rebinds 'path' locally, shadowing the
                # module-level database path name -- harmless here but confusing.
                path = os.path.join(location, file_rem)
                #Delete the file
                os.remove(path)
            #We delete the file to make sure that at any given time there is only one file that can be plotted
            #As the plotting function chooses the first file from the directory (files[0])
            #Thus if we have more than one file we may not end up plotting the correct data
            #Convert to json format and make sure its converted as json with arrays thus orient = values
            df_stock.to_json("/home/nvombat/Desktop/Investment-WebApp/analysis/data/"+stock_id+"_mod.json", orient='values')
            #return redirect(url_for("inv"))
            return render_template('inv.html', name=stock_id)
        return render_template('inv.html')
    # Redirects to login page if g.user is empty -> No user signed in
    return redirect('/')
# ABOUT US page
@app.route('/about')
def about():
    """About page; requires a signed-in user."""
    if not g.user:
        # No user in session -> back to the login page
        return redirect('/')
    return render_template('about.html')
# TRADING GUIDE page
@app.route('/doc')
def doc():
    """Trading-guide page; requires a signed-in user."""
    if not g.user:
        # No user in session -> back to the login page
        return redirect('/')
    return render_template('doc.html')
# TRADE page
@app.route('/trade', methods=["GET", "POST"])
def trade():
    """Buy/sell/price-check page: three submit buttons (b1/s1/p1) on one form."""
    # Enters the page only if a user is signed in - g.user represents the current user
    #print(g.user)
    if g.user:
        '''
        uses the user email id to query the users transactions
        this transactions array is then received by the table on the html page
        '''
        user_email = g.user
        transactions = stock.query(user_email[0], path)
        if request.method == "POST":
            '''
            If a post request is generated (button clicked) the user wants to buy or sell stocks
            It is then checked whether the user wants to buy or sell (based on the button pressed)
            '''
            # BUYING
            if request.form.get("b1"):
                # The data from the fields on the page are fetched
                symb = request.form["stockid"]
                quant = request.form["amount"]
                '''
                The stock symbol entered is capitalised as all symbols are always capitalized
                The stock symbol is checked for validity
                Then the current date and time is calculated
                Then the quantity is stored as an integer
                The stock price api/stock price function is called to calculate the price of that particular stock
                The total amount of money spent is then calculated using price and quantity
                The format of price and total is adjusted to 2 decimal places
                The STOCK TABLE is then updated with this data using the buy function
                A mail is sent to the user alerting them of the transaction made
                The user is now redirected back to the trade page - we use redirect to make sure a get request is generated
                '''
                print("BUYING")
                symb = symb.upper()
                #Check if the stock symbol is valid
                if symb in symbols:
                    date = d.datetime.now()
                    date = date.strftime("%m/%d/%Y, %H:%M:%S")
                    # NOTE(review): int() raises ValueError on a non-numeric
                    # amount -- verify the form enforces digits-only input.
                    quant = int(quant)
                    #print("AMOUNT", quant)
                    #stock_price = getdata(close='close', symbol=symb)[0]
                    stock_price = get_current_price(symb)
                    #print("STOCK PRICE", stock_price)
                    total = quant * stock_price
                    stock_price = "{:.2f}".format(stock_price)
                    total = "{:.2f}".format(total)
                    #print("You have spent $", total)
                    #print("USER EMAIL:", user_email)
                    stock.buy("stock", (date, symb, stock_price, quant, user_email[0]), path)
                    data = (symb, stock_price, quant, total, user_email[0], date)
                    send_buy(path, data)
                    #print("TRANSACTIONS: ", transactions)
                    # Redirect submits a get request (200) thus cancelling the usual post request generated by the
                    # browser when a page is refreshed
                    return redirect(url_for("trade"))
                #If stock symbol is invalid
                else:
                    #Return to page with error
                    return render_template('trade.html', error="Incorrect Stock Symbol. Please Enter Valid Symbol", transactions=transactions)
            # SELLING
            elif request.form.get("s1"):
                # The data from the fields on the page are fetched
                symb = request.form["stockid"]
                quant = request.form["amount"]
                '''
                The stock symbol entered is capitalised as all symbols are always capitalized
                The stock symbol is checked for validity
                Then the quantity is stored as an integer
                The stock price api/stock price function is called to calculate the price of that particular stock
                The total amount of money received is then calculated using price and quantity
                The format of price and total is adjusted to 2 decimal places
                The STOCK TABLE is then updated with this data using the sell function
                A mail is sent to the user alerting them of the transaction made
                The user is now redirected back to the trade page - we use redirect to make sure a get request is generated
                '''
                symb = symb.upper()
                if symb in symbols:
                    print("SELLING")
                    #print("DELETING SYMBOL:", symb)
                    quant = int(quant)
                    #print("AMOUNT", quant)
                    #stock_price = getdata(close='close', symbol=symb)[0]
                    stock_price = get_current_price(symb)
                    #print("STOCK PRICE", stock_price)
                    total = quant * stock_price
                    #print("You have received $", total)
                    stock_price = "{:.2f}".format(stock_price)
                    total = "{:.2f}".format(total)
                    date = d.datetime.now()
                    date = date.strftime("%m/%d/%Y, %H:%M:%S")
                    data = (symb, quant, user_email[0])
                    stock.sell("stock", data, path)
                    mail_data = (symb, stock_price, quant, total, user_email[0], date)
                    send_sell(path, mail_data)
                    return redirect(url_for("trade"))
                #If stock symbol is invalid
                else:
                    return render_template('trade.html', error="Incorrect Stock Symbol. Please Enter Valid Symbol", transactions=transactions)
            # FIND PRICE
            elif request.form.get("p1"):
                # The data from the fields on the page are fetched
                sym = request.form["stockid"]
                quant = request.form["amount"]
                '''
                If the user wants to find the price of a stock they can enter the symbol they want to find the price for
                and the amount
                The stock symbol entered is capitalised as all symbols are always capitalized
                The API/Function fetches the price and then returns the value
                The format of price and total is adjusted to 2 decimal places
                The user is then given the price of that stock for the amount they entered
                '''
                sym = sym.upper()
                #First we check if the stock symbol is valid
                if sym in symbols:
                    print("PRICE")
                    quant = int(quant)
                    #print("AMOUNT", quant)
                    #price = getdata(close='close', symbol=sym)[0]
                    price = get_current_price(sym)
                    #print("PRICE:", price)
                    price = float(price)
                    total = quant * price
                    #print("Total cost is $", total)
                    price = "{:.2f}".format(price)
                    total = "{:.2f}".format(total)
                    quant = str(quant)
                    price = str(price)
                    total = str(total)
                    # Message with price for amount entered and per unit as well
                    err_str = "The price for " + quant + " unit(s) of " + sym + " Stock is $ " + total + " at $ " + price + " per unit"
                    #print(transactions)
                    # render template because we want the table to show and the message
                    return render_template('trade.html', transactions=transactions, error=err_str)
                #If stock symbol is invalid
                else:
                    return render_template('trade.html', error="Incorrect Stock Symbol. Please Enter Valid Symbol", transactions=transactions)
        return render_template('trade.html', transactions=transactions)
    # Redirects to login page if g.user is empty -> No user signed in
    return redirect('/')
# CONTACT US page
@app.route('/contact', methods=["GET", "POST"])
def contact():
    """Contact-us form: stores the message if the entered e-mail matches the
    signed-in user."""
    # Enters the page only if a user is signed in - g.user represents the current user
    if g.user:
        """
        If a post request is generated (when user clicks submit)
        The email and message are fetched from the input fields
        The entered email is then checked with the database to make sure it matches the user and the user exists
        If the emails dont match it generates an error and if it does match then we insert data into contact table
        Redirects to login page if g.user is empty -> No user signed in
        """
        if request.method == "POST":
            print("CONTACT US")
            email = request.form["email"]
            #print(email)
            msg = request.form["message"]
            user_email = g.user
            curr_user = user_email[0]
            #print(curr_user)
            if users.check_contact_us(path, email, curr_user):
                #print("Correct Email")
                contactus.insert(email, msg, path)
                return render_template('contact.html', error="Thank you, We will get back to you shortly")
            else:
                #print("Incorrect Email")
                return render_template('contact.html', error="Incorrect Email!")
        return render_template("contact.html")
    return redirect('/')
'''
Function sends data (in json format) to the plotting function
Gets a list of all files in the data folder which have a _mod.json ending
If there are no such files then plot AAPL as the default graph
If there is such a file - sends json file containing data to be plotted
'''
@app.route('/pipe', methods=["GET", "POST"])
def pipe():
    """Serve the JSON price series the front-end chart should render."""
    candidates = glob.glob("/home/nvombat/Desktop/Investment-WebApp/analysis/data/*_mod.json")
    # Fall back to the bundled AAPL data when no stock has been analysed yet.
    if candidates:
        source = candidates[0]
    else:
        source = "/home/nvombat/Desktop/Investment-WebApp/analysis/data/AAPL.json"
    with open(source) as f:
        series = json.load(f)
    return {"res": series}
if __name__ == '__main__':
    # Development server only; run behind a WSGI server in production.
    app.run(debug=True, port=8000)
from django.utils.translation import ugettext_lazy as _
from jet.dashboard import modules
from jet.dashboard.dashboard import Dashboard, AppIndexDashboard
from residents import dashboard_modules
class CustomIndexDashboard(Dashboard):
    """Admin landing dashboard: resident totals, pending requests, community."""
    columns = 3

    def init_with_context(self, context):
        # Register the four widgets in one pass.
        widgets = [
            dashboard_modules.Total_Residents(_('Total Residents')),
            dashboard_modules.Pending_Request(_('Total Pending Resident Requests')),
            dashboard_modules.Pending_Family_Request(_('Total Pending Resident\'s Family Requests')),
            dashboard_modules.Display_Community(_('Community')),
        ]
        self.children.extend(widgets)
#!/usr/bin/env python3
# Created by Amir Mersad
# Created on October 2019
# This program gives you the number of the day in the week
# Lookup table: index 1..7 -> weekday name (index 0 is unused padding).
_DAY_NAMES = (None, "Monday", "Tuesday", "Wednesday", "Thursday",
              "Friday", "Saturday", "Sunday")


def day_name(number):
    """Return the weekday name for 1 <= number <= 7, else None."""
    if 1 <= number <= 7:
        return _DAY_NAMES[number]
    return None


def main():
    """Read a day number from stdin and print the corresponding weekday."""
    number_int = input("Please enter the number of the day: ")
    try:
        number = int(number_int)
    except ValueError:
        # Narrowed from the original bare `except(Exception)`, which could
        # also hide unrelated errors.
        print("Wrong input!!!")
        return
    name = day_name(number)
    if name is not None:
        print(name)
    else:
        print("Please enter a number between 1 and 7!")


if __name__ == "__main__":
    main()
|
import socket
def main():
    """Minimal interactive TCP client: connect, send one message, close."""
    # 1. Create the TCP socket
    tcp_client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # 2. Establish the connection (prompts are in Chinese: server IP / port)
    server_ip = input("请输入tcp server的ip:")
    server_port = int(input("请输入tcp server的port:"))
    addr = (server_ip, server_port)
    tcp_client_sock.connect(addr)
    # 3. Send the data, GBK-encoded for the Chinese-locale server
    send_data = input("请输入你要发送的数据:")
    tcp_client_sock.send(send_data.encode('gbk'))
    # 4. Close the socket
    tcp_client_sock.close()


if __name__ =='__main__':
    main()
|
#!/usr/bin/env python
import rospy
import time
from rover_control.msg import TowerPanTiltControlMessage
# Topic the chassis pan/tilt driver subscribes to.
DEFAULT_TOWER_PAN_TILT_CONTROL_TOPIC = "chassis/pan_tilt/control"
rospy.init_node("chassis_pan_tilt_tester")
publisher = rospy.Publisher(DEFAULT_TOWER_PAN_TILT_CONTROL_TOPIC, TowerPanTiltControlMessage, queue_size=1)
# Give the publisher time to register before the first publish.
time.sleep(2)
# First command: sets the should_center flag.
message = TowerPanTiltControlMessage()
message.should_center = 1
publisher.publish(message)
time.sleep(1)
# Second command: relative pan/tilt adjustments.
message = TowerPanTiltControlMessage()
message.relative_pan_adjustment = -100
message.relative_tilt_adjustment = -500
publisher.publish(message)
#Some helper functions from https://www.kaggle.com/peterchang77/exploratory-data-analysis
import pandas as pd
import pydicom
import numpy as np
import pylab
from sklearn import svm
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from skimage.measure import block_reduce
import matplotlib.pyplot as plt
from matplotlib import cm
import os
def parse_data(df, image_folder, lung_boxes):
    """
    Build a per-patient dictionary from the label CSV.

    Only patients that also have lung annotations in *lung_boxes* are kept:

        parsed = {
            '<patientId>': {
                'dicom': path to the patient's DICOM file,
                'label': 0 (normal) or 1 (pneumonia),
                'p_boxes': list of pneumonia boxes [y, x, height, width],
                'lung_boxes': lung boxes copied from lung_boxes[pid]['lungs'],
            }, ...
        }
    """
    parsed = {}
    for _, row in df.iterrows():
        pid = row['patientId']
        if pid not in lung_boxes:
            # Skip patients without annotated lungs.
            continue
        if pid not in parsed:
            parsed[pid] = {
                'dicom': '%s/%s.dcm' % (image_folder, pid),
                'label': row['Target'],
                'p_boxes': [],
                'lung_boxes': lung_boxes[pid]['lungs'],
            }
        # Each positive row contributes one opacity box.
        if parsed[pid]['label'] == 1:
            parsed[pid]['p_boxes'].append(
                [row['y'], row['x'], row['height'], row['width']])
    return parsed
def draw(data, predictions = None, sub = False):
    """
    Method to draw single patient with bounding box(es) if present.

    data: an entry from parse_data() (full image, sub=False) or from
          split_to_sub_samples() (pre-extracted tile, sub=True).
    predictions: optional flat list of n*n per-cell flags; truthy cells
                 are recoloured via overlay_color().
    """
    n = 8  # image is treated as an n x n grid of cells
    if sub is False:
        # --- Open DICOM file
        d = pydicom.read_file(data['dicom'])
        im = d.pixel_array
    else:
        im = data['im']
    # --- Convert from single-channel grayscale to 3-channel RGB
    im = np.stack([im] * 3, axis=2)
    rgb1 = [220, 20, 60]  # colour used for pneumonia boxes
    rgb2 = [46, 139, 87]  # colour used for lung boxes
    if sub is False:
        # --- Add pnuemonia boxes with random color if present
        for box in data['p_boxes']:
            # rgb = np.floor(np.random.rand(3) * 256).astype('int')
            im = overlay_box(im=im, box=box, rgb=rgb1, stroke=6)
        # --- Add lung boxes with random color if present
        for box in data['lung_boxes']:
            # rgb = np.floor(np.random.rand(3) * 256).astype('int')
            im = overlay_box(im=im, box=box, rgb=rgb2, stroke=6)
    if predictions is not None:
        for i in range(n*n):
            if predictions[i]:
                overlay_color(im=im, index=i)
    # pylab.imshow(im, cmap=pylab.cm.gist_gray)
    pylab.imshow(im)
    pylab.axis('off')
    pylab.show()
def overlay_box(im, box, rgb, stroke=1):
    """Draw the outline of one [y, x, height, width] box onto *im* in place.

    The outline is *stroke* pixels thick and painted with colour *rgb*;
    the (modified) image is also returned.
    """
    top, left, height, width = (int(v) for v in box)
    bottom = top + height
    right = left + width
    # Horizontal edges.
    im[top:top + stroke, left:right] = rgb
    im[bottom:bottom + stroke, left:right] = rgb
    # Vertical edges.
    im[top:bottom, left:left + stroke] = rgb
    im[top:bottom, right:right + stroke] = rgb
    return im
def overlay_color(im, index):
    """
    Recolour one cell of the 8x8 grid through a matplotlib colormap.

    index: flat cell index (row-major) into the 8x8 grid of an
    (assumed) 1024x1024 RGB image -- TODO confirm image size with callers.
    The cell's grayscale intensity is mapped through the 'winter' colormap
    and written back into the image in place.
    """
    # --- Extract coordinates
    n = 8
    l = 1024//n
    sx1 = int((index % n)*l)
    sx2 = int(sx1 + l)
    sy1 = int((index // n)*l)
    sy2 = int(sy1 + l)
    # rgb = cm.get_cmap(plt.get_cmap('Blues')).to_rgba([20])
    cmap = cm.get_cmap('winter')
    sm = cm.ScalarMappable(cmap=cmap)
    rgb = sm.to_rgba([1])
    # print("RGB:")
    # print(cmap)
    # print(rgb)
    for y in range(sy1, sy2):
        for x in range(sx1, sx2):
            # Normalise the red channel to [0, 1] before colormapping.
            grey = im[y, x][0]/255
            # print(grey)
            rgba = sm.to_rgba(grey, bytes=True, norm=False)
            # print(rgba)
            im[y, x][0] = rgba[0]
            im[y, x][1] = rgba[1]
            im[y, x][2] = rgba[2]
    return im
# def sub_image_bools(boxes):
def get_lung_boxes(lung_folder, file_list=None):
    """Load YOLO-style lung annotations from ``<lung_folder>/<pid>.txt`` files.

    file_list: optional iterable of patient IDs; when None, every
    non-hidden file in *lung_folder* is used.
    Each annotation line is either "cls xc yc w h" (5 fields) or
    "xc yc w h" (4 fields), with coordinates normalised to [0, 1].
    Returns {pid: {'lungs': [[y, x, height, width], ...]}} in pixels.
    """
    size = 1024  # images are 1024x1024; annotations are normalised
    if file_list is None:  # BUG FIX: was `== None`
        IDs = [os.path.splitext(fn)[0]
               for fn in os.listdir(lung_folder)
               if not fn.startswith('.')]
    else:
        IDs = list(file_list)
    lung_boxes = {}
    for pid in IDs:
        boxes = lung_boxes.setdefault(pid, {'lungs': []})['lungs']
        # `with` guarantees the file is closed even if parsing raises
        # (the original only closed it on the happy path).
        with open('%s/%s.txt' % (lung_folder, pid), 'r') as f:
            for line in f:
                l = line.split()
                if len(l) == 5:
                    # class x_center y_center width height
                    h = int(float(l[4]) * size)
                    w = int(float(l[3]) * size)
                    # BUG FIX: h/2 produced float coordinates under
                    # Python 3; use integer halving as the py2 code did.
                    y = int(float(l[2]) * size) - h // 2
                    x = int(float(l[1]) * size) - w // 2
                    boxes.append([y, x, h, w])
                elif len(l) == 4:
                    # x_center y_center width height (no class field)
                    h = int(float(l[3]) * size)
                    w = int(float(l[2]) * size)
                    y = int(float(l[1]) * size) - h // 2
                    x = int(float(l[0]) * size) - w // 2
                    boxes.append([y, x, h, w])
    return lung_boxes
def in_box(index, boxes, partial):
    """Classify grid cell *index* against a list of [y, x, h, w] boxes.

    Returns 1 when all four cell corners lie strictly inside some box,
    0.5 when only some corners do and *partial* is truthy (1 otherwise),
    and 0 when no box touches the cell.
    """
    n = 8
    cell = 1024 // n
    left = (index % n) * cell
    right = left + cell - 1
    top = (index // n) * cell
    bottom = top + cell - 1
    corners = ((left, top), (right, top), (right, bottom), (left, bottom))
    for y1, x1, height, width in boxes:
        y2 = y1 + height
        x2 = x1 + width
        # Strict inequalities: corners on the box border do not count.
        inside = [x1 < cx < x2 and y1 < cy < y2 for cx, cy in corners]
        if all(inside):
            return 1
        if any(inside):
            return 0.5 if partial else 1
    return 0
def split_to_sub_samples(parsed, full=True, uneven_split=True):
    """
    Split each 1024x1024 patient image into an 8x8 grid of tiles.

    full: keep every tile when True, otherwise only tiles overlapping a
          lung box.
    uneven_split: when False, non-pneumonia tiles are only added while
          they do not outnumber pneumonia tiles (crude class balancing).
    Returns a dict keyed by '<patientId>_<tile index>'.
    """
    n = 8
    size = 1024//n
    split_parsed = {}
    p_count=0   # tiles overlapping a pneumonia box
    np_count=0  # tiles without pneumonia
    for each in iter(parsed):
        dcm_file = parsed[each]['dicom']
        dcm_data = pydicom.read_file(dcm_file)
        im = dcm_data.pixel_array
        h, w = im.shape
        # Reshape the image into n*n tiles of size x size (row-major).
        new_im = (im.reshape(h//size, size, -1, size).swapaxes(1,2).reshape(-1, size, size))
        for i in range(n*n):
            pid = each + '_' + str(i)
            in_pnuemonia = in_box(i, parsed[each]['p_boxes'], False)
            in_lung = in_box(i, parsed[each]['lung_boxes'], True)
            if uneven_split or p_count > np_count or in_pnuemonia:
                if full or in_lung:
                    split_parsed[pid] = {
                        'pid': each,
                        'index': i,
                        'dicom': parsed[each]['dicom'],
                        'im': new_im[i],
                        # 4x4 mean-pooled thumbnail (16 values), used as
                        # neighbour context by generate_samples(adj=True).
                        'im_small': block_reduce(new_im[i], block_size=(size//4, size//4), func=np.mean).flatten(),
                        'label': parsed[each]['label'],
                        'p_boxes': in_pnuemonia,
                        'lung_boxes': in_lung,
                        'age': dcm_data.PatientAge,
                        'sex': 0 if dcm_data.PatientSex=='M' else 1}
                    if in_pnuemonia:
                        p_count += 1
                    else:
                        np_count += 1
    print("p_count: %s, np_count: %s" %(p_count, np_count))
    return split_parsed
def generate_samples(split_parsed, adj = False):
    """Turn split tiles into flat feature vectors for the SVM.

    Feature layout: flattened tile image [+ 4 neighbour thumbnails when
    adj=True] + (age, sex, in_lung).  Labels are the per-tile pneumonia
    indicators.  Returns (IDs, scaled_samples, labels).
    """
    n = 8
    IDs = []
    samples = []
    labels = []
    for each in split_parsed:
        flat_img = split_parsed[each]['im'].flatten()
        age = split_parsed[each]['age']
        sex = split_parsed[each]['sex']
        in_lung = split_parsed[each]['lung_boxes']
        if adj:
            # Collect the 4-neighbour thumbnails in (left, up, right, down)
            # order; off-grid neighbours contribute an all-zero placeholder.
            adj_array = []
            pid = split_parsed[each]['pid']
            ic = split_parsed[each]['index']
            i = [ic - 1, ic - n, ic + 1, ic + n]
            if ic % n == 0:
                i[0] = -1            # left edge: no left neighbour
            elif (ic + 1) % n == 0:
                i[2] = -1            # right edge
            if ic // n == 0:
                i[1] = -1            # top row
            elif ic // n == n - 1:
                i[3] = -1            # bottom row
            for j in range(4):
                if i[j] == -1:
                    adj_array.append(np.zeros(16))
                else:
                    adj_array.append(split_parsed[pid + '_' + str(i[j])]['im_small'])
            # BUG FIX: np.append() takes a single `values` argument; the old
            # call passed each neighbour positionally and accidentally
            # *called* adj_array[3](age, sex, in_lung).  Concatenate the
            # pieces explicitly instead.
            sample = np.concatenate([flat_img] + adj_array)
            sample = np.append(sample, (age, sex, in_lung))
        else:
            sample = np.append(flat_img, (age, sex, in_lung))
        samples.append(sample)
        labels.append(split_parsed[each]['p_boxes'])
        IDs.append(each)
    # scale data before returning
    scaled_samples = preprocessing.scale(samples)
    return IDs, scaled_samples, labels
def score_on_positive(labels, predicted):
    """Recall on the positive class: fraction of label==1 samples predicted 1.

    Returns 0.0 when there are no positive labels (the original raised
    ZeroDivisionError in that case).
    """
    total = 0
    correct = 0
    for label, prediction in zip(labels, predicted):
        if label == 1:
            total += 1
            if prediction == label:
                correct += 1
    return correct / total if total else 0.0
# Stage-2 training labels: patientId, box coords (x, y, width, height), Target.
df = pd.read_csv('all/stage_2_train_labels.csv')
#
# patientId = df['patientId'][0]
# dcm_file = 'all/stage_2_train_images/%s.dcm' % patientId
# dcm_data = pydicom.read_file(dcm_file)
# print(dcm_data)
#
# im = dcm_data.pixel_array
# # print(type(im))
# # print(im.dtype)
# # print(im.shape)
#
# pylab.imshow(im, cmap=pylab.cm.gist_gray)
# lung_boxes = get_lung_boxes('sarah_annotated_lungs')
# parsed = parse_data(df, 'all/stage_2_train_images', lung_boxes)
# pylab.axis('off')
#
# pylab.show()
# print(np.amax(im))
# parsed = parse_data(df, 'all/stage_2_train_images')
#
# dcm_data = pydicom.read_file(parsed['00436515-870c-4b36-a041-de91049b9ab4']['dicom'])
#
# print(dcm_data.PatientAge)
#
# print(df)
# ID = 'f0a1f49a-f67f-4716-97e0-840c83610f6f'
# draw(parsed[ID])
test_list = []
train_list = []
# Held-out patient IDs, one per line.
f=open('test_list.txt', 'r')
f_lines = f.readlines()
for each in f_lines:
    test_list.append(each.rstrip())
#
# Every non-test image becomes training data, capped at 3001 patients.
for filename in os.listdir('all/stage_2_train_images'):
    ID, file_extension = os.path.splitext(filename)
    if ID not in test_list and len(train_list) < 3001:
        # if ID not in test_list:
        train_list.append(ID)
# print(train_list)
#
# Build the held-out test features from the method-1 lung boxes.
print("Generating Test Set Method 1")
lung_boxes = get_lung_boxes('006_train_lungs', file_list = test_list)
# lung_boxes = get_lung_boxes('reduced1')
parsed = parse_data(df, 'all/stage_2_train_images', lung_boxes)
split_parsed1 = split_to_sub_samples(parsed)
# BUG FIX: this line referenced `split_parsed`, which is not defined until
# much later in the script -- it must consume the `split_parsed1` produced
# on the previous line (NameError otherwise).
testIDs, xtest2, ytest2 = generate_samples(split_parsed1)
#
# print("Generating Test Set Method 2")
# lung_boxes = get_lung_boxes('boxresults_text_train', file_list = test_list)
# # lung_boxes = get_lung_boxes('reduced2')
# # print(len(lung_boxes))
# parsed = parse_data(df, 'all/stage_2_train_images', lung_boxes)
# split_parsed = split_to_sub_samples(parsed)
# testIDs, xtest2, ytest2 = generate_samples(split_parsed)
#
# print("Training with hand annotated")
# lung_boxes = get_lung_boxes('sarah_annotated_lungs')
# parsed = parse_data(df, 'all/stage_2_train_images', lung_boxes)
# split_parsed = split_to_sub_samples(parsed, full=True)
# predictIDs, x0, y0 = generate_samples(split_parsed)
# print(np.linalg.norm(x0[20]-x0[30]))
# C_2d_range = [1e-2, 1, 1e2]
# gamma_2d_range = [1e-1, 1, 1e1]
#
# print("Testing on Method 1 with linear svc")
# for c in C_2d_range:
# for gamma in gamma_2d_range:
# print("C = " + str(c) + " gamma = " + str(gamma))
# model = svm.SVC(C = c, kernel = 'linear', gamma = gamma)
# model.fit(x0, y0)
# predicted = model.predict(xtest2)
# # np.savez('output1', predictIDs, x0, y0, predicted)
# total_score = accuracy_score(ytest2, predicted)
# print("Score on total: " + str(total_score))
#
# positive_score = score_on_positive(ytest2, predicted)
# print("Score on positive: " + str(positive_score))
# data = np.load('output1.npz')
# print(np.average(data['arr_3']))
# print(np.average(data['arr_2']))
# sample_predictions = [
# 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 1, 1, 1, 0, 0, 0, 0,
# 0, 1, 1, 1, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0]
# for each in parsed:
# # ID = data['predictIDs'][i*64].split('_')[0]
# # drawPredictions = data['predicted'][64*i:64*(1+1)]
# print(each)
# draw(parsed[each])
# pylab.show()
# for each in parsed:
# print(each)
# print(parsed[each]['label'])
# if parsed[each]['label']:
# draw(parsed[each])
# test_id = 'f002b136-7acc-42e3-b989-071c6266fd60'
# draw(split_parsed['f002b136-7acc-42e3-b989-071c6266fd60_27'], sub=True)
# f002b136-7acc-42e3-b989-071c6266fd60
# ID= 'f002b136-7acc-42e3-b989-071c6266fd60'
#
# draw(parsed[ID], sample_predictions)
# pylab.show()
# print("Score: %s" % score)
# print("Testing on Method 2")
# predicted = model.predict(xtest2)
# score = accuracy_score(ytest2, predicted)
# print("Score: %s" % score)
# print("Generating Train Set Method 1")
# lung_boxes = get_lung_boxes('006_train_lungs', file_list = train_list)
# parsed = parse_data(df, 'all/stage_2_train_images', lung_boxes)
# split_parsed = split_to_sub_samples(parsed)
# xtrain1, ytrain1 = generate_samples(split_parsed)
# model = svm.SVC()
# model.fit(xtrain1, ytrain1)
# print("Testing on Method 1")
# predicted = model.predict(xtest1)
# score = accuracy_score(ytest1, predicted)
# print("Score: %s" % score)
# --- Train an SVM on class-balanced, lung-only tiles, then evaluate. ---
print("Generating Train Set Method 1 even split")
# lung_boxes = get_lung_boxes('006_train_lungs', file_list = train_list)
# lung_boxes = get_lung_boxes('boxresults_text_train', file_list = train_list)
lung_boxes = get_lung_boxes('006_train_lungs', file_list = train_list)
parsed = parse_data(df, 'all/stage_2_train_images', lung_boxes)
split_parsed = split_to_sub_samples(parsed, full=False, uneven_split=False)
trainIDs, xtrain2, ytrain2 = generate_samples(split_parsed)
model = svm.SVC(C=1)
print("fiting on Method 1")
model.fit(xtrain2, ytrain2)
print("Testing on Method 1")
predicted = model.predict(xtest2)
score = accuracy_score(ytest2, predicted)
positive_score = score_on_positive(ytest2, predicted)
print("Score on positive: " + str(positive_score))
print("Score: %s" % score)
np.savez('output_full_for_print', xtrain2, ytrain2, testIDs, xtest2, ytest2, predicted)
# Print the 8x8 grid of per-tile pneumonia labels and draw each test patient.
for each in test_list:
    test_id = each
    pnuemonia_labels = []
    lung_labels = []
    for i in range(64):
        id = test_id +'_' +str(i)
        lung_labels.append(split_parsed[id]['lung_boxes'])
        pnuemonia_labels.append(split_parsed[id]['p_boxes'])
        if i % 8 == 0:
            print('\n')
        print(pnuemonia_labels[i], end = ' ')
    # draw(parsed[test_id], lung_labels)
    draw(parsed[test_id], pnuemonia_labels)
#
# draw(parsed['f0a1f49a-f67f-4716-97e0-840c83610f6f'])
# pylab.show()
# im = split_parsed['f0a1f49a-f67f-4716-97e0-840c83610f6f_2']['im']
#
# im = np.stack([im] * 3, axis=2)
#
# pylab.imshow(im, cmap=pylab.cm.gist_gray)
# pylab.axis('off')
# print(len(split_parsed))
# pylab.show()
|
import numpy as np
import gym
from collections import defaultdict
from tensorboardX import SummaryWriter
class PI_Agent(object):
    """Tabular Policy Iteration agent for discrete gym envs (e.g. Taxi-v3).

    Requires an env exposing nS, nA and the transition table
    env.P[state][action] = [(prob, next_state, reward, done), ...].
    """
    def __init__(self, env, maxItr=10000, pItr = 10, gamma=0.95):
        # maxItr: cap on improvement iterations; pItr: evaluation sweeps
        # per iteration; gamma: discount factor.
        self.env = env
        self.maxItr = maxItr
        self.pItr = pItr
        self.gamma = gamma
        self.stateValue = defaultdict(lambda: 0)  # V(s), default 0
        self.policy = defaultdict(lambda: 0)      # greedy action per state
    def Policy_Evaluation(self):
        """Evaluate V under the current policy (at most pItr sweeps)."""
        for itr in range(self.pItr):
            delta = 0
            for state in range(self.env.nS): # for all state S
                state_value = 0
                # Expected return over the env's transition tuples for the
                # action chosen by the current policy.
                for i in range(len(self.env.P[state][self.policy[state]])):
                    prob, next_state, reward, done = self.env.P[state][self.policy[state]][i]
                    state_value += (prob * (reward + self.gamma*self.stateValue[next_state]))
                delta = max(delta,abs(self.stateValue[state]-state_value))
                self.stateValue[state] = state_value #update the value of the state
            if delta < 1e-04:
                # Values converged for this policy; stop sweeping early.
                break
    def Policy_Improvement(self):
        """Greedily update the policy from V; return True when unchanged."""
        Done = True
        for state in range(self.env.nS):
            action_values = []
            for action in range(self.env.nA):
                action_value = 0
                for i in range(len(self.env.P[state][action])):
                    prob, next_state, r, _ = self.env.P[state][action][i]
                    action_value += prob * (r + self.gamma * self.stateValue[next_state])
                action_values.append(action_value)
            best_action = np.argmax(np.asarray(action_values))
            if(self.policy[state] != best_action):
                Done = False
            self.policy[state] = best_action
        return Done
    def learn(self, eval = 1, maxStep = 1000):
        """Run policy iteration until stable, logging metrics to TensorBoard.

        eval: evaluate every `eval` iterations (NOTE: shadows the builtin).
        maxStep: per-episode step cap used during evaluation rollouts.
        """
        self.stateValue = defaultdict(lambda: 0)
        self.policy = defaultdict(lambda: 0)
        Done = False
        writer = SummaryWriter(comment="DPPI")
        self.eval(writer,0, episodes=1000,maxStep = maxStep)
        for itr in range(self.maxItr):
            self.Policy_Evaluation()
            Done = self.Policy_Improvement()
            if itr%eval == 0:
                self.eval(writer,itr, episodes=1000,maxStep = maxStep)
            if Done:
                # Policy stable: policy iteration has converged.
                break
        self.eval(writer,self.maxItr, episodes=1000,maxStep = maxStep)
    def eval(self, writer, itr, episodes=1000, maxStep = 1000):
        """Roll out the greedy policy; log mean episode length and reward."""
        score = 0
        steps_list = []
        for episode in range(episodes):
            observation = self.env.reset()
            steps=0
            while True:
                action = self.policy[observation]
                observation, reward, done, _ = self.env.step(action)
                steps+=1
                score+=reward
                if done:
                    steps_list.append(steps)
                    break
                if steps>maxStep:
                    # Abandon episodes that run too long.
                    steps_list.append(steps)
                    break
        print('----------------------------------------------')
        print('You took an average of {:.0f} steps'.format(np.mean(steps_list)))
        print('Average reward {:.2f}'.format((score/episodes)))
        print('----------------------------------------------')
        writer.add_scalar("Episode Length",np.mean(steps_list),itr)
        writer.add_scalar("Reward",score/episodes,itr)
if __name__ == "__main__":
    # Train a policy-iteration agent on Taxi-v3 (.env unwraps the TimeLimit).
    env = gym.make("Taxi-v3").env
    agent = PI_Agent(env)
    agent.learn()
|
class OS:
    """Operating system that reports itself as Oxygen OS."""

    _NAME = "Oxygen Os"

    def software(self):
        # Print the OS name this object represents.
        print(self._NAME)
class COS:
    """Operating system that reports itself as HydrogenOS."""

    _NAME = "HydrogenOS"

    def software(self):
        # Print the OS name this object represents.
        print(self._NAME)
class Oneplus5T():
    # Duck-typed: accepts any object exposing a software() method.
    def os(self, rom):
        """Run the supplied ROM's software()."""
        rom.software()
# Demo: a Oneplus5T running HydrogenOS via duck typing.
os = COS()  # NOTE(review): this name shadows the stdlib `os` module name
jn = Oneplus5T()
jn.os(os)
|
import enum
from flask import Flask, request, jsonify
app = Flask(__name__)
# dummy data for testing
# In-memory store; mutated by the POST/PUT/DELETE handlers below.
composers_list = [
    {
        "id" : 1,
        "name" : "John Williams",
        "movies": "Jaws, Star Wars, Jurassic Park, Harry Potter"
    },
    {
        "id": 2,
        "name" : "John Powell",
        "movies": "How To Train Your Dragon"
    },
    {
        "id": 3,
        "name" : "Joe Hisaishi",
        "movies": "Spirited Away, Howl's Moving Castle"
    }]
@app.route('/')
@app.route('/home')
def index():
    """Landing page served at both '/' and '/home'."""
    greeting = 'Hello World'
    return greeting
@app.route('/composers', methods=['GET', 'POST'])
def composers():
    '''Create (POST) and Read (GET)'''
    # Read: return the whole collection, or 404 when it is empty.
    if request.method == 'GET':
        if len(composers_list) > 0:
            return jsonify(composers_list)
        else:
            return 'Nothing\'s there', 404
    # Create: new id is one past the last entry's id.
    if request.method == 'POST':
        # BUG FIX: composers_list[-1] raised IndexError once every composer
        # had been removed via DELETE; restart numbering at 1 in that case.
        id = composers_list[-1]['id'] + 1 if composers_list else 1
        name = request.form['name']
        movies = request.form['movies']
        new_object = {
            "id" : id,
            "name" : name,
            "movies": movies
        }
        composers_list.append(new_object)
        return jsonify(composers_list), 201
@app.route('/composers/id/<int:id>', methods=['GET', 'PUT', 'DELETE'])
def single_composer(id):
    '''Read (GET), Update (PUT) and Delete (DELETE) methods using an id of the composer

    Update (PUT) method will replace all the old data with the new data
    provided, with the exception of id, which will stay the same.
    Responds 404 when no composer has the requested id.
    '''
    # Read
    if request.method == 'GET':
        for composer in composers_list:
            if composer['id'] == id:
                return jsonify(composer)
    # Update
    elif request.method == 'PUT':
        for composer in composers_list:
            if composer['id'] == id:
                composer['name'] = request.form['name']
                composer['movies'] = request.form['movies']
                updated_object = {
                    'id': id,
                    'name': composer['name'],
                    'movies': composer['movies']
                }
                return jsonify(updated_object)
    # Delete
    elif request.method == 'DELETE':
        for index, composer in enumerate(composers_list):
            if composer['id'] == id:
                composers_list.pop(index)
                return jsonify(composers_list)
    # BUG FIX: an unknown id used to fall through and return None, which
    # Flask surfaces as a 500 error; report a 404 instead.
    return 'Composer not found', 404
@app.route('/urltest/<name>/<second_name>')
def print_name(name, second_name):
    """Echo both URL path segments back to the client."""
    return 'URL: .../urltest/{}/{}'.format(name, second_name)
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
|
def fib(n):
    """Print comma-separated Fibonacci numbers strictly less than *n*."""
    a, b = 0, 1
    while a < n:
        print(a, end=",")
        a, b = b, a + b
    # BUG FIX: the trailing bare `print` only referenced the function
    # without calling it (a Python-2 leftover); call it to end the line.
    print()
def main():
    """Demo of printing, input parsing and the fib() helper above."""
    print('hello world')
    b = ['first', 'second', 'third']
    for i in b:
        print(i, end="") # no newline between items
    print()
    put = int(input("请输入你的数字:"))
    print(put)
    print("输入多个整数:")
    # Parse a whitespace-separated line of integers.
    l = list(map(int, input().split()))
    print(l)
    fib(10)
# Run main only when this file is executed as a script.
if __name__ == '__main__':
    main()
#crypto4
# SHA-2 initial hash words (FIPS 180-4, section 5.3); _H384 differs from
# _H512 so truncated SHA-384 digests are domain-separated.
_H256=[0x6a09e667, 0xbb67ae85,0x3c6ef372,0xa54ff53a,
    0x510e527f, 0x9b05688c,0x1f83d9ab,0x5be0cd19]
_H512=[0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
    0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179]
_H384=[0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939,
    0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4]
#Main function of SHA
#@parameters: message(string) < 2^MSG_Size
#
#@return: message synopsis(list of binary) - 512bits
#
#
#
def sha(message, size):
    """Hash *message* (a str) with SHA-2.

    size: digest size in bits (256, 512 or 384).
    Returns a list of hex-string hash words, or -1 for an unsupported size.
    Raises OverflowError when the padded message exceeds 2**MSG_Size bits.
    """
    # PREPARE MESSAGE: pick block size, length-field width and padding target.
    if size / 8 == 32:    # SHA-256
        Block_Size = 512
        MSG_Size = 64
        mod_constant = 448
    elif size / 8 == 64:  # SHA-512
        Block_Size = 1024
        MSG_Size = 128
        mod_constant = 896
    elif size / 8 == 48:  # SHA-384
        Block_Size = 1024
        MSG_Size = 128
        mod_constant = 896
    else:
        return -1
    # Message length in bits, right-aligned in an MSG_Size-bit field.
    length = bin(len(message) * 8)[2:].rjust(MSG_Size, "0")
    binmsg = ''.join(bin(ord(i))[2:].rjust(8, "0") for i in message)
    # Append the mandatory '1' bit, zero-pad to mod_constant (mod
    # Block_Size), then append the length field.
    binmsg += '1'
    binmsg += "0" * ((mod_constant - len(binmsg) % Block_Size) % Block_Size) + length
    if len(binmsg) > 2**MSG_Size:
        raise OverflowError
    if size / 8 == 32:    # SHA-256
        result = _SHA256(binmsg)
    elif size / 8 == 64:  # SHA-512
        result = _SHA512(binmsg)
    elif size / 8 == 48:  # SHA-384
        result = _SHA384(binmsg)
    # BUG FIX: hex(i)[:-1] stripped Python 2's trailing "L"; on Python 3 it
    # chopped off the last hex digit instead.  Format portably.
    binresult = ['0x%x' % i for i in result]
    return binresult
#
#
#
#
def _SHA256(binmsg):
    """SHA-256 core over an already-padded bit-string.

    Returns the 8 32-bit hash words as ints.  Fixes vs the original:
    the message schedule is rebuilt per block, Maj() no longer repeats
    (a & b), each block folds into the running hash (not _H256), and the
    Python-2 "L" literals are gone.
    """
    Block_Size = 512
    Word_Size = 32
    Rounds = 64
    mask = 0xFFFFFFFF

    def rotr(x, y):
        # 32-bit right rotation (local, so this block is self-contained).
        return ((x >> y) | (x << (Word_Size - y))) & mask

    h = list(_H256)
    for i in range(0, len(binmsg) // Block_Size):
        chunk = binmsg[i * Block_Size:i * Block_Size + Block_Size]
        # BUG FIX: the schedule must restart for every block; it used to
        # keep growing across blocks, corrupting multi-block hashes.
        w = [int(chunk[j * Word_Size:j * Word_Size + Word_Size], 2)
             for j in range(len(chunk) // Word_Size)]
        for j in range(16, Rounds):
            sum0 = rotr(w[j-15], 7) ^ rotr(w[j-15], 18) ^ (w[j-15] >> 3)
            sum1 = rotr(w[j-2], 17) ^ rotr(w[j-2], 19) ^ (w[j-2] >> 10)
            w.append((w[j-16] + sum0 + w[j-7] + sum1) & mask)
        a, b, c, d, e, f, g, hh = h
        for j in range(Rounds):
            sum0 = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22)
            # BUG FIX: majority is (a&b)^(a&c)^(b&c); (a&b) was duplicated.
            maj = (a & b) ^ (a & c) ^ (b & c)
            t2 = sum0 + maj
            sum1 = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25)
            ch = (e & f) ^ ((~e) & g)
            t1 = hh + sum1 + ch + _k256[j] + w[j]
            hh = g
            g = f
            f = e
            e = (d + t1) & mask
            d = c
            c = b
            b = a
            a = (t1 + t2) & mask
        # BUG FIX: fold this block into the running hash, not into _H256.
        h = [(x + y) & mask for x, y in zip(h, [a, b, c, d, e, f, g, hh])]
    return h
def _SHA512(binmsg):
    """SHA-512 core over an already-padded bit-string.

    Returns the 8 64-bit hash words as ints.  Fixes vs the original:
    64-bit masking (was 32-bit), correct sigma0 rotation amounts for the
    message schedule, per-block schedule, correct Maj(), and folding each
    block into the running hash instead of _H512.
    """
    Block_Size = 1024
    Word_Size = 64
    Rounds = 80
    mask = 0xFFFFFFFFFFFFFFFF  # BUG FIX: was the 32-bit mask

    def rotr(x, y):
        # 64-bit right rotation (local, so this block is self-contained).
        return ((x >> y) | (x << (Word_Size - y))) & mask

    h = list(_H512)
    for i in range(0, len(binmsg) // Block_Size):
        chunk = binmsg[i * Block_Size:i * Block_Size + Block_Size]
        # BUG FIX: the schedule must restart for every block.
        w = [int(chunk[j * Word_Size:j * Word_Size + Word_Size], 2)
             for j in range(len(chunk) // Word_Size)]
        for j in range(16, Rounds):
            # BUG FIX: sigma0 is rotr1 ^ rotr8 ^ shr7 (was rotr28).
            sum0 = rotr(w[j-15], 1) ^ rotr(w[j-15], 8) ^ (w[j-15] >> 7)
            sum1 = rotr(w[j-2], 19) ^ rotr(w[j-2], 61) ^ (w[j-2] >> 6)
            w.append((w[j-16] + sum0 + w[j-7] + sum1) & mask)
        a, b, c, d, e, f, g, hh = h
        for j in range(Rounds):
            sum0 = rotr(a, 28) ^ rotr(a, 34) ^ rotr(a, 39)
            # BUG FIX: majority is (a&b)^(a&c)^(b&c); (a&b) was duplicated.
            maj = (a & b) ^ (a & c) ^ (b & c)
            t2 = sum0 + maj
            sum1 = rotr(e, 14) ^ rotr(e, 18) ^ rotr(e, 41)
            ch = (e & f) ^ ((~e) & g)
            t1 = hh + sum1 + ch + _k512[j] + w[j]
            hh = g
            g = f
            f = e
            e = (d + t1) & mask
            d = c
            c = b
            b = a
            a = (t1 + t2) & mask
        # BUG FIX: fold this block into the running hash, not into _H512.
        h = [(x + y) & mask for x, y in zip(h, [a, b, c, d, e, f, g, hh])]
    return h
def _SHA384(binmsg):
    """SHA-384 core: SHA-512 rounds with _H384 initial values, truncated
    to the first 6 64-bit words.

    Fixes vs the original: 64-bit masking, correct sigma0 rotations,
    per-block schedule, correct Maj(), and the final addition now uses
    the running _H384-seeded state (it wrongly zipped against _H512).
    """
    Block_Size = 1024
    Word_Size = 64
    Rounds = 80
    mask = 0xFFFFFFFFFFFFFFFF  # BUG FIX: was the 32-bit mask

    def rotr(x, y):
        # 64-bit right rotation (local, so this block is self-contained).
        return ((x >> y) | (x << (Word_Size - y))) & mask

    h = list(_H384)
    for i in range(0, len(binmsg) // Block_Size):
        chunk = binmsg[i * Block_Size:i * Block_Size + Block_Size]
        # BUG FIX: the schedule must restart for every block.
        w = [int(chunk[j * Word_Size:j * Word_Size + Word_Size], 2)
             for j in range(len(chunk) // Word_Size)]
        for j in range(16, Rounds):
            # BUG FIX: sigma0 is rotr1 ^ rotr8 ^ shr7 (was rotr28).
            sum0 = rotr(w[j-15], 1) ^ rotr(w[j-15], 8) ^ (w[j-15] >> 7)
            sum1 = rotr(w[j-2], 19) ^ rotr(w[j-2], 61) ^ (w[j-2] >> 6)
            w.append((w[j-16] + sum0 + w[j-7] + sum1) & mask)
        a, b, c, d, e, f, g, hh = h
        for j in range(Rounds):
            sum0 = rotr(a, 28) ^ rotr(a, 34) ^ rotr(a, 39)
            # BUG FIX: majority is (a&b)^(a&c)^(b&c); (a&b) was duplicated.
            maj = (a & b) ^ (a & c) ^ (b & c)
            t2 = sum0 + maj
            sum1 = rotr(e, 14) ^ rotr(e, 18) ^ rotr(e, 41)
            ch = (e & f) ^ ((~e) & g)
            t1 = hh + sum1 + ch + _k512[j] + w[j]
            hh = g
            g = f
            f = e
            e = (d + t1) & mask
            d = c
            c = b
            b = a
            a = (t1 + t2) & mask
        # BUG FIX: fold into the running hash (was zip(_H512, ...)).
        h = [(x + y) & mask for x, y in zip(h, [a, b, c, d, e, f, g, hh])]
    return h[0:6]
def _rotr(x, y,Word_Size):
return ((x >> y) | (x << (Word_Size-y))) & 0xFFFFFFFFL
# SHA-256 round constants (FIPS 180-4, section 4.2.2): 64 32-bit words.
_k256=[0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]
# SHA-512/384 round constants (FIPS 180-4, section 4.2.3): 80 64-bit words.
_k512= [ 0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc, 0x3956c25bf348b538,
    0x59f111f1b605d019, 0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242, 0x12835b0145706fbe,
    0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2, 0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
    0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3, 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
    0x2de92c6f592b0275, 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5, 0x983e5152ee66dfab,
    0xa831c66d2db43210, 0xb00327c898fb213f, 0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
    0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc, 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed,
    0x53380d139d95b3df, 0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6, 0x92722c851482353b,
    0xa2bfe8a14cf10364, 0xa81a664bbc423001, 0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
    0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8, 0x19a4c116b8d2d0c8, 0x1e376c085141ab53,
    0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb, 0x5b9cca4f7763e373,
    0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc, 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
    0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915, 0xc67178f2e372532b, 0xca273eceea26619c,
    0xd186b8c721c0c207, 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba, 0x0a637dc5a2c898a6,
    0x113f9804bef90dae, 0x1b710b35131c471b, 0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
    0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a, 0x5fcb6fab3ad6faec, 0x6c44198c4a475817]
|
import numpy as np
import random
import json
import sys
from env import make_env, CarRacingMDNRNN
import time
from rnn.rnn import hps_sample, MDNRNN, rnn_init_state, rnn_next_state, rnn_output, rnn_output_size
# Default render flag used when this file is run directly.
render_mode = True
# controls whether we concatenate (z, c, h), etc for features used for car.
MODE_ZCH = 0        # z + cell state + hidden state
MODE_ZC = 1         # z + cell state
MODE_Z = 2          # z only
MODE_Z_HIDDEN = 3   # extra hidden later
MODE_ZH = 4         # z + hidden state
EXP_MODE = MODE_ZH  # feature mode consumed by Controller below
def make_model(load_model=True):
    """Build the car-racing controller.

    load_model is accepted (and currently unused) for backward
    compatibility: main() below calls make_model(load_model=False), which
    raised TypeError against the old zero-argument signature.  Weights are
    loaded separately via Controller.load_model().
    """
    # can be extended in the future.
    controller = Controller()
    return controller
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))

def relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    return np.maximum(x, 0)

def clip(x, lo=0.0, hi=1.0):
    """Clamp x into [lo, hi] elementwise."""
    return np.minimum(hi, np.maximum(lo, x))

def passthru(x):
    """Identity activation."""
    return x

def softmax(x):
    """Numerically stable softmax over axis 0."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=0)

def sample(p):
    """Draw an index from the categorical distribution p."""
    draw = np.random.multinomial(1, p)
    return np.argmax(draw)
class Controller:
    ''' simple one layer model for car racing

    Maps RNN/VAE features to a 3-dim action [steer, gas, brake].  The
    feature size depends on which (z, c, h) combination EXP_MODE selects.
    '''
    def __init__(self):
        self.env_name = "carracing"
        self.input_size = rnn_output_size(EXP_MODE)
        self.z_size = 32
        if EXP_MODE == MODE_Z_HIDDEN: # one hidden layer
            self.hidden_size = 40
            self.weight_hidden = np.random.randn(self.input_size, self.hidden_size)
            self.bias_hidden = np.random.randn(self.hidden_size)
            self.weight_output = np.random.randn(self.hidden_size, 3)
            self.bias_output = np.random.randn(3)
            # (hidden weights + bias) + (output weights + bias)
            self.param_count = ((self.input_size+1)*self.hidden_size) + (self.hidden_size*3+3)
        else:
            # Single linear layer straight to the 3 action outputs.
            self.weight = np.random.randn(self.input_size, 3)
            self.bias = np.random.randn(3)
            self.param_count = (self.input_size)*3+3
        self.render_mode = False
    def get_action(self, h):
        '''
        Map feature vector h to [steer, gas, brake].

        All outputs start tanh-squashed in [-1, 1]; gas is then rescaled
        to [0, 1] and brake clipped to [0, 1].
        '''
        if EXP_MODE == MODE_Z_HIDDEN: # one hidden layer
            h = np.tanh(np.dot(h, self.weight_hidden) + self.bias_hidden)
            action = np.tanh(np.dot(h, self.weight_output) + self.bias_output)
        else:
            action = np.tanh(np.dot(h, self.weight) + self.bias)
        action[1] = (action[1]+1.0) / 2.0
        action[2] = clip(action[2])
        return action
    def set_model_params(self, model_params):
        """Load a flat parameter vector into the layer weights/biases."""
        if EXP_MODE == MODE_Z_HIDDEN: # one hidden layer
            params = np.array(model_params)
            cut_off = (self.input_size+1)*self.hidden_size
            params_1 = params[:cut_off]
            params_2 = params[cut_off:]
            self.bias_hidden = params_1[:self.hidden_size]
            self.weight_hidden = params_1[self.hidden_size:].reshape(self.input_size, self.hidden_size)
            self.bias_output = params_2[:3]
            self.weight_output = params_2[3:].reshape(self.hidden_size, 3)
        else:
            self.bias = np.array(model_params[:3])
            self.weight = np.array(model_params[3:]).reshape(self.input_size, 3)
    def load_model(self, filename):
        """Load parameters from a JSON file whose first element is the flat vector."""
        with open(filename) as f:
            data = json.load(f)
        print('loading file %s' % (filename))
        self.data = data
        model_params = np.array(data[0]) # assuming other stuff is in data
        self.set_model_params(model_params)
    def get_random_model_params(self, stdev=0.1):
        """Sample a random flat parameter vector (heavy-tailed Cauchy)."""
        #return np.random.randn(self.param_count)*stdev
        return np.random.standard_cauchy(self.param_count)*stdev # spice things up
    def init_random_model_params(self, stdev=0.1):
        """Randomize the controller's parameters in place."""
        params = self.get_random_model_params(stdev=stdev)
        self.set_model_params(params)
def simulate(controller, env, train_mode=False, render_mode=True, num_episode=5, seed=-1, max_len=-1):
    """Roll out *controller* in *env* for num_episode episodes.

    train_mode with max_len > 0 caps the episode length; seed >= 0 seeds
    python/numpy/env RNGs for reproducibility.
    Returns (reward_list, t_list): total reward and step count per episode.
    """
    reward_list = []
    t_list = []
    max_episode_length = 1000
    recording_mode = False    # NOTE(review): unused placeholder
    penalize_turning = False
    if train_mode and max_len > 0:
        max_episode_length = max_len
    if (seed >= 0):
        random.seed(seed)
        np.random.seed(seed)
        env.seed(seed)
    for episode in range(num_episode):
        obs = env.reset()
        total_reward = 0.0
        # NOTE(review): filename is computed but never used here
        # (recording appears to be disabled).
        random_generated_int = np.random.randint(2**31-1)
        filename = "record/"+str(random_generated_int)+".npz"
        for t in range(max_episode_length):
            if render_mode:
                env.render("human")
            else:
                env.render('rgb_array')
            action = controller.get_action(obs)
            obs, reward, done, info = env.step(action)
            extra_reward = 0.0 # penalize for turning too frequently
            if train_mode and penalize_turning:
                extra_reward -= np.abs(action[0])/10.0
            reward += extra_reward
            #if (render_mode):
            # print("action", action, "step reward", reward)
            total_reward += reward
            if done:
                break
        if render_mode:
            print("total reward", total_reward, "timesteps", t)
        reward_list.append(total_reward)
        t_list.append(t)
    return reward_list, t_list
def main():
    """CLI entry: `render/norender [model.json] [seed]`; runs evaluation episodes."""
    assert len(sys.argv) > 1, 'python model.py render/norender path_to_mode.json [seed]'
    render_mode_string = str(sys.argv[1])
    if (render_mode_string == "render"):
        render_mode = True
    else:
        render_mode = False
    use_model = False
    if len(sys.argv) > 2:
        use_model = True
        filename = sys.argv[2]
        print("filename", filename)
    the_seed = np.random.randint(10000)
    if len(sys.argv) > 3:
        the_seed = int(sys.argv[3])
        print("seed", the_seed)
    if (use_model):
        model = make_model()
        print('model size', model.param_count)
        # NOTE(review): Controller defines no make_env() method in this
        # file -- this call would raise AttributeError; confirm against the
        # original world-models code.
        model.make_env(render_mode=render_mode)
        model.load_model(filename)
    else:
        model = make_model(load_model=False)
        print('model size', model.param_count)
        model.make_env(render_mode=render_mode)
        model.init_random_model_params(stdev=np.random.rand()*0.01)
    N_episode = 100
    if render_mode:
        N_episode = 1
    reward_list = []
    for i in range(N_episode):
        # NOTE(review): simulate() requires `env` as its second positional
        # argument, which is not passed here -- TypeError at runtime.
        reward, steps_taken = simulate(model,
            train_mode=False, render_mode=render_mode, num_episode=1)
        if render_mode:
            print("terminal reward", reward, "average steps taken", np.mean(steps_taken)+1)
        else:
            print(reward[0])
        reward_list.append(reward[0])
    if not render_mode:
        print("seed", the_seed, "average_reward", np.mean(reward_list), "stdev", np.std(reward_list))
if __name__ == "__main__":
    # Ad-hoc debug harness: build env + controller and drop into pdb
    # before running one simulation (main() is commented out below).
    import env
    e = env.make_env()
    c = Controller()
    import pdb; pdb.set_trace()
    r, t = simulate(c, e, render_mode=False)
    #main()
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# File: osc4py3/tests/method.py
# <pep8 compliant>
import sys
from os.path import abspath, dirname
# Make osc4py3 available: add the package root (three levels up from this
# test file) to sys.path before importing it.
PACKAGE_PATH = dirname(dirname(dirname(abspath(__file__))))
if PACKAGE_PATH not in sys.path:
    sys.path.insert(0, PACKAGE_PATH)
from osc4py3.oscmethod import *
print("=" * 80)
print("\nTEST PATTERN MATCHING REWRITE FROM OSC TO PYTHON RE\n")
def nothing(*args):
    """No-op OSC handler: accepts any arguments and ignores them."""
    return None
def show_match(apflist, slist):
    """Print each filter's OSC pattern, its compiled RE, and the match
    result for every address string in slist."""
    for flt in apflist:
        print("\n=== Pattern: {!r}".format(flt.addrpattern))
        print(" RE : {!r}".format(flt.repattern))
        results = ((addr, flt.match(addr)) for addr in slist)
        for addr, matched in results:
            print("{!r}: {}".format(addr, matched))
# Wildcard '*' matches within a single path segment only.
show_match([
    MethodFilter("/third/*", nothing),
    MethodFilter("/third/*/d", nothing),
    MethodFilter("/third/*/*/e", nothing),
    ], [
    "/third/a",
    "/third/b",
    "/third/c",
    "/third/c/d",
    "/third/c/d/e",
    "/first/second/third/a",
    ])
# Character class [0-9] and trailing-slash patterns.
show_match([
    MethodFilter("/trigger/chan[0-9]/", nothing),
    MethodFilter("/trigger/chan*/", nothing),
    ], [
    "/trigger/chan1/",
    "/trigger/chan2/",
    "/trigger/chan34/",
    "/trigger/chan2/go",
    "/first/trigger/chan2/third/a",
    ])
# '?' matches exactly one character.
show_match([
    MethodFilter("/trigger/chan?/set", nothing),
    ], [
    "/trigger/chan5/set",
    "/trigger/chanX/setit",
    "/trigger/chanXY/set",
    ])
# Literal pattern with no wildcards.
show_match([
    MethodFilter("/first/this/one", nothing),
    ], [
    "/first/this/one",
    "/first/this/one/win",
    "/first/second/third/a",
    ])
# Options lists
show_match([
    MethodFilter("/items/{first,this,one}", nothing),
    MethodFilter("/items/{this,first}/{bad,good}", nothing),
    ], [
    "/noitems/this/bad",
    "/items/this/bad",
    "/items/one/chance",
    "/items/first/good",
    "/items/first",
    ])
# Root to leaf
show_match([
    MethodFilter("//here", nothing),
    ], [
    "/nothinghere",
    "/there/here/it/is",
    "/here/i/am",
    ])
# '//' matches any depth in the middle of the path.
show_match([
    MethodFilter("/here//something", nothing),
    ], [
    "/here/there/is/something",
    "/there/here/it/is",
    "/here/i/am/with/something/to/do",
    ])
# All !
show_match([
    MethodFilter("*", nothing),
    ], [
    "/here/there/is/something",
    "/there/here/it/is",
    "/here/i/am/with/something/to/do",
    ])
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Flask application configuration, read from the environment with
    development fallbacks."""

    # Fallback is for development only: use a long pseudorandom string in
    # production, and never commit the real secret key to GitHub.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'

    # Prefer DATABASE_URL when set; otherwise a local SQLite file.
    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('DATABASE_URL') or
        'sqlite:///' + os.path.join(basedir, 'app.db')
    )

    # Disable change-tracking signals (unneeded overhead).
    SQLALCHEMY_TRACK_MODIFICATIONS = False
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai).
# n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0).
# Find two lines, which together with x-axis forms a container, such that the container contains the most water.
# Note: You may not slant the container and n is at least 2.
class Solution(object):
    def maxArea(self, height):
        """Two-pointer scan: shrink the window from whichever side is
        shorter, since moving the taller side can never help.

        :type height: List[int]
        :rtype: int
        """
        lo, hi = 0, len(height) - 1
        best = 0
        while lo < hi:
            area = (hi - lo) * min(height[lo], height[hi])
            if area > best:
                best = area
            # Advance the shorter side (ties move the right pointer).
            if height[lo] < height[hi]:
                lo += 1
            else:
                hi -= 1
        return best
if __name__ == '__main__':
    # Smoke test: the minimal valid input (two lines of height 1) -> area 1.
    print(Solution().maxArea([1,1]))
# 49 / 49 test cases passed.
# Status: Accepted
# Runtime: 69 ms
# Your runtime beats 67.62 % of python submissions.
# Here is the fastest submitted solution. But it is O(n) too.
class Solution(object):
    # NOTE: redefines Solution above; kept as notes comparing two versions.
    def maxArea(self, height):
        """Two-pointer scan with the first step unrolled before the loop,
        mirroring the fastest submitted layout.

        :type height: List[int]
        :rtype: int
        """
        lo, hi = 0, len(height) - 1
        # Unrolled first iteration.
        width = hi - lo
        if height[hi] > height[lo]:
            depth = height[lo]
            lo += 1
        else:
            depth = height[hi]
            hi -= 1
        best = width * depth
        while lo < hi:
            width = hi - lo
            if height[hi] > height[lo]:
                depth = height[lo]
                lo += 1
            else:
                depth = height[hi]
                hi -= 1
            best = max(best, width * depth)
        return best
|
class Person:
    """Minimal person object with a human-readable string form."""

    def __init__(self, name):
        # Display name used by __str__.
        self.name = name

    def __str__(self):
        return f'{self.__class__.__name__} class, Obj name: {self.name}'
person = Person("Arijona")
# print() calls str() itself; calling the dunder directly is unidiomatic.
print(person)
|
import ctypes
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
import kivy
from kivy.properties import ListProperty
from kivy.factory import Factory
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.uix.popup import Popup
import mybackend
class MyGrid(FloatLayout): pass  # Root widget of the app; layout presumably defined in the kv file — confirm
class ErrorPage(FloatLayout): pass  # Content widget for the validation-error popup; layout presumably in the kv file — confirm
class MyApp(App):
    """Kivy application: collects location/duration/restaurant-count inputs
    and shows recommendations computed by the mybackend module."""
    def build(self):
        # Kivy hook: return the root widget tree.
        return MyGrid()
    ## Runs when the "Let's Go" button is clicked: validates all inputs,
    ## then hands them to the backend and shows the results (or an error).
    def openPopUpSubmit(self):
        # Inputs are valid when count and time are digits and location is non-empty.
        if(self.root.ids.count_res_input.text.isdigit() and self.root.ids.time_input.text.isdigit() and len(self.root.ids.location_input.text) > 0):
            self.loc = self.root.ids.location_input.text
            self.duration = self.root.ids.time_input.text
            self.resCount = self.root.ids.count_res_input.text
            # Clear the form for the next query.
            self.root.ids.location_input.text = ''
            self.root.ids.time_input.text = ''
            self.root.ids.count_res_input.text = ''
            try:
                #BackEnd:calculate the answer
                db = mybackend.Database()
                answers =db.calculate_recommendations(self.loc,int(self.duration), int(self.resCount))
                # NOTE(review): Popup(...).open() returns None, so
                # popupWindowSuccess holds None, not the popup — confirm intended.
                self.popupWindowSuccess = Popup(title="Results Window", content=Label(text=str('\n'.join(answers)), halign='center'),
                                                size_hint=(None, None), size=(400, 300)).open()
            except Exception as e:
                # NOTE(review): Windows-only error dialog (ctypes.windll); also
                # assumes e.args[0] is a string — confirm on other platforms.
                ctypes.windll.user32.MessageBoxW(0, e.args[0], u"Error", 0)
        else:
            # Invalid input: show the error popup (kept so closePopUpERR can dismiss it).
            self.popupWindowERR = Popup(title="OOPS!", content=ErrorPage(),
                                        size_hint=(None, None), size=(400, 260))
            self.popupWindowERR.open()
    # Closes the error popup opened by openPopUpSubmit.
    def closePopUpERR(self):
        self.popupWindowERR.dismiss()
if __name__ == "__main__":
    # Entry point: start the Kivy application event loop.
    MyApp().run()
|
import os
import sys
from bruker2nifti._metadata import BrukerMetadata
from bruker2nifti._utils import bruker_read_files
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock as mock
# Resolve the test-data location relative to this file.
here = os.path.abspath(os.path.dirname(__file__))
root_dir = os.path.dirname(here)
banana_data = os.path.join(root_dir, "test_data", "bru_banana")
class TestMetadata(object):
    """Unit tests for BrukerMetadata: directory listing and parameter-file
    parsing against the bru_banana test dataset (plus mocked paths)."""
    def test_instantiation(self):
        """The constructor should store the input path unchanged."""
        paths = [
            None,
            "/",
            os.path.join("path", "to", "study"),
            os.path.join("path", "to", "study", "0"),
            os.path.join("path", "to", "study", "1"),
        ]
        for path in paths:
            m = BrukerMetadata(path)
            assert isinstance(m, BrukerMetadata)
            assert m.pfo_input == path
    def test_list_subdirs(self):
        """_list_subdirs should return immediate subdirectory names only."""
        m = BrukerMetadata(banana_data)
        assert m._list_subdirs(banana_data) == ["1", "2", "3"]
        assert m._list_subdirs(os.path.join(banana_data, "1")) == []
        assert m._list_subdirs(os.path.join(banana_data, "1", "pdata")) == ["1"]
        assert m._list_subdirs(os.path.join(banana_data, "1", "pdata", "1")) == []
    def test_list_scans(self):
        """list_scans should delegate to _list_subdirs on the study dir."""
        data_dir = os.path.join("path", "to", "study")
        expected_scans = ["1", "2", "3", "4"]
        with mock.patch.object(
            BrukerMetadata, "_list_subdirs", return_value=expected_scans
        ) as mock_method:
            m = BrukerMetadata(data_dir)
            scans = m.list_scans()
        mock_method.assert_called_once_with(data_dir)
        assert scans == expected_scans
    def test_list_reconstructions(self):
        """list_recons should look under <study>/<scan>/pdata."""
        data_dir = os.path.join("path", "to", "study")
        selected_scan = "2"
        expected_recons = ["1"]
        expected_dir = os.path.join(data_dir, selected_scan, "pdata")
        with mock.patch.object(
            BrukerMetadata, "_list_subdirs", return_value=expected_recons
        ) as mock_method:
            m = BrukerMetadata(data_dir)
            recons = m.list_recons(selected_scan)
        mock_method.assert_called_once_with(expected_dir)
        assert recons == expected_recons
    # TODO: The following test case is not properly tested yet as the
    # banana dataset does not include a subject. Will need to add a new
    # test dataset or update banana to include a subject
    def test_read_subject(self):
        """read_subject should parse the study-level 'subject' file."""
        expected_contents = bruker_read_files("subject", banana_data)
        with mock.patch("bruker2nifti._utils.bruker_read_files") as mock_function:
            mock_function.configure_mock(side_effect=bruker_read_files)
            m = BrukerMetadata(banana_data)
            subject = m.read_subject()
            assert subject == expected_contents
            # NOTE(review): if BrukerMetadata imports bruker_read_files by
            # name, patching the _utils module attribute would not intercept
            # its call — confirm the import style in _metadata.
            mock_function.assert_called_once_with("subject", banana_data)
    def test_read_recon(self):
        """read_recon should return parsed 'reco' and 'visu_pars' sections."""
        selected_scan = "3"
        selected_recon = "1"
        data_path = os.path.join(banana_data, selected_scan)
        ex_reco = bruker_read_files("reco", data_path, selected_recon)
        ex_visu_pars = bruker_read_files("visu_pars", data_path, selected_recon)
        expected_contents = {"reco": ex_reco, "visu_pars": ex_visu_pars}
        with mock.patch("bruker2nifti._utils.bruker_read_files") as mock_function:
            mock_function.configure_mock(side_effect=bruker_read_files)
            m = BrukerMetadata(banana_data)
            recon = m.read_recon(selected_scan, selected_recon)
            # Compare key sets only; values hold parsed structures.
            assert recon.keys() == expected_contents.keys()
            assert recon["reco"].keys() == ex_reco.keys()
            assert recon["visu_pars"].keys() == ex_visu_pars.keys()
            mock_function.assert_called()
    def test_read_recons(self):
        """read_recons should map every listed recon id to its contents."""
        selected_scan = "2"
        root_path = os.path.join("path", "to")
        data_path = os.path.join(root_path, selected_scan)  # NOTE(review): unused
        expected_keys = ["1", "2"]
        with mock.patch.object(
            BrukerMetadata, "list_recons", return_value=["1", "2"]
        ) as mock_list_recons, mock.patch.object(
            BrukerMetadata, "read_recon", return_value=None
        ) as mock_read_recon:
            m = BrukerMetadata(root_path)
            recons = m.read_recons(selected_scan)
        assert list(recons.keys()) == expected_keys
        mock_list_recons.assert_called_once()
        mock_read_recon.assert_called()
    def test_read_scan(self):
        """read_scan should combine acqp, method and per-recon contents."""
        selected_scan = "1"
        data_path = os.path.join(banana_data, selected_scan)
        expected_keys = ["acqp", "method", "recons"]
        ex_acqp = bruker_read_files("acqp", data_path)
        ex_method = bruker_read_files("method", data_path)
        with mock.patch.object(
            BrukerMetadata, "read_recons", return_value={"1": None, "2": None}
        ) as mock_read_recons:
            m = BrukerMetadata(banana_data)
            scan = m.read_scan(selected_scan)
        assert set(scan.keys()) == set(expected_keys)
        assert scan["acqp"].keys() == ex_acqp.keys()
        assert scan["method"].keys() == ex_method.keys()
        assert set(scan["recons"].keys()) == {"1", "2"}
        mock_read_recons.assert_called_once_with(selected_scan)
    def test_read_scans(self):
        """read_scans should read every scan returned by list_scans."""
        expected_keys = ["1", "2", "3", "4"]
        root_path = os.path.join("path", "to")
        with mock.patch.object(
            BrukerMetadata, "list_scans", return_value=expected_keys
        ) as mock_list_scans, mock.patch.object(
            BrukerMetadata, "read_scan", return_value=None
        ) as mock_read_scan:
            m = BrukerMetadata(root_path)
            scans = m.read_scans()
        assert set(scans.keys()) == set(expected_keys)
        mock_list_scans.assert_called_once()
        mock_read_scan.assert_called()
    def test_parse_scans(self):
        """parse_scans should store read_scans output on scan_data."""
        root_path = os.path.join("path", "to")
        expected_contents = {"acqp": None, "method": None, "recons": None}
        with mock.patch.object(
            BrukerMetadata, "read_scans", return_value=expected_contents
        ) as mock_read_scans:
            m = BrukerMetadata(root_path)
            m.parse_scans()
        assert m.scan_data == expected_contents
        mock_read_scans.assert_called_once()
    def test_parse_subject(self):
        """parse_subject should store read_subject output on subject_data."""
        root_path = os.path.join("path", "to")
        expected_contents = {"OWNER": "nmrsu", "SUBJECT_name": "Tom Bombadil"}
        with mock.patch.object(
            BrukerMetadata, "read_subject", return_value=expected_contents
        ) as mock_read_subject:
            m = BrukerMetadata(root_path)
            m.parse_subject()
        assert m.subject_data == expected_contents
        mock_read_subject.assert_called_once()
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Service Account
See: https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts
"""
class ServiceAccount(object):
    """Represents Service Account resource."""

    # Accepted keyword arguments; anything else in kwargs is ignored.
    _FIELDS = ('project_id', 'name', 'email', 'oauth2_client_id',
               'keys', 'raw_service_account')

    def __init__(self, **kwargs):
        """Service Account resource.

        Args:
            **kwargs (dict): The keyworded variable args.
        """
        # Missing keywords default to None, exactly like dict.get().
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))
|
"""Heating/cooling systems without any ventilation."""
from pydantic import Field, constr
from enum import Enum
from ._template import _TemplateSystem
class _HeatCoolBase(_TemplateSystem):
    """Base class for all heating/cooling systems without any ventilation."""
    # Marker base: concrete systems below add `type` and `equipment_type`.
class FCUEquipmentType(str, Enum):
    """Allowed equipment configurations for the FCU system."""
    fcu_chill_gb = 'Fan coil chiller with boiler'
    fcu_chill_ashp = 'Fan coil chiller with central air source heat pump'
    fcu_chill_dhw = 'Fan coil chiller with district hot water'
    fcu_chill_base = 'Fan coil chiller with baseboard electric'
    fcu_chill_guh = 'Fan coil chiller with gas unit heaters'
    fcu_chill = 'Fan coil chiller with no heat'
    fcu_ac_chill_gb = 'Fan coil air-cooled chiller with boiler'
    fcu_ac_chill_ashp = 'Fan coil air-cooled chiller with central air source heat pump'
    fcu_ac_chill_dhw = 'Fan coil air-cooled chiller with district hot water'
    fcu_ac_chill_base = 'Fan coil air-cooled chiller with baseboard electric'
    fcu_ac_chill_guh = 'Fan coil air-cooled chiller with gas unit heaters'
    fcu_ac_chill = 'Fan coil air-cooled chiller with no heat'
    fcu_dcw_gb = 'Fan coil district chilled water with boiler'
    fcu_dcw_ashp = 'Fan coil district chilled water with central air source heat pump'
    fcu_dcw_dhw = 'Fan coil district chilled water with district hot water'
    fcu_dcw_base = 'Fan coil district chilled water with baseboard electric'
    fcu_dcw_guh = 'Fan coil district chilled water with gas unit heaters'
    fcu_dcw = 'Fan coil district chilled water with no heat'
class BaseboardEquipmentType(str, Enum):
    """Allowed equipment configurations for the Baseboard system."""
    e_base = 'Baseboard electric'
    gb_base = 'Baseboard gas boiler'
    ashp_base = 'Baseboard central air source heat pump'
    dhw_base = 'Baseboard district hot water'
class EvaporativeCoolerEquipmentType(str, Enum):
    """Allowed equipment configurations for the EvaporativeCooler system."""
    evap_e_base = 'Direct evap coolers with baseboard electric'
    evap_gb_base = 'Direct evap coolers with baseboard gas boiler'
    evap_ashp_base = 'Direct evap coolers with baseboard central air source heat pump'
    evap_dhw_base = 'Direct evap coolers with baseboard district hot water'
    evap_furnace = 'Direct evap coolers with forced air furnace'
    evap_guh = 'Direct evap coolers with gas unit heaters'
    evap = 'Direct evap coolers with no heat'
class WSHPEquipmentType(str, Enum):
    """Allowed equipment configurations for the WSHP system."""
    wshp_fc_gb = 'Water source heat pumps fluid cooler with boiler'
    wshp_ct_gb = 'Water source heat pumps cooling tower with boiler'
    wshp_gshp = 'Water source heat pumps with ground source heat pump'
    wshp_dcw_dhw = 'Water source heat pumps district chilled water with district hot water'
class ResidentialEquipmentType(str, Enum):
    """Allowed equipment configurations for the Residential system."""
    res_ac_e_base = 'Residential AC with baseboard electric'
    res_ac_gb_base = 'Residential AC with baseboard gas boiler'
    res_ac_ashp_base = 'Residential AC with baseboard central air source heat pump'
    res_ac_dhw_base = 'Residential AC with baseboard district hot water'
    res_ac_furnace = 'Residential AC with residential forced air furnace'
    res_ac = 'Residential AC with no heat'
    res_hp = 'Residential heat pump'
    res_hp_no_cool = 'Residential heat pump with no cooling'
    res_furnace = 'Residential forced air furnace'
class WindowACEquipmentType(str, Enum):
    """Allowed equipment configurations for the WindowAC system."""
    win_ac_e_base = 'Window AC with baseboard electric'
    win_ac_gb_base = 'Window AC with baseboard gas boiler'
    win_ac_ashp_base = 'Window AC with baseboard central air source heat pump'
    win_ac_dhw_base = 'Window AC with baseboard district hot water'
    win_ac_furnace = 'Window AC with forced air furnace'
    win_ac_guh = 'Window AC with unit heaters'
    win_ac = 'Window AC with no heat'
class VRFEquipmentType(str, Enum):
    """Allowed equipment configurations for the VRF system."""
    vrf = 'VRF'
class GasUnitHeaterEquipmentType(str, Enum):
    """Allowed equipment configurations for the GasUnitHeater system."""
    guh = 'Gas unit heaters'
class FCU(_HeatCoolBase):
    """Fan Coil Unit (FCU) heating/cooling system (with no ventilation)."""
    # `type` is the schema discriminator; the regex pins it to 'FCU'.
    type: constr(regex='^FCU$') = 'FCU'
    equipment_type: FCUEquipmentType = Field(
        FCUEquipmentType.fcu_chill_gb,
        description='Text for the specific type of system equipment from the '
        'FCUEquipmentType enumeration.'
    )
class Baseboard(_HeatCoolBase):
    """Baseboard heating system. Intended for spaces only requiring heating."""
    # `type` is the schema discriminator; the regex pins it to 'Baseboard'.
    type: constr(regex='^Baseboard$') = 'Baseboard'
    equipment_type: BaseboardEquipmentType = Field(
        BaseboardEquipmentType.e_base,
        description='Text for the specific type of system equipment from the '
        'BaseboardEquipmentType enumeration.'
    )
class EvaporativeCooler(_HeatCoolBase):
    """Direct evaporative cooling systems (with optional heating)."""
    # `type` is the schema discriminator; the regex pins it to 'EvaporativeCooler'.
    type: constr(regex='^EvaporativeCooler$') = 'EvaporativeCooler'
    equipment_type: EvaporativeCoolerEquipmentType = Field(
        EvaporativeCoolerEquipmentType.evap_e_base,
        description='Text for the specific type of system equipment from the '
        'EvaporativeCoolerEquipmentType enumeration.'
    )
class WSHP(_HeatCoolBase):
    """Water Source Heat Pump (WSHP) heating/cooling system (with no ventilation)."""
    # Docstring fixed: it was copy-pasted from EvaporativeCooler.
    # `type` is the schema discriminator; the regex pins it to 'WSHP'.
    type: constr(regex='^WSHP$') = 'WSHP'
    equipment_type: WSHPEquipmentType = Field(
        WSHPEquipmentType.wshp_fc_gb,
        description='Text for the specific type of system equipment from the '
        'WSHPEquipmentType enumeration.'
    )
class Residential(_HeatCoolBase):
    """Residential Air Conditioning, Heat Pump or Furnace system."""
    # `type` is the schema discriminator; the regex pins it to 'Residential'.
    type: constr(regex='^Residential$') = 'Residential'
    equipment_type: ResidentialEquipmentType = Field(
        ResidentialEquipmentType.res_ac_e_base,
        description='Text for the specific type of system equipment from the '
        'ResidentialEquipmentType enumeration.'
    )
class WindowAC(_HeatCoolBase):
    """Window Air Conditioning cooling system (with optional heating)."""
    # `type` is the schema discriminator; the regex pins it to 'WindowAC'.
    type: constr(regex='^WindowAC$') = 'WindowAC'
    equipment_type: WindowACEquipmentType = Field(
        WindowACEquipmentType.win_ac_e_base,
        description='Text for the specific type of system equipment from the '
        'WindowACEquipmentType enumeration.'
    )
class VRF(_HeatCoolBase):
    """Variable Refrigerant Flow (VRF) heating/cooling system (with no ventilation)."""
    # `type` is the schema discriminator; the regex pins it to 'VRF'.
    type: constr(regex='^VRF$') = 'VRF'
    equipment_type: VRFEquipmentType = Field(
        VRFEquipmentType.vrf,
        description='Text for the specific type of system equipment from the '
        'VRFEquipmentType enumeration.'
    )
class GasUnitHeater(_HeatCoolBase):
    """Gas unit heating system. Intended for spaces only requiring heating."""
    # `type` is the schema discriminator; the regex pins it to 'GasUnitHeater'.
    type: constr(regex='^GasUnitHeater$') = 'GasUnitHeater'
    equipment_type: GasUnitHeaterEquipmentType = Field(
        GasUnitHeaterEquipmentType.guh,
        description='Text for the specific type of system equipment from the '
        'GasUnitHeaterEquipmentType enumeration.'
    )
|
# Generated by Django 2.2.4 on 2019-10-04 12:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add created_at/updated_at timestamp fields to the Task model."""
    dependencies = [
        ('to_do_list', '0002_auto_20190915_1927'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='created_at',
            # A default is required so existing rows get a value for the new
            # auto_now_add column; preserve_default=False drops it afterwards.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Время создания'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='task',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='Время изменения'),
        ),
    ]
|
def hashable(v):
    """Return the elements of v as a set, converting (unhashable) lists
    to tuples so they can become set members."""
    return {tuple(item) if isinstance(item, list) else item for item in v}


def match_arrays(v, r):
    """Count how many elements v and r share (lists compared by content)."""
    return len(hashable(v) & hashable(r))
|
# Inventory of London site devices, keyed by device name.
london_cp = {
    'r1' : {
        'location': '21 New Globe Walk',
        'vendor': 'Cisco',
        'model': '4451',
        'ios': '15.4',
        'ip': '10.255.0.1'
    },
    'r2' : {
        'location': '21 New Globe Walk',
        'vendor': 'Cisco',
        'model': '4451',
        'ios': '15.4',
        'ip': '10.255.0.2'
    },
    'sw1' : {
        'location': '21 New Globe Walk',
        'vendor': 'Cisco',
        'model': '3850',
        'ios': '3.6.XE',
        'ip': '10.255.0.101',
        'vlans': '10,20,30',
        'routing': True
    }
}

ch_device = input('Please enter device name: ')
device_info = london_cp.get(ch_device)
# Fix: .get() returns None for an unknown device, so the original chained
# .keys() call crashed with AttributeError; report it gracefully instead.
if device_info is None:
    print('This device does not exist')
else:
    parametr_list_string = ','.join(device_info.keys())
    ch_parametr = input('Please enter parametr name ({}): '.format(parametr_list_string))
    print(device_info.get(ch_parametr.lower(), 'This parametr does not exist'))
#import pycls.core.net as net
import torch.nn as nn
import numpy as np
def get_stem_fun(stem_type):
    """Return the stem class registered under `stem_type`."""
    stems = {
        "res_stem_cifar": ResStemCifar,
        "res_stem_in": ResStemIN,
        "simple_stem_in": SimpleStemIN,
    }
    # Membership test on the dict itself (no need for .keys()).
    assert stem_type in stems, "Stem type '{}' not supported".format(stem_type)
    return stems[stem_type]
def get_block_fun(block_type):
    """Return the block class registered under `block_type`."""
    blocks = {
        "vanilla_block": VanillaBlock,
        "res_basic_block": ResBasicBlock,
        "res_bottleneck_block": ResBottleneckBlock,
    }
    # Membership test on the dict itself (no need for .keys()).
    assert block_type in blocks, "Block type '{}' not supported".format(block_type)
    return blocks[block_type]
class AnyHead(nn.Module):
    """AnyNet head: global average pool followed by a linear classifier.

    Args:
        w_in: number of input channels.
        nc: number of output classes.
    """
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, nc):
        super(AnyHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(w_in, nc, bias=True)

    def forward(self, x):
        # (N, C, H, W) -> (N, C, 1, 1) -> (N, C) -> (N, nc)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class VanillaBlock(nn.Module):
    """Vanilla block: [3x3 conv, BN, ReLU] x2 (no residual connection)."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out, stride, bm=None, gw=None, se_r=None):
        # bm/gw/se_r exist only for signature parity with the other blocks.
        err_str = "Vanilla block does not support bm, gw, and se_r options"
        assert bm is None and gw is None and se_r is None, err_str
        super(VanillaBlock, self).__init__()
        self.a = nn.Conv2d(w_in, w_out, 3, stride=stride, padding=1, bias=False)
        self.a_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.a_relu = nn.ReLU(inplace=True)
        self.b = nn.Conv2d(w_out, w_out, 3, stride=1, padding=1, bias=False)
        self.b_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.b_relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Submodules are applied in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class BasicTransform(nn.Module):
    """Basic transformation: [3x3 conv, BN, ReLU], then [3x3 conv, BN]."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out, stride):
        super(BasicTransform, self).__init__()
        self.a = nn.Conv2d(w_in, w_out, 3, stride=stride, padding=1, bias=False)
        self.a_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.a_relu = nn.ReLU(inplace=True)
        self.b = nn.Conv2d(w_out, w_out, 3, stride=1, padding=1, bias=False)
        self.b_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        # Flag presumably read by an external weight init to zero-init the
        # final BN of each residual branch — confirm against the trainer.
        self.b_bn.final_bn = True

    def forward(self, x):
        # Submodules are applied in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class ResBasicBlock(nn.Module):
    """Residual basic block: x + F(x), F = basic transform."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out, stride, bm=None, gw=None, se_r=None):
        # bm/gw/se_r exist only for signature parity with the other blocks.
        err_str = "Basic transform does not support bm, gw, and se_r options"
        assert bm is None and gw is None and se_r is None, err_str
        super(ResBasicBlock, self).__init__()
        # Projection shortcut (1x1 conv + BN) when the shape changes.
        self.proj_block = (w_in != w_out) or (stride != 1)
        if self.proj_block:
            self.proj = nn.Conv2d(w_in, w_out, 1, stride=stride, padding=0, bias=False)
            self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.f = BasicTransform(w_in, w_out, stride)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        if self.proj_block:
            x = self.bn(self.proj(x)) + self.f(x)
        else:
            x = x + self.f(x)
        x = self.relu(x)
        return x
class SE(nn.Module):
    """Squeeze-and-Excitation (SE) block: AvgPool, FC, ReLU, FC, Sigmoid.

    Rescales each input channel by a learned gate in [0, 1].
    """
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_se):
        super(SE, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # 1x1 convs act as per-channel fully-connected layers.
        self.f_ex = nn.Sequential(
            nn.Conv2d(w_in, w_se, 1, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(w_se, w_in, 1, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return x * self.f_ex(self.avg_pool(x))
class BottleneckTransform(nn.Module):
    """Bottleneck transformation: 1x1, 3x3 (grouped) [+SE], 1x1.

    Args:
        bm: bottleneck width multiplier applied to w_out.
        gw: group width for the 3x3 conv.
        se_r: SE reduction ratio; falsy disables the SE block.
    """
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out, stride, bm, gw, se_r):
        super(BottleneckTransform, self).__init__()
        w_b = int(round(w_out * bm))
        g = w_b // gw
        self.a = nn.Conv2d(w_in, w_b, 1, stride=1, padding=0, bias=False)
        self.a_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
        self.a_relu = nn.ReLU(inplace=True)
        self.b = nn.Conv2d(w_b, w_b, 3, stride=stride, padding=1, groups=g, bias=False)
        self.b_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
        self.b_relu = nn.ReLU(inplace=True)
        if se_r:
            # SE width is derived from w_in (not w_b), per the RegNet design.
            w_se = int(round(w_in * se_r))
            self.se = SE(w_b, w_se)
        self.c = nn.Conv2d(w_b, w_out, 1, stride=1, padding=0, bias=False)
        self.c_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        # Flag presumably read by an external weight init to zero-init the
        # final BN of each residual branch — confirm against the trainer.
        self.c_bn.final_bn = True

    def forward(self, x):
        # Submodules are applied in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class ResBottleneckBlock(nn.Module):
    """Residual bottleneck block: x + F(x), F = bottleneck transform."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out, stride, bm=1.0, gw=1, se_r=None):
        super(ResBottleneckBlock, self).__init__()
        # Use skip connection with projection if shape changes
        self.proj_block = (w_in != w_out) or (stride != 1)
        if self.proj_block:
            self.proj = nn.Conv2d(w_in, w_out, 1, stride=stride, padding=0, bias=False)
            self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.f = BottleneckTransform(w_in, w_out, stride, bm, gw, se_r)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        if self.proj_block:
            x = self.bn(self.proj(x)) + self.f(x)
        else:
            x = x + self.f(x)
        x = self.relu(x)
        return x
class ResStemCifar(nn.Module):
    """ResNet stem for CIFAR: 3x3 conv, BN, ReLU (keeps spatial size)."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out):
        super(ResStemCifar, self).__init__()
        self.conv = nn.Conv2d(w_in, w_out, 3, stride=1, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        # Submodules are applied in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class ResStemIN(nn.Module):
    """ResNet stem for ImageNet: 7x7 conv (s2), BN, ReLU, 3x3 MaxPool (s2)."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out):
        super(ResStemIN, self).__init__()
        self.conv = nn.Conv2d(w_in, w_out, 7, stride=2, padding=3, bias=False)
        self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.relu = nn.ReLU(True)
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        # Submodules are applied in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class SimpleStemIN(nn.Module):
    """Simple stem for ImageNet: 3x3 conv (s2), BN, ReLU (halves H and W)."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out):
        super(SimpleStemIN, self).__init__()
        self.conv = nn.Conv2d(w_in, w_out, 3, stride=2, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        # Submodules are applied in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class AnyStage(nn.Module):
    """AnyNet stage (sequence of blocks w/ the same output shape).

    The first block applies `stride` and maps w_in -> w_out; the remaining
    d-1 blocks are stride-1 with w_out channels throughout.
    """
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    def __init__(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
        super(AnyStage, self).__init__()
        for i in range(d):
            b_stride = stride if i == 0 else 1
            b_w_in = w_in if i == 0 else w_out
            name = "b{}".format(i + 1)
            self.add_module(name, block_fun(b_w_in, w_out, b_stride, bm, gw, se_r))

    def forward(self, x):
        # Blocks run in registration order: b1, b2, ..., bd.
        for block in self.children():
            x = block(x)
        return x
class AnyNet(nn.Module):
    """AnyNet model: stem -> stages s1..sN -> classification head."""
    # Dead commented-out `complexity` code removed (it depended on the
    # commented-out pycls.core.net import).

    @staticmethod
    def get_args(cfg):
        """Translate a pycls-style config node into _construct() kwargs."""
        return {
            "stem_type": cfg.ANYNET.STEM_TYPE,
            "stem_w": cfg.ANYNET.STEM_W,
            "block_type": cfg.ANYNET.BLOCK_TYPE,
            "ds": cfg.ANYNET.DEPTHS,
            "ws": cfg.ANYNET.WIDTHS,
            "ss": cfg.ANYNET.STRIDES,
            "bms": cfg.ANYNET.BOT_MULS,
            "gws": cfg.ANYNET.GROUP_WS,
            "se_r": cfg.ANYNET.SE_R if cfg.ANYNET.SE_ON else None,
            "nc": cfg.CLASS_NUM,
        }

    def __init__(self, cfg, logger, **kwargs):
        """Build from explicit kwargs, or from cfg when no kwargs are given.

        `logger` is unused here but kept for interface compatibility.
        """
        super(AnyNet, self).__init__()
        kwargs = self.get_args(cfg) if not kwargs else kwargs
        self._construct(**kwargs)
        # Weight init was disabled along with the pycls.core.net dependency.

    def _construct(self, stem_type, stem_w, block_type, ds, ws, ss, bms, gws, se_r, nc):
        # Generate dummy bot muls and gs for models that do not use them
        bms = bms if bms else [None for _d in ds]
        gws = gws if gws else [None for _d in ds]
        stage_params = list(zip(ds, ws, ss, bms, gws))
        stem_fun = get_stem_fun(stem_type)
        self.stem = stem_fun(3, stem_w)  # assumes 3-channel (RGB) input
        block_fun = get_block_fun(block_type)
        prev_w = stem_w
        for i, (d, w, s, bm, gw) in enumerate(stage_params):
            name = "s{}".format(i + 1)
            self.add_module(name, AnyStage(prev_w, w, s, d, block_fun, bm, gw, se_r))
            prev_w = w
        self.head = AnyHead(w_in=prev_w, nc=nc)

    def forward(self, x):
        # Modules run in registration order: stem, s1..sN, head.
        for module in self.children():
            x = module(x)
        return x
def quantize_float(f, q):
    """Converts a float to closest non-zero int divisible by q."""
    steps = round(f / q)
    return int(steps * q)
def adjust_ws_gs_comp(ws, bms, gs):
    """Adjust widths and group sizes so each bottleneck width is divisible by its group size."""
    # Bottleneck widths, then cap each group size by its bottleneck width.
    ws_bot = [int(w * bm) for w, bm in zip(ws, bms)]
    gs = [min(g, wb) for g, wb in zip(gs, ws_bot)]
    # Snap bottleneck widths to multiples of the group size, then undo the multiplier.
    ws_bot = [quantize_float(wb, g) for wb, g in zip(ws_bot, gs)]
    ws = [int(wb / bm) for wb, bm in zip(ws_bot, bms)]
    return ws, gs
def get_stages_from_blocks(ws, rs):
    """Collapse per-block widths into per-stage (width, depth) lists.

    A new stage starts wherever the width or the companion value ``rs``
    changes between consecutive blocks.
    """
    pairs = zip(ws + [0], [0] + ws, rs + [0], [0] + rs)
    is_boundary = [w != w_prev or r != r_prev for w, w_prev, r, r_prev in pairs]
    s_ws = [w for w, boundary in zip(ws, is_boundary[:-1]) if boundary]
    boundary_idx = [idx for idx, boundary in enumerate(is_boundary) if boundary]
    s_ds = np.diff(boundary_idx).tolist()
    return s_ws, s_ds
def generate_regnet(w_a, w_0, w_m, d, q=8):
    """Generate per-block widths from RegNet parameters (w_a, w_0, w_m, depth d).

    Returns (quantized widths, number of distinct stages, max stage index + 1,
    continuous widths).
    """
    assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
    # Linear widths along the block index, snapped to the log-space grid of w_m.
    ws_cont = np.arange(d) * w_a + w_0
    ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))
    # Quantize to multiples of q.
    ws = np.round(np.divide(w_0 * np.power(w_m, ks), q)) * q
    num_stages = len(np.unique(ws))
    max_stage = ks.max() + 1
    return ws.astype(int).tolist(), num_stages, max_stage, ws_cont.tolist()
class RegNet(AnyNet):
    """RegNet model: an AnyNet whose stage parameters follow the RegNet design space."""

    @staticmethod
    def get_args(cfg):
        """Convert RegNet config parameters into AnyNet keyword arguments."""
        # Per-block widths from the RegNet generating function.
        ws, num_stages, _, _ = generate_regnet(
            cfg.REGNET.WA, cfg.REGNET.W0, cfg.REGNET.WM, cfg.REGNET.DEPTH
        )
        # Collapse per-block values into per-stage values.
        s_ws, s_ds = get_stages_from_blocks(ws, ws)
        # Same group width, bottleneck multiplier, and stride for every stage.
        s_gs = [cfg.REGNET.GROUP_W] * num_stages
        s_bs = [cfg.REGNET.BOT_MUL] * num_stages
        s_ss = [cfg.REGNET.STRIDE] * num_stages
        # Make widths and group sizes mutually compatible.
        s_ws, s_gs = adjust_ws_gs_comp(s_ws, s_bs, s_gs)
        return {
            "stem_type": cfg.REGNET.STEM_TYPE,
            "stem_w": cfg.REGNET.STEM_W,
            "block_type": cfg.REGNET.BLOCK_TYPE,
            "ds": s_ds,
            "ws": s_ws,
            "ss": s_ss,
            "bms": s_bs,
            "gws": s_gs,
            "se_r": cfg.REGNET.SE_R if cfg.REGNET.SE_ON else None,
            "nc": cfg.CLASS_NUM,
        }

    def __init__(self, cfg=None, logger=None):
        super(RegNet, self).__init__(cfg, logger, **RegNet.get_args(cfg))
# @staticmethod
# def complexity(cx, **kwargs):
# """Computes model complexity. If you alter the model, make sure to update."""
# kwargs = RegNet.get_args() if not kwargs else kwargs
# return AnyNet.complexity(cx, **kwargs) |
#! /usr/bin/env python
# Imports
import sys
import copy
import rospy
import moveit_commander
def main():
    """Initialize MoveIt commander interfaces for the 'arm' planning group.

    Side effects only: creates the robot/scene/move-group commanders and a
    display-trajectory publisher for RViz.
    """
    # Bug fix: DisplayTrajectory was referenced but never imported, which
    # raised NameError at the Publisher call. Imported locally so this file
    # only needs moveit_msgs when actually run.
    from moveit_msgs.msg import DisplayTrajectory
    moveit_commander.roscpp_initialize(sys.argv)
    group_name = 'arm'
    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    move_group = moveit_commander.MoveGroupCommander(group_name)
    display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                   DisplayTrajectory, queue_size=20)
if __name__ == "__main__":
    # The ROS node must be initialized before any publishers are created.
    rospy.init_node("remote_moveit_node")
    main()
    # Keep the node alive to service callbacks.
    rospy.spin()
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
from flask import Flask, request
import schedule2
import logging
app = Flask(__name__)
@app.route('/', methods=["POST"])
def run():
    # Delegate the POSTed JSON payload to the scheduler and return its result.
    return schedule2.schedule(request.json)
if __name__ == '__main__':
    # Single-threaded, single-process server: scheduling state is not shared
    # across workers, so concurrency is deliberately disabled.
    app.run(host="0.0.0.0",
            port=9292,
            threaded=False,
            processes=1)
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Any, Mapping, Tuple
from ..damlast.daml_lf_1 import DefDataType, Type
from ..prim import (
to_bool,
to_date,
to_datetime,
to_decimal,
to_int,
to_party,
to_record,
to_str,
to_variant,
)
from .context import Context
from .mapper import ValueMapper
__all__ = ["CanonicalMapper"]
class CanonicalMapper(ValueMapper):
    """
    A mapper that canonicalizes values. If values are already in canonical form, this is essentially
    the identity mapper.

    For container primitives (Optional, List, Map, etc), records, and variants, values are recursed
    and walked through, so subclasses that override :class:`CanonicalMapper` can typically just
    override the primitive methods in order to get slightly different behavior.

    The canonical format of DAML-LF values in dazl was designed specifically to conform as closely
    as possible to simple JSON representations, so the :class:`CanonicalMapper` can also be used to
    decode DAML-LF JSON.
    """

    def data_record(
        self, context: "Context", dt: "DefDataType", record: "DefDataType.Fields", obj: "Any"
    ) -> "Any":
        """Canonicalize a record: validate field names, then convert each field."""
        orig_mapping = self._record_to_dict(context, dt, record, obj)
        expected_keys = frozenset(fld.field for fld in record.fields)
        actual_keys = frozenset(orig_mapping)
        if actual_keys.issuperset(expected_keys):
            if actual_keys != expected_keys:
                # Earlier versions were more tolerant of extra fields. To keep backwards
                # compatibility, we'll emit a warning, though this may become an exception
                # eventually.
                context.value_warn(
                    obj, f'extra fields: {", ".join(sorted(actual_keys - expected_keys))}'
                )
        else:
            context.value_error(
                obj, f'missing fields: {", ".join(sorted(expected_keys - actual_keys))}'
            )
        # Convert every expected field, extending the context path so errors
        # report where in the structure they occurred.
        new_mapping = {
            fld.field: context.append_path(fld.field).convert(fld.type, orig_mapping[fld.field])
            for fld in record.fields
        }
        return self._dict_to_record(context, dt, record, new_mapping)

    def data_variant(
        self, context: "Context", dt: "DefDataType", variant: "DefDataType.Fields", obj: "Any"
    ) -> "Any":
        """Canonicalize a variant: convert the value of its matching constructor."""
        ctor, orig_val = self._variant_to_ctor_value(context, dt, variant, obj)
        for fld in variant.fields:
            if fld.field == ctor:
                new_val = context.append_path(fld.field).convert(fld.type, orig_val)
                return self._ctor_value_to_variant(context, dt, variant, ctor, new_val)
        # searched through all fields, and did not find the constructor
        raise ValueError(f"could not find a variant constructor for {ctor}")

    def data_enum(
        self,
        context: "Context",
        dt: "DefDataType",
        enum: "DefDataType.EnumConstructors",
        obj: "Any",
    ) -> "Any":
        """Validate that *obj* is one of the declared enum constructors."""
        return context.value_validate_enum(obj, enum)

    def prim_unit(self, context: "Context", obj: "Any") -> "Any":
        # Unit is canonically represented as an empty dict.
        return {}

    def prim_bool(self, context: "Context", obj: "Any") -> "Any":
        return to_bool(obj)

    def prim_int64(self, context: "Context", obj: "Any") -> "Any":
        return to_int(obj)

    def prim_text(self, context: "Context", obj: "Any") -> "Any":
        return to_str(obj)

    def prim_timestamp(self, context: "Context", obj: "Any") -> "Any":
        return to_datetime(obj)

    def prim_party(self, context: "Context", obj: "Any") -> "Any":
        return to_party(obj)

    def prim_list(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        # Recurse into each list item via the context.
        return context.convert_list(item_type, obj)

    def prim_date(self, context: "Context", obj: "Any") -> "Any":
        return to_date(obj)

    def prim_contract_id(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        return context.convert_contract_id(item_type, obj)

    def prim_optional(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        return context.convert_optional(item_type, obj)

    def prim_text_map(self, context: "Context", item_type: "Type", obj: "Any") -> "Any":
        return context.convert_text_map(item_type, obj)

    def prim_numeric(self, context: "Context", nat: int, obj: "Any") -> "Any":
        # `nat` (the scale) is not used for canonicalization here.
        return to_decimal(obj)

    def prim_gen_map(
        self, context: "Context", key_type: "Type", value_type: "Type", obj: "Any"
    ) -> "Any":
        # NOTE(review): GenMap values are passed through without converting
        # keys/values, unlike the other containers — confirm this is intended.
        return obj

    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def _record_to_dict(
        self, context: "Context", dt: "DefDataType", record: "DefDataType.Fields", obj: "Any"
    ) -> "Mapping[str, Any]":
        """
        Convert a record object to a Python dict. Should be overridden by subclasses to convert a
        record to a dict whose keys are field names and values are associated field values if record
        objects are not always understood to be dicts.

        The default implementation assumes that ``obj`` is already a ``dict`` that matches this
        contract and simply returns it (though this is verified first).
        """
        return to_record(obj)

    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def _dict_to_record(
        self, context: "Context", dt: "DefDataType", record: "DefDataType.Fields", obj: "Any"
    ):
        # Identity by default: the canonical record form is a plain dict.
        return obj

    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def _variant_to_ctor_value(
        self, context: "Context", dt: "DefDataType", record: "DefDataType.Fields", obj: "Any"
    ) -> "Tuple[str, Any]":
        """
        Convert a variant object to a constructor and a value. Should be overridden by subclasses to
        convert a variant that is not formatted in a recognized way.
        """
        return to_variant(obj)

    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def _ctor_value_to_variant(
        self,
        context: "Context",
        dt: "DefDataType",
        variant: "DefDataType.Fields",
        ctor: str,
        value: "Any",
    ) -> "Any":
        # Canonical variant form: a single-key dict {constructor: value}.
        return {ctor: value}
|
def printme(str):
    """Print the passed string.

    Bug fix: the body used the Python 2 ``print`` statement (a syntax error on
    Python 3); now uses the print() function, which behaves identically on
    both. The parameter name shadows the builtin ``str`` but is kept for
    backward compatibility with existing callers.
    """
    print(str)
# Demonstration calls (messages kept verbatim, spelling included).
printme("I'm first call to user defined funtion!");
printme("Again Secong to call to the same Funtion");
|
import sys
import numpy as np
import gensim
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
torch.backends.cudnn.enabled = True
#
# class MLPClassifier(nn.Module):
# def __init__(self, embed,
# embed_size,
# classifier_hidden_dim,
# classifier_output_dim):
# super(MLPClassifier, self).__init__()
#
# self.embeds = Variable(embed, requires_grad=False).cuda()
#
# self.classifier = nn.Sequential(nn.Linear(embed_size, classifier_hidden_dim, bias=True),
# nn.ReLU(inplace=True),
# nn.Linear(classifier_hidden_dim, classifier_output_dim, bias=True))
#
# def look_up_embed(self, id):
# return self.embeds[id].view(1,-1)
#
# def look_up_embeds(self, ids):
# return self.embeds.index_select(0, ids)
#
# def forward(self, batch):
# input = []
# for id in batch:
# input.append(self.look_up_embed(id))
# inputs = torch.cat(input, 0)
#
# # inputs = self.look_up_embeds(batch)
# output = self.classifier(inputs)
# return output
class MLPClassifier(nn.Module):
    """A 1- or 2-hidden-layer MLP whose forward pass returns cross-entropy loss."""

    def __init__(self, input_dim, hidden_dim, output_dim, layer=1):
        super(MLPClassifier, self).__init__()
        # Layers are created in the same order for a given `layer` value, so
        # RNG-dependent initialization matches the original construction.
        # NOTE: as before, `layer` values other than 1 or 2 leave
        # self.classifier undefined.
        if layer in (1, 2):
            stack = [nn.Linear(input_dim, hidden_dim, bias=True),
                     nn.ReLU(inplace=True)]
            if layer == 2:
                stack += [nn.Linear(hidden_dim, hidden_dim, bias=True),
                          nn.ReLU(inplace=True)]
            stack.append(nn.Linear(hidden_dim, output_dim, bias=True))
            self.classifier = nn.Sequential(*stack)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, batch, label):
        """Return the cross-entropy loss of the classifier on *batch*."""
        logits = self.classifier(batch)
        return self.loss_fn(logits, label)

    def predict(self, batch, label):
        """Return the accuracy of argmax predictions on *batch* (eval mode)."""
        self.eval()
        predictions = self.classifier(batch).argmax(dim=1)
        correct = (predictions == label).sum(dim=0).item()
        self.train()
        return correct / len(label)
class MLP(nn.Module):
    """An embedding table followed by a single linear classifier."""

    def __init__(self, num_node, embedding_size, output_dim):
        super(MLP, self).__init__()
        self.num_node = num_node
        self.embedding_size = embedding_size
        # Construction order (Embedding, re-init, Linear) is preserved so the
        # RNG-dependent initialization is unchanged.
        self.embeds = nn.Embedding(num_node, embedding_size)
        init = torch.FloatTensor(num_node, embedding_size).uniform_(
            -0.5 / embedding_size, 0.5 / embedding_size)
        self.embeds.weight = nn.Parameter(init)
        self.classifier = nn.Linear(embedding_size, output_dim, bias=True)

    def look_up_embeds(self, ids):
        """Return the embedding rows for *ids*."""
        return self.embeds(ids)

    def forward(self, ids):
        """Classify the embeddings of *ids*."""
        return self.classifier(self.look_up_embeds(ids))
class ModuleBlock(nn.Module):
    """A bias-optional Linear layer followed by ReLU, with manual uniform init."""

    def __init__(self, in_features, out_features):
        super(ModuleBlock, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniformly initialize weights in [-1/sqrt(fan_in), 1/sqrt(fan_in)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)

    def forward(self, input, bias=None):
        """Return relu(input @ weight^T [+ bias])."""
        if bias is None:
            out = F.linear(input, self.weight)
        else:
            out = F.linear(input, self.weight, bias)
        return F.relu(out)
class ModuleNet(nn.Module):
    """Node embeddings transformed by a chain of ModuleBlocks, regressed
    toward a frozen snapshot of the target node embeddings."""

    def __init__(self, num_node, embedding_size, num_module, target_embedding=None):
        super(ModuleNet, self).__init__()
        self.num_node = num_node
        self.embedding_size = embedding_size
        if target_embedding is None:
            # Fresh table with small uniform init (same RNG call order as before).
            self.embeds = nn.Embedding(num_node, embedding_size)
            init = torch.FloatTensor(num_node, embedding_size).uniform_(
                -0.5 / embedding_size, 0.5 / embedding_size)
            self.embeds.weight = nn.Parameter(init)
        else:
            self.embeds = nn.Embedding.from_pretrained(target_embedding.data, freeze=False)
        # Frozen snapshot used as the regression target.
        self.target_embeds = nn.Embedding.from_pretrained(self.embeds.weight.data, freeze=True)
        self.module_dict = nn.ModuleDict(
            {str(module_id): ModuleBlock(in_features=embedding_size, out_features=embedding_size)
             for module_id in range(num_module)})

    def copy_embedding(self):
        """Refresh the frozen target snapshot from the trainable table."""
        self.target_embeds.weight.data.copy_(self.embeds.weight.data)

    def forward(self, path, nodes):
        """Mean squared distance between the path-transformed source embedding
        and the frozen target embedding.

        NOTE: entries of `path` index module_dict, whose keys are *strings*.
        """
        out = self.embeds(nodes[:, 0])
        for edge in path:
            out = self.module_dict[edge](out)
        target = self.target_embeds(nodes[:, -1])
        return ((target - out) ** 2).sum(1).mean()

    def save_embedding(self, id_to_name, path, binary):
        """Export the embeddings in word2vec format via gensim (module-level import)."""
        table = self.embeds.weight.data.cpu().numpy()
        learned_embed = gensim.models.keyedvectors.Word2VecKeyedVectors(self.embedding_size)
        learned_embed.add(id_to_name[:self.num_node], table)
        learned_embed.save_word2vec_format(fname=path, binary=binary, total_vec=self.num_node)
        return learned_embed

    def return_embedding(self):
        """Return the embedding table as a CPU NumPy array."""
        return self.embeds.weight.data.cpu().numpy()
class LabelEncoder(object):
    """Trains an MLP node classifier on GPU and returns the learned embeddings."""

    def __init__(self, dataset, args):
        self.model = MLP(len(dataset.type_to_node[args.target_node_type]), args.embedding_size, dataset.num_class).cuda()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=5e-4)
        self.loss_fn = torch.nn.CrossEntropyLoss().cuda()
        self.X = dataset.train_nodes
        self.X_var = Variable(torch.LongTensor(self.X)).cuda()
        self.y = [dataset.node_to_label[node] for node in self.X]
        self.label_var = Variable(torch.LongTensor(self.y)).cuda()
        self.num = len(self.X)
        self.batch_size = 32
        self.num_epoch = 200

    def next_batch(self):
        """Yield consecutive (features, labels) mini-batches over the training set."""
        for i in np.arange(0, self.num, self.batch_size):
            yield self.X_var[i:i + self.batch_size], self.label_var[i:i + self.batch_size]

    def train(self):
        """Train for num_epoch epochs; return the embedding weights on CPU."""
        self.model.train()
        best_train_acc = 0
        # Bug fix: best_train_acc_epoch was only assigned inside the
        # improvement branch, so an epoch with zero accuracy before any
        # improvement raised NameError when formatting the progress message.
        best_train_acc_epoch = 0
        for epoch in range(self.num_epoch):
            for X_batch, y_batch in self.next_batch():
                self.optimizer.zero_grad()
                scores = self.model(X_batch)
                loss = self.loss_fn(scores, y_batch)
                loss.backward()
                self.optimizer.step()
            # Full-training-set accuracy for progress reporting.
            self.model.eval()
            scores = self.model(self.X_var)
            preds = np.argmax(scores.data.cpu().numpy(), axis=1)
            num_correct = np.sum(preds == self.y)
            train_acc = float(num_correct) / self.num
            if train_acc > best_train_acc:
                best_train_acc = train_acc
                best_train_acc_epoch = epoch + 1
            msg = '\rEpoch:{}/{} train acc={}, best train acc={} @epoch:{}'.\
                format(epoch + 1, self.num_epoch, train_acc, best_train_acc, best_train_acc_epoch)
            print(msg, end='')
            sys.stdout.flush()
        print('')
        return self.model.embeds.weight.data.cpu()
|
# Sum numbers typed by the user until the sentinel value 999 is entered.
c = 0
soma = 0
cont = 0
while c != 999:
    c = int(input('Digite um número[999 pra parar]:'))
    soma += c
    cont += 1
# The sentinel itself was counted and added by the loop, so compensate here.
print('Você digitou {} números e a soma entre eles é {}'.format(cont - 1, soma - 999))
# Generated by Django 2.0.7 on 2018-07-21 07:11
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-pins the default of Banner.add_tiem to the
    # datetime captured at generation time. (The field name spelling
    # "add_tiem" comes from the model itself.)

    dependencies = [
        ('users', '0004_auto_20180721_1510'),
    ]

    operations = [
        migrations.AlterField(
            model_name='banner',
            name='add_tiem',
            field=models.DateTimeField(default=datetime.datetime(2018, 7, 21, 15, 11, 26, 461475), verbose_name='添加顺序'),
        ),
    ]
|
import driver
import threading
import time
from rdflib import URIRef, Namespace, Literal, RDFS
from collections import defaultdict
from datetime import datetime
from modelica_brick_parser import Modelica_Brick_Parser
class ModelicaJSONDriver(driver.Driver):
    """Driver that periodically parses Modelica JSON models into Brick triples."""

    def __init__(self, port, servers, bldg_ns, opts):
        # opts: 'lib_path' (Modelica buildings library), 'modelica_json_file',
        # and 'path' (folder of JSON models to parse).
        self._lib_path = opts.get('lib_path')
        self._modelica_json_file = opts.get('modelica_json_file')
        self._path = opts.get('path')
        super().__init__(port, servers, bldg_ns)
        # Background poller; daemon=True so it dies with the process.
        t = threading.Thread(target=self._check_source, daemon=True)
        t.start()

    def _check_source(self):
        # Re-parse the source folder every 10 minutes.
        while True:
            self.load_file()
            time.sleep(600)

    def load_file(self):
        """Parse the Modelica JSON models and register one record per entity (async)."""
        # NOTE(review): self._ns is presumably set by driver.Driver — confirm.
        BLDG = Namespace(self._ns)
        def do_load_file():
            self.app.logger.info(f"Loading modelica models from {self._path}")
            timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S%Z')
            parser = Modelica_Brick_Parser(modelica_buildings_library_path=self._lib_path,
                                           modelica_json_filename=self._modelica_json_file,
                                           json_folder=self._path)
            brick_relationships = parser.get_brick_relationships()
            records = defaultdict(list)
            # NOTE(review): `sources` is never populated or read — appears unused.
            sources = {}
            for rel in brick_relationships:
                # Qualify bare names with the building namespace; keep URIRefs as-is.
                triple = [
                    rel['obj1'] if isinstance(rel['obj1'], URIRef) else BLDG[rel['obj1']],
                    rel['relationship'] if isinstance(rel['relationship'], URIRef) else BLDG[rel['relationship']],
                    rel['obj2'] if isinstance(rel['obj2'], URIRef) else BLDG[rel['obj2']],
                ]
                triple = [str(t) for t in triple]
                records[rel['obj1']].append(triple)
                # add "label":
                # NOTE(review): unlike `triple` above this tuple keeps RDFS.label /
                # Literal unstringified — confirm downstream accepts the mix.
                label_triple = (
                    triple[0], RDFS.label, Literal(rel['obj1'])
                )
                records[rel['obj1']].append(label_triple)
            for ent, triples in records.items():
                rec = {
                    'id': ent,
                    'source': type(self).__name__,
                    'record': {
                        'encoding': 'JSON',
                        # TODO: get some JSON for the entity?
                        'content': '{"content": "N/A"}',
                    },
                    'triples': triples,
                    'timestamp': timestamp
                }
                self.add_record(ent, rec)
            self.app.logger.info(f"Loaded {len(self._records)} records")
        # start thread
        t = threading.Thread(target=do_load_file, daemon=True)
        t.start()
if __name__ == '__main__':
    # Launch the driver with the config file given as the first CLI argument.
    import sys
    ModelicaJSONDriver.start_from_configfile(sys.argv[1])
|
from Contributor import Contributor
# Contributors tracked by this script, identified by their Common Voice client
# hashes, with their clip counts recorded at enrollment time as baselines.
enrolledContributors = [
    Contributor(username="Stefan Grotz", clientHash="16e3ec4bc4408cae301120760cef8d4b4aeaf07f0ca3884afdb8d04857f675af",
                validatedClipsBeginning=34876, recordedClipsBeginning=1078),
    Contributor(username="Pablo b", clientHash="15b600bce2f5eea38cf641df2b00df0bddd31649583e5543f8e0f2169f38cf64",
                validatedClipsBeginning=5565, recordedClipsBeginning=975)
]
# Contributor(username="mkohler", clientHash="28982f1e9e2e76ebfdbb2af6c8380dbd6859a0d1543f2469a985b01189c03151",
# validatedClipsBeginning=67, recordedClipsBeginning=15)
|
class _Array:
def __init__(self, *args):
if len(args) > 2:
raise ValueError("Only takes 2 arguments")
if len(args) == 0:
self.value = None
self.type = object
if len(args) == 1:
try:
self.value = [x for x in args[0]]
except TypeError:
self.value = args[0]
try:
tp = type(self.value[0])
for i in self.value:
if type(i) == tp:
continue
else:
tp = object
break
except TypeError:
tp = type(self.value)
self.type = tp
else:
if not isinstance(args[1], type):
raise TypeError("'type' must be a type")
try:
self.value = [x for x in args[0]]
except TypeError:
self.value = args[0]
try:
x = []
for i in args[0]:
x.append(args[1](i))
self.value = x
except TypeError:
self.value = args[1](self.value)
self.type = args[1]
def __repr__(self):
return "Array({0}, {1})".format(self.value, self.type)
def __iter__(self):
return iter(self.value)
def __recompute(self):
pass
def astype(self, tp):
if not isinstance(tp, type):
raise TypeError("'tp' must be a type")
try:
x = []
for i in self.value:
x.append(tp(i))
self.value = x
except TypeError:
self.value = tp(self.value)
self.type = tp
def list(self, tp):
if not isinstance(tp, type):
raise TypeError("'tp' must be a type")
try:
return tp(self)
except TypeError:
raise TypeError("'tp' must be a str, list, set, frozenset, or tuple")
except AttributeError:
raise TypeError("'tp' must be a str, list, set, frozenset, or tuple")
def append(self, value):
if isinstance(self.value, list):
self.value.append(value)
# Public alias: a dynamically-created subclass of _Array exposed as "Array".
Array = type("Array", (_Array, object), {})
|
from scrapy import cmdline

# Convenience launcher: equivalent to running `scrapy crawl tagLink` in a shell.
cmdline.execute('scrapy crawl tagLink'.split())
|
from django.contrib.auth.models import User
from django.db import models
from django_comments.moderation import CommentModerator, moderator
from taggit.managers import TaggableManager
from django.core.urlresolvers import reverse
class Blog(models.Model):
    """A blog article. Verbose names below are in Belarusian."""
    title = models.CharField(max_length=100, unique_for_date="posted", verbose_name="Загаловак")  # "Title"
    description = models.TextField(verbose_name="Скарочаны змест")  # "Short summary"
    content = models.TextField(verbose_name="Змест")  # "Content"
    posted = models.DateTimeField(auto_now_add=True, db_index=True, verbose_name="Апублікавана")  # "Published"
    is_commentable = models.BooleanField(default=True, verbose_name="Дазвол каментавання")  # "Commenting allowed"
    tags = TaggableManager(blank=True, verbose_name="Тэгі")  # "Tags"
    user = models.ForeignKey(User, editable=False)  # author; set programmatically, not editable in admin

    def get_absolute_url(self):
        # Canonical URL of the article's detail page.
        return reverse("blog_detail", kwargs={"pk": self.pk})

    class Meta:
        ordering = ["-posted"]  # newest first
        verbose_name = "артыкул блогу"
        verbose_name_plural = "артыкулы блогу"
class BlogModerator(CommentModerator):
    # E-mail staff on new comments; per-article commenting is toggled via the
    # model's `is_commentable` field.
    email_notification = True
    enable_field = "is_commentable"


# Hook the moderator into django_comments for the Blog model.
moderator.register(Blog, BlogModerator)
|
# Interactive profitability calculator (all prompts/messages in Russian).
# Read revenue, re-prompting until the input is a non-negative integer.
revenue = input('Введите выручку: ')
while True:
    if revenue.isdigit():
        revenue = int(revenue)
        break
    else:
        revenue = input('Число введено некорректно, повторите попытку: ')
# Read costs with the same validation loop.
cost = input('Введите издержки: ')
while True:
    if str(cost).isdigit():
        cost = int(cost)
        break
    else:
        cost = input('Число введено некорректно, повторите попытку: ')
if revenue > cost:
    print('Финансовый результат компании: прибыль.')
    # Profit and profit margin (percent of revenue).
    return_on_revenue = revenue - cost
    return_on_revenue_perc = return_on_revenue/revenue*100
    print(f'Рентабельность выручки составляет: {return_on_revenue_perc} %')
    number_of_employees = input('Введите количество сотрудников: ')
    while True:
        if str(number_of_employees).isdigit():
            number_of_employees = int(number_of_employees)
            break
        else:
            number_of_employees = input('Число введено некорректно, повторите попытку: ')
    # Profit per employee.
    return_per_emloy = return_on_revenue/number_of_employees
    print(f'Доход компании на сотрудника составляет {return_per_emloy}')
else:
    print('Финансовый результат компании: убыток.')
# Read two whitespace-separated integers from one input line and print their sum.
num1, num2 = input().split()
num1, num2 = int(num1), int(num2)
print(num1+num2)
import random
import webbrowser
import math
import sys
import subprocess, os
################################################################################
# configuration
imagick = 'd:/files/4/programs/terminal-utilities/imagemagick/convert.exe'
framesFolder = 'frames/'
gifName = 'animated.gif'
galleryName = 'index.html'
# some additional configuration is in main()
################################################################################
# exits with an error
def exitWithError(message):
    """Print *message* with an "Error:" prefix and terminate the program."""
    print('\nError: {}'.format(message))
    exit()
################################################################################
# removes bytes within the image
def cut(contents):
    """Return *contents* with one random run of bytes removed.

    The removed run is between 10 bytes and 0.05% of the file long, and never
    touches the first 10 bytes (keeps the image header intact).
    """
    first_touchable = 10
    max_edit_length = int(len(contents) * 0.0005)
    cut_max = min(len(contents) - first_touchable, max_edit_length)
    cut_length = random.randint(10, cut_max)
    cut_start = random.randint(first_touchable, len(contents) - cut_length)
    return contents[:cut_start] + contents[cut_start + cut_length:]
################################################################################
# shifts bytes around within the image
def shift(contents):
    """Return *contents* with one random run of bytes moved to a later position.

    The run is between 10 bytes and 0.1% of the file long; the first 10 bytes
    are never selected as the source of the move.
    """
    first_touchable = 10
    max_edit_length = int(len(contents) * 0.001)
    cut_length = random.randint(10, max_edit_length)
    cut_start = random.randint(first_touchable, len(contents) - cut_length)
    cut_end = cut_start + cut_length
    insert_point = random.randint(cut_start, len(contents))
    moved = contents[cut_start:cut_end]
    return (contents[:cut_start] + contents[cut_end:insert_point]
            + moved + contents[insert_point:])
################################################################################
# generates a gallery of the glitched frames
def generateGallery(count, openGallery=True, filename=galleryName):
    """Write an HTML gallery of the glitched frames and optionally open it.

    Image width is scaled so ~sqrt(count) images fit per row.
    """
    pieces = ['''<!DOCTYPE html>
<html>
<head>
<title>Glitch Results</title>
<style>
html, body { margin: 0; padding: 0; background: #111; }
img { width: %.1f%%; }</style>
</head>
<body>''' % (100 / math.sqrt(count))]
    for frame_idx in range(count):
        pieces.append('<a href="%s%d.jpg">' % (framesFolder, frame_idx))
        pieces.append('<img src="%s%d.jpg"></a>' % (framesFolder, frame_idx))
    pieces.append('''</body>
</html>''')
    with open(filename, 'w') as out:
        out.write(''.join(pieces))
    if openGallery:
        webbrowser.open(filename, new=2)
################################################################################
# generates a GIF of the glitched frames
def animate(output, delay=1.5, longestEdge=500, openGif=True):
    """Assemble the glitched frames into an animated GIF with ImageMagick.

    delay is in ticks; frames are resized so the longest edge is at most
    longestEdge and quantized to 128 colors.
    """
    cmd = imagick + ' -delay %.1f -loop 0 ' % delay
    cmd += '-resize "%dx%d>" -colors 128 ' % (longestEdge, longestEdge)
    # Bug fix: this fragment is never %-formatted, so the literal '2%%'
    # reached the shell verbatim and ImageMagick received an invalid fuzz
    # value; a plain '2%' is what -fuzz expects.
    cmd += '-coalesce -layers OptimizeTransparency -fuzz 2% '
    cmd += '"%s*.jpg" "%s"' % (framesFolder, output)
    print('Animating...')
    # some of the files are bound to be corrupt, so we'll catch the error
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError:
        pass
    print('Done.')
    if openGif:
        os.system('"%s"' % output)
################################################################################
# does glitched variations of original frame
def doVariations(data, variations, minCuts, maxCuts):
    """Write `variations` glitched copies of *data* into the frames folder.

    Each copy receives between minCuts and maxCuts random edits, each edit
    being either a byte cut or a byte shift.
    """
    for frame_idx in range(variations):
        frame = data
        edit_count = random.randint(minCuts, maxCuts)
        print('Generating #%d: %d edits' % (frame_idx, edit_count))
        for _ in range(edit_count):
            # Pick the edit kind: 0 = remove bytes, 1 = move bytes.
            if random.randint(0, 1) == 0:
                frame = cut(frame)
            else:
                frame = shift(frame)
        with open(framesFolder + '%d.jpg' % frame_idx, 'wb') as out:
            out.write(frame)
################################################################################
# handles logic of varying the glitched frames
def main():
    """Drive the glitching: read the source image, emit variations, and
    build the gallery and/or GIF depending on CLI flags."""
    openGallery = True
    openGif = True
    variations = 20
    delay = 7
    minCuts = 1
    maxCuts = 8
    # Input file: first CLI argument, defaulting to 'input.jpg'.
    filename = sys.argv[1] if len(sys.argv) > 1 else 'input.jpg'
    try:
        # Bug fixes: the file handle was never closed (now a context manager),
        # and the old message 'Couldn''t ...' was string concatenation that
        # rendered as "Couldnt".
        with open(filename, 'rb') as f:
            c = f.read()
    except OSError:
        exitWithError("Couldn't open input file.")
    doVariations(c, variations, minCuts, maxCuts)
    print()
    # generate the gallery and open, if desired
    if '-dontopen' in sys.argv:
        openGallery = False
        openGif = False
    if '-animate' in sys.argv:
        openGallery = False
    generateGallery(variations, openGallery=openGallery)
    if '-animate' in sys.argv:
        animate(gifName, openGif=openGif, delay=delay)
################################################################################
# program entrypoint
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
from sys import exc_info
from datetime import timezone, datetime, date
from typing import Optional, List
from fastapi import APIRouter, HTTPException
from fastapi.logger import logger
from app.services import get_users, parse_dt, User
router = APIRouter()
BASE_URL = 'users'
@router.get(f'/{BASE_URL}')
async def get_all_users(start: Optional[date] = None, end: Optional[date] = None) -> List[User]:
    """Return users between *start* and *end* (dates interpreted at UTC midnight).

    Missing bounds default to 2019-06-01 / 2020-05-01. Any failure maps to a
    500 response.
    """
    try:
        db = router.db
        if start is not None:
            start_dt = datetime.combine(start, datetime.min.time()).replace(tzinfo=timezone.utc)
        else:
            start_dt = parse_dt('2019-06-01', '%Y-%m-%d').replace(tzinfo=timezone.utc)
        if end is not None:
            end_dt = datetime.combine(end, datetime.min.time()).replace(tzinfo=timezone.utc)
        else:
            end_dt = parse_dt('2020-05-01', '%Y-%m-%d').replace(tzinfo=timezone.utc)
        return get_users(start_dt, end_dt, db=db)
    except Exception:
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, and logger.error() was handed exc_info()[0] as a
        # %-argument with no placeholder in the format string, so nothing
        # useful was logged. logger.exception records the full traceback.
        logger.exception("Unexpected error")
        raise HTTPException(status_code=500, detail='Unexpected error')
|
#!/usr/bin/python
#
# Copyright 2010 Brian Dolbec <brian.dolbec@gmail.com>
# Copyright 2002-2010 Gentoo Technologies, Inc.
# Distributed under the terms of the GNU General Public License v2 or later
#
# $Header$
"""'enalyze' is a flexible utility for Gentoo linux which can display various
information about installed packages, such as the USE flags used and the
packages that use them. It can also be used to help rebuild /etc/portage/package.*
files in the event of corruption, and possibly more.
"""
from __future__ import print_function
import sys
# This block ensures that ^C interrupts are handled quietly.
try:
    import signal

    def exithandler(signum, frame):
        # Ignore further INT/TERM while shutting down, then exit non-zero.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        print()
        sys.exit(1)

    signal.signal(signal.SIGINT, exithandler)
    signal.signal(signal.SIGTERM, exithandler)
    # Default SIGPIPE behavior so piping output to e.g. `head` doesn't traceback.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except KeyboardInterrupt:
    print()
    sys.exit(1)
from gentoolkit import enalyze, errors

try:
    enalyze.main()
except errors.GentoolkitException as err:
    # With --debug show the full traceback; otherwise print a friendly error.
    if '--debug' in sys.argv:
        raise
    else:
        from gentoolkit import pprinter as pp
        sys.stderr.write(pp.error(str(err)))
        print()
        print("Add '--debug' to global options for traceback.")
        sys.exit(1)
|
#!/usr/bin/python3
from configparser import ConfigParser
from app_exceptions import ConfigReadError
import os
class ConfigModel:
    """Reads model parameters from config/<filename> (section [deter] by default)."""

    #constructor
    def __init__(self, filename='model.cfg', section='deter'):
        self.filename = filename
        self.section = section

    def get(self):
        """Return the configured section as a dict; raise ConfigReadError on failure."""
        # Config lives in a 'config' folder next to this module.
        config_file = (os.path.dirname(__file__) or '.') + '/config/' + self.filename
        # Test if model.cfg exists
        if not os.path.exists(config_file):
            # get model params from env vars
            raise ConfigReadError('Model configuration','Model configuration file, {0}, was not found.'.format(self.filename))
        # create a parser
        parser = ConfigParser()
        # read config file
        parser.read(config_file)
        # get section, default
        cfg = {}
        if parser.has_section(self.section):
            params = parser.items(self.section)
            for param in params:
                cfg[param[0]] = param[1]
        else:
            raise ConfigReadError('Model configuration', 'Section {0} not found in the {1} file'.format(self.section, self.filename))
        return cfg
#! /usr/bin/env python3
import pandas
import rdflib
# initialization
basedir = "/home/ivo/dataprojecten/SAA_CTA/"
g = rdflib.Graph()

# read data (expects columns 'URI' and 'AATLABEL')
data = pandas.read_csv(basedir + "data/termen.csv")

# create triples: one rdfs:label statement per term
for index, row in data.iterrows():
    s = rdflib.URIRef(str(row['URI']))
    p = rdflib.URIRef("http://www.w3.org/2000/01/rdf-schema#label")
    o = rdflib.Literal(str(row['AATLABEL']))
    g.add((s,p,o))

# write RDF turtle
# NOTE(review): "wb" assumes serialize() returns bytes (rdflib < 6); newer
# rdflib returns str by default — verify the installed version.
outfile = basedir + "data/termen.ttl"
s = g.serialize(format='turtle')
f = open(outfile,"wb")
f.write(s)
f.close()
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
import os
import logging
from logging.config import dictConfig
from tonga import tonga
from examples.coffee_bar.bartender.models.events import CoffeeFinished
from examples.coffee_bar.bartender.models.commands import MakeCoffee
from examples.coffee_bar.coffeemaker.models.events import CoffeeStarted
if __name__ == '__main__':
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))

    # Verbose console logging for the 'tonga' logger only.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'verbose': {
                'format': '[%(asctime)s] %(levelname)s: %(name)s/%(module)s/%(funcName)s:%(lineno)d (%(thread)d) %(message)s'
            },
        },
        'handlers': {
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'verbose',
            },
        },
        'loggers': {
            'tonga': {
                'level': 'DEBUG',
                'handlers': ['console'],
                'propagate': True,
            },
        }
    }
    dictConfig(LOGGING)
    logger = logging.getLogger(__name__)

    # Initializes Aio Event
    aio_event = tonga(avro_schemas_folder=os.path.join(BASE_DIR, 'tests/coffee_bar/avro_schemas'),
                      http_handler=False)

    # Registers events (serializer name -> class bindings)
    aio_event.serializer.register_event_class(CoffeeFinished,
                                              'tonga.bartender.event.CoffeeFinished')
    aio_event.serializer.register_event_class(MakeCoffee,
                                              'tonga.coffeemaker.command.MakeCoffee')
    aio_event.serializer.register_event_class(CoffeeStarted,
                                              'tonga.coffeemaker.event.CoffeeStarted')

    # Creates consumer reading the coffee-maker event topic from the beginning.
    aio_event.append_consumer('debug', mod='earliest', bootstrap_servers='localhost:9092',
                              client_id='debug_1', topics=['coffee-maker-events'], auto_offset_reset='earliest',
                              max_retries=10, retry_interval=1000,
                              retry_backoff_coeff=2, isolation_level='read_uncommitted')

    # After build state, start (http handler, consumer, producer)
    aio_event.start()
|
import numpy as np
import pickle
from sklearn.metrics import roc_auc_score
from isolation_forest.isolation_forest import IsolationForest
from isolation_forest.tree_grower_basic import TreeGrowerBasic
from isolation_forest.tree_grower_generalized_uniform import TreeGrowerGeneralizedUniform
import os
from utils import load_data
def test_grower_saving_files(grower, grower_args, source_path, target_path,
                             additional_info=""):
    """Train isolation forests with ``grower`` on every dataset in
    ``source_path`` and save the anomaly scores to disk.

    Parameters:
        grower: tree-grower class, instantiated as grower(X, *grower_args).
        grower_args: extra constructor arguments for the grower.
        source_path: directory of .npz files with 'data' (repeat, n, d)
            and 'labels' (repeat, n) arrays.
        target_path: root output directory; a per-grower subfolder is
            created inside it.
        additional_info: suffix distinguishing runs of the same grower.

    NOTE: reads the module-level globals ``tree_cnt`` and ``sample_size``.
    """
    # One subfolder per (grower, additional_info) pair.
    target_folder_path = os.path.join(
        target_path, f"{grower.__name__}_{additional_info}")
    if not os.path.exists(target_folder_path):
        os.mkdir(target_folder_path)
    datasets = os.listdir(source_path)
    for dataset in datasets:
        ds_name = dataset.split('.')[0]
        # os.path.join instead of hard-coded '\\' so this also runs on POSIX.
        loaded_data = np.load(os.path.join(source_path, dataset))
        data = loaded_data['data']
        labels = loaded_data['labels']
        total_sample_cnt = data.shape[1]
        repeat_cnt = data.shape[0]
        scores = np.zeros((repeat_cnt, total_sample_cnt))
        for i in range(repeat_cnt):
            X = data[i, ...]
            y = labels[i, ...]
            gr_args = (X,) + grower_args
            new_grower = grower(*gr_args)
            forest = IsolationForest(new_grower, X, tree_cnt, sample_size)
            forest.grow_forest()
            print(f"trained, {grower.__name__}!")
            scores[i, ...] = forest.compute_paths()
            print(f'{ds_name}: {roc_auc_score(y, scores[i,...])}')
        # BUG FIX: scores were previously written straight into target_path,
        # so every grower overwrote the previous grower's result files.
        # Save into the per-grower folder created above instead.
        np.save(os.path.join(target_folder_path, f'{ds_name}.npy'), scores)
# Experiment configuration (tree_cnt and sample_size are also read as
# globals inside test_grower_saving_files).
tree_cnt = 100
sample_size = 256
repeat_cnt = 30
power = 2
# One (grower class, constructor args, label suffix) triple per experiment.
growers = [TreeGrowerBasic, TreeGrowerGeneralizedUniform]
grower_arg_sets = [(sample_size,), (sample_size,power)]
additional_infos = ['', f'power_{power}']
source_path = 'artificial_data'
target_path = 'results_artificial'
if(not os.path.exists(target_path)):
    os.mkdir(target_path)
cnt = 0  # NOTE(review): unused counter — candidate for removal
for grower, grower_arg_set, additional_info in zip(growers, grower_arg_sets, additional_infos):
    test_grower_saving_files(grower, grower_arg_set, source_path, target_path,
                             additional_info)
|
import pdb
import sys
from os import path
import numpy as np
import torch
from scipy.io import wavfile
from utils import vocoder
from utils.SqueezeWave.TacotronSTFT import TacotronSTFT
########################################################################################################################
def get_mel(audio):
    """Compute a mel spectrogram from a raw waveform tensor.

    Uses a TacotronSTFT configured for 22.05 kHz audio
    (1024-sample window, 256-sample hop, mel range 0-8000 Hz).
    """
    transform = TacotronSTFT(
        filter_length=1024,
        hop_length=256,
        win_length=1024,
        sampling_rate=22050,
        mel_fmin=0.0,
        mel_fmax=8000.0,
    )
    # Wrap without gradient tracking (legacy Variable API).
    wav = torch.autograd.Variable(audio, requires_grad=False)
    return transform.mel_spectrogram(wav)
########################################################################################################################
def synthesize_one_audio(x, info, itarget, filename,
                         target_spk, blow, path_out,
                         sw_model,
                         print_mel=False,
                         convert=True, device='cuda', sr=22050,
                         normalize=True, xmax=0.98,
                         target_emb=None
                         ):
    """Optionally convert one utterance to the target speaker with Blow,
    vocode it with SqueezeWave, and write a 16-bit wav file.

    x: raw waveform tensor; info: metadata whose column 3 is the source
    speaker index; itarget: target speaker index tensor; blow: flow model;
    sw_model: SqueezeWave vocoder. Writes
    '<path_out>/<name>[_to_<target_spk>].wav' and returns the float
    waveform that was written.
    """
    x = get_mel(x)
    isource = info[:, 3]
    # Conversion through the flow: forward with the source identity,
    # reverse with the target identity.
    if convert:
        x = x.to(device)
        isource = isource.to(device)
        itarget = itarget.to(device)
        z = blow.forward(x, isource)[0]
        # BUG FIX: None checks must use 'is not None'; '!=' against a
        # tensor is elementwise/ambiguous.
        if target_emb is not None:
            # Temporarily swap embedding slot 0 with the requested vector.
            original_emb = blow.embedding.weight.data[0]
            blow.embedding.weight.data[0] = target_emb
        x = blow.reverse(z, itarget)
        if target_emb is not None:
            blow.embedding.weight.data[0] = original_emb
        x = x.cpu()
    if print_mel:
        # Hacking to print mel_syn here
        _, mel_fname = path.split(filename)  # p285/p285_04452
        if convert:
            mel_fname += '_to_' + target_spk
        mel_fname = path.join(path_out,
                              "{}_mel.pt".format(mel_fname))
        torch.save(x[0], mel_fname)
        # print("Saved mel to {}".format(mel_fname))
    ##
    # Vocoder Inference
    x = vocoder.infer(mel=x,
                      squeezewave=sw_model)
    x = x.cpu()
    # Filename
    _, filename = path.split(filename)
    if convert:
        filename += '_to_' + target_spk
    filename = path.join(path_out, filename + '.wav')
    # Synthesize
    # Refer to audioutils.synthesize
    x = x.squeeze().numpy().astype(np.float32)
    # Normalize: zero-mean, scale the peak to xmax; +/-inf positions are
    # excluded from the statistics and clamped to +/-xmax afterwards.
    if normalize:
        neginf_ps = np.isneginf(x)
        posinf_ps = np.isposinf(x)
        x[neginf_ps] = np.nan
        x[posinf_ps] = np.nan
        x -= np.nanmean(x)
        mx = np.nanmax(np.abs(x))
        if mx > 0:
            x *= xmax / mx
        x[neginf_ps] = -xmax
        x[posinf_ps] = xmax
        x[np.isnan(x)] = 0
    else:
        x = np.clip(x, -xmax, xmax)
        x[np.isnan(x)] = xmax
    # To 16 bit & save
    wavfile.write(filename, sr, np.array(x * 32767, dtype=np.int16))
    # return x as float
    return x
########################################################################################################################
def timer(start, end):
    """Format the elapsed time between two timestamps as 'DD:HH:MM:SS'."""
    elapsed = end - start
    days, elapsed = divmod(elapsed, 24 * 3600)
    hours, elapsed = divmod(elapsed, 3600)
    minutes, seconds = divmod(elapsed, 60)
    # return '{:0>2}:{:0>2}:{:05.2f}'.format(int(hours),int(minutes),seconds)
    return '{:0>2}:{:0>2}:{:0>2}:{:0>2}'.format(int(days), int(hours),
                                                int(minutes), int(seconds))
########################################################################################################################
class FIFOFixed(object):
    """Fixed-size FIFO window: every push evicts and returns the oldest
    element, keeping the window length constant."""

    def __init__(self, l):
        # Copy so the caller's list is never mutated.
        self.data = list(l)
        return

    def push(self, v):
        """Append v and return the element that falls off the front."""
        self.data.append(v)
        return self.data.pop(0)

    def upperbound(self, factor=2):
        """Mean + factor * std of the current window contents."""
        return np.mean(self.data) + factor * np.std(self.data)
########################################################################################################################
def print_arguments(args):
    """Pretty-print an argparse.Namespace, one '\\tkey: value' line per
    argument, in alphabetical order, framed by '=' rules."""
    print('=' * 100)
    print('Arguments =')
    for key in sorted(vars(args)):
        print('\t' + key + ':', getattr(args, key))
    print('=' * 100)
    return
def print_model_report(model, verbose=3):
    """Count model parameters, optionally printing details.

    verbose > 1 prints the model, verbose > 2 prints each parameter's
    shape, verbose > 0 prints the human-readable total. Returns the raw
    parameter count.
    """
    if verbose > 1:
        print(model)
    if verbose > 2:
        print('Dimensions =', end=' ')
    total = 0
    for param in model.parameters():
        shape = param.size()
        if verbose > 2:
            print(shape, end=' ')
        total += np.prod(shape)
    if verbose > 2:
        print()
    if verbose > 0:
        print('Num parameters = %s' % (human_format(total)))
    return total
def human_format(num):
    """Format a number with a metric suffix, e.g. 1500 -> '1.5K'."""
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    magnitude = 0
    while abs(num) >= 1000:
        num /= 1000.0
        magnitude += 1
    return '%.1f%s' % (num, suffixes[magnitude])
########################################################################################################################
def repackage_hidden(h):
    """Detach hidden state tensor(s) from the autograd graph.

    Recurses into lists and tuples (preserving the container type);
    None passes through unchanged.
    """
    if h is None:
        return None
    if isinstance(h, list):
        return [repackage_hidden(item) for item in h]
    if isinstance(h, tuple):
        return tuple(repackage_hidden(item) for item in h)
    return h.detach()
########################################################################################################################
TWOPI = 2 * np.pi

def get_timecode(dim, t, tframe, size=None, maxlen=10000, collapse=False):
    """Sinusoidal time code of shape (batch, dim, size) for frame offsets t.

    Channel frequencies are log-spaced as 10**(k/dim), k = 1..dim; the
    code at position n is sin(2*pi*f*n/maxlen). With collapse=True the
    dim axis is averaged down to a single channel.
    """
    if size is None:
        size = tframe
    device = t.device
    # (batch, 1, size): absolute sample positions within each frame
    positions = t.float().view(-1, 1, 1) + torch.linspace(
        0, tframe - 1, steps=size).view(1, 1, -1).to(device)
    # (1, dim, 1): per-channel frequencies
    freqs = (10 ** (torch.arange(1, dim + 1).float() / dim)).view(1, -1, 1).to(device)
    code = torch.sin(TWOPI * freqs * positions / maxlen)
    if collapse:
        code = code.mean(1).unsqueeze(1)
    return code
"""
def get_timecode(dim,t,size,fmin=30,fmax=330,fs=16000):
samples=t.float().view(-1,1,1)+torch.arange(size).to(t.device).float().view(1,1,-1)
freqs=torch.logspace(np.log10(fmin).item(),np.log10(fmax).item(),steps=dim).to(t.device).view(1,-1,1)
signal=torch.sin(TWOPI*freqs*samples/fs)
return signal
#"""
########################################################################################################################
def freeze_model(model):
    """Disable gradient updates for every parameter of model (in place)."""
    for p in model.parameters():
        p.requires_grad = False
    return
########################################################################################################################
HALFLOGTWOPI = 0.5 * np.log(2 * np.pi).item()

def gaussian_log_p(x, mu=None, log_sigma=None):
    """Elementwise Gaussian log-density of x under N(mu, exp(log_sigma)^2).

    With mu or log_sigma omitted, the standard normal N(0, 1) is used.
    """
    if mu is None or log_sigma is None:
        return -HALFLOGTWOPI - 0.5 * x ** 2
    diff = x - mu
    return -HALFLOGTWOPI - log_sigma - 0.5 * diff ** 2 / torch.exp(2 * log_sigma)
def gaussian_sample(x, mu=None, log_sigma=None):
    """Reparameterized sample mu + exp(log_sigma) * x.

    Acts as the identity when either parameter is missing (standard
    normal case).
    """
    missing = mu is None or log_sigma is None
    return x if missing else mu + torch.exp(log_sigma) * x
def disclogistic_log_p(x, mu=0, sigma=1, eps=1e-12):
    """Log-probability of x under a discretized logistic with unit bins.

    eps keeps the log finite when the bin probability underflows.
    """
    z = (x - mu) / sigma
    bin_prob = torch.sigmoid(z + 0.5) - torch.sigmoid(z - 0.5)
    return torch.log(bin_prob + eps)
########################################################################################################################
def loss_flow_nll(z, log_det):
    """Negative log-likelihood loss for a normalizing flow.

    z: latent tensor of shape (sbatch, n_mel_channels, lchunk).
    log_det: per-sample log-determinant, shape (sbatch,).
    Returns (nll tensor, float32 array [nll, log_p, log_det]); all three
    are normalized per latent element and averaged over the batch.
    """
    # size of: z = sbatch * n_mel_channels * lchunk
    # log_det = sbatch
    _, n_mel_channels, lchunk = z.size()
    size = n_mel_channels * lchunk
    # Standard-normal log-density summed over all latent dimensions.
    log_p = gaussian_log_p(z).sum(2).sum(1)
    nll = -log_p - log_det
    # Per-element normalization so values are comparable across chunk sizes.
    log_det /= size
    log_p /= size
    nll /= size
    log_det = log_det.mean()
    log_p = log_p.mean()
    nll = nll.mean()
    """
    # Sanity check
    if torch.isnan(nll) or nll>1000:
        print('\n***** EXIT: Wrong value in loss (log_p={:.2f},log_det={:.2f}) *****'.format(log_p.item(),log_det.item()))
        sys.exit()
    #"""
    return nll, np.array([nll.item(), log_p.item(), log_det.item()],
                         dtype=np.float32)
########################################################################################################################
def save_stuff(basename, report=None, args=None, model=None, optim=None):
    """Persist any of report/args/model/optim next to basename.

    Each non-None argument is written as '<basename>.<kind>.pt' with
    torch.save; None arguments are skipped.
    """
    # Report
    if report is not None:
        torch.save(report, basename + '.report.pt')
    if args is not None:
        torch.save(args, basename + '.args.pt')
    # Model & optim
    if model is not None:
        try:
            # DataParallel/DistributedDataParallel wrap the real model in
            # .module; save the unwrapped model when present.
            torch.save(model.module, basename + '.model.pt')
        except AttributeError:
            # BUG FIX: was a bare 'except:' — only the missing-.module
            # case should fall back; other save errors must propagate.
            torch.save(model, basename + '.model.pt')
    if optim is not None:
        torch.save(optim, basename + '.optim.pt')
    return
def load_stuff(basename, device='cpu'):
    """Best-effort load of '<basename>.{report,args,model,optim}.pt'.

    Missing or unreadable files yield None for that slot.
    Returns the tuple (report, args, model, optim).
    """
    def _try_load(suffix):
        # Was four copies of a bare 'except:'; 'except Exception' keeps
        # the best-effort semantics without swallowing KeyboardInterrupt
        # or SystemExit.
        try:
            return torch.load(basename + suffix, map_location=device)
        except Exception:
            return None

    report = _try_load('.report.pt')
    args = _try_load('.args.pt')
    model = _try_load('.model.pt')
    optim = _try_load('.optim.pt')
    return report, args, model, optim
########################################################################################################################
def pairwise_distance_matrix(x, y=None, eps=1e-10):
    """Squared Euclidean distances between the rows of x and y.

    With y omitted, distances within x are computed and the diagonal is
    zeroed before clamping, so self-distances come out as eps.
    """
    sq_x = x.pow(2).sum(1).view(-1, 1)
    if y is None:
        sq_y = sq_x.view(1, -1)
        dist = sq_x + sq_y - 2 * torch.mm(x, x.t().contiguous())
        # Numerical noise can make the diagonal slightly non-zero.
        dist -= torch.diag(dist.diag())
    else:
        sq_y = y.pow(2).sum(1).view(1, -1)
        dist = sq_x + sq_y - 2 * torch.mm(x, y.t().contiguous())
    return torch.clamp(dist, eps, np.inf)
########################################################################################################################
class RoundSTE(torch.autograd.Function):
    """Rounding with a straight-through estimator: forward rounds to the
    nearest integer, backward passes the gradient through unchanged."""

    @staticmethod
    def forward(ctx, x):
        # Non-differentiable rounding; the gradient is defined in backward.
        return torch.round(x)

    @staticmethod
    def backward(ctx, grad):
        # Straight-through: treat forward as the identity for gradients.
        return grad

    @staticmethod
    def reverse(ctx, x):
        # Stochastic pseudo-inverse: add uniform noise in [-0.5, 0.5).
        return x + torch.rand_like(x) - 0.5
########################################################################################################################
|
from flask_sqlalchemy import SQLAlchemy
from app import app
db = SQLAlchemy(app)


@app.teardown_request
def teardown_request(exception):
    """Roll back the SQLAlchemy session when the request raised, then
    always close the session."""
    if exception:
        db.session.rollback()
    # BUG FIX: the session was closed twice in a row; one close suffices.
    db.session.close()
|
# Generated by Django 3.0.3 on 2021-05-04 04:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional 'itemImage' ImageField to
    # InventoryItem; uploaded files go under static/images/stock.

    dependencies = [
        ('ticketingsystem', '0005_auto_20210503_2039'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventoryitem',
            name='itemImage',
            field=models.ImageField(blank=True, upload_to='static/images/stock', verbose_name='Item Picture'),
        ),
    ]
|
# Build a Variable List of Adjacent Cells
def build_var_list(row, col, VARS):
    """Return [cell variable, neighbour variables...] for grid cell
    (row, col).

    VARS is a flat, row-major list representing an n x n grid; ``n`` is
    a module-level global defined elsewhere in this file. Corners and
    edges get only their in-bounds neighbours (8-neighbourhood). The
    cell's own variable is always first, but the neighbour ordering
    differs between the corner/edge/interior branches.
    """
    var_list = []
    idx = row*n + col
    var_list.append(VARS[idx])
    # Flat indices of the eight potential neighbours.
    idx_right = row*n + col+1
    idx_left = row*n + col-1
    idx_down = (row+1)*n + col
    idx_up = (row-1)*n + col
    idx_right_down = (row+1)*n + col+1
    idx_left_down = (row+1)*n + col-1
    idx_left_up = (row-1)*n + col-1
    idx_right_up = (row-1)*n + col+1
    if row == 0:
        # Top row: no 'up' neighbours exist.
        var_list.append(VARS[idx_down])
        if col == 0:
            var_list.append(VARS[idx_right])
            var_list.append(VARS[idx_right_down])
        elif col == n-1:
            var_list.append(VARS[idx_left_down])
            var_list.append(VARS[idx_left])
        else:
            var_list.append(VARS[idx_right])
            var_list.append(VARS[idx_right_down])
            var_list.append(VARS[idx_left_down])
            var_list.append(VARS[idx_left])
    elif row == n-1:
        # Bottom row: no 'down' neighbours exist.
        var_list.append(VARS[idx_up])
        if col == 0:
            var_list.append(VARS[idx_right_up])
            var_list.append(VARS[idx_right])
        elif col == n-1:
            var_list.append(VARS[idx_left])
            var_list.append(VARS[idx_left_up])
            pass
        else:
            var_list.append(VARS[idx_right_up])
            var_list.append(VARS[idx_right])
            var_list.append(VARS[idx_left])
            var_list.append(VARS[idx_left_up])
    else:
        # Interior row: both vertical neighbours exist.
        var_list.append(VARS[idx_down])
        var_list.append(VARS[idx_up])
        if col == 0:
            var_list.append(VARS[idx_right_up])
            var_list.append(VARS[idx_right])
            var_list.append(VARS[idx_right_down])
        elif col == n-1:
            var_list.append(VARS[idx_left])
            var_list.append(VARS[idx_left_up])
            var_list.append(VARS[idx_left_down])
        else:
            var_list.append(VARS[idx_right_up])
            var_list.append(VARS[idx_right])
            var_list.append(VARS[idx_right_down])
            var_list.append(VARS[idx_left])
            var_list.append(VARS[idx_left_up])
            var_list.append(VARS[idx_left_down])
    return var_list
# Import des librairies NLP (natural language processing)
import nltk
import re
from nltk.stem import *
from french_lefff_lemmatizer.french_lefff_lemmatizer import FrenchLefffLemmatizer
import treetaggerwrapper
import hunspell
# Import des librairies nécessaire pour la partie machine learning
import numpy as np
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import *
from keras.models import Model
from keras.layers.convolutional import *
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.optimizers import Adam
from keras.optimizers import RMSprop
from keras.optimizers import SGD
from keras.models import model_from_json
# autres imports
import time
from collections import *
import itertools
import pickle
from Response import *
# Load the training metadata produced by the training script.
def load_vocabulary(rep = ""):
    """Load the pickled model metadata from '<rep>model_data'.

    Returns (words, vocabulary, classes, entity, size_data_max,
    ignore_words).
    """
    # BUG FIX: the file handle was opened inline and never closed;
    # 'with' guarantees it is released even on unpickling errors.
    with open(rep + "model_data", "rb") as f:
        data = pickle.load(f)
    words = data['words']
    vocabulary = data['vocabulary']
    classes = data['classes']
    entity = data['entity']
    size_data_max = data['size_data_max']
    ignore_words = data['ignore_words']
    return words,vocabulary,classes,entity,size_data_max,ignore_words
# Load the JSON intent definitions.
def load_intent(rep = ""):
    """Parse '<rep>Intentions.json' and return the intent dictionary."""
    # BUG FIX: 'json' is not among this module's visible imports; import
    # it locally so the function works regardless of what the star
    # imports happen to expose.
    import json
    with open(rep + 'Intentions.json') as json_data:
        intents = json.load(json_data)
    return intents
# Load the trained intent-classification network.
def load_model(rep = ""):
    """Rebuild the Keras intent model from '<rep>model.json' and load its
    weights from '<rep>model.h5'."""
    # FIX: close the architecture file deterministically with 'with'
    # instead of relying on the explicit close() being reached.
    with open(rep + 'model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # Weights are stored separately from the JSON architecture.
    model.load_weights(rep + "model.h5")
    print("chargement du réseau terminé")
    return model
# Load the trained entity-tagging network.
def load_model_entity(rep = ""):
    """Rebuild the Keras entity model from '<rep>model_entity.json' and
    load its weights from '<rep>model_entity.h5'."""
    # FIX: close the architecture file deterministically with 'with'
    # instead of relying on the explicit close() being reached.
    with open(rep + 'model_entity.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model_entity = model_from_json(loaded_model_json)
    # Weights are stored separately from the JSON architecture.
    model_entity.load_weights(rep + "model_entity.h5")
    print("chargement du réseau terminé")
    return model_entity
# Load the synonym dictionary (WOLF French WordNet dump).
def load_thesaurus(thesaurus,rep = ""):
    """Populate ``thesaurus`` (dict: word -> set of synonyms) in place
    from the WOLF XML file, keeping only lowercase single-token words.

    NOTE(review): ``enumerate_wolf_synonyms`` is not defined in this
    file; presumably provided by one of the star imports — verify.
    """
    for line, syn in enumerate(enumerate_wolf_synonyms(rep + "wolf/wolf-1.0b4.xml")):
        # Keep single words only (multi-word expressions are dropped).
        clean = [_.lower() for _ in syn if " " not in _]
        if len(clean) > 1:
            for word in clean:
                if word not in thesaurus:
                    thesaurus[word] = set(clean)
                    continue
                else:
                    # Merge this synset into the existing entry.
                    for cl in clean:
                        thesaurus[word].add(cl)
                if len(thesaurus) > 50000:
                    # NOTE(review): this break only exits the inner loop
                    # over 'clean'; if the 50k cap was meant to stop the
                    # whole load, the outer loop keeps running — confirm.
                    break
    print("thesaurus", len(thesaurus),"line")
# Container matching the TreeTagger output format.
class TreeTaggerWord:
    """One tagged token: surface form, POS tag, and lemma."""

    def __init__(self, triplet):
        self.word, self.postag, self.lemma = triplet
def formatTTG(output):
    """Wrap each tab-separated TreeTagger output line in a
    TreeTaggerWord and return the resulting list."""
    return [TreeTaggerWord(line.split("\t")) for line in output]
class Chatbot_CNN:
    # CNN-based chatbot engine: intent classification plus per-token
    # entity tagging over preprocessed French text, with one conversation
    # context per user session.
    def __init__(self, words,vocabulary,classes,entity,size_data_max,ignore_words,stemmer,lemmatizer,tagger,speller,model,model_entity,mode="default",rep = ""):
        self.words = words
        self.vocabulary = vocabulary
        self.classes = classes
        self.entity = entity
        self.size_data_max = size_data_max
        self.ignore_words = ignore_words
        self.stemmer = stemmer
        self.lemmatizer = lemmatizer
        self.tagger = tagger
        self.speller = speller
        self.model = model
        self.model_entity = model_entity
        self.mode = mode
        self.rep = rep
        # Minimum prediction probability for an intent to be accepted.
        self.ERROR_THRESHOLD = 0.45
        # Regex fragments used by split_into_sentences to protect
        # abbreviations, acronyms, decimals and domain names.
        self.split_param_caps = "([A-Z])"
        self.split_param_digits = "([0-9])"
        self.split_param_prefixes = "(Mr|St|M|Mme|Mlle|Dr|Pr|Prof)[.]"
        self.split_param_suffixes = "(Inc|Ltd|Jr|Sr|Co)"
        self.split_param_acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
        self.split_param_websites = "[.](com|net|org|io|gov|fr|edu|mc)"
        # Per-user session state (one Chatbot_Context per user).
        self.context = {}  # starts empty
    def split_into_sentences(self,text):
        # Protect dots that do not end a sentence (abbreviations,
        # acronyms, decimals, domains) with <prd>, mark true sentence
        # ends with <stop>, then split on <stop>.
        text = text.replace("\n","")
        text = re.sub(self.split_param_prefixes,"\\1<prd>",text)
        text = re.sub(self.split_param_websites,"<prd>\\1",text)
        text = re.sub(self.split_param_digits + "[.]" + self.split_param_digits,"\\1<prd>\\2",text)
        text = re.sub("\s" + self.split_param_caps + "[.] "," \\1<prd> ",text)
        text = re.sub(self.split_param_caps + "[.]" + self.split_param_caps + "[.]" + self.split_param_caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
        text = re.sub(self.split_param_caps + "[.]" + self.split_param_caps + "[.]","\\1<prd>\\2<prd>",text)
        text = re.sub(" "+self.split_param_suffixes+"[.]"," \\1<prd>",text)
        text = re.sub(" " + self.split_param_caps + "[.]"," \\1<prd>",text)
        # Move punctuation outside closing quotes before splitting.
        if "”" in text: text = text.replace(".”","”.")
        if "\"" in text: text = text.replace(".\"","\".")
        if "!" in text: text = text.replace("!\"","\"!")
        if "?" in text: text = text.replace("?\"","\"?")
        text = text.replace("...","<prds>")
        text = text.replace(".",".<stop>")
        text = text.replace("?","?<stop>")
        text = text.replace("!","!<stop>")
        text = text.replace("<prds>","...")
        text = text.replace("<prd>",".")
        sentences = text.split("<stop>")
        if(len(sentences)>1):
            sentences = [s for s in sentences if s!=""]
        return sentences
    def clean_tokenize_lemmatise(self,sentence):
        # Lowercase, strip characters outside the French alphabet and
        # basic punctuation, collapse digit runs to '0', then tokenize.
        sentence = sentence.lower()
        sentence = re.sub('[^A-Za-z0-9?!.àâçëéèêïîôûùü]+', ' ',sentence)
        sentence = re.sub('[0-9]+', '0',sentence)
        sentence_words = nltk.word_tokenize(sentence, language='french')
        sentence_words = sentence_words[0:self.size_data_max]
        # Reduce words to their root only (lemmatize, then stem).
        sentence_words = [word for word in sentence_words if word not in self.ignore_words]
        sentence_words_intact = sentence_words
        sentence_words = [self.stemmer.stem(self.lemmatizer.lemmatize(word)) for word in sentence_words]
        return sentence_words, sentence_words_intact
    # Use the vocabulary dictionary to convert words into integer indices.
    def pre_embedding(self,sentence):
        result_sentence_words = self.clean_tokenize_lemmatise(sentence)
        sentence_words = result_sentence_words[0]
        sentence_words_intact = result_sentence_words[1]
        # Fixed-length, zero-padded index vector (0 = padding,
        # vocabulary indices are shifted by +1).
        vector_empty = [0] * self.size_data_max
        vec = list(vector_empty)
        for i in range(len(sentence_words)):
            word = sentence_words[i]
            if (word in self.words):
                vec[i] = self.vocabulary[word] + 1
        return np.asarray([vec]), sentence_words_intact
    def classify(self,sentence):
        transformed_sentence = self.pre_embedding(sentence)[0]
        # Probability of membership in each intent class.
        results = self.model.predict([transformed_sentence])[0]
        self.classify_entity(sentence)
        # Drop predictions whose probability is below the threshold.
        results = [[i,r] for i,r in enumerate(results) if r>self.ERROR_THRESHOLD]
        # Sort by probability, best first.
        results.sort(key=lambda x: x[1], reverse=True)
        return_list = []
        for r in results:
            return_list.append((self.classes[r[0]], r[1]))
        # NOTE(review): [-1] logs the LOWEST-confidence surviving
        # candidate (and raises IndexError when the list is empty);
        # confirm [0] was not intended.
        print("Intention : " + str(return_list[-1][0]) + " | Proba : " + str(return_list[-1][1]))
        return return_list
    def classify_entity(self,sentence):
        result_transformed_sentence = self.pre_embedding(sentence)
        transformed_sentence = result_transformed_sentence[0]
        intact_sentence = result_transformed_sentence[1]
        # Per-token probability of each entity type.
        results = self.model_entity.predict([transformed_sentence])[0]
        results_best_match = []
        length = len(intact_sentence)
        for i in range(length):
            # Manual argmax over entity types for token i.
            index = 0
            best_match = results[i][0]
            for j in range(len(self.entity)):
                candidate_match = results[i][j]
                if(candidate_match>best_match):
                    index = j
                    best_match = candidate_match
            results_best_match.append(index)
        results_best_match = [self.entity[match] for match in results_best_match]
        result = []
        for i in range(length):
            result.append([intact_sentence[i],results_best_match[i]])
        return result
    # Drop sessions that have been idle for more than 2 minutes.
    def clear_up(self):
        current_time = time.time()
        session = []
        # Snapshot the keys first: cannot delete while iterating the dict.
        for context in self.context:
            session.append(context)
        for context in session:
            if (self.context[context].last_time + 120 < current_time):
                del self.context[context]
        print("Cleaned, " + str(len(self.context)) + " session still ongoing.")
    def response(self,sentence,userID='0'):
        # Split the input into sentences and answer each one in turn.
        sentences = self.split_into_sentences(sentence)
        nbr_sentences = len(sentences)
        count = 0
        responses = []
        while(count < nbr_sentences):
            sentence = sentences[count]
            # Initialize a fresh context for an unknown session.
            if (self.context.get(userID) == None ):
                self.context[userID] = Chatbot_Context(time.time(),self.classes,self.entity,self,self.mode)
            # Re-classify the current intent when the context requires it.
            log = 'classification intention : '
            if (self.context[userID].require_classification == True):
                results = self.classify(sentence)
                self.context[userID].last_intent = self.context[userID].current_intent
                if results:
                    self.context[userID].current_intent = results[0][0]
                    log = log + str(results[0][0]) + ' (avec une confiance de : ' + str(results[0][1]) + ')'
                else:
                    self.context[userID].current_intent = ''
                    log = log + 'aucune des prédictions ne dépasse la barre des ' + str(self.ERROR_THRESHOLD) + '%.'
            else:
                log = log + ' inutilisée '
            # Look up the answer for this sentence.
            response = self.context[userID].give_response(sentence)
            if (self.context[userID].require_classification == True):
                response.log = log + "<br>" + response.log
            # NOTE(review): '&' is bitwise and binds tighter than '==';
            # the intended condition may have been a logical 'and' with
            # different parenthesization — verify.
            if( not (self.context[userID].current_intent == self.context[userID].last_intent)&(count>=1)):
                responses.append(response)
            count = count + 1
        return responses
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections
class Solution(object):
    def verticalTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        BFS the tree, recording an (x, y) grid coordinate per node
        (x-1/y-1 going left, x+1/y-1 going right). Report columns from
        min_x to max_x; within a column, rows from y=0 down to min_y,
        with nodes that share the same (x, y) sorted by value.
        """
        queue = collections.deque([CNode(root, 0, 0)])
        m = dict()
        min_x, max_x, min_y = 0, 0, 0
        while len(queue) > 0:
            head = queue.popleft()
            if head.x not in m:
                m[head.x] = dict()
            if head.y not in m[head.x]:
                m[head.x][head.y] = []
            m[head.x][head.y].append(head)
            if head.x < min_x:
                min_x = head.x
            if head.x > max_x:
                max_x = head.x
            if head.y < min_y:
                min_y = head.y
            if head.node.left is not None:
                queue.append(CNode(head.node.left, head.x - 1, head.y - 1))
            if head.node.right is not None:
                queue.append(CNode(head.node.right, head.x + 1, head.y - 1))
        ret = []
        # BUG FIX: the original used xrange and a list-returning map,
        # which are Python 2 only; range and a list comprehension behave
        # identically on both Python 2 and Python 3.
        for x in range(min_x, max_x + 1):
            if x in m:
                report = []
                for y in range(0, min_y - 1, -1):
                    if y in m[x]:
                        vals = [cnode.node.val for cnode in m[x][y]]
                        if len(vals) > 1:
                            vals.sort()
                        report.extend(vals)
                ret.append(report)
        return ret


class CNode(object):
    """Tree-node wrapper carrying its (x, y) grid position."""

    def __init__(self, node, x, y):
        self.node = node
        self.x = x
        self.y = y
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from fudge import patch
import cuisine
import fabric
from revolver import core
from .utils import assert_contain_function_wrapped
def test_revolver_is_just_a_wrapper():
    """revolver.core re-exports fabric's api objects unchanged."""
    assert core.env == fabric.api.env
    assert core.get == fabric.api.get
    assert core.local == fabric.api.local
def test_environment_default_values():
    """A fresh env has sudo disabled and no sudo user configured."""
    assert not core.env.sudo_forced
    assert core.env.sudo_user is None
def test_patch_fabric_api():
    """fabric.api.run/sudo are monkey-patched with revolver's wrappers."""
    assert fabric.api.run == core.run
    assert fabric.api.sudo == core.sudo
def test_patch_fabric_operations():
    """fabric.operations.run/sudo are also replaced by the wrappers."""
    assert fabric.operations.run == core.run
    assert fabric.operations.sudo == core.sudo
def test_patch_cuisine():
    """cuisine picks up the patched run/sudo as well."""
    assert cuisine.run == core.run
    assert cuisine.sudo == core.sudo
def test_original_methods_are_available_but_private():
    """The unpatched fabric implementations stay reachable as _run/_sudo."""
    assert core._run.__module__ == "fabric.operations"
    assert core._sudo.__module__ == "fabric.operations"
@patch("revolver.core.sudo")
def test_force_sudo_via_env(sudo):
    """env.sudo_forced makes run() delegate to sudo()."""
    sudo.expects_call().with_args("foo")
    core.env.sudo_forced = True
    core.run("foo")
    core.env.sudo_forced = False  # restore global state for other tests
@patch("revolver.core._sudo")
def test_inject_user_for_sudo_via_env(_sudo):
    """env.sudo_user is injected as the user= kwarg of the real sudo."""
    _sudo.expects_call().with_args("foo", user="bar")
    core.env.sudo_user = "bar"
    core.sudo("foo")
    core.env.sudo_user = None  # restore global state for other tests
@patch("revolver.core._put")
def test_put_does_not_pass_any_default_args(_put):
    """put() forwards nothing when called without arguments."""
    _put.expects_call().with_args()
    core.put()
@patch("revolver.core._put")
def test_put_passes_any_given_args(_put):
    """put() forwards positional and keyword arguments untouched."""
    _put.expects_call().with_args("foo", baz="bar")
    core.put("foo", baz="bar")
|
import asyncio
async def sleep_task(num):
    """Print five heartbeat lines for this task, sleeping one second
    between iterations, then return the task number."""
    i = 0
    while i < 5:
        print(f"process task: {num} iter : {i}")
        await asyncio.sleep(1)
        i += 1
    return num
# Run two sleep tasks concurrently on the default event loop, wait for
# both to finish, then close the loop.
loop = asyncio.get_event_loop()
task_list = [loop.create_task(sleep_task(i)) for i in range(2)]
loop.run_until_complete(asyncio.wait(task_list))
loop.close()
#loop.run_until_complete(loop.create_task(sleep_task(3)))
#loop.run_until_complete(asyncio.gather(sleep_task(4), sleep_task(5)))
|
#Armstrong Number:
'''
def length(n):
count=0
while n>0:
count+=1
n=n//10
return count
def armstrong(n):
arm=0
n1=n
l=length(n)
while n>0:
rem=n%10
arm+=rem**l
n//=10
if n1==arm:
return("armstrong")
else:
return("not armstrong")
n=int(input("enter a number:"))
print(armstrong(n))
'''
'''
#PERFECT number:
Ex:
6
sum of 6 divisors
3+2+1=6
output: yes
10
sum of 10 divisors
5+2+1=8
output: No
'''
def perfect(n):
    """Return the string "True" if n equals the sum of its proper
    divisors (a perfect number), otherwise the string "False"."""
    total = sum(i for i in range(1, n // 2 + 1) if n % i == 0)
    return "True" if total == n else "False"
# CLI driver: read an integer and report whether it is perfect.
n=int(input("enter a number:"))
print(perfect(n))
|
import sys
import os
import csv
import getopt
from pprint import pprint
# Make the bundled 'lib' directory importable before pulling in the
# vendored dependencies below.
CV_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__)))
os.environ['CV_HOME'] = str(CV_HOME)
sys.path.append(os.path.join(CV_HOME, 'lib'))
import requests
from requests.auth import HTTPDigestAuth
import simplexml
import cvtools
def main(argv):
    """Download the Qualys knowledge base and convert it to CSV.

    Both username and password are required (parsed by
    cvtools.readinputs); usage is shown when either is missing.
    Prints are single-argument print() calls so the script runs under
    both Python 2 and Python 3.
    """
    opts = cvtools.readinputs(argv)
    # BUG FIX: usage was only shown when BOTH credentials were missing
    # ('and'); either one missing makes the download impossible.
    if not opts['username'] or not opts['password']:
        cvtools.usage()
    username = opts['username']
    password = opts['password']
    print("Downloading Qualys KB - this can take a few minutes...")
    kb_dl = cvtools.download_kb(username, password)
    if kb_dl:
        print("Qualys KB downloaded")
        print("Will now parse XML and convert to CSV - this will also take a few minutes...")
        cvtools.convert_kb()
        print("All done! The converted KB is located in: %s" % (os.path.join(CV_HOME, 'kb.csv')))

if __name__ == "__main__":
    main(sys.argv[1:])
|
#!/usr/bin/env python
"""
info about project here
"""
from gpiozero import LED
import time
from gpiozero.pins.pigpio import PiGPIOFactory
__author__ = "Johannes Coolen"
__email__ = "johannes.coolen@student.kdg.be"
__status__ = "development"

# Remote GPIO: drive pin 17 of the Raspberry Pi at 192.168.0.196 over
# the pigpio daemon.
IP = PiGPIOFactory(host='192.168.0.196')
# NOTE(review): this instance shadows the gpiozero LED class imported
# above — harmless here, but rename if a second LED is ever needed.
LED = LED(pin=17, pin_factory=IP)
def main():
    """Blink the remote LED: one 5-second pulse, five 2-second blinks,
    then toggle at 1-second intervals until interrupted."""
    LED.on()
    time.sleep(5)
    LED.off()
    for x in range(5):
        time.sleep(2)
        LED.on()
        time.sleep(2)
        LED.off()
    while True:  # deliberate infinite blink loop
        time.sleep(1)
        LED.on()
        time.sleep(1)
        LED.off()

if __name__ == '__main__':  # code to execute if called from command-line
    main()
|
#!/usr/bin/env python
# This is a collection of functions that computes the absorption
# coefficient for direct transitions
#
#
# Written by Wennie Wang (wwwennie@gmail.com)
# Created: 1 June 2016
# Last modified:
#
# Absorption coefficient is calculated by:
# [see Kioupakis, et al. PRB 81, 241201 (2010)]
# [see Eq. 5-13 in Electronic States and Optical Transitions, Bassani & Parravicini]
#
# THINGS TO TEST/DO:
# - MP smearing
# - rescaling of p_mn terms
# - vectorization of the code
import sys
# specific to where special packages like f90nml are
sys.path.append("/u/wwwennie/.local/lib/python2.6/site-packages/")
import numpy as np
import f90nml
import smear as sm
import absorb as a
import vectorabs as vec
import time
from multiprocessing import Pool
#*************************************************************
#**************** Read Input *****************
#*************************************************************
## Assumes VASP format files for input
def read_input():
    """ Read input from Transmatrix, EIGENVAL, IBZKPT
        Assumes VASP files
        Transmatrix: momentum matrix elements, in units of hbar/a_bohr
        EIGENVAL: eigenvalues at KPOINTS, unused currently
        IBZKPT: list of k-points and respective weights
        Other inputs
        nbnd = number bands
        n_r = refractive index (assumed constant)
        efermi = Fermi energy (eV)
        temp = temperature (K)
        smear = constant smearing amount (eV)
        aa = adaptive smearing factor (eV)
        kgrid = k-point grid size
    """
    # Read input card (a Fortran namelist file named 'input').
    nml = f90nml.read('input')
    fil_trans = nml['input']['fil_trans']
    fil_eig = nml['input']['fil_eig']
    fil_ibzkpt = nml['input']['fil_ibzkpt']
    # Assign values
    nbnd = nml['input']['nbnd']
    n_r = nml['input']['n_r']
    efermi = nml['input']['efermi']
    temp = nml['input']['T']
    smear = nml['input']['smear']
    ismear = nml['input']['ismear']
    aa = nml['input']['aa']
    volume = nml['input']['volume']
    kgrid = nml['input']['kgrid']
    rfreq = nml['input']['rfreq']
    stepfreq = nml['input']['stepfreq']
    # read in files
    f_trans = np.loadtxt(fil_trans)  # Transmatrix
    f_ibzkpt = np.loadtxt(fil_ibzkpt,skiprows=3)  # ibzkpt
    # Make things into floats as appropriate
    aa = float(aa)
    temp = float(temp)
    kgrid = [int(x) for x in kgrid]
    minfreq = float(rfreq[0])
    maxfreq = float(rfreq[1])
    stepfreq = float(stepfreq)
    # Assign values from IBZKPT: coordinates and normalized weights.
    nkpt = f_ibzkpt.shape[0]
    kpts = f_ibzkpt[:,0:3]
    kptwt = f_ibzkpt[:,3]/sum(f_ibzkpt[:,3])
    # Assign values of Transmatrix* file
    #kptwt = f_trans[:,0] #kpt weight
    #kptind = f_trans[:,1] #kpt index num
    ibnd = f_trans[:,1]  #empty band index, BEWARE of new Transmatrix format
    jbnd = f_trans[:,2]  #filled band index
    nfilled = int(max(jbnd))  # num filled bands
    nempty = int(max(ibnd)-nfilled)  # num empty bands
    pmn_k = np.zeros((nkpt,nempty,nfilled))
    eigs = np.zeros((nkpt,nbnd))  # eigenvals (in eV)
    diffeigs = np.zeros((nkpt,nempty,nfilled))
    # NOTE(review): ibnd/jbnd are rebound from column arrays to per-line
    # ints inside the loop below — confusing but intentional reuse.
    counter = 0
    ikpt = -1
    for line in f_trans:
        #ikpt = int(line[1])-1 # new Transmatrix format omits
        ibnd = int(line[1])-1 - nfilled
        jbnd = int(line[2])-1
        # tracking index of kpt, new Transmatrix format: the k-point
        # index advances every nfilled*nempty rows.
        if np.mod(counter,nfilled*nempty) == 0:
            counter += 1
            ikpt += 1
        else:
            counter += 1
        # eigenvalues- lots of overwriting values here; more efficient way?
        eigs[ikpt,ibnd+nfilled] = float(line[3])
        eigs[ikpt,jbnd] = float(line[4])
        diffeigs[ikpt,ibnd,jbnd] = float(line[3])-float(line[4])
        # modulus over coordinates
        pmntmp = line[5:]
        pmn_k[ikpt][ibnd][jbnd] = np.linalg.norm(pmntmp)
    ##Debug
    ##print pmn[10][1][1], pmn[0][0][0]
    ##print eigs[10,30], eigs[10,3]
    return nkpt, nbnd, nfilled, kgrid, n_r, pmn_k, diffeigs, eigs, kpts, kptwt, efermi, temp, smear, ismear, aa, volume, minfreq, maxfreq, stepfreq
#*************************************************************
#******************** Main program ***********************
#*************************************************************
def main():
    """ Usage: Place relevant input parameters in file 'input'
        In conjunction with: smear.py, abs.py
        Execute: python fc_direct.py
        Computed in atomic units """
    # Track program run time
    start_time = time.time()
    # Get pmn, eigenvalues, kpts, kpt weights
    nkpt, nbnd, nfilled, kgrid, n_r, pmn_unit, diffeigs_unit, eigs_unit, kpts, kptwt, efermi_unit, temp, smear,ismear, aa, volume, minfreq, maxfreq, stepfreq = read_input()
    # Frequencies to calculate over, eV
    freqs = np.arange(minfreq,maxfreq,stepfreq)
    # Constants for unit conversion
    RytoeV = 13.60569253  # Ryd to eV conversion
    RytoHar = 0.5  # Ryd to Hartree conversion
    Hartocm1 = 219474.63  # Hartree to cm^-1 conversion
    tpi = 2.0*np.pi  # 2pi
    au = 0.52917720859  # Bohr radius
    me = 9.10938215e-31  # electron mass
    eV = 1.602176487e-19  # electron charge and eV to J
    hbar = 1.054571628e-34  # J
    KtoJ = 1.38064852e-23  # Boltzmann constant
    KtoeV = 8.6173324e-5  # Boltzmann constant
    KtoHar = KtoeV / RytoeV * RytoHar
    eps0 = 8.854187817e-12  # Vac permittivity
    c_light = 137.035999070  # Hartree
    # Convert input to atomic units
    pmn = pmn_unit  #* 2*au*RytoeV # VASP p_mn in units hbar/a_bohr
                    # looks like it is converted to non-a.u.
                    # in Transmatrix file
    T = temp*KtoHar
    eigs = eigs_unit / RytoeV * RytoHar
    efermi = efermi_unit/ RytoeV * RytoHar
    vol = volume / (au**3)
    smear = smear / RytoeV * RytoHar
    harfreqs = freqs / RytoeV * RytoHar
    # NOTE(review): fdeigs is allocated with zeros() and then immediately
    # rebound by the broadcast expression — the first line is redundant.
    fdeigs = np.zeros((nkpt,nbnd))
    fdeigs = (eigs-efermi)/T
    diffeigs = diffeigs_unit / RytoeV * RytoHar
    # # DEBUG: checking Fermi-Dirac distribtuion
    # eigtest = np.arange(-0.5,0.5,0.05)
    # fd = [sm.fermidirac(energy,0,T) for energy in eigtest]
    # np.savetxt("fd-check",np.c_[eigtest,fd])
    ##### Calculate absorption coefficient #####
    nfreqs = len(freqs)
    alpha = np.zeros(nfreqs)
    runtime = np.zeros(nfreqs)
    prog_freq = np.ceil(nfreqs/10)
    #### Vectorization #####
    # relevant smearing functions: ismear selects Gaussian (1) or
    # Methfessel-Paxton (2) delta-function smearing.
    Ffd = np.vectorize(sm.fd)
    if (ismear == 1):
        Fdelta = np.vectorize(sm.w0gauss)
    elif (ismear == 2):
        Fdelta = np.vectorize(sm.mpdelta)
    # array of FD-terms [nkpt][nbnd]
    fd_vec = Ffd(fdeigs)
    ########################
    fileout = open('progress','w')
    fileout.write("================ Progress ============\n")
    ##### Calculate absorption coefficient ####
    for f in range(nfreqs):
        delta_vec = vec.vecdirac(Fdelta,diffeigs,harfreqs[f],smear,ismear)
        alpha[f] = a.absvec(pmn,delta_vec,fd_vec,kptwt,nkpt,nbnd,nfilled)
        #alpha[f] = a.absorb(pmn,eigs,efermi,T,harfreqs[f],kptwt,nkpt,nbnd,aa,smear,nfilled)
        # progress bar: log roughly every 10% of the frequency grid.
        if (np.mod(int(f),int(prog_freq)) == 0):
            runtime[f] = time.time() - start_time
            fileout.write("frequency {0} of {1}: {2} s\n".format(int(f),int(nfreqs),runtime[f]))
            fileout.flush()
    # pre-factor to absorption coefficient
    pre = 2 * 4 * np.pi**2 / (n_r * c_light)
    pre = pre / vol
    invfreq = [1.0/freq for freq in harfreqs]
    pre = np.multiply(pre,invfreq)
    alpha = np.multiply(pre,alpha)
    # imag dielectric for double-checking
    # from numbers in a.u.
    imeps = (n_r * c_light) * np.multiply(invfreq,alpha)
    # Convert to conventional units
    alpha = alpha * Hartocm1  # au to cm^-1
    # Output to file
    omalpha = np.c_[freqs,alpha]
    np.savetxt("runtime", runtime)
    np.savetxt("alpha",omalpha)
    np.savetxt("imepsilon",np.c_[freqs,imeps])

if __name__ == "__main__":
    main()
|
import sys
import argparse
from pathlib import Path
from dateutil.parser import parse
from . import compare_all
from .plot import plotdiff
from ..utils import to_datetime
from .. import read
def compare_cli(P):
    """Numerically compare new vs. reference run directories.

    Prints one line per failing file to stderr and exits with a FAIL
    message when any differences are found.
    """
    error_counts = compare_all(P.new_dir, P.ref_dir, file_format=P.file_format, only=P.only, plot=True)
    if not error_counts:
        print(f"OK: Gemini comparison {P.new_dir} {P.ref_dir}")
        return
    for name, count in error_counts.items():
        print(f"{name} has {count} errors", file=sys.stderr)
    raise SystemExit(f"FAIL: compare {P.new_dir}")
def plot_cli(ref_dir: Path, new_dir: Path, *, time_str: str = None, var: set[str] = None):
    """Plot differences between a reference and a new simulation output.

    With ``time_str``, both paths are run directories and one frame is read
    from each; without it, both paths must be single data files.
    """
    ref_path = Path(ref_dir).expanduser().resolve(strict=True)
    new_path = Path(new_dir).expanduser().resolve(strict=True)
    if time_str:
        when = parse(time_str)
        new = read.frame(new_path, when, var=var)
        ref = read.frame(ref_path, when, var=var)
    else:
        for p in (ref_path, new_path):
            if not p.is_file():
                raise FileNotFoundError(f"{p} must be a file when not specifying time")
        new = read.data(new_path, var=var)
        ref = read.data(ref_path, var=var)
        # plotdiff expects directories, so step up from the data files.
        new_path, ref_path = new_path.parent, ref_path.parent
    for key in ref.keys():
        plotdiff(new[key], ref[key], to_datetime(new[key].time), new_path, ref_path)
if __name__ == "__main__":
    # CLI entry point: numeric compare by default, plots with -plot.
    p = argparse.ArgumentParser(description="Compare simulation file outputs and inputs")
    p.add_argument("new_dir", help="directory to compare")
    p.add_argument("ref_dir", help="reference directory")
    p.add_argument("-plot", help="plot instead of numeric compare", action="store_true")
    p.add_argument("-only", help="only check in or out", choices=["in", "out"])
    p.add_argument("-var", help="variable names (only works with -plot)", nargs="+")
    p.add_argument("-t", "--time", help="requested time (if directory given)")
    p.add_argument(
        "-file_format",
        help="specify file format to read from output dir",
        choices=["h5", "nc", "raw"],
    )
    P = p.parse_args()
    if P.plot:
        # BUG FIX: plot_cli's signature is (ref_dir, new_dir, ...); the
        # original call passed (P.new_dir, P.ref_dir), silently swapping the
        # reference and new runs in the plots.
        plot_cli(P.ref_dir, P.new_dir, time_str=P.time, var=P.var)
    else:
        compare_cli(P)
|
"""
====================================
How to write your own TVTensor class
====================================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_tv_tensors.py>` to download the full example code.
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own TVTensor class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_transforms_plot_tv_tensors.py`.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.tv_tensors.TVTensor` class. It will be enough to cover
# what you need to know to implement your more elaborate use-cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.tv_tensors.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/tv_tensors/_bounding_box.py>`_.
class MyTVTensor(tv_tensors.TVTensor):
    """Minimal TVTensor subclass: no extra state or behavior beyond the base."""
    pass
# Instantiate the custom TVTensor; the trailing bare expression echoes the
# value when this script is executed as a notebook cell.
my_dp = MyTVTensor([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom TVTensor class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyTVTensor class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", tv_tensor_cls=MyTVTensor)
def hflip_my_tv_tensor(my_dp, *args, **kwargs):
    """Horizontal-flip kernel for MyTVTensor.

    ``*args, **kwargs`` absorb any parameters the functional may add in the
    future (see the forward-compatibility note later in this file).
    """
    print("Flipping!")
    return tv_tensors.wrap(my_dp.flip(-1), like=my_dp)
# %%
# To understand why :func:`~torchvision.tv_tensors.wrap` is used, see
# :ref:`tv_tensor_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyTVTensor`` instance:
# Calling the functional now dispatches to the registered kernel for
# MyTVTensor (prints "Flipping!").
my_dp = MyTVTensor(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
# The transform class works too, since it calls F.hflip internally; p=1
# forces the flip to always happen.
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_tv_tensor(my_dp): # noqa
    # Deliberately fragile example kernel: without *args/**kwargs, any
    # parameter later added to F.hflip would be rejected by this signature
    # (this is the point being illustrated — do not "fix" it).
    print("Flipping!")
    out = my_dp.flip(-1)
    return tv_tensors.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding `**kwargs` only should be enough).
|
# Write a Python program to convert list to list of dictionaries.
def listToDict(list1, list2):
    """Map elements of ``list1`` to ``list2`` positionally, returning a dict.

    Returns an empty dict (after printing an error) when the lengths differ.
    BUG FIX: the original left ``final_dict`` unbound on the mismatch path,
    so the ``return`` raised UnboundLocalError instead of reporting the error.
    """
    final_dict = {}
    if len(list1) == len(list2):
        # dict(zip(...)) is the idiomatic positional pairing.
        final_dict = dict(zip(list1, list2))
    else:
        print("ERROR ! Length are different. Mapping can't be done.")
    return final_dict

list1 = [1, 2, 3, 4]
list2 = ['a', 'b', 'c', 'd']
output = listToDict(list1, list2)
print(output)
# Guessing game: one of six "hands" holds the prize (index 0); the player
# picks a hand and declares "gol" (1 = prize here) or "poch" (0 = empty).
hands = [1,0,0,0,0,0]
remainChance=4
while True:
    # NOTE(review): no input validation — a hand index outside
    # 0..len(hands)-1 (or non-numeric input) raises an exception.
    quest = int(input("please choose a hand"))
    order = int(input("for gol inter 1 for poch inter 0"))
    remainChance -=1
    if order == 1 and hands[quest] == 1:
        print("you won!")
        break
    if order == 0 and hands[quest] == 1:
        print("you lost!")
        break
    if remainChance >0 and order == 0:
        if hands[quest]==0:
            # NOTE(review): pop() shrinks the list and shifts the remaining
            # indices, so previously seen hand numbers no longer line up —
            # looks like a bug; confirm the intended rule.
            hands.pop(quest)
    # NOTE(review): nothing ends the game when remainChance reaches 0
    # without a win/loss — probable infinite loop; confirm intended rules.
|
import numpy as np
import tensorflow as tf
import re
from basic.read_data import DataSet
from my.nltk_utils import span_f1
from my.tensorflow import padded_reshape
from my.utils import argmax
from squad.utils import get_phrase, get_best_span, get_best_span_wy
from qangaroo.utils import get_best_candidate, get_word_span
class Evaluation(object):
    """Container for raw model predictions from one evaluation pass.

    Supports ``+`` (and therefore ``sum(...)`` via ``__radd__``) so that
    per-batch evaluations can be merged into a single aggregate result.
    """

    def __init__(self, data_type, global_step, idxs, yp, tensor_dict=None):
        self.data_type = data_type
        self.global_step = global_step
        self.idxs = idxs
        self.yp = yp
        self.num_examples = len(yp)
        # Serializable summary of everything stored on this object.
        self.dict = {
            'data_type': data_type,
            'global_step': global_step,
            'yp': yp,
            'idxs': idxs,
            'num_examples': self.num_examples,
        }
        self.tensor_dict = None
        if tensor_dict is not None:
            # Convert array values to plain lists so self.dict stays serializable.
            self.tensor_dict = {k: v.tolist() for k, v in tensor_dict.items()}
            self.dict.update(self.tensor_dict)
        self.summaries = None

    def __repr__(self):
        return "{} step {}".format(self.data_type, self.global_step)

    def __add__(self, other):
        # `0 + eval` happens when sum() starts from its default accumulator.
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        merged_tensor_dict = None
        if self.tensor_dict is not None:
            merged_tensor_dict = {k: v + other.tensor_dict[k]
                                  for k, v in self.tensor_dict.items()}
        return Evaluation(self.data_type, self.global_step,
                          self.idxs + other.idxs, self.yp + other.yp,
                          tensor_dict=merged_tensor_dict)

    def __radd__(self, other):
        return self.__add__(other)
class LabeledEvaluation(Evaluation):
    """Evaluation that additionally carries the gold labels ``y``."""
    def __init__(self, data_type, global_step, idxs, yp, y, tensor_dict=None):
        super(LabeledEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
        self.y = y
        self.dict['y'] = y
    def __add__(self, other):
        """Merge two labeled evaluations; ``other == 0`` supports sum()."""
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_yp = self.yp + other.yp
        new_y = self.y + other.y
        new_idxs = self.idxs + other.idxs
        # BUG FIX: new_tensor_dict was only assigned when tensor_dict was not
        # None, so merging evaluations without a tensor_dict raised
        # UnboundLocalError. Initialize to None, as Evaluation.__add__ does.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return LabeledEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, tensor_dict=new_tensor_dict)
class AccuracyEvaluation(LabeledEvaluation):
    """Labeled evaluation extended with per-example correctness, aggregate
    accuracy, and loss (plus TF summaries for both)."""
    def __init__(self, data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=None):
        super(AccuracyEvaluation, self).__init__(data_type, global_step, idxs, yp, y, tensor_dict=tensor_dict)
        self.loss = loss
        self.correct = correct
        # Fraction of examples whose prediction matched the gold label.
        self.acc = sum(correct) / len(correct)
        self.dict['loss'] = loss
        self.dict['correct'] = correct
        self.dict['acc'] = self.acc
        loss_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=self.loss)])
        acc_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc'.format(data_type), simple_value=self.acc)])
        self.summaries = [loss_summary, acc_summary]
    def __repr__(self):
        return "{} step {}: accuracy={}, loss={}".format(self.data_type, self.global_step, self.acc, self.loss)
    def __add__(self, other):
        """Merge two accuracy evaluations; ``other == 0`` supports sum()."""
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_y = self.y + other.y
        new_correct = self.correct + other.correct
        # Example-weighted average of the two batch losses.
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        # BUG FIX: initialize new_tensor_dict so merging evaluations without a
        # tensor_dict no longer raises UnboundLocalError (consistent with
        # Evaluation.__add__).
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return AccuracyEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_correct, new_loss, tensor_dict=new_tensor_dict)
class Evaluator(object):
    """Runs a model over batches and collects Evaluation objects."""
    def __init__(self, config, model, tensor_dict=None):
        self.config = config
        self.model = model
        self.global_step = model.global_step
        self.yp = model.yp
        # Optional extra tensors to fetch alongside the predictions.
        self.tensor_dict = tensor_dict if tensor_dict is not None else {}
    def get_evaluation(self, sess, batch):
        """Evaluate one (idxs, DataSet) batch and wrap the result."""
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        fetches = [self.global_step, self.yp, list(self.tensor_dict.values())]
        global_step, yp, vals = sess.run(fetches, feed_dict=feed_dict)
        # Drop padded rows beyond the real batch size.
        yp = yp[:data_set.num_examples]
        fetched = dict(zip(self.tensor_dict.keys(), vals))
        return Evaluation(data_set.data_type, int(global_step), idxs,
                          yp.tolist(), tensor_dict=fetched)
    def get_evaluation_from_batches(self, sess, batches):
        """Merge per-batch evaluations via Evaluation.__add__."""
        return sum(self.get_evaluation(sess, batch) for batch in batches)
class LabeledEvaluator(Evaluator):
    """Evaluator that also records the gold labels fed to the model."""
    def __init__(self, config, model, tensor_dict=None):
        super(LabeledEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.y = model.y
    def get_evaluation(self, sess, batch):
        """Evaluate one batch, returning a LabeledEvaluation with gold labels."""
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        fetches = [self.global_step, self.yp, list(self.tensor_dict.values())]
        global_step, yp, vals = sess.run(fetches, feed_dict=feed_dict)
        yp = yp[:data_set.num_examples]
        # Gold labels come straight from the feed, keyed by the label tensor.
        gold = feed_dict[self.y]
        fetched = dict(zip(self.tensor_dict.keys(), vals))
        return LabeledEvaluation(data_set.data_type, int(global_step), idxs,
                                 yp.tolist(), gold.tolist(), tensor_dict=fetched)
class AccuracyEvaluator(LabeledEvaluator):
    """Labeled evaluator that additionally computes loss and correctness."""
    def __init__(self, config, model, tensor_dict=None):
        super(AccuracyEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.loss = model.get_loss()
    def get_evaluation(self, sess, batch):
        """Evaluate one batch, scoring each prediction against the gold spans."""
        idxs, data_set = batch
        assert isinstance(data_set, DataSet)
        feed_dict = self.model.get_feed_dict(data_set, False)
        fetches = [self.global_step, self.yp, self.loss, list(self.tensor_dict.values())]
        global_step, yp, loss, vals = sess.run(fetches, feed_dict=feed_dict)
        gold = data_set.data['y']
        yp = yp[:data_set.num_examples]
        correct = [self.__class__.compare(gi, pi) for gi, pi in zip(gold, yp)]
        fetched = dict(zip(self.tensor_dict.keys(), vals))
        return AccuracyEvaluation(data_set.data_type, int(global_step), idxs,
                                  yp.tolist(), gold, correct, float(loss),
                                  tensor_dict=fetched)
    @staticmethod
    def compare(yi, ypi):
        # Correct iff any gold start position equals the argmax prediction.
        return any(start == int(np.argmax(ypi)) for start, stop in yi)
class AccuracyEvaluator2(AccuracyEvaluator):
    """Accuracy evaluator comparing (paragraph, sentence) start positions."""
    @staticmethod
    def compare(yi, ypi):
        def predicted_start():
            # Best paragraph by its max sentence score, then best sentence.
            para = int(np.argmax(np.max(ypi, 1)))
            return para, int(np.argmax(ypi[para]))
        return any(tuple(start) == predicted_start() for start, _stop in yi)
class ForwardEvaluation(Evaluation):
    """Evaluation for forward-only (unlabeled) runs: carries the second span
    boundary ``yp2``, the loss, and an id -> answer mapping."""
    def __init__(self, data_type, global_step, idxs, yp, yp2, loss, id2answer_dict, tensor_dict=None):
        super(ForwardEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
        self.yp2 = yp2
        self.loss = loss
        self.dict['loss'] = loss
        self.dict['yp2'] = yp2
        self.id2answer_dict = id2answer_dict
    def __add__(self, other):
        """Merge two forward evaluations; ``other == 0`` supports sum()."""
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_yp2 = self.yp2 + other.yp2
        # Example-weighted average of the two batch losses.
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_yp)
        # The plain merge also copies the colliding 'scores' sub-dicts, so
        # the merged scores dict is rebuilt explicitly below.
        new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
        new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
        new_id2answer_dict['scores'] = new_id2score_dict
        # BUG FIX: new_tensor_dict was unbound when tensor_dict was None,
        # raising UnboundLocalError on merge; initialize to None as in
        # Evaluation.__add__.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0) for key, val in self.tensor_dict.items()}
        return ForwardEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_loss, new_id2answer_dict, tensor_dict=new_tensor_dict)
    def __repr__(self):
        return "{} step {}: loss={:.4f}".format(self.data_type, self.global_step, self.loss)
class F1Evaluation(AccuracyEvaluation):
    """Accuracy evaluation extended with per-example span-F1 scores, the
    second span boundary (yp2), and an id -> answer-text mapping."""
    def __init__(self, data_type, global_step, idxs, yp, yp2, y, correct, loss, f1s, id2answer_dict, tensor_dict=None):
        super(F1Evaluation, self).__init__(data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=tensor_dict)
        self.yp2 = yp2
        self.f1s = f1s
        # Aggregate F1 is the plain mean over per-example scores.
        self.f1 = float(np.mean(f1s))
        self.dict['yp2'] = yp2
        self.dict['f1s'] = f1s
        self.dict['f1'] = self.f1
        self.id2answer_dict = id2answer_dict
        f1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/f1'.format(data_type), simple_value=self.f1)])
        self.summaries.append(f1_summary)
    def __add__(self, other):
        # `other == 0` supports sum() over batch evaluations.
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_yp2 = self.yp2 + other.yp2
        new_y = self.y + other.y
        new_correct = self.correct + other.correct
        new_f1s = self.f1s + other.f1s
        # Example-weighted average of the two batch losses.
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        # The plain dict merge also copies the colliding 'scores'/'na'
        # sub-dicts, so they are rebuilt explicitly below.
        new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
        new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
        new_id2answer_dict['scores'] = new_id2score_dict
        if 'na' in self.id2answer_dict:
            new_id2na_dict = dict(list(self.id2answer_dict['na'].items()) + list(other.id2answer_dict['na'].items()))
            new_id2answer_dict['na'] = new_id2na_dict
        # NOTE(review): tensor_dict is not propagated through this merge.
        e = F1Evaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_y, new_correct, new_loss, new_f1s, new_id2answer_dict)
        if 'wyp' in self.dict:
            new_wyp = self.dict['wyp'] + other.dict['wyp']
            e.dict['wyp'] = new_wyp
        return e
    def __repr__(self):
        return "{} step {}: accuracy={:.4f}, f1={:.4f}, loss={:.4f}".format(self.data_type, self.global_step, self.acc, self.f1, self.loss)
class F1CandidateEvaluation(AccuracyEvaluation):
    """Accuracy evaluation over answer candidates, extended with span-F1
    scores and an id -> answer-text mapping.

    Mirrors F1Evaluation but without the second span boundary; the yp2
    remnants are left commented out.
    """
    def __init__(self, data_type, global_step, idxs, yp, y, correct, loss, f1s, id2answer_dict, tensor_dict=None):
        super(F1CandidateEvaluation, self).__init__(data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=tensor_dict)
        #self.yp2 = yp2
        self.f1s = f1s
        # Aggregate F1 is the plain mean over per-example scores.
        self.f1 = float(np.mean(f1s))
        #self.dict['yp2'] = yp2
        self.dict['f1s'] = f1s
        self.dict['f1'] = self.f1
        self.id2answer_dict = id2answer_dict
        f1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/f1'.format(data_type), simple_value=self.f1)])
        self.summaries.append(f1_summary)
    def __add__(self, other):
        # `other == 0` supports sum() over batch evaluations.
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        #new_yp2 = self.yp2 + other.yp2
        new_y = self.y + other.y
        new_correct = self.correct + other.correct
        new_f1s = self.f1s + other.f1s
        # Example-weighted average of the two batch losses.
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        # The plain dict merge also copies the colliding 'scores'/'na'
        # sub-dicts, so they are rebuilt explicitly below.
        new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
        new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
        new_id2answer_dict['scores'] = new_id2score_dict
        if 'na' in self.id2answer_dict:
            new_id2na_dict = dict(list(self.id2answer_dict['na'].items()) + list(other.id2answer_dict['na'].items()))
            new_id2answer_dict['na'] = new_id2na_dict
        # NOTE(review): tensor_dict is not propagated through this merge.
        e = F1CandidateEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y, new_correct, new_loss, new_f1s, new_id2answer_dict)
        if 'wyp' in self.dict:
            new_wyp = self.dict['wyp'] + other.dict['wyp']
            e.dict['wyp'] = new_wyp
        return e
    def __repr__(self):
        return "{} step {}: accuracy={:.4f}, f1={:.4f}, loss={:.4f}".format(self.data_type, self.global_step, self.acc, self.f1, self.loss)
class F1Evaluator(LabeledEvaluator):
    """Evaluator producing span predictions and F1 scores for extractive QA.

    Fetches both span-boundary distributions (yp, yp2) plus the word-level
    distribution (wyp), decodes the best span per example, and packages the
    result as an F1Evaluation.
    """
    def __init__(self, config, model, tensor_dict=None):
        super(F1Evaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.yp2 = model.yp2
        self.wyp = model.wyp
        self.loss = model.get_loss()
        if config.na:
            # Probability that the question has no answer.
            self.na = model.na_prob
    def get_evaluation(self, sess, batch):
        """Run the model on one batch and decode spans into an F1Evaluation."""
        idxs, data_set = self._split_batch(batch)
        assert isinstance(data_set, DataSet)
        feed_dict = self._get_feed_dict(batch)
        # The 'na' configuration adds one extra fetch (no-answer probability).
        if self.config.na:
            global_step, yp, yp2, wyp, loss, na, vals = sess.run([self.global_step, self.yp, self.yp2, self.wyp, self.loss, self.na, list(self.tensor_dict.values())], feed_dict=feed_dict)
        else:
            global_step, yp, yp2, wyp, loss, vals = sess.run([self.global_step, self.yp, self.yp2, self.wyp, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
        y = data_set.data['y']
        # Drop padded rows beyond the real batch size.
        yp, yp2, wyp = yp[:data_set.num_examples], yp2[:data_set.num_examples], wyp[:data_set.num_examples]
        # Decode the best span either from the word-level distribution (wy
        # mode, thresholded) or from the start/stop boundary distributions.
        if self.config.wy:
            spans, scores = zip(*[get_best_span_wy(wypi, self.config.th) for wypi in wyp])
        else:
            spans, scores = zip(*[get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)])
        def _get(xi, span):
            # Token-level answer extraction, guarded against out-of-range spans.
            if len(xi) <= span[0][0]:
                return [""]
            if len(xi[span[0][0]]) <= span[1][1]:
                return [""]
            return xi[span[0][0]][span[0][1]:span[1][1]]
        def _get2(context, xi, span):
            # Character-level answer extraction via get_phrase, same guards.
            if len(xi) <= span[0][0]:
                return ""
            if len(xi[span[0][0]]) <= span[1][1]:
                return ""
            return get_phrase(context, xi, span)
        # split_supports uses the flattened-paragraph fields (x2/p2).
        if self.config.split_supports:
            id2answer_dict = {id_: _get2(context[0], xi, span)
                              for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x2'], spans, data_set.data['p2'])}
        else:
            id2answer_dict = {id_: _get2(context[0], xi, span)
                              for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
        id2score_dict = {id_: score for id_, score in zip(data_set.data['ids'], scores)}
        id2answer_dict['scores'] = id2score_dict
        if self.config.na:
            id2na_dict = {id_: float(each) for id_, each in zip(data_set.data['ids'], na)}
            id2answer_dict['na'] = id2na_dict
        correct = [self.__class__.compare2(yi, span) for yi, span in zip(y, spans)]
        f1s = [self.__class__.span_f1(yi, span) for yi, span in zip(y, spans)]
        tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
        e = F1Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y,
                         correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
        if self.config.wy:
            e.dict['wyp'] = wyp.tolist()
        return e
    def _split_batch(self, batch):
        # Single-GPU case: the batch is already (idxs, data_set).
        return batch
    def _get_feed_dict(self, batch):
        return self.model.get_feed_dict(batch[1], False)
    @staticmethod
    def compare(yi, ypi, yp2i):
        # Exact-match check using a mask that forces stop >= start.
        for start, stop in yi:
            aypi = argmax(ypi)
            mask = np.zeros(yp2i.shape)
            mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
            if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
                return True
        return False
    @staticmethod
    def compare2(yi, span):
        # Exact-match of the decoded span against any gold span.
        for start, stop in yi:
            if tuple(start) == span[0] and tuple(stop) == span[1]:
                return True
        return False
    @staticmethod
    def span_f1(yi, span):
        # Best token-overlap F1 against gold spans in the same sentence;
        # the inner span_f1 call resolves to the imported my.nltk_utils.span_f1.
        max_f1 = 0
        for start, stop in yi:
            if start[0] == span[0][0]:
                true_span = start[1], stop[1]
                pred_span = span[0][1], span[1][1]
                f1 = span_f1(true_span, pred_span)
                max_f1 = max(f1, max_f1)
        return max_f1
class F1CandidateEvaluator(LabeledEvaluator):
    """Evaluator for candidate-answer selection models (qangaroo-style).

    get_evaluation supports several run modes — assembler mode via
    sess.partial_run, attention visualization in test mode, and a plain
    sess.run path. Several branches look unreachable or broken; see the
    NOTE(review) comments inline.
    """
    def __init__(self, config, model, tensor_dict=None):
        super(F1CandidateEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.loss = model.get_loss()
        if config.na:
            self.na = model.na_prob
        # Diagnostic counters (used by the doc-selection subclass below).
        self.not_in_first_sent_count = 0
        self.found_answer_doc_count = 0
    def get_evaluation(self, sess, batch):
        """Run one batch through the model and score candidate selection."""
        idxs, data_set = self._split_batch(batch)
        assert isinstance(data_set, DataSet)
        feed_dict = self._get_feed_dict(batch)
        partial_run = False
        config = self.config
        doc_lst = None
        if config.use_assembler:
            # Assembler mode: two-stage execution with sess.partial_run —
            # first pick sentence ids per model, then feed them back in.
            # NOTE(review): self.models is only defined by the MultiGPU
            # subclasses; on a plain F1CandidateEvaluator this raises
            # AttributeError — confirm intended usage.
            new_feed_dict = {}
            to_run = []
            feeds = list(feed_dict.keys())
            for mid, model in enumerate(self.models):
                to_run += [model.mac_rnn_cell.qsub_topk_ids, model.mac_rnn_cell.qsub_topk_probs, model.mac_rnn_cell.qsub_all_probs, model.yp, model.yp_list, model.mac_rnn_cell.doc_attn, \
                    model.x_sents_len_reconstruct]
                feeds += [model.assembler.selected_sent_ids]
            to_run += [self.global_step, self.model.assembler.yp, self.loss, list(self.tensor_dict.values())]
            handle = sess.partial_run_setup(to_run, feeds)
            for mid, (single_batch, model) in enumerate(zip(batch, self.models)):
                data_cand_word = single_batch[1].data['cand_word']
                data_x = single_batch[1].data['x']
                if len(data_x) <= self.config.batch_size:
                    # Too-small batch: duplicate the data to fill the batch.
                    data_cand_word = data_cand_word + data_cand_word
                    data_x = data_x + data_x
                    partial_run = True
                sents_ids, handle, tree_answer_cand_ids, doc_lst = model.assembler.get_sentence_ids(sess, data_cand_word, data_x, feed_dict, handle, mid)
                new_feed_dict[model.assembler.selected_sent_ids] = sents_ids
                if config.attn_visualization:
                    feed_dict[self.model.assembler.selected_sent_ids] = sents_ids
        else:
            # NOTE(review): this branch sets partial_run=True but never
            # creates `handle` or `new_feed_dict`, so the partial_run block
            # below would raise NameError — looks unreachable/broken.
            partial_run = True
            to_run = [self.global_step, self.model.assembler.yp, self.loss, list(self.tensor_dict.values())]
        if partial_run:
            if self.config.na:
                global_step, yp, loss, na, vals = sess.partial_run(handle, to_run, feed_dict=new_feed_dict)
            else:
                if self.config.mode == 'test' and self.config.attn_visualization:
                    attn_lst, doc_lst, global_step, yp, loss, vals = sess.partial_run(handle, to_run, feed_dict=new_feed_dict)
                elif config.use_assembler:
                    global_step, yp, loss, vals = sess.partial_run(handle, to_run, feed_dict=new_feed_dict)
                else:
                    global_step, yp, loss, vals = sess.partial_run(handle, to_run, feed_dict=new_feed_dict)
        else:
            if self.config.na:
                global_step, yp, loss, na, vals = sess.run([self.global_step, self.yp, self.loss, self.na, list(self.tensor_dict.values())], feed_dict=feed_dict)
            else:
                if config.mode == 'test' and config.attn_visualization:
                    # Visualization mode additionally fetches attention ids,
                    # weights, and per-hop predictions.
                    if config.mac_reasoning_unit == 'attention-lstm':
                        attn_lst, attn_lst_bod, doc_lst, doc_weights, word_weights, word_weights_bod, global_step, yp, loss, vals, yp_list = \
                            sess.run([self.model.mac_rnn_cell.qsub_topk_ids, self.model.mac_rnn_cell.qbod_topk_ids, self.model.mac_rnn_cell.top_doc_attn, self.model.mac_rnn_cell.doc_attn_weights_lst, \
                            self.model.mac_rnn_cell.qsub_topk_probs, self.model.mac_rnn_cell.qbod_topk_probs, self.global_step, self.yp, self.loss, list(self.tensor_dict.values()), self.model.yp_list], feed_dict=feed_dict)
                    else:
                        attn_lst, doc_lst, doc_weights, word_weights, global_step, yp, loss, vals, yp_list = sess.run([self.model.mac_rnn_cell.top_attn, self.model.mac_rnn_cell.top_doc_attn, \
                            self.model.mac_rnn_cell.doc_attn_weights_lst, self.model.mac_rnn_cell.top_attn_prob, \
                            self.global_step, self.yp, self.loss, list(self.tensor_dict.values()), self.model.yp_list], feed_dict=feed_dict)
                    # Stack per-hop predictions for the ensemble diagnostics.
                    ensemble_yps = []
                    for i in range(config.num_hops):
                        ensemble_yps.append(yp_list[i])
                    ensemble_yps = np.array(ensemble_yps)
                elif config.use_assembler:
                    global_step, yp, loss, vals = sess.run([self.global_step, self.model.assembler.yp, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
                else:
                    global_step, yp, loss, vals = sess.run([self.global_step, self.yp, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
        cand_span_y = data_set.data['cand_span_y']
        cand_span = data_set.data['cand_span']
        yp = yp[:data_set.num_examples]
        y_answer = np.argmax(yp, axis=-1)
        y = data_set.data['y']
        # Debug dump of attended documents/words in visualization mode.
        if self.config.mode == 'test' and self.config.attn_visualization:
            p2 = data_set.data['p2']
            x = data_set.data['x']
            print(data_set.data['q'])
            for i, attn in enumerate(attn_lst):
                for j, att in enumerate(attn):
                    if att[0] >= len(x[0][doc_lst[i][j][0]]):
                        print("exceed length")
                        continue
                    print('doc attention')
                    print(doc_lst[i])
                    print('word attention')
                    print('%s %s %s %s %s' %(x[0][doc_lst[i][j][0]][att[0]], x[0][doc_lst[i][j][0]][att[1]], x[0][doc_lst[i][j][0]][att[2]], x[0][doc_lst[i][j][0]][att[3]],x[0][doc_lst[i][j][0]][att[4]]))
                    print('%s %s %s %s %s' %(x[0][doc_lst[i][j][0]][att[5]], x[0][doc_lst[i][j][0]][att[6]], x[0][doc_lst[i][j][0]][att[7]], x[0][doc_lst[i][j][0]][att[8]],x[0][doc_lst[i][j][0]][att[9]]))
                    print('word attention weights')
                    print(word_weights[i])
                    if config.mac_reasoning_unit == 'attention-lstm':
                        print('q_body word attention')
                        print('%s %s %s %s %s' %(x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][0]], x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][1]], x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][2]], \
                            x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][3]],x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][4]]))
                        print('%s %s %s %s %s' %(x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][5]], x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][6]], x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][7]], \
                            x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][8]],x[0][doc_lst[i][j][0]][attn_lst_bod[i][j][9]]))
                        print('q_body word attention weights')
                        print(word_weights_bod[i])
            answers = np.squeeze(np.squeeze(np.argmax(ensemble_yps, axis=-1), axis=-1), axis=-1)
            correctness = [(answer == cand_span_y[0][0]) for answer in answers]
            print(answers)
            print(correctness)
        # Pick the highest-scoring candidate span per example.
        spans, scores = zip(*[get_best_candidate(ypi, cand_spani) for ypi, cand_spani in zip(yp, cand_span)])
        def _get(xi, span):
            # Token-level answer extraction, guarded against out-of-range spans.
            if len(xi) <= span[0][0]:
                return [""]
            if len(xi[span[0][0]]) <= span[1][1]:
                return [""]
            return xi[span[0][0]][span[0][1]:span[1][1]]
        def _get2(context, xi, span):
            # Character-level answer extraction via get_phrase, same guards.
            if len(xi) <= span[0][0]:
                return ""
            if len(xi[span[0][0]]) <= span[1][1]:
                return ""
            return get_phrase(context, xi, span)
        if self.config.split_supports:
            # NOTE(review): when split_supports is set but mode != 'test',
            # id2answer_dict is never assigned — NameError below; confirm.
            if self.config.mode == 'test':
                for idx, (id_, cand_, real_cand_) in enumerate(zip(data_set.data['ids'], data_set.data['cand_word_found'], data_set.data['real_cand_word_found'])):
                    for idyp, _yp in enumerate(yp[idx][0]):
                        # Tiny bonus nudges ties toward candidates actually
                        # present in the real candidate set.
                        if cand_[idyp] in real_cand_:
                            yp[idx][0][idyp] += 1e-7
                cand_index = np.argmax(yp, axis=-1)
                num_exceed_cand = feed_dict[self.model.num_exceed_cand]
                id2answer_dict = {id_: cand_[cand_index_]
                        for idx, (id_, cand_, cand_index_) in enumerate(zip(data_set.data['ids'], data_set.data['cand_word_found'], cand_index[:,0]))}
        else:
            id2answer_dict = {id_: _get2(context[0], xi, span)
                for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
        id2score_dict = {id_: score for id_, score in zip(data_set.data['ids'], scores)}
        id2answer_dict['scores'] = id2score_dict
        if self.config.na:
            id2na_dict = {id_: float(each) for id_, each in zip(data_set.data['ids'], na)}
            id2answer_dict['na'] = id2na_dict
        correct = [self.__class__.compare2(yi, span) for yi, span in zip(y, spans)]
        f1s = [self.__class__.span_f1(yi, span) for yi, span in zip(y, spans)]
        tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
        e = F1CandidateEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), y,
                         correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
        if self.config.wy:
            # NOTE(review): wyp is never computed in this method, so this
            # line raises NameError whenever config.wy is set — confirm.
            e.dict['wyp'] = wyp.tolist()
        if self.config.mode == 'test':
            return e, doc_lst
        else:
            return e
    def _split_batch(self, batch):
        # Single-GPU case: the batch is already (idxs, data_set).
        return batch
    def _get_feed_dict(self, batch):
        return self.model.get_feed_dict(batch[1], False)
    @staticmethod
    def compare2(yi, span):
        # Exact-match of the selected candidate span against any gold span.
        for start, stop in yi:
            if tuple(start) == span[0] and tuple(stop) == span[1]:
                return True
        return False
    @staticmethod
    def span_f1(yi, span):
        # Best token-overlap F1 against gold spans in the same sentence;
        # the inner span_f1 call resolves to the imported my.nltk_utils.span_f1.
        max_f1 = 0
        for start, stop in yi:
            if start[0] == span[0][0]:
                true_span = start[1], stop[1]
                pred_span = span[0][1], span[1][1]
                f1 = span_f1(true_span, pred_span)
                max_f1 = max(f1, max_f1)
        return max_f1
class MultiGPUF1Evaluator(F1Evaluator):
    """F1Evaluator over several model replicas: concatenates per-replica
    outputs along the batch axis and averages their losses."""
    def __init__(self, config, models, tensor_dict=None):
        super(MultiGPUF1Evaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):
            if config.split_supports == True:
                N, M, JX = config.batch_size, 1, config.max_para_size
            else:
                N, M, JX = config.batch_size, config.max_num_sents, config.max_sent_size
            def _concat(tensors):
                # Pad each replica's tensor to a common shape, then stack.
                return tf.concat(axis=0, values=[padded_reshape(t, [N, M, JX]) for t in tensors])
            self.yp = _concat([m.yp for m in models])
            self.yp2 = _concat([m.yp2 for m in models])
            self.wy = _concat([m.wy for m in models])
            self.loss = tf.add_n([m.get_loss() for m in models]) / len(models)
    def _split_batch(self, batches):
        idx_groups, data_sets = zip(*batches)
        merged_idxs = sum(idx_groups, ())
        merged_data = sum(data_sets, data_sets[0].get_empty())
        return merged_idxs, merged_data
    def _get_feed_dict(self, batches):
        combined = {}
        for m, (_, ds) in zip(self.models, batches):
            combined.update(m.get_feed_dict(ds, False))
        return combined
class MultiGPUF1CandidateEvaluator(F1CandidateEvaluator):
    """Candidate evaluator that fans a batch out over several replicas and
    concatenates their predictions along the batch axis."""
    def __init__(self, config, models, tensor_dict=None):
        super(MultiGPUF1CandidateEvaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):
            # Widest yp among the replicas sets the common padded width.
            widest = tf.reduce_max([tf.shape(m.yp)[2] for m in models])
            if config.split_supports == True:
                N, M, JX = config.batch_size, 1, widest
            else:
                N, M, JX = config.batch_size, config.max_num_sents, widest
            self.yp = tf.concat(axis=0, values=[padded_reshape(m.yp, [N, M, JX]) for m in models])
            self.loss = tf.add_n([m.get_loss() for m in models]) / len(models)
    def _split_batch(self, batches):
        idx_groups, data_sets = zip(*batches)
        merged_idxs = sum(idx_groups, ())
        merged_data = sum(data_sets, data_sets[0].get_empty())
        return merged_idxs, merged_data
    def _get_feed_dict(self, batches):
        combined = {}
        for m, (_, ds) in zip(self.models, batches):
            combined.update(m.get_feed_dict(ds, False))
        return combined
class F1CandidateDocSelEvaluator(F1CandidateEvaluator):
    """Measures document-selection accuracy of the MAC-RNN attention hops
    (accumulates a hit counter instead of producing an Evaluation object)."""
    def __init__(self, config, model, tensor_dict=None):
        super(F1CandidateDocSelEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
    def get_evaluation(self, sess, batch):
        idxs, data_set = self._split_batch(batch)
        assert isinstance(data_set, DataSet)
        feed_dict = self._get_feed_dict(batch)
        partial_run = False  # NOTE(review): unused — leftover from a partial_run experiment?
        config = self.config
        doc_lst, answer_doc_ids = sess.run([self.model.mac_rnn_cell.top_doc_attn, self.model.answer_doc_ids], feed_dict=feed_dict)
        for i in range(config.batch_size):
            for j in range(config.num_hops):
                # doc_lst[j][i][0]: top-attended document for example i at hop j
                selected_doc_id = doc_lst[j][i][0]
                if selected_doc_id in answer_doc_ids[i]:
                    # count each example at most once: break on the first hop that hits
                    self.found_answer_doc_count += 1
                    print(self.found_answer_doc_count)
                    break
        # intentionally returns None: this evaluator only updates found_answer_doc_count
        return None
class MultiGPUF1CandidateDocSelEvaluator(F1CandidateDocSelEvaluator):
    """Document-selection evaluator across multiple GPU towers."""
    def __init__(self, config, models, tensor_dict=None):
        super(MultiGPUF1CandidateDocSelEvaluator, self).__init__(config, models[0], tensor_dict=tensor_dict)
        self.models = models
        with tf.name_scope("eval_concat"):
            if config.split_supports == True:
                # JX is dynamic: the widest yp among the towers
                N, M, JX = config.batch_size, 1, tf.reduce_max([tf.shape(model.yp)[2] for model in models])
            else:
                N, M, JX = config.batch_size, config.max_num_sents, tf.reduce_max([tf.shape(model.yp)[2] for model in models])
            self.yp = tf.concat(axis=0, values=[padded_reshape(model.yp, [N, M, JX]) for model in models])
            # average the per-tower losses
            self.loss = tf.add_n([model.get_loss() for model in models])/len(models)
    def _split_batch(self, batches):
        # merge the per-GPU (idxs, DataSet) pairs into one combined batch
        idxs_list, data_sets = zip(*batches)
        idxs = sum(idxs_list, ())
        data_set = sum(data_sets, data_sets[0].get_empty())
        return idxs, data_set
    def _get_feed_dict(self, batches):
        # one feed dict covering every tower's placeholders (inference mode)
        feed_dict = {}
        for model, (_, data_set) in zip(self.models, batches):
            feed_dict.update(model.get_feed_dict(data_set, False))
        return feed_dict
class ForwardEvaluator(Evaluator):
    """Runs the model forward and extracts best-span answers plus scores
    (no gold-answer F1 bookkeeping — used for pure inference)."""
    def __init__(self, config, model, tensor_dict=None):
        super(ForwardEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.yp2 = model.yp2
        self.loss = model.get_loss()
        if config.na:
            # probability that the question has no answer in the context
            self.na = model.na_prob
    def get_evaluation(self, sess, batch):
        """Run one batch; return a ForwardEvaluation with id->answer and id->score dicts."""
        idxs, data_set = batch
        assert isinstance(data_set, DataSet)
        feed_dict = self.model.get_feed_dict(data_set, False)
        if self.config.na:
            global_step, yp, yp2, loss, na, vals = sess.run([self.global_step, self.yp, self.yp2, self.loss, self.na, list(self.tensor_dict.values())], feed_dict=feed_dict)
        else:
            global_step, yp, yp2, loss, vals = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
        # drop padding rows when the final batch is short
        yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
        spans, scores = zip(*[get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)])
        def _get(xi, span):
            # NOTE(review): unused — _get2 below is the variant actually called
            if len(xi) <= span[0][0]:
                return [""]
            if len(xi[span[0][0]]) <= span[1][1]:
                return [""]
            return xi[span[0][0]][span[0][1]:span[1][1]]
        def _get2(context, xi, span):
            # guard against spans that fall outside the tokenized context
            if len(xi) <= span[0][0]:
                return ""
            if len(xi[span[0][0]]) <= span[1][1]:
                return ""
            return get_phrase(context, xi, span)
        id2answer_dict = {id_: _get2(context, xi, span)
                          for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
        id2score_dict = {id_: score for id_, score in zip(data_set.data['ids'], scores)}
        # NOTE(review): 'scores'/'na' live in the same dict as example ids — collides
        # if an example is ever literally named 'scores' or 'na'
        id2answer_dict['scores'] = id2score_dict
        if self.config.na:
            id2na_dict = {id_: float(each) for id_, each in zip(data_set.data['ids'], na)}
            id2answer_dict['na'] = id2na_dict
        tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
        e = ForwardEvaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), float(loss), id2answer_dict, tensor_dict=tensor_dict)
        # TODO : wy support
        return e
    @staticmethod
    def compare(yi, ypi, yp2i):
        """True iff argmax start plus masked argmax end reproduce a gold span in `yi`."""
        for start, stop in yi:
            aypi = argmax(ypi)
            mask = np.zeros(yp2i.shape)
            # restrict end positions to the predicted start's sentence, at/after the start
            mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
            if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
                return True
        return False
    @staticmethod
    def compare2(yi, span):
        """True iff the predicted `span` exactly matches any gold span in `yi`."""
        for start, stop in yi:
            if tuple(start) == span[0] and tuple(stop) == span[1]:
                return True
        return False
    @staticmethod
    def span_f1(yi, span):
        """Best token F1 vs any same-sentence gold span.

        NOTE(review): the inner call resolves to the module-level span_f1
        helper, not to this static method — confirm that import exists.
        """
        max_f1 = 0
        for start, stop in yi:
            if start[0] == span[0][0]:
                true_span = start[1], stop[1]
                pred_span = span[0][1], span[1][1]
                f1 = span_f1(true_span, pred_span)
                max_f1 = max(f1, max_f1)
        return max_f1
def compute_answer_span(context, answer):
    """Locate `answer` inside `context`, case-insensitively.

    Returns (start, end) character offsets of the first occurrence, or
    (None, None) when the answer does not appear.

    Fix: the answer is now escaped with re.escape before being used as a
    regex — previously regex metacharacters in the answer (e.g. '(', '?')
    could crash the search or silently match the wrong text.
    """
    answer = answer.replace(' – ', ' ').lower()
    context = context.lower()
    match = re.search(re.escape(answer), context)
    if match is None:
        return None, None
    start = match.start()
    end = start + len(answer)
    return start, end
|
class Circule:
    """Circle with radius `raduis` and a configurable value of pi (default 3.144).

    NOTE(review): attribute/parameter names keep the original (misspelled)
    spelling because callers use them; 3.144 is a coarse pi approximation.
    """
    def __init__(self,raduis,pie= 3.144):
        self.raduis = raduis  # radius
        self.pie = pie        # value used for pi
    #Find Area By Radius
    def AreaByRadius(self):
        # pi * r^2
        return self.pie * self.raduis**2
    #Find Perimeter By Radius
    def PerimeterByRadius(self):
        # 2 * pi * r
        return 2*self.pie*self.raduis
class String:
    """Holds a string; can read it from stdin and echo it upper-cased."""
    def __init__(self,string=''):
        self.string = string
    def get_String(self):
        # reads from stdin and overwrites the stored string
        self.string =input("Enter the string :")
    def print_String(self):
        # echoes the stored string in upper case
        print(f"The string You Enter Is '{self.string.upper()}'")
class string_Revers:
    """Reverses the word order of a string, joining the words with '.'."""
    def __init__(self, string):
        self.string = string
    def Reverse_string(self):
        # split on whitespace, reverse the word list, then dot-join
        words = self.string.split()
        words.reverse()
        return '.'.join(words)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from keras import backend as K
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render
from django.conf import settings
import nltk
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import math
import os
import h5py
import numpy as np
import argparse
from google_images_download import google_images_download
from googlesearch import search as ims
import requests
import re
import urllib2
import urllib
import os
import argparse
import sys
import json
from extract_cnn_vgg16_keras import VGGNet
import shutil
import numpy as np
import h5py
import urllib2
import re
import os
from os.path import basename
from urlparse import urlsplit
from urlparse import urlparse as up
from urlparse import urlunparse
from posixpath import basename,dirname
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import argparse
from bs4 import BeautifulSoup as bs
import os
import sys
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
import pusher
# Root of the 'texim' app: parent-of-parent of this file, joined with 'texim'
BASE_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),'texim')
# NOTE(review/security): Pusher credentials are hardcoded in source control —
# rotate these keys and load them from settings or environment variables.
pusher_client = pusher.Pusher(
    app_id='650084',
    key='67d4a4100ac7bd39e18f',
    secret='b771da9ec6664de0e850',
    cluster='ap2',
    ssl=True
)
# Create your views here.
def get_imlist(path):
    """Return full paths of the image files (.jpg/.png/.jpeg/.svg) directly in `path`.

    Improvement: uses the tuple form of str.endswith instead of a chained
    'or' of four separate endswith calls — same behavior, one test per file.
    """
    image_suffixes = ('.jpg', '.png', '.jpeg', '.svg')
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith(image_suffixes)]
def remove_string_special_characters(s):
    """Strip punctuation, underscores and ALL whitespace from `s`.

    NOTE(review): the whitespace pattern is replaced with '' (not a space),
    so words end up fused together — presumably intentional for keyword
    matching; verify against callers.
    """
    cleaned = re.sub('[^\w\s]','', s)
    cleaned = re.sub('_','', cleaned)
    cleaned = re.sub('\s+','', cleaned)
    return cleaned.strip()
def count_words(sent):
    """Return the number of NLTK word tokens in `sent`.

    Fix: replaces a manual increment loop with len() over the token list —
    identical result, no counter bookkeeping.
    """
    return len(word_tokenize(sent))
def get_doc(sent):
    """Return per-document metadata for the documents in `sent`:
    a list of {'doc_id': 1-based index, 'doc_length': token count}.

    Fix: replaces a manually incremented counter with enumerate(..., 1).
    """
    return [{'doc_id': i, 'doc_length': count_words(doc)}
            for i, doc in enumerate(sent, 1)]
def create_freq_dicts(sents):
    """Return one {'doc_id': i, 'freq_dict': {token: count}} entry per document.

    Tokens are lower-cased NLTK word tokens. Fix: replaces the manual
    in/else counting branch with dict.get and the hand-rolled counter with
    enumerate(..., 1) — identical output.
    """
    fdl = []
    for doc_id, sent in enumerate(sents, 1):
        freq_dict = {}
        for word in word_tokenize(sent):
            word = word.lower()
            freq_dict[word] = freq_dict.get(word, 0) + 1
        fdl.append({'doc_id': doc_id, 'freq_dict': freq_dict})
    return fdl
def computeTF(doc_info, fdl, query):
    """Term frequency of `query` per document.

    For each doc whose frequency dict contains `query`, emit
    {'doc_id', 'TF_score' (count / doc_length), 'key'}.
    """
    tf_entries = []
    for entry in fdl:
        doc_id = entry['doc_id']
        doc_length = doc_info[doc_id - 1]['doc_length']
        for term, count in entry['freq_dict'].items():
            if term == query:
                tf_entries.append({'doc_id': doc_id,
                                   'TF_score': float(count) / float(doc_length),
                                   'key': term})
    return tf_entries
def computeIDF(doc_info, fdl, query):
    """Inverse document frequency of `query` per document that contains it.

    Returns one {'doc_id', 'IDF_score': log(num_docs / doc_freq), 'key'}
    entry for each matching document.

    Fixes: (1) the ratio uses explicit float division, so under Python 2 the
    integer division can no longer floor log's argument (e.g. 3 docs / 2 hits
    gave log(1) == 0); (2) the document frequency is computed once instead of
    being recounted for every matching document.
    """
    ids = []
    num_docs = len(doc_info)
    # number of documents whose frequency dict contains the query term
    doc_freq = sum(query in entry['freq_dict'] for entry in fdl)
    for dic in fdl:
        if query in dic['freq_dict']:
            ids.append({'doc_id': dic['doc_id'],
                        'IDF_score': math.log(float(num_docs) / doc_freq),
                        'key': query})
    return ids
def computeTFIDF(tfs, ids):
    """Combine TF and IDF entries (matched on doc_id and key) into TF-IDF scores."""
    combined = []
    for idf_entry in ids:
        for tf_entry in tfs:
            same_doc = idf_entry['doc_id'] == tf_entry['doc_id']
            same_key = idf_entry['key'] == tf_entry['key']
            if same_doc and same_key:
                combined.append({'doc_id': idf_entry['doc_id'],
                                 'TFIDF_score': idf_entry['IDF_score'] * tf_entry['TF_score'],
                                 'key': tf_entry['key']})
    return combined
def search_form(request):
    """Render the search landing page (GET); logs the app base dir for debugging."""
    print(BASE_DIR)
    return render(request, 'texim/index.html')
@csrf_exempt
def search(request):
    """POST endpoint: rank websites for a text query by combining TF-IDF text
    scores with VGG16 image-feature similarity scores.

    Pipeline: (1) download training images for the query, (2) extract VGG16
    features, (3) Google-search candidate sites, (4) TF-IDF-rank the sites'
    text, (5) download each site's images and score them against the training
    features, (6) push ranked lists to the browser via Pusher and render the
    results page.

    Side effects: writes/removes directories under BASE_DIR, fetches remote
    pages/images. NOTE(review): Python 2 code (print statements, urllib2).
    """
    query = request.POST['message']
    max_images = 20
    save_directory = os.path.join(BASE_DIR,'database')
    query_directory = os.path.join(BASE_DIR,'query')
    image_type="Action"
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = " Downloading Training images"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    # download training images only if this query was never fetched before
    if query not in os.listdir(save_directory):
        response = google_images_download.googleimagesdownload() #class instantiation
        arguments = {"keywords":query,"limit":max_images,"print_urls":True,"output_directory":save_directory} #creating list of arguments
        paths = response.download(arguments)
    db = os.path.join(save_directory,query)
    img_list = get_imlist(db)
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = " feature extraction starts"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    # extract VGG16 features for every training image of this query
    feats = []
    names = []
    model = VGGNet()
    for i, img_path in enumerate(img_list):
        try:
            norm_feat = model.extract_feat(img_path)
            img_name = os.path.split(img_path)[1]
            feats.append(norm_feat)
            names.append(img_name)
            print("extracting feature from image No. %d , %d images in total" %((i+1), len(img_list)))
            msg = "Extracting feature from image No."+str(i+1)+" images in total "+str(len(img_list))
            pusher_client.trigger('texim', 'my-event', {'message': msg})
        except Exception:
            # best-effort: skip unreadable/corrupt images, keep going
            print "Skipping Unexpected Error:", sys.exc_info()[1]
            msg = "Skipping Unexpected Error:" + str(sys.exc_info()[1])
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            pass
    feats = np.array(feats)
    names = np.array(names)
    # print(feats)
    # directory for storing extracted features
    # output = os.path.join(BASE_DIR,'feature.h5')
    print("--------------------------------------------------")
    print(" writing feature extraction results ...")
    print("--------------------------------------------------")
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = " writing feature extraction results ..."
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    # FEATURE.h5
    # h5f = h5py.File(output, 'w')
    # h5f.create_dataset('dataset_1', data = feats)
    # # h5f.create_dataset('dataset_2', data = names)
    # h5f.create_dataset('dataset_2', data = names)
    # h5f.close()
    # # read in indexed images' feature vectors and corresponding image names
    # h5f = h5py.File(output,'r')
    # # feats = h5f['dataset_1'][:]
    # feats = h5f.get('dataset_1')
    # # print(feats)
    # feats = np.array(feats)
    # #imgNames = h5f['dataset_2'][:]
    # imgNames = h5f.get('dataset_2')
    # # print(imgNames)
    # imgNames = np.array(imgNames)
    #h5f.close()
    # print(feats)
    # print(imgNames)
    print("--------------------------------------------------")
    print(" searching starts")
    print("--------------------------------------------------")
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = " searching starts"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    # read and show query image
    sites = []
    N = 5
    #Google search: collect candidate sites, keep the first N
    for url in ims(query, stop=13):
        print(url)
        sites.append(url)
    sites = sites[:N]
    print(sites)
    # sites = ['https://www.cars.com/',]
    total_img_scores = []
    doc_dic = []
    # scrape the visible paragraph text of each candidate site
    for site in sites:
        try:
            soup = bs(urllib2.urlopen(site),"html5lib")
            drc = ""
            for p in soup.find_all('p'):
                drc+=p.getText()
            doc_dic.append(drc)
        except Exception:
            # NOTE(review): a failed fetch is dropped silently, which shifts
            # doc_ids relative to `sites` — verify ranking alignment
            pass
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = " Ranking documents on basis of tf-idf scores "
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    msg = "--------------------------------------------------"
    pusher_client.trigger('texim', 'my-event', {'message': msg})
    # TF-IDF ranking of the scraped documents for the query term
    doc_info = get_doc(doc_dic)
    fdl = create_freq_dicts(doc_dic)
    TF_score = computeTF(doc_info,fdl,query)
    IDF_score = computeIDF(doc_info,fdl,query)
    TFIDF_scores = computeTFIDF(TF_score,IDF_score)
    total_doc_scores = [0 for x in range(len(sites))]
    for el in TFIDF_scores:
        total_doc_scores[el['doc_id']-1] = el['TFIDF_score']
    total_doc_scores = np.array(total_doc_scores)
    total_doc_scores.reshape((1, -1))
    rank_ID2 = np.argsort(total_doc_scores)[::-1]
    rank_score2 = total_doc_scores[rank_ID2]
    maxres = N
    doclist = [sites[index] for i,index in enumerate(rank_ID2[0:maxres])]
    print("doclist")
    print(doclist)
    print(rank_score2)
    pusher_client.trigger('results', 'my-event', {"doclist":doclist})
    # image scoring: download each site's images and compare VGG16 features
    # against the training-image features collected above
    for site in sites:
        try:
            soup = bs(urllib2.urlopen(site),"html5lib")
            img_tags = soup.find_all('img')
            print(img_tags)
            queryDir = os.path.join(query_directory,str(sites.index(site)))
            os.mkdir(queryDir)
            print("directory created")
            urls = []
            for img in img_tags:
                try:
                    urls.append(img['src'])
                except Exception:
                    pass
            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = " Downloading Query Images for Site-"+str(sites.index(site)+1)
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            for url in urls:
                filename = re.search(r'/([\w_-]+[.](jpg|gif|png))$', url)
                try:
                    # relative URL -> prefix with the site address
                    if 'http' not in url:
                        url = '{}{}'.format(site, url)
                    imgdata=urllib2.urlopen(url).read()
                    filname=basename(urlsplit(url)[2])
                    output=open(os.path.join(queryDir,filname),'wb')
                    output.write(imgdata)
                    output.close()
                except Exception:
                    print "Skipping Unexpected Error:", sys.exc_info()[1]
                    pass
            img_list = get_imlist(queryDir)
            qfeats = []
            qnames = []
            model = VGGNet()
            for i, img_path in enumerate(img_list):
                try:
                    norm_feat = model.extract_feat(img_path)
                    img_name = os.path.split(img_path)[1]
                    qfeats.append(norm_feat)
                    qnames.append(img_name)
                except Exception:
                    print "Skipping Unexpected Error:", sys.exc_info()[1]
                    pass
            qfeats = np.array(qfeats)
            qnames = np.array(qnames)
            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = " Calculating Image Score for Site-"+str(sites.index(site)+1)
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            msg = "--------------------------------------------------"
            pusher_client.trigger('texim', 'my-event', {'message': msg})
            model = VGGNet()
            # extract query image's feature, compute simlarity score and sort
            if qfeats.any():
                scores = []
                scores = np.array(scores)
                # accumulate dot-product similarity of every training feature
                # against all of this site's image features
                for qD in feats:
                    #qV = model.extract_feat(qD)
                    if scores.any():
                        scores += np.dot(qD, qfeats.T)
                    else:
                        scores = np.dot(qD,qfeats.T)
            else:
                scores = [0]
            scores = np.array(scores)
            total_img_scores.append(np.sum(scores))
        except Exception:
            # a failed site still contributes a zero score to keep list lengths aligned
            scores = [0]
            scores = np.array(scores)
            total_img_scores.append(np.sum(scores))
            pass
    total_img_scores = np.array(total_img_scores)
    total_img_scores.reshape((1, -1))
    rank_ID1 = np.argsort(total_img_scores)[::-1]
    rank_score1 = total_img_scores[rank_ID1]
    maxres = N
    imlist = [sites[index] for i,index in enumerate(rank_ID1[0:maxres])]
    print("imlist")
    print(imlist)
    print(rank_score1)
    # clean out the per-request query image cache
    shutil.rmtree(query_directory)
    os.mkdir(query_directory)
    image_type="Action"
    # final ranking: element-wise sum of image and document scores
    final_scores = [sum(x) for x in zip(total_img_scores, total_doc_scores)]
    final_scores = np.array(final_scores)
    final_scores.reshape((1, -1))
    rank_ID3 = np.argsort(final_scores)[::-1]
    rank_score3 = final_scores[rank_ID3]
    totlist = [sites[index] for i,index in enumerate(rank_ID3[0:maxres])]
    print("totlist")
    print(totlist)
    print(rank_score3)
    pusher_client.trigger('results', 'my-event', {"totlist":totlist})
    # free the Keras/TensorFlow session between requests
    K.clear_session()
    return render(request,'texim/search_results.html',{"totlist":totlist,"doclist":doclist})
#!/usr/bin/env python3
def vector_mean(v: list) -> float:
    """Arithmetic mean of a numeric vector (ZeroDivisionError on empty input)."""
    total = sum(v)
    return total / len(v)
def matrix_mean(v: list) -> float:
    """Mean of all entries of a matrix given as a list of rows.

    Fix: the original was a stub that always returned 0. — it now computes
    the actual mean; an empty matrix still yields 0. for backward
    compatibility with the stub's output.
    """
    total = 0.
    count = 0
    for row in v:
        total += sum(row)
        count += len(row)
    return total / count if count else 0.
def sigmoid(x: float) -> float:
    """Logistic sigmoid: 1 / (1 + e^-x)."""
    from math import exp
    denominator = 1 + exp(-x)
    return 1 / denominator
def noisy_sine(samples, precision) -> list:
    """Generate one period of a sine wave with additive Gaussian noise,
    scatter-plot it (blocking plt.show()), and return the sample list.

    samples: number of intervals; samples+1 points are produced.
    precision: noise std-dev is 1/precision, so larger precision = less noise.
    """
    import matplotlib.pyplot as plt
    from math import sin, pi
    import random
    xv = [] # X vector
    for x_i in range(samples+1):
        # sin over [0, 2*pi] plus zero-mean Gaussian noise
        xv.append(sin((2*pi*x_i)/samples) + random.gauss(0, 1/precision))
    plt.scatter(range(samples+1), xv)
    plt.title('Generated Noisy Sinusoid')
    plt.show()
    return xv
def gaussian(alpha, x, sigma) -> float:
    """Unnormalized Gaussian: alpha * exp(-x^2 / sigma^2).

    Fix: the original exponent `-(x*x/sigma*sigma)` divides by sigma and then
    multiplies by sigma again, cancelling sigma entirely (it computed
    alpha*exp(-x^2) for every sigma). Parentheses now group the intended
    x^2 / sigma^2 ratio.
    """
    from math import exp
    return alpha * exp(-(x * x) / (sigma * sigma))
def softmax(predictions: list) -> list:
    """Softmax of a list of raw scores; returns probabilities summing to 1.

    Fix: subtracts the maximum score before exponentiating (the standard
    numerically-stable form) so large inputs no longer raise OverflowError.
    An empty list still returns [] as before.
    """
    from math import exp
    if not predictions:
        return []
    peak = max(predictions)
    exps = [exp(p - peak) for p in predictions]
    denominator = sum(exps)
    return [e / denominator for e in exps]
def binomial(total: int, choose: int) -> int:
    """Binomial coefficient C(total, choose).

    Returns 0 when `choose` is outside [0, total] — the mathematically
    correct value. Fix: the original printed a warning for choose > total
    and fell through without computing anything (yielding None), and its
    try/except ValueError guard was unreachable for valid inputs.
    """
    from math import factorial as fact
    if choose < 0 or choose > total:
        return 0
    return fact(total) // fact(choose) // fact(total - choose)
|
#!/user/bin/env python3
'''
Helper functions for angularity analysis.
Ezra Lesser (elesser@berkeley.edu)
'''
import numpy as np
from math import pi
''' # Not needed: use instead pjet1.delta_R(pjet2)
# Return \Delta{R} between two fastjet.PsuedoJet objects
def deltaR(pjet1, pjet2):
# Check that there's not a problem with +/- pi in phi part
phi1 = pjet1.phi()
if phi1 - pjet2.phi() > pi:
phi1 -= 2*pi
elif pjet2.phi() - phi1 > pi:
phi1 += 2*pi
return np.sqrt( (pjet1.eta() - pjet2.eta())**2 + (phi1 - pjet2.phi())**2 )
'''
# Return jet angularity for fastjet.PseudoJet object
# OBSOLETE: PLEASE USE fjext.lambda_beta_kappa(jet, beta, kappa, jetR) within heppy
#def lambda_beta_kappa(jet, jetR, beta, kappa):
# return sum( [ (constit.pt() / jet.pt())**kappa * (jet.delta_R(constit) / jetR)**beta
# for constit in jet.constituents() ] )
# Return angularity for single fastjet.PseudoJet object with no constituents
def lambda_alpha_kappa_i(constit, jet, jetR, alpha, kappa):
    """Single-constituent contribution to the jet angularity lambda_alpha^kappa:
    (pT fraction)^kappa * (Delta R / jetR)^alpha."""
    pt_fraction = constit.pt() / jet.pt()
    angle_fraction = jet.delta_R(constit) / jetR
    return pt_fraction**kappa * angle_fraction**alpha
# Helper function for finding the correct jet pT bin
def pT_bin(jet_pT, pTbins):
    """Return the (pTmin, pTmax) edge pair from `pTbins` whose half-open
    interval [pTmin, pTmax) contains `jet_pT`, or (-1, -1) if none does."""
    edges = list(pTbins)
    for lo, hi in zip(edges[:-1], edges[1:]):
        if lo <= jet_pT < hi:
            return (lo, hi)
    return (-1, -1)
|
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
class CryptKey(object):
    """Plain data holder for a key imported into a CryptContext."""
    def __init__(self, blob_type, blob, blob_len, hnd_import_key, param_list, flags):
        self.blob_type = blob_type        # key-blob format identifier
        self.blob = blob                  # raw key blob bytes
        self.blob_len = blob_len          # declared blob length
        # handle of the key used to decrypt/import this blob (note the
        # attribute is named import_key while the parameter is hnd_import_key)
        self.import_key = hnd_import_key
        self.param_list = param_list      # extra provider parameters
        self.flags = flags                # import flags
class CryptContext(object):
    """
    Represents crypto context used by crypto functions.

    Handles are allocated from a class-wide counter (shared across all
    contexts) advancing in steps of 4, starting at 0x680.
    """
    curr_handle = 0x680
    def __init__(self, cname, pname, ptype, flags):
        self.container_name = cname
        self.provider_name = pname
        self.ptype = ptype
        self.flags = flags
        self.keys = {}
    def get_handle(self):
        """Allocate and return the next emulated handle value."""
        handle = CryptContext.curr_handle
        CryptContext.curr_handle = handle + 4
        return handle
    def import_key(self, blob_type=None, blob=None, blob_len=None, hnd_import_key=None,
                   param_list=None, flags=None):
        """Create a CryptKey from the given blob and register it; returns its handle."""
        new_key = CryptKey(blob_type, blob, blob_len, hnd_import_key, param_list, flags)
        handle = self.get_handle()
        self.keys[handle] = new_key
        return handle
    def get_key(self, hnd):
        """Look up a key by handle; None when unknown."""
        return self.keys.get(hnd, None)
    def delete_key(self, hnd):
        """Remove the key behind `hnd` (KeyError when unknown)."""
        self.keys.pop(hnd)
class CryptoManager(object):
    """
    Manages the emulation of crypto functions: tracks open crypto
    contexts by their emulated handles.
    """
    def __init__(self, config=None):
        super(CryptoManager, self).__init__()
        self.ctx_handles = {}
        self.config = config
    def crypt_open(self, cname=None, pname=None, ptype=None, flags=None):
        """Create a new CryptContext and return its handle."""
        new_ctx = CryptContext(cname, pname, ptype, flags)
        handle = new_ctx.get_handle()
        self.ctx_handles[handle] = new_ctx
        return handle
    def crypt_close(self, hnd):
        """Discard the context behind `hnd` (KeyError when unknown)."""
        self.ctx_handles.pop(hnd)
    def crypt_get(self, hnd):
        """Look up a context by handle; None when unknown."""
        return self.ctx_handles.get(hnd, None)
|
# Interactive BMI calculator: reads height (cm) and weight (kg) from stdin.
h = int(input("Enter your Height (cm): "))
w = int(input("Enter your weight (kg): "))
h1 = h * 0.01  # centimetres -> metres
body = w / (h1*h1)  # BMI = kg / m^2
print("Your BMI = ",body,end = ' is ')
# category thresholds checked from the lowest upward
if body < 16:
    print("Severely Underweight")
elif body < 18.5:
    print("Underweight")
elif body < 25:
    print("Normal")
elif body < 30:
    print("Overweight")
else:
    print("Obese")  # fixed user-facing typo: was "Oberse"
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import createmodel as crm
# import dataset as ds
from tensorflow import keras
# ************************************* Prepare Dataset ************************************************
# load preprocessed inputs and labels from .npy files in the working directory
x = np.load('x_filtered.npy')
y = np.load('y_data.npy')
print(x)
print(y)
print(x.shape)
# y,x = ds.load_data()
# NOTE(review): the slices below use `train_num - 1` / `test_num - 1` as the
# stop index, which silently drops the last sample of each split and leaves
# sample index train_num-1 unused — presumably an off-by-one; verify.
train_num = 8000
test_num = 523
x_train = x[0:train_num -1]
y_train = y[0:train_num -1]
x_test = x[train_num:train_num + test_num -1]
y_test = y[train_num:train_num + test_num -1]
# ======================================================================================================
# ************************************* Create Model ***************************************************
model = crm.create_model()
model.fit(x_train, y_train, epochs=8)
# -- model accurancy
val_loss, val_acc = model.evaluate(x_test, y_test)  # evaluate the out of sample data with model
print(val_loss)  # model's loss (error)
print(val_acc)  # model's accuracy
# ======================================================================================================
# ************************************* Make predictions ***************************************************
predictions = model.predict(x_test)
test = 2
print("prediction:", np.argmax(predictions[test]))
print("real value:", y_test[test])
# ======================================================================================================
# ************************************* Save Model ***************************************************
# model.summary()
# Save entire model
model.save('my_model.h5')
# ======================================================================================================
|
def wallis(terms):
    """Partial Wallis product with `terms` factors; converges to pi/2."""
    product = 1.0
    even = 2.0
    for _ in range(terms):
        product *= (even / (even - 1)) * (even / (even + 1))
        even += 2
    return product
# NOTE: Python 2 script — input() here evaluates the typed text to an int,
# and the bare print statement below is Py2-only syntax.
term=(input("number if approximation"))
ans=wallis(term)
# wallis() approximates pi/2, so doubling it approximates pi
print ans*2
|
"""
bitpm_internal.py
Name: Wirmantono
Provides bitwise pattern matching function for bitpm.py
"""
from z_algo import bitwise_z_algo
# number of printable ASCII characters from ' ' to '~' (95); the delta table
# reserves one extra slot at index CHAR_RANGE for the "no match" delta
CHAR_RANGE = ord('~') - ord(' ') + 1
def create_delta_array(pattern):
    """Build the per-character delta table for bitwise pattern matching.

    Slot `ord(c) - ord(' ')` holds the delta bits for printable character c;
    the extra slot at index CHAR_RANGE is the all-ones "no match" value.
    A 0 bit at position i in a character's delta means pattern[i] equals
    that character (e.g. pattern "abcd": delta['a'] = 0b0111).

    Time complexity: O(N), N = len(pattern).
    Space complexity: O(N) input, O(1) auxiliary (constant-size table).
    """
    all_mismatch = (2 ** len(pattern)) - 1
    table = [None] * (CHAR_RANGE + 1)
    table[CHAR_RANGE] = all_mismatch
    for i, ch in enumerate(pattern):
        slot = ord(ch) - ord(' ')
        if table[slot] is None:
            # first occurrence of this character: start from all-ones
            table[slot] = all_mismatch
        # clear bit i to mark a match at pattern position i
        table[slot] -= 2 ** i
    return table
def bitpm(text, pattern):
    """Bitwise (Shift-And style) pattern matching of `pattern` in `text`.

    A text window matches when the bit at position len(pattern)-1 of the
    running bit vector is 0. Returns the list of 0-based match start indices.

    The next window's vector is bit_vector(n+1) = (bit_vector(n) << 1) | delta(n+1),
    masked to len(pattern) bits to keep the integer bounded.

    Time complexity: O(m+n) (bitwise_z_algo is O(m+n), the scan is O(n)).
    Space complexity: O(m+n) input, O(m) auxiliary.
    """
    matches = []
    m = len(pattern)
    if m == 0:
        return matches
    # preprocessing: per-character delta table (see create_delta_array)
    deltas = create_delta_array(pattern)
    no_match_delta = deltas[CHAR_RANGE]
    # bit vector for the first possible window (see z_algo.py/bitwise_z_algo)
    bit_vector = bitwise_z_algo(pattern, text)
    top_bit = 2 ** (m - 1)
    window_mask = 2 ** m - 1
    n = len(text)
    for end in range(m - 1, n):
        # a 0 in the most significant (pattern-length) bit signals a match
        if bit_vector & top_bit == 0:
            matches.append(end - m + 1)
        if end < n - 1:
            # advance to the next window using the successor formula above
            delta = deltas[ord(text[end + 1]) - ord(' ')]
            if delta is None:
                # character absent from the pattern: assume no matches
                delta = no_match_delta
            bit_vector = ((bit_vector << 1) | delta) & window_mask
    return matches
|
from django import template
from ..models import Status
# template tag registry for this module (required name for Django to load tags)
register = template.Library()
@register.simple_tag
def status_label(status):
    """Template tag: human-readable label for a Status enum value."""
    member = Status(status)
    return member.label
|
#!/usr/bin/python
import json
import cgi
import cgitb
import os
import urllib2
import sqlite3
import os
import sys
import config
# Python 2 CGI script: prints an HTML page with monthly position/glider
# statistics for one OGN receiver station (or all of them) pulled from the
# flarmrange.onglide.com JSON API; station list comes from a local SQLite DB.
cgitb.enable()
www=True
streq = sys.argv[1:]
if streq :
    sta = streq[0] # request the station
    sta=sta.upper()
    rg=sta.strip()
else:
    rg = "ALL" # take it as default
dbpath =config.DBpath
html1="""<TITLE>Get the flights</TITLE> <IMG src="../gif/ogn-logo-150x150.png" border=1 alt=[image]><H1>Statistics of the OGN receiver stations:: </H1> <HR> <P> %s </P> </HR> """
html2="""<center><table><tr><td><pre>"""
html3="""</pre></td></tr></table></center>"""
filename=dbpath+'OGN.db' # open the DB in read-only mode
fd = os.open(filename, os.O_RDONLY)
conn = sqlite3.connect('/dev/fd/%d' % fd)
cursD=conn.cursor()
vd = ('Valid station: %-s:' % rg) # prepare the literal to show
print (html1 % vd) # tell that
print html2 # cursor for the ogndata table
print "<a> Month Positions Gliders </a>"
if rg == "ALL":
    cursD.execute('select idrec, descri from RECEIVERS ') # get all the receivers
else:
    cursD.execute('select idrec, descri from RECEIVERS where idrec = ? ', [rg]) # get all the receivers
for row in cursD.fetchall(): # search all the rows
    id=row[0]
    desc=row[1]
    # skip receivers without a usable id
    if (id == None or id == "NONE"):
        continue
    # fetch the monthly stats JSON for this station
    j = urllib2.urlopen('http://flarmrange.onglide.com/api/1/stats?station='+id+'&grouping=month')
    j_obj = json.load(j)
    j=json.dumps(j_obj, indent=4)
    stats=j_obj["stats"]
    print "<a>",id, ":", desc, "</a>"
    for month in stats:
        #print month
        #
        time= month["t"]
        pos= month["p"]
        gliders= month["g"]
        rows= month["n"]
        temp= month["temp"]
        # only print months that actually recorded positions
        if pos != 0:
            print "<a>", time, "%9.0f"%pos, "%9.0f"%gliders, "</a>"
print html3
cursD.close()
os.close(fd)
|
import theano.tensor as T
import theano
import numpy as np
from braindecode.analysis.cluster import mean_min_distance_to_cluster
def test_distance_to_cluster():
    """Sanity-check mean_min_distance_to_cluster against hand-computed values
    (Theano symbolic graph compiled once, then evaluated on tiny arrays)."""
    wanted_activations_var = T.ftensor4()
    actual_activations_var = T.ftensor3()
    cost_var = mean_min_distance_to_cluster(actual_activations_var,
        wanted_activations_var, n_cluster_samples=2)
    cost_fn = theano.function([actual_activations_var,
        wanted_activations_var],
        cost_var)
    # cluster of three samples; [0,1,0] appears twice, [0,1,1] once
    wanted_activations = np.array([[[[0,1,1]]],
        [[[0,1,0,]]],
        [[[0,1,0,]]]]).astype(np.float32)
    # exact duplicate of the majority sample -> zero distance
    assert cost_fn([[[0,1,0]]], wanted_activations) == 0
    # presumably: one differing unit averaged over 2 nearest samples * 3 units = 1/6
    assert np.allclose(cost_fn([[[0,1,1]]], wanted_activations), 1/6.0)
    assert cost_fn([[[1,1,1]]], wanted_activations) == 0.5
from flask import Flask, request, make_response, abort
from flask import render_template, url_for, session, redirect
from flask import flash
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from myData import *
app = Flask(__name__)
# NOTE(review/security): secret key hardcoded in source — load from env/config in production
app.config['SECRET_KEY'] = 'zoolakataYamatarajabhanasalagam'
bootstrap = Bootstrap(app)  # flask-bootstrap templates
moment = Moment(app)        # flask-moment client-side timestamps
class NameForm(FlaskForm):
    """Single-field form: required free-text name plus a submit button."""
    name = StringField('What is your name?', validators=[DataRequired()])
    submit = SubmitField('Submit')
@app.route('/')
def index():
    """Home page; passes the current UTC time for flask-moment rendering."""
    now = datetime.utcnow()
    return render_template('index.html', current_time=now)
@app.route('/user/<name>')
def user(name):
    """User page; aborts with HTTP 500 when the name is not in the fake DB."""
    if not check_db(name):
        abort(500)
    return render_template('user.html', name=name, items=items)
@app.route('/dyn/<name>')
def dyn(name):
    """User page variant: blanks the name instead of erroring when unknown."""
    if not check_db(name):
        name = ""
    return render_template('user.html', name=name, items=items)
@app.route('/form', methods=['GET', 'POST'])
def form():
    """Name form: stores the submitted name in the session, flashing when it
    differs from the previously stored one (POST-redirect-GET pattern)."""
    form = NameForm()
    if form.validate_on_submit():
        submitted = form.name.data
        if submitted is not None and submitted != session.get('name'):
            flash('a name changer')
        session['name'] = submitted
        print (session.get('name'))
        return redirect(url_for('form'))
    return render_template('formin.html', form=form, name=session.get('name'))
@app.errorhandler(404)
def page_not_found(e):
    """Custom 404 page."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Custom 500 page."""
    body = render_template('500.html')
    return body, 500
if __name__ == '__main__':
    # development server only; use a WSGI server in production
    app.run()
# ##################################################
# YOU SHOULD NOT TOUCH THIS FILE !
# ##################################################
import math as m
import numpy as np
import pytest
from numpy_questions import wallis_product, max_index
def test_max_index():
    # fixed case: the maximum (2) sits at row 1, col 0
    X = np.array([[0, 1], [2, 0]])
    assert max_index(X) == (1, 0)
    # random case: the returned position must hold a global maximum
    X = np.random.randn(100, 100)
    i, j = max_index(X)
    assert np.all(X[i, j] >= X)
    # non-array and non-2D inputs must raise ValueError
    with pytest.raises(ValueError):
        max_index(None)
    with pytest.raises(ValueError):
        max_index([[0, 1], [2, 0]])
    with pytest.raises(ValueError):
        max_index(np.array([0, 1]))
def test_wallis_product():
    # zero terms: the empty product convention used here yields 2.
    pi_approx = wallis_product(0)
    assert pi_approx == 2.
    # one term: 2 * (4/3) = 8/3
    pi_approx = wallis_product(1)
    assert pi_approx == 8 / 3
    # many terms: converges to pi within 1e-4
    pi_approx = wallis_product(100000)
    assert abs(pi_approx - m.pi) < 1e-4
|
import fileinput
import math
# Python 2 stdin script: given N and an arithmetic progression with one term
# missing, binary-search the half that violates the common difference and
# print the missing value as the mean of the two terms that straddle the gap.
l=[];
for line in fileinput.input():
    l.append(line)
N = int(l[0])
#for i in xrange(0, int(l[0])):
# print int(l[2][i])
squeek = l[1].split()
sequence = [int(i) for i in squeek]
# common difference estimated from the endpoints (integer division in Py2)
prog = (sequence[N-1] - sequence[0])/N
half = int(math.floor(N/2))
#print sequence[half]
#print sequence[0] + (prog * half)
#print sequence[half]==sequence[0] + prog * half
#print half
while N > 2:
    #missing something in first half of sequence the following will be True
    if sequence[half] > sequence[0] + prog * half:
        #since something is missing in the first half, only look at first half of sequence
        #print "hey"
        sequence = sequence[:half+1]
    #missing something in the second half of sequence the following will be True
    elif sequence[half] == sequence[0] + prog * half:
        #since something is missing in the second half, only look at second half of sequence
        sequence = sequence[half:]
    else:
        sequence = sequence[:N-1]
    N = len(sequence)
    half = int(math.floor(N/2))
    #print sequence
    #print half
# two elements remain; the missing term is their midpoint (Py2 print statement)
print (sequence[1]+sequence[0])/2
|
## run this file with command 'python -m tests.scripts.test_visual_metadata_db_dump' ##
from unittest import TestCase, main
from scripts.visual_metadata_db_dump import VisualMetaData, read_visual_metadata_map
from mock import patch, Mock
import requests
class TestVisualMetadataTest(TestCase):
    """Unit tests for VisualMetaData and read_visual_metadata_map, with all
    HTTP traffic replaced by Mock responses."""

    def setUp(self):
        # Three representative species rows fed to the object under test.
        self.species_list = [
            {"id": 1, "visual": "spiders"},
            {"id": 2, "visual": "fishes"},
            {"id": 258, "visual": "primates"},
        ]
        self.visual_metadata = VisualMetaData(self.species_list)

    def test_read_visual_metadat_map(self):
        """A mocked GET should surface its status code and payload."""
        with patch.object(requests, 'get') as fake_get:
            fake_get.return_value = canned = Mock()
            canned.status_code = 200
            canned.text = self.species_list
            response = read_visual_metadata_map(fake_get)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.text, self.species_list)

    def test_delete_visual_metadata(self):
        """delete_visual_metadata should report success from the mocked POST."""
        with patch.object(requests, 'post') as fake_post:
            fake_post.return_value = canned = Mock()
            canned.status_code = 200
            canned.success = True
            response = self.visual_metadata.delete_visual_metadata()
            self.assertEqual(response.status_code, 200)
            self.assertTrue(response.success)

    def test_insert_visual_metadata(self):
        """insert_visual_metadata should report success from the mocked POST."""
        with patch.object(requests, 'post') as fake_post:
            fake_post.return_value = canned = Mock()
            canned.status_code = 200
            canned.success = True
            response = self.visual_metadata.insert_visual_metadata()
            self.assertEqual(response.status_code, 200)
            self.assertTrue(response.success)

    def test_get_species_data(self):
        """get_species_data should emit one record per species plus a
        leading 'test' record, keyed by metadata_id."""
        expected = [
            {"id": 1, "metadata_id": 0, "visual": "test"},
            {"id": 2, "metadata_id": 1, "visual": "spiders"},
            {"id": 3, "metadata_id": 2, "visual": "fishes"},
            {"id": 4, "metadata_id": 258, "visual": "primates"},
        ]
        self.assertEqual(self.visual_metadata.get_species_data(), expected)
if __name__ == '__main__':
    # unittest's main(), imported above, discovers and runs the TestCase.
    main()
|
#!/usr/bin/python3.6
from __future__ import division
import numpy as np
import pandas as pd
import sys
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import csv
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import classification_report
# Evaluate an SVM (and optionally RandomForest) mutation classifier on
# several feature subsets, reporting the Matthews correlation coefficient.
test_file = 'records.csv'
test_frame = pd.read_csv(test_file)

# Candidate feature sets: full set (cols) and ablations dropping features.
cols = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','asa','sizechange']
cols1 = ['correlation','conservation','polaritychange','hydroindexchange','secondarystruc','asa','sizechange']
cols2 = ['correlation','conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','sizechange']
cols3 = ['correlation','conservation','polaritychange','chargechange','secondarystruc','sizechange']
cm1 = ['correlation','conservation','polaritychange','chargechange']
cm2 = ['correlation','conservation','polaritychange','hydroindexchange']
cm3 = ['correlation','conservation','polaritychange','secondarystruc']
cm4 = ['correlation','conservation','polaritychange','asa']
cm5 = ['correlation','conservation','polaritychange','sizechange']
cm6 = ['correlation','conservation','chargechange','secondarystruc']
cm7 = ['correlation','conservation','hydroindexchange','secondarystruc','asa']
cm8 = ['conservation','polaritychange','hydroindexchange','secondarystruc','asa']
cm9 = ['chargechange']
cm10 = ['hydroindexchange']
cm11 = ['secondarystruc']
cm12 = ['asa']
cm13 = ['sizechange']
colsRes = ['class']
test_files = ['corr_cons_polar_charge_class.csv ',
              'corr_cons_polar_hydro_class.csv',
              'corr_cons_polar_secondary_class.csv',
              'corr_cons_polar_asa_class.csv',
              'corr_cons_polar_size_class.csv',
              ]
#dataset 150 150 vsetky parametre
train_file = 'train_data_cleaned_new.csv'
train_frame = pd.read_csv(train_file)


def _features_and_labels(frame, feature_cols):
    """Return (X, y) arrays for *feature_cols* of *frame*.

    ``DataFrame.as_matrix()`` was removed in pandas 1.0 and raised
    AttributeError here; ``to_numpy()`` is the supported replacement.
    The label column (colsRes) is flattened to a 1-D array as the
    sklearn estimators expect.
    """
    return frame[feature_cols].to_numpy(), frame[colsRes].to_numpy().ravel()


################################
trainArr, trainRes = _features_and_labels(train_frame, cols)
testArr, testRes = _features_and_labels(test_frame, cols)
######################################
# na SVM aj RF
###################################
trainArr0, trainRes0 = _features_and_labels(train_frame, cols3)
testArr0, testRes0 = _features_and_labels(test_frame, cols3)
##########################################
# na SVM aj RF vsetky parametre
#########################################
trainArr1, trainRes1 = _features_and_labels(train_frame, cols)
testArr1, testRes1 = _features_and_labels(test_frame, cols)
##########################################
# na SVM
#########################################
trainArr2, trainRes2 = _features_and_labels(train_frame, cm6)
testArr2, testRes2 = _features_and_labels(test_frame, cm6)
##########################################
# na SVM
##########################################
trainArr3, trainRes3 = _features_and_labels(train_frame, cm5)
testArr3, testRes3 = _features_and_labels(test_frame, cm5)
##########################################
# na RF
##########################################
trainArr4, trainRes4 = _features_and_labels(train_frame, cols1)
testArr4, testRes4 = _features_and_labels(test_frame, cols1)
########################################
trainArr5, trainRes5 = _features_and_labels(train_frame, cm5)
testArr5, testRes5 = _features_and_labels(test_frame, cm5)

test_class = test_frame[['class']]
#rf = RandomForestClassifier(max_features='auto',n_estimators=100,n_jobs=1,min_samples_leaf=50,class_weight="balanced")
#rf.fit(trainArr,trainRes)
#result = rf.predict(testArr)

# The single enabled experiment: a linear SVM on the full feature set.
classifier = svm.SVC(kernel='linear', class_weight='balanced')
classifier.fit(trainArr, trainRes)
result = classifier.predict(testArr)
"""
rf = RandomForestClassifier(max_features='auto',n_estimators=1000,n_jobs=1,min_samples_leaf=50,class_weight="balanced")
rf.fit(trainArr0,trainRes0)
result4 = rf.predict(testArr0)
rf = RandomForestClassifier(max_features='auto',n_estimators=1000,n_jobs=1,min_samples_leaf=50,class_weight="balanced")
rf.fit(trainArr1,trainRes1)
result5 = rf.predict(testArr1)
rf = RandomForestClassifier(max_features='auto',n_estimators=1000,n_jobs=1,min_samples_leaf=50,class_weight="balanced")
rf.fit(trainArr4,trainRes4)
result6 = rf.predict(testArr4)
classifier = svm.SVC(kernel = 'rbf',degree=3,class_weight='balanced')
classifier.fit(trainArr0, trainRes0)
result0 = classifier.predict(testArr0)
classifier = svm.SVC(kernel = 'poly',degree=4,class_weight='balanced')
classifier.fit(trainArr1, trainRes1)
result1 = classifier.predict(testArr1)
classifier = svm.SVC(kernel = 'poly',degree=4,class_weight='balanced')
classifier.fit(trainArr2, trainRes2)
result2 = classifier.predict(testArr2)
classifier = svm.SVC(kernel = 'rbf',degree=3,class_weight='balanced')
classifier.fit(trainArr3, trainRes3)
result3 = classifier.predict(testArr3)
"""
predicted_class = result
# Matthews correlation coefficient: balanced measure for binary classes.
mcc = matthews_corrcoef(test_class, predicted_class)
print("File : "+ str(mcc))
#with open('majority_voting4_new.csv', 'w') as f:
#    writer = csv.writer(f, delimiter=',')
#    writer.writerows(zip(result0,result1,result2,result3,result4,result5,result6))
#zatial korelacia 0.476
""" Track the robot by color """
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-r", "--record", type=bool,
default=False, nargs="?",
help="record a video")
args = vars(ap.parse_args())
shape = (864,480)
sepShape = 10
# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenlower = (38,50,50)
greenupper = (48,255,255)
bluelower = (100,50,50)
blueupper = (110,255,255)
redlower = (0,50,50)
redupper = (5,255,255)
camera = cv2.VideoCapture(1)
camera.set(3,shape[0])
camera.set(4,shape[1])
size = tuple(map(int,(camera.get(3),camera.get(4))))
if args["record"]:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
movieSize = size[0]*2 + sepShape,size[1]*2 + sepShape
recorder = cv2.VideoWriter('simulation.avi',fourcc,20.0,movieSize)
def findRobot(lower,upper,hsv):
    """Locate the largest blob inside an HSV colour band.

    Returns a pair: ``((x, y), radius)`` as ints for the largest contour
    with radius > 2, or ``None`` when nothing qualifies, together with the
    BGR rendering of the in-range pixels (for the diagnostic mosaic).
    """
    band = cv2.inRange(hsv, lower, upper)
    # Erode then dilate to drop small speckles while keeping blob size.
    cleaned = cv2.dilate(cv2.erode(band, None, iterations=2), None, iterations=2)
    # [-2] picks the contour list regardless of the OpenCV major version.
    contours = cv2.findContours(cleaned.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    masked_hsv = cv2.bitwise_and(hsv, hsv, mask=band)
    bgr_view = cv2.cvtColor(masked_hsv, cv2.COLOR_HSV2BGR)
    if not contours:
        return None, bgr_view
    biggest = max(contours, key=cv2.contourArea)
    ((cx, cy), r) = cv2.minEnclosingCircle(biggest)
    if r > 2:
        return ((int(cx), int(cy)), int(r)), bgr_view
    return None, bgr_view
def circleRobot(image,xy,radius):
    """Draw the detection: a yellow circle outline plus a red centre dot."""
    cv2.circle(image, xy, radius, (0, 255, 255), 2)
    cv2.circle(image, xy, 5, (0, 0, 255), -1)
# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # NOTE(review): when the grab fails the loop spins without waitKey or a
    # break — a dead camera makes this a busy loop; consider breaking instead.
    if grabbed:
        # resize the frame, blur it, and convert it to the HSV
        # color space
        frame = imutils.resize(frame)
        # 11x11 Gaussian blur suppresses pixel noise before thresholding.
        blurred = cv2.GaussianBlur(frame, (11, 11), sigmaX=0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # One detection pass per tracked colour band; each returns the
        # detected circle (or None) and a diagnostic in-range image.
        greenCircle,greenInRange = findRobot(greenlower,greenupper,hsv)
        blueCircle,blueInRange = findRobot(bluelower,blueupper,hsv)
        redCircle,redInRange = findRobot(redlower,redupper,hsv)
        # Annotate every successful detection onto the live frame.
        if greenCircle:
            circleRobot(frame,*greenCircle)
        if blueCircle:
            circleRobot(frame,*blueCircle)
        if redCircle:
            circleRobot(frame,*redCircle)
        # Grey separator bars (value 100) between the mosaic panels.
        vertSep = np.zeros((sepShape,shape[0],3),dtype=np.uint8)
        horrSep = np.zeros((shape[1],sepShape,3),dtype=np.uint8)
        middSep = np.zeros((sepShape,sepShape,3),dtype=np.uint8)
        vertSep += 100
        horrSep += 100
        middSep += 100
        # Assemble a 2x2 mosaic: frame + the three in-range views.
        # NOTE(review): axis=0 stacks vertically and axis=1 horizontally, so
        # "top"/"bot" are actually columns joined side by side — the layout is
        # transposed relative to the names; confirm this is the intended view.
        top = np.concatenate((frame,vertSep,greenInRange),axis=0)
        mid = np.concatenate((horrSep,middSep,horrSep),axis=0)
        bot = np.concatenate((redInRange,vertSep,blueInRange),axis=0)
        combined = np.concatenate((top,mid,bot),axis=1)
        # show the frame to our screen
        cv2.imshow("combined",combined)
        key = cv2.waitKey(1) & 0xFF
        # recorder exists only when --record was given (see setup guard).
        if args["record"]:
            recorder.write(combined)
        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
# cleanup the camera and close any open windows
camera.release()
if args["record"]:
    recorder.release()
cv2.destroyAllWindows()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.