text stringlengths 8 6.05M |
|---|
# Show an image in a window until the ESC key is pressed.
import cv2 as cv

ESC = 27  # keycode returned by cv.waitKey for the Escape key

# OpenCV loads the image into a numpy array in BGR channel order.
img = cv.imread("irene.jpg", cv.IMREAD_UNCHANGED)
if img is None:
    # imread returns None (it does not raise) when the file is missing or
    # unreadable; without this check imshow would fail with a cryptic error.
    raise SystemExit("could not read image file 'irene.jpg'")

cv.imshow('img', img)  # creates the window on first call
while True:
    # waitKey blocks until a key is pressed (an optional argument gives a
    # timeout in milliseconds) and returns the pressed key's code.  Without
    # it the script would terminate immediately after drawing the window.
    key = cv.waitKey()
    print(key)
    if key == ESC:
        break
cv.destroyAllWindows()
|
import os
import pygame
from game import App
# Position of the game window on screen (pixels from the top-left corner).
x_pos = 300
y_pos = 120
# SDL (which pygame wraps) reads this environment variable to place its
# window; it must be set before pygame.display.set_mode() is called.
# (An unused `wmic` command string that was never executed has been removed.)
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (x_pos, y_pos)
# Create class to show Start Window
class Initial:
    """Start-menu window: shows instructions and dispatches to the game or
    help scripts based on keyboard input."""

    # Screen size (note: set_mode below actually uses App's dimensions)
    windowWidth = 880
    windowHeight = 616
    # Init App() -Class (created once, at class-definition time)
    App = App()

    def game_intro(self):
        """Run the start-menu event loop until the user chooses an action.

        Keys:
            SPACE -- launch game.py in a new process, then exit this menu
            H     -- launch Help_win.py in a new process, then exit
            Q     -- quit
        """
        intro = True
        while intro:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
                if event.type == pygame.KEYDOWN:
                    # Start Game file in new window
                    if event.key == pygame.K_SPACE:
                        os.system("python game.py")
                        pygame.quit()
                        quit()
                    # Call Help Window
                    if event.key == pygame.K_h:
                        os.system("python Help_win.py")
                        pygame.quit()
                        quit()
                    # Quit Game
                    if event.key == pygame.K_q:
                        pygame.quit()
                        quit()
            pygame.display.set_caption('Snake game')
            # Set size of window
            self.App._display_surf = pygame.display.set_mode(
                (self.App.windowWidth, self.App.windowHeight))
            # Load Background Image
            self.App._background_surf = pygame.image.load("start_font.jpg")
            # BUG FIX: the surfaces are stored under the underscored names
            # _display_surf/_background_surf (see the two lines above); the
            # old code read display_surf/background_surf, which raised
            # AttributeError at runtime.
            self.App._display_surf.blit(self.App._background_surf, (0, 0))
            # Show text on Display
            self.App.message_to_screen("It`s Slither Game", (43, 88, 12), -160, "large")
            self.App.message_to_screen("The objective of the game is to eat red apples", (0, 0, 0), -70)
            self.App.message_to_screen("The more apples you eat, the longer you get", (0, 0, 0), -30)
            self.App.message_to_screen("If you run into yourself, or the edges, you die!", (0, 0, 0), 10)
            self.App.message_to_screen("Beware of fish, it can eat you!", (0, 0, 0), 50)
            self.App.message_to_screen("Press 'Space' to play, H to Help,", (0, 0, 0), 110, "medium")
            self.App.message_to_screen("P to pause or Q to quit.", (0, 0, 0), 150, "medium")
            pygame.display.update()
# Initialise Start Window
# Launch the start menu when this file is executed directly.
if __name__ == "__main__":
    Initial().game_intro()
# Skanda Srikkanth
# I pledge my honor that I have abided by the Stevens Honor System
# Microsoft (MSFT) and Apple (AAPL) Stock
"""This program will analyze the open/close, high/low prices of Apple and Microsoft"""
# Question: Which company's stock price was more negatively affected by the COVID pandemic, compared to their performance in 2019?
import csv
from matplotlib import pyplot as plt
print("I will split the data into pre-pandemic and pandemic economies")
print("I will define the pandemic economy as dates on and after Jan 31, which was when the White House declared this a public health emergency")
print()

# Ticker labels and the CSV files holding each company's 2019-2020 data.
label = ['MSFT', 'AAPL']
microsoft = "C:/Users/srikk/OneDrive/Documents/CS-110/Data/MSFT_2019-2020.csv"
apple = "C:/Users/srikk/OneDrive/Documents/CS-110/Data/AAPL_2019-2020.csv"
company = [microsoft, apple]
class Apple:
    """Plot Apple (AAPL) stock metrics, split at the pandemic boundary.

    CSV rows 1-273 cover 1/2/2019 to 1/31/2020 (pre-pandemic); the remaining
    rows cover 1/31/2020 to 11/24/2020 (pandemic).  Duplicated plotting code
    for the six period/metric combinations is factored into _graph().
    """

    # Prompt shown before every graph; the answer selects the metric.
    _PROMPT = ("Enter 'CP' for closing price, 'OCA' for open/close average, "
               "and 'HLC' for the high/low change: ")

    # Per-metric y-axis label and value extractor.
    # CSV columns: 0=date, 1=open, 2=high, 3=low, 4=close.
    _METRICS = {
        'CP': ("Closing Price",
               lambda row: float(row[4])),
        'OCA': ("Open/Close Average",
                lambda row: (float(row[1]) + float(row[4])) / 2),
        'HLC': ("High Minus Low",
                lambda row: float(row[2]) - float(row[3])),
    }

    def _graph(self, rows_slice, titles):
        """Prompt for a metric and plot it over the given row range.

        rows_slice -- slice object selecting the CSV data rows
        titles     -- dict mapping metric code ('CP'/'OCA'/'HLC') to title
        """
        date = []
        variable = []
        with open(apple) as file:
            print("Apple (AAPL):")
            choice = input(self._PROMPT)
            if choice not in self._METRICS:
                return  # unrecognized answer: plot nothing (same as before)
            ylabel, value = self._METRICS[choice]
            for row in list(csv.reader(file))[rows_slice]:
                date.append(row[0])
                variable.append(value(row))
            plt.plot(date, variable)
            plt.xlabel("Date")
            plt.ylabel(ylabel)
            plt.title(titles[choice])
            plt.xticks(rotation=90)
            plt.xticks(fontsize=2)
            plt.grid()
            plt.show()

    def normal_graph(self):
        """Graph a user-chosen metric for the pre-pandemic period."""
        self._graph(slice(1, 274), {
            'CP': "Apple Closing Price from 1/2/2019 to 1/31/2020",
            'OCA': "Open/Close Average of Apple from 1/2/2019 to 1/31/2020",
            'HLC': "Change in Daily High and Low Price of Apple from 1/2/2019 to 1/31/2020",
        })

    def pandemic_graph(self):
        """Graph a user-chosen metric for the pandemic period."""
        self._graph(slice(274, None), {
            'CP': "Apple Closing Price from 1/31/2020 to 11/24/2020",
            'OCA': "Open/Close Average of Apple from 1/31/2020 to 11/24/2020",
            'HLC': "Change in High and Low Price of Apple from 1/31/2020 to 11/24/2020",
        })
class Microsoft:
    """Plot Microsoft (MSFT) stock metrics, split at the pandemic boundary.

    CSV rows 1-273 cover 1/2/2019 to 1/31/2020 (pre-pandemic); the remaining
    rows cover 1/31/2020 to 11/24/2020 (pandemic).  Duplicated plotting code
    for the six period/metric combinations is factored into _graph().
    """

    # Prompt shown before every graph; the answer selects the metric.
    _PROMPT = ("Enter 'CP' for closing price, 'OCA' for open/close average, "
               "and 'HLC' for the high/low change: ")

    # Per-metric y-axis label and value extractor.
    # CSV columns: 0=date, 1=open, 2=high, 3=low, 4=close.
    _METRICS = {
        'CP': ("Closing Price",
               lambda row: float(row[4])),
        'OCA': ("Open/Close Average",
                lambda row: (float(row[1]) + float(row[4])) / 2),
        'HLC': ("High Minus Low",
                lambda row: float(row[2]) - float(row[3])),
    }

    def _graph(self, rows_slice, titles):
        """Prompt for a metric and plot it over the given row range.

        rows_slice -- slice object selecting the CSV data rows
        titles     -- dict mapping metric code ('CP'/'OCA'/'HLC') to title
        """
        date = []
        variable = []
        with open(microsoft) as file:
            print("Microsoft (MSFT):")
            choice = input(self._PROMPT)
            if choice not in self._METRICS:
                return  # unrecognized answer: plot nothing (same as before)
            ylabel, value = self._METRICS[choice]
            for row in list(csv.reader(file))[rows_slice]:
                date.append(row[0])
                variable.append(value(row))
            plt.plot(date, variable)
            plt.xlabel("Date")
            plt.ylabel(ylabel)
            plt.title(titles[choice])
            plt.xticks(rotation=90)
            plt.xticks(fontsize=2)
            plt.grid()
            plt.show()

    def normal_graph(self):
        """Graph a user-chosen metric for the pre-pandemic period."""
        self._graph(slice(1, 274), {
            'CP': "Microsoft Closing Price from 1/2/2019 to 1/31/2020",
            'OCA': "Open/Close Average of Microsoft from 1/2/2019 to 1/31/2020",
            'HLC': "Change in Daily High and Low Price of Microsoft from 1/2/2019 to 1/31/2020",
        })

    def pandemic_graph(self):
        """Graph a user-chosen metric for the pandemic period."""
        self._graph(slice(274, None), {
            'CP': "Microsoft Closing Price from 1/31/2020 to 11/24/2020",
            'OCA': "Open/Close Average of Microsoft from 1/31/2020 to 11/24/2020",
            'HLC': "Change in Daily High and Low Price of Microsoft from 1/31/2020 to 11/24/2020",
        })
def main_graph():
    """Plot the closing prices of both companies on one labeled graph.

    Reads every data row (header skipped) of each CSV in `company` and plots
    one labeled series per ticker in `label`.
    """
    for lbl, file in zip(label, company):
        date = []
        closing_price = []
        with open(file) as csv_file:
            for row in list(csv.reader(csv_file))[1:]:
                date.append(row[0])          # column 0: trading date
                closing_price.append(float(row[4]))  # column 4: close
        plt.plot(date, closing_price, label=lbl)
    # BUG FIX: the original re-plotted the last series a second time here
    # without a label, drawing a duplicate unlabeled line on the chart.
    plt.xlabel("Date")
    plt.ylabel("Closing Price")
    plt.title("Closing Price of Apple and Microsoft from 1/2/2019 to 11/24/2020")
    plt.xticks(rotation=90)
    plt.xticks(fontsize=2)
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()
# Apple.normal_graph(Apple)
# Apple.pandemic_graph(Apple)
# Microsoft.normal_graph(Microsoft)
# Microsoft.pandemic_graph(Microsoft)
# main_graph()
|
"""Setup script for write-me Python package."""
from setuptools import setup, find_packages
setup(
    name='write_me',
    packages=find_packages(),
    entry_points={
        # `genreadme` console command -> readme_generator.make_scaffold:main
        'console_scripts': ['genreadme=readme_generator.make_scaffold:main'],
    },
    version='0.5.4',
    description='Python package to assist developers with constructing README as project evolves.',
    # BUG FIX: setuptools expects `author` to be a single string; passing a
    # list produces malformed package metadata.
    author='Chelsea Dole, Matt Favoino, Darren Haynes, Chris Closser, Gabriel Meringolo',
    author_email='chelseadole@gmail.com',
    url='https://github.com/chelseadole/write-me',
    download_url='https://github.com/chelseadole/write-me/archive/0.5.4.tar.gz',
    keywords=['Python', 'README', 'PyPi', 'pip'],
    classifiers=[],
    install_requires=[
        "markdown_generator",
    ],
)
|
# coding: utf-8
# adding javascript and CSS support
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the site root by rendering templates/index.html."""
    return render_template("index.html")
# Run the Flask development server with the interactive debugger enabled.
if __name__ == '__main__':
    app.run(debug=True)
'''
in the template file,
A special endpoint ‘static’ is used to generate URL for static files.
The HTML script of ./templates/index.html
<html>
<head>
<script type = "text/javascript"
src = "{{ url_for('static', filename = 'hello.js') }}" ></script>
</head>
<body>
<input type = "button" onclick = "sayHello()" value = "Say Hello" />
</body>
</html>
a javascript function defined in hello.js is called
on OnClick event of HTML button in index.html,
which is rendered on ‘/’ URL of the Flask application.
./static/hello.js contains sayHello() function.
function sayHello() {
alert("Hello World")
}
''' |
import unittest
from katas.beta.sum_of_all_arguments import sum_all
class SumAllTestCase(unittest.TestCase):
    """Checks for katas.beta.sum_of_all_arguments.sum_all."""

    def _assert_sum(self, expected, *args):
        # Shared helper: sum_all(*args) must equal the expected total.
        self.assertEqual(sum_all(*args), expected)

    def test_equals(self):
        self._assert_sum(11, 6, 2, 3)

    def test_equals_2(self):
        self._assert_sum(769, 756, 2, 1, 10)

    def test_equals_3(self):
        self._assert_sum(79881, 76856, -32, 1981, 1076)

    def test_equals_4(self):
        self._assert_sum(-388, 7, -3452, 1981, 1076)

    def test_false(self):
        # A non-numeric argument makes sum_all return a falsy value.
        self.assertFalse(sum_all(1, -32, "codewars", 1076))
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2008-2011 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl/european-union-public-licence-eupl-v.1.1
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
import commands
import logging
#import optparse
import os
#import signal
import sys
import threading
import time
import traceback
# Short program name (argv[0] without directory or extension); used to
# prefix the error messages below.
__short_name__ = os.path.basename(os.path.splitext(sys.argv[0])[0])
# The wnodes modules are hard requirements: abort with a clear message
# instead of a bare traceback when any of them is missing.
try:
    from wnodes.utils import utils
except ImportError:
    sys.exit("%s: python module 'wnodes.utils' not found."
             % __short_name__)
try:
    from wnodes.utils import wsocket
except ImportError:
    sys.exit("%s: python module 'wnodes.utils' not found."
             % __short_name__)
try:
    from wnodes.cachemanager.database import database
except ImportError:
    sys.exit("%s: python module 'wnodes.cachemanager.database' not found."
             % __short_name__)
class CacheManager(wsocket.ClientRequestHandler):
# lock to protect the logging system
LOCK_LOG = threading.Lock()
# lock to protect the database management system
LOCK_DB = threading.Lock()
def __init__(self, config):
    """Build a CacheManager from the parsed configuration object.

    Copies configuration values into instance attributes, validates the
    key/certificate files, sets up rotating logging, and imports the batch
    system backend (PBS/Torque or LSF).

    NOTE:
    keypair is the keypair used to login via ssh to configure VM
    ('<prv_filename>:<pub_filename>')
    client_key is the filename of the .pem private key
    used for socket encryption
    client_cert is the filename of the .pem certificate
    corresponding to client_key used for socket encryption
    TODO:
    consider using the same key for each task (VM configuration/socket)
    """
    # --- cache manager tunables ---
    self._cm_conf__max_log_size = int(config._cm_conf__max_log_size)
    self._cm_conf__log_file = config._cm_conf__log_file
    self._cm_conf__cache_refresh_interval = float(config._cm_conf__cache_refresh_interval)
    self._cm_conf__cache_size = int(config._cm_conf__cache_size)
    self._cm_conf__timeout = float(config._cm_conf__timeout)
    # --- batch system settings ---
    self._batch_sys__queue = config._batch_sys__queue
    self._batch_sys__type = config._batch_sys__type
    self._batch_sys__user = config._batch_sys__user
    # --- network endpoints ---
    self._bait_conf__port = int(config._bait_conf__port)
    self._cm_conf__port = int(config._cm_conf__port)
    self._cm_conf__host = config._cm_conf__host
    self._ns_conf__host = config._ns_conf__host
    self._ns_conf__port = int(config._ns_conf__port)
    self._webapp_conf__host = config._webapp_conf__host
    # --- keys and certificates ---
    self._key_cert__ca_cert = config._key_cert__ca_cert
    self._key_cert__server_cert = config._key_cert__server_cert
    self._key_cert__server_key = config._key_cert__server_key
    # define private and public keys of cloud
    self._key_cert__cm_private_key = config._key_cert__cm_private_key
    self._key_cert__cm_public_key = config._key_cert__cm_public_key
    # default VM parameters, used when a request omits them
    self._default_vm_option = {}
    self._default_vm_option['IMG'] = config._default_vm_option__img
    self._default_vm_option['MEM'] = int(config._default_vm_option__mem)
    self._default_vm_option['STORAGE'] = \
        int(config._default_vm_option__storage)
    self._default_vm_option['CPU'] = int(config._default_vm_option__cpu)
    self._default_vm_option['BANDWIDTH'] = \
        int(config._default_vm_option__bandwidth)
    wsocket.ClientRequestHandler.__init__(self, None, None,
                                          key=None, cert=None, ca=None)
    # flag used to check if the cache is being refreshed
    self.refreshing_cache = False
    # flag used to kill the active threads on exit
    self.exit = False
    # Validate that every configured key/cert file actually exists
    # (empty string means "not configured" and is skipped).
    if (len(self._key_cert__cm_private_key) > 0
            and not os.path.isfile(self._key_cert__cm_private_key)):
        sys.exit('Specified private key file does not exist: %s'
                 % self._key_cert__cm_private_key)
    if (len(self._key_cert__cm_public_key) > 0
            and not os.path.isfile(self._key_cert__cm_public_key)):
        sys.exit('Specified public key file does not exist: %s'
                 % self._key_cert__cm_public_key)
    if (len(self._key_cert__server_key) > 0
            and not os.path.isfile(self._key_cert__server_key)):
        sys.exit('Specified private key file does not exist: %s'
                 % self._key_cert__server_key)
    # BUG FIX: this check previously re-tested server_key a second time;
    # it now validates the server certificate, matching its message.
    if (len(self._key_cert__server_cert) > 0
            and not os.path.isfile(self._key_cert__server_cert)):
        sys.exit('Specified public key file does not exist: %s'
                 % self._key_cert__server_cert)
    if (len(self._key_cert__ca_cert) > 0
            and not os.path.isfile(self._key_cert__ca_cert)):
        sys.exit('Specified public key file does not exist: %s'
                 % self._key_cert__ca_cert)
    # define which parameters are MANDATORY,
    # i.e. can't be configured run-time
    self.mandatoryList = ['IMG', 'MEM', 'STORAGE', 'BANDWIDTH', 'CPU']
    # define which parameters are CONFIGURABLE run-time
    self.configurableList = ['MOUNT', 'PUB_KEY']
    # define the list of possible statuses
    self.status_cache_pending = "CACHE_P"
    self.status_cache_ready = "CACHE_R"
    self.status_cache_available = "CACHE_A"
    self.status_cache_destroyed = "CACHE_D"
    self.status_pending = "PENDING"
    self.status_allocated = "ALLOC"
    self.status_destroyed = "DESTR"
    self.status_aborted = "ABORTED"
    self.status_unknown = "UNKNOWN"
    # set up logging (rotating, compressed, up to max_log_count backups)
    max_log_count = 5
    self.logging_default = logging.DEBUG
    self.cm_logger = logging.getLogger('CacheManagerLogger')
    self.cm_logger.setLevel(self.logging_default)
    fmt = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    handler = utils.CompressedRotatingFileHandler(
        self._cm_conf__log_file,
        maxBytes=self._cm_conf__max_log_size,
        backupCount=max_log_count)
    handler.setFormatter(fmt)
    self.cm_logger.addHandler(handler)
    # import batch_system module for the configured batch system type
    try:
        if (self._batch_sys__type.lower() == 'pbs' or
                self._batch_sys__type.lower() == 'torque'):
            try:
                from wnodes.utils.batch_pbs import PbsCommands
            # narrowed from a bare except to match the LSF branch below
            except ImportError:
                sys.exit("%s: python module 'batch' not found."
                         % __short_name__)
            self.batch_cmd = PbsCommands()
        elif self._batch_sys__type.lower() == 'lsf':
            try:
                from wnodes.utils.batch_lsf import LsfCommands
            except ImportError:
                sys.exit("%s: python module 'batch' not found."
                         % __short_name__)
            self.batch_cmd = LsfCommands('/etc/profile.d/lsf.sh')
        else:
            sys.exit('Batch system not supported')
    except KeyError:
        sys.exit('Variable of Batch System Type, ' +
                 'is not defined in the configuration array')
def init_database(self, db_session, compute_table, get_next_id):
    """Create the Database wrapper used by all later DB operations.

    TODO: quick and dirty development; can be improved.
    """
    self.database = database.Database(
        db_session, compute_table, get_next_id)
@utils.synchronized(LOCK_LOG)
def updateLog(self, msg, level=None):
    """
    Emit a string to the WNoDeS log file.
    Thread safe.
    """
    # Echo to stdout in addition to the log file (Python 2 print statement).
    print str(msg)
    if level is None:
        # default verbosity as configured in __init__ (logging.DEBUG)
        level = self.logging_default
    else:
        # map a symbolic level name to the logging module constant
        level = utils.LOG_LEVELS.get(level, logging.NOTSET)
    self.cm_logger.log(level, msg)
@utils.synchronized(LOCK_DB)
def add_element_to_db(self, obj):
    """
    Add an element to database.
    Thread safe.
    """
    # Stage the object in the session; callers persist it via db_commit().
    self.database.session.add(obj)
    return obj
@utils.synchronized(LOCK_DB)
def db_commit(self):
    """
    Commit changes to database.
    Thread safe.
    """
    self.database.session.commit()
def get_available_hosts(self):
    """
    Returns the list of hosts in status HOST_AVAILABLE.
    NOTE that an host AVAILABLE is not necessarily FREE!!
    """
    """
    NOTE: At the moment this function
    does NOT WORK as expected by WNoDeS architecture:
    CURRENT: ask to batch system for clouduser RUNNING jobs
    and ask to the corresponding bait for each job the status of each VM
    FUTURE: ask a list of cloud machines and their status to the Nameserver
    """
    #NOTE: each host is an array like: host = [ bait_name, jobid, vm_name ]
    hostslist = []
    # single bjobs call for all of the cloud user's jobs; [1] is its output
    output = self.batch_cmd.bjobs(user=self._batch_sys__user)[1]
    lines = output.split('\n')
    for line in lines:
        try:
            words = line.split()
            jobStatus = words[2]
            # the following variable will represent
            # the bait list to ask for available VM's
            # this is a trick to avoid multiple requests
            # to batch system for performance issues
            # baits_to_analyze = {
            #     '<bait1_name>':
            #         ['jobID1', 'jobID2', ...],
            #     '<bait2_name>':
            #         ['jobID1', 'jobID2', ...],
            #     ...}
            # NOTE(review): the dict is re-created on every line of bjobs
            # output, so it only ever holds the current job and the
            # batching "trick" described above never actually batches.
            baits_to_analyze = {}
            if jobStatus == 'RUN':
                if '-bait' in words[5]:
                    jobid = words[0]
                    bait = words[5]
                    try:
                        baits_to_analyze[bait].append(jobid)
                    except KeyError:
                        baits_to_analyze[bait] = [jobid]
            for bait in baits_to_analyze.keys():
                msg = {'getStatus': []}
                baitStatus = self.sendRequest(bait,
                                              self._bait_conf__port,
                                              msg)
                # presumably sendRequest returns (status, payload) pairs
                # where a falsy status means success, unwrapped twice:
                # transport level, then method level -- TODO confirm
                # against wsocket.
                if not baitStatus[0]:
                    baitStatus = baitStatus[1]
                    if not baitStatus[0]:
                        baitStatus = baitStatus[1]
                    else:
                        self.updateLog("Method error on request " +
                                       "%s to bait %s"
                                       % (str(msg), bait))
                else:
                    self.updateLog("Communication error requesting " +
                                   "the status to bait %s" % bait)
                # now check for hosts available
                # NOTE(review): this line runs even after the error
                # branches above, where baitStatus may not have the
                # expected shape; the bare excepts swallow the failure.
                VMStatus = baitStatus[1]
                for jobid in baits_to_analyze[bait]:
                    try:
                        if VMStatus[jobid][0] == 'HOST_AVAILABLE':
                            hostslist.append([bait, jobid,
                                              VMStatus[jobid][2]])
                    except:
                        self.updateLog("Exception caught while " +
                                       "retrieving VM status on " +
                                       "bait %s for job %s"
                                       % (bait, jobid))
        except:
            #this exception may happen for example
            # when bjobs doesn't find any job or lfs tells to wait
            pass
    return hostslist
def is_host_available(self, host):
    """
    Check if an host is AVAILABLE (i.e. is in status HOST_AVAILABLE)
    This function asks to the bait for the status of the given host
    return True if status is HOST_AVAILABLE or False otherwise
    """
    try:
        # host = [bait_name, jobid, vm_hostname]
        bait = host[0]
        jobid = host[1]
        #hostname = host[2]
        msg = {'getStatus': []}
        baitStatus = self.sendRequest(bait,
                                      self._bait_conf__port,
                                      msg)
        # presumably sendRequest returns (status, payload) pairs where a
        # falsy status means success, unwrapped twice (transport level,
        # then method level) -- TODO confirm against wsocket
        if not baitStatus[0]:
            baitStatus = baitStatus[1]
            if not baitStatus[0]:
                baitStatus = baitStatus[1]
                VMsInfo = baitStatus[1]
                VMStatus = VMsInfo[jobid][0]
                if VMStatus == 'HOST_AVAILABLE':
                    return True
                else:
                    return False
            else:
                err_msg = ("Method error on request %s to bait %s"
                           % (str(msg), bait))
                self.updateLog(err_msg, "error")
                return False
        else:
            err_msg = ("Communication error on request %s to bait %s"
                       % (str(msg), bait))
            self.updateLog(err_msg, "error")
            return False
    except:
        a, b, c = sys.exc_info()
        traceback.print_exception(a, b, c)
        # NOTE(review): the concatenation below is missing a space, so the
        # logged message reads "...host <X>is available..."
        err_msg = ("Exception while checking if host %s"
                   % host +
                   "is available, see stderr for details")
        self.updateLog(err_msg, "error")
        return False
def is_host_free(self, host):
    """
    Check if an host is FREE (i.e. the machine is not allocated to a user)
    """
    """
    NOTE: At the moment this function
    does not work as expected by WNoDeS architecture:
    CURRENT: login as root into VM
    and check for user connected using command 'users'
    FUTURE: check the VM status as given by Nameserver
    """
    output = []
    try:
        self.updateLog("IS HOST FREE")
        # ssh as root into the VM (host[2] is its hostname) and run
        # 'users'; host-key checking is disabled via the -o options.
        command_input = ('ssh -i %s '
                         % self._key_cert__cm_private_key +
                         '-o UserKnownHostsFile=/dev/null ' +
                         '-o StrictHostKeyChecking=no ' +
                         '-q root@%s users' % host[2])
        self.updateLog("SSH command " + command_input)
        # NOTE: the commands module is Python 2 only (removed in Python 3)
        output = commands.getoutput(command_input)
        output = output.split()
    except:
        # best effort: any ssh failure leaves output empty, i.e. "free"
        pass
    #print output
    # empty 'users' output means nobody is logged in on the VM
    if not len(output):
        return True
    else:
        return False
def get_vm_bait(self, vm_hostname):
    """
    Contacts Nameserver to retrieve the bait name for the given host
    """
    msg = {'whoIs_TheBait': [vm_hostname]}
    bait = self.sendRequest(self._ns_conf__host,
                            self._ns_conf__port,
                            msg)
    # presumably sendRequest returns (status, payload) pairs where a
    # falsy status means success, unwrapped twice -- same pattern as the
    # bait requests elsewhere in this class; TODO confirm against wsocket
    if not bait[0]:
        bait = bait[1]
        if not bait[0]:
            bait = bait[1]
        else:
            self.updateLog("Method error on request %s to nameserver %s"
                           % (str(msg), self._ns_conf__host))
            # fall back to a placeholder name on failure
            bait = "unknown_bait"
    else:
        self.updateLog("Communication error on request %s to nameserver %s"
                       % (str(msg), self._ns_conf__host))
        bait = "unknown_bait"
    return bait
def get_free_hosts(self):
    """
    Returns the list of hosts in status which should be in cache hosts.
    This function contacts the DB and looks for hosts in status
    self.status_cache_ready
    (SEE __init__ FUNCTION FOR ACTUAL VARIABLE VALUE)
    NOTE that an host AVAILABLE is not necessarily FREE!!
    """
    cached_vms = (self.database.session.query
                  (self.database.tables['Compute']).filter_by
                  (STATUS=self.status_cache_ready).all())
    # one [bait, jobid, hostname] triple per cached VM record
    return [[self.get_vm_bait(vm.HOSTNAME), str(vm.JOBID), vm.HOSTNAME]
            for vm in cached_vms]
def sync_cache(self):
    """
    This function is run at CM startup.
    It tries to deduce from batch system an WNoDeS the actual status
    of the VMs and synchronize the information with the DB
    """
    self.updateLog("Synchronizing cache", "info")
    # jobids collects every job confirmed to match a DB record; anything
    # the batch system reports outside this list is killed as an orphan.
    jobids = []
    # Control that VM in status CACHE_P on database
    # really correspond to an existing request
    vm_db_instances = (self.database.session.query
                       (self.database.tables['Compute']).filter_by
                       (STATUS=self.status_cache_pending).all())
    for vm_db_instance in vm_db_instances:
        try:
            jobid = str(vm_db_instance.JOBID)
            self.updateLog('Found a VM in status CACHE_P corresponding ' +
                           'to job %s in database' % jobid, 'info')
            lines = self.batch_cmd.bjobs(jobid=jobid)[1]
            lines = lines.split()
            # bjobs output tokens: [0] jobid, [1] owning user
            if lines[0] != str(jobid) or lines[1] != self._batch_sys__user:
                raise Exception
            jobids.append(jobid)
            self.updateLog('VM with jobid %s is synchronized with database'
                           % jobid, 'info')
        except:
            # NOTE(review): the bare except also hides errors raised
            # before jobid is assigned, in which case the message below
            # uses a stale/undefined jobid
            self.updateLog('Could not find a cloud VM corresponding ' +
                           'to jobid %s as indicated by db. ' % jobid +
                           'The status on db will be set to UNKNOWN',
                           'info')
            vm_db_instance.STATUS = self.status_unknown
    # Control that VM in status CACHE_R on database
    # really correspond to an existing VM
    vm_db_instances = (self.database.session.query
                       (self.database.tables['Compute']).filter_by
                       (STATUS=self.status_cache_ready).all())
    for vm_db_instance in vm_db_instances:
        jobid = str(vm_db_instance.JOBID)
        vm_hostname = vm_db_instance.HOSTNAME
        self.updateLog("Found a VM in status CACHE_R corresponding " +
                       "to job %s in database" % jobid, "info")
        try:
            lines = self.batch_cmd.bjobs(jobid=jobid)[1]
            lines = lines.split()
            bait = self.get_vm_bait(vm_hostname)
            free = self.is_host_free([bait, jobid, vm_hostname])
            # a cached VM must still be running, owned by the cloud user,
            # and have no interactive users logged in
            if (lines[0] != str(jobid).split('.')[0]
                    or lines[1] != self._batch_sys__user
                    or lines[2] != "RUN"
                    or not free):
                raise Exception
            jobids.append(jobid)
            self.updateLog("VM with jobid %s is synchronized with database"
                           % jobid, "info")
        except:
            self.updateLog("Could not find an available cloud VM " +
                           "corresponding to jobid %s " % jobid +
                           "as indicated by db. " +
                           "The status on db will be set to UNKNOWN",
                           "info")
            vm_db_instance.STATUS = self.status_unknown
    # Control that VM in status ALLOC on database
    # really correspond to an existing VM
    vm_db_instances = (self.database.session.query
                       (self.database.tables['Compute']).filter_by
                       (STATUS=self.status_allocated).all())
    for vm_db_instance in vm_db_instances:
        jobid = str(vm_db_instance.JOBID)
        vm_hostname = vm_db_instance.HOSTNAME
        self.updateLog("Found a VM in status ALLOC" +
                       "corresponding to job %s in database"
                       % jobid, "info")
        try:
            lines = self.batch_cmd.bjobs(jobid=jobid)[1]
            lines = lines.split()
            bait = self.get_vm_bait(vm_hostname)
            available = self.is_host_available([bait, jobid, vm_hostname])
            # an allocated VM must still be running and report
            # HOST_AVAILABLE from its bait
            if (lines[0] != str(jobid)
                    or lines[1] != self._batch_sys__user
                    or lines[2] != "RUN"
                    or not available):
                raise Exception
            jobids.append(jobid)
            self.updateLog("VM with jobid %s is synchronized with database"
                           % jobid, "info")
        except:
            self.updateLog("Could not find an available VM " +
                           "corresponding to jobid %s " % jobid +
                           "as indicated by db. " +
                           "The status on db will be set to DESTR and " +
                           "the corresponding job (if any) will be killed",
                           "info")
            self.batch_cmd.bkill(jobid, user=self._batch_sys__user)
            vm_db_instance.STATUS = self.status_destroyed
    # Manage PENDING request (at the moment the only way
    # is to destroy those requests!)
    self.updateLog("Looking for requests still pending", "info")
    vm_db_instances = (self.database.session.query
                       (self.database.tables['Compute']).filter_by
                       (STATUS=self.status_pending).all())
    for vm_db_instance in vm_db_instances:
        jobid = "NULL"
        try:
            jobid = str(vm_db_instance.JOBID)
        except:
            # record has no usable JOBID; the "NULL" default above is
            # immediately overwritten here
            jobid = None
        self.updateLog("Found a pending request with jobid %s: destroying"
                       % jobid, "info")
        self.batch_cmd.bkill(jobid, user=self._batch_sys__user)
        vm_db_instance.STATUS = self.status_aborted
    # persist all status changes made above
    self.db_commit()
    # Control that every job submitted by the cloud user
    # corresponds to a possible state of [CACHE_P, CACHE_R, ALLOC]
    self.updateLog("Looking for orphan jobs", "info")
    try:
        lines = self.batch_cmd.bjobs(user=self._batch_sys__user)[1]
        lines = lines.split('\n')
        for line in lines:
            line = line.split()
            jobid = line[0]
            # skip the bjobs header row ('JOBID ...') and the
            # "No unfinished job found" message; note `|` is a bitwise
            # or on booleans here -- it works, but `or` would be usual
            if (jobid == 'JOBID') | (jobid == 'No'):
                continue
            if jobid not in jobids:
                self.updateLog("Found orphan job with jobid %s, killing it"
                               % jobid, "info")
                self.batch_cmd.bkill(jobid, user=self._batch_sys__user)
    except:
        a, b, c = sys.exc_info()
        traceback.print_exception(a, b, c)
        self.updateLog("Error while checking for orphans jobs, " +
                       "see stderr for details. Continuing anyway", "info")
    self.updateLog("Cache synchronized", "info")
def refresh_cache(self):
    """
    This function is spawn in a separate thread inside main()
    it checks every self._cm_conf__cache_refresh_interval the cache status
    if the cache is too small:
    it submit the right number of parallel requests to WNoDeS
    if the cache is too big:
    it destroys the right number of VMs
    The status of the cache is controlled through the DB:
    Possible VM statuses on DB
    (SEE __init__ FUNCTION FOR ACTUAL STATUS VARIABLES VALUE):
    - self.status_cache_pending (cache_pending):
    The VM is being created
    and is supposed to be a cached VM once created
    - self.status_cache_ready (cache_ready):
    The VM is ready and is part of the cache.
    Actually, the CM looks for VM in this status
    to look for VMs which can be allocated to users
    - self.status_cache_available (cache_allocated):
    The VM WAS part of the cache and now it is allocated to an user
    (i.e. is no more part of the cache).
    For each DB record in this status, there should be another record
    with the same host infos (jobid, hostname, ecc) corresponding
    to the actual allocation to the user, in status
    "ALLOC/DESTR/ABORTED, ecc"
    NOTE that for this record the status of the VM remains
    self.status_cache_available even ater VM destruction
    - self.status_cache_destroyed (cache_destroyed):
    The VM is put in this status if it is destroyed
    because the cache was found too big
    - self.status_allocated:
    The VM is allocated to a user
    - self.status_pending:
    The VM is under creation
    - self.status_destroyed:
    The VM is destroyed
    - self.status_aborted:
    The VM creation was aborted for some reason
    - self.status_unknown:
    The VM status is unknown
    because the actual information fetched from BatchSystem and WNoDeS
    does not match the informations fetched from DB
    This functions is able to manage the pending requests through the DB:
    it looks for VM in status self.status_cache_pending on DB
    and assumes that these VM are being created.
    """
    start = 0
    # Main loop to check if the thread must exit
    while not self.exit:
        # Only do real work once per refresh interval
        if (time.time() - start) > self._cm_conf__cache_refresh_interval:
            start = time.time()
            self.refreshing_cache = True
            # NOTE(review): refreshing_cache is never reset to False in
            # this method -- confirm another component clears it.
            fhosts = self.get_free_hosts()
            db_instances = (self.database.session.query
                            (self.database.tables['Compute']).filter_by
                            (STATUS=self.status_cache_pending).all())
            pending = len(db_instances)
            # Total cache size is vm_available + vm_requested
            cache_size = len(fhosts) + pending
            self.updateLog("found %s VM's in cache, they should be %s"
                           % (cache_size, self._cm_conf__cache_size))
            # Manage cache size: exactly one of the three branches below
            # applies, so an if/elif/else chain replaces the original
            # three independent ifs.
            if cache_size < self._cm_conf__cache_size:
                # too few: submit one standard request per missing VM,
                # each in its own thread
                vm_number = self._cm_conf__cache_size - cache_size
                self.updateLog("too few VM in cache, " +
                               "submitting %s standard request(s)"
                               % vm_number)
                for _ in range(vm_number):
                    create_vm_thread = threading.Thread(
                        target=self.create_vm,
                        kwargs={'cache': True})
                    create_vm_thread.start()
            elif cache_size > self._cm_conf__cache_size:
                # too many: destroy the excess free VMs
                self.updateLog("too much VM in cache, " +
                               "destroying exceeding VMs")
                for i in range(cache_size - self._cm_conf__cache_size):
                    self.destroy_vm(fhosts[i], True)
            else:
                # exactly the configured size (typo "Chache" fixed)
                self.updateLog("Cache refreshed (%s pending requests)"
                               % pending)
        else:
            time.sleep(1)
def create_vm(self, requirements={}, cache=False, vm_db_instance=None):
    """
    Function used to create a VM with specified requirements.
    For mandatory parameters not specified in requirements,
    default ones will be used (see configfile)
    This function updates the variable self._requests
    to manage pending requests, through the thread safe functions
    It acts as follow:
    1) submit job
    2) use bjobs to periodically check if job is running
    3) when job is running, get the bait_name from bjobs output
    4) periodically query the bait to get vm status until is HOST_AVAILABLE
    5) when host is available return a structure like
       ['<bait_hostname>', '<jobid>', '<vm_hostname>']
    If the VM is not available in a time < timeout,
    the creation will be aborted
    If variable cache = True it means that:
    the VM to be created is supposed to be part of the cache.
    In this case the status of this VM will be updated in the DB during
    the creation, to make the function able to manage this pending request:
    self.refresh_cache
    (SEE __init__ FUNCTION FOR ACTUAL STATUS VARIABLES VALUE)
    The status on DB is updated as follow:
    1) creation of db_instance
    2) db_instance.STATUS = self.status_cache_pending (pending request)
    3) Once the job is submitted:
       db_instance.JOBID = <jobid>
    4) Once the VM is in status HOST_AVAILABLE:
       db_instance.STATUS = self.status_cache_ready;
       db_instance.HOSTNAME = <vm_hostname>
    5) If something goes wrong:
       db_instance.STATUS = self.status_aborted
    NOTE: Do NOT use
    cache = True
    for requests that are not supposed to go in the cache once ready
    (ex. requests submitted after a cache-miss request),
    OR the CM will think that the VM is in the cache!!
    """
    # NOTE(review): 'requirements={}' is a mutable default argument, and
    # 'options' below aliases self._default_vm_option, so the per-request
    # overrides mutate the shared defaults dict -- confirm this is intended.
    newhost = []
    # options = {}
    options = self._default_vm_option
    # Override the default parameters
    # with the ones specified in requirements
    for key in requirements.keys():
        # insert here a control on keys,
        # something like if key not in ['IMAGE', 'RAM', ecc]
        options[key] = requirements[key]
    self.updateLog("---- Creating VM with options: %s"
                   % str(options), 'info')
    # For cached VMs, create the DB record up front so refresh_cache can
    # count this request among the pending ones.
    if cache:
        vm_db_instance = self.add_element_to_db(
            self.database.tables['Compute'](
                # ID=self.database.get_next_id
                # (self.database.tables['Compute']),
                ARCH="x86_64",
                MEMORY=str(options['MEM']),
                NAME="NO_NAME",
                THROUGHPUT=str(options['BANDWIDTH']),
                STATUS=self.status_pending,
                UUID=str(utils.guid()),
                CORES=str(options['CPU']),
                #DB_ADDED
                IMG_TAG="NULL",
                #BASENAME="NULL",
                DATE="NULL",
                OS="NULL",
                PUBLICKEY="NULL",
                PUID="NULL",
                #QUANT="NULL",
                STORAGE="NULL",
                TIMESTAMP="NULL",
                SSH="NULL"
                # WARNING the following columns can be useful
                #DB_MOD_TAG
                #TYPE="CLOUD",
                #SESSION_START_TIME=str(time.time())
            )
        )
    # Update the status on db to PENDING
    # (only for requests which must create a cached VM)
    if cache:
        vm_db_instance.STATUS = self.status_cache_pending
        self.db_commit()
    # Create the wrapper for LSF or PBS: a long-sleeping job whose
    # directives/variables are read by WNoDeS to instantiate the VM.
    wrapperFile = '/tmp/bsubwrapper_%s' % str(time.time())
    output = open(wrapperFile, 'a')
    if self._batch_sys__type == 'lsf':
        output.write("#!/bin/bash \n")
        output.write("# LSF directives: \n")
        output.write("#BSUB -L /bin/bash \n")
        output.write("#BSUB -q %s \n" % self._batch_sys__queue)
        output.write("while [[ 1 ]]; do \n")
        output.write(" /bin/sleep 10d \n")
        output.write("done \n")
        output.write("##### WNoDeS variable definition ##### \n")
        output.write("# WNoDeS_VM_TYPE:CLOUD\n")
        output.write("# WNoDeS_VM_NETWORK_TYPE:OPEN\n")
        output.write("# WNoDeS_VM_IMG:" + options['IMG'] + "\n")
        output.write("# WNoDeS_VM_MEM:" + str(options['MEM']) + "\n")
        output.write("# WNoDeS_VM_STORAGE:" +
                     str(options['STORAGE']) + "\n")
        output.write("# WNoDeS_VM_BANDWIDTH:" +
                     str(options['BANDWIDTH']) + "\n")
        output.write("# WNoDeS_VM_CPU:" + str(options['CPU']) + "\n")
        # if len(options['MOUNT']) > 1:
        #     i = 0
        #     for MOUNT in options['MOUNT'].split(','):
        #         MPARAM = MOUNT.split(':')
        #         output.write("# WNoDeS_VM_CONFIG_MOUNT_%s" % str(i) +
        #                      "-MOUNT:%s:%s %s\n"
        #                      % (MPARAM[0], MPARAM[1], MPARAM[2]))
        #         i = i + 1
        # else:
        #     output.write("# WNoDeS_VM_CONFIG_MOUNT_111-MOUNT:" +
        #                  "/dev/vdb /mnt\n")
        pub_file = open(self._key_cert__cm_public_key, 'r')
        PUB_KEY_STRING = pub_file.readline().strip()
        output.write("# WNoDeS_VM_CONFIG_SSH_PUBKEY:%s\n" % PUB_KEY_STRING)
        output.write("##### END WNoD variable definition ##### \n")
        output.write("##### WNoDeS end variable definition ##### \n")
    elif (self._batch_sys__type == 'pbs'
          or self._batch_sys__type == 'torque'):
        # For PBS/torque the WNoDeS variables are passed on the submit
        # command line (see command_submit below), not in the wrapper.
        output.write("#!/bin/bash \n")
        output.write("# PBS directives: \n")
        output.write("#PBS -q %s \n" % self._batch_sys__queue)
        output.write("while [[ 1 ]]; do \n")
        output.write(" /bin/sleep 10d \n")
        output.write("done \n")
        pub_file = open(self._key_cert__cm_public_key, 'r')
        PUB_KEY_STRING = pub_file.readline().strip()
    output.close()
    bait = ""
    timeout = self._cm_conf__timeout
    elapsed = 0.0  # min
    start = time.time()
    ready = False
    # Outer loop: (re)submit the job and poll until the VM is ready,
    # the timeout expires, or the manager is shutting down.
    while ((not ready)
           and (elapsed < timeout)
           and (not self.exit)):
        elapsed = float(time.time() - start) / 60.0
        if self._batch_sys__type == 'lsf':
            # self.updateLog('batch type %s; option %s; suer %s' %
            #                (self.batch_sys, wrapperFile,
            #                 self._batch_sys__user),"debug")
            bsub_out = self.batch_cmd.bsub(wrapperFile,
                                           user=self._batch_sys__user)
        elif (self._batch_sys__type == 'pbs'
              or self._batch_sys__type == 'torque'):
            command_submit = (wrapperFile + " -v WNoDeS_VM_TYPE=\"CLOUD" +
                              "\",WNoDeS_VM_NETWORK_TYPE=\"OPEN\"," +
                              "WNoDeS_VM_IMG=\"" +
                              options['IMG'] +
                              "\",WNoDeS_VM_MEM=\"" +
                              str(options['MEM']) +
                              "\",WNoDeS_VM_STORAGE=\"" +
                              str(options['STORAGE']) +
                              "\",WNoDeS_VM_BANDWIDTH=\"" +
                              str(options['BANDWIDTH']) +
                              "\",WNoDeS_VM_CPU=\"" +
                              str(options['CPU']) +
                              "\",WNoDeS_VM_CONFIG_SSH_PUBKEY=\"" +
                              PUB_KEY_STRING +
                              "\"")
            bsub_out = self.batch_cmd.bsub(command_submit,
                                           user=self._batch_sys__user)
        self.updateLog(bsub_out, 'debug')
        # bsub_out[0] is the submit exit status: non-zero means failure.
        if bsub_out[0]:
            self.updateLog('bsub failed! Aborting VM creation', "error")
            self.updateLog('bsub_out: %s' % bsub_out[1], "debug")
            if vm_db_instance:
                vm_db_instance.STATUS = self.status_aborted
                self.db_commit()
            return newhost
        #print bsub_out
        jobid = ""
        """
        WARNING!! the following code uses the string:
        "is submitted to queue"
        to identify the line specifing the jobid,
        so it may not work for batch systems different from LSF!
        """
        #self.updateLog(bsub_out, "debug")
        if self._batch_sys__type == 'lsf':
            for line in bsub_out[1].split("\n"):
                if "is submitted to queue" in line:
                    # Second token is '<jobid>'; strip the surrounding
                    # angle brackets / quotes with the slice below.
                    jobid = line.split()[1]
                    jobid = jobid[1:len(jobid) - 1]
        elif (self._batch_sys__type == 'pbs'
              or self._batch_sys__type == 'torque'):
            for line in bsub_out[1].split("\."):
                jobid = line.split()[0]
        if vm_db_instance:
            vm_db_instance.JOBID = str(jobid)
            time.sleep(1)
            self.db_commit()
            time.sleep(1)
        self.updateLog('My jobid is: %s' % jobid)
        self.updateLog("Waiting for job running")
        jobRunning = False
        # Loop to wait for job running
        while ((elapsed < timeout)
               and (not jobRunning)
               and (not self.exit)):
            elapsed = float(time.time() - start) / 60.0
            lines = self.batch_cmd.bjobs(jobid=jobid)[1]
            lines = lines.split('\n')
            for line in lines:
                line = line.split()
                try:
                    if line[0] in jobid:
                        # Column 5 is the execution host; keep only the
                        # short hostname (drop '/<slot>' and the domain).
                        hostname_new = line[5].split('/')[0].split('.')[0]
                        if hostname_new == 'Empty':
                            bait = 'unknown_bait'
                        else:
                            bait = self.get_vm_bait(hostname_new)
                        #self.updateLog("jobid %s for the bait is ---- for line %s : %s" % (jobid, hostname_new, bait))
                        if ((line[2] == 'RUN')
                                and (hostname_new == bait)):
                            jobRunning = True
                            self.updateLog("job running on bait: %s" % bait)
                            self.updateLog("Waiting for VM available")
                            VMReady = False
                            # Loop to wait for HOST_AVAILABLE
                            max_request_retries = 24
                            retries = 1
                            lastStatus = "NULL"
                            while ((not VMReady)
                                   and (elapsed < timeout)
                                   and (not self.exit)):
                                elapsed = float(time.time() - start) / 60.0
                                msg = {'getStatus': []}
                                baitStatus = (self.sendRequest
                                              (bait,
                                               self._bait_conf__port,
                                               msg))
                                # sendRequest wraps the reply twice:
                                # [comm_err, [method_err, payload]].
                                if not baitStatus[0]:
                                    baitStatus = baitStatus[1]
                                    if not baitStatus[0]:
                                        baitStatus = baitStatus[1]
                                    else:
                                        self.updateLog("Method error " +
                                                       "on request " +
                                                       "%s to bait %s"
                                                       % (str(msg), bait))
                                        time.sleep(5)
                                        continue
                                else:
                                    self.updateLog("Communication error" +
                                                   "requesting " +
                                                   "the status to bait %s"
                                                   % bait)
                                    time.sleep(5)
                                    continue
                                # Now check if host is in status HOST_AVAILABLE
                                VMStatus = None
                                try:
                                    VMStatus = baitStatus[1][jobid]
                                except:
                                    self.updateLog("Bait %s does not know "
                                                   % bait +
                                                   "the status of VM with " +
                                                   "jobid %s. (%s/%s)"
                                                   % (jobid, retries,
                                                      max_request_retries))
                                    retries += 1
                                    if retries >= max_request_retries:
                                        # Give up querying this bait and go
                                        # back to checking the job status.
                                        self.updateLog("Max limit of status " +
                                                       "requests reached, " +
                                                       "checking again the " +
                                                       "status of job %s"
                                                       % jobid, "info")
                                        jobRunning = False
                                        time.sleep(5)
                                        break
                                    else:
                                        time.sleep(5)
                                        continue
                                try:
                                    #lastStatus = "NULL"
                                    self.updateLog("START CHECKING Status %s " % VMStatus[0])
                                    if VMStatus[0] == 'HOST_AVAILABLE':
                                        newhost = [bait, jobid, VMStatus[2]]
                                        if cache:
                                            vm_db_instance.HOSTNAME = \
                                                newhost[2]
                                            vm_db_instance.STATUS = \
                                                self.status_cache_ready
                                            self.db_commit()
                                        self.updateLog("The new host is " +
                                                       "available: %s"
                                                       % newhost)
                                        VMReady = True
                                        ready = True
                                        break
                                    elif VMStatus[0] == 'UNKNOWN':
                                        self.updateLog("Unable to retrieve " +
                                                       "the status of VM with jobid %s, "
                                                       % jobid +
                                                       "retrying in 5 seconds",
                                                       "info")
                                        time.sleep(5)
                                    else:
                                        # Log intermediate states only on
                                        # transition to avoid log flooding.
                                        currentStatus = VMStatus[0]
                                        if currentStatus != lastStatus:
                                            lastStatus = currentStatus
                                            self.updateLog("The VM with jobid "
                                                           + "%s is in status:"
                                                           % jobid
                                                           + " %s."
                                                           % currentStatus,
                                                           "info")
                                        time.sleep(5)
                                except:
                                    a, b, c = sys.exc_info()
                                    traceback.print_exception(a, b, c)
                                    self.updateLog("Exception caught while " +
                                                   "retrieving VM status on " +
                                                   "bait %s for job %s, "
                                                   % (bait, jobid) +
                                                   "retrying in 5 seconds",
                                                   "info")
                                    time.sleep(5)
                        else:
                            time.sleep(15)
                except:
                    a, b, c = sys.exc_info()
                    traceback.print_exception(a, b, c)
                    self.updateLog("Unable to get the job status," +
                                   "retrying in 5 seconds")
                    jobRunning = False
                    time.sleep(5)
    else:
        # while-else: the loop ended without the VM becoming ready;
        # clean up the job and mark the DB record as aborted.
        if self.exit:
            self.batch_cmd.bkill(jobid, user=self._batch_sys__user)
            if vm_db_instance:
                vm_db_instance.STATUS = self.status_aborted
                self.db_commit()
            self.updateLog("VM creation aborted! No VM created")
        if elapsed > timeout:
            self.batch_cmd.bkill(jobid, user=self._batch_sys__user)
            if vm_db_instance:
                vm_db_instance.STATUS = self.status_aborted
                self.db_commit()
            self.updateLog("Request Timed Out!! No VM created")
    # Return the newhost
    return newhost
def get_host_info(self, host):
    """
    Function used to get the hardware informations for a specific host
    host must be in the format of: ['<bait_name>', '<jobid>', '<vm_name>']
    it queries the bait to get the informations about this VM
    Returns a dictionary of hardware info (values stringified),
    or an empty dictionary when the VM is not known to the bait.
    """
    hw_info = {}
    bait = host[0]
    jobid = host[1]  # NOTE(review): jobid is never used below -- confirm
    vm_host = host[2]
    msg = {'getStatus': []}
    baitStatus = self.sendRequest(bait, self._bait_conf__port, msg)
    # sendRequest wraps the reply twice: [comm_err, [method_err, payload]].
    if not baitStatus[0]:
        baitStatus = baitStatus[1]
        if not baitStatus[0]:
            baitStatus = baitStatus[1]
        else:
            self.updateLog("Method error on request %s to bait %s"
                           % (str(msg), bait))
    else:
        self.updateLog("Communication error " +
                       "requesting the status to bait %s"
                       % bait)
    # NOTE(review): on the error paths above baitStatus keeps its error
    # shape, so the [2] access below can fail -- the try below masks it.
    vm_list = baitStatus[2]
    try:
        # output[2] is a dictionary containing
        # all the VM managed by the specified bait
        for key in vm_list.keys():
            #vm_hw[1] is the hostname of that virtual machine
            if vm_list[key][1] == vm_host:
                # vm_hw[3] is a dictionary containing
                # the hardware information that virtual machine
                hw_info = vm_list[key][3]
    except:
        self.updateLog("Unable to get info for host %s on bait %s"
                       % (vm_host, bait))
    # Stringify all values before returning them
    for key in hw_info.keys():
        hw_info[key] = str(hw_info[key])
    return hw_info
def extractMandatoryParameters(self, requirements):
    """
    Return a dictionary containing only the MANDATORY parameters
    found in 'requirements'.
    The mandatory parameter names are listed in self.mandatoryList
    (see class constructor).
    If any mandatory key is missing from 'requirements', an empty
    dictionary is returned.
    """
    extracted = {}
    for name in self.mandatoryList:
        # All-or-nothing: a single missing mandatory key empties the result.
        if name not in requirements:
            return {}
        extracted[name] = requirements[name]
    return extracted
def extractConfigurableParameters(self, requirements):
    """
    Return a dictionary containing only the CONFIGURABLE parameters
    found in 'requirements'.
    The configurable parameter names are listed in self.configurableList
    (see class constructor).
    Missing configurable keys are simply skipped.
    """
    configurableParameters = {}
    for key in self.configurableList:
        try:
            configurableParameters[key] = requirements[key]
        # Catch only the expected missing-key error: the original bare
        # 'except:' would also hide real bugs and KeyboardInterrupt.
        except KeyError:
            pass
    return configurableParameters
def host_matches_requirements(self, requirements, hw_info):
    """
    Check whether the mandatory parameters of 'hw_info' satisfy the
    mandatory parameters of 'requirements'.
    Returns True only when both sides expose the full mandatory set
    and the two extracted sets are identical, False otherwise.
    """
    mandatoryRequested = self.extractMandatoryParameters(requirements)
    mandatoryAvailable = self.extractMandatoryParameters(hw_info)
    # An empty extraction means that side is missing mandatory keys.
    if not mandatoryRequested or not mandatoryAvailable:
        return False
    # Direct dict comparison replaces the redundant if/else on True/False.
    return mandatoryRequested == mandatoryAvailable
def configure_vm(self, hostname, configurableParameters):
    """
    This function acts the same way of the one in hypervisor code.
    Consider to configure the VM recalling the same function on hypervisor
    Returns [0, <plugins output>] on success,
    or [1, <failed component / output>] on the first failing component.
    """
    # NOTE: we should configure VM by a request to HyperVisor??
    # Collect the per-component configuration from keys named
    # 'VM_CONFIG_<COMPONENT>' (e.g. VM_CONFIG_SSH -> 'SSH').
    CONFIG_COMPONENTS = {}
    for key in configurableParameters.keys():
        if 'VM_CONFIG_' in key:
            COMPONENT = key.split('_')[2]
            CONFIG_COMPONENTS[COMPONENT] = configurableParameters[key]
    PLUGINS_OUTPUT = ""
    for COMPONENT in CONFIG_COMPONENTS.keys():
        print "----- Configuring component: %s" % COMPONENT
        PLUGIN = ""
        try:
            # Dynamically import the per-component plugin module
            # (Python 2 'exec' statement).
            cmd = ('from wnodes.utils.plugins import plugin_%s as PLUGIN'
                   % COMPONENT.lower())
            exec cmd
            PLUGIN_RETURN_OUTPUT = PLUGIN.handlePlugin(
                self._key_cert__cm_private_key,
                hostname,
                CONFIG_COMPONENTS[COMPONENT])
            PLUGINS_OUTPUT += str(PLUGIN_RETURN_OUTPUT)
            # A plugin result starting with 1 means that plugin failed.
            if PLUGIN_RETURN_OUTPUT[0] == 1:
                return [1, PLUGINS_OUTPUT]
        except:
            # Missing plugin module or plugin crash: report the component.
            err_msg = ('Plugin missing to configure the component: %s'
                       % COMPONENT)
            a, b, c = sys.exc_info()
            traceback.print_exception(a, b, c)
            self.updateLog(err_msg, "error")
            return [1, COMPONENT]
    return [0, PLUGINS_OUTPUT]
def _get_first_requested_host(self, *args):
    """
    This function controls the cache to get
    the first free host matching requested req
    if a host is found, it directly return it.
    if no host is found it submit a request (and the user must wait...)
    NOTE that since this function
    uses above functions to manage and check the cache,
    the architecture is still not the right one
    (see for example:
    self.get_available_hosts or self.is_host_free)
    Returns [0, ['<bait>', '<jobid>', '<vm_hostname>']] on success,
    [1, <error/placeholder>] on failure.
    """
    parameters = args[0]
    req = {}
    first_host = []
    # parameters[0] must be the dictionary with the req
    try:
        req = parameters[0]
    except:
        # Fall back to the configured defaults when no dict is given.
        req = self._default_vm_option
    mandatoryParams = self.extractMandatoryParameters(req)
    if not len(mandatoryParams):
        err_msg = ('Cannot extract mandatory parameters from ' +
                   '%s!!' % str(req))
        self.updateLog(err_msg, "error")
        return [1, err_msg]
    configurableParameters = self.extractConfigurableParameters(req)
    """
    Now change configurableParameters in the form used by utils plugins
    """
    # PUBKEY: PUB_KEY -> VM_CONFIG_SSH={'PUBKEY': ...}
    try:
        if configurableParameters['PUB_KEY'] != "":
            SSH_CONFIG = {}
            SSH_CONFIG['PUBKEY'] = configurableParameters['PUB_KEY']
            configurableParameters['VM_CONFIG_SSH'] = SSH_CONFIG
            del configurableParameters['PUB_KEY']
    except KeyError:
        pass
    # MOUNT: 'dev:mp:opt,...' -> VM_CONFIG_MOUNT={'MOUNT_i-MOUNT': ...}
    try:
        MOUNT_CONFIG = {}
        i = 0
        for MOUNT in configurableParameters['MOUNT'].split(','):
            MPARAM = MOUNT.split(':')
            MOUNT_CONFIG["MOUNT_%s-MOUNT" % str(i)] = \
                "%s:%s %s" % (MPARAM[0], MPARAM[1], MPARAM[2])
            i += 1
        configurableParameters['VM_CONFIG_MOUNT'] = MOUNT_CONFIG
        del configurableParameters['MOUNT']
    except KeyError:
        pass
    print ("----- POST-Adapted configurable parameters are: %s"
           % configurableParameters)
    """
    now mandatoryParams contains ONLY the list of hardware req
    and configurableParameters contains ONLY the list of configurable
    parameters in a form directly usable by the plugin modules
    """
    # create the corresponding DB record
    vm_db_instance = None
    """
    now manage the web-app specific options
    """
    webrequestParameters = {}
    if 'VM_TOKEN' in req.keys():
        try:
            webrequestParameters['VM_TOKEN'] = req['VM_TOKEN']
            webrequestParameters['VM_NAME'] = req['VM_NAME']
            # For webapp request, the record on the DB
            # is created by the webapp and uniquely identified by the token
            vm_db_instance = (
                self.database.session.query
                (self.database.tables['Compute']).filter_by
                (TOKEN=webrequestParameters['VM_TOKEN']).first())
            self.db_commit()
            if not vm_db_instance:
                self.updateLog("Could find record on Database for VM " +
                               "with token %s"
                               % webrequestParameters['VM_TOKEN'],
                               "error")
                newhost = ['unexistent_bait', '-1', 'unexistent_vm']
                return [1, newhost]
            #vm_db_instance.TOKEN = webrequestParameters['VM_TOKEN']
            #vm_db_instance.NAME = webrequestParameters['VM_NAME']
            ##vm_db_instance.TYPE = "CLOUD_WEB" #DB_MOD_TAG
        except KeyError:
            self.updateLog("One or more needed key " +
                           "for WEB request missing!!",
                           "error")
            newhost = ['unexistent_bait', '-1', 'unexistent_vm']
            return [1, newhost]
    else:
        # For VIP request, we need to create the DB record
        vm_db_instance = self.add_element_to_db(
            self.database.tables['Compute'](
                ARCH="x86_64",
                MEMORY=str(req['MEM']),
                NAME="NO_NAME",
                THROUGHPUT=str(req['BANDWIDTH']),
                STATUS=self.status_pending,
                UUID=str(commands.getoutput('uuidgen')),
                CORES=str(req['CPU']),
                #DB_ADDED
                IMG_TAG="NULL",
                #BASENAME="NULL",
                DATE="NULL",
                OS="NULL",
                PUBLICKEY="NULL",
                PUID="NULL",
                #QUANT = "NULL",
                STORAGE="NULL",
                TIMESTAMP="NULL",
                SSH="NULL"
                # WARNING the following columns can be useful
                #DB_MOD_TAG
                #TYPE="CLOUD",
                #SESSION_START_TIME = str(time.time())
            )
        )
        #vm_db_instance.TYPE = "CLOUD_VIP" #DB_MOD_TAG
        vm_db_instance.STATUS = self.status_pending
        self.db_commit()
    """
    now look for compatible VM in cache
    """
    #ahosts = self.get_available_hosts()
    #for host in ahosts:
    #    if (self.is_host_free(host)):
    fhosts = self.get_free_hosts()
    for host in fhosts:
        hw_info = self.get_host_info(host)
        # NOTE that one can also use full 'req'
        # istead of 'mandatoryParams',
        # because 'host_matches_requirements'
        # automatically extracts mandatory parameters
        if self.host_matches_requirements(mandatoryParams, hw_info):
            self.updateLog("Found a free host in cache: %s"
                           % str(host))
            cached_vm_db_instance = (self.database.session.query
                                     (self.database.tables['Compute']).filter_by
                                     (JOBID=host[1]).filter_by
                                     (HOSTNAME=host[2]).first())
            cached_vm_db_instance.STATUS = self.status_cache_available
            self.db_commit()
            # Refill the cache in background with an equivalent VM.
            create_vm_thread = threading.Thread(target=self.create_vm,
                                                args=(mandatoryParams,
                                                      True))
            create_vm_thread.start()
            first_host = host
            break
    # if no matching VM is found, submit a creation request
    if not len(first_host):
        self.updateLog("No free host found in cache, " +
                       "need to create a new one (this may take a while)")
        newhost = self.create_vm(mandatoryParams)
        if not len(newhost):
            self.updateLog("Unable to create the requested host!")
            vm_db_instance.STATUS = self.status_aborted
            self.db_commit()
            newhost = ['unexistent_bait', '-1', 'unexistent_vm']
            return [1, newhost]
        else:
            self.updateLog("This is your new host: %s" % str(newhost[1]))
            first_host = newhost
    """
    Will now configure the VM using plugins
    """
    print ("Will now configure VM with parameters: %s"
           % configurableParameters)
    setupResult = self.configure_vm(first_host[2], configurableParameters)
    if setupResult[0]:
        err_msg = ("Unable to configure components: %s"
                   % str(setupResult[1]))
        self.updateLog(err_msg, "error")
        return [1, err_msg]
    if len(webrequestParameters) > 0:
        # execute PUT to web-app to notify the allocated hostname
        curl_command_string = ('curl' +
                               ' -X PUT' +
                               ' -d "token=%s&hostname=%s"'
                               % (webrequestParameters['VM_TOKEN'],
                                  first_host[2]) +
                               ' -H "Content-Type: ' +
                               'application/x-www-form-urlencoded" ' +
                               self._webapp_conf__host
                               + ':8080/grid-cloud-0.0.1/jaxrs/compute/%s'
                               % webrequestParameters['VM_NAME'])
        print curl_command_string
        commands.getstatusoutput(curl_command_string)
    #TODO: should now update database!
    # Mark the VM as allocated to the requesting user.
    vm_db_instance.HOSTNAME = first_host[2]
    vm_db_instance.JOBID = first_host[1]
    vm_db_instance.STATUS = self.status_allocated
    self.db_commit()
    return [0, first_host]
def get_first_requested_host(self, *ARGS):
"""
This is a dummy function to manage the requests,
coming from webapp and VIP in a diffrent way.
To see the actual code for first requested host,
see _get_first_requested_host
"""
requirements = {}
try:
requirements = ARGS[0][0]
except:
error_msg = "Error fetching parameters from args: %s" % ARGS
self.updateLog(error_msg, "error")
return [1, error_msg]
"""
Distinguish the requests coming from webapp or from VIP
"""
if 'VM_TOKEN' in requirements.keys():
"""
Request coming from webapp, the actual request will be managed
in a separate thread to avoid blocking socket on webapp
(ask Daniele Andreotti fo details)
a generic and immediate response will be given
"""
get_first_requested_host_thread = \
(threading.Thread
(target=self._get_first_requested_host,
args=ARGS))
get_first_requested_host_thread.start()
dummy_host = ["dummy_bait", "-1", "dummy_hostname"]
return [0, dummy_host]
else:
"""
Request coming from VIP
the socket will remain opened
until the actual request is resolved
"""
response = self._get_first_requested_host(*ARGS)
return response
def destroy_vm(self, host, cache=False):
    """
    Destroy the given host by killing the corresponding batch job.
    host is ['<bait_hostname>', '<jobid>', '<vm_hostname>']
    If cache=True the corresponding DB record is marked
    with status self.status_cache_destroyed.
    Returns the bkill exit code (0 on success).
    """
    self.updateLog("will now destroy the VM %s" % host)
    kill_output = self.batch_cmd.bkill(host[1], user=self._batch_sys__user)
    # Give the batch system some time to process the kill request.
    time.sleep(5)
    if kill_output[0] != 0:
        self.updateLog("Unable to kill the job %s with error: %s"
                       % (host[1], kill_output[1]))
    if cache:
        try:
            vm_db_instance = (self.database.session.query
                              (self.database.tables['Compute']).filter_by
                              (JOBID=host[1]).filter_by
                              (HOSTNAME=host[2]).first())
            vm_db_instance.STATUS = self.status_cache_destroyed
            self.db_commit()
        # first() returns None when no record matches, making the STATUS
        # assignment raise; narrowed from a bare 'except:' so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        except Exception:
            self.updateLog("Could not find a record corresponding " +
                           "to host %s on db!" % str(host),
                           "error")
    return kill_output[0]
def request_vm_destroy(self, *args):
    """
    This function can be recalled via socket to destroy a VM
    using self.destroy_vm and updating the database.
    args[0][0] must be a dictionary like:
    {'BAIT': <bait_hostname>,
     'JOBID': <jobid>,
     'VM_HOSTNAME': <vm_hostname>}
    optionally carrying 'UUID' (used to clean up aborted VMs whose
    BAIT/JOBID are unknown).
    Returns [0, <msg>] on success, [1, <msg>] on failure.
    """
    dictio = args[0][0]
    try:
        bait = dictio['BAIT']
        jobid = dictio['JOBID']
        vm_hostname = dictio['VM_HOSTNAME']
    except (KeyError, TypeError):
        self.updateLog("Dictionary for vm destruction " +
                       "is not formatted as expected! needed: " +
                       "{'BAIT': <bait_hostname>, " +
                       "'JOBID': <jobid>, " +
                       "'VM_HOSTNAME': <vm_hostname>, " +
                       " got: %s" % dictio)
        return [1, "Unable to destroy VM"]
    # Use boolean 'and' instead of bitwise '&' on comparison results.
    if ('UUID' in dictio
            and bait == 'unknown'
            and jobid == 'UNKNOWN'):
        uuid = dictio['UUID']
        # case with (BAIT == 'unknown') & (JOBID== 'UNKNOWN')
        # used to remove ABORTED VMs
        vm_db_instance = (self.database.session.query
                          (self.database.tables['Compute']).filter_by
                          (UUID=uuid).all()[-1])
        vm_db_instance.STATUS = self.status_destroyed
        self.updateLog("VM_HOSTNAME %s set as destroyed," % vm_hostname
                       + " because BAIT or JOBID are 'unknown'", "info")
        self.database.session.commit()
        return [0, "VM %s successfully destroyed" % vm_hostname]
    else:
        host = [bait,
                jobid,
                vm_hostname]
        # BUG FIX: the original passed self.destroy_vm(host) as 'target',
        # which ran the destruction synchronously and handed Thread its
        # return value; pass the callable and its argument instead.
        destroy_vm_thread = threading.Thread(target=self.destroy_vm,
                                             args=(host,))
        destroy_vm_thread.start()
        # BUG FIX: .all()[-1] raised IndexError when no record matched;
        # take the last matching record or None.
        records = (self.database.session.query
                   (self.database.tables['Compute']).filter_by
                   (JOBID=jobid).all())
        vm_db_instance = records[-1] if records else None
        if vm_db_instance:
            vm_db_instance.STATUS = self.status_destroyed
            # BUG FIX: the original never committed on this branch, so the
            # status change could be lost; dead code after the return
            # (referencing an undefined 'exit_status') has been removed.
            self.database.session.commit()
        else:
            self.updateLog("could not find a database record with JOBID " +
                           " = %s" % jobid, "error")
        return [0, "VM %s successfully destroyed" % host]
def exit_function(self, sig=None, data=None):
    """
    This function is called at the end of the process
    it is called when ^C is pressed or when SIGTERM is catched
    SIGTERM is often spawned by the init scipt with command 'stop'
    (i.e. service wnodes_cachemanager stop)
    it waits until all threads are finished and then exit
    if timeout is reached the exit is forced
    """
    self.updateLog("shutting down cache manager (this may take some time)")
    self.exit = True
    # Will now wait until all thread exit (with timeout)
    start = time.time()
    timeout = 600  # seconds
    elapsed = 0
    canExit = False
    # Exclude the current (main) thread explicitly instead of assuming it
    # is the first element returned by threading.enumerate().
    thread_list = [th for th in threading.enumerate()
                   if th is not threading.current_thread()]
    while elapsed < timeout and not canExit:
        elapsed = time.time() - start
        canExit = True
        for th in thread_list:
            try:
                if th.is_alive():
                    #cm.updateLog("Thread %s is still alive!" % th.getName)
                    canExit = False
            except Exception:
                # BUG FIX: the original logged 'th.getName' (the bound
                # method object) instead of calling it.
                self.updateLog("Exception for thread %s!" % th.getName())
        time.sleep(1)
    if not canExit:
        self.updateLog("Timeout reached before every thread finished!")
        sys.exit(1)
    self.updateLog("Successfully exit!")
    sys.exit(0)
|
from utils import makeSoup

# Fetch the manga title page and print every chapter row found on it,
# separated by a divider line.
link = 'https://mangadex.org/title/19636/8-tales-of-the-zqn'
soup = makeSoup(link)
chapter_rows = soup.find_all('div', class_='chapter-row d-flex row no-gutters p-2 align-items-center border-bottom odd-row')
for chapter_row in chapter_rows:
    print(chapter_row)
    print('_________')
import tensorflow as tf
import numpy as np

# Fix the random seeds so weight init and shuffling are reproducible.
tf.random.set_seed(1)
np.random.seed(1)

from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()  # more info : https://goo.gl/U2Uwz2
X = cancer['data']
y = cancer['target']

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2)

import tensorflow.keras as keras

# Three hidden ReLU layers, then a single-unit output.
model = keras.Sequential([
    keras.layers.Dense(30, activation=tf.nn.relu,
                       input_shape=(X_train.shape[1],)),
    keras.layers.Dense(30, activation=tf.nn.relu),
    keras.layers.Dense(30, activation=tf.nn.relu),
    # BUG FIX: the targets are 0/1 class labels, so the output must be a
    # sigmoid probability; the original linear output trained with MSE
    # made the 'accuracy' metric meaningless.
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
model.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=['accuracy'])
model.summary()

history = model.fit(X_train, y_train, epochs=10, batch_size=10)
# Avoid shadowing the builtin 'eval'.
eval_result = model.evaluate(X_test, y_test)
print(eval_result)
|
# -*- coding:utf-8 -*-
import socket
import sys
import threading
clist = []
def main(argc, argv):
    """Start the operator-input thread, then accept clients forever."""
    # Thread reading operator commands from stdin
    sender = threading.Thread(target=send_loop)
    sender.start()
    # Bring up the listening server socket
    ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ssock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    ssock.bind(("0.0.0.0", 25555))  # port can be changed here
    ssock.listen(10)
    # Accept loop: one handler thread per connected client
    while True:
        try:
            csock, caddr = ssock.accept()
        except KeyboardInterrupt:
            # Ctrl-C: close every client socket, then the server socket
            for cinfo in clist:
                cinfo[0].close()
            ssock.close()
            print("Server closed.")
            return
        handler = threading.Thread(target=client_loop, args=(csock, caddr))
        handler.start()
    return
def client_loop(csock, caddr):
    """Per-client receive loop: register the client and print its messages.

    The first message received is taken as the client's name; an empty
    name drops the connection immediately.
    """
    cname = csock.recv(1024).decode().strip()
    if len(cname) <= 0:
        csock.close()
        return
    cinfo = [csock, caddr, cname]
    clist.append(cinfo)
    print("Connect: {0}".format(cname))
    try:
        while True:
            try:
                recvmsg = csock.recv(1024).decode()
            except KeyboardInterrupt:
                csock.close()
                return
            except OSError:  # e.g. when the /kill command is used
                csock.close()
                return
            if len(recvmsg) <= 0:
                csock.close()
                break
            print("%s:%s" % (cname, recvmsg))
        print("Disconnect: {0} (by Client)".format(cname))
    finally:
        # BUG FIX: the original removed the client from clist only on a
        # clean disconnect; the KeyboardInterrupt/OSError paths left a
        # stale entry behind that /list showed and /closeall re-closed.
        if cinfo in clist:
            clist.remove(cinfo)
    return
def send_loop():
    """Operator console: read commands from stdin and act on clients."""
    helpMsg()
    while True:
        # Wait for a command
        try:
            cmd = input()
        except KeyboardInterrupt:
            return
        buf = cmd.split(" ")
        if buf[0] == "/cmd":
            # Guard against a missing name argument (was an IndexError).
            if len(buf) < 2:
                print("Usage: /cmd [Name]")
                continue
            for cinfo in clist:
                if cinfo[2] == buf[1]:
                    print("Command ->", end='')
                    cmd = input()
                    cinfo[0].sendall(cmd.encode())
        elif buf[0] == "/list":
            print("---------- Client name list ----------")
            for cinfo in clist:
                print("{0}\t".format(cinfo[2]), end='')
                print(cinfo[1])
        elif buf[0] == "/close":
            if len(buf) < 2:
                print("Usage: /close [Name]")
                continue
            # BUG FIX: iterate over a copy -- removing from clist while
            # iterating it skips elements.
            for cinfo in list(clist):
                if cinfo[2] == buf[1]:
                    print("Disconnect: {0} (by /close)".format(cinfo[2]))
                    cinfo[0].close()
                    clist.remove(cinfo)
        elif buf[0] == "/closeall":
            # BUG FIX: same copy-before-iterate fix; the original removed
            # entries during iteration and only closed every other client.
            for cinfo in list(clist):
                print("Disconnect: {0} (by /closeall)".format(cinfo[2]))
                cinfo[0].close()
                clist.remove(cinfo)
        elif buf[0] == "/kill":
            if len(buf) < 2:
                print("Usage: /kill [Name]")
                continue
            for cinfo in clist:
                if cinfo[2] == buf[1]:
                    print("Kill: {0} (by /kill)".format(cinfo[2]))
                    cinfo[0].sendall("/kill".encode())
        elif buf[0] == "/help":
            helpMsg()
        else:
            print("Undefined command: {0}".format(buf[0]))
    return
def helpMsg():
    """Print the list of operator console commands, then a blank line."""
    text = ("Commands: /cmd [Name], /list, "
            "/close [Name], /closeall, /kill [Name]")
    print(text)
    print()
    return
# Script entry point: run the server, then terminate the interpreter
# explicitly once main() returns.
if(__name__ == "__main__"):
    main(len(sys.argv), sys.argv)
    sys.exit()
|
# coding=utf-8
import os
import sys
import unittest
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import user_login, add_subjects
from webTest_pro.common.model.baseActionDel import del_subject
from webTest_pro.common.model.baseActionSearch import search_subject
from webTest_pro.common.model.baseActionModify import update_Subjects
from webTest_pro.common.logger import logger, T_INFO
reload(sys)
sys.setdefaultencoding("utf-8")
# Login configuration shared by all test cases (loaded from init data).
loginInfo = init.loginInfo
# Fixture data: the subjects created, searched and deleted by the tests.
subjects = [{
    'subjectName': u'书法',
    'description': u'学习中国文化'
}, {
    'subjectName': u'计算机',
    'description': u'计算机基础应用'
}]
# Fixture data for the update test.
# NOTE(review): 'searchName' appears to be the existing name used to
# locate the record, with subjectName/description the new values --
# confirm against update_Subjects.
subjectsData = [{
    'subjectName': u'测试科目名称',
    'description': u'描述说明',
    'searchName': u'书法'
}, {
    'subjectName': u'书法',
    'description': u'描述说明',
    'searchName': u'测试科目名称'
}]
class subjectmanager(unittest.TestCase):
    '''Subject management test suite.'''
    def setUp(self):
        # Choose a local Chrome driver or a remote grid node depending on
        # the configured execution environment.
        if init.execEnv['execType'] == 'local':
            T_INFO(logger,"\nlocal exec testcase")
            self.driver = webdriver.Chrome()
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
        else:
            T_INFO(logger,"\nremote exec testcase")
            browser = webdriver.DesiredCapabilities.CHROME
            self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
    def tearDown(self):
        # Quit the browser and fail if any deferred verification failed.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
        T_INFO(logger,"tenantmanger end!")
    def test_add_subjects(self):
        '''Add subjects and verify the success popup for each one.'''
        print "exec:test_add_subjects..."
        driver = self.driver
        user_login(driver, **loginInfo)
        for subject in subjects:
            add_subjects(driver, **subject)
            self.assertEqual(u"添加成功!",
                             driver.find_element_by_css_selector(
                                 ".layui-layer-content").text)
            sleep(0.5)
        print "exec:test_add_subjects success."
    def test_bsearch_subjects(self):
        '''Search each subject and verify it appears in the result table.'''
        print "exec:test_bsearch_subjects"
        driver = self.driver
        user_login(driver, **loginInfo)
        for subject in subjects:
            search_subject(driver, **subject)
            self.assertEqual(
                subject['subjectName'],
                driver.find_element_by_xpath(
                    "//table[@id='subjecttable']/tbody/tr/td[2]").text)
            print "exec: test_bsearch_subjects success."
            sleep(0.5)
    def test_bupdate_subjects(self):
        '''Update each subject with the modification fixture data.'''
        print "exec:test_bupdate_subjects"
        driver = self.driver
        user_login(driver, **loginInfo)
        for subject in subjectsData:
            update_Subjects(driver, **subject)
            print "exec: test_bupdate_subjects success."
            sleep(0.5)
    def test_del_subjects_ok(self):
        '''Delete each subject (confirm) and verify the success popup.'''
        print "exec:test_del_subjects_ok..."
        driver = self.driver
        user_login(driver, **loginInfo)
        for subject in subjects:
            del_subject(driver, **subject)
            sleep(1.5)
            self.assertEqual(u"删除成功!",
                             driver.find_element_by_css_selector(
                                 ".layui-layer-content").text)
            sleep(0.5)
        print "exec:test_del_subjects_ok success."
    def is_element_present(self, how, what):
        # Return True when the element can be located, False otherwise.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # Return True when a JS alert is currently displayed.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        # Accept or dismiss the current alert (per accept_next_alert)
        # and return its text; always re-arm accept_next_alert.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
# Building a house
class House:
    """House class"""  # The first string literal is the class/method docstring (PEP 8 / PEP 257)
    house_size: int; house_weight: int  # Class-level attribute annotations
    def __init__(self):
        """House class initializer"""
        print("Класс дома создан")
    def setLocalValues(self, size: int, weight: int):
        """Assign values to the instance attributes"""
        self.house_size = size
        self.house_weight = weight
        # Thanks to the first parameter, self, we can tell which specific
        # instance this method was called on: self always refers to it.
    # NOTE(review): 'window' is missing the 'self' parameter -- as written
    # it only works when called on the class, not on an instance; confirm.
    def window(size:int, weight:int):
        """Window: returns the given size and weight as a tuple"""
        return size, weight
    def __del__(self):
        """Class finalizer"""
        print("Удаление экземпляра:", self.__str__())
def checkAttr(className: object, attrName: str, desc: str = "") -> str:
    """Report on an attribute of an object.

    When the attribute exists, returns a line with its value (prefixed by
    `desc` when given); otherwise returns a "not found" message.

    - className: the object to inspect (required parameter type: object)
    - attrName: attribute name (required parameter type: string)
    - desc: attribute description (parameter type: string)
    """
    if not hasattr(className, attrName):
        return "Атрибут {} не найден".format(attrName)
    value = getattr(className, attrName)
    if desc != "":
        return "Атрибут {} {}: {}".format(attrName, desc, value)
    return "Атрибут {} имеет значение: {}".format(attrName, value)
# Inspect class metadata
print("Описание класса:", House.__doc__)
print("Имя класса:", House.__name__)
print("Полный набор данных класса:", dir(House))
h = House()
h.setLocalValues(size=100, weight=15)
print("Список локальных переменных:", h.__dict__)
# Return an attribute's value
print(checkAttr(h, "house_size", "размер дома"))
print(checkAttr(h, "house_weight", "ширина дома"))
print(checkAttr(h, "house_height", "высота дома"))
# Add an attribute with a value
setattr(h, "house_height", 10)
print(checkAttr(h, "house_height", "высота дома"))
# Delete the house-height attribute
delattr(h, "house_height")
print(checkAttr(h, "house_height", "высота дома"))
# Check whether the instance belongs to a given class
print(isinstance(h, House)) |
# Generated by Django 3.0.8 on 2020-10-26 06:34
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `likes` many-to-many (to the user model) on Product and
    widen ProductComment.email to max_length=200."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('eshop_products', '0015_product_attribute'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='likes',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='لایک ها'),
        ),
        migrations.AlterField(
            model_name='productcomment',
            name='email',
            field=models.EmailField(max_length=200, verbose_name='آدرس ایمیل '),
        ),
    ]
|
import datetime
import os
import time
import torch
import torch.utils.data
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
from Utils.coco_utils import get_coco, get_coco_kp
from Utils.group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from Utils.engine import train_one_epoch, evaluate
from Reader.InstanceReader.InstanceReaderCoCoStyle import ChemScapeDataset
from Utils.coco_utils import get_coco_api_from_dataset
from Utils.coco_eval import CocoEvaluator
from Utils.engine import _get_iou_types
from Utils.utils import get_transform
import numpy as np
import cv2 as cv
import tkinter
import matplotlib
import matplotlib.pyplot as plt
import tkinter
import matplotlib
matplotlib.use('TkAgg')
def findContours(*args, **kwargs):
    """Version-agnostic wrapper around cv2.findContours.

    OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4 returns
    (contours, hierarchy); this normalizes both to (contours, hierarchy).

    Raises:
        AssertionError: when the installed cv2 is neither version 3 nor 4.
    """
    version = cv.__version__
    if version.startswith('4'):
        contours, hierarchy = cv.findContours(*args, **kwargs)
        return contours, hierarchy
    if version.startswith('3'):
        _, contours, hierarchy = cv.findContours(*args, **kwargs)
        return contours, hierarchy
    raise AssertionError(
        'cv2 must be either version 3 or 4 to call this method')
class ChemDemo(object):
    """Runs a trained instance-segmentation model over images and renders or
    exports its predictions (boxes, masks, panoptic-style maps).

    NOTE(review): `data_loader` is accepted but never stored or used in this
    class — confirm whether it is still needed.
    """
    def __init__(self, model, data_loader, confidence_threshold=0.7, device=torch.device("cpu")):
        self.model = model
        self.model.eval()  # inference mode
        self.device = device
        self.model.to(device)
        self.confidence = confidence_threshold
        self.mask_threshold = 0.5  # binarization threshold for soft masks
        self.iou_types = _get_iou_types(model)
        # Fixed BGR colors, indexed by class label (see compute_colors_for_labels).
        self.palette = [[0,0,255], [0,255,0], [255,0,0], [255,255,0], [0,255,255], [255,0,255], [255,255,255]]
    def compute_prediction(self, image):
        """Forward a batch of images through the model and return the first
        sample's prediction dict, moved to CPU."""
        cpu_device = torch.device("cpu")
        image = list(img.to(self.device) for img in image)
        outputs = self.model(image)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        return outputs[0]
    def select_top_predictions(self, predictions):
        """
        Select only predictions which have a `score` > self.confidence_threshold,
        and returns the predictions in descending order of score
        Arguments:
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores`.
        Returns:
            prediction (BoxList): the detected objects. Additional information
                of the detection properties can be found in the fields of
                the BoxList via `prediction.fields()`
        """
        scores = predictions["scores"]
        keep = torch.nonzero(scores > self.confidence).squeeze(1)
        # Filter every prediction field by the kept indices.
        predictions = {key: predictions[key][keep] for key in predictions.keys()}
        return predictions
    def run_on_image(self, image, target, outDir):
        """Predict on one image, draw boxes and mask contours, and save the
        figure as <outDir>/<fname>.png. Returns the annotated array."""
        predictions = self.compute_prediction(image)
        top_predictions = self.select_top_predictions(predictions)
        # Re-arrange CHW tensor channels into an HWC array for drawing.
        results = image[0].numpy().copy()
        results = np.dstack((results[0], results[1], results[2]))
        results = self.overlay_boxes(results, top_predictions)
        results = self.overlay_mask(results, top_predictions)
        plt.imshow(results)
        #plt.show()
        plt.savefig(outDir + "/" + target[0]["fname"] + '.png')
        plt.clf()
        return results
    def overlay_boxes(self, image, predictions):
        """
        Adds the predicted boxes on top of the image
        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `labels`.
        """
        labels = predictions["labels"]
        boxes = predictions["boxes"]
        scores = predictions["scores"]
        template = "{}: {:.2f}"
        for label, box, score in zip(labels, boxes, scores):
            color = self.compute_colors_for_labels(label)
            box = box.to(torch.int64)
            # NOTE(review): text anchor (x, y) is the bottom-right corner of the
            # box — confirm that is the intended label position.
            x, y = box[2:]
            top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
            image = cv.rectangle(image, tuple(top_left), tuple(bottom_right), tuple(color), 1)
            s = template.format(label, score)
            image = cv.putText(
                image, s, (x, y), cv.FONT_HERSHEY_SIMPLEX, 1, color, 1
            )
        return image
    def overlay_mask(self, image, predictions):
        """
        Adds the instances contours for each predicted object.
        Each label has a different color.
        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `mask` and `labels`.
        """
        masks = predictions["masks"].numpy()
        labels = predictions["labels"]
        for mask, label in zip(masks, labels):
            color = self.compute_colors_for_labels(label)
            # Binarize the soft mask at 0.5 and trace its outline.
            contours, hierarchy = findContours(((mask[0]>0.50)*255).astype(np.uint8), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
            image = cv.drawContours(image, contours, -1, color, 3)
        composite = image
        return composite
    def compute_colors_for_labels(self, label):
        """
        Simple function that adds fixed colors depending on the class
        """
        color = self.palette[label]
        return color
    def compute_panoptic(self, image, label, folder, json, show=False):
        """Build per-pixel instance-id maps (material / vessel channels) from the
        predictions and save them as <folder>/<fname>.png; records sub-class
        labels for each instance in the passed-in `json` dict (which shadows
        the stdlib module name) and returns it.
        """
        sub_classes = ["",
                       "V",
                       "V Label",
                       "V Cork",
                       "V Parts GENERAL",
                       "Ignore",
                       "Liquid GENERAL",
                       "Liquid Suspension",
                       "Foam",
                       "Gel",
                       "Solid GENERAL",
                       "Granular",
                       "Powder",
                       "Solid Bulk",
                       "Vapor",
                       "Other Material",
                       "Filled vessel"]
        json[label[0]["fname"]] = {
            "PartCats": {},
            "MaterialCats": {},
            "MultiPhaseMaterial": [],
            "MultiPhaseVessels": []
        }
        material_cat = {}
        predictions = self.compute_prediction(image)
        top_pred = self.select_top_predictions(predictions)
        material = np.zeros((image[0].size(1), image[0].size(2)))
        vessel = material.copy()
        # NOTE(review): `parts` is never written after this — it stays all-zero
        # in the saved result; confirm whether a parts channel was intended.
        parts = material.copy()
        m_id = 1
        v_id = 1
        for i in range(top_pred["scores"].size(0)):
            if top_pred["labels"][i] not in [0,2]:
                sub_cls = torch.nonzero(top_pred['sub_cls'][i]).squeeze(1).tolist()
                sub_labels = [sub_classes[idx] for idx in sub_cls]
                material_cat[str(m_id)]= sub_labels
                # Paint instance id only where no earlier instance claimed the pixel.
                material += (top_pred['masks'][i][0].numpy() > 0.5) * (material == 0) * m_id
                m_id += 1
            if top_pred["labels"][i] == 2:
                sub_cls = torch.nonzero(top_pred['sub_cls'][i]).squeeze(1).tolist()
                sub_labels = [sub_classes[idx] for idx in sub_cls]
                # NOTE(review): the vessel branch also keys material_cat by m_id
                # (not v_id) — looks like it should use v_id; confirm.
                material_cat[str(m_id)] = sub_labels
                vessel += (top_pred['masks'][i][0].numpy() > 0.5) * (vessel == 0) * v_id
                v_id += 1
        if show:
            image = image[0].numpy().copy()
            image = np.dstack((image[0], image[1], image[2]))
            fig, ax = plt.subplots(nrows=1, ncols=3)
            ax[0].imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
            ax[1].matshow(material)
            ax[2].matshow(vessel)
            plt.show()
        anno_file = os.path.join(folder, label[0]["fname"])
        results = np.dstack(( vessel,parts, material)).astype(np.uint8)
        plt.imsave(anno_file+".png", results)
        json[label[0]["fname"]]["MaterialCats"] = material_cat
        return json
|
# program started
# Read two integers from the user and report their sum.
num1 = int(input("Enter 1st number"))
num2 = int(input("Enter 2nd number"))
add = num1 + num2
# Fixed the "adition" typo and the accidental double spaces produced by
# comma-separated print arguments with padded literals.
print("The addition of {} + {} = {}".format(num1, num2, add))
|
from datetime import datetime
from django.views.generic import TemplateView
from elections.models import Election
class HomeView(TemplateView):
    """Landing page: shows the next 15 upcoming public elections."""

    template_name = "home.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        upcoming = (
            Election.public_objects.all()
            .filter(group_type="election")
            .filter(poll_open_date__gte=datetime.today())
            .order_by("poll_open_date", "election_id")[:15]
        )
        context["upcoming_elections"] = upcoming
        return context
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine.console import Console
from pants.engine.fs import Digest, MergeDigests, PathGlobs, SpecsPaths
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule
from pants.option.option_types import ArgsListOption
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
class SuccinctCodeCounter(TemplatedExternalTool):
    """Download configuration for the `scc` line-counting binary."""

    options_scope = "scc"
    name = "SCC"
    help = "The Succinct Code Counter, aka `scc` (https://github.com/boyter/scc)."

    default_version = "3.0.0"
    default_known_versions = [
        "3.0.0|macos_arm64 |846cb1b25025a0794d455719bc17cfb3f588576a58af1d95036f6c654e294f98|2006145",
        "3.0.0|macos_x86_64|9c3064e477ab36e16204ad34f649372034bca4df669615eff5de4aa05b2ddf1a|2048134",
        "3.0.0|linux_arm64 |04f9e797b70a678833e49df5e744f95080dfb7f963c0cd34f5b5d4712d290f33|1768037",
        "3.0.0|linux_x86_64|13ca47ce00b5bd032f97f3af7aa8eb3c717b8972b404b155a378b09110e4aa0c|1948341",
    ]
    default_url_template = (
        "https://github.com/boyter/scc/releases/download/v{version}/scc-{version}-{platform}.zip"
    )
    default_url_platform_mapping = {
        "macos_arm64": "arm64-apple-darwin",
        "macos_x86_64": "x86_64-apple-darwin",
        "linux_arm64": "arm64-unknown-linux",
        "linux_x86_64": "x86_64-unknown-linux",
    }
    args = ArgsListOption(
        example="--no-cocomo",
        passthrough=True,
        # Fixed doubled word ("to to") in the user-facing help text.
        extra_help="Refer to https://github.com/boyter/scc.",
    )

    def generate_exe(self, _: Platform) -> str:
        # The zip archive unpacks the binary at its root.
        return "./scc"
class CountLinesOfCodeSubsystem(GoalSubsystem):
    """Options container for the `count-loc` goal."""
    name = "count-loc"
    help = "Count lines of code."
class CountLinesOfCode(Goal):
    """Goal marker type returned by the `count_loc` rule."""
    subsystem_cls = CountLinesOfCodeSubsystem
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
@goal_rule
async def count_loc(
    console: Console,
    succinct_code_counter: SuccinctCodeCounter,
    specs_paths: SpecsPaths,
    platform: Platform,
) -> CountLinesOfCode:
    """Download `scc`, run it over the files matched by the input specs, and
    print its report to the console."""
    if not specs_paths.files:
        # Nothing matched: succeed without invoking the tool.
        return CountLinesOfCode(exit_code=0)
    # Snapshot the target files and fetch the tool binary concurrently.
    specs_digest, scc_program = await MultiGet(
        Get(Digest, PathGlobs(globs=specs_paths.files)),
        Get(
            DownloadedExternalTool,
            ExternalToolRequest,
            succinct_code_counter.get_request(platform),
        ),
    )
    # The sandboxed process needs both the binary and the files to count.
    input_digest = await Get(Digest, MergeDigests((scc_program.digest, specs_digest)))
    result = await Get(
        ProcessResult,
        Process(
            argv=(scc_program.exe, *succinct_code_counter.args),
            input_digest=input_digest,
            description=f"Count lines of code for {pluralize(len(specs_paths.files), 'file')}",
            level=LogLevel.DEBUG,
        ),
    )
    console.print_stdout(result.stdout.decode())
    return CountLinesOfCode(exit_code=0)
def rules():
    """Expose this module's @rule/@goal_rule definitions to the Pants engine."""
    return collect_rules()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .ligne import Ligne
# Tax-form line 4EB: gross receipts under the micro-foncier regime.
L4EB_recettes_brutes = Ligne('4EB', 'Micro foncier - recettes brutes')
class Micro_Foncier:
    '''
    Micro-foncier regime computation (flat-rate deduction on rental income).

    References:
    https://www3.impots.gouv.fr/simulateur/calcul_impot/2020/aides/fonciers.htm
    https://www.corrigetonimpot.fr/impot-location-vide-appartement-proprietaire-calcul/
    '''

    def __init__(self, database):
        self._database = database
        self._lignes = []

    def add_ligne(self, type_, valeur):
        """Record a (type, value) line; reject gross receipts above the regime ceiling."""
        plafond = self._database.micro_foncier_revenu_foncier_plafond
        if type_ == L4EB_recettes_brutes and valeur > plafond:
            raise Exception("Revenu Foncier supérieur au plafond {}".format(plafond))
        self._lignes.append({'type': type_, 'valeur': valeur})

    def get_ligne(self, lignes):
        """Sum recorded values whose type is in `lignes` (single item or list)."""
        wanted = lignes if isinstance(lignes, list) else [lignes]
        total = 0
        for entry in self._lignes:
            if entry['type'] in wanted:
                total += entry['valeur']
        return total

    @property
    def recettes_brutes(self):
        return self.get_ligne(L4EB_recettes_brutes)

    @property
    def revenu_foncier_taxable(self):
        # Only (1 - taux) of gross receipts is taxable after the flat deduction.
        return self.recettes_brutes * (1 - self._database.micro_foncier_taux)

    @property
    def prelevement_sociaux(self):
        return self.revenu_foncier_taxable * self._database.prelevement_sociaux_taux
|
import unittest
from Bubble_Sort import bubble_sort
class BubbleSortTest(unittest.TestCase):
    """Unit tests for bubble_sort."""

    def test_Bubble_Sort(self):
        # Use assertEqual throughout: assertEquals is a deprecated alias
        # (and the rest of this test already uses assertEqual).
        self.assertEqual(bubble_sort([8,1,2,3,4,5,6,7]), [1,2,3,4,5,6,7,8])
        self.assertEqual(bubble_sort([10,5,90,0,10]), [0,5,10,10,90])
        self.assertEqual(bubble_sort([64, 34, 25, 22, 11, 90,12]), [11,12,22,25,34,64,90])
        self.assertEqual(bubble_sort([8,90,4,1,5,5]), [1,4,5,5,8,90])
if __name__ == "__main__":
unittest.main() |
from pywebio import *
from pywebio.output import *
from pywebio.input import *
def generate_name(car_info):
    '''
    Customized algorithm used to generate a name (type: str) from input data (type: dict).
    This demo's rule: model year 2020 or newer -> 'Golden Retriever', else 'Labrador'.
    '''
    is_recent = car_info['year'] >= 2020
    return 'Golden Retriever' if is_recent else 'Labrador'
def main():
    '''
    The function that runs as a PyWeb app.
    Defines the web components and user-interaction logic.
    '''
    #Header section
    put_markdown('# 🚗 Generate a nickname for your car')
    put_markdown('> Made with ❤️ by PyWeb.io')
    #The form for users to fill in
    car_info = input_group("Select all that match your car",[
        #input box component: func input()
        input('---Car maker---', name='maker'),
        #drop-down selection component: func select()
        select('---Select year (Required)---', options=[2021, 2020, 2019, 2018], name='year'),
        #checkbox component: func checkbox()
        # NOTE(review): labeled "Who drives it" but the field name is 'engine' — confirm.
        checkbox('---Who drives it---', options=['Mom', 'Dad', 'Alice', 'Bob'], inline=True, name='engine'),
        #radio button component: func radio()
        radio('---Color(Required)---', options=['Red', 'Blue', 'Black', 'Other'], required=True, inline=True, name='color'),
        #file uploading component: func file_upload()
        file_upload('---Upload a picture---', accept=['.jpg', '.png'],
                    help_text='Only accept jpg and png formats. Your file is not uploaded to our server in this demo.', name='picture')
    ])
    #Generate car name based on user inputs
    car_name = generate_name(car_info)
    #Display the name on the web app
    put_markdown(' ### Your car\'s new nick name: `%s`' % car_name)
    #A counter for how many code names have been generated by users.
    #A local file is used to store the count.
    # NOTE(review): this read-modify-write is not atomic; concurrent sessions
    # can lose counts.
    with open('__name_generator_counter.txt', 'a+') as fp:
        fp.seek(0)
        count = fp.read()
        if not count: count = 0
        count = int(count) + 1
        fp.seek(0)
        fp.truncate()
        fp.write(str(count))
    #Display tool usage stats
    put_markdown('> %s nick names has been generated using this tool so far. [Generate one for another car](https://pyweb.io/app/58817/).' % count)
    #Add a section to collect user feedback.
    # NOTE(review): the textarea's return value is discarded — feedback is never stored.
    textarea('Tell us how to improve the tool', help_text='Please share with us what other features you like to see.')
    toast('Thanks for submitting your feedback!')
|
from configparser import ConfigParser
from threading import Lock
class Database(ConfigParser):
    """Thin abstraction over ConfigParser backed by an ini file.

    Intended to be used as a singleton: two scripts sharing the same file
    through separate instances can lose each other's writes.

    Variable names are case-insensitive (stored lower-case by ConfigParser),
    and values are stored as strings — type conversion is the caller's job.
    """

    def __init__(self, path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.read(path)
        self.path = path
        self.database_lock = Lock()
        self.vars = self['Variables']

    def save(self):
        """Persist to disk, then reload so memory mirrors the file exactly."""
        with open(self.path, "w") as handle:
            self.write(handle)
        self.clear()
        self.read(self.path)

    def getVar(self, name):
        """Return the stored string value, or None when absent."""
        return self.vars.get(name)

    def setVar(self, name, value):
        """Create or overwrite a variable (stringified) and persist at once."""
        with self.database_lock:
            self.vars[name] = str(value)
            self.save()

    def delVar(self, name):
        """Remove a variable and persist at once."""
        with self.database_lock:
            del self.vars[name]
            self.save()

    def iterVars(self):
        """Iterate over (name, value) pairs of all stored variables."""
        return iter(self.vars.items())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-12 15:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates Type_of_Work and Work (FK to Type_of_Work)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Type_of_Work',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Название')),
            ],
            options={
                'verbose_name': 'Обл. деятельности',
            },
        ),
        migrations.CreateModel(
            name='Work',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Название')),
                ('type', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='freelance.Type_of_Work')),
            ],
            options={
                'verbose_name': 'Под-обл. деятельности',
            },
        ),
    ]
|
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
# Comments Section:
# - Straightforward algorithm using the fact that (if A divides B) then B has a divisor (A <= sqrt(B))
import math as m
def isprime(x):
    """Primality test by trial division up to sqrt(x).

    Fixed: the original returned True for x < 2 (e.g. isprime(1)),
    since the trial-division loop was empty for small x.
    """
    if x < 2:
        return False
    for i in range(2, int(m.sqrt(x)) + 1):
        if x % i == 0:
            return False
    return True
def problem10(limit=2000000):
    """Return the sum of all primes strictly below ``limit``.

    Generalized with a backward-compatible ``limit`` parameter (the default
    preserves the original behavior) and switched from per-number trial
    division (O(n*sqrt(n))) to a sieve of Eratosthenes (O(n log log n)).
    Also avoids shadowing the builtin ``sum``.
    """
    if limit < 3:
        # No primes below 2.
        return 0
    flags = bytearray([1]) * limit  # flags[i] == 1 -> i is (still) presumed prime
    flags[0] = flags[1] = 0
    for i in range(2, int(limit ** 0.5) + 1):
        if flags[i]:
            # Knock out multiples starting at i*i (smaller ones already handled).
            flags[i * i::i] = bytearray(len(range(i * i, limit, i)))
    return sum(i for i, is_p in enumerate(flags) if is_p)
|
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from my.tensorflow.nn import linear_logits, get_logits, softsel
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
from tensorflow.contrib.rnn import BasicLSTMCell
from reasoning_layers.utils import biattention_layer
def dynamic_mac_rnn(cell, context, query, q_len, c_mask, q_mask, q_sub_st=None, context_st=None, query_st=None, cdoc_mask=None, candidates=None, cand_mask=None, greedy_read=False):
    """Dispatch to cell.apply with the right argument set.

    With no document-level mask (cdoc_mask is None) the flat MAC cell is used;
    otherwise the cell must be a HierarchicalAttnMACRnn and the document-level
    encodings (context_st) plus the sub-question encoding (q_sub_st) are required.
    """
    if cdoc_mask is None:
        assert context_st is None
        return cell.apply(context, query, q_len, c_mask, q_mask, q_sub_st=q_sub_st, candidates=candidates, cand_mask=cand_mask)
    else:
        assert context_st is not None and q_sub_st is not None
        assert isinstance(cell, HierarchicalAttnMACRnn)
        return cell.apply(context, context_st, query, query_st, q_sub_st, q_len, c_mask, cdoc_mask, q_mask, candidates=candidates, cand_mask=cand_mask, greedy_read=greedy_read)
class MACRnn(object):
    """
    This class implements a standard MAC RNN (https://arxiv.org/abs/1803.03067) adapted for multi-hop qa.

    Runs `num_hops` MAC cells (control unit -> read unit -> write unit) over an
    encoded context/query pair and produces logits through an output unit whose
    form depends on `prediction` and `output_unit_type`.
    """
    def __init__(self, batch_size, context_dim, query_dim, hidden_dim=80, num_hops=6, bidirectional_input_unit=False, prediction='span-single', \
        reuse_cell=True, is_train=None, use_control_unit=True, mode="train", output_unit_type='similarity', reasoning_unit='answer_unit', \
        answer_state_update_rule='mlp'):
        """
        num_hops: the number of mac cell chained together, or number of reasoning steps.
        bidirectional_input_unit: use bi-lstm for input unit. Default to false to save memory.
        prediction: prediction layer. Could be 'span-single/dual', 'candidates'
        reuse_cell: use one single cell for all reasoning steps. (not sure what Hudson and Mannning did.)
        """
        self.batch_size = batch_size
        self.hidden_dim = hidden_dim
        self.context_dim = context_dim
        self.query_dim = query_dim
        self.num_hops = num_hops
        self.bidirectional_input_unit = bidirectional_input_unit
        self.prediction = prediction
        self.reuse_cell = reuse_cell
        self.is_train = is_train
        self.use_control_unit = use_control_unit
        self.mode = mode
        self.output_unit_type = output_unit_type
        self.reasoning_unit = reasoning_unit
        self.answer_state_update_rule = answer_state_update_rule
        # Attention snapshots collected by subclasses/inspection code.
        self.top_attn = []
    def apply(self, context, query, q_len, c_mask, q_mask, candidates=None, cand_mask=None, q_sub_st=None):
        """Chain `num_hops` MAC cells over the context and return prediction
        logits per `self.prediction` (candidates / span-dual / span-single)."""
        batch_size = self.batch_size
        hidden_dim = self.hidden_dim
        query_dim = self.query_dim
        reuse_cell = self.reuse_cell
        context = tf.squeeze(context, axis=1)
        if candidates is not None:
            candidates = tf.squeeze(candidates, axis=1)
        # Initial control and memory states are all-zero.
        c_state = tf.zeros((batch_size, hidden_dim))
        m_state = tf.zeros((batch_size, hidden_dim))
        with tf.variable_scope('MACRnn'):
            query, q_rep = self.MACInputUnit(query, q_len)
            c_history = []
            m_history = []
            for i in range(self.num_hops):
                if reuse_cell:
                    # One shared cell: same scope every hop, reuse after hop 0.
                    scope_str = 'MACRnn-layer-%d' % 0
                    c_state, m_state = self.MACCell(i, query, q_rep, context, c_mask, q_mask, c_history, m_history, \
                        c_state, m_state, scope_str, reuse=(i!=0))
                else:
                    scope_str = 'MACRnn-layer-%d' % i
                    c_state, m_state = self.MACCell(i, query, q_rep, context, c_mask, q_mask, c_history, m_history, \
                        c_state, m_state, scope_str, reuse=False)
                c_history.append(c_state)
                m_history.append(m_state)
            if self.prediction == 'candidates':
                g1 = self.MACOutputUnit(m_state, context, candidates)
                return tf.expand_dims(g1, axis=1)
            elif self.prediction == 'span-dual':
                g1, g2 = self.MACOutputUnit(m_state, context)
                return tf.expand_dims(g1, axis=1), tf.expand_dims(g2, axis=1)
            else:
                assert self.prediction == 'span-single'
                g1, logits = self.MACOutputUnit(m_state, context)
                return tf.expand_dims(g1, axis=1), logits
    def MACInputUnit(self, query, query_len, reuse=False):
        """
        Inputs: encoded query and length.
        Outputs: query encoded by another lstm, and the final state of this lstm as
        a fixed-size representation of this query.
        """
        with tf.variable_scope('input_unit', initializer=tf.random_uniform_initializer, reuse=reuse):
            hidden_dim = self.hidden_dim
            if self.bidirectional_input_unit is True:
                cell_fw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
                cell_bw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
                (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, query, \
                    dtype=tf.float32, sequence_length=query_len, swap_memory=True)
                query_embed = tf.concat(axis=2, values=encoder_outputs)
                query_rep = tf.concat([fw_st.c, bw_st.c], axis=1)
                # Project the 2*hidden concatenations back down to hidden_dim.
                W_emb = tf.get_variable('W_emb', [2*hidden_dim, hidden_dim])
                b_emb = tf.get_variable('b_emb', [hidden_dim])
                W_rep = tf.get_variable('W_rep', [2*hidden_dim, hidden_dim])
                b_rep = tf.get_variable('b_rep', [hidden_dim])
                query_embed = tf.einsum('ijk,kl->ijl', query_embed, W_emb) + b_emb
                query_rep = tf.matmul(query_rep, W_rep) + b_rep
            else:
                cell_fw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
                query_embed, final_st = tf.nn.dynamic_rnn(cell_fw, query, dtype=tf.float32, \
                    sequence_length=query_len)
                query_rep = final_st.c
            return query_embed, query_rep
    def MACCell(self, layer: int, cw, q, k, c_mask, q_mask, c_history, m_history, c_state, m_state, scope_str, reuse=False):
        """One MAC step: control unit attends over query words (cw), read unit
        attends over the knowledge base (k), write unit updates the memory.
        Returns (new_c_state, new_m_state).

        NOTE(review): c_mask / q_mask are accepted but not applied inside the
        attention softmaxes here — confirm whether masking was intended.
        """
        hidden_dim = self.hidden_dim
        context_dim = self.context_dim
        query_dim = self.query_dim
        def control_unit():
            # Attention over query words, conditioned on [c_state; q].
            with tf.variable_scope('control_unit'):
                W_cq = tf.get_variable('W_cq', [2*hidden_dim, hidden_dim])
                b_cq = tf.get_variable('b_cq', [hidden_dim])
                cq = tf.matmul(tf.concat([c_state, q], axis=1), W_cq) + b_cq
                W_ca = tf.get_variable('W_ca', [hidden_dim, 1])
                b_ca = tf.get_variable('b_ca', [1])
                ca = tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', cq, cw), W_ca), axis=2) + b_ca
                cv = tf.nn.softmax(ca)
                # Weighted sum of query words -> new control state.
                return tf.einsum('ijk,ij->ik', cw, cv)
        def read_unit(new_c_state):
            """
            Does not include the I' in the original MAC paper.
            """
            with tf.variable_scope('read_unit'):
                W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
                b_m = tf.get_variable('b_m', [hidden_dim])
                W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
                b_k = tf.get_variable('b_k', [hidden_dim])
                I = tf.einsum('il,ijl->ijl', tf.matmul(m_state, W_m) + b_m, tf.einsum('ijk,kl->ijl', k, W_k) + b_k) # [batch_size, context_len, hidden_dim]
                W_ra = tf.get_variable('W_ra', [hidden_dim, 1])
                b_ra = tf.get_variable('b_ra', [1])
                ra = tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', new_c_state, I), W_ra), axis=2) + b_ra
                rv = tf.nn.softmax(ra)
                # Attention-weighted sum over the knowledge base.
                return tf.einsum('ijk,ij->ik', k, rv)
        def write_unit(r, new_c_state):
            # Merge the retrieved vector with the previous memory; optionally
            # self-attend over past control states (layer > 0) and gate the update.
            with tf.variable_scope('write_unit'):
                W_m = tf.get_variable('W_m', [context_dim + hidden_dim, hidden_dim])
                b_m = tf.get_variable('b_m', [hidden_dim])
                m_prev = tf.matmul(tf.concat([r, m_state], axis=1), W_m) + b_m
                if layer > 0 or self.reuse_cell:
                    W_c = tf.get_variable('W_c', [hidden_dim, 1])
                    b_c = tf.get_variable('b_c', [1])
                    #sa = tf.nn.softmax(tf.squeeze(tf.einsum('ijk,kl->ijl', tf.multiply(new_c_state, c_history), W_c), axis=2))
                    W_s = tf.get_variable('W_s', [hidden_dim, hidden_dim])
                    W_p = tf.get_variable('W_p', [hidden_dim, hidden_dim])
                    b = tf.get_variable('b', [hidden_dim])
                    if layer > 0:
                        # Attention over previous control states selects which
                        # past memories to blend in.
                        sa = tf.nn.softmax(tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', new_c_state, c_history), W_c) + b_c, axis=2))
                        m_sa = tf.einsum('ijk,ij->ik', m_history, sa)
                        m_prime = tf.matmul(m_sa, W_s) + tf.matmul(m_prev, W_p) + b
                    else:
                        m_prime = tf.matmul(m_prev, W_p) + b
                    # Scalar gate interpolates between old and candidate memory.
                    W_c_2 = tf.get_variable('W_c_2', [hidden_dim, 1])
                    b_c_2 = tf.get_variable('b_c_2', [1])
                    c_prime = tf.matmul(new_c_state, W_c_2) + b_c_2
                    return tf.nn.sigmoid(c_prime) * m_state + (1 - tf.nn.sigmoid(c_prime)) * m_prime
        if layer > 0:
            # Stack python lists of per-hop states into [batch, layer, dim] tensors.
            c_history = tf.stack(c_history, axis=1)
            m_history = tf.stack(m_history, axis=1)
        with tf.variable_scope(scope_str, reuse=reuse) as scope:
            new_c_state = control_unit()
            new_m_state = write_unit(read_unit(new_c_state), new_c_state)
            return new_c_state, new_m_state
    def MACOutputUnit(self, m_state, context, candidates=None, query=None, reuse=False):
        """Turn the final memory state into logits.

        For 'candidates' prediction, scores each candidate by one of several
        output unit types; otherwise produces per-position span scores over
        the context (plus a second set for 'span-dual').
        """
        hidden_dim = self.hidden_dim
        context_dim = self.context_dim
        with tf.variable_scope('output_unit', reuse=reuse):
            if self.prediction == 'candidates':
                assert candidates is not None
                cand_dim = context_dim
                #cand_dim = candidates.get_shape()[-1]
                if self.output_unit_type == 'similarity':
                    # Elementwise product of projected memory and candidates.
                    W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
                    b_m = tf.get_variable('b_m', [hidden_dim])
                    M = tf.matmul(m_state, W_m) + b_m
                    W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
                    b_k = tf.get_variable('b_k', [hidden_dim])
                    I = tf.einsum('ijk,kl->ijl', candidates, W_k) + b_k
                    g1 = tf.einsum('ik,ijk->ijk', M, I)
                elif self.output_unit_type == 'nested-triplet-mlp':
                    # Two-layer MLP over [memory; candidate; memory*candidate].
                    num_cand = tf.shape(candidates)[1]
                    if self.reasoning_unit == 'bi-attn' or self.reasoning_unit == 'attention-lstm' or self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc':
                        similarity = tf.einsum('ik,ijk->ijk', m_state, candidates)
                        M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
                        W1 = tf.get_variable('W1', [3*cand_dim, 2*cand_dim])
                        b1 = tf.get_variable('b1', [2*cand_dim])
                        W2 = tf.get_variable('W2', [2*cand_dim, cand_dim])
                        b2 = tf.get_variable('b2', [cand_dim])
                        concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, cand_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, cand_dim])])
                        output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                    else:
                        W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
                        b_k = tf.get_variable('b_k', [hidden_dim])
                        similarity = tf.einsum('ik,ijk->ijk', m_state, tf.einsum('ijk,kl->ijl', candidates, W_k)) + b_k
                        M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
                        W1 = tf.get_variable('W1', [2*hidden_dim + cand_dim, hidden_dim])
                        b1 = tf.get_variable('b1', [hidden_dim])
                        W2 = tf.get_variable('W2', [hidden_dim, 40])
                        b2 = tf.get_variable('b2', [40])
                        concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, hidden_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, hidden_dim])])
                        output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                    g1 = tf.reshape(output, [self.batch_size, -1, context_dim])
                elif self.output_unit_type == 'triplet-mlp':
                    # MLP over [query; memory; candidate; memory*candidate].
                    assert query is not None
                    assert self.reasoning_unit == 'None' or self.reasoning_unit is None
                    num_cand = tf.shape(candidates)[1]
                    query_dim = self.query_dim
                    W_q = tf.get_variable('W_q', [query_dim, hidden_dim])
                    b_q = tf.get_variable('b_q', [hidden_dim])
                    query = tf.matmul(query, W_q) + b_q
                    query = tf.tile(tf.expand_dims(query, axis=1), [1, num_cand, 1])
                    W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
                    b_k = tf.get_variable('b_k', [hidden_dim])
                    similarity = tf.einsum('ik,ijk->ijk', m_state, tf.einsum('ijk,kl->ijl', candidates, W_k)) + b_k
                    M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
                    W1 = tf.get_variable('W1', [3*hidden_dim + cand_dim, hidden_dim])
                    b1 = tf.get_variable('b1', [hidden_dim])
                    W2 = tf.get_variable('W2', [hidden_dim, 40])
                    b2 = tf.get_variable('b2', [40])
                    concat_in = tf.concat(axis=-1, values=[tf.reshape(query, [-1, hidden_dim]), tf.reshape(M, [-1, hidden_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, hidden_dim])])
                    output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                    g1 = tf.reshape(output, [self.batch_size, -1, 40])
                else:
                    raise NotImplementedError
                return g1
            else:
                # Span prediction: score each context position against memory.
                W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
                b_m = tf.get_variable('b_m', [hidden_dim])
                W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
                b_k = tf.get_variable('b_k', [hidden_dim])
                I = tf.einsum('ijk,kl->ijl', context, W_k) + b_k
                M = tf.matmul(m_state, W_m) + b_m
                g1 = tf.einsum('ik,ijk->ijk', M, I)
                if self.prediction == 'span-dual':
                    # Second pass refines the features for an end-position score.
                    p2 = tf.concat([I, g1], axis=2)
                    W_p = tf.get_variable('W_p', [2*hidden_dim, hidden_dim])
                    b_p = tf.get_variable('b_p', [hidden_dim])
                    I_prime = tf.einsum('ijk,kl->ijl', p2, W_p) + b_p
                    g2 = tf.einsum('ik,ijk->ijk', M, I_prime)
                    return g1, g2
                else:
                    W_ra = tf.get_variable('W_ra', [hidden_dim, 1])
                    b_ra = tf.get_variable('b_ra', [1])
                    ra = tf.squeeze(tf.einsum('ijk,kl->ijl', g1, W_ra), axis=2) + b_ra
                    return g1, ra
class HierarchicalAttnMACRnn(MACRnn):
    def __init__(self, batch_size, context_dim, query_dim, hidden_dim=80, num_hops=6, bidirectional_input_unit=False, prediction='candidates', input_keep_prob=0.8, reuse_cell=True, \
        is_train=None, use_control_unit=True, mode="train", read_strategy='full', output_unit_type='similarity', reasoning_unit='answer_unit', \
        memory_state_update_rule=None, answer_state_update_rule='mlp', attention_style='similarity', \
        answer_doc_ids=None, sents_len=None, oracle=None, reinforce=False, attention_cell_dropout=False, \
        read_topk_docs=0):
        """
        num_hops: the number of mac cell chained together, or number of reasoning steps.
        bidirectional_input_unit: use bi-lstm for input unit. Default to false to save memory.
        prediction: prediction layer. Could be 'span-single/dual', 'candidates'
        reuse_cell: use one single cell for all reasoning steps. (not sure what Hudson and Mannning did.)
        """
        # The hierarchical variant only supports candidate prediction with a
        # single shared cell.
        assert prediction == "candidates"
        assert reuse_cell == True
        super(HierarchicalAttnMACRnn, self).__init__(batch_size, context_dim, query_dim, hidden_dim, num_hops, \
            bidirectional_input_unit, prediction, reuse_cell, is_train, use_control_unit, mode, output_unit_type, \
            reasoning_unit, answer_state_update_rule)
        self.input_keep_prob = input_keep_prob
        # Per-hop attention traces, populated while the graph runs.
        self.top_doc_attn = []
        self.top_attn_prob = []
        self.doc_attn = []
        self.read_strategy = read_strategy
        self.rv_doc_history = []
        self.doc_indices_history = []
        self.attention_style = attention_style
        self.memory_state_update_rule = memory_state_update_rule
        self.oracle = oracle
        if self.oracle is not None:
            # Oracle supervision requires knowing which document holds the answer.
            assert answer_doc_ids is not None
        self.answer_doc_ids = answer_doc_ids
        self.sents_len = sents_len
        self.answer_list = []
        # Placeholders allowing states/histories to be fed externally —
        # NOTE(review): their exact consumer is not visible here; confirm usage.
        self._c_state = tf.placeholder('float', [batch_size, query_dim], name='_c_state')
        self._m_state = tf.placeholder('float', [batch_size, hidden_dim], name='_m_state')
        self._a_state = tf.placeholder('float', [batch_size, hidden_dim], name='_a_state')
        self._c_history = tf.placeholder('float', [batch_size, None, query_dim], name='_c_history')
        self._m_history = tf.placeholder('float', [batch_size, None, hidden_dim], name='_m_history')
        self.reinforce = reinforce
        self.attention_cell_dropout = attention_cell_dropout
        self.read_topk_docs = read_topk_docs
def apply(self, context, context_st, query, query_st, q_sub_st, q_len, c_mask, cdoc_mask, q_mask, candidates, cand_mask, greedy_read=False, reuse=False):
    """Build the multi-hop reasoning graph and return candidate answer scores.

    Chains ``num_hops`` HierarchicalAttnMACCell steps (with tied weights)
    over the documents, then scores the answer candidates with
    ``MACOutputUnit``.

    Args (shapes inferred from the einsum/mask usage below -- confirm):
        context: word-level document representations, [N, M, JX, context_dim].
        context_st: per-document summary vectors.
        query / query_st: word-level and summary query representations.
        q_sub_st: query-subject summary used to initialise the memory state.
        q_len: query lengths (unused in this method).
        c_mask / cdoc_mask / q_mask: word-, document- and query-level masks.
        candidates: candidate representations; axis 1 is squeezed away.
        cand_mask: candidate mask (unused in this method).
        greedy_read: forwarded to the cell; take argmax documents instead of
            sampling.
        reuse: reuse variables of a previously built graph.

    Returns:
        Candidate logits expanded on axis 1 (``tf.expand_dims(g1, axis=1)``).
    """
    batch_size = self.batch_size
    hidden_dim = self.hidden_dim
    query_dim = self.query_dim
    # Per-document word counts, [N, M].
    self.docs_len = tf.reduce_sum(tf.cast(c_mask, 'int32'), 2)
    candidates = tf.squeeze(candidates, axis=1)
    # Zero-initialised control / memory / answer states.
    c_state = tf.zeros((batch_size, query_dim))
    m_state = tf.zeros((batch_size, hidden_dim))
    a_state = tf.zeros((batch_size, hidden_dim))
    with tf.variable_scope('MACRnn'):
        # The memory state starts as a linear projection of the query subject.
        with tf.variable_scope('q_sub_proj'):
            W = tf.get_variable('W', [query_dim, hidden_dim])
            b = tf.get_variable('b', [hidden_dim])
            m_state = tf.matmul(q_sub_st, W) + b
        # Per-hop histories of states and attention tensors.
        self.c_history = []
        self.m_history = []
        self.a_history = []
        self.doc_attn_logits_lst = []
        self.word_attn_logits_lst = []
        self.doc_attn_weights_lst = []
        # A single GRU cell shared by every hop (reuse_cell is asserted True).
        cell = tf.contrib.rnn.GRUCell(hidden_dim)
        self.cell = cell
        for i in range(self.num_hops):
            # Same scope name for every hop -> weights are tied across hops.
            scope_str = 'MACRnn-layer-%d' % 0
            if self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step' and i > 1:
                # Replay from the states recorded at the first hop.
                m_state = self.m_history[0]
                a_state = self.a_history[0]
            c_state, m_state, a_state, doc_attn_logits, doc_attn_weights, word_attn_logits = self.HierarchicalAttnMACCell(i, cell, query, query_st, q_sub_st, context, context_st, c_mask, cdoc_mask, \
                q_mask, self.c_history, self.m_history, c_state, m_state, a_state, scope_str, reuse=(reuse or i!=0), greedy_read=greedy_read)
            self.doc_attn_logits_lst.append(doc_attn_logits)
            self.word_attn_logits_lst.append(word_attn_logits)
            self.doc_attn_weights_lst.append(doc_attn_weights)
            self.c_history.append(c_state)
            self.m_history.append(m_state)
            if (self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc') and i == self.num_hops - 1:
                # After the last hop, re-read the concatenation of all selected
                # documents/sentences (built up by write_unit) with bi-attention
                # plus a BiLSTM, and use the attended vector as the answer state.
                with tf.variable_scope("concat_read_lstm", reuse=False):
                    max_len = tf.reduce_max(self.concat_selected_doc_len)
                    self.concat_selected_doc_mask = []
                    for k in range(self.batch_size):
                        concat_selected_doc_mask_k = tf.concat(values=[tf.ones([self.concat_selected_doc_len[k]]), tf.zeros([max_len-self.concat_selected_doc_len[k]])], axis=0)
                        self.concat_selected_doc_mask.append(concat_selected_doc_mask_k)
                    self.concat_selected_doc = tf.stack(self.concat_selected_doc, axis=0)
                    self.concat_selected_doc_mask = tf.cast(tf.stack(self.concat_selected_doc_mask, axis=0), 'bool')
                    p0 = biattention_layer(self.is_train, self.concat_selected_doc, query, h_mask=self.concat_selected_doc_mask, u_mask=q_mask)
                    p0 = tf.squeeze(p0, axis=1)
                    cell_fw = BasicLSTMCell(40, state_is_tuple=True)
                    cell_bw = BasicLSTMCell(40, state_is_tuple=True)
                    cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob=self.input_keep_prob)
                    cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob=self.input_keep_prob)
                    (fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, p0, self.concat_selected_doc_len, dtype='float')
                    x = tf.concat(axis=2, values=[fw_h, bw_h])
                    logits = linear_logits([x], True, input_keep_prob=self.input_keep_prob, mask=self.concat_selected_doc_mask, is_train=self.is_train, scope='logits1')
                    probs = tf.nn.softmax(logits)
                    doc_rep = tf.einsum('ijk,ij->ik', self.concat_selected_doc, probs)
                    a_state = doc_rep
            self.a_history.append(a_state)
        if self.oracle == 'extra':
            # One extra oracle-guided hop with the same tied weights; only the
            # answer state it produces is kept.
            scope_str = 'MACRnn-layer-%d' % 0
            if self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step' and i > 1:
                m_state = self.m_history[0]
                a_state = self.a_history[0]
            _, _, a_state, _, _, _ = self.HierarchicalAttnMACCell(self.num_hops, cell, query, query_st, q_sub_st, context, context_st, c_mask, cdoc_mask, \
                q_mask, self.c_history, self.m_history, c_state, m_state, a_state, scope_str, reuse=True, greedy_read=greedy_read)
        if self.prediction == 'candidates':
            if self.output_unit_type == 'triplet-mlp':
                g1 = self.MACOutputUnit(a_state, context, candidates, query=query)
                if (self.reasoning_unit != 'concat_first_sent' and self.reasoning_unit != 'concat_full_doc') and (self.reasoning_unit != 'attention-lstm' or self.read_strategy != 'one_doc_per_it'):
                    # Also score candidates from every intermediate answer state.
                    for i in range(self.num_hops):
                        # NOTE(review): unlike the branch below, this call does not
                        # pass reuse=True -- confirm MACOutputUnit handles reuse.
                        gi = self.MACOutputUnit(self.a_history[i], context, candidates, query=query)
                        self.answer_list.append(tf.expand_dims(gi, axis=1))
            else:
                g1 = self.MACOutputUnit(a_state, context, candidates)
                if (self.reasoning_unit != 'concat_first_sent' and self.reasoning_unit != 'concat_full_doc') and (self.reasoning_unit != 'attention-lstm' or self.read_strategy != 'one_doc_per_it'):
                    for i in range(self.num_hops):
                        gi = self.MACOutputUnit(self.a_history[i], context, candidates, reuse=True)
                        self.answer_list.append(tf.expand_dims(gi, axis=1))
            return tf.expand_dims(g1, axis=1)
        else:
            raise NotImplementedError
def initialize_state(self, q_sub):
    """Project the question-subject vector linearly to form an initial state."""
    with tf.variable_scope('initial_m'):
        weight = tf.get_variable('W', [self.hidden_dim * 2, self.hidden_dim])
        bias = tf.get_variable('b', [self.hidden_dim])
        return tf.matmul(q_sub, weight) + bias
def HierarchicalAttnMACCell(self, layer: int, cell, cw, cw_st, q_sub_st, k, k_st, c_mask, cdoc_mask, q_mask, c_history, m_history, c_state, m_state, a_state, scope_str, \
    reuse=False, out_of_graph=False, greedy_read=False):
    """
    The 2nd implementation based on MAC Cell with hierarchical attention.
    The read unit does not depend on c_state any more.
    Added a_state.
    Input: k [N, M, JX, context_dim]

    One reasoning hop: the control unit re-attends the query, the read unit
    computes document- and word-level attention over k (optionally committing
    to a single document per hop, depending on self.read_strategy), and the
    write unit updates the GRU memory state and derives the new answer state
    according to self.reasoning_unit.

    Args (partially inferred -- confirm against callers):
        layer: 0-based hop index.
        cell: shared GRU cell used by the write unit.
        cw / cw_st: query word representations and query summary vector.
        q_sub_st: query-subject summary vector.
        k / k_st: word-level and per-document summary representations.
        c_mask / cdoc_mask / q_mask: word-, document- and query-level masks.
        c_history / m_history: lists of per-hop control/memory states.
        c_state, m_state, a_state: current control, memory and answer states.
        scope_str: variable scope name (shared across hops for weight tying).
        reuse: reuse variables created by an earlier hop.
        out_of_graph: when True, do not mutate the recorded attention
            histories (side graph construction).
        greedy_read: take argmax documents instead of sampling (train mode).

    Returns:
        (new_c_state, new_m_state, new_a_state, document attention logits,
         document attention weights, word attention logits)
    """
    hidden_dim = self.hidden_dim
    context_dim = self.context_dim
    query_dim = self.query_dim
    def control_unit():
        # Attend over the query words conditioned on the previous control and
        # memory states; returns the new control state.
        with tf.variable_scope('control_unit'):
            W_cq = tf.get_variable('W_cq', [query_dim + hidden_dim, query_dim])
            b_cq = tf.get_variable('b_cq', [query_dim])
            cq = tf.matmul(tf.concat([c_state, m_state], axis=1), W_cq) + b_cq
            pre_ca = tf.einsum('ik,ijk->ijk', cq, cw)
            ca = linear_logits([pre_ca], True, input_keep_prob=self.input_keep_prob, is_train=self.is_train, mask=q_mask)
            cv = tf.nn.softmax(ca)
            return tf.einsum('ijk,ij->ik', cw, cv)
    def read_unit(m_state):
        # Hierarchical (document-level then word-level) attention over k,
        # conditioned on a projection of the memory state.
        with tf.variable_scope('read_unit'):
            W_cm = tf.get_variable('W_cm', [hidden_dim, hidden_dim])
            b_cm = tf.get_variable('b_cm', [hidden_dim])
            cm_state = tf.matmul(m_state, W_cm) + b_cm
            if layer > 1 and self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
                # Replay the document attention computed at hop 1.
                ra_doc = self.doc_attn_logits_lst[1]
                rv_doc = self.doc_attn_weights_lst[1]
            else:
                # Document-level attention over the per-document summaries k_st.
                W_k2 = tf.get_variable('W_k2', [query_dim, hidden_dim])
                b_k2 = tf.get_variable('b_k2', [hidden_dim])
                I_doc = tf.einsum('ijk,kl->ijl', k_st, W_k2) + b_k2 # [N, M, hidden_dim]
                pre_ra_doc = tf.einsum('ik,ijk->ijk', cm_state, I_doc)
                if self.attention_style == 'Bahdanau':
                    # Additive-attention variant of the scoring function.
                    W_b2 = tf.get_variable('W_b2', [hidden_dim, hidden_dim])
                    b_b2 = tf.get_variable('b_b2', [hidden_dim])
                    shape_1 = tf.shape(I_doc)[1]
                    tiled_cm_state = tf.tile(tf.expand_dims(cm_state, axis=1), [1, shape_1, 1])
                    concat_in = tf.reshape(tiled_cm_state, [-1, hidden_dim]) + tf.reshape(I_doc, [-1, hidden_dim]) + tf.reshape(pre_ra_doc, [-1, hidden_dim])
                    pre_ra_doc = tf.matmul(concat_in, W_b2) + b_b2
                    pre_ra_doc = tf.reshape(pre_ra_doc, [-1, shape_1, hidden_dim])
                ra_doc = linear_logits([pre_ra_doc], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=cdoc_mask, scope='logits2')
                rv_doc = tf.nn.softmax(ra_doc) # document-level attention weight
            # Word-level attention
            if self.memory_state_update_rule is None:
                W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
                b_k = tf.get_variable('b_k', [hidden_dim])
                I_word = tf.einsum('ijkl,lm->ijkm', k, W_k) + b_k
                pre_ra_word = tf.einsum('il,ijkl->ijkl', cm_state, I_word)
                if self.attention_style == 'Bahdanau':
                    W_b = tf.get_variable('W_b', [hidden_dim, hidden_dim])
                    b_b = tf.get_variable('b_b', [hidden_dim])
                    shape_1 = tf.shape(I_word)[1]
                    shape_2 = tf.shape(I_word)[2]
                    tiled_cm_state = tf.tile(tf.expand_dims(tf.expand_dims(cm_state, axis=1), axis=1), [1, shape_1, shape_2, 1])
                    concat_in = tf.reshape(tiled_cm_state, [-1, hidden_dim]) + tf.reshape(I_word, [-1, hidden_dim]) + tf.reshape(pre_ra_word, [-1, hidden_dim])
                    pre_ra_word = tf.matmul(concat_in, W_b) + b_b
                    pre_ra_word = tf.reshape(pre_ra_word, [-1, shape_1, shape_2, hidden_dim])
                ra_word = linear_logits([pre_ra_word], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=c_mask, scope='logits1')
                rv_word = tf.nn.softmax(ra_word) # word-level attention weight
                r_doc = tf.einsum('ijkl,ijk->ijl', k, rv_word) # [N, M, context_dim]
            doc_indices = None
            if self.read_strategy == 'one_doc_per_it' or self.read_strategy == 'one_doc_per_it_and_mask_all_read' or self.read_strategy == 'one_doc_per_it_and_mask_read_pairs' \
                or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
                # Commit to exactly one document this hop; the variants differ
                # only in which previously-read documents get masked out first.
                if out_of_graph or layer > 0:
                    if self.read_strategy == 'one_doc_per_it_and_mask_read_pairs':
                        prev_read = self.doc_attn[layer-1]
                        doc_idx = tf.expand_dims(tf.stack(self.doc_attn, axis=1), axis=2)
                        shape = tf.shape(rv_doc)
                        updates = tf.negative(tf.ones([self.batch_size, layer]))
                        batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1), axis=1), [1, layer, 1])
                        indices = tf.concat([batch_nums, doc_idx], axis=2) # [batch_size, layer, 2]
                    elif self.read_strategy == 'one_doc_per_it':
                        # Mask only the document read in the previous hop.
                        if out_of_graph:
                            doc_idx = tf.stack(self.doc_attn, axis=1)[:, layer-1]
                        else:
                            doc_idx = self.doc_attn[layer-1]
                        shape = tf.shape(rv_doc)
                        updates = tf.negative(tf.ones([self.batch_size]))
                        batch_nums = tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1)
                        indices = tf.concat([batch_nums, tf.reshape(doc_idx, [self.batch_size, 1])], axis=1)
                    elif self.read_strategy == 'one_doc_per_it_and_mask_all_read' or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
                        # Mask every document read so far.
                        #if self.mode == 'train':
                        doc_idx = tf.stack(self.doc_attn, axis=1)
                        # else:
                        #     doc_idx = tf.expand_dims(tf.stack(self.doc_attn, axis=1), axis=2)
                        shape = tf.shape(rv_doc)
                        updates = tf.negative(tf.ones([self.batch_size, layer]))
                        batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1), axis=1), [1, layer, 1])
                        indices = tf.concat([batch_nums, doc_idx], axis=2) # [batch_size, layer, 2]
                        # Tiny floor keeps tf.log(rv_doc) finite on masked entries.
                        updates_2 = tf.ones([self.batch_size, layer]) * 1e-30
                        very_small_number = tf.scatter_nd(indices, updates_2, shape)
                    mask = tf.scatter_nd(indices, updates, shape)
                    mask = mask + 1
                    rv_doc = rv_doc * mask
                    if self.read_strategy == 'one_doc_per_it_and_mask_all_read' or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
                        rv_doc = rv_doc + very_small_number
                if self.mode == 'test':
                    if self.oracle == 'final' and layer == self.num_hops - 1:
                        # Oracle: force the gold document on the final hop.
                        new_doc_idx = tf.slice(self.answer_doc_ids, [0, 0], [-1, 1])
                    else:
                        new_doc_idx = tf.expand_dims(tf.argmax(tf.log(rv_doc), axis=1), axis=-1)
                elif self.mode == 'train':
                    if (self.oracle == 'final' and layer == self.num_hops - 1) or (self.oracle == 'extra' and layer == self.num_hops):
                        new_doc_idx = tf.slice(self.answer_doc_ids, [0, 0], [-1, 1])
                    else:
                        if self.read_topk_docs > 0:
                            # NOTE(review): this keeps the first read_topk_docs
                            # positions, not the k highest-scoring documents --
                            # confirm the documents are pre-sorted by relevance.
                            topk_doc_mask_1 = tf.ones([self.batch_size, tf.minimum(tf.shape(rv_doc)[1], self.read_topk_docs)])
                            topk_doc_mask_0 = tf.zeros([self.batch_size, tf.maximum(tf.shape(rv_doc)[1]-self.read_topk_docs, 0)])
                            topk_doc_mask = tf.concat([topk_doc_mask_1, topk_doc_mask_0], axis=1)
                            rv_doc = rv_doc * topk_doc_mask
                        if (greedy_read or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step') and \
                            self.reinforce is False:
                            new_doc_idx = tf.expand_dims(tf.argmax(tf.log(rv_doc), axis=1), axis=-1)
                        else:
                            # Sample a document (REINFORCE-style exploration).
                            new_doc_idx = tf.multinomial(tf.log(rv_doc), 1)
                            #new_doc_idx = tf.argmax(tf.log(rv_doc), axis=1)
                else:
                    raise NotImplementedError
                new_doc_idx = tf.cast(new_doc_idx, 'int32')
                shape = tf.shape(rv_doc)
                updates = tf.ones([self.batch_size])
                batch_nums = tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1)
                doc_indices = tf.concat([batch_nums, tf.cast(tf.reshape(new_doc_idx, [self.batch_size, 1]), 'int32')], axis=1)
                if self.memory_state_update_rule == 'bi-attn':
                    # Re-read the selected document with bi-attention vs the query.
                    # NOTE(review): `indices` is only defined when
                    # out_of_graph or layer > 0 -- confirm this branch is never
                    # reached on the first hop.
                    selected_doc = tf.gather_nd(k, indices)
                    selected_mask = tf.gather_nd(c_mask, indices)
                    p0 = biattention_layer(self.is_train, selected_doc, cw, h_mask=selected_mask, u_mask=q_mask)
                    p0 = tf.squeeze(p0, axis=1)
                    W_p0 = tf.get_variable('W_p0', [hidden_dim*2, hidden_dim])
                    b_p0 = tf.get_variable('b_p0', [hidden_dim])
                    I_word = tf.einsum('ijk,km->ijm', p0, W_p0) + b_p0
                    pre_ra_word = tf.einsum('ik,ijk->ijk', cm_state, I_word)
                    ra_word = linear_logits([pre_ra_word], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=selected_mask, scope='logits1')
                    rv_word = tf.nn.softmax(ra_word) # word-level attention weight
                    r_doc = tf.einsum('ikl,ik->il', p0, rv_word) # [N, M, context_dim]
                    r = r_doc # No need to apply doc_mask again.
                else:
                    # Hard selection: read only the chosen document's vector.
                    r = tf.gather_nd(r_doc, doc_indices)
                print('one_doc_per_it')
            elif self.read_strategy == 'mask_previous_max':
                # Soft read over all documents, but zero out the previous argmax.
                if layer > 0:
                    doc_idx = self.doc_attn[layer-1]
                    shape = tf.shape(rv_doc)
                    updates = tf.negative(tf.ones([self.batch_size]))
                    batch_nums = tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1)
                    indices = tf.concat([batch_nums, tf.cast(tf.reshape(doc_idx, [self.batch_size, 1]), 'int32')], axis=1)
                    mask = tf.scatter_nd(indices, updates, shape)
                    mask = mask + 1
                    #self.mask = mask
                    rv_doc = rv_doc * mask
                new_doc_idx = tf.argmax(tf.log(rv_doc), axis=1)
                r = tf.einsum('ijk,ij->ik', r_doc, rv_doc)
            else:
                assert self.read_strategy == 'full'
                # Fully soft read: expectation of r_doc under rv_doc.
                new_doc_idx = tf.argmax(tf.log(rv_doc), axis=1)
                r = tf.einsum('ijk,ij->ik', r_doc, rv_doc)
            if out_of_graph is False:
                # Record this hop's read decisions for later hops / inspection.
                self.doc_attn.append(new_doc_idx)
                self.rv_doc_history.append(rv_doc)
                self.doc_indices_history.append(doc_indices)
            _, topk_docs = tf.nn.top_k(rv_doc, 3)
            # Top words of batch element 0's best document (debug diagnostics).
            topk_words_prob, topk_words = tf.nn.top_k(rv_word[:,topk_docs[0, 0]], 20)
            if out_of_graph is False:
                self.top_doc_attn.append(topk_docs)
                # NOTE(review): self.top_attn is not initialised in the visible
                # __init__ (only top_doc_attn / top_attn_prob are) -- confirm it
                # is created elsewhere.
                self.top_attn.append(topk_words)
                self.top_attn_prob.append(topk_words_prob)
            return r, ra_doc, rv_doc, ra_word
    def write_unit(r, new_c_state, c_history, m_history, query=None):
        # Update the GRU memory with the retrieved vector r and compute the
        # new answer state according to self.reasoning_unit.
        with tf.variable_scope('write_unit'):
            doc_indices = self.doc_indices_history[layer]
            # NOTE(review): GRUCell.__call__ returns (output, new_state); for a
            # GRU both tensors are identical, so this unpacking order works.
            new_m_state, output = cell(r, m_state)
            if self.reasoning_unit == 'answer_unit':
                # Gated MLP over the GRU output and projected control state.
                W_c = tf.get_variable('W_c', [query_dim, hidden_dim])
                b_c = tf.get_variable('b_c', [hidden_dim])
                c_proj = tf.matmul(new_c_state, W_c) + b_c
                W1 = tf.get_variable('W1', [3*hidden_dim, 2*hidden_dim])
                b1 = tf.get_variable('b1', [2*hidden_dim])
                W2 = tf.get_variable('W2', [2*hidden_dim, hidden_dim])
                b2 = tf.get_variable('b2', [hidden_dim])
                concat_in = tf.concat(axis=-1, values=[output, c_proj, output*c_proj])
                new_ans = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                if self.answer_state_update_rule == 'bi-attn':
                    assert query is not None
                    # NOTE(review): doc_mask is not defined in this scope -- this
                    # branch would fail if executed; confirm it is unused.
                    selected_doc = tf.einsum('ijkl,ij->ikl', k, doc_mask)
                    selected_mask = tf.cast(tf.einsum('ijk,ij->ik', tf.cast(c_mask, 'float32'), doc_mask), 'bool')
                    p0 = biattention_layer(self.is_train, selected_doc, query, h_mask=selected_mask, u_mask=q_mask)
                    p0 = tf.squeeze(p0, axis=1)
                    logits = linear_logits([selected_doc, p0], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=selected_mask)
                    weights = tf.nn.softmax(logits)
                    new_ans_2 = tf.einsum('ijk,ij->ik', selected_doc, weights)
                    W_a = tf.get_variable('W_a', [self.context_dim, hidden_dim])
                    b_a = tf.get_variable('b_a', [hidden_dim])
                    new_ans_2 = tf.matmul(new_ans_2, W_a) + b_a
                    new_ans = tf.concat([new_ans, new_ans_2], axis=-1)
                    W_a2 = tf.get_variable('W_a2', [hidden_dim * 2, hidden_dim])
                    b_a2 = tf.get_variable('b_a2', [hidden_dim])
                    new_ans = tf.matmul(new_ans, W_a2) + b_a2
                else:
                    assert self.answer_state_update_rule == 'mlp'
                # Scalar gate interpolates between the new answer and the old.
                W_g = tf.get_variable('W_g', [hidden_dim, 1])
                b_g = tf.get_variable('b_g', [1])
                gate = tf.matmul(output*c_proj, W_g) + b_g
                new_a_state = tf.sigmoid(gate) * new_ans + (1-tf.sigmoid(gate)) * a_state
            elif self.reasoning_unit == 'mlp':
                # Two-layer MLP over [r, control, r*control].
                c_proj = new_c_state
                W1 = tf.get_variable('W1', [3*query_dim, 3*query_dim])
                b1 = tf.get_variable('b1', [3*query_dim])
                W2 = tf.get_variable('W2', [3*query_dim, hidden_dim])
                b2 = tf.get_variable('b2', [hidden_dim])
                # concat_in = tf.concat(axis=-1, values=[output, c_proj, output*c_proj])
                concat_in = tf.concat(axis=-1, values=[r, c_proj, r*c_proj])
                new_a_state = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
            elif self.reasoning_unit == 'bi-attn':
                # Bi-attention between the selected document and the query.
                c_proj = new_c_state
                #selected_doc = tf.einsum('ijkl,ij->ikl', k, doc_mask)
                selected_doc = tf.gather_nd(k, doc_indices)
                #selected_mask = tf.cast(tf.einsum('ijk,ij->ik', tf.cast(c_mask, 'float32'), doc_mask), 'bool')
                selected_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc_indices), 'bool')
                p0 = biattention_layer(self.is_train, selected_doc, query, h_mask=selected_mask, u_mask=q_mask)
                p0 = tf.squeeze(p0, axis=1)
                logits = linear_logits([selected_doc, p0], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=selected_mask)
                weights = tf.nn.softmax(logits)
                new_a_state = tf.einsum('ijk,ij->ik', selected_doc, weights)
            elif self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc':
                # Accumulate the selected document (or just its first sentence)
                # into a growing per-batch concatenation; the final answer state
                # is produced later by apply()'s concat_read_lstm block.
                doc2 = tf.gather_nd(k, doc_indices)
                doc2_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc_indices), 'bool')
                if self.reasoning_unit == 'concat_first_sent':
                    doc2_first_sent_len = tf.gather_nd(self.sents_len[:, :, 0], doc_indices)
                else:
                    doc2_first_sent_len = tf.gather_nd(self.docs_len, doc_indices)
                if layer == 0:
                    print(doc2.get_shape())
                    print(tf.reshape(tf.slice(doc2, [0, 0, 0], [-1, tf.reduce_max(doc2_first_sent_len), -1]), [self.batch_size, -1, context_dim]).get_shape())
                    self.concat_selected_doc = tf.unstack(tf.reshape(tf.slice(doc2, [0, 0, 0], [-1, tf.reduce_max(doc2_first_sent_len), -1]), [self.batch_size, -1, context_dim]), axis=0)
                    assert len(self.concat_selected_doc) == self.batch_size, (len(self.concat_selected_doc))
                    self.concat_selected_doc_len = doc2_first_sent_len
                else:
                    for i in range(self.batch_size):
                        prev_doc = tf.slice(self.concat_selected_doc[i], [0, 0], [self.concat_selected_doc_len[i], -1])
                        new_doc = tf.slice(doc2[i], [0, 0], [doc2_first_sent_len[i], -1])
                        padding_len = tf.reduce_max(self.concat_selected_doc_len + doc2_first_sent_len) - self.concat_selected_doc_len[i] - doc2_first_sent_len[i]
                        padding = tf.zeros([padding_len, context_dim])
                        self.concat_selected_doc[i] = tf.concat([prev_doc, new_doc, padding], axis=0)
                    self.concat_selected_doc_len += doc2_first_sent_len
                new_a_state = None
            elif self.reasoning_unit == 'attention-lstm':
                # Read the newly selected document with an LSTM that attends
                # over the previously selected document (the query on hop 0).
                if layer > 0:
                    if self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
                        doc1_indices = self.doc_indices_history[0]
                    else:
                        doc1_indices = self.doc_indices_history[layer-1]
                    doc1 = tf.gather_nd(k, doc1_indices)
                    doc1_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc1_indices), 'bool')
                else:
                    doc1 = cw
                    doc1_mask = q_mask
                if self.read_strategy == 'one_doc_per_it' and (layer < self.num_hops - 1 and layer > 0):
                    # Intermediate hops produce no answer state in this setting.
                    new_a_state = None
                else:
                    doc1_len = tf.reduce_sum(tf.cast(doc1_mask, 'int32'), axis=-1)
                    doc2 = tf.gather_nd(k, doc_indices)
                    doc2_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc_indices), 'bool')
                    doc2_len = tf.reduce_sum(tf.cast(doc2_mask, 'int32'), axis=-1)
                    lstm_cell = BasicLSTMCell(hidden_dim, state_is_tuple=True)
                    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                        num_units=hidden_dim,
                        memory=doc1,
                        memory_sequence_length=doc1_len)
                    attention_cell = tf.contrib.seq2seq.AttentionWrapper(lstm_cell, attention_mechanism, output_attention=False)
                    if self.attention_cell_dropout:
                        attention_cell = tf.contrib.rnn.DropoutWrapper(attention_cell, input_keep_prob=self.input_keep_prob)
                    decoder_initial_state = attention_cell.zero_state(
                        dtype=tf.float32, batch_size=self.batch_size)
                    lstm_output, _ = tf.nn.dynamic_rnn( cell=attention_cell,
                        inputs=doc2,
                        sequence_length=doc2_len,
                        initial_state=decoder_initial_state,
                        dtype=tf.float32)
                    W_x = tf.get_variable('W_x', [hidden_dim, context_dim])
                    b_x = tf.get_variable('b_x', [context_dim])
                    #x = tf.reshape(tf.einsum('ijk,kl->ijl', lstm_output, W_x) + b_x, [config.batch_size, self.tree_width, -1, d])
                    x = tf.einsum('ijk,kl->ijl', lstm_output, W_x) + b_x
                    # Score each position by similarity to the question subject
                    # and the question body, then average both distributions.
                    similarity_with_q_sub = tf.einsum('ijk,ik->ijk', x, q_sub_st)
                    similarity_with_q_bod = tf.einsum('ijk,ik->ijk', x, cw_st)
                    doc2_mask = tf.reshape(doc2_mask, [self.batch_size, -1])
                    logits_q_sub = linear_logits([similarity_with_q_sub], True, input_keep_prob=self.input_keep_prob, mask=doc2_mask, \
                        is_train=self.is_train, scope='logits1')
                    logits_q_bod = linear_logits([similarity_with_q_bod], True, input_keep_prob=self.input_keep_prob, mask=doc2_mask, \
                        is_train=self.is_train, scope='logits2')
                    similarity_w_qsub_probs = tf.nn.softmax(logits_q_sub)
                    similarity_w_qbod_probs = tf.nn.softmax(logits_q_bod)
                    similarity_probs = (similarity_w_qsub_probs + similarity_w_qbod_probs) / 2
                    doc_rep = tf.einsum('ijk,ij->ik', doc2, similarity_probs)
                    new_a_state = doc_rep
                    # Keep per-hop top-k similarity diagnostics.
                    qsub_topk_probs, qsub_topk_ids = tf.nn.top_k(similarity_w_qsub_probs, 10)
                    qbod_topk_probs, qbod_topk_ids = tf.nn.top_k(similarity_w_qbod_probs, 10)
                    if layer > 0:
                        self.qsub_topk_ids.append(qsub_topk_ids)
                        self.qsub_topk_probs.append(qsub_topk_probs)
                        self.qbod_topk_ids.append(qbod_topk_ids)
                        self.qbod_topk_probs.append(qbod_topk_probs)
                        self.qsub_all_probs.append(similarity_w_qsub_probs)
                    else:
                        self.qsub_topk_ids = [qsub_topk_ids]
                        self.qsub_topk_probs = [qsub_topk_probs]
                        self.qbod_topk_ids = [qbod_topk_ids]
                        self.qbod_topk_probs = [qbod_topk_probs]
                        self.qsub_all_probs = [similarity_w_qsub_probs]
            elif self.reasoning_unit == 'None' or self.reasoning_unit is None:
                # No dedicated reasoning unit: answer state is the GRU output.
                new_a_state = output
            else:
                raise NotImplementedError
            return new_m_state, new_a_state
    if out_of_graph is False and layer > 0:
        # Stack per-hop history lists into tensors.
        c_history = tf.stack(c_history, axis=1)
        m_history = tf.stack(m_history, axis=1)
    with tf.variable_scope(scope_str, reuse=reuse) as scope:
        if self.use_control_unit:
            new_c_state = control_unit()
        else:
            new_c_state = cw_st
        # Read unit
        r, ra_doc, rv_doc, ra_word = read_unit(m_state)
        # Write unit
        new_m_state, new_a_state = write_unit(r, new_c_state, c_history, m_history, cw)
    return new_c_state, new_m_state, new_a_state, ra_doc, rv_doc, ra_word
|
from django.contrib import admin
from .models import Tag, Post, PostTag
class TagAdmin(admin.ModelAdmin):
    """Admin configuration for Tag: show only the name column in the list view."""
    list_display = ('name', )
class PostTagInline(admin.TabularInline):
    """Edit Post-Tag links as tabular inline rows on the Post admin page."""
    model = PostTag
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: title column plus inline tag editing."""
    list_display = ('title', )
    inlines = [
        PostTagInline,
    ]
# Register the models so Tag and Post are manageable in the Django admin
# using the configurations defined above.
admin.site.register(Tag, TagAdmin)
admin.site.register(Post, PostAdmin)
|
# For each test case read a range [l, r] and output a pair (x, y) with
# l <= x < y <= r and y divisible by x: (l, 2*l) works whenever 2*l <= r,
# otherwise print "-1 -1" to signal that no such pair exists.
for _ in range(int(input())):
    l, r = map(int, input().split())
    if 2 * l <= r:
        print(l, 2 * l)
    else:
        print(-1, -1)
|
import Queue
import threading
import time
import logging
import logging.config
from fxengine.execution.execution import ExecutionAtOANDA, MockExecution
from fxengine.portfolio.portfolio import Portfolio
from fxengine.settings import *
from fxengine.strategy.strategy import TestRandomStrategy
from fxengine.streaming.streaming import *
def trade(events, strategy, portfolio, execution, stoprequest):
    """
    Carries out an infinite while loop that polls the events queue and
    directs each event to either the strategy component, the execution
    handler or the portfolio.

    Runs until `stoprequest` (a threading.Event) is set; it then drains the
    queue keeping only fill events, closes all open positions, and processes
    the resulting order/fill events before returning.
    """
    while not stoprequest.isSet():
        try:
            # Block for at most half a second so the loop can notice stoprequest.
            event = events.get(True, 0.5)
        except Queue.Empty:
            pass
        else:
            if event is not None:
                if event.type == 'TICK':
                    logger.debug("recv new tick signal: %s", event)
                    strategy.calculate_signals(event)
                    portfolio.execute_tick_event(event)
                elif event.type == 'SIGNAL':
                    logger.info("recv new order signal: %s", event)
                    portfolio.execute_signal_event(event)
                elif event.type == 'ORDER':
                    logger.info("Executing order! %s", event)
                    execution.execute_order(event)
                elif event.type == 'FILL':
                    logger.info("recv new fill event: %s", event)
                    portfolio.execute_fill_event(event)
    # Stop requested: drain remaining events, keeping only fill events so the
    # portfolio's position bookkeeping stays accurate.
    while not events.empty():
        event = events.get()
        if event is not None:
            if event.type == 'FILL':  # throw everything away except fill events
                logger.info("recv new fill event: %s", event)
                portfolio.execute_fill_event(event)
    # close all positions
    logger.info("Closing all positions")
    portfolio.execute_close_all_positions()
    # and execute the resulting order and fill events
    while not events.empty():
        event = events.get()
        if event is not None:
            if event.type == 'ORDER':
                logger.info("Executing order! %s", event)
                execution.execute_order(event)
            elif event.type == 'FILL':
                # BUG FIX: was 'Fill', which never matched the 'FILL' type used
                # everywhere else, so closing fills were silently dropped.
                logger.info("recv new fill event: %s", event)
                portfolio.execute_fill_event(event)
if __name__ == "__main__":
    # Wire up the streaming/execution components and run the trading loop in
    # one thread while prices stream into the shared queue from another.
    logging.config.fileConfig('logging.conf')
    logger = logging.getLogger(__name__) # get a new logger
    events = Queue.Queue() # Queue for communication between threads
    stoprequest = threading.Event() # For stopping the threads
    # Trade UNITS units of INSTRUMENTS
    instruments = INSTRUMENTS
    units = UNITS
    if BACKTEST:
        # Create the price streaming class
        prices = StreamingPricesFromFile(
            BACKTESTFILE, events, stoprequest
        )
        # Create the mock execution handler
        execution = MockExecution(events, prices)
    else:
        # Create the OANDA market price streaming class
        # making sure to provide authentication commands
        prices = StreamingForexPrices_OANDA(
            STREAM_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID,
            instruments, events, stoprequest
        )
        # Create the execution handler making sure to
        # provide authentication commands
        execution = ExecutionAtOANDA(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, events)
    # Create the strategy/signal generator, passing the
    # instrument, quantity of units and the events queue
    strategy = TestRandomStrategy(events)
    # Create the portfolio object that will be used to
    # compare the OANDA positions with the local, to
    # ensure backtesting integrity.
    portfolio = Portfolio(prices, events, equity=units)
    # Create two separate threads: One for the trading loop
    # and another for the market price streaming class
    trade_thread = threading.Thread(target=trade,
                                    args=(events, strategy, portfolio,
                                          execution, stoprequest))
    price_thread = threading.Thread(target=prices.stream_to_queue,
                                    args=[])
    # Start both threads
    trade_thread.start()
    price_thread.start()
    # say to the threads if i have pressed ctrl+c
    try:
        while trade_thread.is_alive():
            trade_thread.join(10)
    except (KeyboardInterrupt, SystemExit):
        logger.info("Sending stop request to threads")
        stoprequest.set()
    logger.info("Waiting for threads to terminate")
    # NOTE(review): price_thread is never joined here, and logging.shutdown()
    # may run while it is still streaming -- confirm this is intentional.
    logging.shutdown()
|
#!/usr/bin/python -Wd
# runTests.py -- Portage Unit Test Functionality
# Copyright 2006-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os, sys
import os.path as osp
import grp
import platform
import pwd
import signal
def debug_signal(signum, frame):
    """Signal handler that drops the process into the pdb debugger."""
    from pdb import set_trace
    set_trace()
# Install a signal handler that drops the test process into pdb on demand.
# Jython reserves SIGUSR1 internally, so use SIGUSR2 there.
if platform.python_implementation() == 'Jython':
    debug_signum = signal.SIGUSR2 # bug #424259
else:
    debug_signum = signal.SIGUSR1
signal.signal(debug_signum, debug_signal)
# Pretend that the current user's uid/gid are the 'portage' uid/gid,
# so things go smoothly regardless of the current user and global
# user/group configuration.
os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
# Insert our parent dir so we can do shiny import "tests"
# This line courtesy of Marienz and Pkgcore ;)
sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.abspath(__file__)))))
import portage
# Ensure that we don't instantiate portage.settings, so that tests should
# work the same regardless of global configuration file state/existence.
portage._disable_legacy_globals()
# Honour the NOCOLOR environment variable for test output.
if os.environ.get('NOCOLOR') in ('yes', 'true'):
    portage.output.nocolor()
import portage.tests as tests
from portage.const import PORTAGE_BIN_PATH
# Make sure the portage helper scripts are first on PATH.
path = os.environ.get("PATH", "").split(":")
path = [x for x in path if x]
# NOTE(review): os.path.samefile raises if path[0] does not exist -- assumes
# the first PATH entry is always a valid directory; confirm.
if not path or not os.path.samefile(path[0], PORTAGE_BIN_PATH):
    path.insert(0, PORTAGE_BIN_PATH)
    os.environ["PATH"] = ":".join(path)
del path
if __name__ == "__main__":
    sys.exit(tests.main())
|
# import the main window object (mw) from ankiqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo
# import all of the Qt GUI library
from aqt.qt import *
from anki.hooks import wrap
import anki.sync
# markdawn2.py from http://daringfireball.net/projects/markdown/
from markdown2 import Markdown
import re
def markdownconverter(arg = None):
    """Render Markdown to HTML in every card tagged "Markdown", then strip the tag.

    Parameters:
        arg: an Anki collection to process; when None (menu invocation) the
             main window's collection is used and a summary dialog is shown.
    Returns:
        The number of converted cards when at least one changed, otherwise
        None (implicit), so callers can use the result as a truthy flag.
    """
    col = mw.col if arg is None else arg
    changed = 0
    # Iterate over the cards carrying the Markdown tag.
    ids = col.findCards("tag:Markdown")
    for id in ids:
        card = col.getCard(id)
        note = card.note()
        # Convert every field of the note.
        for (name, value) in note.items():
            note[name] = remade(value)
        note.delTag("Markdown")
        # BUG FIX: original used "++changed", which in Python is a double
        # unary plus (a no-op), so the counter never advanced and the
        # function always reported 0 and returned None.
        changed += 1
        note.flush()
    if arg is None:
        showInfo("Done: %d db" % changed)
    if changed > 0:
        return changed
# Create a new menu item labelled "Markdown2Html".
action = QAction("Markdown2Html", mw)
# Run the converter when the menu item is clicked.
mw.connect(action, SIGNAL("triggered()"), markdownconverter)
# and add it to the tools menu
mw.form.menuTools.addAction(action)
def remade(data):
md = Markdown()
html = data
html = re.sub('<br />','\n', data)
html = md.convert(html)
return html
from aqt.sync import SyncManager
#https://groups.google.com/forum/#!topic/anki-addons/qmgXMKG2KRU
#Runs after sync has finished, before collection is reloaded
def mysync(self):
    """Post-sync hook: convert Markdown notes and re-sync if anything changed.

    Wrapped around SyncManager.sync; runs after sync has finished, before the
    collection is reloaded.
    """
    # Reload the collection if the sync left it unloaded.
    if not self.mw.col:
        self.mw.loadCollection()
    # Run the converter, which returns a truthy count when notes changed.
    if markdownconverter(self.mw.col):
        # Something changed, so unload and sync again to push the edits.
        self.mw.unloadCollection()
        self._sync()
# Monkey-patch SyncManager.sync so mysync runs after every sync completes.
SyncManager.sync= wrap(SyncManager.sync, mysync)
|
# Section 1
# Importing lets us reuse code written by others (or in our own files).
import random

# randint(a, b) gives a pseudorandom integer between a and b, inclusive.
random_integer = random.randint(1, 10)
print(random_integer)
# random() gives a float in [0.0, 1.0); scaling by 5 gives [0.0, 5.0).
random_float = random.random() * 5
print(random_float)

# Section 2
some_states_of_america = ["Delaware", "Pennsylvania", "New Jersey", "Georgia", "Connecticut"]
# Indexing with [3] selects the 4th item -- list indices start at 0.
print(some_states_of_america[3])
# Adding a one-element list with += appends a single item to the end
# (equivalent to .append()).
some_states_of_america += ["Massachusetts"]
print(some_states_of_america)
# Python docs for data structures https://docs.python.org/3/tutorial/datastructures.html
# Adding a longer list appends all of its elements (equivalent to .extend()).
some_states_of_america += ["Maryland", "South Carolina", "New Hampshire"]
print(some_states_of_america)
# Negative indices count from the end of the list: -1 is the last item.
print(some_states_of_america[-1])
# Lists can be nested inside other lists.
food = ["hotdog", "burger", "fries"]
condiments = ["catchup", "mustard", "relish"]
menuList = [food, condiments]
print(menuList)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 21:34:38 2020
@author: thomas
"""
import os, shutil, sys
import pandas as pd
import numpy as np
import random
import zipfile
import matplotlib
import matplotlib.pyplot as plt
from shutil import copyfile
import pathlib
# Remember the directory the script was launched from.
cwd_PYTHON = os.getcwd()
#CONSTANTS GRID PLACEMENT
# Disc radii used for grid placement (units not stated -- TODO confirm).
RADIUSLARGE = 0.002
RADIUSSMALL = 0.001
def Rotate(xy, theta):
    """Rotate the 2-D vector xy counter-clockwise by theta radians.

    Uses the standard 2-D rotation matrix:
    https://en.wikipedia.org/wiki/Rotation_matrix#In_two_dimensions
    """
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rotation = np.array([[cos_t, -1.0 * sin_t],
                         [sin_t, cos_t]])
    return rotation.dot(xy)
def Translate(xy, offset):
    """Return the coordinates xy shifted by offset (element-wise for arrays)."""
    shifted = xy + offset
    return shifted
# Python program to check if rectangles overlap
class Point:
    """A simple 2-D point holding x and y coordinates."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class Rectangle:
    """An oriented (possibly rotated) rectangle used for overlap testing.

    Overlap is decided with the separating-axis theorem: corner points are
    compared both in world axes and projected onto this rectangle's local
    axes; if any projection is disjoint the rectangles are separated.
    """

    def __init__(self, x, y, angle, name):
        # x / y (centre coordinates) are currently unused -- corners are
        # supplied directly via Diagonals() -- but kept for interface
        # stability with existing callers.
        self.angle = angle
        self.name = name
        # Unit vectors of the rectangle's local x/y axes.
        # BUG FIX: these previously referenced an undefined global `Theta`
        # instead of the `angle` parameter, raising NameError on construction.
        self.normX = np.array([np.cos(angle), np.sin(angle)])
        self.normY = np.array([np.cos(angle + np.pi / 2.0), np.sin(angle + np.pi / 2.0)])
        self.diag = np.zeros((4, 2))  # corner coordinates in world axes
        self.proj = np.zeros((4, 2))  # corner coordinates on this rect's axes
        print('normX = ', self.normX)
        print('normY = ', self.normY)

    def Diagonals(self, points):
        """Store the 4 corner points and their world-axis bounding extents."""
        for idx in range(4):
            self.diag[idx] = points[idx]
        print(self.name)
        print(self.diag)
        self.diag_xmin, self.diag_xmax = np.amin(self.diag[:, 0]), np.amax(self.diag[:, 0])
        self.diag_ymin, self.diag_ymax = np.amin(self.diag[:, 1]), np.amax(self.diag[:, 1])

    def Projection(self):
        """Project the stored corners onto this rectangle's local axes."""
        for idx in range(4):
            self.proj[idx, 0] = np.dot(self.normX, self.diag[idx])
            self.proj[idx, 1] = np.dot(self.normY, self.diag[idx])
        print(self.name)
        print(self.proj)
        self.proj_xmin, self.proj_xmax = np.amin(self.proj[:, 0]), np.amax(self.proj[:, 0])
        self.proj_ymin, self.proj_ymax = np.amin(self.proj[:, 1]), np.amax(self.proj[:, 1])

    def Intersects(self, other):
        """Return True if this rectangle overlaps `other`.

        Both rectangles must have had Diagonals() and Projection() called.
        If the extents are separated on any of the four tested axes there is
        no overlap; otherwise the rectangles intersect.
        """
        # x1 proj (world x axis)
        bool_x1 = other.diag_xmax < self.diag_xmin or self.diag_xmax < other.diag_xmin
        print('bool_x1 = ', bool_x1)
        # y1 proj (world y axis)
        bool_y1 = other.diag_ymax < self.diag_ymin or self.diag_ymax < other.diag_ymin
        print('bool_y1 = ', bool_y1)
        # x2 proj (this rectangle's local x axis)
        bool_x2 = other.proj_xmax < self.proj_xmin or self.proj_xmax < other.proj_xmin
        print('bool_x2 = ', bool_x2)
        # y2 proj (this rectangle's local y axis)
        bool_y2 = other.proj_ymax < self.proj_ymin or self.proj_ymax < other.proj_ymin
        print('bool_y2 = ', bool_y2)
        self.overlap = not (bool_x1 or bool_x2 or bool_y1 or bool_y2)
        if self.overlap:
            # No axis separates the rectangles: they overlap.
            print('Intersection and Overlap!')
            print(self.overlap)
        else:
            print('They are Separated!')
            print(self.overlap)
        return self.overlap
def PlotRectangles(points1, points2, centers):
    """Draw both rectangles, their centres and rectangle 2's axes."""
    figRec = plt.figure(num=1, figsize=(4, 4), dpi=120)
    axRec = figRec.add_subplot(111)
    # Each rectangle edge gets its own colour so orientation is visible.
    edge_colors = ('orange', 'r', 'b', 'g')
    for corners in (points1, points2):
        for start in range(4):
            stop = (start + 1) % 4
            axRec.plot([corners[start, 0], corners[stop, 0]],
                       [corners[start, 1], corners[stop, 1]],
                       c=edge_colors[start])
    # Mark the two centres.
    axRec.scatter(centers[:, 0], centers[:, 1], c='k')
    # Dashed lines showing rectangle 2's (rotated) axes.
    x2 = np.linspace(-5.0, 10.0, 100)
    y2x = centers[1, 1] - 1.0 * (x2 - centers[1, 0])
    y2y = centers[1, 1] + 1.0 * (x2 - centers[1, 0])
    axRec.plot(x2, y2x, c='k', ls='--')
    axRec.plot(x2, y2y, c='k', ls='--')
    axRec.axis([-5, 10, -5, 10])
    figRec.tight_layout()
    plt.show()
if __name__ == '__main__':
    # Demo: test whether an axis-aligned rectangle overlaps a rotated,
    # translated copy of itself.
    Theta = 3.0*np.pi/8.0        # rotation applied to rectangle 2
    disp = np.array([6.0,6.0])   # translation applied to rectangle 2
    #Check if Rectangles overlap before creating files
    # Create the square relative to (0, 0)
    #Rectangle 1
    points1 = np.array([
        [-3.0, -4.0],
        [-3.0, 4.0],
        [3.0, 4.0],
        [3.0, -4.0],
    ])
    print('points1 = ',points1)
    #Rectangle 2: rotate rectangle 1 by Theta, then translate it by disp
    points2 = np.zeros((4,2))
    for idx in range(4):
        points2[idx,:] = Translate(Rotate(points1[idx],Theta),disp)
        #points2[idx,:] = rotationMatrix.dot(points1[idx,:]) + xList[1]
    print('points2 = ',points2)
    # Centers of Rectangles (rect 2's centre = midpoint of its bounding box)
    center1 = [0.0,0.0]
    center2 = (0.5*(np.amin(points2[:,0])+np.amax(points2[:,0])),
               0.5*(np.amin(points2[:,1])+np.amax(points2[:,1])))
    centers = np.array([center1,center2])
    print('centers = ',centers)
    #Plot to visually check overlap
    PlotRectangles(points1,points2,centers)
    # Run the separating-axis overlap test on the two rectangles.
    Rec1 = Rectangle(center1[0],center1[1],0.0,'Rec1')
    Rec2 = Rectangle(center2[0],center2[1],Theta,'Rec2')
    Rec1.Diagonals(points1)
    Rec2.Diagonals(points2)
    Rec1.Projection()
    Rec2.Projection()
    isOverlap = Rec1.Intersects(Rec2)
'''#Projection of spherobot 1 onto spherobot 2
#Find diagonal vectors from center of rectangle
diagVec1 = np.zeros((4,2))
diagVec1[0] = points1[0] - center1
diagVec1[1] = points1[1] - center1
diagVec1[2] = points1[2] - center1
diagVec1[3] = points1[3] - center1
diagVec2 = np.zeros((4,2))
diagVec2[0] = points2[0] - center2
diagVec2[1] = points2[1] - center2
diagVec2[2] = points2[2] - center2
diagVec2[3] = points2[3] - center2
#Normal vector of spherobot 2
normVec2x = np.array([np.cos(Theta),np.sin(Theta)])
normVec2y = np.array([np.cos(Theta-np.pi/2.0),np.sin(Theta-np.pi/2.0)])
print('normVec2 = ',normVec2)
dotv1_n2 = [np.dot(diagVec1[idx],normVec2) for idx in range(4)]
dotv2_n2 = [np.dot(diagVec2[idx],normVec2) for idx in range(4)]
projVec1 = np.zeros((4,2))
projVec2 = np.zeros((4,2))
for idx in range(4):
projVec1[idx] = dotv1_n2[idx]*normVec2
projVec2[idx] = dotv2_n2[idx]*normVec2
print(projVec1)
print(projVec2)
Rec1 = Rectangle(center1[0],center1[1],2.0*RADIUSSMALL,8.0*RADIUSSMALL,0.0,'Rec1')
Rec2 = Rectangle(center2[0],center2[1],2.0*RADIUSSMALL,8.0*RADIUSSMALL,Theta,'Rec2')
print(Rec2.intersects(Rec1))''' |
import reverse_geocoder as rg
import pprint
from collections import OrderedDict
import pandas as pd
import numpy as np

df = pd.read_csv('3DMassing_2018_WGS84.csv')
# BUG FIX: iterating a DataFrame directly yields its *column labels*, so
# `frame[2]` indexed into the characters of a column name.  Iterate the
# first rows instead; positions 2 and 3 are the coordinate columns that
# the original indexing intended (verify against the CSV header).
for row in df.head().itertuples(index=False):
    print(rg.search((row[2], row[3])))
df.to_csv('output.csv')
#coordinates =(43.66030373, -79.59226603)
#result = rg.search(coordinates)
#print(dict(result[0])['name'])
#pprint.pprint(json.dumps(result))
#print(df)
|
def create_list(element1, element2, pairs=4):
    """Build a flat list that repeats the pair (element1, element2).

    Generalized: the original always produced exactly 4 pairs via a
    manual while-loop counter; `pairs` keeps that as the default.

    :param element1: first element of each pair
    :param element2: second element of each pair
    :param pairs: number of pairs to produce (default 4, as before)
    :return: [element1, element2, element1, element2, ...]
    """
    return [element1, element2] * pairs


print(create_list('Me', 1))
|
# LEVEL 19
# http://www.pythonchallenge.com/pc/hex/bin.html
import base64

# The challenge file is a base64-encoded WAV.
with open('data/level_19.txt', 'rb') as f:
    decoded = base64.decodebytes(f.read())

with open('data/indian_little.wav', 'wb') as wav_l:
    with open('data/indian_big.wav', 'wb') as wav_b:
        # Copy the 45-byte header unchanged to both outputs.
        header = decoded[:45]
        wav_l.write(header)
        wav_b.write(header)
        # For the sample data, write each 16-bit word as-is to the
        # big-endian file and byte-swapped to the little-endian file;
        # a trailing odd byte (if any) is dropped, as before.
        for offset in range(45, len(decoded) - 1, 2):
            word_b = decoded[offset:offset + 2]
            wav_b.write(word_b)
            word_l = int.from_bytes(word_b, 'big').to_bytes(2, 'little')
            wav_l.write(word_l)
|
#!/usr/bin/python
#from pylab import plot,show,norm
from pylab import plot,show,norm
import numpy
import sys
from csv import reader, writer
class Perceptron:
    """Classic perceptron with a bias weight and two feature weights."""

    def __init__(self):
        # Weights start at zero (the random init is immediately overwritten).
        self.w = numpy.random.random(3)
        self.w[0] = 0 # weight for bias
        self.w[1] = 0 # weight for feature 1
        self.w[2] = 0 # weight for feature 2
        # NOTE(review): learningRate is defined but never applied in
        # calculate() — weight updates use a fixed step of 1.  Confirm
        # whether it was meant to scale the updates.
        self.learningRate = 0.003

    def calculate(self, data, output):
        """Train until an error-free pass over `data`.

        :param data: rows of [bias, feature1, feature2, label]; label in {-1, +1}
        :param output: csv.writer; receives [w1, w2, w0] after every epoch
        """
        done = False
        while not done:
            sumErrors = 0.0
            for x in data:
                # Margin of the current sample; a non-positive value of
                # label * (w . x) means the sample is misclassified.
                r = x[0]*self.w[0]+x[1]*self.w[1]+x[2]*self.w[2]
                if x[3] * r <= 0.0: # error. fix the weights
                    self.w[0] += x[3] * x[0]
                    self.w[1] += x[3] * x[1]
                    self.w[2] += x[3] * x[2]
                    sumErrors += 1
            # Log the truncated weights for this epoch, feature weights first.
            temp = []
            temp.append(int(self.w[1])) # weight for feature 1
            temp.append(int(self.w[2])) # weight for feature 2
            temp.append(int(self.w[0])) # weight for bias
            output.writerow(temp)
            #print temp
            if sumErrors == 0.0:
                done = True
        return
def load_csv(filename):
    """Load rows from a CSV file, prefixing each with a '1' bias column."""
    with open(filename, 'r') as fd:
        return [['1'] + row for row in reader(fd)]
# pos 0 has feature 1
# pos 1 has feature 2
# pos 2 has true label
def convert_to_float(samples, column):
    """Cast `column` of every row in `samples` to float, in place."""
    for sample in samples:
        stripped = sample[column].strip()
        sample[column] = float(stripped)
    return
def plot_trainset(trainset):
    """Scatter training samples: blue circles for label +1, red otherwise."""
    for sample in trainset:
        marker = 'ob' if sample[3] == 1.0 else 'or'
        plot(sample[1], sample[2], marker)
    return
def plot_convergence(trainset, w):
    """Draw the learned decision boundary as a dashed black line.

    The boundary w0 + w1*x + w2*y = 0 is drawn through its two axis
    intercepts: (0, -w0/w2) and (-w0/w1, 0).
    """
    #n = norm(w)
    #n1 = norm(trainset)
    #ww = w/n
    #wt = trainset/n1
    #ww1 = [ww[2]*wt[2],-ww[1]*wt[1]]
    #ww2 = [-ww[2]*wt[2],ww[1]*wt[1]]
    #plot([ww1[0], ww2[0]],[ww1[1], ww2[1]],'--k')
    plot([0,-w[0]/w[1]], [-w[0]/w[2],0],'--k')
    return
def main(script, *args):
    """Train a perceptron from a CSV train set and plot the result.

    Usage: perceptron.py <train.csv> <weights-out.csv>
    """
    if len(sys.argv) != 3:
        # Parenthesised so this runs under both Python 2 and 3
        # (the original used a Python-2-only print statement).
        print("Error in arguments!")
        sys.exit()
    trainset = load_csv(sys.argv[1])
    # Convert every column (bias, features, label) to float.
    columns = len(trainset[0])
    for i in range(columns):
        convert_to_float(trainset, i)
    perceptron = Perceptron()
    # Record the weights after each training epoch to the output CSV.
    # BUG FIX: the file handle previously leaked if training raised.
    fd = open(sys.argv[2], 'w')
    try:
        output = writer(fd)
        perceptron.calculate(trainset, output)
    finally:
        fd.close()
    # Visualise samples and the learned decision boundary.
    plot_convergence(trainset, perceptron.w)
    plot_trainset(trainset)
    show()


if __name__ == '__main__':
    main(*sys.argv)
|
import cx_Oracle
def getConnection():
    """Open a connection to the local Oracle XE instance.

    SECURITY NOTE(review): credentials are hard-coded ("system/system") —
    move them to configuration or environment variables before shipping.
    """
    connection = cx_Oracle.connect("system/system@localhost:1521/XE")
    return connection
def fetchData():
    """Print every row of ``select * from tab``, releasing resources on exit.

    BUG FIX: the original leaked the cursor and connection when execution
    raised part-way, and issued a needless commit after a plain SELECT.
    """
    connection = getConnection()
    try:
        cursor = connection.cursor()
        try:
            sql_fetch_data = "select * from tab"
            cursor.execute(sql_fetch_data)
            for result in cursor:
                print(result)
        finally:
            cursor.close()
    finally:
        connection.close()


if __name__ == "__main__":
    fetchData()
# from openpyxl import load_workbook
# from konlpy.tag import Komoran
# from sklearn.feature_extraction.text import TfidfVectorizer
# import pandas as pd
# import numpy as np
#
# df = load_workbook('데이터의 복사본.xlsx') # 엑셀파일 열기
# data = df.active # 시트 활성화(시트 하나뿐이기때문에 첫번째 시트 선택됨)
#
# komoran = Komoran()
# doc = list()
# results = []
#
# col = data['D']
# for cell in col:
# if cell.value is None:
# cell.value = "0"
# doc.append(komoran.pos(cell.value))
#
#
#
# # NNG: 일반명사, NNP: 고유명사, NNB: 의존명사, NP: 대명사, NR: 수사
# # VV: 동사, VA: 형용사, MM: 관형사, SL: 외국어
# pos = ["NNG", "NNP", "NNB", "NP", "NR", "VV", "VA", "MM", "SL"]
#
# for i, sent in enumerate(doc):
# words = [token[0] for token in sent if token[1] in pos]
# results.append(" ".join(words))
# if i >= 1:
# data.cell(row=i+1, column=5).value = results[i]
#
# print(results)
#
# vectorizer = TfidfVectorizer()
# tfidfv = TfidfVectorizer().fit(results) # tf-idf 객체선언 후 단어 학습시킴
# v = tfidfv.vocabulary_
# feature_names = tfidfv.get_feature_names()
# print(sorted(tfidfv.transform(results).toarray()[1], reverse=True)) # 코퍼스로부터 각 단어의 빈도수 기록,
# # 단어 없이 빈도수만 출력 어떤 빈도수인지 알 수 없다.
# a = tfidfv.transform(results).toarray()
# data = {'tf-idf': a, 'vocabulary': v}
# df = pd.DataFrame(data)
# print(df)
# # feature_names = np.array(vectorizer.get_features_names())
# print(feature_names[a[:10]])
# vectorizer = TfidfVectorizer()
# X = vectorizer.fit_transform(results)
# # feature_array = vectorizer.get_feature_names()
# ziptwo = sorted(list(zip(vectorizer.get_feature_names(),
# X.sum(0).getA1())), key=lambda x:x[1], reverse=True)
# print(ziptwo[:5])
# a = tfidfv.transform(results)
# max_value = a.max(axis=1).toarray()
# sorted_by_tfidf = max_value.argsort()
# print(sorted_by_tfidf)
# print(tfidfv.vocabulary_) # 각 단어의 인덱스가 어떻게 부여되었는지 확인
# # 단어의 인덱스를 붙여주고 인덱스가 0부터 시작
# # sorted(tfidfv.vocabulary_items()) # 단어 사전 정렬
# last = tfidfv.transform(results)
# max_value = last.max(axis=0).toarray().ravel()
# sorted_by_tifidf = max_value.argsort()
# feature_names = np.array(tfidfv.get_feature_names())
# print(feature_names[sorted_by_tifidf[:10]])
# # 엑셀파일 저장
# df.save('데이터의 복사본.xlsx')
# df.close()
from collections import defaultdict
from openpyxl import load_workbook
from konlpy.tag import Komoran  # Korean part-of-speech tagging class
# scikit-learn feature_extraction.text subpackage:
# TfidfVectorizer builds a tf-idf weighted bag-of-words encoding.
from sklearn.feature_extraction.text import TfidfVectorizer

# Open the workbook named '데이터의 복사본.xlsx'.
df = load_workbook('데이터의 복사본.xlsx')
data = df.active  # activate the worksheet (the first/only sheet)

komoran = Komoran()  # POS tagger instance
tfidf_vectorizer = TfidfVectorizer()  # NOTE(review): unused — a fresh vectorizer is created below
doc = list()
results = []

# Column D holds the documents to analyse.
col = data['D']
for cell in col:
    if cell.value is None:
        cell.value = "0"  # placeholder so the tagger always receives a string
    doc.append(komoran.pos(cell.value))  # pos(text): attach POS info to the text
# print(doc)  # e.g. [[('논문', 'NNP'), ('제목', 'NNG'), ...]]

# Content-word POS tags to keep:
# NNG general noun, NNP proper noun, NNB dependent noun, NP pronoun,
# NR numeral, VV verb, VA adjective, MM determiner, SL foreign word.
pos = ["NNG", "NNP", "NNB", "NP", "NR", "VV", "VA", "MM", "SL"]
minus = []
for i, sent in enumerate(doc):
    words = [token[0] for token in sent if token[1] in pos]
    # De-duplicated words per document (set iteration order is arbitrary).
    minus_set = set(words)
    minus.append(" ".join(minus_set))
    results.append(" ".join(words))

# TF-IDF over the filtered documents.
vectorizer = TfidfVectorizer()
sp_matrix = vectorizer.fit_transform(results)
# Map each vocabulary word to its column index in the sparse matrix;
# unseen words default to column 0.
word2id = defaultdict(lambda: 0)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 —
# newer versions require get_feature_names_out().
for idx, feature in enumerate(vectorizer.get_feature_names()):
    word2id[feature] = idx

# Write each document's unique words, ordered by tf-idf weight (highest
# first), into column E, skipping the header row (i == 0).
for i, sent in enumerate(minus):
    fin_words = ([(token, sp_matrix[i, word2id[token]]) for token in sent.split()])
    fin_words.sort(key=lambda x: x[1], reverse=True)
    fin_words = ([x[0] for x in fin_words])
    if i >= 1:
        data.cell(row=i+1, column=5).value = " ".join(fin_words)

# Save the workbook back to the same file.
df.save('데이터의 복사본.xlsx')
df.close()
#!/usr/bin/env python
# coding: utf-8
"""Scrape the AUD -> NPR exchange rate from xe.com and e-mail it."""

import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import requests
import bs4
import smtplib

url = 'https://www.xe.com/currencyconverter/convert/?Amount=1&From=AUD&To=NPR'
page = requests.get(url)

# BUG FIX: `soup` was used below without ever being created — parse the page.
soup = BeautifulSoup(page.content, 'html.parser')
# The headline rate element on the converter page (class is site-generated
# and may change when xe.com redeploys).
html = soup.find('p', {'class': 'result__BigRate-sc-1bsijpp-1 iGrAod'}).text

# #### SENDING EMAIL
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# SECURITY(review): credentials are hard-coded — load the password from an
# environment variable or a secrets store instead of committing it.
server.login('romishkrishnashrestha@gmail.com', 'Kathmandu1983')

email = 'romishkrishnashrestha@gmail.com'
message = '''Hi Romish and Suprina,
Today the exchange rate for Nepalese Rupees to Australian Dollar is:
1 AUD = ''' + html + '''
Sent from Python'''

tolist = ['romishshrestha@gmail.com', 'suprinabasnyat@gmail.com']
server.sendmail(from_addr=email, to_addrs=tolist, msg=message)
server.quit()
|
from unittest.mock import patch
import pytest
from rubicon_ml.client.utils.exception_handling import FAILURE_MODES, set_failure_mode
from rubicon_ml.exceptions import RubiconException
@pytest.mark.parametrize("failure_mode", FAILURE_MODES)
def test_set_failure_mode(failure_mode):
    """Each supported mode can be set and is reflected by the module global."""
    set_failure_mode(failure_mode=failure_mode)

    # Import after the call so we read the mutated module-level value.
    from rubicon_ml.client.utils.exception_handling import FAILURE_MODE

    assert FAILURE_MODE == failure_mode

    # cleanup: reset to default
    set_failure_mode("raise")
def test_set_failure_mode_traceback_options():
    """Traceback chain/limit options are stored as module-level settings."""
    traceback_chain = True
    traceback_limit = 1

    set_failure_mode("raise", traceback_chain=traceback_chain, traceback_limit=traceback_limit)

    # Import after the call so we read the mutated module-level values.
    from rubicon_ml.client.utils.exception_handling import (
        TRACEBACK_CHAIN,
        TRACEBACK_LIMIT,
    )

    assert TRACEBACK_CHAIN == traceback_chain
    assert TRACEBACK_LIMIT == traceback_limit
def test_set_failure_mode_error():
    """An unsupported mode is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError) as e:
        set_failure_mode("invalid mode")

    assert f"`failure_mode` must be one of {FAILURE_MODES}" in repr(e)
def test_failure_mode_raise(rubicon_client):
    """'raise' mode surfaces lookup failures as RubiconException."""
    set_failure_mode("raise")

    with pytest.raises(RubiconException) as e:
        rubicon_client.get_project(name="does not exist")

    assert "No project with name 'does not exist' found." in repr(e)
@patch("warnings.warn")
def test_failure_mode_log(mock_warn, rubicon_client):
    """'warn' mode reports failures via warnings.warn instead of raising.

    NOTE(review): the name says "log" but this exercises the "warn" mode —
    this test's name and the next one's appear to be swapped.
    """
    set_failure_mode("warn")
    rubicon_client.get_project(name="does not exist")

    mock_warn.assert_called_once()

    # cleanup: reset to default
    set_failure_mode("raise")
@patch("logging.error")
def test_failure_mode_warn(mock_logger, rubicon_client):
    """'log' mode reports failures via logging.error instead of raising.

    NOTE(review): the name says "warn" but this exercises the "log" mode —
    this test's name and the previous one's appear to be swapped.
    """
    set_failure_mode("log")
    rubicon_client.get_project(name="does not exist")

    mock_logger.assert_called_once()

    # cleanup: reset to default
    set_failure_mode("raise")
|
from utility import swap
def ripple(set, start, end):
    """Shift set[start:end-1] one slot to the right, in place.

    set[start] is duplicated into set[start+1] and set[end-1] is
    overwritten; elements outside [start, end) are untouched.
    """
    set[start + 1:end] = set[start:end - 1]
def insert_sort(data_set):
    """Sort `data_set` in place, ascending, using insertion sort.

    BUG FIX: the original compared against the wrong elements and never
    wrote the saved value back into the gap, so the list was left
    unsorted (e.g. [2, 1] stayed [2, 1]).
    """
    for i in range(1, len(data_set)):
        val = data_set[i]
        # Shift every larger element one slot right, then drop val in.
        j = i - 1
        while j >= 0 and data_set[j] > val:
            data_set[j + 1] = data_set[j]
            j -= 1
        data_set[j + 1] = val
|
import requests as req
from bs4 import BeautifulSoup as soup
import json
def crawlhome():
    """Scrape featured products from the kabum.com.br home page.

    :return: list of {"nome": name, "preco": price} dicts, one per product
    """
    url = "https://www.kabum.com.br/"
    rs = req.get(url)
    page_soup = soup(rs.content, 'html.parser')
    # BUG FIX: the result list was named `json`, shadowing the imported
    # json module inside this function.
    products = []
    for container in page_soup.findAll('div', {"class": "H-box"}):
        # Product title; drop the '..' truncation marker the site appends.
        nome = container.findAll('span', {"class": "H-titulo"})[0].text
        nome = nome.replace('..', '')
        preco = container.findAll('div', {"class": "H-preco"})[0].text
        products.append({"nome": nome, "preco": preco})
    return products
|
"""
Batchcode for Balamb Garden.
"""
from evennia import create_object
from typeclasses import rooms, exits, characters
###############################################################################
# INITIATE ROOMS
# Rooms are created first, as adding exits and details will cross refer to
# different locations which would otherwise not be instantiated yet.
###############################################################################
# BASEMENT
underground_chamber = create_object(rooms.Room, key="Underground Chamber")
garden_masters_office = create_object(rooms.Room, key="Garden Master's Office")
# FLOOR 1
front_gate = create_object(rooms.Room, key="Front Gate")
front_courtyard = create_object(rooms.Room, key="Front Courtyard")
reception = create_object(rooms.Room, key="Reception")
lobby = create_object(rooms.Room, key="Lobby")
infirmary = create_object(rooms.Room, key="Infirmary")
quad = create_object(rooms.Room, key="Quad")
cafeteria = create_object(rooms.Room, key="Cafeteria")
common_room = create_object(rooms.Room, key="Dormitory Common Room")
parking = create_object(rooms.Room, key="Parking")
training_centre_entrance = create_object(rooms.Room, key="Training Centre Entrance")
forest_track = create_object(rooms.Room, key="Forest Track")
mountain_track = create_object(rooms.Room, key="Mountain Track")
river_track = create_object(rooms.Room, key="River Track")
training_centre_lookout = create_object(rooms.Room, key="Training Centre Lookout")
library = create_object(rooms.Room, key="Library")
# FLOOR 2
classroom_hallway = create_object(rooms.Room, key="Classroom Hallway")
classroom = create_object(rooms.Room, key="Classroom")
escape_balcony = create_object(rooms.Room, key="Escape Balcony")
# FLOOR 3
headmasters_lobby = create_object(rooms.Room, key="Headmaster's Lobby")
headmasters_office = create_object(rooms.Room, key="Headmaster's Office")
###############################################################################
# ROOM DESCRIPTIONS
#
# BUG FIX: every section here assigned to `front_lobby`, a name that is
# never defined in this file, so the batch died with a NameError and no
# room ever received its data.  Every section was identical placeholder
# content (description == room key, one test detail, one ambient message),
# so populate all rooms in a single loop instead of 22 copy-pasted blocks.
###############################################################################

ALL_ROOMS = (
    # Basement
    underground_chamber, garden_masters_office,
    # Floor 1
    front_gate, front_courtyard, reception, lobby, infirmary, quad,
    cafeteria, common_room, parking, training_centre_entrance,
    forest_track, mountain_track, river_track, training_centre_lookout,
    library,
    # Floor 2
    classroom_hallway, classroom, escape_balcony,
    # Floor 3
    headmasters_lobby, headmasters_office,
)

for room in ALL_ROOMS:
    # Placeholder description is simply the room's name, as before.
    room.db.desc = room.key
    room.db.details = {"detail": "Test Detail"}
    room.db.ambient_msgs = {"Test Ambient Message": 1}
    # TODO: ROOM OBJECTS and ROOM EXITS still to be populated per room.

###############################################################################
# Report what was built (the original referenced the undefined
# `front_lobby` here as well).
caller.msg("%d rooms created" % len(ALL_ROOMS))
import email.mime.application
import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def lacze_sie_z_poczta(adresat, adres_nadawcy, haslo_nadawcy, jezyk_adresata):
    """Build a multipart e-mail (plain text + HTML + PDF CV) and send it.

    :param adresat: recipient address
    :param adres_nadawcy: sender address used for login and the From header
    :param haslo_nadawcy: sender's mailbox password
    :param jezyk_adresata: recipient language, selects the localized templates
    """
    # SMTP endpoint of the sender's mailbox (host, SSL port).
    serwer_nadawcy = ['smtp.gmail.com', 465]
    # Recipient address.
    adres_adresata = adresat

    wiadomosc = MIMEMultipart("alternative")
    wiadomosc["Subject"] = "Roman Glegoła - Tester Manualny"
    wiadomosc["From"] = adres_nadawcy
    wiadomosc["To"] = adres_adresata

    # Local import: template readers live alongside the mail bodies.
    from TrescMaila.OdczytTresciMaila import odczyt_pliku, odczyt_zalacznika
    # Plain-text and HTML versions of the message, in the recipient's language.
    text = odczyt_pliku(jezyk_adresata, 'TrescMailaTekstowego.txt')
    html = odczyt_pliku(jezyk_adresata, 'TrescMailaHtml.html')
    czesc1 = MIMEText(text, "plain")
    czesc2 = MIMEText(html, "html")

    # PDF resume attachment.
    zyciorys = 'CV.pdf'
    zalacznik = odczyt_zalacznika(jezyk_adresata, zyciorys)
    zalacz = email.mime.application.MIMEApplication(zalacznik.read(), _subtype="pdf")
    zalacz.add_header('Content-Disposition', 'attachment', filename=zyciorys)

    # Attach all three representations: plain text first, then HTML, then PDF.
    wiadomosc.attach(czesc1)
    wiadomosc.attach(czesc2)
    wiadomosc.attach(zalacz)

    # BUG FIX: the TLS context was created but never passed to SMTP_SSL,
    # and the connection was never closed.  Pass the context and use a
    # with-block so the connection is always released.
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(serwer_nadawcy[0], serwer_nadawcy[1], context=context) as server:
        server.login(adres_nadawcy, haslo_nadawcy)
        server.sendmail(
            adres_nadawcy, adres_adresata, wiadomosc.as_string()
        )
|
# What is the value of the first triangle number to have over five hundred divisors?
# Comments Section:
# - Straight forward algorithm but reducing the number of tests using factor pairs
from math import sqrt
def counter(n):
    """Count the divisors of `n` using factor pairs up to sqrt(n).

    BUG FIX: the original tested perfect squares with ``sqrt(n)**2 == n``,
    which is fragile for large n due to float rounding; use math.isqrt
    for an exact integer square root instead.

    :param n: positive integer (floats holding exact integer values are
              accepted, since problem12() historically passed `.../2`)
    :return: number of divisors of n
    """
    from math import isqrt
    n = int(n)  # tolerate float input from the original `/ 2` call site
    root = isqrt(n)  # exact integer sqrt: no precision edge cases
    count = 0
    for i in range(1, root + 1):
        if n % i == 0:
            count += 2  # i and n // i form a divisor pair
    if root * root == n:
        count -= 1  # perfect square: the root was counted twice
    return count
def problem12():
    """Return the first triangle number with over 500 divisors (Euler 12)."""
    n = 0
    while True:
        n += 1
        # n*(n+1) is always even, so floor division is exact; this keeps
        # the arithmetic in integers (the original used / and int()).
        triangle = n * (n + 1) // 2
        if counter(triangle) > 500:
            return triangle
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def find_missing(df):
    """Print the null-mask of *df* and the per-column NaN totals."""
    mask = df.isnull()
    print(" \nshow the boolean Dataframe : \n\n", mask)
    print(" \nCount total NaN at each column in a DataFrame : \n\n",
          mask.sum())
def basic_stats(column_name):
"""
takes a column name and returns basic statisctics of it
:param:
:return:
"""
grouped = data.groupby(column_name).agg(
min_price=('SalePrice', np.min),
mean_price=('SalePrice', np.mean),
max_price=('SalePrice', np.max),
median_price=('SalePrice', np.median),
std_price=('SalePrice', np.std)
)
uniques = data[column_name].value_counts()
left_joined = pd.concat([grouped, uniques], axis=1)
left_joined.rename(columns={column_name: 'count'}, inplace=True)
left_joined.sort_values('mean_price', inplace=True, ascending=False)
return left_joined
def scatter(x, y):
    """
    creates a scatter plot of one variable against another and shows it
    :param x: pandas Series for the horizontal axis (its .name labels the axis)
    :param y: pandas Series for the vertical axis (its .name labels the axis)
    :return: None (displays the plot window)
    """
    plt.scatter(x, y)
    plt.xlabel(x.name)
    plt.ylabel(y.name)
    plt.show()
# Load the housing training data.
data = pd.read_csv('Data/train.csv', sep=',', header=0)
# NOTE(review): filling this column's NaNs with integer 0 mixes types if
# the column is otherwise string-valued — confirm this is intended.
data['MasVnrType'].fillna(0, inplace=True)
print(len(data.columns))
print(data.columns)
# Summary stats, missing-value report, and a scatter for one column.
print(basic_stats('MSSubClass'))
find_missing(data['MSSubClass'])
scatter(data['MSSubClass'], data['SalePrice'])
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 09:41:58 2019
@author: MG
"""
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import os  # 'os' is used below but was never imported by this script

# Load the image labels. NOTE(review): all paths below are machine-specific.
train = pd.read_csv("C:\\Users\\MG\\Desktop\\H&E New dataset\\train_labels.csv", sep=",", dtype = 'unicode')
train.head()
train.columns
# Read every labelled image, resize to 100x100 RGB and scale pixels to [0, 1].
train_image = []
for i in tqdm(range(train.shape[0])):
    img = image.load_img('Multi_Label_dataset/Images/'+train['Id'][i]+'.png',target_size=(100,100,3))
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
X.shape
plt.imshow(X[2])
train['label'][2]
# Targets: every column except 'label' (one-hot / multi-label columns).
y = np.array(train.drop(['label'],axis=1))
y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.1)
# 'base_path' was undefined in the original script (NameError at runtime);
# anchor it near the save location below. TODO confirm intended directory.
base_path = 'D:\\Data set\\H&E data'
dataset_path = os.path.join(base_path, 'dataset')
if not(os.path.exists(dataset_path)):
    os.mkdir(dataset_path)
# Persist the split arrays for later training runs.
np.save('D:\\Data set\\H&E data\\sample_dataset\\' + 'X_train.npy', np.array(X_train))
np.save('D:\\Data set\\H&E data\\sample_dataset\\' + 'y_train.npy', np.array(y_train))
np.save('D:\\Data set\\H&E data\\sample_dataset\\' + 'X_val.npy', np.array(X_test))
np.save('D:\\Data set\\H&E data\\sample_dataset\\' + 'y_val.npy', np.array(y_test))
print(X_train[-1].shape, y_train[-1].shape)
print(X_test[-1].shape, y_test[-1].shape)
# I wanted to try making some classes as practice.
# Reindeer movement states for the race simulation below.
RUNNING = True
RESTING = False
class Reindeer:
    """One racing reindeer parsed from a puzzle input line."""

    def __init__(self, line):
        """
        :param line: Input line of the form
            "Comet can fly 14 km/s for 10 seconds, but then must rest for
            127 seconds." — speed, run time and rest time are taken from
            fixed word positions.
        """
        fields = line.split()
        self.speed = int(fields[3])          # km/s while running
        self.running_time = int(fields[6])   # seconds it can run at a stretch
        self.resting_time = int(fields[13])  # seconds it must then rest

    def calculate_distance_at(self, time):
        """
        :param time: Amount of time (seconds) the race lasts.
        :return: Distance (km) covered at the end of the race.

        Fixes the original's missing return for time <= 0 (it fell off the
        loop and returned None); a zero-length race now yields 0.
        """
        running = True
        distance = 0
        state_timer = self.running_time
        for _ in range(time):
            if running:
                distance += self.speed
            state_timer -= 1
            if state_timer <= 0:
                # Phase expired: flip between running and resting.
                running = not running
                state_timer = self.running_time if running else self.resting_time
        return distance
# Race every reindeer described in the input file for 2503 seconds and
# report the winning distance.
with open("inputData.txt", "r") as infile:
    reindeer_distances = [
        Reindeer(entry).calculate_distance_at(2503) for entry in infile
    ]
print(str(max(reindeer_distances)))
|
'''
Capture multiple Faces from multiple users to be stored on a DataBase (dataset directory)
==> Faces will be stored on a directory: dataset/ (if does not exist, pls create one)
==> Each face will have a unique numeric integer ID as 1, 2, 3, etc
Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition
Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18
'''
import cv2
import os
def scan_new_user():
    """Capture up to 50 face crops from the webcam for a new user.

    Prompts for a user name, creates DataSet/Users/<name>/ and saves each
    detected face as a numbered grayscale JPEG. Press ESC to stop early.
    The camera and windows are released on every exit path.
    """
    cam = cv2.VideoCapture(0)
    cam.set(3, 640)  # set video width
    cam.set(4, 480)  # set video height
    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # Each user gets a uniquely named directory under DataSet/Users.
    face_id = input('\n enter new user name')
    try:
        # makedirs also creates missing parents (the original os.mkdir
        # failed outright when DataSet/Users did not exist yet).
        os.makedirs("DataSet/Users/" + str(face_id))
    except OSError:
        print("\nUser already exists in the system")
    else:
        print("\n [INFO] Initializing face capture. Look the camera and wait ...")
        count = 0  # face samples captured so far
        while count < 50:
            ret, img = cam.read()
            if not ret:
                # Frame grab failed; skip instead of crashing on flip().
                continue
            img = cv2.flip(img, 1)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_detector.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                count += 1
                # Save the captured face crop into the user's folder.
                cv2.imwrite("DataSet/Users/" + str(face_id) + '/' + str(count) + ".jpg", gray[y:y + h, x:x + w])
            cv2.imshow('image', img)
            k = cv2.waitKey(100) & 0xff  # Press 'ESC' for exiting video
            if k == 27:
                break
    # Do a bit of cleanup
    print("\n [INFO] Exiting Program and cleanup stuff")
    cam.release()
    cv2.destroyAllWindows()
|
#!/usr/bin/env python3
from lilaclib import *
# lilac build configuration for the arch4edu x86_64 repository.
build_prefix = 'arch4edu-x86_64'
depends = ['vtk-py3-qt4']
def pre_build():
    """Run the standard AUR pre-build step, then patch PKGBUILD in place.

    Extends makedepends with extra libraries and, right after the
    python35-ast patch line, injects a sed command that strips non-ASCII
    characters from the tvtk docs.
    """
    aur_pre_build()
    for pkgbuild_line in edit_file('PKGBUILD'):
        if 'makedepends=(' in pkgbuild_line:
            # Append additional make dependencies before the closing paren.
            print(pkgbuild_line.replace(')',' "openmpi" "gdal" "unixodbc" "tk" "ffmpeg" "jsoncpp" "gcc-libs")'))
        elif 'patch -p1 < ../python35-ast.patch' in pkgbuild_line:
            print(pkgbuild_line)
            print(' #To strip non-ascii characters from docs')
            print(' sed "166s/doc/doc.encode(\'ascii\',errors=\'ignore\').decode(\'ascii\')/" -i tvtk/indenter.py')
        else:
            print(pkgbuild_line)
# Reuse the stock AUR post-build step unchanged.
post_build = aur_post_build
if __name__ == '__main__':
    single_main(build_prefix)
|
from scripts.phone_basic_operate import Phone_Basic_Operate
class JJFW(Phone_Basic_Operate):
    """Page actions for the traffic-police-service ('交警服务') tab."""

    def __init__(self):
        Phone_Basic_Operate.__init__(self)

    def __del__(self):
        Phone_Basic_Operate.__del__(self)

    def click_jjfw(self):
        # Tap the traffic-police-service tab.
        xpath_jjfw_value = "//*[contains(@text,'交警服务')]"
        ele_jjfw=self.locate_element("xpath",xpath_jjfw_value)
        self.ele_tap(ele_jjfw)
        # Long-press the banner at the top of the page for 3000 ms
        # to bring up the context menu.
        xpath_jjfwtb_value="//*[contains(@text,'banner')]"
        ele_jjfwtb=self.locate_element("xpath",xpath_jjfwtb_value)
        self.ele_longpress(ele_jjfwtb,3000)
        # Return the label text of the 'save picture' ('保存图片') entry.
        xpath_picture_value="//*[contains(@text,'保存图片')]"
        p_text=self.get_attriname("xpath",xpath_picture_value,"text")
        return p_text
        # Alternative attributes the author experimented with:
        # p_text2 = self.get_attriname("xpath", xpath_picture_value, "name")
        # p_text3 = self.get_attriname("xpath", xpath_picture_value, "className")
        # p_text4 = self.get_attriname("xpath", xpath_picture_value, "resourceId")
|
# Generated by Django 3.2.3 on 2021-06-07 18:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes beverage 'volume' and 'year' optional both in forms (blank=True)
    # and at the database level (null=True).
    dependencies = [
        ('beverages', '0010_auto_20210607_1758'),
    ]
    operations = [
        migrations.AlterField(
            model_name='beverage',
            name='volume',
            field=models.IntegerField(blank=True, null=True, verbose_name='Volume (mL)'),
        ),
        migrations.AlterField(
            model_name='beverage',
            name='year',
            field=models.IntegerField(blank=True, null=True, verbose_name='Year'),
        ),
    ]
|
# Generated by Django 3.1.4 on 2021-06-27 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Allows the delivery 'time' field to be left blank in forms.
    # NOTE(review): blank=True without null=True still requires a value at
    # the database level — confirm that is intended.
    dependencies = [
        ('General', '0009_auto_20210627_2259'),
    ]
    operations = [
        migrations.AlterField(
            model_name='delivery',
            name='time',
            field=models.DateTimeField(blank=True),
        ),
    ]
|
# Projeto e Analise de Algoritmos (PAA)
# 2o sem/2018 - IFMG - Campus Formiga
# Pedro Henrique Oliveira Veloso (0002346)
# Saulo Ricardo Dias Fernandes (0021581)
class Move():
    """Represents a single play (one word placed on the board)."""

    def __init__(self, word, pos, direction):
        self.word = word        # Word formed by the play.
        self.pos = pos          # Start of the word on the board (row, col).
        self.dir = direction    # Direction (left->right or top->down).
        self.value = 0          # Total value of the play.
        self._value = 0         # Value of the placed word alone.
        self.crosswords = []    # Extra words formed by the play.
        self.brancos = {}       # Blank tiles used by the play.

    def getWords(self):
        """Return a bracketed listing of every word formed, with values."""
        listing = self.word + '(' + str(self._value) + ')'
        for extra in self.crosswords:
            listing += ' ' + extra[0] + '(' + str(extra[1]) + ')'
        return '[' + ", ".join(listing.split(' ')) + ']'

    def parseBrancos(self, brancos):
        """[IA] Record the blank tiles (letter, position) used by this play."""
        for (letter, position) in brancos:
            self.brancos[position] = letter
        return

    def __str__(self):
        return (self.word + " (" + str(self.value) + ")"
                + " [" + str(self.pos[0]) + ", " + str(self.pos[1]) + "]"
                + " - dir: " + self.dir)
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
import cv2
import sys
import os
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, LaserScan, CameraInfo
import random
from std_msgs.msg import Bool, Int32, Float32
import numpy as np
import math
import time
from std_msgs.msg import String
from geometry_msgs.msg import Twist
PI = 3.1415926535897  # pi approximation used for degree->radian conversions
import image_geometry
import colorsys
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from math import radians, degrees
from actionlib_msgs.msg import *
from geometry_msgs.msg import Point
# NOTE(review): a module-level 'global' statement has no effect; x, y and
# goal are actually created by the functions below.
global x,y,goal
def rotate(angle):
    """Rotate the robot in place by *angle* degrees via /cmd_vel.

    Open loop: integrates the commanded angular speed over wall-clock time
    instead of reading odometry, so the final heading is approximate.
    """
    velocity_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    current_dis=0
    t1=0
    t0=0
    vel_msg= Twist()
    angle2 = abs(angle)  # NOTE(review): computed but never used
    print("Let's rotate your robot!! angle: "+str(angle))
    # Scale speed so the turn completes in roughly 7 seconds.
    speed = angle/7
    angular_speed = speed*2*PI/360
    relative_angle = angle*2*PI/360
    #We wont use linear components
    vel_msg.linear.x=0
    vel_msg.linear.y=0
    vel_msg.linear.z=0
    vel_msg.angular.x = 0
    vel_msg.angular.y = 0
    vel_msg.angular.z = angular_speed
    t0 = rospy.Time.now().to_sec()
    current_angle = 0
    r=rospy.Rate(40)
    print "turning"
    # Keep publishing until the integrated angle reaches the target.
    while( current_angle < abs(relative_angle) and (not rospy.is_shutdown())):
        velocity_publisher.publish(vel_msg)
        t1 = rospy.Time.now().to_sec()
        r.sleep()
        current_angle = abs(angular_speed)*(t1-t0)
    # Stop turning and give the base a moment to settle.
    vel_msg.angular.z = abs(0)
    velocity_publisher.publish(vel_msg)
    rospy.sleep(1)
def move_forward():
    """Drive forward to ~0.3 m short of the obstacle straight ahead.

    The target distance is an average of the front laser rays (indices
    0-10 and 349-359); motion is integrated open loop at 0.1 m/s.
    :return: True when the motion finishes.
    """
    data=rospy.wait_for_message("/scan", LaserScan)
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    current_dis=0
    t1=0
    t0=0
    print (data.angle_min)
    # Average the front-facing readings.
    # NOTE(review): 22 rays are summed but the total is divided by 20 —
    # confirm the intended averaging window.
    sum=0
    for i in range(0,11):
        sum=sum+data.ranges[i]
    for i in range(349,360):
        sum=sum+data.ranges[i]
    distance = sum/20
    rospy.loginfo(distance)
    msg= Twist()
    msg.linear.x=0.1
    t0=rospy.Time.now().to_sec()
    r=rospy.Rate(40)
    print "distance avg:"+str(distance)
    # Stop 0.3 m before the estimated obstacle distance.
    distance = distance-0.3
    while((not rospy.is_shutdown()) and current_dis<distance):
        pub.publish(msg)
        t1=rospy.Time.now().to_sec()
        r.sleep()
        current_dis=msg.linear.x*(t1-t0)
        print "current dis"+str(current_dis)
    msg.linear.x= 0.0
    pub.publish(msg)
    return True
def move_forward2():
    """Drive forward a fixed 0.5 m at 0.1 m/s (open loop).

    :return: True when the motion finishes.
    """
    data=rospy.wait_for_message("/scan", LaserScan)
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    current_dis=0
    t1=0
    t0=0
    print (data.angle_min)
    msg= Twist()
    msg.linear.x= 0.1
    t0=rospy.Time.now().to_sec()
    r=rospy.Rate(40)
    moving_distance = 0.5  # fixed travel distance in metres
    print "moving dis"+str(moving_distance)
    # Integrate commanded speed over elapsed time until 0.5 m is covered.
    while((not rospy.is_shutdown()) and current_dis<moving_distance):
        pub.publish(msg)
        t1=rospy.Time.now().to_sec()
        r.sleep()
        current_dis=msg.linear.x*(t1-t0)
        print "current dis"+str(current_dis)
    msg.linear.x= 0.0
    pub.publish(msg)
    return True
def move_forward3():
    """Drive forward to 0.5 m short of the nearest obstacle within the
    front cone (laser indices 0-15 and 344-359), open loop at 0.1 m/s.

    :return: True when the motion finishes.
    """
    data=rospy.wait_for_message("/scan", LaserScan)
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    current_dis=0
    t1=0
    t0=0
    msg= Twist()
    msg.linear.x=0.1
    t0=rospy.Time.now().to_sec()
    r=rospy.Rate(40)
    # Minimum range over the front-facing cone.
    dist =10
    for i in range(0,16):
        if(data.ranges[i]<dist):
            dist = data.ranges[i]
    for i in range(344,360):
        if(data.ranges[i]<dist):
            dist = data.ranges[i]
    print "distance from obstacle:"+str(dist)
    # Stop 0.5 m before the closest detected obstacle.
    while((not rospy.is_shutdown()) and current_dis<dist-0.5):
        pub.publish(msg)
        t1=rospy.Time.now().to_sec()
        r.sleep()
        current_dis=msg.linear.x*(t1-t0)
        print "current dis"+str(current_dis)
    msg.linear.x= 0.0
    pub.publish(msg)
    return True
def find_object(r,g,b):
    """Find an object of colour (r, g, b) using the camera and laser scan.

    :return: (distance, bearing) to the object, or (None, None) when the
        camera image, camera info or scan is unavailable, or the colour is
        not seen. Falls through with implicit None when the final sanity
        check fails.
    """
    print "find object"
    imgdata=rospy.wait_for_message('camera/image_raw',Image)
    if(imgdata is None):
        print "error reciving image from camera"
        return None,None
    bridge = CvBridge()
    image = bridge.imgmsg_to_cv2(imgdata,"bgr8")
    print "we have image :)"
    camInfodata=rospy.wait_for_message('camera/camera_info',CameraInfo)
    if(camInfodata is None):
        print "error reciving caminfo"
        return None,None
    camInfo=camInfodata
    print "camInfo:" +str(camInfo.K[0])+ str(camInfo.K[1])+ str(camInfo.K[2])
    scannerdata=rospy.wait_for_message('scan',LaserScan)
    if(scannerdata is None):
        print "error getting scanner data"
        return None,None
    distances=scannerdata
    if(camInfo!=0 and distances!=0 and len(image)!=0):
        # Back-project the detected pixel into a 3D ray to get its bearing.
        camera = image_geometry.PinholeCameraModel()
        camera.fromCameraInfo(camInfo)
        Radius_center=(0,0)
        Radius_center = findCenter(image,r,g,b)
        if(Radius_center==None):
            print "OBJECT NOT FOUND!"
            msg = Float32()
            msg.data =-1.0
            return None,None
        else:
            ray = camera.projectPixelTo3dRay(camera.rectifyPoint(Radius_center))
            # NOTE(review): bearing derived from the ray components —
            # verify the angle convention against the laser frame.
            alpha = np.dot(ray[0],ray[2])
            if(alpha < 0):
                alpha = -alpha
            else:
                alpha = math.floor(math.pi * 2 - alpha)
            # Map the bearing onto the matching laser-scan index.
            distance_index = int((alpha - distances.angle_min) / distances.angle_increment)
            actual_distance = distances.ranges[distance_index]
            print "the distance to the object is "+str(actual_distance)
            return actual_distance ,alpha
def findCenter(cv_image,r,g,b):
    """Return the (x, y) centre of the largest blob matching colour (r,g,b).

    Builds an HSV hue window around the requested colour, masks the image,
    and fits a minimum enclosing circle to the biggest contour.
    :return: (x, y) floats in pixel coordinates, or None when no contour
        matches.
    """
    r1 ,g1,b1=colorsys.rgb_to_hsv(r,g,b)  # NOTE(review): result unused
    red = np.uint8([[[b,g,r ]]])
    redHSV = cv2.cvtColor(red, cv2.COLOR_BGR2HSV)
    # Hue window of +/-25 around the target colour, with S/V floors.
    greenLower = (redHSV[0][0][0]-25, 80, 80)
    greenUpper = (redHSV[0][0][0]+25, 255, 255)
    height, width, channels = cv_image.shape
    blurred = cv2.GaussianBlur(cv_image, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    # Erode/dilate to remove speckle noise from the mask.
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    (_,cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    center = None
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        # NOTE(review): the moments-based 'center' is computed but never
        # returned, and M["m00"] could be zero here (division error).
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        return (x,y)
    else:
        return None
def move_to_object():
    """Creep forward up to 0.5 m while the obstacle straight ahead is far.

    :return: False immediately when the front range is already within
        0.7 m (robot commanded to stop); True after moving up to 0.5 m.
    """
    data=rospy.wait_for_message("/scan", LaserScan)
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    current_dis=0
    t1=0
    t0=0
    center=data.ranges[0]  # range reading straight ahead
    print "center move to object "+str (center)
    rospy.loginfo(center)
    msg= Twist()
    if( center>0.7):
        msg.linear.x= 0.1
    else:
        # Already close enough: stop and report no motion.
        msg.linear.x= 0.0
        pub.publish(msg)
        return False
    t0=rospy.Time.now().to_sec()
    r=rospy.Rate(40)
    # Integrate commanded speed until 0.5 m has been covered.
    while((not rospy.is_shutdown()) and current_dis<0.5):
        pub.publish(msg)
        t1=rospy.Time.now().to_sec()
        r.sleep()
        current_dis=msg.linear.x*(t1-t0)
        print "current dis"+str(current_dis)
    msg.linear.x= 0.0
    pub.publish(msg)
    return True
def move ():
global r
global g
global b
r=0
g=0
b=255
found=False
print "move function"
len, ang=find_object(r,g,b)
i=0
print "len before loop ="+str(len)
while(len is None ):
moved = move_forward()
print "moved: "+str(moved)
rotate(30)
i+=1
print "len ="+str(len)
len, ang=find_object(r,g,b)
print "ang main" +str(ang)
print "Rotated"
move_forward2()
print "moved to object"
def center_obstacle ():
    """Rotate in small steps until the obstacle ahead is centred.

    Bug fixes vs. the original:
    - it computed abs(a - b > 0.2), i.e. abs() of a boolean, so the 0.2 m
      threshold never applied; parentheses corrected.
    - the scan was read once before the loop, so the exit condition could
      never change; the scan is now re-read after every rotation step.
    """
    data = rospy.wait_for_message("/scan", LaserScan)
    r = rospy.Rate(40)
    while (abs(data.ranges[4] - data.ranges[355]) > 0.2 and
           data.ranges[4] + data.ranges[355] > 0.9):
        # Turn toward whichever side reads closer.
        if data.ranges[4] > data.ranges[355]:
            rotate(-3)
        else:
            rotate(3)
        data = rospy.wait_for_message("/scan", LaserScan)
def callback(data):
    """Obstacle-found handler: detour around the obstacle, then resend the
    navigation goal.

    :param data: Float32 message; a non-zero value signals an obstacle.
    """
    global goal
    print "dist.data:" +str(data.data)
    if (data.data !=0.0):
        ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        #wait for the action server to come up
        while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
            rospy.loginfo("Waiting for the move_base action server to come up")
        ac.cancel_all_goals()
        # Scripted avoidance manoeuvre: advance, zig-zag around, re-centre.
        move_forward3()
        rotate(35)
        move_forward()
        rotate(-125)
        center_obstacle()
        move_forward2()
        # Resume the original navigation goal.
        ac.send_goal(goal)
        ac.wait_for_result(rospy.Duration(80))
def start():
    """Entry point: navigate to the (x, y) goal given on the command line
    and log whether it was reached."""
    global x,y,goal
    goalReached = False
    # initialize
    rospy.Rate(40)
    goalReached = moveToGoal(x,y)
    print "after sub"
    if(goalReached):
        rospy.loginfo("Congratulations!")
    else:
        rospy.loginfo("Hard Luck!")
def shutdown(self):
    """Log and stop the robot on shutdown.

    NOTE(review): takes 'self' although it is a module-level function, and
    rospy.sleep() is called without a duration — confirm this is actually
    registered/invoked anywhere.
    """
    # stop turtlebot
    rospy.loginfo("Quit program")
    rospy.sleep()
def moveToGoal(xGoal,yGoal):
    """Send a move_base goal at (xGoal, yGoal) in the map frame.

    Also subscribes to /obstaclefound so callback() can interrupt the run
    and detour when an obstacle is reported.
    :return: True when move_base reports success within 80 seconds.
    """
    global goal
    #define a client for to send goal requests to the move_base server through a SimpleActionClient
    ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
    #wait for the action server to come up
    while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
        rospy.loginfo("Waiting for the move_base action server to come up")
    goal = MoveBaseGoal()
    #set up the frame parameters
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    # moving towards the goal*/
    goal.target_pose.pose.position = Point(xGoal,yGoal,0)
    # Identity orientation: face along the map frame's x axis.
    goal.target_pose.pose.orientation.x = 0.0
    goal.target_pose.pose.orientation.y = 0.0
    goal.target_pose.pose.orientation.z = 0.0
    goal.target_pose.pose.orientation.w = 1.0
    rospy.loginfo("Sending goal location ...")
    ac.send_goal(goal)
    bool_obstacle = rospy.Subscriber("/obstaclefound",Float32,callback)
    ac.wait_for_result(rospy.Duration(80))
    if(ac.get_state() == GoalStatus.SUCCEEDED):
        rospy.loginfo("You have reached the destination")
        return True
    else:
        rospy.loginfo("The robot failed to reach the destination")
        return False
if __name__ == '__main__':
    try:
        rospy.init_node('map_navigation', anonymous=False)
        global x,y
        # Goal coordinates come from the command line: map_navigation.py X Y
        x = float(sys.argv[1])
        y= float(sys.argv[2])
        start()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("map_navigation node terminated.")
#http://edu.gaitech.hk/turtlebot/map-navigation.html
|
import numpy as np
import functions as f
'''
単レイヤーのクラス群
'''
class MatMul:
    """Matrix-multiplication layer (fully connected, no bias)."""

    def __init__(self, W):
        self.params = [W]
        # Bug fix: grads must be a list mirroring params. The original
        # stored the bare array, so backward's self.grads[0][...] wrote
        # into the first ROW of the buffer (shape mismatch / wrong grads).
        self.grads = [np.zeros_like(W)]
        self.x = None  # cached forward input for the backward pass

    def forward(self, x):
        """Return x @ W, caching x for the backward pass."""
        W, = self.params
        out = np.dot(x, W)
        self.x = x
        return out

    def backward(self, dout):
        """Store dW into grads[0] in place and return dx."""
        W, = self.params
        dx = np.dot(dout, W.T)
        dW = np.dot(self.x.T, dout)
        self.grads[0][...] = dW
        return dx
class Sigmoid:
    """Element-wise sigmoid activation layer."""

    def __init__(self):
        self.params = []
        self.grads = []
        self.out = None  # cached activation for the backward pass

    def forward(self, x):
        """Apply sigmoid(x) = 1 / (1 + exp(-x)) element-wise."""
        activated = 1 / (1 + np.exp(-x))
        self.out = activated
        return activated

    def backward(self, dout):
        """Gradient: dout * out * (1 - out)."""
        return dout * (1.0 - self.out) * self.out
class Relu:
    """Element-wise ReLU activation layer."""

    def __init__(self):
        self.params, self.grads = [], []
        self.mask = None  # boolean mask of non-positive inputs

    def forward(self, x):
        """Zero out all non-positive entries of x (works on a copy)."""
        self.mask = x <= 0
        clipped = x.copy()
        clipped[self.mask] = 0
        return clipped

    def backward(self, dout):
        """Pass gradients through only where the input was positive.

        Note: mutates *dout* in place, matching the original behavior.
        """
        dout[self.mask] = 0
        return dout
class Affine:
    """Fully connected (affine) layer: out = x @ W + b."""

    def __init__(self, W, b):
        self.params = [W, b]
        self.grads = [np.zeros_like(W), np.zeros_like(b)]
        self.x = None  # cached forward input for the backward pass

    def forward(self, x):
        """Return x @ W + b, caching x."""
        W, b = self.params
        self.x = x
        return np.dot(x, W) + b

    def backward(self, dout):
        """Store dW and db into grads in place; return dx."""
        W, _ = self.params
        dx = np.dot(dout, W.T)
        self.grads[0][...] = np.dot(self.x.T, dout)
        self.grads[1][...] = np.sum(dout, axis=0)
        return dx
class SoftmaxWithLoss:
    """Softmax activation combined with a cross-entropy loss."""

    def __init__(self):
        self.loss = None  # scalar loss value
        self.pred = None  # softmax output (per-class probabilities)
        self.true = None  # teacher labels as one-hot vectors

    def forward(self, x, true):
        """Return the cross-entropy loss of softmax(x) against *true*."""
        self.true = true
        self.pred = f.softmax(x)
        self.loss = f.cross_entropy_error(self.pred, self.true)
        return self.loss

    def backward(self, dout=1):
        """Return (pred - true) / batch_size."""
        return (self.pred - self.true) / self.true.shape[0]
|
# MAIN GOAL
#
# In this project, you will make a game similar to 21/blackjack. Since this is not an actual game (as far as I'm aware of), here are the instructions for how to play.
#
# In this version, there is only one player, and there are two types of scores - the round score and the game score. The game score will begin at 100, and the game will last for five rounds.
#
# At the beginning of the round, the player is given two random cards from a deck and they will be added together to make the player's round score. From here, the player has two options - draw another card to try to get their round score closer to 21, or they can end the round. The player can draw as many cards as they want until they end the round or their round score exceeds 21.
#
# At the end of the round, the difference between 21 and the round score is subtracted from the game score, and then the next round begins. After the five rounds, the player is given their total score and the game is over.
#
# ---Other Information About The Game---
#
# Aces are only worth 1.
#
# If a player busts, 21 is subtracted from their total score.
#
# All face cards are worth 10.
#
# So the point of your program is to allow the user to play the game described above. Many of the subgoals listed below can be added to shine up the game.
#
# SUBGOALS
#
# At the beginning of each round, print the round number (1 to 5).
#
# Since this is a text-based game, tell the user what is happening. For example, tell him/her when he/she draws a card, the name of the card, when they bust, etc.
#
# Create a ranking system at the end of the game and tell the user their rank. For example, if the player finishes with 50-59 points they get an F, 60-69 is a D, 70-79 is a C, 80-89 is a B, and 90-100 is an A.
#
# At the end of each round, print out the user's total score.
#
# This may be the hardest part of the project, depending on how you wrote it. Make sure the deck has 4 of each type of card, and then remove cards as they are drawn. At the end of each round, make the deck have all of the cards again.
#
#
import random
import time
# Face value of every rank; aces count 1, all face cards count 10.
cards = {
    "Ace": 1,
    2: 2,
    3: 3,
    4: 4,
    5: 5,
    6: 6,
    7: 7,
    8: 8,
    9: 9,
    10: 10,
    "Jack": 10,
    "Queen": 10,
    "King": 10
}
# Copies of each rank in a fresh deck (4 per rank; suits are not tracked).
deck = {
    "Ace": 4,
    2: 4,
    3: 4,
    4: 4,
    5: 4,
    6: 4,
    7: 4,
    8: 4,
    9: 4,
    10: 4,
    "Jack": 4,
    "Queen": 4,
    "King": 4
}
def draw_card():
    """Draw one card from the current round's deck.

    Bug fix: the original sampled from the static *cards* value table, so a
    rank could be drawn more than its 4 copies, and once a rank was removed
    from round_deck a later draw of it raised KeyError. Sampling from
    round_deck itself respects the remaining counts.

    :return: (rank, value) tuple for the drawn card. Mutates the
        module-level ``round_deck``.
    """
    rank = random.choice(list(round_deck.keys()))
    round_deck[rank] -= 1
    if round_deck[rank] == 0:
        del round_deck[rank]
    return (rank, cards[rank])
# Five rounds; the game score starts at 100 and loses the gap to 21 (or a
# flat 21 on a bust) each round.
game_score = 100
for i in range(1, 6):
    # Fresh 52-card deck (4 copies per rank) every round.
    round_deck = deck.copy()
    print("Round {} of 5!".format(i))
    print("")
    time.sleep(1)
    print("Your game score is {}".format(game_score))
    print("")
    # Opening hand: two cards, summed into the round total.
    first_card = draw_card()
    second_card = draw_card()
    time.sleep(1)
    print("Your first card is a {}".format(first_card[0]))
    time.sleep(1)
    print('Your second card is a {}'.format(second_card[0]))
    total = first_card[1] + second_card[1]
    time.sleep(1)
    print("")
    print("Your total is {}".format(total))
    while True:
        time.sleep(1)
        print("")
        print("Stick or twist?")
        choice = input(">> ")
        print("")
        if choice.lower() == "stick":
            # Sticking costs the distance from 21.
            game_score = game_score - (21 - total)
            print("")
            break
        elif choice.lower() == "twist":
            card = draw_card()
            time.sleep(1)
            print("You have received a {}".format(card[0]))
            time.sleep(1)
            total = total + card[1]
            if total > 21:
                # Bust: flat 21-point penalty and the round ends.
                print("Bust! Your total is {}!".format(total))
                print("")
                time.sleep(3)
                game_score = game_score - 21
                break
            else:
                print("Your total is {}".format(total))
# Clamp the final score at zero before reporting.
if game_score < 0:
    game_score = 0
print("Game over! Your total score is {}".format(game_score))
|
# Generated by Django 3.1 on 2021-06-18 20:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Enforces uniqueness of country names (max length 50).
    dependencies = [
        ('stores', '0002_country'),
    ]
    operations = [
        migrations.AlterField(
            model_name='country',
            name='name',
            field=models.CharField(max_length=50, unique=True),
        ),
    ]
|
#I pledge my honor that I have abided by the Stevens Honor System.
#Zachary Jones
#HW 5 Problem 2
def recursive_sum(numbers):
    """Return the sum of a sequence, computed recursively.

    The parameter was renamed from ``list``, which shadowed the builtin.
    An empty sequence sums to 0 (base case).

    :param numbers: sliceable sequence of numbers
    :return: sum of all elements
    """
    if not numbers:
        return 0
    return numbers[0] + recursive_sum(numbers[1:])
# Demo: sum a fixed list and print the result.
number_list = [2, 4, 6, 10, 23, 432, 43, 782]
print('Sum of list: ' + str(recursive_sum(number_list)))
import dash_bootstrap_components as dbc
from dash import html
# Demo layout: two vertical dbc.Stack examples — the first without spacing,
# the second with gap=3 (Bootstrap spacing scale) between items.
stack = html.Div(
    [
        dbc.Stack(
            [
                html.Div(
                    "This stack has no gaps", className="bg-light border"
                ),
                html.Div("Next item", className="bg-light border"),
                html.Div("Last item", className="bg-light border"),
            ]
        ),
        html.Hr(),
        dbc.Stack(
            [
                html.Div("This stack has gaps", className="bg-light border"),
                html.Div("Next item", className="bg-light border"),
                html.Div("Last item", className="bg-light border"),
            ],
            gap=3,
        ),
    ]
)
|
import logging
import os
import sys
from logging.handlers import TimedRotatingFileHandler
from flask import Flask
from flask_cors import CORS
from flask_injector import FlaskInjector
from injector import singleton, Provider
from parking.api.config.loader import load_config_from_json
from parking.api.namespace import api
from parking.core.db.database import Database
from parking.core.services.parking_service import ParkingService
# Flask application with CORS enabled for all routes.
app = Flask(__name__)
CORS(app)
def configure_injector(config, binder):
    """Register Database and ParkingService bindings on the DI *binder*.

    :param config: Flask config mapping providing DB_URL and DB_NAME
    :param binder: injector binder to register bindings on
    """
    class _ParkingServiceProvider(Provider):
        # Builds the service on first injection, after the Database has
        # completed its connection handshake with the configured DB.
        def get(self, injector):
            return ParkingService(injector.get(Database).handshake(config['DB_URL'], config['DB_NAME']))
    binder.bind(
        Database,
        to=Database(),
        scope=singleton
    )
    binder.bind(
        ParkingService,
        to=_ParkingServiceProvider(),
        scope=singleton
    )
def configure_app(_app):
    """Configure logging, the REST namespace, JSON config and DI for *_app*."""
    LOGGING_FORMAT = '%(asctime)-15s %(name)-15s %(levelname)-8s %(message)s'
    # Console logging on stdout at INFO level.
    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter(fmt=LOGGING_FORMAT, datefmt='%Y-%m-%dT%H:%M:%S'))
    # Replace any pre-existing root handlers with ours and route the Flask
    # app logger through the root logger.
    logging.getLogger().handlers = []
    logging.getLogger().addHandler(console_handler)
    logging.getLogger().setLevel(logging.INFO)
    _app.logger.handlers = []
    _app.logger.propagate = True
    logging.getLogger().info('Configure new Flask App')
    api.init_app(_app)
    _app = load_config_from_json(_app)
    # activate = True if os.environ['PP_AUTH'] == '1' else False
    # Optional daily-rotated file log, kept for 30 days.
    if _app.config['LOG_PATH'] is not None:
        file_handler = TimedRotatingFileHandler(
            os.path.join(_app.config['LOG_PATH'], 'log_api.txt'),
            when='d',
            interval=1,
            backupCount=30
        )
        file_handler.setFormatter(logging.Formatter(fmt=LOGGING_FORMAT, datefmt='%Y-%m-%dT%H:%M:%S'))
        file_handler.setLevel(logging.INFO)
        logging.getLogger().addHandler(file_handler)
    def _injector_module(binder):
        # Bridge FlaskInjector's binder to the configuration function above.
        configure_injector(_app.config, binder)
    injector = FlaskInjector(app=_app, modules=[_injector_module])
    # Initialize Database
    injector.injector.get(Database)
if __name__ == '__main__':
    # Configure the app, then serve it with Flask's built-in server.
    apiapp = app
    configure_app(app)
    logging.getLogger().info('Application configured, starting on port {}'.format(app.config['PORT']))
    apiapp.run(host=app.config['HOST'], debug=app.config['DEBUG'], port=app.config['PORT'])
|
import torch
from pathlib import Path
from tqdm import tqdm
from test.edit_distance import edit_distance
from model.asr_model import ASRTransformerModel
from utils.logger import get_logger
from utils.ipa_encoder import SOS_ID, EOS_ID, IPAEncoder
from utils.config_utils import read_model_config, read_binf_mapping
from utils.dataset_utils import get_loader, load_dataset
logger = get_logger('asr.train')
def run_test(test_data, device, vocab,
             checkpoint_path, test_batch_size, max_batches_count=None,
             encoder=None, beam_size=3):
    """Evaluate a checkpoint on *test_data* and return the mean edit distance.

    :param test_data: dataset as produced by load_dataset
    :param device: torch device string ('cuda' or 'cpu')
    :param vocab: output vocabulary (sets the output dimension)
    :param checkpoint_path: path to the saved model checkpoint
    :param test_batch_size: batch size for the evaluation loader
    :param max_batches_count: optional cap on the number of batches
    :param encoder: optional IPAEncoder; when given, predictions and
        targets are written to the debug log
    :param beam_size: beam width; <= 0 selects greedy decoding
    :return: average edit distance as a numpy scalar

    Fix vs. the original: the inner per-sample loops reused the name ``i``,
    shadowing the outer batch index (renamed to ``sample``); the batch
    unpacking comprehension likewise shadowed ``x`` (renamed to ``t``).
    """
    checkpoint_dir = Path(checkpoint_path).parents[0]
    model_params = read_model_config(checkpoint_dir)
    binf_map = None
    n_outputs = len(vocab)
    if model_params.binf_targets:
        binf_map = read_binf_mapping(checkpoint_dir, vocab).to(device)
        n_outputs = binf_map.size(0)
    test_loader = get_loader(test_data, model_params.sample_rate, test_batch_size,
                             False, model_params.max_src_len, model_params.max_tgt_len)
    model = ASRTransformerModel(model_params, n_outputs, binf_map).to(device)
    logger.debug(f'Loading checkpoint from {checkpoint_path}')
    model.load_state_dict(torch.load(checkpoint_path)['state_dict'])
    model.eval()
    pbar = tqdm(test_loader)
    distances = []
    with torch.no_grad():
        for batch_idx, batch in enumerate(pbar):
            if max_batches_count is not None and batch_idx >= max_batches_count:
                break
            x, x_lengths, targets, _ = [t.to(device) for t in batch]
            # Decoding starts from a single SOS token per utterance.
            partial_targets = torch.full((x.size(0), 1), SOS_ID, device=device, dtype=torch.long)
            partial_target_lengths = torch.ones((x.size(0),), device=device, dtype=torch.long)
            if beam_size > 0:
                outputs = model.inference_beam_search(x, x_lengths, partial_targets, partial_target_lengths,
                                                      eos=EOS_ID, beam_size=beam_size)
                # Score only the best (first) beam against the references.
                distances.append(edit_distance(outputs[:, 0, :], targets[:, 1:], EOS_ID).detach())
                if encoder is not None:
                    for sample in range(x.size(0)):
                        for beam in range(beam_size):
                            ipa_outputs = encoder.decode(outputs[sample, beam, ...].detach().cpu().numpy())
                            logger.debug(f'Beam {beam}: {ipa_outputs}')
                        ipa_targets = encoder.decode(targets[sample, 1:].detach().cpu().numpy())
                        logger.debug(f'Targets: {ipa_targets}\n\n')
            else:
                outputs = model.inference(x, x_lengths, partial_targets, partial_target_lengths,
                                          eos=EOS_ID)
                distances.append(edit_distance(outputs[:, :], targets[:, 1:], EOS_ID).detach())
                if encoder is not None:
                    for sample in range(x.size(0)):
                        ipa_outputs = encoder.decode(outputs[sample, ...].detach().cpu().numpy())
                        ipa_targets = encoder.decode(targets[sample, 1:].detach().cpu().numpy())
                        logger.debug(f'Outputs: {ipa_outputs}\nTargets: {ipa_targets}\n')
    distances = torch.cat(distances)
    return distances.mean().cpu().numpy()
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--data_dir', type=str, required=True,
                        help='Path to directory with CSV files.')
    parser.add_argument('--checkpoint', type=str, default=None,
                        help='Checkpoint to test.')
    parser.add_argument('--batch_size', type=int, default=64,
                        help='Test batch size')
    parser.add_argument('--subset', default='test', choices=['train', 'dev', 'test'],
                        help='Dataset on which to run test.')
    parser.add_argument('--max_batches_count', type=int, default=None,
                        help='Maximal batches count to limit the test.')
    parser.add_argument('--beam_size', type=int, default=3,
                        help='Beam size.')
    parser.add_argument('--verbose', action='store_true',
                        help='Output predictions and targets.')
    args = parser.parse_args()
    test_data = load_dataset(Path(args.data_dir), subset=args.subset)
    # Prefer GPU when available.
    if torch.cuda.is_available():
        device = 'cuda'
        logger.debug('Using CUDA')
    else:
        device = 'cpu'
    encoder = IPAEncoder(args.data_dir)
    # The encoder is passed through only in verbose mode (enables logging
    # of decoded predictions inside run_test).
    PER = run_test(test_data, device, encoder.vocab,
                   args.checkpoint, args.batch_size, args.max_batches_count,
                   encoder if args.verbose else None, beam_size=args.beam_size)
    logger.info(f'Average PER is {PER}. {len(test_data)} samples tested.')
|
"""
EXAMPLE 3.1
CONVERTED FROM THE BOOK TO PYTHON BY CASPER STEINMANN
USING MPI BY GROPP ET AL.
USE AT YOUR OWN RISK
"""
from numpy import pi
from mpi4py import MPI
from mpiutil import getMPIInformation
# Set up the communicator and discover this process's rank and world size.
comm = MPI.COMM_WORLD
(rank,size) = getMPIInformation(comm)
def f(x):
    """Integrand 4 / (1 + x^2); its integral over [0, 1] equals pi."""
    denominator = 1.0 + x*x
    return 4.0 / denominator
# Repeatedly ask the master rank for an interval count, broadcast it and
# integrate f over [0, 1] with the midpoint rule, split across ranks.
while True:
    if rank == 0:
        print
        print "Number of intervals [0 exits]:"
        input = raw_input()
        try:
            n = int(input)
        except:
            print "Error: '%s' is not a valid interval." % (input)
            # NOTE(review): 'continue' here skips the bcast on rank 0 only,
            # so other ranks would block — confirm single-rank usage.
            continue
    else:
        n = 0
    # Every rank must agree on n before the work split.
    n = comm.bcast(n,root=0)
    if n > 0:
        # define intervals and make the appropriate sub-divisions
        h = 1.0/n
        temp_sum = 0.0
        # Each rank takes every size-th midpoint, offset by its rank.
        for i in range(rank+1, n, size):
            x = h * (float(i) - 0.5)
            temp_sum = temp_sum + f(x)
        # sum up integral from this rank
        mypi = h * temp_sum
        # global sum
        ourpi = comm.reduce(mypi,MPI.SUM,root=0)
        # print the result if we are the master
        if rank == 0:
            print "pi is %16.9f, error is %16.9f" % (ourpi, ourpi-pi)
    else:
        break
|
from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField
from django.db.models.signals import post_delete
from django.dispatch import receiver
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class Page(models.Model):
    """A simple CMS page: name, URL slug and rich-text description."""
    name = models.CharField(verbose_name='название', max_length=200, blank=True)
    slug = models.SlugField('название к сылке')
    description = RichTextUploadingField(verbose_name='описание',config_name='default', default=None, blank=True)

    class Meta:
        verbose_name = 'Страница'
        verbose_name_plural = 'Страницы'
        ordering = ['name']

    def __str__(self):
        return self.name
class Work(models.Model):
    """A portfolio item: name, short description and a thumbnail photo."""
    name = models.CharField(verbose_name='название', max_length=200, blank=True)
    description = models.CharField(verbose_name='описание', max_length=500, default=None, blank=True)
    photo = ThumbnailerImageField(verbose_name='фото', upload_to='works/photos')

    class Meta:
        # Fixed admin-visible typo: 'Протфолио' -> 'Портфолио'.
        verbose_name = 'Портфолио'
        verbose_name_plural = 'Портфолии'
        ordering = ['name']

    def __str__(self):
        return self.name
@receiver(post_delete, sender=Work)
def photo_post_delete_handler(sender, **kwargs):
    """Delete the photo file from storage when its Work row is deleted."""
    work = kwargs['instance']
    # NOTE(review): photo.path raises when the field is empty — relies on
    # 'photo' being a required field on Work; confirm before reuse.
    storage, path = work.photo.storage, work.photo.path
    storage.delete(path)
|
import json
from pathlib import Path
import sys
import time
import appdirs
import click
from tabulate import tabulate
from ai.backend.cli.interaction import ask_yn
from . import admin
from ..pretty import print_done, print_error, print_fail, print_info, print_wait
from ..session import Session
# Click command group: `backend.ai admin manager ...`
@admin.group()
def manager():
    """Set of manager control operations."""
@manager.command()
def status():
    """Show the manager's current status."""
    try:
        with Session() as session:
            resp = session.Manager.status()
            # Render status and active-session count as a two-row table.
            print(tabulate([('Status', 'Active Sessions'),
                            (resp['status'], resp['active_sessions'])],
                           headers='firstrow'))
    except Exception as e:
        print_error(e)
        sys.exit(1)
@manager.command()
@click.option('--wait', is_flag=True,
              help='Hold up freezing the manager until '
                   'there are no running sessions in the manager.')
@click.option('--force-kill', is_flag=True,
              help='Kill all running sessions immediately and freeze the manager.')
def freeze(wait, force_kill):
    """Freeze manager."""
    # --wait and --force-kill are mutually exclusive strategies for
    # handling sessions that are still running.
    if wait and force_kill:
        print('You cannot use both --wait and --force-kill options '
              'at the same time.', file=sys.stderr)
        return
    try:
        with Session() as session:
            if wait:
                # Poll every 3 seconds until no sessions remain active.
                while True:
                    resp = session.Manager.status()
                    active_sessions_num = resp['active_sessions']
                    if active_sessions_num == 0:
                        break
                    print_wait('Waiting for all sessions terminated... ({0} left)'
                               .format(active_sessions_num))
                    time.sleep(3)
                print_done('All sessions are terminated.')
            if force_kill:
                print_wait('Killing all sessions...')
            session.Manager.freeze(force_kill=force_kill)
            if force_kill:
                print_done('All sessions are killed.')
            print('Manager is successfully frozen.')
    except Exception as e:
        print_error(e)
        sys.exit(1)
@manager.command()
def unfreeze():
    """Unfreeze manager."""
    try:
        with Session() as sess:
            sess.Manager.unfreeze()
            print('Manager is successfully unfrozen.')
    except Exception as err:
        print_error(err)
        sys.exit(1)
# Click command group: `... admin announcement <subcommand>`.
@admin.group()
def announcement():
    """Global announcement related commands"""
@announcement.command()
def get():
    """Get current announcement."""
    try:
        with Session() as sess:
            info = sess.Manager.get_announcement()
            # Only print the message when an announcement is enabled.
            if not info.get('enabled', False):
                print('No announcements.')
            else:
                print(info.get('message'))
    except Exception as err:
        print_error(err)
        sys.exit(1)
@announcement.command()
@click.option('-m', '--message', default=None, type=click.STRING)
def update(message):
    """
    Post new announcement.

    MESSAGE: Announcement message.
    """
    try:
        with Session() as session:
            if message is None:
                # No -m given: open $EDITOR pre-filled with a Markdown hint.
                # click.edit() returns None when the editor is closed
                # without saving, which we treat as cancellation below.
                message = click.edit(
                    "<!-- Use Markdown format to edit the announcement message -->",
                )
            if message is None:
                print_info('Cancelled')
                sys.exit(1)
            session.Manager.update_announcement(enabled=True, message=message)
            print_done('Posted new announcement.')
    except Exception as e:
        print_error(e)
        sys.exit(1)
@announcement.command()
def delete():
    """Delete current announcement."""
    # Confirm with the user before touching server state.
    if not ask_yn():
        print_info('Cancelled.')
        sys.exit(1)
    try:
        with Session() as sess:
            sess.Manager.update_announcement(enabled=False)
            print_done('Deleted announcement.')
    except Exception as err:
        print_error(err)
        sys.exit(1)
@announcement.command()
def dismiss():
    """Do not show the same announcement again."""
    if not ask_yn():
        print_info('Cancelled.')
        sys.exit(1)
    try:
        # Per-user state file recording the last announcement shown.
        state_dir = Path(appdirs.user_state_dir('backend.ai', 'Lablup'))
        state_file = state_dir / 'announcement.json'
        with open(state_file, 'rb') as f:
            state = json.load(f)
        state['dismissed'] = True
        with open(state_file, 'w') as f:
            json.dump(state, f)
        print_done('Dismissed the last shown announcement.')
    except (IOError, json.JSONDecodeError):
        # Missing or unreadable state file: nothing was ever shown.
        print_fail('No announcements seen yet.')
        sys.exit(1)
    except Exception as e:
        print_error(e)
        sys.exit(1)
# Click command group: `... admin manager scheduler <subcommand>`.
@manager.group()
def scheduler():
    """
    The scheduler operation command group.
    """
    pass
@scheduler.command()
@click.argument('agent_ids', nargs=-1)
def include_agents(agent_ids):
    """
    Include agents in scheduling, meaning that the given agents
    will be considered to be ready for creating new session containers.
    """
    try:
        with Session() as session:
            session.Manager.scheduler_op('include-agents', agent_ids)
            # Fixed subject-verb agreement in the success message
            # ("agents ... accepts" -> "agents ... accept").
            print_done('The given agents now accept new sessions.')
    except Exception as e:
        print_error(e)
        sys.exit(1)
@scheduler.command()
@click.argument('agent_ids', nargs=-1)
def exclude_agents(agent_ids):
    """
    Exclude agents from scheduling, meaning that the given agents
    will no longer start new sessions unless they are "included" again,
    regardless of their restarts and rejoining events.
    """
    try:
        with Session() as sess:
            sess.Manager.scheduler_op('exclude-agents', agent_ids)
            print_done('The given agents will no longer start new sessions.')
    except Exception as err:
        print_error(err)
        sys.exit(1)
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
from typing import Iterable
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals.package_dists import package_python_dist
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.subsystems.setup_py_generation import (
FirstPartyDependencyVersionScheme,
SetupPyGeneration,
)
from pants.backend.python.subsystems.setuptools import PythonDistributionFieldSet
from pants.backend.python.target_types import (
PexBinary,
PythonDistribution,
PythonProvidesField,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.util_rules import dists, python_sources
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.package_dists import (
AmbiguousOwnerError,
DependencyOwner,
DistBuildChroot,
DistBuildChrootRequest,
DistBuildSources,
ExportedTarget,
ExportedTargetRequirements,
FinalizedSetupKwargs,
GenerateSetupPyRequest,
InvalidEntryPoint,
InvalidSetupPyArgs,
NoDistTypeSelected,
NoOwnerError,
OwnedDependencies,
OwnedDependency,
SetupKwargs,
SetupKwargsRequest,
SetupPyError,
declares_pkg_resources_namespace_package,
determine_explicitly_provided_setup_kwargs,
determine_finalized_setup_kwargs,
generate_chroot,
generate_setup_py,
get_exporting_owner,
get_owned_dependencies,
get_requirements,
get_sources,
merge_entry_points,
validate_commands,
)
from pants.base.exceptions import IntrinsicError
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FileTarget, ResourcesGeneratorTarget, ResourceTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import rule
from pants.engine.target import InvalidFieldException
from pants.engine.unions import UnionRule
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule, engine_error
from pants.util.strutil import softwrap
# Source line that marks a package as a pkg_resources-style namespace package;
# written into test __init__.py fixtures below.
_namespace_decl = "__import__('pkg_resources').declare_namespace(__name__)"
def create_setup_py_rule_runner(*, rules: Iterable) -> PythonRuleRunner:
    """Build a PythonRuleRunner preconfigured with the target types and
    BUILD-file objects that the setup.py-generation tests rely on."""
    runner = PythonRuleRunner(
        rules=rules,
        target_types=[
            PexBinary,
            PythonDistribution,
            PythonSourcesGeneratorTarget,
            PythonRequirementTarget,
            ResourceTarget,
            ResourcesGeneratorTarget,
            FileTarget,
        ],
        objects={"python_artifact": PythonArtifact},
    )
    # Tests need the host interpreter discovery environment.
    runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    return runner
# We use a trivial test that our SetupKwargs plugin hook works.
class PluginSetupKwargsRequest(SetupKwargsRequest):
    """SetupKwargsRequest union member used to exercise the plugin hook."""

    @classmethod
    def is_applicable(cls, _) -> bool:
        # Applies to every python_distribution target in these tests.
        return True
@rule
def setup_kwargs_plugin(request: PluginSetupKwargsRequest) -> SetupKwargs:
    """Inject a marker kwarg so tests can verify the plugin hook ran."""
    merged = dict(request.explicit_kwargs)
    merged["plugin_demo"] = "hello world"
    return SetupKwargs(merged, address=request.target.address)
@pytest.fixture
def chroot_rule_runner() -> PythonRuleRunner:
    """Rule runner wired with the full set of rules needed to build a dist
    chroot and to query its sources and finalized setup kwargs."""
    return create_setup_py_rule_runner(
        rules=[
            *core_target_types_rules(),
            determine_explicitly_provided_setup_kwargs,
            generate_chroot,
            generate_setup_py,
            determine_finalized_setup_kwargs,
            get_sources,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            *python_sources.rules(),
            *target_types_rules.rules(),
            setup_kwargs_plugin,
            *SetupPyGeneration.rules(),
            UnionRule(SetupKwargsRequest, PluginSetupKwargsRequest),
            QueryRule(DistBuildChroot, (DistBuildChrootRequest,)),
            QueryRule(DistBuildSources, (DistBuildChrootRequest,)),
            QueryRule(FinalizedSetupKwargs, (GenerateSetupPyRequest,)),
        ]
    )
def assert_chroot(
    rule_runner: PythonRuleRunner,
    expected_files: list[str],
    expected_setup_kwargs,
    addr: Address,
    interpreter_constraints: InterpreterConstraints | None = None,
) -> None:
    """Build the dist chroot for `addr` and check its file list and kwargs.

    Pass ``expected_setup_kwargs=None`` to skip the kwargs comparison (e.g.
    for dists that use a handwritten setup.py).
    """
    if interpreter_constraints is None:
        interpreter_constraints = InterpreterConstraints(["CPython>=3.7,<4"])
    tgt = rule_runner.get_target(addr)
    req = DistBuildChrootRequest(
        ExportedTarget(tgt), interpreter_constraints=interpreter_constraints
    )
    chroot = rule_runner.request(DistBuildChroot, [req])
    snapshot = rule_runner.request(Snapshot, [chroot.digest])
    # Order-insensitive file comparison.
    assert sorted(expected_files) == sorted(snapshot.files)
    if expected_setup_kwargs is not None:
        sources = rule_runner.request(DistBuildSources, [req])
        setup_kwargs = rule_runner.request(
            FinalizedSetupKwargs,
            [GenerateSetupPyRequest(ExportedTarget(tgt), sources, interpreter_constraints)],
        )
        assert expected_setup_kwargs == setup_kwargs.kwargs
def assert_chroot_error(
    rule_runner: PythonRuleRunner, addr: Address, exc_cls: type[Exception]
) -> None:
    """Assert that building the chroot for `addr` fails with exactly `exc_cls`."""
    tgt = rule_runner.get_target(addr)
    with pytest.raises(ExecutionError) as excinfo:
        rule_runner.request(
            DistBuildChroot,
            [
                DistBuildChrootRequest(
                    ExportedTarget(tgt),
                    InterpreterConstraints(["CPython>=3.7,<4"]),
                )
            ],
        )
    ex = excinfo.value
    assert len(ex.wrapped_exceptions) == 1
    # Identity comparison (`is`) instead of `==` for exact-type checks
    # (pycodestyle E721); deliberately not isinstance() so subclasses fail.
    assert type(ex.wrapped_exceptions[0]) is exc_cls
def test_use_existing_setup_script(chroot_rule_runner) -> None:
    """With generate_setup=False, the handwritten setup.py is used as-is and
    sources keep their original (non-relocated) paths."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/bar/BUILD": "python_sources()",
            "src/python/foo/bar/__init__.py": "",
            "src/python/foo/bar/bar.py": "",
            # Add a `.pyi` stub file to ensure we include it in the final result.
            "src/python/foo/bar/bar.pyi": "",
            "src/python/foo/resources/BUILD": 'resource(source="js/code.js")',
            "src/python/foo/resources/js/code.js": "",
            "files/BUILD": 'file(source="README.txt")',
            "files/README.txt": "",
            "BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    dependencies=[
                        ':setup',
                    ],
                    generate_setup=False,
                    provides=python_artifact(
                        name='foo', version='1.2.3',
                    )
                )
                python_sources(name="setup", dependencies=["src/python/foo"])
                """
            ),
            "setup.py": textwrap.dedent(
                """
                from setuptools import setup
                setup(
                    name = "foo",
                    version = "1.2.3",
                    package_dir={"": "src/python"},
                    packages = ["foo"],
                )
                """
            ),
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_sources(
                    dependencies=[
                        'src/python/foo/bar',
                        'src/python/foo/resources',
                        'files',
                    ]
                )
                """
            ),
            "src/python/foo/__init__.py": _namespace_decl,
            "src/python/foo/foo.py": "",
        }
    )
    assert_chroot(
        chroot_rule_runner,
        [
            "setup.py",
            "files/README.txt",
            "src/python/foo/bar/__init__.py",
            "src/python/foo/bar/bar.py",
            "src/python/foo/bar/bar.pyi",
            "src/python/foo/resources/js/code.js",
            "src/python/foo/__init__.py",
            "src/python/foo/foo.py",
        ],
        None,  # handwritten setup.py: no generated kwargs to compare
        Address("", target_name="foo-dist"),
    )
def test_use_generate_setup_script_package_provenance_agnostic(chroot_rule_runner) -> None:
    """package_data is computed from the Python packages present in the dist,
    regardless of which target types (here: resources) own the files."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_sources(
                    dependencies=[
                        'src/python/resources',
                    ]
                )
                """
            ),
            "src/python/foo/bar.py": "",
            # Here we have a Python package of resources.js defined via files owned by a resources
            # target. From a packaging perspective, we should be agnostic to what targets own a
            # python package when calculating package_data, we just need to know which packages are
            # defined by Python files in the distribution.
            "src/python/resources/BUILD": 'resources(sources=["**/*.py", "**/*.js"])',
            "src/python/resources/js/__init__.py": "",
            "src/python/resources/js/code.js": "",
            "src/python/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    dependencies=[
                        'src/python/foo',
                    ],
                    generate_setup=True,
                    provides=python_artifact(
                        name='foo', version='1.2.3',
                    )
                )
                """
            ),
        }
    )
    assert_chroot(
        chroot_rule_runner,
        [
            "foo/bar.py",
            "resources/js/__init__.py",
            "resources/js/code.js",
            "setup.py",
            "MANIFEST.in",
        ],
        {
            "name": "foo",
            "version": "1.2.3",
            "plugin_demo": "hello world",
            "packages": ("foo", "resources.js"),
            "namespace_packages": (),
            # The resources-owned package still contributes package_data.
            "package_data": {
                "resources.js": (
                    "__init__.py",
                    "code.js",
                )
            },
            "install_requires": (),
            "python_requires": "<4,>=3.7",
        },
        Address("src/python", target_name="foo-dist"),
    )
def test_merge_entry_points() -> None:
    """Entry points from `entry_points` and `provides.entry_points` merge by
    category; the same script name in both sources raises ValueError."""
    sources = {
        "src/python/foo:foo-dist `entry_points`": {
            "console_scripts": {"foo_tool": "foo.bar.baz:Tool.main"},
            "foo_plugins": {"qux": "foo.qux"},
        },
        "src/python/foo:foo-dist `provides.entry_points`": {
            "console_scripts": {"foo_qux": "foo.baz.qux"},
            "foo_plugins": {"foo-bar": "foo.bar:plugin"},
        },
    }
    expect = {
        "console_scripts": {
            "foo_tool": "foo.bar.baz:Tool.main",
            "foo_qux": "foo.baz.qux",
        },
        "foo_plugins": {
            "qux": "foo.qux",
            "foo-bar": "foo.bar:plugin",
        },
    }
    assert merge_entry_points(*list(sources.items())) == expect
    # Same console-script name declared in both places must be rejected.
    conflicting_sources = {
        "src/python/foo:foo-dist `entry_points`": {"console_scripts": {"my-tool": "ep1"}},
        "src/python/foo:foo-dist `provides.entry_points`": {"console_scripts": {"my-tool": "ep2"}},
    }
    err_msg = softwrap(
        """
        Multiple entry_points registered for console_scripts my-tool in:
        src/python/foo:foo-dist `entry_points`,
        src/python/foo:foo-dist `provides.entry_points`
        """
    )
    with pytest.raises(ValueError, match=err_msg):
        merge_entry_points(*list(conflicting_sources.items()))
def test_generate_chroot(chroot_rule_runner: PythonRuleRunner) -> None:
    """End-to-end chroot generation: relocated sources, resources as
    package_data, first-party dist deps as install_requires, namespace
    packages, and entry points."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/bar/baz/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name="baz-dist",
                    dependencies=[':baz'],
                    provides=python_artifact(
                        name='baz',
                        version='1.1.1'
                    )
                )
                python_sources()
                """
            ),
            "src/python/foo/bar/baz/baz.py": "",
            "src/python/foo/qux/BUILD": textwrap.dedent(
                """
                python_sources()
                pex_binary(name="bin", entry_point="foo.qux.bin:main")
                """
            ),
            "src/python/foo/qux/__init__.py": "",
            "src/python/foo/qux/qux.py": "",
            # Add a `.pyi` stub file to ensure we include it in the final result.
            "src/python/foo/qux/qux.pyi": "",
            "src/python/foo/resources/BUILD": 'resource(source="js/code.js")',
            "src/python/foo/resources/js/code.js": "",
            "files/BUILD": 'file(source="README.txt")',
            "files/README.txt": "",
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    dependencies=[
                        ':foo',
                    ],
                    provides=python_artifact(
                        name='foo', version='1.2.3'
                    ),
                    entry_points={
                        "console_scripts":{
                            "foo_main": "src/python/foo/qux:bin",
                        },
                    },
                )
                python_sources(
                    dependencies=[
                        'src/python/foo/bar/baz',
                        'src/python/foo/qux',
                        'src/python/foo/resources',
                        'files',
                    ]
                )
                """
            ),
            "src/python/foo/__init__.py": _namespace_decl,
            "src/python/foo/foo.py": "",
        }
    )
    assert_chroot(
        chroot_rule_runner,
        [
            "files/README.txt",
            "foo/qux/__init__.py",
            "foo/qux/qux.py",
            "foo/qux/qux.pyi",
            "foo/resources/js/code.js",
            "foo/__init__.py",
            "foo/foo.py",
            "setup.py",
            "MANIFEST.in",
        ],
        {
            "name": "foo",
            "version": "1.2.3",
            "plugin_demo": "hello world",
            "packages": ("foo", "foo.qux"),
            "namespace_packages": ("foo",),
            "package_data": {"foo": ("resources/js/code.js",), "foo.qux": ("qux.pyi",)},
            # baz sources are exported by baz-dist, so they become a
            # requirement on that dist rather than inlined sources.
            "install_requires": ("baz==1.1.1",),
            "python_requires": "<4,>=3.7",
            "entry_points": {"console_scripts": ["foo_main = foo.qux.bin:main"]},
        },
        Address("src/python/foo", target_name="foo-dist"),
    )
def test_generate_chroot_entry_points(chroot_rule_runner: PythonRuleRunner) -> None:
    """Entry points may reference pex_binary targets (relative, absolute, or
    `:name` addresses) or plain module paths; all forms resolve to standard
    console-script strings, and both declaration sites are merged."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/qux/BUILD": textwrap.dedent(
                """
                python_sources()
                pex_binary(name="bin", entry_point="foo.qux.bin:main")
                """
            ),
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    entry_points={
                        "console_scripts":{
                            "foo_main": "src/python/foo/qux:bin",
                            "foo_tool":"foo.bar.baz:Tool.main",
                            "bin_tool":"//src/python/foo/qux:bin",
                            "bin_tool2":"src/python/foo/qux:bin",
                            "hello":":foo-bin",
                        },
                        "foo_plugins":{
                            "qux":"foo.qux",
                        },
                    },
                    provides=python_artifact(
                        name='foo',
                        version='1.2.3',
                        entry_points={
                            "console_scripts":{
                                "foo_qux":"foo.baz.qux:main",
                                "foo_bin":":foo-bin",
                            },
                            "foo_plugins":[
                                "foo-bar=foo.bar:plugin",
                            ],
                        },
                    )
                )
                python_sources(
                    dependencies=[
                        'src/python/foo/qux',
                    ]
                )
                pex_binary(name="foo-bin", entry_point="foo.bin:main")
                """
            ),
        }
    )
    assert_chroot(
        chroot_rule_runner,
        [
            "setup.py",
            "MANIFEST.in",
        ],
        {
            "name": "foo",
            "version": "1.2.3",
            "plugin_demo": "hello world",
            "packages": tuple(),
            "namespace_packages": tuple(),
            "package_data": {},
            "install_requires": tuple(),
            "python_requires": "<4,>=3.7",
            "entry_points": {
                "console_scripts": [
                    "foo_main = foo.qux.bin:main",
                    "foo_tool = foo.bar.baz:Tool.main",
                    "bin_tool = foo.qux.bin:main",
                    "bin_tool2 = foo.qux.bin:main",
                    "hello = foo.bin:main",
                    "foo_qux = foo.baz.qux:main",
                    "foo_bin = foo.bin:main",
                ],
                "foo_plugins": [
                    "qux = foo.qux",
                    "foo-bar = foo.bar:plugin",
                ],
            },
        },
        Address("src/python/foo", target_name="foo-dist"),
    )
def test_generate_long_description_field_from_file(chroot_rule_runner: PythonRuleRunner) -> None:
    """`long_description_path` reads the file's contents into the generated
    `long_description` kwarg."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    long_description_path="src/python/foo/readme.md",
                    provides=python_artifact(
                        name='foo',
                        version='1.2.3',
                    )
                )
                """
            ),
            "src/python/foo/readme.md": "Some long description.",
        }
    )
    assert_chroot(
        chroot_rule_runner,
        [
            "setup.py",
            "MANIFEST.in",
        ],
        {
            "name": "foo",
            "version": "1.2.3",
            "plugin_demo": "hello world",
            "packages": tuple(),
            "namespace_packages": tuple(),
            "package_data": {},
            "install_requires": tuple(),
            "python_requires": "<4,>=3.7",
            "long_description": "Some long description.",
        },
        Address("src/python/foo", target_name="foo-dist"),
    )
def test_generate_long_description_field_from_file_already_having_it(
    chroot_rule_runner: PythonRuleRunner,
) -> None:
    """Supplying both `long_description_path` and an explicit
    `long_description` kwarg is an invalid field combination."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    long_description_path="src/python/foo/readme.md",
                    provides=python_artifact(
                        name='foo',
                        version='1.2.3',
                        long_description="Some long description.",
                    )
                )
                """
            ),
            "src/python/foo/readme.md": "Some long description.",
        }
    )
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/foo", target_name="foo-dist"),
        InvalidFieldException,
    )
def test_generate_long_description_field_from_non_existing_file(
    chroot_rule_runner: PythonRuleRunner,
) -> None:
    """A `long_description_path` that points at a missing file fails the build."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    long_description_path="src/python/foo/readme.md",
                    provides=python_artifact(
                        name='foo',
                        version='1.2.3',
                    )
                )
                """
            ),
        }
    )
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/foo", target_name="foo-dist"),
        IntrinsicError,
    )
def test_invalid_binary(chroot_rule_runner: PythonRuleRunner) -> None:
    """console_scripts entries must resolve to valid, dist-owned pex_binary
    entry points; each invalid form raises InvalidEntryPoint."""
    chroot_rule_runner.write_files(
        {
            "src/python/invalid_binary/lib.py": "",
            "src/python/invalid_binary/app1.py": "",
            "src/python/invalid_binary/app2.py": "",
            "src/python/invalid_binary/BUILD": textwrap.dedent(
                """\
                python_sources(name='not_a_binary', sources=['lib.py'])
                pex_binary(name='invalid_entrypoint_unowned1', entry_point='app1.py')
                pex_binary(name='invalid_entrypoint_unowned2', entry_point='invalid_binary.app2')
                python_distribution(
                    name='invalid_bin1',
                    provides=python_artifact(
                        name='invalid_bin1', version='1.1.1'
                    ),
                    entry_points={
                        "console_scripts":{
                            "foo": ":not_a_binary",
                        },
                    },
                )
                python_distribution(
                    name='invalid_bin2',
                    provides=python_artifact(
                        name='invalid_bin2', version='1.1.1'
                    ),
                    entry_points={
                        "console_scripts":{
                            "foo": ":invalid_entrypoint_unowned1",
                        },
                    },
                )
                python_distribution(
                    name='invalid_bin3',
                    provides=python_artifact(
                        name='invalid_bin3', version='1.1.1'
                    ),
                    entry_points={
                        "console_scripts":{
                            "foo": ":invalid_entrypoint_unowned2",
                        },
                    },
                )
                """
            ),
        }
    )
    # Entry point referencing a plain python_sources target.
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/invalid_binary", target_name="invalid_bin1"),
        InvalidEntryPoint,
    )
    # Entry points referencing binaries whose modules the dist does not own.
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/invalid_binary", target_name="invalid_bin2"),
        InvalidEntryPoint,
    )
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/invalid_binary", target_name="invalid_bin3"),
        InvalidEntryPoint,
    )
def test_binary_shorthand(chroot_rule_runner: PythonRuleRunner) -> None:
    """A pex_binary entry point given in file shorthand ('app.py:func')
    resolves to the module's dotted path in the generated console script."""
    chroot_rule_runner.write_files(
        {
            "src/python/project/app.py": "",
            "src/python/project/BUILD": textwrap.dedent(
                """
                python_sources()
                pex_binary(name='bin', entry_point='app.py:func')
                python_distribution(
                    name='dist',
                    provides=python_artifact(
                        name='bin', version='1.1.1'
                    ),
                    entry_points={
                        "console_scripts":{
                            "foo": ":bin",
                        },
                    },
                )
                """
            ),
        }
    )
    assert_chroot(
        chroot_rule_runner,
        ["project/app.py", "setup.py", "MANIFEST.in"],
        {
            "name": "bin",
            "version": "1.1.1",
            "plugin_demo": "hello world",
            "packages": ("project",),
            "namespace_packages": (),
            "install_requires": (),
            "python_requires": "<4,>=3.7",
            "package_data": {},
            # 'app.py:func' resolved to the dotted module path.
            "entry_points": {"console_scripts": ["foo = project.app:func"]},
        },
        Address("src/python/project", target_name="dist"),
    )
def test_get_sources() -> None:
    """Chroot source collection: files, package / namespace-package inference,
    and package_data, across several combinations of owned targets."""

    def assert_sources(
        expected_files,
        expected_packages,
        expected_namespace_packages,
        expected_package_data,
        addrs,
    ):
        # Fresh rule runner per scenario so the synthesized BUILD files
        # (which embed `addrs`) do not leak between calls.
        rule_runner = create_setup_py_rule_runner(
            rules=[
                get_sources,
                get_owned_dependencies,
                get_exporting_owner,
                *target_types_rules.rules(),
                *python_sources.rules(),
                QueryRule(OwnedDependencies, (DependencyOwner,)),
                QueryRule(DistBuildSources, (DistBuildChrootRequest,)),
            ]
        )
        rule_runner.write_files(
            {
                "src/python/foo/bar/baz/BUILD": textwrap.dedent(
                    """
                    python_sources(name='baz1', sources=['baz1.py'])
                    python_sources(name='baz2', sources=['baz2.py'])
                    """
                ),
                "src/python/foo/bar/baz/baz1.py": "",
                "src/python/foo/bar/baz/baz2.py": "",
                "src/python/foo/bar/__init__.py": _namespace_decl,
                "src/python/foo/qux/BUILD": "python_sources()",
                "src/python/foo/qux/__init__.py": "",
                "src/python/foo/qux/qux.py": "",
                "src/python/foo/resources/BUILD": 'resource(source="js/code.js")',
                "src/python/foo/resources/js/code.js": "",
                "src/python/foo/__init__.py": "",
                # We synthesize an owner for the addrs, so we have something to put in SetupPyChrootRequest.
                "src/python/foo/BUILD": textwrap.dedent(
                    f"""
                    python_distribution(
                        name="dist",
                        dependencies=["{'","'.join(addr.spec for addr in addrs)}"],
                        provides=python_artifact(name="foo", version="3.2.1"),
                    )
                    """
                ),
            }
        )
        owner_tgt = rule_runner.get_target(Address("src/python/foo", target_name="dist"))
        srcs = rule_runner.request(
            DistBuildSources,
            [
                DistBuildChrootRequest(
                    ExportedTarget(owner_tgt),
                    InterpreterConstraints(["CPython>=3.7,<4"]),
                )
            ],
        )
        chroot_snapshot = rule_runner.request(Snapshot, [srcs.digest])
        # Order-insensitive comparisons throughout.
        assert sorted(expected_files) == sorted(chroot_snapshot.files)
        assert sorted(expected_packages) == sorted(srcs.packages)
        assert sorted(expected_namespace_packages) == sorted(srcs.namespace_packages)
        assert expected_package_data == dict(srcs.package_data)

    assert_sources(
        expected_files=["foo/bar/baz/baz1.py", "foo/bar/__init__.py", "foo/__init__.py"],
        expected_packages=["foo", "foo.bar", "foo.bar.baz"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={},
        addrs=[Address("src/python/foo/bar/baz", target_name="baz1")],
    )
    assert_sources(
        expected_files=["foo/bar/baz/baz2.py", "foo/bar/__init__.py", "foo/__init__.py"],
        expected_packages=["foo", "foo.bar", "foo.bar.baz"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={},
        addrs=[Address("src/python/foo/bar/baz", target_name="baz2")],
    )
    assert_sources(
        expected_files=["foo/qux/qux.py", "foo/qux/__init__.py", "foo/__init__.py"],
        expected_packages=["foo", "foo.qux"],
        expected_namespace_packages=[],
        expected_package_data={},
        addrs=[Address("src/python/foo/qux")],
    )
    assert_sources(
        expected_files=[
            "foo/bar/baz/baz1.py",
            "foo/bar/__init__.py",
            "foo/qux/qux.py",
            "foo/qux/__init__.py",
            "foo/__init__.py",
            "foo/resources/js/code.js",
        ],
        expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={"foo": ("resources/js/code.js",)},
        addrs=[
            Address("src/python/foo/bar/baz", target_name="baz1"),
            Address("src/python/foo/qux"),
            Address("src/python/foo/resources"),
        ],
    )
    assert_sources(
        expected_files=[
            "foo/bar/baz/baz1.py",
            "foo/bar/baz/baz2.py",
            "foo/bar/__init__.py",
            "foo/qux/qux.py",
            "foo/qux/__init__.py",
            "foo/__init__.py",
            "foo/resources/js/code.js",
        ],
        expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={"foo": ("resources/js/code.js",)},
        addrs=[
            Address("src/python/foo/bar/baz", target_name="baz1"),
            Address("src/python/foo/bar/baz", target_name="baz2"),
            Address("src/python/foo/qux"),
            Address("src/python/foo/resources"),
        ],
    )
def test_get_requirements() -> None:
    """install_requires: direct 3rdparty deps plus first-party dists, with the
    first-party version pinned per the configured version scheme."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            determine_explicitly_provided_setup_kwargs,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            *target_types_rules.rules(),
            *SetupPyGeneration.rules(),
            QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
        ]
    )
    rule_runner.write_files(
        {
            "3rdparty/BUILD": textwrap.dedent(
                """
                python_requirement(name='ext1', requirements=['ext1==1.22.333'])
                python_requirement(name='ext2', requirements=['ext2==4.5.6'])
                python_requirement(name='ext3', requirements=['ext3==0.0.1'])
                """
            ),
            "src/python/foo/bar/baz/a.py": "",
            "src/python/foo/bar/baz/BUILD": "python_sources(dependencies=['3rdparty:ext1'])",
            "src/python/foo/bar/qux/a.py": "",
            "src/python/foo/bar/qux/BUILD": "python_sources(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'])",
            "src/python/foo/bar/a.py": "",
            "src/python/foo/bar/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bar-dist',
                    dependencies=[':bar'],
                    provides=python_artifact(name='bar', version='9.8.7'),
                )
                python_sources(dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'])
                """
            ),
            "src/python/foo/corge/a.py": "",
            "src/python/foo/corge/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='corge-dist',
                    # Tests having a 3rdparty requirement directly on a python_distribution.
                    dependencies=[':corge', '3rdparty:ext3'],
                    provides=python_artifact(name='corge', version='2.2.2'),
                )
                python_sources(dependencies=['src/python/foo/bar'])
                """
            ),
        }
    )
    # bar-dist owns baz and qux sources, so it requires their 3rdparty deps.
    assert_requirements(
        rule_runner,
        ["ext1==1.22.333", "ext2==4.5.6"],
        Address("src/python/foo/bar", target_name="bar-dist"),
    )
    # corge-dist depends on bar's sources, which bar-dist exports: the
    # dependency becomes a pin on the `bar` dist, rendered per scheme.
    assert_requirements(
        rule_runner,
        ["ext3==0.0.1", "bar==9.8.7"],
        Address("src/python/foo/corge", target_name="corge-dist"),
    )
    assert_requirements(
        rule_runner,
        ["ext3==0.0.1", "bar~=9.8.7"],
        Address("src/python/foo/corge", target_name="corge-dist"),
        version_scheme=FirstPartyDependencyVersionScheme.COMPATIBLE,
    )
    assert_requirements(
        rule_runner,
        ["ext3==0.0.1", "bar"],
        Address("src/python/foo/corge", target_name="corge-dist"),
        version_scheme=FirstPartyDependencyVersionScheme.ANY,
    )
def test_get_requirements_with_exclude() -> None:
    """A `!!`-excluded dependency is dropped from install_requires even though
    a transitive source dependency would otherwise pull it in."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            determine_explicitly_provided_setup_kwargs,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            *target_types_rules.rules(),
            *SetupPyGeneration.rules(),
            QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
        ]
    )
    rule_runner.write_files(
        {
            "3rdparty/BUILD": textwrap.dedent(
                """
                python_requirement(name='ext1', requirements=['ext1==1.22.333'])
                python_requirement(name='ext2', requirements=['ext2==4.5.6'])
                python_requirement(name='ext3', requirements=['ext3==0.0.1'])
                """
            ),
            "src/python/foo/bar/baz/a.py": "",
            "src/python/foo/bar/baz/BUILD": "python_sources(dependencies=['3rdparty:ext1'])",
            "src/python/foo/bar/qux/a.py": "",
            "src/python/foo/bar/qux/BUILD": "python_sources(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'])",
            "src/python/foo/bar/a.py": "",
            "src/python/foo/bar/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bar-dist',
                    dependencies=['!!3rdparty:ext2',':bar'],
                    provides=python_artifact(name='bar', version='9.8.7'),
                )
                python_sources(dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'])
                """
            ),
        }
    )
    # ext2 is transitively reachable via qux but excluded with `!!`.
    assert_requirements(
        rule_runner, ["ext1==1.22.333"], Address("src/python/foo/bar", target_name="bar-dist")
    )
def test_get_requirements_with_override_dependency_issue_17593() -> None:
    """Requirements reachable only via another python_requirement's explicit
    `dependencies` field are still collected (regression test for #17593)."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            determine_explicitly_provided_setup_kwargs,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            *target_types_rules.rules(),
            *SetupPyGeneration.rules(),
            QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
        ]
    )
    rule_runner.write_files(
        {
            "3rdparty/BUILD": textwrap.dedent(
                """
                python_requirement(name='ext1', requirements=['ext1==1.22.333'], dependencies=[':ext2'])
                python_requirement(name='ext2', requirements=['ext2==4.5.6'])
                """
            ),
            "src/python/foo/bar/baz/a.py": "",
            "src/python/foo/bar/baz/BUILD": "python_sources(dependencies=['3rdparty:ext1'])",
            "src/python/foo/bar/a.py": "",
            "src/python/foo/bar/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bar-dist',
                    dependencies=[':bar'],
                    provides=python_artifact(name='bar', version='9.8.7'),
                )
                python_sources(dependencies=['src/python/foo/bar/baz'])
                """
            ),
        }
    )
    # ext2 is only reachable through ext1's explicit `dependencies`.
    assert_requirements(
        rule_runner,
        ["ext1==1.22.333", "ext2==4.5.6"],
        Address("src/python/foo/bar", target_name="bar-dist"),
    )
def assert_requirements(
    rule_runner,
    expected_req_strs,
    addr: Address,
    *,
    version_scheme: FirstPartyDependencyVersionScheme = FirstPartyDependencyVersionScheme.EXACT,
):
    """Assert the computed ExportedTargetRequirements for the dist at `addr`.

    `version_scheme` controls how first-party dist dependencies are pinned
    (exact `==`, compatible `~=`, or unpinned).
    """
    rule_runner.set_options(
        [f"--setup-py-generation-first-party-dependency-version-scheme={version_scheme.value}"],
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    tgt = rule_runner.get_target(addr)
    reqs = rule_runner.request(
        ExportedTargetRequirements,
        [DependencyOwner(ExportedTarget(tgt))],
    )
    # NOTE(review): only the expected side is sorted here — this presumes the
    # rule already returns requirements in sorted order; confirm if flaky.
    assert sorted(expected_req_strs) == list(reqs)
def test_owned_dependencies() -> None:
    """Ownership: a dist owns its own target, its direct sources, and any
    dependencies not exported by some other dist."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            get_owned_dependencies,
            get_exporting_owner,
            *target_types_rules.rules(),
            QueryRule(OwnedDependencies, (DependencyOwner,)),
        ]
    )
    rule_runner.write_files(
        {
            "src/python/foo/bar/baz/BUILD": textwrap.dedent(
                """
                python_sources(name='baz1')
                python_sources(name='baz2')
                """
            ),
            "src/python/foo/bar/resource.txt": "",
            "src/python/foo/bar/bar1.py": "",
            "src/python/foo/bar/bar2.py": "",
            "src/python/foo/bar/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bar1-dist',
                    dependencies=[':bar1'],
                    provides=python_artifact(name='bar1', version='1.1.1'),
                )
                python_sources(
                    name='bar1',
                    sources=['bar1.py'],
                    dependencies=['src/python/foo/bar/baz:baz1'],
                )
                python_sources(
                    name='bar2',
                    sources=['bar2.py'],
                    dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
                )
                resource(name='bar-resources', source='resource.txt')
                """
            ),
            "src/python/foo/foo.py": "",
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    dependencies=[':foo'],
                    provides=python_artifact(name='foo', version='3.4.5'),
                )
                python_sources(
                    sources=['foo.py'],
                    dependencies=['src/python/foo/bar:bar1', 'src/python/foo/bar:bar2'],
                )
                """
            ),
        }
    )

    def assert_owned(owned: Iterable[str], exported: Address):
        # Compare address specs of the owned dependencies, order-insensitively.
        tgt = rule_runner.get_target(exported)
        assert sorted(owned) == sorted(
            od.target.address.spec
            for od in rule_runner.request(
                OwnedDependencies,
                [DependencyOwner(ExportedTarget(tgt))],
            )
        )

    assert_owned(
        [
            "src/python/foo/bar/bar1.py:bar1",
            "src/python/foo/bar:bar1-dist",
            "src/python/foo/bar/baz:baz1",
        ],
        Address("src/python/foo/bar", target_name="bar1-dist"),
    )
    # bar2 and its deps are not exported elsewhere, so foo-dist owns them.
    assert_owned(
        [
            "src/python/foo/bar/bar2.py:bar2",
            "src/python/foo/foo.py",
            "src/python/foo:foo-dist",
            "src/python/foo/bar:bar-resources",
            "src/python/foo/bar/baz:baz2",
        ],
        Address("src/python/foo", target_name="foo-dist"),
    )
@pytest.fixture
def exporting_owner_rule_runner() -> PythonRuleRunner:
    """Minimal rule runner for exercising the get_exporting_owner rule."""
    return create_setup_py_rule_runner(
        rules=[
            get_exporting_owner,
            *target_types_rules.rules(),
            QueryRule(ExportedTarget, (OwnedDependency,)),
        ]
    )
def assert_is_owner(rule_runner: PythonRuleRunner, owner: str, owned: Address):
    """Assert that `owned` is exported by the target whose spec is `owner`."""
    tgt = rule_runner.get_target(owned)
    exported = rule_runner.request(ExportedTarget, [OwnedDependency(tgt)])
    assert exported.target.address.spec == owner
def assert_owner_error(rule_runner, owned: Address, exc_cls: type[Exception]):
    """Assert that looking up the exporting owner of `owned` fails with
    exactly `exc_cls`."""
    tgt = rule_runner.get_target(owned)
    with pytest.raises(ExecutionError) as excinfo:
        rule_runner.request(
            ExportedTarget,
            [OwnedDependency(tgt)],
        )
    ex = excinfo.value
    assert len(ex.wrapped_exceptions) == 1
    # Identity comparison (`is`) instead of `==` for exact-type checks
    # (pycodestyle E721).
    assert type(ex.wrapped_exceptions[0]) is exc_cls
def assert_no_owner(rule_runner: PythonRuleRunner, owned: Address):
    # Convenience wrapper: no python_distribution exports `owned`.
    assert_owner_error(rule_runner, owned, NoOwnerError)
def assert_ambiguous_owner(rule_runner: PythonRuleRunner, owned: Address):
    # Convenience wrapper: more than one python_distribution could export `owned`.
    assert_owner_error(rule_runner, owned, AmbiguousOwnerError)
def test_get_owner_simple(exporting_owner_rule_runner: PythonRuleRunner) -> None:
    """Ownership resolution basics.

    A target's owner is an exporting `python_distribution` that (transitively)
    depends on it: a target reachable from exactly one distribution gets that
    distribution as owner; one reachable from none errors with NoOwnerError;
    one reachable from two sibling distributions errors with
    AmbiguousOwnerError.
    """
    exporting_owner_rule_runner.write_files(
        {
            "src/python/foo/bar/baz/BUILD": textwrap.dedent(
                """
                python_sources(name='baz1')
                python_sources(name='baz2')
                """
            ),
            "src/python/foo/bar/resource.ext": "",
            "src/python/foo/bar/bar2.py": "",
            "src/python/foo/bar/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bar1',
                    dependencies=['src/python/foo/bar/baz:baz1'],
                    provides=python_artifact(name='bar1', version='1.1.1'),
                )
                python_sources(
                    name='bar2',
                    dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
                )
                resource(name='bar-resources', source='resource.ext')
                """
            ),
            "src/python/foo/foo2.py": "",
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo1',
                    dependencies=['src/python/foo/bar/baz:baz2'],
                    provides=python_artifact(name='foo1', version='0.1.2'),
                )
                python_sources(name='foo2')
                python_distribution(
                    name='foo3',
                    dependencies=['src/python/foo/bar:bar2'],
                    provides=python_artifact(name='foo3', version='3.4.5'),
                )
                """
            ),
        }
    )
    # A distribution owns itself ...
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo/bar:bar1",
        Address("src/python/foo/bar", target_name="bar1"),
    )
    # ... and its direct dependency.
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo/bar:bar1",
        Address("src/python/foo/bar/baz", target_name="baz1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo1",
        Address("src/python/foo", target_name="foo1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo3",
        Address("src/python/foo", target_name="foo3"),
    )
    # Transitive dependencies of foo3 (via bar2) are owned by foo3.
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo3",
        Address("src/python/foo/bar", target_name="bar2", relative_file_path="bar2.py"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo3",
        Address("src/python/foo/bar", target_name="bar-resources"),
    )
    # foo2 is depended on by no distribution; baz2 by two (foo1 and foo3).
    assert_no_owner(exporting_owner_rule_runner, Address("src/python/foo", target_name="foo2"))
    assert_ambiguous_owner(
        exporting_owner_rule_runner, Address("src/python/foo/bar/baz", target_name="baz2")
    )
def test_get_owner_siblings(exporting_owner_rule_runner: PythonRuleRunner) -> None:
    """A distribution can own a sibling target declared in the same BUILD file."""
    exporting_owner_rule_runner.write_files(
        {
            "src/python/siblings/BUILD": textwrap.dedent(
                """
                python_sources(name='sibling1')
                python_distribution(
                    name='sibling2',
                    dependencies=['src/python/siblings:sibling1'],
                    provides=python_artifact(name='siblings', version='2.2.2'),
                )
                """
            ),
        }
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/siblings:sibling2",
        Address("src/python/siblings", target_name="sibling1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/siblings:sibling2",
        Address("src/python/siblings", target_name="sibling2"),
    )
def test_get_owner_not_an_ancestor(exporting_owner_rule_runner: PythonRuleRunner) -> None:
    """A distribution in a non-ancestor directory does not own its dependency.

    bbb depends on aaa, but bbb's directory is not an ancestor of aaa's, so
    aaa has no owner.
    """
    exporting_owner_rule_runner.write_files(
        {
            "src/python/notanancestor/aaa/BUILD": textwrap.dedent(
                """
                python_sources(name='aaa')
                """
            ),
            "src/python/notanancestor/bbb/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bbb',
                    dependencies=['src/python/notanancestor/aaa'],
                    provides=python_artifact(name='bbb', version='11.22.33'),
                )
                """
            ),
        }
    )
    assert_no_owner(exporting_owner_rule_runner, Address("src/python/notanancestor/aaa"))
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/notanancestor/bbb:bbb",
        Address("src/python/notanancestor/bbb"),
    )
def test_get_owner_multiple_ancestor_generations(
    exporting_owner_rule_runner: PythonRuleRunner,
) -> None:
    """When several ancestor distributions depend on a target, the closest one wins.

    Both aaa and aaa/bbb depend on aaa/bbb/ccc; the nearer ancestor bbb owns ccc.
    """
    exporting_owner_rule_runner.write_files(
        {
            "src/python/aaa/bbb/ccc/BUILD": textwrap.dedent(
                """
                python_sources(name='ccc')
                """
            ),
            "src/python/aaa/bbb/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='bbb',
                    dependencies=['src/python/aaa/bbb/ccc'],
                    provides=python_artifact(name='bbb', version='1.1.1'),
                )
                """
            ),
            "src/python/aaa/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='aaa',
                    dependencies=['src/python/aaa/bbb/ccc'],
                    provides=python_artifact(name='aaa', version='2.2.2'),
                )
                """
            ),
        }
    )
    assert_is_owner(
        exporting_owner_rule_runner, "src/python/aaa/bbb:bbb", Address("src/python/aaa/bbb/ccc")
    )
    assert_is_owner(
        exporting_owner_rule_runner, "src/python/aaa/bbb:bbb", Address("src/python/aaa/bbb")
    )
    assert_is_owner(exporting_owner_rule_runner, "src/python/aaa:aaa", Address("src/python/aaa"))
def test_validate_args() -> None:
    """validate_commands rejects 'upload' and dist-dir overrides; plain commands pass."""
    with pytest.raises(InvalidSetupPyArgs):
        validate_commands(("bdist_wheel", "upload"))
    with pytest.raises(InvalidSetupPyArgs):
        validate_commands(("sdist", "-d", "new_distdir/"))
    with pytest.raises(InvalidSetupPyArgs):
        validate_commands(("--dist-dir", "new_distdir/", "sdist"))
    # These must not raise.
    validate_commands(("sdist",))
    validate_commands(("bdist_wheel", "--foo"))
@pytest.mark.parametrize(
    "python_src",
    [
        "__import__('pkg_resources').declare_namespace(__name__)",
        "\n__import__('pkg_resources').declare_namespace(__name__) # type: ignore[attr-defined]",
        "import pkg_resources; pkg_resources.declare_namespace(__name__)",
        "from pkg_resources import declare_namespace; declare_namespace(__name__)",
    ],
)
def test_declares_pkg_resources_namespace_package(python_src: str) -> None:
    """Every recognized spelling of a pkg_resources namespace declaration is detected."""
    assert declares_pkg_resources_namespace_package(python_src)
@pytest.mark.parametrize(
    "python_src",
    [
        "",
        "import os\n\nos.getcwd()",
        "__path__ = 'foo'",
        "import pkg_resources",
        "add(1, 2); foo(__name__); self.shoot(__name__)",
        "declare_namespace(bonk)",
        "just nonsense, not even parseable",
    ],
)
def test_does_not_declare_pkg_resources_namespace_package(python_src: str) -> None:
    """Sources that merely mention pkg_resources/__name__ (or are unparseable) are not flagged."""
    assert not declares_pkg_resources_namespace_package(python_src)
def test_no_dist_type_selected() -> None:
    """Packaging a python_distribution with wheel=False and sdist=False fails.

    The failure surfaces as a single wrapped NoDistTypeSelected with an exact
    user-facing message.
    """
    rule_runner = PythonRuleRunner(
        rules=[
            determine_explicitly_provided_setup_kwargs,
            generate_chroot,
            generate_setup_py,
            determine_finalized_setup_kwargs,
            get_sources,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            package_python_dist,
            *dists.rules(),
            *python_sources.rules(),
            *target_types_rules.rules(),
            *SetupPyGeneration.rules(),
            QueryRule(BuiltPackage, (PythonDistributionFieldSet,)),
        ],
        target_types=[PythonDistribution],
        objects={"python_artifact": PythonArtifact},
    )
    rule_runner.write_files(
        {
            "src/python/aaa/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='aaa',
                    provides=python_artifact(name='aaa', version='2.2.2'),
                    wheel=False,
                    sdist=False
                )
                """
            ),
        }
    )
    address = Address("src/python/aaa", target_name="aaa")
    with pytest.raises(ExecutionError) as exc_info:
        rule_runner.request(
            BuiltPackage,
            inputs=[
                PythonDistributionFieldSet(
                    address=address,
                    provides=PythonProvidesField(
                        PythonArtifact(name="aaa", version="2.2.2"), address
                    ),
                )
            ],
        )
    # Exactly one underlying failure, of the expected type, with the exact message.
    assert 1 == len(exc_info.value.wrapped_exceptions)
    wrapped_exception = exc_info.value.wrapped_exceptions[0]
    assert isinstance(wrapped_exception, NoDistTypeSelected)
    assert (
        "In order to package src/python/aaa:aaa at least one of 'wheel' or 'sdist' must be `True`."
        == str(wrapped_exception)
    )
def test_too_many_interpreter_constraints(chroot_rule_runner: PythonRuleRunner) -> None:
    """Building a dist chroot under two disjoint interpreter constraints raises SetupPyError."""
    chroot_rule_runner.write_files(
        {
            "src/python/foo/BUILD": textwrap.dedent(
                """
                python_distribution(
                    name='foo-dist',
                    provides=python_artifact(
                        name='foo',
                        version='1.2.3',
                    )
                )
                """
            ),
        }
    )
    addr = Address("src/python/foo", target_name="foo-dist")
    tgt = chroot_rule_runner.get_target(addr)
    # Expected error text (softwrap collapses the layout whitespace).
    err = softwrap(
        """
        Expected a single interpreter constraint for src/python/foo:foo-dist,
        got: CPython<3,>=2.7 OR CPython<3.10,>=3.8.
        """
    )
    with engine_error(SetupPyError, contains=err):
        chroot_rule_runner.request(
            DistBuildChroot,
            [
                DistBuildChrootRequest(
                    ExportedTarget(tgt),
                    InterpreterConstraints([">=2.7,<3", ">=3.8,<3.10"]),
                )
            ],
        )
|
import dropbox
# SECURITY NOTE(review): hard-coded OAuth access token committed to source.
# Anyone with read access to this file can use the Dropbox account; rotate the
# token and load it from an environment variable or config file instead.
_API_ACCESS_TOKEN = 'Od8i3MHlBiAAAAAAAAAABZISCA_JLdWe5vROh3RpDGkyE1m3gwztshfcx863Eyy6'
# Bucket name is unused by this Dropbox backend (kept for interface parity
# with other storage backends -- presumably S3-style; verify against callers).
_BUCKET_NAME = ''
def upload_file(service, from_file_name, to_file_name):
    """Upload a local file to Dropbox, overwriting any existing destination.

    :param service: Dropbox client exposing ``file_delete`` and ``put_file``.
    :param from_file_name: path of the local file to read.
    :param to_file_name: destination name in Dropbox (no leading slash).
    :return: the client's ``put_file`` response.
    """
    # Best-effort delete of any previous version; ignore "not found" errors.
    # BUGFIX: the original called delete_file(service, '', "/"+name), which
    # always raised TypeError (delete_file takes two arguments) and was
    # silently swallowed -- the pre-delete never actually happened.
    try:
        service.file_delete("/" + to_file_name)
    except Exception:
        pass
    # Context manager closes the local handle (the original leaked it).
    with open(from_file_name, 'rb') as f:
        return service.put_file(to_file_name, f)
def upload_string(service, str_to_upload, to_file_name):
    """Upload the given string as a file in Dropbox.

    Writes the string to a local scratch file, uploads it via
    :func:`upload_file`, then removes the scratch file.

    :param service: Dropbox client (see :func:`upload_file`).
    :param str_to_upload: text content to upload.
    :param to_file_name: destination name in Dropbox.
    """
    # Local imports: the module top only imports `dropbox`.
    import os
    import tempfile
    # mkstemp avoids the fixed scratch filename the original used, which could
    # collide between concurrent callers.
    fd, tmp_path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(str_to_upload)
        # BUGFIX: the original passed four arguments to the three-parameter
        # upload_file(), raising TypeError on every call.
        upload_file(service, tmp_path, to_file_name)
    finally:
        os.remove(tmp_path)
def delete_file(service, object_name):
    """Delete `object_name` (a Dropbox path, e.g. '/foo.txt') via the client.

    Propagates whatever the Dropbox client raises if the path does not exist.
    """
    service.file_delete(object_name)
def download_file(service, object_name, to_file_name):
    """Download `object_name` from Dropbox into the local file `to_file_name`.

    :param service: Dropbox client exposing ``get_file_and_metadata``.
    :param object_name: Dropbox path to fetch.
    :param to_file_name: local path to write the content to.
    :return: the downloaded bytes.
    """
    f, metadata = service.get_file_and_metadata(object_name)
    data = f.read()
    # BUGFIX: the original opened a hard-coded 'magnum-opus.txt', never wrote
    # to it, and never closed it; write the payload to the requested path.
    with open(to_file_name, 'wb') as out:
        out.write(data)
    return data
def get_all_file_names(service):
    """List the Dropbox root folder.

    :param service: Dropbox client exposing ``metadata``.
    :return: list of ``(path, size_in_bytes)`` tuples, with any leading '/'
        stripped from each path.
    """
    entries = service.metadata('/')['contents']
    result = []
    for entry in entries:
        name = entry['path']
        if name[0] == '/':
            name = name[1:]
        result.append((name, entry['bytes']))
    return result
def create_service_object(extra_info):
    """Create and return a Dropbox client authenticated with the module token.

    :param extra_info: unused; kept for interface parity with other backends.
    """
    client = dropbox.client.DropboxClient(_API_ACCESS_TOKEN)
    return client
#s = create_service_object()
#get_all_file_names(s,_BUCKET_NAME)
#download_file(s,_BUCKET_NAME,'hehe.txt')
#upload_file(s,_BUCKET_NAME,'tuzi.txt','tuzi.txt')
#print get_all_file_names(s,_BUCKET_NAME); |
from __future__ import absolute_import
# import apis into api package
from .audit_api import AuditApi
from .auth_api import AuthApi
from .auth_groups_api import AuthGroupsApi
from .auth_providers_api import AuthProvidersApi
from .auth_roles_api import AuthRolesApi
from .auth_users_api import AuthUsersApi
from .cloud_api import CloudApi
from .cluster_api import ClusterApi
from .debug_api import DebugApi
from .dedupe_api import DedupeApi
from .event_api import EventApi
from .filepool_api import FilepoolApi
from .filesystem_api import FilesystemApi
from .fsa_api import FsaApi
from .job_api import JobApi
from .license_api import LicenseApi
from .protocols_api import ProtocolsApi
from .protocols_hdfs_api import ProtocolsHdfsApi
from .quota_api import QuotaApi
from .quota_quotas_api import QuotaQuotasApi
from .quota_reports_api import QuotaReportsApi
from .remotesupport_api import RemotesupportApi
from .snapshot_api import SnapshotApi
from .snapshot_changelists_api import SnapshotChangelistsApi
from .snapshot_snapshots_api import SnapshotSnapshotsApi
from .statistics_api import StatisticsApi
from .storagepool_api import StoragepoolApi
from .sync_api import SyncApi
from .sync_policies_api import SyncPoliciesApi
from .sync_reports_api import SyncReportsApi
from .sync_target_api import SyncTargetApi
from .worm_api import WormApi
from .zones_api import ZonesApi
from .zones_summary_api import ZonesSummaryApi
|
# Advent of Code 2020, day 6 part 2: for each group, count the questions that
# EVERY member answered "yes" to, and sum those counts over all groups.
from collections import Counter

# Fixes vs. original: don't shadow the builtin ``input``; close the file
# deterministically with a context manager.
with open('data/06.txt') as fh:
    groups = [block.split('\n') for block in fh.read().split('\n\n')]

summed = 0
for group in groups:
    group_size = len(group)
    # Frequency of each answered question across the whole group.
    counts = Counter()
    for choice in group:
        counts.update(choice)
    # A question counts only if every member answered it.
    summed += sum(1 for count in counts.values() if count == group_size)
print(summed)
|
"""
合工大 问句相似度研究算法实现
"""
from model.questiontype import QuestionType
from score.word_similarity import WordSimilarity
class SentenceSimilarity:
    """Question-sentence similarity scorer (HFUT question-similarity approach).

    Accumulates a weighted overall score from several components -- keyword
    (disease) similarity, semantic similarity, and a "syntax" bundle made of
    word-form, word-order and sentence-length similarity. Read the result
    back with :meth:`get_score` after calling :meth:`combination_sim`.
    """

    def __init__(self):
        # Score accumulators.
        self.__scores = 0.0          # overall weighted score
        self.__syntax_scores = 0.0   # weighted sum of the syntax sub-scores
        # Component weights in the overall score.
        self.__class_weight = 0.2    # question-type similarity
        self.__key_weight = 0.3      # keyword (disease) similarity
        self.__syntax_weight = 0.2   # syntax bundle contribution
        self.__semantic_weight = 0.5 # semantic similarity
        # Weights inside the syntax bundle.
        self.__order_weight = 0.1    # word-order similarity
        self.__len_weight = 0.1      # sentence-length similarity
        self.__word_weight = 0.7     # word-form similarity
        self.__dis_weight = 0.1      # distance weight (currently unused)

    def set_score_weight(self, weight_list):
        # NOTE(review): this assigns weight_list[0] to the *score accumulator*
        # and then resets the syntax weights to their defaults. It looks buggy
        # (probably meant to install new weights), but the intent is unclear,
        # so the original behavior is preserved verbatim.
        self.__syntax_scores = weight_list[0]
        self.__order_weight = 0.1
        self.__len_weight = 0.1
        self.__word_weight = 0.7
        self.__dis_weight = 0.1

    def get_score(self):
        """Return the accumulated overall similarity score."""
        return self.__scores

    def word_sim(self, question1, question2):
        """Word-form similarity: share of word occurrences common to both questions.

        :param question1: segmented question as a list of words.
        :param question2: segmented question as a list of words.
        """
        same_count = 0
        # BUGFIX: iterate each shared word once. The original iterated the
        # (duplicate-containing) intersection list, so a word appearing twice
        # in question1 was counted twice and the score could exceed 1.
        for w in set(question1) & set(question2):
            same_count += min(question1.count(w), question2.count(w))
        score = 2 * same_count / (len(question1) + len(question2))
        self.__syntax_scores += self.__word_weight * score

    def order_sim(self, question1, question2):
        """Word-order similarity over words occurring exactly once in each question."""
        # BUGFIX: the original tested `question2.count(w)` (always truthy for a
        # shared word) instead of `== 1`.
        once_shared = [w for w in question1
                       if w in question2
                       and question1.count(w) == 1 and question2.count(w) == 1]
        if len(once_shared) <= 1:
            score = 0
        else:
            pfirst = sorted(question1.index(w) for w in once_shared)
            psecond = [question2.index(question1[i]) for i in pfirst]
            # Count adjacent inversions: neighboring shared words whose
            # relative order in question2 disagrees with question1.
            inversions = 0
            for i in range(len(psecond) - 1):
                if psecond[i] > psecond[i + 1]:
                    inversions += 1
            # BUGFIX: the original counted *agreeing* pairs and still
            # subtracted from 1, so identical word order scored 0 and fully
            # reversed order scored 1 -- the opposite of a similarity.
            score = 1 - inversions / (len(once_shared) - 1)
        self.__syntax_scores += self.__order_weight * score

    def len_sim(self, question1, question2):
        """Sentence-length similarity: 1 minus the normalized length gap."""
        score = 1 - abs((len(question1) - len(question2)) / (len(question1) + len(question2)))
        # BUGFIX: the original weighted this with __order_weight; use the
        # dedicated length weight (numerically identical today, but correct).
        self.__syntax_scores += self.__len_weight * score

    def semantic_sim(self, question1, question2):
        """Semantic similarity: mean best word-to-word similarity, in both directions."""
        n = len(question1)
        m = len(question2)
        # Hoisted: the original constructed a new WordSimilarity per word pair.
        ws = WordSimilarity()
        score1 = 0.0
        for i in range(n):
            # Best match in question2 for each word of question1.
            score1 += max(ws.get_similarity(question1[i], question2[j]) for j in range(m))
        score1 /= (2 * n)
        score2 = 0.0
        for j in range(m):
            score2 += max(ws.get_similarity(question1[i], question2[j]) for i in range(n))
        # BUGFIX: score2 sums m maxima, so normalize by 2*m (original used 2*n).
        score2 /= (2 * m)
        self.__scores += self.__semantic_weight * (score1 + score2)

    def class_sim(self, question_type1, question_type2):
        """Question-type similarity: 1 if equal, 0.5 if either is Solution, else 0."""
        if question_type1 == question_type2:
            score = 1
        elif question_type1 is QuestionType.Solution or question_type2 is QuestionType.Solution:
            score = 0.5
        else:
            score = 0
        self.__scores += self.__class_weight * score

    def key_sim(self, question_a, question_b):
        """Keyword similarity: fraction of question_a's disease keywords present in question_b's."""
        keya = question_a.get_disease()
        keyb = question_b.get_disease()
        hits = sum(1 for k in keya if k in keyb)
        if len(keya) > 0:
            self.__scores += self.__key_weight * hits / len(keya)

    def combination_sim(self, question_a, question_b):
        """Accumulate the combined similarity of two question objects.

        :param question_a: question object exposing get_words() and get_disease().
        :param question_b: same interface as question_a.
        """
        question1 = question_a.get_words()
        question2 = question_b.get_words()
        # self.class_sim(question_type1, question_type2)  # type term currently disabled
        self.key_sim(question_a, question_b)
        self.semantic_sim(question1, question2)
        self.word_sim(question1, question2)
        self.len_sim(question1, question2)
        self.order_sim(question1, question2)
        self.__scores += self.__syntax_scores * self.__syntax_weight
if __name__ == '__main__':
    # Smoke check: a fresh scorer starts at 0.0.
    scorer = SentenceSimilarity()
    # scorer.combination_sim(...) needs segmented question objects; see combination_sim.
    print(scorer.get_score())
|
class Solution(object):
    """LeetCode 73 'Set Matrix Zeroes'."""

    def setZeroes(self, matrix):
        """Zero out, in place, every row and column that contains a zero.

        Record the zero rows/columns first, then clear them: O(m*n) time and
        O(m+n) extra space. The original marked zero cells with the string
        sentinel 'x' and re-walked a full row+column per marked cell --
        O(m*n*(m+n)) time -- and would misbehave on a matrix legitimately
        containing 'x'; it also raised IndexError on an empty matrix.

        :param matrix: rectangular list of lists, mutated in place.
        :return: None (in-place mutation, matching the original contract).
        """
        if not matrix or not matrix[0]:
            return  # nothing to do for an empty matrix
        zero_rows = set()
        zero_cols = set()
        for i, row in enumerate(matrix):
            for j, value in enumerate(row):
                if value == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        for i, row in enumerate(matrix):
            for j in range(len(row)):
                if i in zero_rows or j in zero_cols:
                    row[j] = 0
# Manual check of Solution.setZeroes on a 4x4 example; zeros at (0,0) and
# (2,1) clear rows 0 and 2 plus columns 0 and 1.
matrix = [
    [0, 1, 1, 1],
    [1, 1, 1, 1],
    [1, 0, 1, 1],
    [1, 1, 1, 1],
]
Solution().setZeroes(matrix)
print(matrix)  # expected: [[0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 1, 1]]
|
# some training and examples on namedtuple!!!!
from collections import namedtuple
# Car = namedtuple("car", "color speed")
# # Car = namedtuple("Car", ['color', 'speed'])
# my_car = Car('red', 100)
# print(my_car.color)
# print(my_car.speed)
# print(my_car)
# print(*my_car) # '*' for unpacking the arguments!!!
# #
# # output: red
# # output: 100
# # output: car(color='red', speed=100)
# # output: red 100
# ###########
# print(my_car[0])
# print(tuple(my_car))
# #
# # output: red
# # output: ('red', 100)
###############################################
# # make class form tupple with desired methods
# Car = namedtuple('Car', "color speed")
# class MyCarWithMethods(Car): # inherit from Car!!! so it require it's arguments!!!
# def hexcolor(self):
# if self.color == 'red':
# return '#ff0000'
# else:
# return '#000000'
# c = MyCarWithMethods('red', 80)
# print(c.hexcolor())
# #
# # output: #ff0000
###############################################
# Car = namedtuple('Car', 'color speed')
# print(Car._fields)
# #
# # output: ('color', 'speed')
# # add 'charge' as tuple, to fields!!!
# ElectircCar = namedtuple('ElectricCar', Car._fields + ('charge',))
# my_eleccar = ElectircCar('blue', 60, 4000)
# print(my_eleccar)
# #
# # output: ElectricCar(color='blue', speed=60, charge=4000)
###############################################
# Demonstrate the namedtuple helper methods: _asdict, _replace, _make.
Car = namedtuple("car", ["color", "speed"])
my_car = Car(color='red', speed=100)
# _asdict returns a plain dict of field -> value.
as_dict = my_car._asdict()
print(type(as_dict))
print(as_dict)
#
# output: <class 'dict'>
# output: {'color': 'red', 'speed': 100}
# ###########
# _replace builds a NEW instance with the selected fields changed.
new_car = my_car._replace(color='green')
print(new_car)
#
# output: car(color='green', speed=100)
# ###########
# _make constructs an instance from any iterable.
second_car = Car._make(['yellow', 160])
print(second_car)
#
# output: car(color='yellow', speed=160)
|
import math
def square(numbers=None):
    """Print (and return) the squares of a comma-separated list of integers.

    :param numbers: optional comma-separated string, e.g. "1,2,3". When None
        (the default, preserving the original behavior) the string is read
        interactively from stdin.
    :return: list of squared integers.
    """
    if numbers is None:
        numbers = input("Enter the numbers in the list seperated by a comma: ")
    # Don't shadow the builtin ``list`` (the original did); a comprehension
    # replaces the manual append loop. int() tolerates surrounding whitespace.
    squared = [int(token) ** 2 for token in numbers.split(",")]
    print(squared)
    return squared
square() |
'''''''''''''''''''''''''''''''''''''''
import modules
'''''''''''''''''''''''''''''''''''''''
import numpy as np
import pygame
import sys
import math
import tkinter as tk
import json
import os
pygame.init()  # must run before the SysFont/display calls below
pygame.display.set_caption('Connect Four - Global Offensive')
'''''''''''''''''''''''''''''''''''''''
define global variables
'''''''''''''''''''''''''''''''''''''''
#Casual Leaderboards
# Leaderboards persist as JSON dicts keyed '1'..'7' (rank): names in
# nleader.txt, scores in sleader.txt. On first run the files are created with
# placeholder rows; afterwards they are loaded from disk.
if not os.path.isfile('nleader.txt'):
    nleaderboard = {'1':'', '2':'', '3':'', '4':'','5':'','6':'','7':''}
    nleaderboard['1'] = '_____________'
    nleaderboard['2'] = '_____________'
    nleaderboard['3'] = '_____________'
    nleaderboard['4'] = '_____________'
    nleaderboard['5'] = '_____________'
    nleaderboard['6'] = '_____________'
    nleaderboard['7'] = '_____________'
    with open('nleader.txt','w') as fp:
        json.dump(nleaderboard, fp)
else:
    with open('nleader.txt', 'r') as fp:
        nleaderboard = json.load(fp)
if not os.path.isfile('sleader.txt'):
    sleaderboard = {'1':'', '2':'', '3':'', '4':'','5':'','6':'','7':''}
    sleaderboard['1'] = '__'
    sleaderboard['2'] = '__'
    sleaderboard['3'] = '__'
    sleaderboard['4'] = '__'
    sleaderboard['5'] = '__'
    sleaderboard['6'] = '__'
    sleaderboard['7'] = '__'
    with open('sleader.txt','w') as fp:
        json.dump(sleaderboard, fp)
else:
    with open('sleader.txt', 'r') as fp:
        sleaderboard = json.load(fp)
#Competitive Leaderboards
# Same scheme for competitive mode (cnleader.txt / csleader.txt); the
# placeholder rows spell out an "Under Construction" banner.
if not os.path.isfile('cnleader.txt'):
    cnleaderboard = {'1':'', '2':'', '3':'', '4':'','5':'','6':'','7':''}
    cnleaderboard['1'] = '             '
    cnleaderboard['2'] = '             '
    cnleaderboard['3'] = '   !Under!   '
    cnleaderboard['4'] = '             '
    cnleaderboard['5'] = ' !Construction!'
    cnleaderboard['6'] = '             '
    cnleaderboard['7'] = '             '
    with open('cnleader.txt','w') as fp:
        json.dump(cnleaderboard, fp)
else:
    with open('cnleader.txt', 'r') as fp:
        cnleaderboard = json.load(fp)
if not os.path.isfile('csleader.txt'):
    csleaderboard = {'1':'', '2':'', '3':'', '4':'','5':'','6':'','7':''}
    csleaderboard['1'] = '  '
    csleaderboard['2'] = '  '
    csleaderboard['3'] = '  '
    csleaderboard['4'] = '  '
    csleaderboard['5'] = '  '
    csleaderboard['6'] = '  '
    csleaderboard['7'] = '  '
    with open('csleader.txt','w') as fp:
        json.dump(csleaderboard, fp)
else:
    with open('csleader.txt', 'r') as fp:
        csleaderboard = json.load(fp)
#Colours
WHITE = (255,255,255)
BLACK = (0,0,0)
GREY = (128,128,128)
BEIGE = (204, 201, 192)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
YELLOW = (255,255,0)
PURPLE = (200,0,200)
CYAN = (0,255,255)
DRED = (94,25,20)
# Map from a colour's display name to its RGB tuple (used for player picks).
colorIn = {
    "Red" : RED,
    "Green" : GREEN,
    "Blue" : BLUE,
    "Yellow" : YELLOW,
    "Purple" : PURPLE,
    "Cyan" : CYAN
}
# Fonts (pygame.init() above must have run first).
winfont = pygame.font.SysFont("monospace", 75)
leadfont = pygame.font.SysFont('monospace', 50)
C4font = pygame.font.SysFont("Comicsans", 100)
GEfont = pygame.font.SysFont("Comicsans", 60)
# Game/board configuration and navigation state shared across screens.
exit_game = False
WIN_REQ = 4           # pieces in a row needed to win
ROW_COUNT = 6
COLUMN_COUNT = 7
P1col = ""            # player colours, set from the colour-pick screen
P2col = ""
nav = "home"          # current top-level screen
lnav = 'casual'       # current leaderboard page
winner = 0
score = 0
winnername = ''
# Flags for the "please pick a colour" warning states.
p1p2entcol = False
p1entcol = False
p2entcol = False
p1p2entsamecol = False
nmm = False
# P1col/P2col are '' here, so these lookups are no-ops at startup; they matter
# only if defaults were pre-set to a colour name.
if P1col in colorIn:
    P1col = colorIn[P1col]
if P2col in colorIn:
    P2col = colorIn[P2col]
# Screen geometry: one extra row above the board for the hovering piece.
SQUARESIZE = 100
boardWidth = COLUMN_COUNT * SQUARESIZE
boardHeight = (ROW_COUNT + 1) * SQUARESIZE
screenWidth = boardWidth
screenHeight = boardHeight
size = (screenWidth,screenHeight)
screenCenterH = screenWidth/2
screenCenterV = screenHeight/2
radius = int(SQUARESIZE/2 -5)
screen = pygame.display.set_mode(size)
#screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
'''''''''''''''''''''''''''''''''''''''
define button properties
'''''''''''''''''''''''''''''''''''''''
class button():
    """A clickable rectangular button with an optional centered text label."""

    def __init__(self, color, x, y, width, height, text=''):
        self.color = color
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.text = text

    def draw(self, win, outline):
        """Draw the button on *win*: a 3px *outline* border, the face, then the label."""
        pygame.draw.rect(win, outline, (self.x - 3, self.y - 3, self.width + 6, self.height + 6), 0)
        pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height), 0)
        if self.text != '':
            label = pygame.font.SysFont('comicsans', 60).render(self.text, 1, (0, 0, 0))
            # Center the label within the button face.
            label_x = self.x + (self.width / 2 - label.get_width() / 2)
            label_y = self.y + (self.height / 2 - label.get_height() / 2)
            win.blit(label, (label_x, label_y))

    def isOver(self, pos):
        """Return True when *pos* (an (x, y) tuple) lies strictly inside the button."""
        inside_x = self.x < pos[0] < self.x + self.width
        inside_y = self.y < pos[1] < self.y + self.height
        return inside_x and inside_y
# Shared button geometry: standard size and screen-centered anchors.
buttonW = 250
buttonH = 100
buttonVcenter = screenCenterV - buttonH/2
buttonWcenter = screenCenterH - buttonW/2
# Navigation buttons used across screens.
playbutton = button(WHITE, buttonWcenter,(buttonVcenter-30),buttonW,buttonH,"Play")
leaderboardbutton = button(WHITE, (buttonWcenter-25), (buttonVcenter+100), (buttonW + 50), buttonH, "Leaderboard")
backbutton = button(WHITE, 20, (screenHeight-120), 150, 100, "Back")
quitGamebutton = button(WHITE, (screenWidth - buttonW - 20), (screenHeight-120), buttonW, 100, "Quit Game")
clearBoardbutton = button(WHITE, (screenWidth - buttonW - 20 - 20), (screenHeight-120), (buttonW+20), 100, "Clear Board")
StartGamebutton = button(WHITE, (buttonWcenter-25), (buttonVcenter+280), (buttonW + 50), buttonH, "Start Game")
rarrowbutton = button(WHITE, (screenWidth - 100), 25, 70, 70, ">")
larrowbutton = button(WHITE, 30, 25, 70, 70, "<")
continuebutton = button(WHITE, screenCenterH - 200/2, screenCenterV + 23, 200, 75, 'Continue')
# Colour-swatch buttons for the player colour pickers: a 3x2 grid on the left
# half for player 1 and a mirrored grid on the right half for player 2.
colbuttonW = 90
colbuttonH = 60
redp1 = button(RED, (screenCenterH/4 - colbuttonW/2 - 20), 360, colbuttonW, colbuttonH, "")
greenp1 = button(GREEN, (screenCenterH/2 - colbuttonW/2), 360, colbuttonW, colbuttonH, "")
bluep1 = button(BLUE, ((3*screenCenterH)/4 - colbuttonW/2 + 20), 360, colbuttonW, colbuttonH, "")
blackp1 = button(BLACK, (screenCenterH/4 - colbuttonW/2 - 20), 450, colbuttonW, colbuttonH, "")
purplep1 = button(PURPLE, (screenCenterH/2 - colbuttonW/2), 450, colbuttonW, colbuttonH, "")
whitep1 = button(WHITE, ((3*screenCenterH)/4 - colbuttonW/2 + 20), 450, colbuttonW, colbuttonH, "")
redp2 = button(RED, ((5*screenCenterH)/4 - colbuttonW/2 - 20), 360, colbuttonW, colbuttonH, "")
greenp2 = button(GREEN, ((3*screenCenterH)/2 - colbuttonW/2), 360, colbuttonW, colbuttonH, "")
bluep2 = button(BLUE, ((7*screenCenterH)/4 - colbuttonW/2 + 20), 360, colbuttonW, colbuttonH, "")
blackp2 = button(BLACK, ((5*screenCenterH)/4 - colbuttonW/2 - 20), 450, colbuttonW, colbuttonH, "")
purplep2 = button(PURPLE, ((3*screenCenterH)/2 - colbuttonW/2), 450, colbuttonW, colbuttonH, "")
whitep2 = button(WHITE, ((7*screenCenterH)/4 - colbuttonW/2 + 20), 450, colbuttonW, colbuttonH, "")
'''''''''''''''''''''''''''''''''''''''
define text input sequences
'''''''''''''''''''''''''''''''''''''''
#must be less than 13 characters
'''''''''''''''''''''''''''''''''''''''
define main sequences
'''''''''''''''''''''''''''''''''''''''
def game_intro():
    """Draw the splash screen (ring logo, bars, title) and pause for 3 seconds."""
    screen.fill(BEIGE)
    # Black ring: a filled black circle with a slightly smaller beige circle on top.
    pygame.draw.circle(screen, BLACK, (int(screenCenterH), int(screenCenterV)),300)
    pygame.draw.circle(screen, BEIGE, (int(screenCenterH), int(screenCenterV)),290)
    BarW = 540
    # NOTE(review): the bar y-positions use screenCenterH (horizontal center);
    # presumably screenCenterV was intended -- confirm before changing.
    pygame.draw.rect(screen, BLACK, (int(screenCenterH - BarW/2), int(screenCenterH - 130), BarW,20))
    pygame.draw.rect(screen, BLACK, (int(screenCenterH - BarW/2), int(screenCenterH + 110), BarW,20))
    C4 = C4font.render("Connect Four", 1, BLACK)
    GE = GEfont.render("Global Offensive", 1, BLACK)
    c4W = C4.get_width()
    goW = GE.get_width()
    # Horizontally center each title line.
    screen.blit(C4, (int(screenCenterH - c4W/2),290))
    screen.blit(GE, (int(screenCenterH -goW/2),380))
    pygame.display.update()
    pygame.time.wait(3000)
def home_page():
    """Draw one frame of the home screen and handle its events.

    Sets the module-global `nav` to 'casual', 'leaderboard' or 'quit' when the
    corresponding button is clicked; buttons highlight grey on hover. Intended
    to be called repeatedly from the main loop while `nav == "home"`.
    """
    global nav
    nav = "home"
    screen.fill(BEIGE)
    C4 = C4font.render("Connect Four", 1, BLACK)
    GE = GEfont.render("Global Offensive", 1, BLACK)
    c4W = C4.get_width()
    goW = GE.get_width()
    screen.blit(C4, (int(screenCenterH - c4W/2),40))
    screen.blit(GE, (int(screenCenterH -goW/2),120))
    playbutton.draw(screen,(0,0,0))
    leaderboardbutton.draw(screen, (0,0,0))
    quitGamebutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if playbutton.isOver(pos):
                nav = 'casual'
            if leaderboardbutton.isOver(pos):
                nav = 'leaderboard'
            if quitGamebutton.isOver(pos):
                nav = 'quit'
        if event.type == pygame.MOUSEMOTION:
            # Hover highlight: grey the hovered button, reset the others.
            if playbutton.isOver(pos):
                playbutton.color = GREY
            elif leaderboardbutton.isOver(pos):
                leaderboardbutton.color = GREY
            elif quitGamebutton.isOver(pos):
                quitGamebutton.color = GREY
            else:
                playbutton.color = WHITE
                leaderboardbutton.color = WHITE
                quitGamebutton.color = WHITE
def leaderboard_screen():
    """Run the leaderboard section: loop the casual page, then the competitive one.

    The page functions flip the module-global `lnav` between 'casual' and
    'competitive' (arrow buttons) or set it to 'out' (back button) to leave.
    NOTE(review): returning to 'casual' from the competitive loop exits this
    function; presumably the caller re-enters while `nav == 'leaderboard'` --
    confirm against the main loop.
    """
    global nav
    global lnav
    lnav = 'casual'
    while lnav == 'casual':
        casual_leaderboard_screen()
    while lnav == 'competitive':
        competitive_leaderboard_screen()
def casual_leaderboard_screen():
    """Draw one frame of the casual leaderboard and handle its events.

    Renders ranks 1-7 with names (nleaderboard) and scores (sleaderboard).
    Back exits the section, '>' switches to the competitive page, and Clear
    Board resets both dicts to placeholders and rewrites them to disk.
    """
    global nav
    global lnav
    screen.fill(BEIGE)
    Leaderboard = GEfont.render("Casual Leaderboard", 1 , BLACK)
    lW = Leaderboard.get_width()
    screen.blit(Leaderboard, ((screenCenterH - lW/2),40))
    #Numbers
    One = leadfont.render('1', 1, BLACK)
    Two = leadfont.render('2', 1, BLACK)
    Three = leadfont.render('3', 1, BLACK)
    Four = leadfont.render('4', 1, BLACK)
    Five = leadfont.render('5', 1, BLACK)
    Six = leadfont.render('6', 1, BLACK)
    Seven = leadfont.render('7', 1, BLACK)
    screen.blit(One, (40,120))
    screen.blit(Two, (40,180))
    screen.blit(Three, (40,240))
    screen.blit(Four, (40,300))
    screen.blit(Five, (40,360))
    screen.blit(Six, (40,420))
    screen.blit(Seven, (40,480))
    #Names
    nOne = leadfont.render(nleaderboard['1'], 1, BLACK)
    nTwo = leadfont.render(nleaderboard['2'], 1, BLACK)
    nThree = leadfont.render(nleaderboard['3'], 1, BLACK)
    nFour = leadfont.render(nleaderboard['4'], 1, BLACK)
    nFive = leadfont.render(nleaderboard['5'], 1, BLACK)
    nSix = leadfont.render(nleaderboard['6'], 1, BLACK)
    nSeven = leadfont.render(nleaderboard['7'], 1, BLACK)
    screen.blit(nOne, (140,120))
    screen.blit(nTwo, (140,180))
    screen.blit(nThree, (140,240))
    screen.blit(nFour, (140,300))
    screen.blit(nFive, (140,360))
    screen.blit(nSix, (140,420))
    screen.blit(nSeven, (140,480))
    #Scores
    sOne = leadfont.render(sleaderboard['1'], 1, BLACK)
    sTwo = leadfont.render(sleaderboard['2'], 1, BLACK)
    sThree = leadfont.render(sleaderboard['3'], 1, BLACK)
    sFour = leadfont.render(sleaderboard['4'], 1, BLACK)
    sFive = leadfont.render(sleaderboard['5'], 1, BLACK)
    sSix = leadfont.render(sleaderboard['6'], 1, BLACK)
    sSeven = leadfont.render(sleaderboard['7'], 1, BLACK)
    screen.blit(sOne, ((screenWidth - 100),120))
    screen.blit(sTwo, ((screenWidth - 100),180))
    screen.blit(sThree, ((screenWidth - 100),240))
    screen.blit(sFour, ((screenWidth - 100),300))
    screen.blit(sFive, ((screenWidth - 100),360))
    screen.blit(sSix, ((screenWidth - 100),420))
    screen.blit(sSeven, ((screenWidth - 100),480))
    backbutton.draw(screen, BLACK)
    rarrowbutton.draw(screen, BLACK)
    clearBoardbutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if backbutton.isOver(pos):
                nav = 'home'
                lnav = 'out'
            if rarrowbutton.isOver(pos):
                lnav = 'competitive'
            if clearBoardbutton.isOver(pos):
                # Reset to placeholder rows and persist immediately.
                nleaderboard['1'] = '_____________'
                nleaderboard['2'] = '_____________'
                nleaderboard['3'] = '_____________'
                nleaderboard['4'] = '_____________'
                nleaderboard['5'] = '_____________'
                nleaderboard['6'] = '_____________'
                nleaderboard['7'] = '_____________'
                sleaderboard['1'] = '__'
                sleaderboard['2'] = '__'
                sleaderboard['3'] = '__'
                sleaderboard['4'] = '__'
                sleaderboard['5'] = '__'
                sleaderboard['6'] = '__'
                sleaderboard['7'] = '__'
                with open('nleader.txt','w') as fp:
                    json.dump(nleaderboard, fp)
                with open('sleader.txt','w') as fp:
                    json.dump(sleaderboard, fp)
                # Redraw right away so the cleared board is shown (recursive call).
                casual_leaderboard_screen()
        if event.type == pygame.MOUSEMOTION:
            # Hover highlight for each button.
            if backbutton.isOver(pos):
                backbutton.color = GREY
            else:
                backbutton.color = WHITE
            if clearBoardbutton.isOver(pos):
                clearBoardbutton.color = GREY
            else:
                clearBoardbutton.color = WHITE
            if rarrowbutton.isOver(pos):
                rarrowbutton.color = GREY
            else:
                rarrowbutton.color = WHITE
def competitive_leaderboard_screen():
    """Draw one frame of the competitive leaderboard and handle its events.

    Renders ranks 1-7 with names (cnleaderboard) and scores (csleaderboard).
    Back exits the section; '<' switches back to the casual page.
    """
    global nav
    global lnav
    screen.fill(BEIGE)
    Leaderboard = GEfont.render("Comp Leaderboard", 1 , BLACK)
    lW = Leaderboard.get_width()
    screen.blit(Leaderboard, ((screenCenterH - lW/2),40))
    #Numbers
    One = leadfont.render('1', 1, BLACK)
    Two = leadfont.render('2', 1, BLACK)
    Three = leadfont.render('3', 1, BLACK)
    Four = leadfont.render('4', 1, BLACK)
    Five = leadfont.render('5', 1, BLACK)
    Six = leadfont.render('6', 1, BLACK)
    Seven = leadfont.render('7', 1, BLACK)
    screen.blit(One, (40,120))
    screen.blit(Two, (40,180))
    screen.blit(Three, (40,240))
    screen.blit(Four, (40,300))
    screen.blit(Five, (40,360))
    screen.blit(Six, (40,420))
    screen.blit(Seven, (40,480))
    #Names
    nOne = leadfont.render(cnleaderboard['1'], 1, BLACK)
    nTwo = leadfont.render(cnleaderboard['2'], 1, BLACK)
    nThree = leadfont.render(cnleaderboard['3'], 1, BLACK)
    nFour = leadfont.render(cnleaderboard['4'], 1, BLACK)
    nFive = leadfont.render(cnleaderboard['5'], 1, BLACK)
    nSix = leadfont.render(cnleaderboard['6'], 1, BLACK)
    nSeven = leadfont.render(cnleaderboard['7'], 1, BLACK)
    screen.blit(nOne, (140,120))
    screen.blit(nTwo, (140,180))
    screen.blit(nThree, (140,240))
    screen.blit(nFour, (140,300))
    screen.blit(nFive, (140,360))
    screen.blit(nSix, (140,420))
    screen.blit(nSeven, (140,480))
    #Scores
    sOne = leadfont.render(csleaderboard['1'], 1, BLACK)
    sTwo = leadfont.render(csleaderboard['2'], 1, BLACK)
    sThree = leadfont.render(csleaderboard['3'], 1, BLACK)
    sFour = leadfont.render(csleaderboard['4'], 1, BLACK)
    sFive = leadfont.render(csleaderboard['5'], 1, BLACK)
    sSix = leadfont.render(csleaderboard['6'], 1, BLACK)
    sSeven = leadfont.render(csleaderboard['7'], 1, BLACK)
    screen.blit(sOne, ((screenWidth - 100),120))
    screen.blit(sTwo, ((screenWidth - 100),180))
    screen.blit(sThree, ((screenWidth - 100),240))
    screen.blit(sFour, ((screenWidth - 100),300))
    screen.blit(sFive, ((screenWidth - 100),360))
    screen.blit(sSix, ((screenWidth - 100),420))
    screen.blit(sSeven, ((screenWidth - 100),480))
    backbutton.draw(screen, BLACK)
    larrowbutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if backbutton.isOver(pos):
                nav = 'home'
                lnav = 'out'
            if larrowbutton.isOver(pos):
                lnav = 'casual'
        if event.type == pygame.MOUSEMOTION:
            # Hover highlight for each button.
            if backbutton.isOver(pos):
                backbutton.color = GREY
            else:
                backbutton.color = WHITE
            if larrowbutton.isOver(pos):
                larrowbutton.color = GREY
            else:
                larrowbutton.color = WHITE
def casual_game():
    """Casual two-player setup screen: each player picks a colour, then start.

    NOTE(review): indentation was lost in this source; the block structure
    (especially the modal popup while-loops inside the event loop) was
    reconstructed from the control flow — verify against the original.
    """
    global nav
    global P1col
    global P2col
    global p1p2entcol
    global p1entcol
    global p2entcol
    global p1p2entsamecol
    casnav = "home"
    if casnav == "home":
        # --- static screen furniture ---
        screen.fill(BEIGE)
        cashead = GEfont.render("Casual Game", 1, BLACK)
        cW = cashead.get_width()
        screen.blit(cashead, ((screenCenterH - cW/2), 40))
        play1 = GEfont.render("Player 1", 1, BLACK)
        p1W = play1.get_width()
        screen.blit(play1, ((screenCenterH/2 - p1W/2) - 5, 170))
        col1 = GEfont.render("Colour", 1, BLACK)
        c1W = col1.get_width()
        screen.blit(col1, ((screenCenterH/2 - c1W/2) - 6, 230))
        play2 = GEfont.render("Player 2", 1, BLACK)
        p2W = play1.get_width()  # NOTE(review): measures play1, not play2 — looks like a typo; kept as-is
        screen.blit(play2, ((3*screenCenterH/2 - p2W/2) + 5, 170))
        col2 = GEfont.render("Colour", 1, BLACK)
        c2W = col2.get_width()
        screen.blit(col2, ((3*screenCenterH/2 - c2W/2) + 5, 230))
        dividerh = 420
        pygame.draw.rect(screen, BLACK, (int(screenCenterH - 8), int(screenCenterH - dividerh/2 - 20), 8, dividerh))
        StartGamebutton.draw(screen, (0, 0, 0))
        backbutton.draw(screen, BLACK)
        redp1.draw(screen, GREY)
        greenp1.draw(screen, GREY)
        bluep1.draw(screen, GREY)
        blackp1.draw(screen, GREY)
        purplep1.draw(screen, GREY)
        whitep1.draw(screen, GREY)
        redp2.draw(screen, GREY)
        greenp2.draw(screen, GREY)
        bluep2.draw(screen, GREY)
        blackp2.draw(screen, GREY)
        purplep2.draw(screen, GREY)
        whitep2.draw(screen, GREY)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
            # Modal popups: loop until the player acknowledges the message
            # (the popup handlers clear these flags and re-enter casual_game).
            while p1p2entcol == True:
                p1p2entercol()
            while p1entcol == True:
                p1entercol()
            while p2entcol == True:
                p2entercol()
            while p1p2entsamecol == True:
                p1p2entersamecol()
            # Highlight the currently selected colour swatches.
            if P1col == RED:
                redp1.draw(screen, DRED)
            elif P1col == GREEN:
                greenp1.draw(screen, DRED)
            elif P1col == BLUE:
                bluep1.draw(screen, DRED)
            elif P1col == BLACK:
                blackp1.draw(screen, DRED)
            elif P1col == PURPLE:
                purplep1.draw(screen, DRED)
            elif P1col == WHITE:
                whitep1.draw(screen, DRED)
            if P2col == RED:
                redp2.draw(screen, DRED)
            elif P2col == GREEN:
                greenp2.draw(screen, DRED)
            elif P2col == BLUE:
                bluep2.draw(screen, DRED)
            elif P2col == BLACK:
                blackp2.draw(screen, DRED)
            elif P2col == PURPLE:
                purplep2.draw(screen, DRED)
            elif P2col == WHITE:
                whitep2.draw(screen, DRED)
            pygame.display.update()
            pos = pygame.mouse.get_pos()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Player 1 colour options
                if redp1.isOver(pos):
                    P1col = RED
                if greenp1.isOver(pos):
                    P1col = GREEN
                if bluep1.isOver(pos):
                    P1col = BLUE
                if blackp1.isOver(pos):
                    P1col = BLACK
                if purplep1.isOver(pos):
                    P1col = PURPLE
                if whitep1.isOver(pos):
                    P1col = WHITE
                # Player 2 colour options
                if redp2.isOver(pos):
                    P2col = RED
                if greenp2.isOver(pos):
                    P2col = GREEN
                if bluep2.isOver(pos):
                    P2col = BLUE
                if blackp2.isOver(pos):
                    P2col = BLACK
                if purplep2.isOver(pos):
                    P2col = PURPLE
                if whitep2.isOver(pos):
                    P2col = WHITE
                # Start the game once both players have chosen distinct colours;
                # otherwise raise the matching "please choose" popup flag.
                if StartGamebutton.isOver(pos):
                    if P1col == '' and P2col == '':
                        p1p2entcol = True
                    elif P1col == '' and P2col != '':
                        p1entcol = True
                    elif P1col != '' and P2col == '':
                        p2entcol = True
                    elif P1col == P2col:
                        p1p2entsamecol = True
                    else:
                        casnav = 'casgame'
                if backbutton.isOver(pos):
                    nav = 'home'
            if event.type == pygame.MOUSEMOTION:
                # Hover feedback on the colour swatches and buttons.
                if redp1.isOver(pos):
                    redp1.draw(screen, DRED)
                    pygame.display.update()
                if greenp1.isOver(pos):
                    greenp1.draw(screen, DRED)
                    pygame.display.update()
                if bluep1.isOver(pos):
                    bluep1.draw(screen, DRED)
                    pygame.display.update()
                if blackp1.isOver(pos):
                    blackp1.draw(screen, DRED)
                    pygame.display.update()
                if purplep1.isOver(pos):
                    purplep1.draw(screen, DRED)
                    pygame.display.update()
                if whitep1.isOver(pos):
                    whitep1.draw(screen, DRED)
                    pygame.display.update()
                if redp2.isOver(pos):
                    redp2.draw(screen, DRED)
                    pygame.display.update()
                if greenp2.isOver(pos):
                    greenp2.draw(screen, DRED)
                    pygame.display.update()
                if bluep2.isOver(pos):
                    bluep2.draw(screen, DRED)
                    pygame.display.update()
                if blackp2.isOver(pos):
                    blackp2.draw(screen, DRED)
                    pygame.display.update()
                if purplep2.isOver(pos):
                    purplep2.draw(screen, DRED)
                    pygame.display.update()
                if whitep2.isOver(pos):
                    whitep2.draw(screen, DRED)
                    pygame.display.update()
                if StartGamebutton.isOver(pos):
                    StartGamebutton.color = GREY
                else:
                    StartGamebutton.color = WHITE
                if backbutton.isOver(pos):
                    backbutton.color = GREY
                else:
                    backbutton.color = WHITE
    if casnav == "casgame":
        game_seq()
        # nmm ("no more moves" / aborted) suppresses the win screen.
        if not nmm:
            casual_win_screen()
        nav = "home"
def casual_win_screen():
    """Announce the winner, collect their name via a Tk prompt, update the board.

    NOTE(review): indentation was lost in this source; structure reconstructed.
    """
    global winner
    global score
    global winnername
    screen.fill(BEIGE)
    playerpwin = GEfont.render("Player " + str(winner) + " Wins!", 1, BLACK)
    pwW = playerpwin.get_width()
    screen.blit(playerpwin, ((screenCenterH - pwW/2), 200))
    Score = GEfont.render("Score: " + str(score), 1, BLACK)
    sW = Score.get_width()
    screen.blit(Score, ((screenCenterH - sW/2), 260))
    enterName = GEfont.render("Please enter name to", 1, BLACK)
    enW = enterName.get_width()
    screen.blit(enterName, ((screenCenterH - enW/2), 340))
    addlead = GEfont.render("be added to the Leaderboard", 1, BLACK)
    alW = addlead.get_width()
    screen.blit(addlead, ((screenCenterH - alW/2), 400))
    pygame.display.update()
    # Tk prompt; the blank labels act as window padding.
    alpha = tk.Tk()
    alpha.attributes("-topmost", True)
    tk.Label(alpha, text="Please enter your name here:").grid(row=1, column=1)
    tk.Label(alpha, text=' ').grid(row=0, column=0)
    tk.Label(alpha, text=' ').grid(row=0, column=1)
    tk.Label(alpha, text=' ').grid(row=0, column=2)
    tk.Label(alpha, text=' ').grid(row=1, column=0)
    tk.Label(alpha, text=' ').grid(row=1, column=2)
    tk.Label(alpha, text=' ').grid(row=2, column=0)
    tk.Label(alpha, text=' ').grid(row=2, column=2)
    tk.Label(alpha, text=' ').grid(row=3, column=0)
    tk.Label(alpha, text=' ').grid(row=3, column=1)
    tk.Label(alpha, text=' ').grid(row=3, column=2)
    tk.Label(alpha, text=' ').grid(row=4, column=0)
    tk.Label(alpha, text=' ').grid(row=4, column=1)
    tk.Label(alpha, text='Name must be no longer').grid(row=4, column=1)
    tk.Label(alpha, text=' ').grid(row=5, column=0)
    tk.Label(alpha, text=' ').grid(row=5, column=2)
    tk.Label(alpha, text='than 13 characters').grid(row=5, column=1)
    tk.Label(alpha, text=' ').grid(row=6, column=0)
    tk.Label(alpha, text=' ').grid(row=6, column=1)
    tk.Label(alpha, text=' ').grid(row=6, column=2)
    e1 = tk.Entry(alpha)
    e1.grid(row=2, column=1)

    def keypressed(event):
        # Accept the name on Enter, but only when it fits the 13-char limit.
        global winnername
        if event.keysym == "Return":
            if len(e1.get()) > 13:
                # BUG FIX: the original called alpha.mainloop() here, nesting a
                # second event loop per rejected Enter press. Ignoring the key
                # keeps the prompt open without re-entering the loop.
                return
            else:
                winnername = e1.get()
                alpha.destroy()

    alpha.bind("<Key>", keypressed)
    alpha.mainloop()
    update_cas_leaderboard()
def comp_game():
    """Placeholder for the vs-computer mode — not implemented yet (does nothing)."""
    global nav
    pass
def quit_screen():
    """Farewell splash shown for three seconds before the program exits."""
    screen.fill(BEIGE)
    ThanksFor = GEfont.render("Thanks For", 1, BLACK)
    Playing = GEfont.render("Playing", 1, BLACK)
    # Ring: black circle with a slightly smaller beige circle on top.
    pygame.draw.circle(screen, BLACK, (int(screenCenterH), int(screenCenterV)), 300)
    pygame.draw.circle(screen, BEIGE, (int(screenCenterH), int(screenCenterV)), 290)
    BarW = 540
    # NOTE(review): the bar y-coordinates use screenCenterH (horizontal centre);
    # screenCenterV looks intended — kept as-is pending confirmation.
    pygame.draw.rect(screen, BLACK, (int(screenCenterH - BarW/2), int(screenCenterH - 130), BarW, 20))
    pygame.draw.rect(screen, BLACK, (int(screenCenterH - BarW/2), int(screenCenterH + 110), BarW, 20))
    tfW = ThanksFor.get_width()
    pW = Playing.get_width()
    screen.blit(ThanksFor, (int(screenCenterH - tfW/2), 120))
    screen.blit(Playing, (int(screenCenterH - pW/2), 160))
    C4 = C4font.render("Connect Four", 1, BLACK)
    GE = GEfont.render("Global Offensive", 1, BLACK)
    c4W = C4.get_width()
    goW = GE.get_width()
    screen.blit(C4, (int(screenCenterH - c4W/2), 290))
    screen.blit(GE, (int(screenCenterH - goW/2), 380))
    pygame.display.update()
    pygame.time.wait(3000)
'''''''''''''''''''''''''''''''''''''''
define aux sequences
'''''''''''''''''''''''''''''''''''''''
def create_board():
    """Return a fresh ROW_COUNT x COLUMN_COUNT board of zeros (0 = empty slot)."""
    return np.zeros((ROW_COUNT, COLUMN_COUNT))
def drop_piece(board, row, col, piece):
    """Place `piece` (player id) at (row, col); mutates `board` in place."""
    board[row][col] = piece
def is_valid_location(board, col):
    """Return True if column `col` still has room (its top slot is empty)."""
    return board[ROW_COUNT - 1][col] == 0
def get_next_open_row(board, col):
    """Return the lowest empty row index in column `col`.

    Returns None when the column is full — callers guard with
    is_valid_location() first.
    """
    for r in range(ROW_COUNT):
        if board[r][col] == 0:
            return r
def print_board(board):
    """Debug helper: print the board flipped vertically so row 0 is at the bottom."""
    print(np.flip(board, 0))
def winning_move(board, piece):
    """Return True if `piece` has WIN_REQ aligned slots in any direction.

    Rewritten from the original running-counter form (which was equivalent but
    convoluted) to explicit per-window checks; also returns an explicit False
    instead of falling off the end with None (both are falsy to callers).
    """
    # Horizontal windows.
    for c in range(COLUMN_COUNT - (WIN_REQ - 1)):
        for r in range(ROW_COUNT):
            if all(board[r][c + w] == piece for w in range(WIN_REQ)):
                return True
    # Vertical windows.
    for c in range(COLUMN_COUNT):
        for r in range(ROW_COUNT - (WIN_REQ - 1)):
            if all(board[r + w][c] == piece for w in range(WIN_REQ)):
                return True
    # Positively sloped diagonals.
    for c in range(COLUMN_COUNT - (WIN_REQ - 1)):
        for r in range(ROW_COUNT - (WIN_REQ - 1)):
            if all(board[r + w][c + w] == piece for w in range(WIN_REQ)):
                return True
    # Negatively sloped diagonals.
    for c in range(COLUMN_COUNT - (WIN_REQ - 1)):
        for r in range(WIN_REQ - 1, ROW_COUNT):
            if all(board[r - w][c + w] == piece for w in range(WIN_REQ)):
                return True
    return False
def draw_board(board):
    """Draw the empty Connect-Four grid below the one-square-high drop strip."""
    screen.fill(BEIGE)
    for c in range(COLUMN_COUNT):
        for r in range(ROW_COUNT):
            # Outer grey cell with an inset cyan face.
            RECT = c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE
            InRECT = (c*SQUARESIZE+2), (r*SQUARESIZE+SQUARESIZE+2), (SQUARESIZE-4), (SQUARESIZE-4)
            pygame.draw.rect(screen, GREY, RECT)
            pygame.draw.rect(screen, CYAN, InRECT)
            if board[r][c] == 0:
                # Empty slot: grey ring around a beige hole.
                pygame.draw.circle(screen, GREY, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), (radius+2))
                pygame.draw.circle(screen, BEIGE, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), radius)
def update_board(board):
    """Render every placed piece; the y-axis is flipped via boardHeight so
    board row 0 appears at the bottom of the window."""
    global P1col
    global P2col
    for c in range(COLUMN_COUNT):
        for r in range(ROW_COUNT):
            if board[r][c] == 1:
                pygame.draw.circle(screen, P1col, (int(c*SQUARESIZE+SQUARESIZE/2), boardHeight - int(r*SQUARESIZE+SQUARESIZE/2)), radius)
            elif board[r][c] == 2:
                pygame.draw.circle(screen, P2col, (int(c*SQUARESIZE+SQUARESIZE/2), boardHeight - int(r*SQUARESIZE+SQUARESIZE/2)), radius)
    pygame.display.update()
def update_cas_leaderboard():
    """Insert (winnername, score) into the 7-slot casual leaderboard and persist it.

    Slots are keyed '1' (best) .. '7' (worst); empty slots hold '__'.
    NOTE(review): indentation was lost in this source; structure reconstructed.
    """
    global score
    global winnername
    r = 7    # rank currently being compared against, scanning bottom-up
    stb = 0  # score-to-beat held at rank r
    x = 0    # done flag
    while x != 1:
        if sleaderboard[str(r)] == '__':
            stb = 0
        else:
            stb = int(sleaderboard[str(r)])
        if score > stb:
            # New score beats rank r; try the next rank up.
            r = r - 1
        elif score <= stb:
            if r == 7:
                # BUG FIX: a score that doesn't beat the bottom entry used to
                # write slot '8', raising KeyError. It simply doesn't qualify.
                x = 1
            else:
                # Insert below rank r: shift ranks r+1..7 down one slot.
                a = 7
                while a != r:
                    sleaderboard[str(a)] = sleaderboard[str(a - 1)]
                    nleaderboard[str(a)] = nleaderboard[str(a - 1)]
                    a = a - 1
                sleaderboard[str(r + 1)] = str(score)
                nleaderboard[str(r + 1)] = winnername
                with open('nleader.txt', 'w') as fp:
                    json.dump(nleaderboard, fp)
                with open('sleader.txt', 'w') as fp:
                    json.dump(sleaderboard, fp)
                x = 1
        if r == 0:
            # Beat every existing entry: insert at the top.
            a = 7
            while a > 1:
                sleaderboard[str(a)] = sleaderboard[str(a - 1)]
                nleaderboard[str(a)] = nleaderboard[str(a - 1)]
                a = a - 1
            sleaderboard[str(1)] = str(score)
            nleaderboard[str(1)] = winnername
            with open('nleader.txt', 'w') as fp:
                json.dump(nleaderboard, fp)
            with open('sleader.txt', 'w') as fp:
                json.dump(sleaderboard, fp)
            x = 1
def game_seq():
    """Run one two-player Connect-Four round.

    NOTE(review): indentation was lost in this source; the if/else pairings
    were reconstructed so that turn alternation and the winner = turn + 1
    mapping are self-consistent — verify against the original.
    """
    global winner
    global score
    global P1col
    global P2col
    global nmm
    score = 20          # player-2 move budget; reaching -1 ends with "No More Moves"
    game_over = False
    nmm = False         # "no more moves" / aborted round — suppresses the win screen
    turn = 1            # 1 = player 1 to move, 0 = player 2
    board = create_board()
    # print_board(board)
    draw_board(board)
    pygame.display.update()
    while not game_over:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    # Abort the round without declaring a winner.
                    nmm = True
                    game_over = True
            elif event.type == pygame.MOUSEMOTION:
                # Preview disc follows the mouse along the top strip.
                pygame.draw.rect(screen, BEIGE, (0, 0, boardWidth, SQUARESIZE))
                posx = event.pos[0]
                if turn == 1:
                    pygame.draw.circle(screen, P1col, (posx, int((SQUARESIZE/2))), radius)
                else:
                    pygame.draw.circle(screen, P2col, (posx, int((SQUARESIZE/2))), radius)
                pygame.display.update()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                pygame.draw.rect(screen, BEIGE, (0, 0, boardWidth, SQUARESIZE))
                if turn == 1:
                    # Player 1 input.
                    posx = event.pos[0]
                    col = int(math.floor(posx/SQUARESIZE))
                    if is_valid_location(board, col):
                        row = get_next_open_row(board, col)
                        drop_piece(board, row, col, 1)
                        update_board(board)
                        if winning_move(board, 1):
                            label = winfont.render("Player 1 Wins!", 1, BLACK)
                            screen.blit(label, (40, 10))
                            pygame.display.update()
                            game_over = True
                        turn = 0
                    else:
                        turn = 1  # full column: player 1 goes again
                else:
                    # Player 2 input.
                    posx = event.pos[0]
                    col = int(math.floor(posx/SQUARESIZE))
                    if is_valid_location(board, col):
                        row = get_next_open_row(board, col)
                        drop_piece(board, row, col, 2)
                        update_board(board)
                        if winning_move(board, 2):
                            label = winfont.render("Player 2 Wins!", 1, BLACK)
                            screen.blit(label, (40, 10))
                            pygame.display.update()
                            game_over = True
                        score = score - 1
                        turn = 1
                        if score == -1:
                            gOver = winfont.render("No More Moves!", 1, BLACK)
                            screen.blit(gOver, (40, 10))
                            pygame.display.update()
                            pygame.time.wait(3000)
                            nmm = True
                            game_over = True
                    else:
                        turn = 0  # full column: player 2 goes again
        # print_board(board)
        if game_over and not nmm:
            # After a win `turn` holds 0 for a player-1 win, 1 for player 2.
            winner = turn + 1
            pygame.time.wait(3000)
        elif game_over and nmm:
            pass
def p1p2entercol():
    """Modal popup: both players still need to pick a colour."""
    global p1p2entcol
    popupoutlineW = 600
    popupoutlineH = 250
    popupW = 592
    popupH = 242
    pygame.draw.rect(screen, BLACK, (screenCenterH - popupoutlineW/2, screenCenterV - popupoutlineH/2, popupoutlineW, popupoutlineH))
    pygame.draw.rect(screen, BEIGE, (screenCenterH - popupW/2, screenCenterV - popupH/2, popupW, popupH))
    playerx = GEfont.render("Players 1 and 2:", 1, BLACK)
    pxW = playerx.get_width()
    screen.blit(playerx, ((screenCenterH - pxW/2), 250))
    choosecol = GEfont.render("Please choose a Colour", 1, BLACK)
    ccW = choosecol.get_width()
    screen.blit(choosecol, ((screenCenterH - ccW/2) + 5, 310))
    continuebutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        pygame.display.update()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if continuebutton.isOver(pos):
                # Clear the flag and redraw the setup screen.
                p1p2entcol = False
                casual_game()
        if event.type == pygame.MOUSEMOTION:
            if continuebutton.isOver(pos):
                continuebutton.color = GREY
            else:
                continuebutton.color = WHITE
def p1entercol():
    """Modal popup: player 1 still needs to pick a colour."""
    global p1entcol
    popupoutlineW = 600
    popupoutlineH = 250
    popupW = 592
    popupH = 242
    pygame.draw.rect(screen, BLACK, (screenCenterH - popupoutlineW/2, screenCenterV - popupoutlineH/2, popupoutlineW, popupoutlineH))
    pygame.draw.rect(screen, BEIGE, (screenCenterH - popupW/2, screenCenterV - popupH/2, popupW, popupH))
    playerx = GEfont.render("Player 1:", 1, BLACK)
    pxW = playerx.get_width()
    screen.blit(playerx, ((screenCenterH - pxW/2), 250))
    choosecol = GEfont.render("Please choose a Colour", 1, BLACK)
    ccW = choosecol.get_width()
    screen.blit(choosecol, ((screenCenterH - ccW/2) + 5, 310))
    continuebutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        pygame.display.update()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if continuebutton.isOver(pos):
                # Clear the flag and redraw the setup screen.
                p1entcol = False
                casual_game()
        if event.type == pygame.MOUSEMOTION:
            if continuebutton.isOver(pos):
                continuebutton.color = GREY
            else:
                continuebutton.color = WHITE
def p2entercol():
    """Modal popup: player 2 still needs to pick a colour."""
    global p2entcol
    popupoutlineW = 600
    popupoutlineH = 250
    popupW = 592
    popupH = 242
    pygame.draw.rect(screen, BLACK, (screenCenterH - popupoutlineW/2, screenCenterV - popupoutlineH/2, popupoutlineW, popupoutlineH))
    pygame.draw.rect(screen, BEIGE, (screenCenterH - popupW/2, screenCenterV - popupH/2, popupW, popupH))
    playerx = GEfont.render("Player 2:", 1, BLACK)
    pxW = playerx.get_width()
    screen.blit(playerx, ((screenCenterH - pxW/2), 250))
    choosecol = GEfont.render("Please choose a Colour", 1, BLACK)
    ccW = choosecol.get_width()
    screen.blit(choosecol, ((screenCenterH - ccW/2) + 5, 310))
    continuebutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        pygame.display.update()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if continuebutton.isOver(pos):
                # Clear the flag and redraw the setup screen.
                p2entcol = False
                casual_game()
        if event.type == pygame.MOUSEMOTION:
            if continuebutton.isOver(pos):
                continuebutton.color = GREY
            else:
                continuebutton.color = WHITE
def p1p2entersamecol():
    """Modal popup: both players picked the same colour."""
    global p1p2entsamecol
    popupoutlineW = 600
    popupoutlineH = 250
    popupW = 592
    popupH = 242
    pygame.draw.rect(screen, BLACK, (screenCenterH - popupoutlineW/2, screenCenterV - popupoutlineH/2, popupoutlineW, popupoutlineH))
    pygame.draw.rect(screen, BEIGE, (screenCenterH - popupW/2, screenCenterV - popupH/2, popupW, popupH))
    playerx = GEfont.render("Players must choose", 1, BLACK)
    pxW = playerx.get_width()
    screen.blit(playerx, ((screenCenterH - pxW/2), 250))
    choosecol = GEfont.render("different Colours", 1, BLACK)
    ccW = choosecol.get_width()
    screen.blit(choosecol, ((screenCenterH - ccW/2) + 5, 310))
    continuebutton.draw(screen, BLACK)
    pygame.display.update()
    for event in pygame.event.get():
        pos = pygame.mouse.get_pos()
        pygame.display.update()
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if continuebutton.isOver(pos):
                # Clear the flag and redraw the setup screen.
                p1p2entsamecol = False
                casual_game()
        if event.type == pygame.MOUSEMOTION:
            if continuebutton.isOver(pos):
                continuebutton.color = GREY
            else:
                continuebutton.color = WHITE
'''''''''''''''''''''''''''''''''''''''
game loop
'''''''''''''''''''''''''''''''''''''''
# Top-level navigation loop: dispatch on `nav` until the quit screen runs.
# NOTE(review): indentation was lost in this source; the exit_game/nav resets
# are assumed to sit inside the quit branch — verify against the original.
game_intro()
while not exit_game:
    while nav == "home":
        home_page()
    while nav == "casual":
        casual_game()
    while nav == "comp":
        comp_game()
    while nav == "leaderboard":
        leaderboard_screen()
    while nav == "quit":
        quit_screen()
        exit_game = True
        nav = 'x'
from PIL import Image
from .mazeToGif import makeGIF #pylint: disable=relative-beyond-top-level
def make_step(k: int):
    """One BFS wave over the module-level maze.

    For every cell whose distance (in global `m`) equals k, mark each
    unvisited, non-wall 4-neighbour (per global wall matrix `a`) with k + 1.
    Mutates `m` in place.
    """
    for i in range(len(m)):
        for j in range(len(m[i])):
            if m[i][j] == k:
                if i > 0 and m[i - 1][j] == 0 and a[i - 1][j] == 0:
                    m[i - 1][j] = k + 1
                if j > 0 and m[i][j - 1] == 0 and a[i][j - 1] == 0:
                    m[i][j - 1] = k + 1
                if i < len(m) - 1 and m[i + 1][j] == 0 and a[i + 1][j] == 0:
                    m[i + 1][j] = k + 1
                if j < len(m[i]) - 1 and m[i][j + 1] == 0 and a[i][j + 1] == 0:
                    m[i][j + 1] = k + 1
def runSolver(mazeMatrix: list, startCoord: tuple, endCoord: tuple, fileNameGif: str):
    """Solve a maze by BFS flood fill and save the search + path as a GIF.

    :param mazeMatrix: 2D list, 0 = open cell, non-zero = wall
    :param startCoord: (row, col) start cell
    :param endCoord: (row, col) target cell
    :param fileNameGif: output GIF path
    NOTE(review): loops forever if no path exists — confirm inputs are solvable.
    """
    global a, m
    images = []
    a = mazeMatrix
    start = startCoord
    end = endCoord
    # Distance map, same shape as the maze; 0 = unvisited.
    m = []
    for i in range(len(a)):
        m.append([])
        for j in range(len(a[i])):
            m[-1].append(0)
    i, j = start
    m[i][j] = 1
    k = 0
    # Flood fill until the end cell receives a distance, one frame per wave.
    while m[end[0]][end[1]] == 0:
        k += 1
        make_step(k)
        images = makeGIF(start, end, a, m, [], images)
    # Walk back from the end along strictly decreasing distances.
    i, j = end
    k = m[i][j]
    the_path = [(i, j)]
    while k > 1:
        if i > 0 and m[i - 1][j] == k - 1:
            i, j = i - 1, j
            the_path.append((i, j))
            k -= 1
        elif j > 0 and m[i][j - 1] == k - 1:
            i, j = i, j - 1
            the_path.append((i, j))
            k -= 1
        elif i < len(m) - 1 and m[i + 1][j] == k - 1:
            i, j = i + 1, j
            the_path.append((i, j))
            k -= 1
        elif j < len(m[i]) - 1 and m[i][j + 1] == k - 1:
            i, j = i, j + 1
            the_path.append((i, j))
            k -= 1
        # NOTE(review): frame-per-backtrack-step placement inside the loop was
        # reconstructed from the lost indentation — verify.
        images = makeGIF(start, end, a, m, the_path, images)
    # Blink the finished path a few times at the end of the animation.
    for i in range(10):
        if i % 2 == 0:
            images = makeGIF(start, end, a, m, the_path, images)
        else:
            images = makeGIF(start, end, a, m, [], images)
    # NOTE(review): Pillow's GIF `duration` is in milliseconds; 0.5 looks like
    # it was meant as seconds — confirm intended frame rate.
    images[0].save(fileNameGif,
                   save_all=True, append_images=images[1:],
                   optimize=False, duration=0.5, loop=0)
@cuda.jit('void(uint64[:], uint64[:])', target='gpu')
def checkwieferich(check, result):
    """
    Checks the Wieferich congruence 2**(n-1) == 1 (mod n**2) for each input.

    :param check: device array of numbers to check
    :param result: device array; result[i] is set to check[i] when the
        congruence holds (other slots are left untouched)
    :return:
    """
    # Index of the element this CUDA thread handles.
    # NOTE(review): no bounds check against len(check) — assumes the launch
    # grid exactly covers the array; confirm.
    i = cuda.grid(1)
    ac = check[i]
    # modexp parameters: 2**(ac-1) mod ac**2
    # NOTE(review): ac**2 overflows uint64 for ac >= 2**32 — confirm the
    # intended input range.
    m = 2
    e = ac - 1
    n = ac ** 2
    if modexp(m, e, n) == 1:
        result[i] = ac
@cuda.jit('uint64(uint64)', inline=True, device=True)
def inline_wieferich(ac):
    """Device helper: return ac if 2**(ac-1) == 1 (mod ac**2), else 0."""
    m = 2
    e = ac - 1
    n = ac ** 2  # NOTE(review): same uint64 overflow caveat as checkwieferich
    if modexp(m, e, n) == 1:
        return ac
    return 0
@cuda.jit('void(uint64[:], uint64[:])', target='gpu')
def fermat_wieferich(check, result):
    """
    For each input that FAILS the base-2 Fermat test (2**(n-1) mod n != 1,
    i.e. is provably composite), runs the Wieferich congruence check and
    stores the number on success.

    :param check: device array of numbers to check
    :param result: device array receiving matches (0 when the Wieferich
        check fails; untouched when the number passes the Fermat test)
    :return:
    """
    # Index of the element this CUDA thread handles (no bounds check — see
    # checkwieferich).
    i = cuda.grid(1)
    ac = check[i]
    # Fermat test parameters: 2**(ac-1) mod ac
    m = 2
    e = ac - 1
    n = ac
    # Composite by Fermat -> apply the Wieferich check.
    if modexp(m, e, n) != 1:
        result[i] = inline_wieferich(ac)
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class GraphvizToolfile(Package):
    """Spack package that writes the SCRAM toolfile (graphviz.xml) for graphviz.

    NOTE(review): indentation was lost in this source; the interior of the
    XML triple-quoted literal is kept exactly as extracted — verify its
    original layout.
    """

    # Dummy download target; this package only generates a toolfile.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)

    depends_on('graphviz')

    def install(self, spec, prefix):
        # $VER / $PFX placeholders are substituted by write_scram_toolfile.
        values = {}
        values['VER'] = spec['graphviz'].version
        values['PFX'] = spec['graphviz'].prefix
        fname = 'graphviz.xml'
        contents = str("""<tool name="graphviz" version="$VER">
<info url="http://www.research.att.com/sw/tools/graphviz/"/>
<client>
<environment name="GRAPHVIZ_BASE" default="$PFX"/>
</client>
<runtime name="PATH" value="$$GRAPHVIZ_BASE/bin" type="path"/>
<use name="expat"/>
<use name="zlib"/>
<use name="libjpeg-turbo"/>
<use name="libpng"/>
</tool>""")
        write_scram_toolfile(contents, values, fname, prefix)
|
import numpy as np
import torch
from tqdm import trange
from .inception import InceptionV3
from .fid_score import calculate_frechet_distance
def get_inception_and_fid_score(images, device, fid_cache, is_splits=10,
                                batch_size=50, verbose=False):
    """Compute Inception Score and FID for a set of images in one pass.

    Args:
        images: numpy array of images fed straight to InceptionV3.
            # NOTE(review): assumed (N, C, H, W) float layout — confirm.
        device: torch device for inference.
        fid_cache: path to an .npz file holding precomputed reference
            statistics under keys 'mu' and 'sigma'.
        is_splits: number of splits for the Inception Score.
        batch_size: inference batch size (clamped to len(images)).
        verbose: show a tqdm progress bar when True.

    Returns:
        ((is_mean, is_std), fid_score)
    """
    block_idx1 = InceptionV3.BLOCK_INDEX_BY_DIM[2048]    # pool features -> FID
    block_idx2 = InceptionV3.BLOCK_INDEX_BY_DIM['prob']  # class probs -> IS
    model = InceptionV3([block_idx1, block_idx2]).to(device)
    model.eval()
    if batch_size > len(images):
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = len(images)
    fid_acts = np.empty((len(images), 2048))
    is_probs = np.empty((len(images), 1008))
    if verbose:
        iterator = trange(0, len(images), batch_size)
    else:
        iterator = range(0, len(images), batch_size)
    for start in iterator:
        end = start + batch_size
        batch_images = images[start: end]
        batch_images = torch.from_numpy(batch_images).type(torch.FloatTensor)
        batch_images = batch_images.to(device)
        with torch.no_grad():
            pred = model(batch_images)
        fid_acts[start: end] = pred[0].view(-1, 2048).cpu().numpy()
        is_probs[start: end] = pred[1].cpu().numpy()
    # Inception Score: mean over splits of exp(KL(p(y|x) || p(y))).
    scores = []
    for i in range(is_splits):
        part = is_probs[
            (i * is_probs.shape[0] // is_splits):
            ((i + 1) * is_probs.shape[0] // is_splits), :]
        # NOTE(review): np.log(part) yields -inf for exactly-zero
        # probabilities; kept as in the original.
        kl = part * (
            np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    is_score = (np.mean(scores), np.std(scores))
    # FID between the generated-set statistics and the cached reference.
    m1 = np.mean(fid_acts, axis=0)
    s1 = np.cov(fid_acts, rowvar=False)
    f = np.load(fid_cache)
    m2, s2 = f['mu'][:], f['sigma'][:]
    f.close()
    fid_score = calculate_frechet_distance(m1, s1, m2, s2)
    return is_score, fid_score
|
import pandas as pd
import numpy as np
import os
def load(folder):
    """
    Load all data files (Accelerometer, EMG, Gyro, Orientation, Orientation
    Euler) for one activity folder.

    Input: name of the folder (under "data/") containing the 5 data files.
    Output: list of DataFrames, one per file, in sorted filename order.
    """
    datafile = []
    # path_to_data is the folder containing the EatFood data; change if needed.
    path_to_data = "data"
    path = os.path.join(path_to_data, folder)
    print(path)
    # BUG FIX: os.listdir order is arbitrary, but callers index the result
    # positionally (activity[0] = Accelerometer, activity[4] = Orientation
    # Euler) — sort filenames so the order is deterministic.
    for files in sorted(os.listdir(path)):
        file = os.path.join(path, files)
        datafile.append(pd.read_csv(file))
    return datafile
def loadData(folders):
    """
    Load every activity folder.

    Input: iterable of eat-food activity folder names.
    Output: list with one entry per activity; each entry is the list of
    5 DataFrames returned by load().
    """
    data = []
    for folder in folders:
        data.append(load(folder))
    return data
def RMS(data):
    """
    Return the root-mean-square of a numeric array/Series.
    Raises ZeroDivisionError on empty input (callers pass non-empty windows).
    """
    return np.sqrt(sum(data * data) * 1 / len(data))
def getInterval(variable1, variable2, iteration, count):
    """
    Correlate two series over fixed-size windows and return the indices of
    windows where the correlation exceeds 0.5.

    Input:
        variable1, variable2: pandas Series of equal length
        iteration: number of windows
        count: samples per window
    Output:
        list of window indices with corr(variable1, variable2) > 0.5
    """
    corr = []
    for j in range(iteration):
        start = j * count
        end = (j + 1) * count
        interval1 = variable1.iloc[start:end]
        interval2 = variable2.iloc[start:end]
        correlation = np.corrcoef(interval1, interval2)[0][1]
        corr.append(correlation)
    corr = pd.Series(corr)
    corr = corr[corr > 0.5]
    return corr.index.tolist()
def getInterval1(data, pitch, iteration, count):
    """
    Correlate two series over fixed-size windows and return the indices of
    windows where the correlation is below -0.5 (strong anti-correlation).

    Input:
        data, pitch: pandas Series of equal length
        iteration: number of windows
        count: samples per window
    Output:
        list of window indices with corr(data, pitch) < -0.5
    """
    corr = []
    for j in range(iteration):
        start = j * count
        end = (j + 1) * count
        interval1 = data.iloc[start:end]
        interval2 = pitch.iloc[start:end]
        correlation = np.corrcoef(interval1, interval2)[0][1]
        corr.append(correlation)
    corr = pd.Series(corr)
    corr = corr[corr < -0.5]
    return corr.index.tolist()
if __name__ == "__main__":
    # NOTE(review): indentation was lost in this source; the script structure
    # below was reconstructed from the control flow — verify.
    # Folders of the eat-food activity recordings.
    folders = ['EatFood1', 'EatFood2', 'EatFood3', 'EatFood4']
    data = loadData(folders)
    # Minimum IMU record count, used to truncate every activity to a uniform
    # sample size. Initialised to a large value because we take a minimum.
    # NOTE(review): the loop only inspects data[0] (range(1)) — confirm
    # whether all four activities were meant to be scanned.
    sample = 1e10
    for i in range(1):
        activity = data[i]
        # All IMU files in a folder share a record count, so checking the
        # accelerometer file is enough.
        accelerometer = activity[0]
        orientation = activity[4]
        sample = min(sample, len(accelerometer))
    keys = ['x', 'y', 'z', 'roll', 'pitch', 'yaw']
    # Per-key feature accumulators across all kept windows of all activities.
    rms = dict([(key, []) for key in keys])
    std = dict([(key, []) for key in keys])
    corrkeys = ['x,y', 'x,roll', 'y,roll', 'z,roll', 'x,pitch', 'y,pitch', 'z,pitch', 'x,yaw', 'y,yaw', 'z,yaw']
    correlation = dict([(key, []) for key in corrkeys])
    for i in range(4):
        activity = data[i]
        # 0 = Accelerometer file, 4 = Orientation Euler file.
        accelerometer = activity[0]
        orientation = activity[4]
        # Truncate to the common sample size for uniform windows.
        y = (accelerometer['y'][:sample])
        x = (accelerometer['x'][:sample])
        z = (accelerometer['z'][:sample])
        pitch = (orientation['pitch'][:sample])
        roll = (orientation['roll'][:sample])
        yaw = (orientation['yaw'][:sample])
        print(np.corrcoef(x, y)[0][1])
        # Split the ~270 s series into 16 windows (~15 s each); each window
        # is treated as one eat-food gesture.
        split = 16
        length = len(y)
        count = int(length / split)       # samples per window
        iteration = int(length / count)   # number of windows actually formed
        # Keep only windows whose signals co-vary the way eating does.
        activityInterval1 = getInterval(y, pitch, iteration, count)    # corr(y, pitch) > 0.5
        activityInterval2 = getInterval1(x, pitch, iteration, count)   # corr(x, pitch) < -0.5
        activityInterval3 = getInterval(z, roll, iteration, count)     # corr(z, roll) > 0.5
        activityInterval4 = getInterval1(x, y, iteration, count)       # corr(x, y) < -0.5
        # Windows satisfying all four filtering conditions.
        final = list(set.intersection(set(activityInterval1), set(activityInterval2), set(activityInterval3), set(activityInterval4)))
        # Slice every signal into its windows.
        windows = dict([(key, []) for key in keys])
        for j in range(iteration):
            start = j * count
            end = (j + 1) * count
            windows['x'].append(x.iloc[start:end])
            windows['y'].append(y.iloc[start:end])
            windows['z'].append(z.iloc[start:end])
            windows['roll'].append(roll.iloc[start:end])
            windows['pitch'].append(pitch.iloc[start:end])
            windows['yaw'].append(yaw.iloc[start:end])
        # Feature extraction over the filtered windows. Swap to
        # "for index in range(iteration):" to keep the unfiltered windows.
        for index in final:
            for key in keys:
                rms[key].append(RMS(windows[key][index]))
                std[key].append(windows[key][index].std())
            correlation['x,y'].append(np.corrcoef(windows['x'][index], windows['y'][index])[0][1])
            correlation['x,roll'].append(np.corrcoef(windows['x'][index], windows['roll'][index])[0][1])
            correlation['y,roll'].append(np.corrcoef(windows['y'][index], windows['roll'][index])[0][1])
            correlation['z,roll'].append(np.corrcoef(windows['z'][index], windows['roll'][index])[0][1])
            correlation['x,pitch'].append(np.corrcoef(windows['x'][index], windows['pitch'][index])[0][1])
            correlation['y,pitch'].append(np.corrcoef(windows['y'][index], windows['pitch'][index])[0][1])
            correlation['z,pitch'].append(np.corrcoef(windows['z'][index], windows['pitch'][index])[0][1])
            correlation['x,yaw'].append(np.corrcoef(windows['x'][index], windows['yaw'][index])[0][1])
            correlation['y,yaw'].append(np.corrcoef(windows['y'][index], windows['yaw'][index])[0][1])
            correlation['z,yaw'].append(np.corrcoef(windows['z'][index], windows['yaw'][index])[0][1])
    # Assemble the feature matrix: RMS, std, then correlation columns.
    meanFrame = pd.DataFrame.from_dict(rms)
    meanColumn = ['RMS(' + key + ')' for key in keys]
    meanFrame.columns = meanColumn
    stdFrame = pd.DataFrame.from_dict(std)
    stdColumn = ['std(' + key + ')' for key in keys]
    stdFrame.columns = stdColumn
    correlationFrame = pd.DataFrame.from_dict(correlation)
    correlationColumn = ['corr(' + key + ')' for key in corrkeys]
    correlationFrame.columns = correlationColumn
    FinalFeatureFrame = pd.concat([meanFrame, stdFrame, correlationFrame], axis=1)
    # Label each kept window as its own "EatFoodN" activity row.
    activityColumn = ['EatFood' + str(i + 1) for i in range(len(meanFrame))]
    FinalFeatureFrame.insert(0, 'activity', activityColumn)
    FinalFeatureFrame.to_csv('EatingDataFiltered.csv', index=None)
|
"""
Week 2, Day 2: Find the Town Judge
In a town, there are N people labelled from 1 to N. There is a rumor that one of these people is secretly the town
judge.
If the town judge exists, then:
- The town judge trusts nobody.
- Everybody (except for the town judge) trusts the town judge.
- There is exactly one person that satisfies properties 1 and 2.
You are given trust, an array of pairs trust[i] = [a, b] representing that the person labelled a trusts the person
labelled b.
If the town judge exists and can be identified, return the label of the town judge. Otherwise, return -1.
E x a m p l e s
Input: N = 2, trust = [[1,2]]
Output: 2
Input: N = 3, trust = [[1,3],[2,3]]
Output: 3
Input: N = 3, trust = [[1,3],[2,3],[3,1]]
Output: -1
Input: N = 3, trust = [[1,2],[2,3]]
Output: -1
Input: N = 4, trust = [[1,3],[1,4],[2,3],[2,4],[4,3]]
Output: 3
N o t e s
- 1 <= N <= 1000
- trust.length <= 10000
- trust[i] are all different
- trust[i][0] != trust[i][1]
- 1 <= trust[i][0], trust[i][1] <= N
"""
from collections import defaultdict
from time import perf_counter_ns
from typing import List
def findJudge(N: int, trust: List[List[int]]) -> int:
    """Return the town judge's label, or -1 if there is none.

    The judge trusts nobody and is trusted by all other N - 1 people.
    Complexity: O(N + len(trust)).
    """
    if not trust:
        # No trust pairs: a lone citizen is trivially the judge.
        return -1 if N > 1 else 1
    confidence = defaultdict(int)  # person -> number of people trusting them
    believers = set()              # people who trust someone (cannot be judge)
    for a, b in trust:
        confidence[b] += 1
        believers.add(a)
    judge = {confidant
             for confidant, num_believers in confidence.items()
             if num_believers == N - 1 and confidant not in believers}
    return -1 if not judge else judge.pop()
def findJudge_v2(N: int, trust: List[List[int]]) -> int:
    """Net-trust variant: each person's score is (times trusted) - (times trusting).

    Only the judge can reach a score of exactly N - 1.
    Inspired by a LeetCode discussion post:
    https://leetcode.com/explore/featured/card/may-leetcoding-challenge/535/week-2-may-8th-may-14th/3325/discuss/244859/Python-O(n)-with-Explanation
    Complexity: O(n)
    """
    net_trust = dict.fromkeys(range(1, N + 1), 0)
    for truster, trustee in trust:
        net_trust[truster] -= 1
        net_trust[trustee] += 1
    winners = {person for person, score in net_trust.items() if score == N - 1}
    return winners.pop() if winners else -1
if __name__ == '__main__':
    # Smoke-test v1 against the problem's examples and time it (nanoseconds).
    start = perf_counter_ns()
    print(findJudge(N=1, trust=[]) == 1)
    print(findJudge(N=2, trust=[[1, 2]]) == 2)
    print(findJudge(N=3, trust=[[1, 3], [2, 3]]) == 3)
    print(findJudge(N=3, trust=[[1, 3], [2, 3], [3, 1]]) == -1)
    print(findJudge(N=3, trust=[[1, 2], [2, 3]]) == -1)
    print(findJudge(N=4, trust=[[1, 3], [1, 4], [2, 3], [2, 4], [4, 3]]) == 3)
    print('v1', perf_counter_ns() - start)
    # Same checks for v2, timed independently for a rough comparison.
    start = perf_counter_ns()
    print(findJudge_v2(N=1, trust=[]) == 1)
    print(findJudge_v2(N=2, trust=[[1, 2]]) == 2)
    print(findJudge_v2(N=3, trust=[[1, 3], [2, 3]]) == 3)
    print(findJudge_v2(N=3, trust=[[1, 3], [2, 3], [3, 1]]) == -1)
    print(findJudge_v2(N=3, trust=[[1, 2], [2, 3]]) == -1)
    print(findJudge_v2(N=4, trust=[[1, 3], [1, 4], [2, 3], [2, 4], [4, 3]]) == 3)
    print('v2', perf_counter_ns() - start)
    # last line of code
|
import os
import datetime
from collections import OrderedDict
from logging import basicConfig
from logging import INFO
from flask import Flask, request, logging, Response, jsonify, json
from flask_cors import CORS
from persistence import Persistence
app = Flask(__name__)
CORS(app)  # allow cross-origin requests so a separate front-end can call this API
@app.route('/')
def hello():
    """Serve the raw application log (path from APP_LOG) as the index page."""
    return read_file(os.environ['APP_LOG'])
def read_file(file_path):
    """Return the text contents of *file_path*, resolved relative to this module's directory."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    full_path = os.path.join(base_dir, file_path)
    with open(full_path, 'r') as handle:
        return handle.read()
def validate_parking(parking):
    # TODO: stub — no validation is performed yet. parking_post() calls this
    # before persisting, so schema/required-key checks belong here.
    pass
@app.route('/parkings', methods=['POST'])
def parking_post():
    """Create a parking record from the POSTed JSON body.

    Reads keys 'vehicle', 'parkingArea' and 'tipusVehicle'; the start time
    is stamped server-side. Returns the created record as JSON with 201.
    """
    parking = request.get_json()
    validate_parking(parking)  # currently a no-op stub
    # NOTE(review): naive local time — consider a timezone-aware timestamp.
    start_time = datetime.datetime.now()
    parking = repository.create(parking['vehicle'], parking['parkingArea'], start_time, parking['tipusVehicle'])
    return jsonify(parking), 201
@app.route('/parkings/recent', methods=['GET'])
def recent_parking():
    """Return the occupied parkings, in the order chosen by the persistence layer."""
    recent = repository.list_occupied_ordered()
    return jsonify(recent), 200
@app.route('/parkings/<parkingid>/endtime', methods=['POST'])
def parking_finish(parkingid):
    """Close the given parking by stamping its end time; returns an empty JSON object."""
    end_time = datetime.datetime.now()
    repository.update(parkingid, end_time)
    return "{}"
@app.route('/parkings', methods=['GET'])
def parking_list():
    """Return every parking record as JSON."""
    parking_ll = repository.list()
    return jsonify(parking_ll)
@app.route('/cid', methods=['GET'])
def cid_list():
    """Return every parking area (cid) with live occupancy counters.

    Occupancy is recomputed per request from currently occupied parkings:
    a total counter plus a per-vehicle-type breakdown.
    """
    from cid_infostat import cid as cids
    cid_dict = {}
    for cid in cids:
        cid['nplacesocupades'] = 0
        # assumes exactly 3 vehicle types, indexed 0..2 — TODO confirm
        cid['nplacesocupadesperveh'] = [0] * 3
        cid_dict[cid["ID"]] = cid
    parking_list = repository.list_occupied()
    for parking in parking_list:
        cid = cid_dict[parking['parkingArea']]
        tipusVehicle = parking['tipusVehicle']
        cid['nplacesocupades'] = cid['nplacesocupades'] + 1
        cid['nplacesocupadesperveh'][tipusVehicle] = cid['nplacesocupadesperveh'][tipusVehicle] + 1
    return jsonify(cid_dict)
if __name__ == '__main__':
    # Resolve log and DB paths relative to this file; all deployment knobs
    # (APP_LOG, DB_PATH, IP_LISTEN, PORT_LISTEN) come from the environment.
    abs_path = os.path.dirname(os.path.abspath(__file__))
    log = os.path.join(abs_path, os.environ['APP_LOG'])
    basicConfig(filename=log, level=INFO)
    db = os.path.join(abs_path, os.environ['DB_PATH'])
    repository = Persistence(db, logging.getLogger(__name__))
    repository.init_db()
    # NOTE(review): `repository` only exists under this guard, so the route
    # handlers break if this module is imported by a WSGI runner instead.
    app.run(host=os.environ['IP_LISTEN'], port=int(os.environ['PORT_LISTEN']), threaded=True)
|
from typing import Dict, Any, List
from datetime import datetime
class FleetSettings(object):
    """Holds fleet settings and serializes the provided ones for an ESI call."""

    def __init__(self, is_free_move: bool, motd: str) -> None:
        self.__is_free_move: bool = is_free_move
        self.__motd: str = motd

    def get_esi_data(self) -> Dict[str, Any]:
        """Return a dict containing only the settings that are not None."""
        candidates = [
            ('is_free_move', self.__is_free_move),
            ('motd', self.__motd),
        ]
        return {key: value for key, value in candidates if value is not None}
class EveFleetSquad(object):
    """Immutable view of a single squad inside a fleet wing."""

    def __init__(self, squad_id: int, squad_name: str) -> None:
        self.__id: int = squad_id
        self.__name: str = squad_name

    def id(self) -> int:
        """Return the squad's numeric identifier."""
        return self.__id

    def name(self) -> str:
        """Return the squad's display name."""
        return self.__name
class EveFleetWing(object):
    """Immutable view of a fleet wing and the squads it contains."""

    def __init__(self, wing_id: int, wing_name: str, squads: List[EveFleetSquad]) -> None:
        self.__wing_id: int = wing_id
        self.__wing_name: str = wing_name
        self.__wing_squads: List[EveFleetSquad] = squads

    def id(self) -> int:
        """Return the wing's numeric identifier."""
        return self.__wing_id

    def name(self) -> str:
        """Return the wing's display name."""
        return self.__wing_name

    def squads(self) -> List[EveFleetSquad]:
        """Return the wing's squads (the same list passed at construction)."""
        return self.__wing_squads
class FleetMember(object):
def __init__(self, member: Dict[str, Any]) -> None:
self._data = member
self._data['join_time'] = self._data['join_time'].v
def character_id(self) -> int:
return self._data['character_id']
def join_datetime(self) -> datetime:
return self._data['join_time']
def role(self) -> str:
return self._data['role']
def role_name(self) -> str:
return self._data['role_name']
def ship_type_id(self) -> int:
return self._data['ship_type_id']
def solar_system(self) -> int:
return self._data['solar_system_id']
def squad_id(self) -> int:
return self._data['squad_id']
def station_id(self) -> int:
return self._data['station_id']
def takes_fleet_warp(self) -> bool:
return self._data['takes_fleet_warp']
def wing_id(self) -> int:
return self._data['wing_id']
@property
def data(self):
return self._data
|
def bubblesort(list1):
    """Sort *list1* in place in DESCENDING order and return it.

    Classic O(n^2) bubble sort. Improvements over the original:
    - tuple-assignment swap instead of a temp variable;
    - the inner pass shrinks each round (the tail is already sorted);
    - early exit when a full pass makes no swaps (already sorted input).
    """
    for unsorted_end in range(len(list1) - 1, 0, -1):
        swapped = False
        for j in range(unsorted_end):
            if list1[j] < list1[j + 1]:
                list1[j], list1[j + 1] = list1[j + 1], list1[j]
                swapped = True
        if not swapped:
            break
    return list1
# Demo: bubblesort sorts in place (descending) and returns the same list.
myL = [12,1,5,3,2,11,8,15]
print (bubblesort(myL))
# License
'''
Code by Gugulothu Yashwanth Naik
April 30,2020
Released under GNU GPL
'''
import numpy as np
import matplotlib.pyplot as plt
from pylab import*
#if using termux
#import subprocess
#import shlex
#end if
# Magnitude plot: hand-digitized samples (frequency in rad/s, gain in dB).
subplot(2,1,1)
x=[5.08,5.67,6.34,6.88,6.98,7.08,7.58,7.9,8.02,9.86,11.17,14.33,57.91]
y=[5.15,6.8,7.64,7.64,7.64,7.64,6.48,5.15,5.15,-0.81,-4.29,-10.17,-40]
plt.plot(x,y)
plt.xlim(5,60)
plt.ylabel('Mag(dB)')
plt.title('Magnitude plot')
plt.grid()
# Phase plot: hand-digitized samples (frequency in rad/s, phase in degrees).
subplot(2,1,2)
x=[5.425,6.045,6.314,6.404,6.524,6.634,6.724,7.323,7.503,7.713,7.833,7.993,8.243,8.642,9.432,11.68,28.335,34.569,46.019]
y=[-38.65,-54.46,-63.4,-66.5,-70.97,-75.25,-78.6,-101.5,-106.85,-114.4,-117.7,-122,-127.5,-135,-145,-158.19,-174.28,-174.28,-174.28]
plt.plot(x,y)
plt.xlim(5,47)
plt.xlabel('frequency(rad/s)')
plt.ylabel('Phase(deg)')
plt.title('Phase plot')
plt.grid()
#if using termux
#plt.savefig('./figs/ee18btech11017/ee18btech11017_fig2.pdf')
#plt.savefig('./figs/ee18btech11017/ee18btech11017_fig2.eps')
#subprocess.run(shlex.split("termux-open ./figs/ee18btech11017/ee18btech11017_fig2.pdf"))
#else
plt.show()
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from math import ceil
import hashlib
import binascii
class Base58CheckAddress(object):
    """Builds Base58Check (Bitcoin-style) addresses from an EC public key.

    An address is VERSION_BYTE + RIPEMD160(SHA256(pubkey_point)) + a 4-byte
    double-SHA256 checksum, all Base58-encoded.

    Fixes over the original: ``is not None`` instead of ``!= None``,
    idiomatic digit-map construction, list-based Base58 digit building,
    and a decode loop that no longer crashes (IndexError) on empty input.
    """

    VERSION_BYTE = bytes([0])  # 0x00 = Bitcoin main-net P2PKH version prefix

    def __init__(self, public_key=None):
        """public_key, if given, is an instance of ec.EllipticCurvePublicKey."""
        self.ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
        self.MAP = {ch: i for i, ch in enumerate(self.ALPHABET)}
        self.payload_bin = None   # raw encoded public-key point (bytes)
        self.payload_str = None   # hex form of payload_bin
        self.ripemd_hash = None   # RIPEMD160(SHA256(payload_bin)), set lazily
        if public_key:
            self.load_public_key(public_key)

    def load_public_key(self, public_key):
        """Capture the public key's encoded point and its hex representation."""
        # NOTE(review): encode_point() is deprecated in newer `cryptography`
        # releases in favour of public_bytes(); kept for compatibility.
        self.payload_bin = public_key.public_numbers().encode_point()
        self.payload_str = binascii.hexlify(self.payload_bin)

    def set_ripemd(self):
        """Cache the RIPEMD160(SHA256(payload)) digest of the loaded key."""
        self.ripemd_hash = self.hash_ripemd(self.payload_bin)

    def hash_ripemd(self, payload: bytes):
        """Return RIPEMD160(SHA256(payload))."""
        sha256 = self.hash_sha256(payload)
        ripemd = hashlib.new('ripemd160')
        ripemd.update(sha256)
        return ripemd.digest()

    def hash_sha256(self, payload: bytes):
        """Return the SHA-256 digest of *payload*."""
        digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
        digest.update(payload)
        return digest.finalize()

    def address_string(self):
        """Return the Base58Check address string for the loaded public key."""
        self.set_ripemd()
        assert self.ripemd_hash is not None, "RIPEMD160 Hash is None"
        extended = self.VERSION_BYTE + self.ripemd_hash
        # Checksum = first 4 bytes of double SHA-256 of the versioned hash.
        first_hash = self.hash_sha256(extended)
        payload_hashed = self.hash_sha256(first_hash)
        checksum = payload_hashed[:4]
        return self.encode_base58(extended + checksum)

    def encode_base58(self, payload: bytes):
        """Encode bytes as Base58; leading zero bytes become leading '1's."""
        bignum = int.from_bytes(payload, byteorder='big', signed=False)
        digits = []
        while bignum:
            bignum, remainder = divmod(bignum, 58)
            digits.append(self.ALPHABET[remainder])
        # Each leading zero byte maps to the alphabet's zero digit ('1').
        for byte in payload:
            if byte != 0:
                break
            digits.append(self.ALPHABET[0])
        return ''.join(reversed(digits))

    def decode_base58(self, payload: str):
        """Decode a Base58 string back into bytes (inverse of encode_base58)."""
        # Horner's rule over the digits, most-significant first.
        bignum = 0
        for ch in payload:
            bignum = bignum * 58 + self.MAP[ch]
        # Count leading '1's: each encodes one leading zero byte.
        zeros = 0
        for ch in payload:
            if ch != self.ALPHABET[0]:
                break
            zeros += 1
        n_bytes = ceil(bignum.bit_length() / 8)
        return bytes(zeros) + bignum.to_bytes(n_bytes, 'big')
|
import argparse
import pandas as pd
import torch
from scipy.sparse import csr_matrix, vstack
from sklearn.decomposition import PCA
from pathlib import Path
import numpy as np
def load_cell_gene_features(params):
    """Build a (cell, pca_dim) feature matrix for the requested mouse tissue.

    Collects the union of gene names across the tissue's data files (cached
    in a statistics file), loads each training split as a sparse (cell, gene)
    matrix aligned to that unified gene index, stacks them, and reduces the
    feature axis with PCA.

    Args:
        params: namespace with ``random_seed``, ``pca_dim``, ``train``
            (list of split numbers) and ``tissue``.

    Returns:
        np.ndarray: the PCA-transformed feature matrix (see NOTE on the
        transpose below).
    """
    random_seed = params.random_seed
    pca_dim = params.pca_dim
    train = params.train
    tissue = params.tissue

    proj_path = Path(__file__).parent.resolve().parent.resolve().parent.resolve()
    mouse_data_path = proj_path / 'data' / 'mouse_data'
    statistics_path = mouse_data_path / 'statistics'
    if not statistics_path.exists():
        statistics_path.mkdir()

    gene_statistics_path = statistics_path / (tissue + '_genes.txt')
    # Generate (or reload) the unified, sorted gene list for this tissue.
    if not gene_statistics_path.exists():
        data_files = mouse_data_path.glob(f'*{tissue}*_data.csv')
        genes = None
        for file in data_files:
            # First column of each data file holds the gene names.
            # Fix: np.str was removed in NumPy 1.24; builtin str is equivalent.
            data = pd.read_csv(file, dtype=str, header=0).values[:, 0]
            genes = set(data) if genes is None else genes | set(data)
        id2gene = sorted(genes)
        with open(gene_statistics_path, 'w', encoding='utf-8') as f:
            for gene in id2gene:
                f.write(gene + '\r\n')
    else:
        id2gene = []
        with open(gene_statistics_path, 'r', encoding='utf-8') as f:
            for line in f:
                id2gene.append(line.strip())

    # Map every gene to a stable integer id shared by all splits.
    gene2id = {gene: idx for idx, gene in enumerate(id2gene)}
    num_genes = len(id2gene)
    print(f"totally {num_genes} genes.")

    # Fix: set membership instead of an O(n) scan of .values() per column.
    valid_ids = set(gene2id.values())
    matrices = []
    for num in train:
        data_path = mouse_data_path / f'mouse_{tissue}{num}_data.csv'
        df = pd.read_csv(data_path, index_col=0)  # (gene, cell)
        df = df.transpose(copy=True)  # (cell, gene)
        df = df.rename(columns=gene2id)
        # Filter out columns missing from the unified index (can happen when
        # using a gene intersection).
        col = [c for c in df.columns if c in valid_ids]
        df = df[col]
        # Keep only non-zero entries, re-indexed to the unified gene ids.
        arr = df.to_numpy()
        print(arr.shape)
        row_idx, col_idx = arr.nonzero()  # intra-dataset index
        non_zeros = arr[(row_idx, col_idx)]  # non-zero values
        tgt_idx = df.columns[col_idx].astype(int).tolist()  # unified gene index
        info_shape = (len(df), num_genes)
        matrices.append(csr_matrix((non_zeros, (row_idx, tgt_idx)), shape=info_shape))

    sparse_feat = vstack(matrices).toarray()  # cell-wise (cell, gene)
    # NOTE(review): PCA is fitted/applied on the TRANSPOSED matrix, so rows
    # are genes here even though the comment claims (cell, pca_dim) — kept
    # as in the original; confirm against the caller's expectations.
    gene_pca = PCA(pca_dim, random_state=random_seed).fit(sparse_feat.T)
    gene_feat = gene_pca.transform(sparse_feat.T)  # (cell, pca_dim)
    gene_evr = sum(gene_pca.explained_variance_ratio_) * 100
    print(f'[PCA] Gene EVR: {gene_evr:.2f} %.')
    return gene_feat
if __name__ == '__main__':
    """
    python ./code/datasets/load_features.py --train 3510 1311 6633 6905 4909 2081 --tissue Mammary_gland
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tissue', required=True, type=str)
    parser.add_argument('--train', required=True, nargs='+')
    # Fix: load_cell_gene_features() reads params.random_seed and
    # params.pca_dim; without these arguments the script crashed with
    # AttributeError. Defaults chosen for reproducibility — adjust as needed.
    parser.add_argument('--random_seed', type=int, default=0)
    parser.add_argument('--pca_dim', type=int, default=50)
    params = parser.parse_args()
    load_cell_gene_features(params)
|
from apps.inventory.models.ships import *
from apps.inventory.models.weapons import *
|
from django.shortcuts import render, redirect
from .models import Books
def index(request):
    """Render the book list page with every Books row."""
    return render(request, "books/index.html", {"books": Books.objects.all()})
def add_data(request):
    """Seed the Books table with five fixed sample rows, then redirect home.

    Rows 3 and 4 are explicitly marked in_print=True; the other rows rely on
    the model's default. Data-driven loop replaces five copy-pasted creates.
    """
    sample_rows = [
        {'title': 'Title1', 'author': 'Author1',
         'published_date': '2017-05-01 12:34:56', 'category': 'Category1'},
        {'title': 'Title2', 'author': 'Author2',
         'published_date': '2017-05-07 23:45:01', 'category': 'Category2'},
        {'title': 'Title3', 'author': 'Author3',
         'published_date': '2017-05-13 04:56:12', 'category': 'Category3',
         'in_print': True},
        {'title': 'Title4', 'author': 'Author4',
         'published_date': '2017-05-19 05:01:23', 'category': 'Category4',
         'in_print': True},
        {'title': 'Title5', 'author': 'Author5',
         'published_date': '2017-05-25 06:12:34', 'category': 'Category5'},
    ]
    for row in sample_rows:
        Books.objects.create(**row)
    return redirect("/")
def remove_data(request):
    """Delete every Books row, then redirect to the index page."""
    Books.objects.all().delete()
    return redirect( "/" )
|
"""
=================== TASK 1 ====================
* Name: Sum Number Digits
*
* Write a function `sum_digits` that will return
* sum of digits for given integer number.
* If passed value is invalid, function should
* return -1 which indicates something went wrong.
*
* Note: Please describe in details possible cases
* in which your solution might not work.
*
* Use main() function to test your solution.
===================================================
"""
def sum_digits(nr):
    """Return the sum of decimal digits of integer *nr* (sign ignored), or -1 if invalid.

    Fixes the original validation: ``type(nr) == "int"`` compared a type
    object against the string "int" (always False), so invalid inputs such
    as strings or floats crashed or looped instead of returning -1.
    Booleans are rejected too, since they are an int subclass but not
    meaningful digit sources.
    """
    if not isinstance(nr, int) or isinstance(nr, bool):
        return -1
    nr = abs(nr)
    total = 0
    while nr:
        total += nr % 10
        nr //= 10
    return total
def main():
    """Exercise sum_digits with a simple known case (1+2+3+4 == 10)."""
    int_number = 1234
    digit_sum = sum_digits(int_number)
    print("Sum of digits for given numbers is: ", digit_sum)
main()
"""
Artificial potential fields
Basic idea :
-----------
U_art = U_xd(attraction potential) + U_O(repulsive Potential)
U_att = 1/2 * kp * (x - xd)^2
1/2 * n * (1/rho - 1/rho_node )^2 if rho <= rho_node
U_rep = {
0 if rho > rho_node
U_art = The artificial potential field, which the agent is subjected to
U_xd = Attractive potential field created by the goal
U_O = Repulsive potential field created by the obstacle
x = position of agent
x_d = goal position
k_p = position gain
eta = constant gain
rho = shortest distance to the obstacle
rho_0 = limit distance of the potential field influence
"""
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow, show, colorbar
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator
import numpy as np
import math
import Utilities
# Initialize figure
fig = plt.figure(figsize=(15, 8))
ax = fig.add_subplot(1, 2, 1, projection='3d')
# Global Variables:
# Map specific:
mapBoundaryX = 11 # Map x length
mapBoundaryY = 11 # Map y length
reso = 10 # Map resolution
# repulsion gains:
eta = 5.0
rho_node = 5.0
# Attraction Gains:
KP = 2 # Position Gain
# Grid has `reso` cells per map unit plus one closing edge sample.
spacingResolution = (mapBoundaryX * reso) + 1
xAxis = np.linspace(0, mapBoundaryX, spacingResolution)
yAxis = np.linspace(0, mapBoundaryY, spacingResolution)
# Robot specific
robotX = 2.0 # initial robot X position
robotY = 2.0 # initial robot Y position
goalX = 10.0 # goal X position
goalY = 10.0 # goal Y position
# Obstacle:
obsX = 5.0
obsY = 5.0
# Position scaling for grid:
xGoalOnGrid = goalX * reso
yGoalOnGrid = goalY * reso
xRobotOnGrid = robotX * reso
yRobotOnGrid = robotY * reso
xObsOnGrid = obsX * reso
yObsOnGrid = obsY * reso
# potential field generation:
uPot = np.zeros(shape=(len(xAxis), len(yAxis)))
uAtr = np.zeros(shape=(len(xAxis), len(yAxis)))
uRep = np.zeros(shape=(len(xAxis), len(yAxis)))
# Calculate Attraction
# NOTE(review): this attraction is LINEAR (conic) in distance, not the
# quadratic 1/2*kp*(x-xd)^2 described in the module docstring — confirm intent.
for i in range(len(xAxis)):
    for j in range(len(yAxis)):
        uAtr[i][j] = 1 / 2.0 * KP * math.sqrt(abs(xGoalOnGrid - i) ** 2 + abs(yGoalOnGrid - j) ** 2) / reso
# Calculate repulsion potential:
for i in range(len(xAxis)):
    for j in range(len(yAxis)):
        rho = math.sqrt(((xObsOnGrid - i) ** 2 + (yObsOnGrid - j) ** 2)) / reso
        if rho <= rho_node:
            if rho == 0:
                # On the obstacle itself: finite spike instead of divide-by-zero.
                uRep[i][j] = np.max(uAtr) / 2
            else:
                uRep[i][j] = 0.5 * eta * ((1 / rho) - (1 / rho_node)) ** 2
                if uRep[i][j] > eta:
                    # Clamp so the obstacle peak doesn't dwarf the surface plot.
                    uRep[i][j] = eta
        else:
            uRep[i][j] = 0
uPot = uAtr + uRep
# for debugging
# print(uPot.shape)
# print(uPot)
# Left: 3-D surface of the combined field; right: 2-D heat map with colorbar.
xAxis, yAxis = np.meshgrid(xAxis, yAxis)
surf = ax.plot_surface(xAxis, yAxis, uPot, cmap=cm.viridis, linewidth=0, antialiased=True)
ax.view_init(azim=30)
ax = fig.add_subplot(1, 2, 2)
imshow(uPot, origin='lower', extent=[0, int(mapBoundaryX), 0, int(mapBoundaryY)])
colorbar()
# Simulate motion: greedy descent on the potential grid toward the goal.
distanceToGoal = math.sqrt((xRobotOnGrid - xGoalOnGrid) ** 2 + (yRobotOnGrid - yGoalOnGrid) ** 2)
distanceTolerance = 1
robotStartPoint = [robotX, robotY]
goalPoint = [goalX, goalY]
motionDirection, quadrant = Utilities.getAngleAndDirection(robotStartPoint, goalPoint)
numIterations = 0
indexX = int(xRobotOnGrid)
indexY = int(yRobotOnGrid)
# Fix: `and` stops at the goal OR after 100 steps. The original `or`
# forced at least 100 iterations and could loop forever in a local minimum.
while distanceToGoal >= distanceTolerance and numIterations < 100:
    minVal = 1000
    minIndex = [indexX, indexY]
    # Inspect the 8-neighborhood and move to the lowest-potential cell.
    for i in range(-1, 2):
        for j in range(-1, 2):
            newindexX = indexX + i
            newindexY = indexY + j
            # Fix: >= closes an off-by-one — valid indices are
            # 0..spacingResolution-1, so index == spacingResolution crashed.
            if (newindexX < 0 or newindexX >= spacingResolution
                    or newindexY < 0 or newindexY >= spacingResolution
                    or (i == 0 and j == 0)):
                continue
            if uPot[newindexX][newindexY] < minVal:
                minVal = uPot[newindexX][newindexY]
                minIndex = [newindexX, newindexY]
    indexX, indexY = minIndex
    distanceToGoal = math.sqrt((indexX - xGoalOnGrid) ** 2 + (indexY - yGoalOnGrid) ** 2)
    # Plot every other step to keep the animation responsive.
    if numIterations % 2 == 0:
        plt.scatter(indexX / reso, indexY / reso, color="m", marker=".")
        plt.pause(0.005)
    numIterations += 1
plt.show()
from django.db import models
from django.utils import timezone
class LoginAttemptRecord(models.Model):
    """One row per username tracking login attempts and the last attempt time."""
    # NOTE(review): unique=True with null=True means at most one row per
    # username, but multiple NULL-username rows remain possible.
    username = models.CharField(max_length=255, null=True, blank=True, unique=True)
    # Running attempt counter; reset to 0 via LoginAttemptLogger.reset().
    count = models.PositiveIntegerField(null=True, blank=True, default=0)
    # Set on row creation (auto_now_add); the logger updates it manually afterwards.
    timestamp = models.DateTimeField(auto_now_add=True)
    class Meta:
        app_label = 'auth_record'
class LoginAttemptLogger(object):
    """Helpers to reset or bump the per-username login attempt counter."""
    @classmethod
    def reset(cls, username):
        """Zero the counter (creating the row if needed) and refresh its timestamp."""
        defaults = {
            'count': 0,
            'timestamp': timezone.now()
        }
        LoginAttemptRecord.objects.update_or_create(username=username, defaults=defaults)
    @classmethod
    def increment(cls, username):
        """Add one to the counter and refresh its timestamp.

        NOTE(review): this read-modify-write is racy under concurrent
        logins; an atomic F('count') + 1 update would be safer.
        """
        obj, created = LoginAttemptRecord.objects.get_or_create(username=username)
        obj.count += 1
        obj.timestamp = timezone.now()
        obj.save()
|
from django.db.models import Q
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import ListAPIView,RetrieveAPIView,RetrieveUpdateAPIView,DestroyAPIView,CreateAPIView
from chirps.models import Chirp
from .serializers import ChirpListSerializer,ChirpDetailSerializer,ChirpCreateUpdateSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAdminUser, IsAuthenticatedOrReadOnly
from .permissions import IsOwnerOrReadOnly
class ChirpListAPIView(ListAPIView):
    """List chirps, searchable via DRF's SearchFilter or a legacy ?q= parameter."""
    serializer_class = ChirpListSerializer
    filter_backends = [SearchFilter, OrderingFilter]
    search_fields = ['content', 'user__username', 'user__first_name', 'user__last_name']

    def get_queryset(self, *args, **kwargs):
        """Return all chirps, narrowed by the optional ?q= search term."""
        chirps = Chirp.objects.all()
        term = self.request.GET.get('q')
        if not term:
            return chirps
        matches = (
            Q(content__icontains=term)
            | Q(user__username__icontains=term)
            | Q(user__first_name__icontains=term)
            | Q(user__last_name__icontains=term)
        )
        # distinct() guards against duplicates from the user join.
        return chirps.filter(matches).distinct()
class ChirpUpdateAPIView(RetrieveUpdateAPIView):
    """Retrieve or edit a single chirp; only its owner may write."""
    queryset = Chirp.objects.all().order_by('-timestamp')
    serializer_class = ChirpCreateUpdateSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
class ChirpDeleteAPIView(DestroyAPIView):
    """Delete a single chirp; only its owner may do so."""
    queryset = Chirp.objects.all()
    serializer_class = ChirpDetailSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
class ChirpDetailAPIView(RetrieveAPIView):
    """Read-only detail view for a single chirp (no auth required)."""
    queryset = Chirp.objects.all()
    serializer_class = ChirpDetailSerializer
class ChirpCreateAPIView(CreateAPIView):
    """Create a chirp; requires authentication, owner taken from the request."""
    queryset = Chirp.objects.all()
    serializer_class = ChirpCreateUpdateSerializer
    permission_classes = [IsAuthenticated]

    def perform_create(self, serializer):
        """Attach the requesting user as the chirp's owner on save."""
        serializer.save(user=self.request.user)
|
from flask import Flask, render_template, request
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from chatterbot.trainers import ListTrainer
app = Flask(__name__)

# Fix: ListTrainer expects an iterable of conversation statements; training
# on the raw file contents iterates it character by character. One statement
# per line is assumed here — TODO confirm file.txt's format.
with open('file.txt', 'r') as file:
    conversation = file.read().splitlines()

bot = ChatBot("Sunanda's Resume ChatBot")
trainer = ListTrainer(bot)
trainer.train(conversation)
@app.route("/")
def home():
return render_template("home.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
return str(bot.get_response(userText))
if __name__ == "__main__":
app.run() |
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.instancenorm import InstanceNorm2d
from torchvision.ops import Conv2dNormActivation
from ...transforms._presets import OpticalFlow
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._utils import handle_legacy_interface
from ._utils import grid_sample, make_coords_grid, upsample_flow
# Public API of this module: model class, builders, and their weight enums.
__all__ = (
    "RAFT",
    "raft_large",
    "raft_small",
    "Raft_Large_Weights",
    "Raft_Small_Weights",
)
class ResidualBlock(nn.Module):
    """Slightly modified Residual block with extra relu and biases."""
    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1, always_project: bool = False):
        """Two 3x3 conv+norm+relu layers plus a skip connection.

        Args:
            in_channels: input channel count.
            out_channels: output channel count.
            norm_layer: normalization layer class (e.g. ``nn.BatchNorm2d``).
            stride: stride of the first conv; the skip path is projected
                with a strided 1x1 conv whenever stride != 1.
            always_project: force the 1x1 projection even at stride 1.
        """
        super().__init__()
        # Note regarding bias=True:
        # Usually we can pass bias=False in conv layers followed by a norm layer.
        # But in the RAFT training reference, the BatchNorm2d layers are only activated for the first dataset,
        # and frozen for the rest of the training process (i.e. set as eval()). The bias term is thus still useful
        # for the rest of the datasets. Technically, we could remove the bias for other norm layers like Instance norm
        # because these aren't frozen, but we don't bother (also, we wouldn't be able to load the original weights).
        self.convnormrelu1 = Conv2dNormActivation(
            in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
        )
        self.convnormrelu2 = Conv2dNormActivation(
            out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True
        )
        # make mypy happy
        self.downsample: nn.Module
        if stride == 1 and not always_project:
            self.downsample = nn.Identity()
        else:
            self.downsample = Conv2dNormActivation(
                in_channels,
                out_channels,
                norm_layer=norm_layer,
                kernel_size=1,
                stride=stride,
                bias=True,
                activation_layer=None,
            )
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        """Return relu(downsample(x) + F(x)) where F is the two conv layers."""
        y = x
        y = self.convnormrelu1(y)
        y = self.convnormrelu2(y)
        x = self.downsample(x)
        return self.relu(x + y)
class BottleneckBlock(nn.Module):
    """Slightly modified BottleNeck block (extra relu and biases)"""
    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
        """1x1 -> 3x3 -> 1x1 conv stack with a skip connection.

        The two inner convs operate on out_channels // 4 channels; the 3x3
        conv carries the stride, and the skip path is projected with a
        strided 1x1 conv whenever stride != 1.
        """
        super().__init__()
        # See note in ResidualBlock for the reason behind bias=True
        self.convnormrelu1 = Conv2dNormActivation(
            in_channels, out_channels // 4, norm_layer=norm_layer, kernel_size=1, bias=True
        )
        self.convnormrelu2 = Conv2dNormActivation(
            out_channels // 4, out_channels // 4, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
        )
        self.convnormrelu3 = Conv2dNormActivation(
            out_channels // 4, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True
        )
        self.relu = nn.ReLU(inplace=True)
        if stride == 1:
            self.downsample = nn.Identity()
        else:
            self.downsample = Conv2dNormActivation(
                in_channels,
                out_channels,
                norm_layer=norm_layer,
                kernel_size=1,
                stride=stride,
                bias=True,
                activation_layer=None,
            )
    def forward(self, x):
        """Return relu(downsample(x) + bottleneck(x))."""
        y = x
        y = self.convnormrelu1(y)
        y = self.convnormrelu2(y)
        y = self.convnormrelu3(y)
        x = self.downsample(x)
        return self.relu(x + y)
class FeatureEncoder(nn.Module):
    """The feature encoder, used both as the actual feature encoder, and as the context encoder.
    It must downsample its input by 8.
    """
    def __init__(
        self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), strides=(2, 1, 2, 2), norm_layer=nn.BatchNorm2d
    ):
        """Conv stem + three 2-block stages + a final 1x1 conv.

        Args:
            block: residual block class used in each stage.
            layers: channel counts for (stem, stage1, stage2, stage3, final conv).
            strides: strides of the stem and of each stage's first block;
                the overall downsampling is 2 ** (number of stride-2 entries).
            norm_layer: normalization layer class.
        """
        super().__init__()
        if len(layers) != 5:
            raise ValueError(f"The expected number of layers is 5, instead got {len(layers)}")
        # See note in ResidualBlock for the reason behind bias=True
        self.convnormrelu = Conv2dNormActivation(
            3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=strides[0], bias=True
        )
        self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=strides[1])
        self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=strides[2])
        self.layer3 = self._make_2_blocks(block, layers[2], layers[3], norm_layer=norm_layer, first_stride=strides[3])
        self.conv = nn.Conv2d(layers[3], layers[4], kernel_size=1)
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        num_downsamples = len(list(filter(lambda s: s == 2, strides)))
        self.output_dim = layers[-1]
        self.downsample_factor = 2**num_downsamples
    def _make_2_blocks(self, block, in_channels, out_channels, norm_layer, first_stride):
        # One stage: a (possibly strided) block followed by a stride-1 block.
        block1 = block(in_channels, out_channels, norm_layer=norm_layer, stride=first_stride)
        block2 = block(out_channels, out_channels, norm_layer=norm_layer, stride=1)
        return nn.Sequential(block1, block2)
    def forward(self, x):
        """Return a feature map with ``output_dim`` channels, spatially reduced by ``downsample_factor``."""
        x = self.convnormrelu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv(x)
        return x
class MotionEncoder(nn.Module):
    """The motion encoder, part of the update block.
    Takes the current predicted flow and the correlation features as input and returns an encoded version of these.
    """
    def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128, 64), out_channels=128):
        """Two conv branches (correlation, flow) merged by a final conv.

        The output always ends with the raw input flow (2 channels), so the
        merge conv only produces out_channels - 2 channels.
        """
        super().__init__()
        if len(flow_layers) != 2:
            raise ValueError(f"The expected number of flow_layers is 2, instead got {len(flow_layers)}")
        if len(corr_layers) not in (1, 2):
            raise ValueError(f"The number of corr_layers should be 1 or 2, instead got {len(corr_layers)}")
        self.convcorr1 = Conv2dNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
        if len(corr_layers) == 2:
            self.convcorr2 = Conv2dNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
        else:
            self.convcorr2 = nn.Identity()
        self.convflow1 = Conv2dNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
        self.convflow2 = Conv2dNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)
        # out_channels - 2 because we cat the flow (2 channels) at the end
        self.conv = Conv2dNormActivation(
            corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3
        )
        self.out_channels = out_channels
    def forward(self, flow, corr_features):
        """Return motion features with ``out_channels`` channels (raw flow appended)."""
        corr = self.convcorr1(corr_features)
        corr = self.convcorr2(corr)
        flow_orig = flow
        flow = self.convflow1(flow)
        flow = self.convflow2(flow)
        corr_flow = torch.cat([corr, flow], dim=1)
        corr_flow = self.conv(corr_flow)
        return torch.cat([corr_flow, flow_orig], dim=1)
class ConvGRU(nn.Module):
    """Convolutional GRU cell: gated recurrent update over 2-D feature maps."""

    def __init__(self, *, input_size, hidden_size, kernel_size, padding):
        super().__init__()
        combined = hidden_size + input_size
        # All three gates operate on the concatenation of hidden state and input.
        self.convz = nn.Conv2d(combined, hidden_size, kernel_size=kernel_size, padding=padding)
        self.convr = nn.Conv2d(combined, hidden_size, kernel_size=kernel_size, padding=padding)
        self.convq = nn.Conv2d(combined, hidden_size, kernel_size=kernel_size, padding=padding)

    def forward(self, h, x):
        """Return the updated hidden state given previous state ``h`` and input ``x``."""
        stacked = torch.cat([h, x], dim=1)
        update_gate = torch.sigmoid(self.convz(stacked))
        reset_gate = torch.sigmoid(self.convr(stacked))
        candidate = torch.tanh(self.convq(torch.cat([reset_gate * h, x], dim=1)))
        return (1 - update_gate) * h + update_gate * candidate
def _pass_through_h(h, _):
    # Declared here for torchscript
    # Stand-in for a second ConvGRU when RecurrentBlock has only one cell:
    # returns the hidden state unchanged and ignores the input.
    return h
class RecurrentBlock(nn.Module):
    """Recurrent block, part of the update block.
    Takes the current hidden state and the concatenation of (motion encoder output, context) as input.
    Returns an updated hidden state.
    """
    def __init__(self, *, input_size, hidden_size, kernel_size=((1, 5), (5, 1)), padding=((0, 2), (2, 0))):
        """One or two ConvGRU cells applied in sequence.

        With the default separable (1, 5) / (5, 1) kernels, the two cells
        together cover a 5x5 receptive field at lower cost.
        """
        super().__init__()
        if len(kernel_size) != len(padding):
            raise ValueError(
                f"kernel_size should have the same length as padding, instead got len(kernel_size) = {len(kernel_size)} and len(padding) = {len(padding)}"
            )
        if len(kernel_size) not in (1, 2):
            raise ValueError(f"kernel_size should either 1 or 2, instead got {len(kernel_size)}")
        self.convgru1 = ConvGRU(
            input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[0], padding=padding[0]
        )
        if len(kernel_size) == 2:
            self.convgru2 = ConvGRU(
                input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[1], padding=padding[1]
            )
        else:
            # Single-cell configuration: the second step is a no-op pass-through.
            self.convgru2 = _pass_through_h
        self.hidden_size = hidden_size
    def forward(self, h, x):
        """Return the hidden state after applying both GRU steps to input ``x``."""
        h = self.convgru1(h, x)
        h = self.convgru2(h, x)
        return h
class FlowHead(nn.Module):
    """Flow head, part of the update block.

    Maps the recurrent unit's hidden state to the 2-channel "delta flow" prediction.
    """

    def __init__(self, *, in_channels, hidden_size):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return the (N, 2, H, W) delta-flow prediction for hidden state ``x``."""
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class UpdateBlock(nn.Module):
    """The update block which contains the motion encoder, the recurrent block, and the flow head.
    It must expose a ``hidden_state_size`` attribute which is the hidden state size of its recurrent block.
    """
    def __init__(self, *, motion_encoder, recurrent_block, flow_head):
        super().__init__()
        self.motion_encoder = motion_encoder
        self.recurrent_block = recurrent_block
        self.flow_head = flow_head
        # Mirrored so callers can size the hidden state without reaching inside.
        self.hidden_state_size = recurrent_block.hidden_size
    def forward(self, hidden_state, context, corr_features, flow):
        """Run one refinement step; return (new_hidden_state, delta_flow)."""
        motion_features = self.motion_encoder(flow, corr_features)
        x = torch.cat([context, motion_features], dim=1)
        hidden_state = self.recurrent_block(hidden_state, x)
        delta_flow = self.flow_head(hidden_state)
        return hidden_state, delta_flow
class MaskPredictor(nn.Module):
    """Mask predictor to be used when upsampling the predicted flow.
    It takes the hidden state of the recurrent unit as input and outputs the mask.
    This is not used in the raft-small model.
    """
    def __init__(self, *, in_channels, hidden_size, multiplier=0.25):
        super().__init__()
        self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
        # 8 * 8 * 9 because the predicted flow is downsampled by 8, from the downsampling of the initial FeatureEncoder,
        # and we interpolate with all 9 surrounding neighbors. See paper and appendix B.
        self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0)
        # In the original code, they use a factor of 0.25 to "downweight the gradients" of that branch.
        # See e.g. https://github.com/princeton-vl/RAFT/issues/119#issuecomment-953950419
        # or https://github.com/princeton-vl/RAFT/issues/24.
        # It doesn't seem to affect epe significantly and can likely be set to 1.
        self.multiplier = multiplier
    def forward(self, x):
        """Return the scaled (N, 8*8*9, H, W) upsampling-mask logits for ``x``."""
        x = self.convrelu(x)
        x = self.conv(x)
        return self.multiplier * x
class CorrBlock(nn.Module):
    """The correlation block.
    Creates a correlation pyramid with ``num_levels`` levels from the outputs of the feature encoder,
    and then indexes from this pyramid to create correlation features.
    The "indexing" of a given centroid pixel x' is done by concatenating its surrounding neighbors that
    are within a ``radius``, according to the infinity norm (see paper section 3.2).
    Note: typo in the paper, it should be infinity norm, not 1-norm.
    """
    def __init__(self, *, num_levels: int = 4, radius: int = 4):
        super().__init__()
        self.num_levels = num_levels
        self.radius = radius
        # Placeholder so the attribute is typed for torchscript; build_pyramid() replaces it.
        self.corr_pyramid: List[Tensor] = [torch.tensor(0)]  # useless, but torchscript is otherwise confused :')
        # The neighborhood of a centroid pixel x' is {x' + delta, ||delta||_inf <= radius}
        # so it's a square surrounding x', and its sides have a length of 2 * radius + 1
        # The paper claims that it's ||.||_1 instead of ||.||_inf but it's a typo:
        # https://github.com/princeton-vl/RAFT/issues/122
        self.out_channels = num_levels * (2 * radius + 1) ** 2
    def build_pyramid(self, fmap1, fmap2):
        """Build the correlation pyramid from two feature maps.
        The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2).
        The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions
        to build the correlation pyramid. The result is stored in ``self.corr_pyramid``.
        """
        if fmap1.shape != fmap2.shape:
            raise ValueError(
                f"Input feature maps should have the same shape, instead got {fmap1.shape} (fmap1.shape) != {fmap2.shape} (fmap2.shape)"
            )
        # Explaining min_fmap_size below: the fmaps are down-sampled (num_levels - 1) times by a factor of 2.
        # The last corr_volume must have at least 2 values (hence the 2* factor), otherwise grid_sample() would
        # produce nans in its output.
        min_fmap_size = 2 * (2 ** (self.num_levels - 1))
        if any(fmap_size < min_fmap_size for fmap_size in fmap1.shape[-2:]):
            raise ValueError(
                "Feature maps are too small to be down-sampled by the correlation pyramid. "
                f"H and W of feature maps should be at least {min_fmap_size}; got: {fmap1.shape[-2:]}. "
                "Remember that input images to the model are downsampled by 8, so that means their "
                f"dimensions should be at least 8 * {min_fmap_size} = {8 * min_fmap_size}."
            )
        corr_volume = self._compute_corr_volume(fmap1, fmap2)
        batch_size, h, w, num_channels, _, _ = corr_volume.shape  # _, _ = h, w
        # Fold fmap1's spatial dims into the batch so each pixel gets its own 2D correlation map,
        # which avg_pool2d then downsamples level by level.
        corr_volume = corr_volume.reshape(batch_size * h * w, num_channels, h, w)
        self.corr_pyramid = [corr_volume]
        for _ in range(self.num_levels - 1):
            corr_volume = F.avg_pool2d(corr_volume, kernel_size=2, stride=2)
            self.corr_pyramid.append(corr_volume)
    def index_pyramid(self, centroids_coords):
        """Return correlation features by indexing from the pyramid.

        For each centroid, bilinearly samples the (2*radius+1)^2 neighborhood at every
        pyramid level and concatenates the results along the channel dimension.
        """
        neighborhood_side_len = 2 * self.radius + 1  # see note in __init__ about out_channels
        di = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
        dj = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
        delta = torch.stack(torch.meshgrid(di, dj, indexing="ij"), dim=-1).to(centroids_coords.device)
        delta = delta.view(1, neighborhood_side_len, neighborhood_side_len, 2)
        batch_size, _, h, w = centroids_coords.shape  # _ = 2
        centroids_coords = centroids_coords.permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 2)
        indexed_pyramid = []
        for corr_volume in self.corr_pyramid:
            sampling_coords = centroids_coords + delta  # end shape is (batch_size * h * w, side_len, side_len, 2)
            indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view(
                batch_size, h, w, -1
            )
            indexed_pyramid.append(indexed_corr_volume)
            # Each pyramid level is half the resolution of the previous one, so the
            # centroid coordinates must be halved before indexing the next level.
            centroids_coords = centroids_coords / 2
        corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous()
        expected_output_shape = (batch_size, self.out_channels, h, w)
        if corr_features.shape != expected_output_shape:
            raise ValueError(
                f"Output shape of index pyramid is incorrect. Should be {expected_output_shape}, got {corr_features.shape}"
            )
        return corr_features
    def _compute_corr_volume(self, fmap1, fmap2):
        """All-pairs dot-product correlation, normalized by sqrt(num_channels)."""
        batch_size, num_channels, h, w = fmap1.shape
        fmap1 = fmap1.view(batch_size, num_channels, h * w)
        fmap2 = fmap2.view(batch_size, num_channels, h * w)
        corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
        corr = corr.view(batch_size, h, w, 1, h, w)
        return corr / torch.sqrt(torch.tensor(num_channels))
class RAFT(nn.Module):
    def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None):
        """RAFT model from
        `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
        args:
            feature_encoder (nn.Module): The feature encoder. It must downsample the input by 8.
                Its input is the concatenation of ``image1`` and ``image2``.
            context_encoder (nn.Module): The context encoder. It must downsample the input by 8.
                Its input is ``image1``. As in the original implementation, its output will be split into 2 parts:
                - one part will be used as the actual "context", passed to the recurrent unit of the ``update_block``
                - one part will be used to initialize the hidden state of the recurrent unit of
                  the ``update_block``
                These 2 parts are split according to the ``hidden_state_size`` of the ``update_block``, so the output
                of the ``context_encoder`` must be strictly greater than ``hidden_state_size``.
            corr_block (nn.Module): The correlation block, which creates a correlation pyramid from the output of the
                ``feature_encoder``, and then indexes from this pyramid to create correlation features. It must expose
                2 methods:
                - a ``build_pyramid`` method that takes ``feature_map_1`` and ``feature_map_2`` as input (these are the
                  output of the ``feature_encoder``).
                - a ``index_pyramid`` method that takes the coordinates of the centroid pixels as input, and returns
                  the correlation features. See paper section 3.2.
                It must expose an ``out_channels`` attribute.
            update_block (nn.Module): The update block, which contains the motion encoder, the recurrent unit, and the
                flow head. It takes as input the hidden state of its recurrent unit, the context, the correlation
                features, and the current predicted flow. It outputs an updated hidden state, and the ``delta_flow``
                prediction (see paper appendix A). It must expose a ``hidden_state_size`` attribute.
            mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow.
                The output channel must be 8 * 8 * 9 - see paper section 3.3, and Appendix B.
                If ``None`` (default), the flow is upsampled using interpolation.
        """
        super().__init__()
        _log_api_usage_once(self)
        self.feature_encoder = feature_encoder
        self.context_encoder = context_encoder
        self.corr_block = corr_block
        self.update_block = update_block
        self.mask_predictor = mask_predictor
        # Fail fast: forward() needs this attribute to split the context encoder's output.
        if not hasattr(self.update_block, "hidden_state_size"):
            raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.")
    def forward(self, image1, image2, num_flow_updates: int = 12):
        """Return a list of ``num_flow_updates`` flow predictions, one per refinement step.

        The last element of the list is the most refined prediction.
        """
        batch_size, _, h, w = image1.shape
        if (h, w) != image2.shape[-2:]:
            raise ValueError(f"input images should have the same shape, instead got ({h}, {w}) != {image2.shape[-2:]}")
        # Bug fix: the previous check ``not (h % 8 == 0) and (w % 8 == 0)`` parsed as
        # ``(not h % 8 == 0) and (w % 8 == 0)`` because of operator precedence, so it
        # only raised when h was NOT divisible while w WAS — most invalid sizes passed.
        if h % 8 != 0 or w % 8 != 0:
            raise ValueError(f"input image H and W should be divisible by 8, instead got {h} (h) and {w} (w)")
        # Both images go through the same feature encoder in a single batched pass.
        fmaps = self.feature_encoder(torch.cat([image1, image2], dim=0))
        fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0)
        if fmap1.shape[-2:] != (h // 8, w // 8):
            raise ValueError("The feature encoder should downsample H and W by 8")
        self.corr_block.build_pyramid(fmap1, fmap2)
        context_out = self.context_encoder(image1)
        if context_out.shape[-2:] != (h // 8, w // 8):
            raise ValueError("The context encoder should downsample H and W by 8")
        # As in the original paper, the actual output of the context encoder is split in 2 parts:
        # - one part is used to initialize the hidden state of the recurrent units of the update block
        # - the rest is the "actual" context.
        hidden_state_size = self.update_block.hidden_state_size
        out_channels_context = context_out.shape[1] - hidden_state_size
        if out_channels_context <= 0:
            raise ValueError(
                f"The context encoder outputs {context_out.shape[1]} channels, but it should have strictly more than hidden_state={hidden_state_size} channels"
            )
        hidden_state, context = torch.split(context_out, [hidden_state_size, out_channels_context], dim=1)
        hidden_state = torch.tanh(hidden_state)
        context = F.relu(context)
        # coords0 is the fixed pixel-coordinate grid; coords1 is refined at every iteration,
        # so the (1/8-resolution) predicted flow is always coords1 - coords0.
        coords0 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
        coords1 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
        flow_predictions = []
        for _ in range(num_flow_updates):
            coords1 = coords1.detach()  # Don't backpropagate gradients through this branch, see paper
            corr_features = self.corr_block.index_pyramid(centroids_coords=coords1)
            flow = coords1 - coords0
            hidden_state, delta_flow = self.update_block(hidden_state, context, corr_features, flow)
            coords1 = coords1 + delta_flow
            up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state)
            upsampled_flow = upsample_flow(flow=(coords1 - coords0), up_mask=up_mask)
            flow_predictions.append(upsampled_flow)
        return flow_predictions
# Metadata shared by every weight entry below; merged into each ``meta`` dict via **_COMMON_META.
_COMMON_META = {
    "min_size": (128, 128),
}
class Raft_Large_Weights(WeightsEnum):
    """The metrics reported here are as follows.
    ``epe`` is the "end-point-error" and indicates how far (in pixels) the
    predicted flow is from its true value. This is averaged over all pixels
    of all images. ``per_image_epe`` is similar, but the average is different:
    the epe is first computed on each image independently, and then averaged
    over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
    in the original paper, and it's only used on Kitti. ``fl-all`` is also a
    Kitti-specific metric, defined by the author of the dataset and used for the
    Kitti leaderboard. It corresponds to the average of pixels whose epe is
    either <3px, or <5% of flow's 2-norm.
    """
    # Naming scheme: C = FlyingChairs, T = FlyingThings3D, S = Sintel, K = Kitti, H = HD1K.
    # V1 entries are ported from the original repo; V2 entries were retrained by torchvision.
    C_T_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_V1-22a6c225.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.4411},
                "Sintel-Train-Finalpass": {"epe": 2.7894},
                "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """These weights were ported from the original paper. They
            are trained on :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )
    C_T_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_V2-1bb1363a.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.3822},
                "Sintel-Train-Finalpass": {"epe": 2.7161},
                "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """These weights were trained from scratch on
            :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )
    C_T_SKHT_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V1-0b8c9e55.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Test-Cleanpass": {"epe": 1.94},
                "Sintel-Test-Finalpass": {"epe": 3.18},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were ported from the original paper. They are
                trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D` and fine-tuned on
                Sintel. The Sintel fine-tuning step is a combination of
                :class:`~torchvision.datasets.Sintel`,
                :class:`~torchvision.datasets.KittiFlow`,
                :class:`~torchvision.datasets.HD1K`, and
                :class:`~torchvision.datasets.FlyingThings3D` (clean pass).
            """,
        },
    )
    C_T_SKHT_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V2-ff5fadd5.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Test-Cleanpass": {"epe": 1.819},
                "Sintel-Test-Finalpass": {"epe": 3.067},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were trained from scratch. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D` and then
                fine-tuned on Sintel. The Sintel fine-tuning step is a
                combination of :class:`~torchvision.datasets.Sintel`,
                :class:`~torchvision.datasets.KittiFlow`,
                :class:`~torchvision.datasets.HD1K`, and
                :class:`~torchvision.datasets.FlyingThings3D` (clean pass).
            """,
        },
    )
    C_T_SKHT_K_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V1-4a6a5039.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Kitti-Test": {"fl_all": 5.10},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were ported from the original paper. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D`,
                fine-tuned on Sintel, and then fine-tuned on
                :class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
                step was described above.
            """,
        },
    )
    C_T_SKHT_K_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V2-b5c70766.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Kitti-Test": {"fl_all": 5.19},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were trained from scratch. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D`,
                fine-tuned on Sintel, and then fine-tuned on
                :class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
                step was described above.
            """,
        },
    )
    # Alias resolved when callers request ``weights="DEFAULT"``.
    DEFAULT = C_T_SKHT_V2
class Raft_Small_Weights(WeightsEnum):
    """The metrics reported here are as follows.
    ``epe`` is the "end-point-error" and indicates how far (in pixels) the
    predicted flow is from its true value. This is averaged over all pixels
    of all images. ``per_image_epe`` is similar, but the average is different:
    the epe is first computed on each image independently, and then averaged
    over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
    in the original paper, and it's only used on Kitti. ``fl-all`` is also a
    Kitti-specific metric, defined by the author of the dataset and used for the
    Kitti leaderboard. It corresponds to the average of pixels whose epe is
    either <3px, or <5% of flow's 2-norm.
    """
    # C = FlyingChairs, T = FlyingThings3D; V1 ported from the original repo, V2 retrained.
    C_T_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_small_C_T_V1-ad48884c.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 990162,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 2.1231},
                "Sintel-Train-Finalpass": {"epe": 3.2790},
                "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
            },
            "_ops": 47.655,
            "_file_size": 3.821,
            "_docs": """These weights were ported from the original paper. They
            are trained on :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )
    C_T_V2 = Weights(
        url="https://download.pytorch.org/models/raft_small_C_T_V2-01064c6d.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 990162,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.9901},
                "Sintel-Train-Finalpass": {"epe": 3.2831},
                "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
            },
            "_ops": 47.655,
            "_file_size": 3.821,
            "_docs": """These weights were trained from scratch on
            :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )
    # Alias resolved when callers request ``weights="DEFAULT"``.
    DEFAULT = C_T_V2
def _raft(
    *,
    weights=None,
    progress=False,
    # Feature encoder
    feature_encoder_layers,
    feature_encoder_block,
    feature_encoder_norm_layer,
    # Context encoder
    context_encoder_layers,
    context_encoder_block,
    context_encoder_norm_layer,
    # Correlation block
    corr_block_num_levels,
    corr_block_radius,
    # Motion encoder
    motion_encoder_corr_layers,
    motion_encoder_flow_layers,
    motion_encoder_out_channels,
    # Recurrent block
    recurrent_block_hidden_state_size,
    recurrent_block_kernel_size,
    recurrent_block_padding,
    # Flow Head
    flow_head_hidden_size,
    # Mask predictor
    use_mask_predictor,
    **kwargs,
):
    """Assemble a :class:`RAFT` model from the given hyper-parameters and optionally load ``weights``.

    Each sub-module (``feature_encoder``, ``context_encoder``, ``corr_block``,
    ``update_block``, ``mask_predictor``) can be overridden by passing a pre-built
    instance through ``kwargs``; the corresponding hyper-parameters are then ignored.
    """
    feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder(
        block=feature_encoder_block, layers=feature_encoder_layers, norm_layer=feature_encoder_norm_layer
    )
    context_encoder = kwargs.pop("context_encoder", None) or FeatureEncoder(
        block=context_encoder_block, layers=context_encoder_layers, norm_layer=context_encoder_norm_layer
    )
    corr_block = kwargs.pop("corr_block", None) or CorrBlock(num_levels=corr_block_num_levels, radius=corr_block_radius)
    update_block = kwargs.pop("update_block", None)
    if update_block is None:
        motion_encoder = MotionEncoder(
            in_channels_corr=corr_block.out_channels,
            corr_layers=motion_encoder_corr_layers,
            flow_layers=motion_encoder_flow_layers,
            out_channels=motion_encoder_out_channels,
        )
        # See comments in forward pass of RAFT class about why we split the output of the context encoder
        out_channels_context = context_encoder_layers[-1] - recurrent_block_hidden_state_size
        recurrent_block = RecurrentBlock(
            input_size=motion_encoder.out_channels + out_channels_context,
            hidden_size=recurrent_block_hidden_state_size,
            kernel_size=recurrent_block_kernel_size,
            padding=recurrent_block_padding,
        )
        flow_head = FlowHead(in_channels=recurrent_block_hidden_state_size, hidden_size=flow_head_hidden_size)
        update_block = UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head)
    mask_predictor = kwargs.pop("mask_predictor", None)
    if mask_predictor is None and use_mask_predictor:
        mask_predictor = MaskPredictor(
            in_channels=recurrent_block_hidden_state_size,
            hidden_size=256,
            multiplier=0.25,  # See comment in MaskPredictor about this
        )
    model = RAFT(
        feature_encoder=feature_encoder,
        context_encoder=context_encoder,
        corr_block=corr_block,
        update_block=update_block,
        mask_predictor=mask_predictor,
        **kwargs,  # not really needed, all params should be consumed by now
    )
    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
    return model
@register_model()
@handle_legacy_interface(weights=("pretrained", Raft_Large_Weights.C_T_SKHT_V2))
def raft_large(*, weights: Optional[Raft_Large_Weights] = None, progress=True, **kwargs) -> RAFT:
    """RAFT model from
    `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
    Please see the example below for a tutorial on how to use this model.
    Args:
        weights(:class:`~torchvision.models.optical_flow.Raft_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.optical_flow.Raft_Large_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.optical_flow.Raft_Large_Weights
        :members:
    """
    weights = Raft_Large_Weights.verify(weights)
    # Hyper-parameters of the "large" RAFT variant, grouped by sub-module.
    arch_config = dict(
        # Feature encoder
        feature_encoder_layers=(64, 64, 96, 128, 256),
        feature_encoder_block=ResidualBlock,
        feature_encoder_norm_layer=InstanceNorm2d,
        # Context encoder
        context_encoder_layers=(64, 64, 96, 128, 256),
        context_encoder_block=ResidualBlock,
        context_encoder_norm_layer=BatchNorm2d,
        # Correlation block
        corr_block_num_levels=4,
        corr_block_radius=4,
        # Motion encoder
        motion_encoder_corr_layers=(256, 192),
        motion_encoder_flow_layers=(128, 64),
        motion_encoder_out_channels=128,
        # Recurrent block
        recurrent_block_hidden_state_size=128,
        recurrent_block_kernel_size=((1, 5), (5, 1)),
        recurrent_block_padding=((0, 2), (2, 0)),
        # Flow head
        flow_head_hidden_size=256,
        # Mask predictor
        use_mask_predictor=True,
    )
    return _raft(weights=weights, progress=progress, **arch_config, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", Raft_Small_Weights.C_T_V2))
def raft_small(*, weights: Optional[Raft_Small_Weights] = None, progress=True, **kwargs) -> RAFT:
    """RAFT "small" model from
    `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`__.
    Please see the example below for a tutorial on how to use this model.
    Args:
        weights(:class:`~torchvision.models.optical_flow.Raft_Small_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.optical_flow.Raft_Small_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.optical_flow.Raft_Small_Weights
        :members:
    """
    weights = Raft_Small_Weights.verify(weights)
    # Hyper-parameters of the "small" RAFT variant, grouped by sub-module.
    arch_config = dict(
        # Feature encoder
        feature_encoder_layers=(32, 32, 64, 96, 128),
        feature_encoder_block=BottleneckBlock,
        feature_encoder_norm_layer=InstanceNorm2d,
        # Context encoder
        context_encoder_layers=(32, 32, 64, 96, 160),
        context_encoder_block=BottleneckBlock,
        context_encoder_norm_layer=None,
        # Correlation block
        corr_block_num_levels=4,
        corr_block_radius=3,
        # Motion encoder
        motion_encoder_corr_layers=(96,),
        motion_encoder_flow_layers=(64, 32),
        motion_encoder_out_channels=82,
        # Recurrent block
        recurrent_block_hidden_state_size=96,
        recurrent_block_kernel_size=(3,),
        recurrent_block_padding=(1,),
        # Flow head
        flow_head_hidden_size=128,
        # Mask predictor (not used in the small variant)
        use_mask_predictor=False,
    )
    return _raft(weights=weights, progress=progress, **arch_config, **kwargs)
|
import soursecode.AI.ai as ai
from soursecode.model.letters import Letters
from soursecode.model.board import Board
# Module-level game state, created once at import time and shared by every
# function below.
b1 = Board()
l1 = Letters()
# NOTE(review): assumes fill_spot populates b1.spot (the hidden word the
# functions below compare against) — confirm in soursecode.AI.ai.
ai.fill_spot(l1, b1)
def game_over(state: bool):
    """Print the end-of-game banner; ``state`` is True on a win, False on a loss."""
    banner = "***YOU WIN ASSHOLE***" if state else 'sry you are out of lives! Fucking bitch'
    print(banner)
def correctness(guess: str):
    """Mark each letter of ``guess`` against the hidden word ``b1.spot``.

    Returns a list with one mark per position: "C" for an exact match,
    "MP" for a letter present elsewhere in the word, "N" for absent.
    """
    marks = []
    for idx, target_letter in enumerate(b1.spot):
        if guess[idx] == target_letter:
            marks.append("C")
        elif guess[idx] in b1.spot:
            marks.append("MP")
        else:
            marks.append("N")
    return marks
def handle_lives(guess, lives):
    """Score ``guess`` and return the remaining lives, or None on a win.

    Prints the guess and its per-letter marks, then subtracts one life per
    letter that is not an exact match ("C"). Returns None when every letter
    is correct, which is the win sentinel that init() checks for.

    Bug fix: the original returned after the first wrong letter (so at most
    one life was lost per guess, no matter how many letters were wrong) and
    printed the win banner once per leading correct letter even on a wrong
    guess.
    """
    temp = correctness(guess)
    print()
    print(list(guess))
    print(temp)
    print()
    wrong = sum(1 for mark in temp if mark != "C")
    if wrong == 0:
        game_over(True)
        return None  # win sentinel expected by init()
    # Clamp at 0 so init()'s `lives == 0` loss check still fires.
    return max(lives - wrong, 0)
def init():
    """Run the guessing loop until the player wins or runs out of lives.

    A win is signalled by handle_lives() returning None; reaching zero lives
    prints the losing banner.
    """
    remaining = len(b1.spot) * 3
    while remaining > 0:
        print("You have", remaining, "left")
        attempt = input('please enter your guess : ').upper()
        if attempt == "HESOYAM":  # cheat code: reveal the hidden word
            print(b1.spot)
        remaining = handle_lives(attempt, remaining)
        if remaining is None:  # win sentinel
            break
    if remaining == 0:
        game_over(False)
# Start the game immediately.
# NOTE(review): this also runs when the module is imported; consider an
# `if __name__ == "__main__":` guard — confirm nothing relies on import-time start.
init()
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:learn_related.py
# @Author: Michael.liu
# @Date:2019/2/12
# @Desc:
from os import listdir
import xml.etree.ElementTree as ET
from pyhanlp import *
import sqlite3
import configparser
from datetime import *
import math
import pandas as pd
import numpy as np
from pyhanlp import *
from sklearn.metrics import pairwise_distances
import jieba.analyse
class LearnRanking:
    """Build a k-nearest-neighbours table of related documents.

    For every XML document under ``doc_dir_path`` a tf-idf document/term matrix
    is built (via jieba, using a custom idf file generated from the corpus),
    cosine similarity between documents is computed, and the ids of the most
    similar documents are written to the ``knearest`` sqlite table.

    Fixes vs. the original: file handles are closed (they were leaked), the
    config file is parsed once (it was parsed twice), the identical duplicate
    definition of ``construct_k_nearest_matrix`` was removed, dead locals in
    ``extract_keywords`` were dropped, and ``k_nearest`` is now per-instance
    (it was a mutable class attribute shared across instances).
    """
    # Class-level defaults kept for backward compatibility; __init__ sets the
    # real per-instance values.
    stop_words = set()
    k_nearest = []
    config_path = ''
    config_encoding = ''
    doc_dir_path = ''
    doc_encoding = ''
    stop_words_path = ''
    stop_words_encoding = ''
    idf_path = ''
    db_path = ''
    big_dic = {}

    def __init__(self, config_path, config_encoding):
        """Parse the .ini config and load the stop-word set.

        Args:
            config_path: path to the config.ini file.
            config_encoding: encoding used to read the config file.
        """
        self.config_path = config_path
        self.config_encoding = config_encoding
        config = configparser.ConfigParser()
        config.read(config_path, config_encoding)
        # Stop-word path is resolved relative to this module's directory.
        file_path = os.path.join(os.path.dirname(__file__), config['DEFAULT']['stop_words_path'])
        file_encoding = config['DEFAULT']['stop_words_encoding']
        with open(file_path, encoding=file_encoding) as f:
            self.stop_words = set(f.read().split('\n'))
        self.db_path = config['DEFAULT']['db_path']
        self.doc_dir_path = config['DEFAULT']['doc_dir_path']
        self.idf_path = config['DEFAULT']['idf_path']
        self.k_nearest = []  # per-instance, not shared class state

    def write_k_nearest_matrix_to_db(self):
        """Persist ``self.k_nearest`` into the ``knearest`` table (drop + recreate)."""
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        c.execute('''DROP TABLE IF EXISTS knearest''')
        c.execute('''CREATE TABLE knearest
                     (id INTEGER PRIMARY KEY, first INTEGER, second INTEGER,
                      third INTEGER, fourth INTEGER, fifth INTEGER)''')
        for docid, doclist in self.k_nearest:
            c.execute("INSERT INTO knearest VALUES (?, ?, ?, ?, ?, ?)", tuple([docid] + doclist))
        conn.commit()
        conn.close()

    def is_number(self, s):
        """Return True if ``s`` parses as a float."""
        try:
            float(s)
            return True
        except ValueError:
            return False

    def extract_keywords(self, content, N):
        """Return the top-``N`` keywords of ``content`` using HanLP's TextRank."""
        # NOTE(review): the original comment said this "disables" term natures
        # but the flag is set to True — confirm the intended setting.
        HanLP.Config.ShowTermNature = True
        return HanLP.extractKeyword(content, N)

    def new_seg(self, content):
        """Segment ``content`` with HanLP, dropping whitespace tokens and single-char words."""
        ret_list = []
        for term in HanLP.segment(content):
            word = str(term.word)
            if word in ('', '\r', '\t\n', '\n', '\t'):
                continue
            if len(word) == 1:
                continue
            ret_list.append(word)
        return ret_list

    def clean_list(self, seg_list):
        """Count non-empty, non-numeric, non-stop-word tokens.

        Returns:
            (total_count, {token: frequency}) with tokens lower-cased/stripped.
        """
        cleaned_dict = {}
        n = 0
        for token in seg_list:
            token = token.strip().lower()
            if token != '' and not self.is_number(token) and token not in self.stop_words:
                n += 1
                cleaned_dict[token] = cleaned_dict.get(token, 0) + 1
        return n, cleaned_dict

    def construct_dt_matrix(self, files, topK=200):
        """Build the document-term tf-idf matrix (one row per doc, column 0 = doc id)."""
        jieba.analyse.set_idf_path(self.idf_path)
        # NOTE(review): the ``files`` argument is immediately overwritten, as in
        # the original code; kept to preserve behavior — confirm callers don't
        # rely on passing a custom file list.
        files = listdir(self.doc_dir_path)
        M = len(files)
        N = 1
        terms = {}  # term -> column index (columns start at 1; column 0 holds the doc id)
        dt = []
        for fname in files:
            root = ET.parse(self.doc_dir_path + fname).getroot()
            title = root.find('title').text
            body = root.find('body').text
            docid = int(root.find('id').text)
            tags = jieba.analyse.extract_tags(title + '。' + body, topK=topK, withWeight=True)
            cleaned_dict = {}
            for word, tfidf in tags:
                word = word.strip().lower()
                if word == '' or self.is_number(word):
                    continue
                cleaned_dict[word] = tfidf
                if word not in terms:
                    terms[word] = N
                    N += 1
            dt.append([docid, cleaned_dict])
        dt_matrix = [[0 for _ in range(N)] for _ in range(M)]
        row = 0
        for docid, t_tfidf in dt:
            dt_matrix[row][0] = docid
            for term, tfidf in t_tfidf.items():
                dt_matrix[row][terms[term]] = tfidf
            row += 1
        dt_matrix = pd.DataFrame(dt_matrix)
        dt_matrix.index = dt_matrix[0]
        print('dt_matrix shape:(%d %d)' % (dt_matrix.shape))
        return dt_matrix

    def construct_k_nearest_matrix(self, dt_matrix, k):
        """Append, for each document, the ids of its ``k`` most cosine-similar neighbours."""
        tmp = np.array(1 - pairwise_distances(dt_matrix[dt_matrix.columns[1:]], metric="cosine"))
        similarity_matrix = pd.DataFrame(tmp, index=dt_matrix.index.tolist(), columns=dt_matrix.index.tolist())
        for i in similarity_matrix.index:
            tmp = [int(i), []]
            j = 0
            while j < k:
                max_col = similarity_matrix.loc[i].idxmax(axis=1)
                # Mark as consumed so the next idxmax finds the next-best neighbour.
                similarity_matrix.loc[i][max_col] = -1
                if max_col != i:  # skip self-similarity
                    tmp[1].append(int(max_col))  # max column name
                    j += 1
            self.k_nearest.append(tmp)

    def gen_idf_file(self):
        """Compute idf = log(n_docs / doc_freq) for every corpus word and write it to ``idf_path``."""
        files = listdir(self.doc_dir_path)
        n = float(len(files))
        idf = {}
        for fname in files:
            root = ET.parse(self.doc_dir_path + fname).getroot()
            title = root.find('title').text
            body = root.find('body').text
            seg_list = jieba.lcut(title + '。' + body, cut_all=False)
            # Document frequency: count each word once per document.
            seg_list = set(seg_list) - self.stop_words
            for word in seg_list:
                word = word.strip().lower()
                if word == '' or self.is_number(word):
                    continue
                idf[word] = idf.get(word, 0) + 1
        with open(self.idf_path, 'w', encoding='utf-8') as idf_file:
            for word, df in idf.items():
                idf_file.write('%s %.9f\n' % (word, math.log(n / df)))

    def find_k_nearest(self, k, topK):
        """End-to-end driver: idf file -> dt matrix -> k-nearest -> sqlite."""
        self.gen_idf_file()
        files = listdir(self.doc_dir_path)
        dt_matrix = self.construct_dt_matrix(files, topK)
        self.construct_k_nearest_matrix(dt_matrix, k)
        self.write_k_nearest_matrix_to_db()
    # TODO: replace the remaining jieba usage with pyhanlp entirely
# Script entry point: build the related-articles (k-nearest) table.
if __name__=='__main__':
    print('-----start time: %s-----' % (datetime.today()))
    # config.ini is expected to live next to this script.
    filename = os.path.join(os.path.dirname(__file__), 'config.ini')
    rm = LearnRanking(filename, 'utf-8')
    # 5 nearest neighbours per document, top-25 keywords per document.
    rm.find_k_nearest(5, 25)
    print('-----finish time: %s-----' % (datetime.today()))
|
# Create your tests here.
import datetime

from django.test import TestCase
from django.urls import reverse
from django.utils import timezone

from .models import Question
class QuestionModelTests(TestCase):
    """Unit tests for Question.was_published_recently()."""
    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is in the future.
        """
        pub_date = timezone.now() + datetime.timedelta(days=30)
        question = Question(pub_date=pub_date)
        self.assertIs(question.was_published_recently(), False)
    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() returns True for questions whose pub_date
        is within the last day.
        """
        pub_date = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        question = Question(pub_date=pub_date)
        self.assertIs(question.was_published_recently(), True)
def create_question(question_text, days):
    """
    Create and save a Question whose pub_date is offset from now by `days`
    (negative for questions published in the past, positive for questions
    that have yet to be published).
    """
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_date)
class QuestionIndexViewTests(TestCase):
    """Integration tests for the polls index view."""
    def _get_index(self):
        # All tests hit the same URL; resolve it in one place.
        return self.client.get(reverse('polls:index'))
    def test_no_questions(self):
        """
        If no questions exist, an appropriate message is displayed.
        """
        res = self._get_index()
        self.assertEqual(res.status_code, 200)
        self.assertContains(res, "No polls are available.")
        self.assertQuerysetEqual(res.context['latest_question_list'], [])
    def test_past_question(self):
        """
        Questions with a pub_date in the past are displayed on the
        index page.
        """
        create_question(question_text="Past question.", days=-30)
        res = self._get_index()
        self.assertQuerysetEqual(
            res.context['latest_question_list'],
            ['<Question: Past question.>'],
        )
    def test_future_question(self):
        """
        Questions with a pub_date in the future aren't displayed on
        the index page.
        """
        create_question(question_text="Future question.", days=30)
        res = self._get_index()
        self.assertContains(res, "No polls are available.")
        self.assertQuerysetEqual(res.context['latest_question_list'], [])
    def test_future_question_and_past_question(self):
        """
        Even if both past and future questions exist, only past questions
        are displayed.
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        res = self._get_index()
        self.assertQuerysetEqual(
            res.context['latest_question_list'],
            ['<Question: Past question.>'],
        )
    def test_two_past_questions(self):
        """
        The questions index page may display multiple questions.
        """
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        res = self._get_index()
        self.assertQuerysetEqual(
            res.context['latest_question_list'],
            ['<Question: Past question 2.>', '<Question: Past question 1.>'],
        )
'''
1) manage.py test polls looked for tests in the polls application
2) ele encontrou uma subclasse da classe django.test.TestCase
3) ele cria um banco de dados especial com o propósito de teste
4) ele procurou por métodos de teste - aqueles cujos nomes começam com test
5) em test_was_published_recently_with_future_question é criada uma instância de Question na qual o campo pub_date está 30 dias no futuro
6) … e usando o método assertIs(), descobrimos que was_published_recently() retorna True, mas queremos que retorne False
O teste nos informa que o teste falhou e até mesmo a linha na qual a falha ocorreu.
'''
'''
para corrigir esse problema, vá ao model e adicione ao modelo Question a função
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
''' |
# Exercise: read six integers and sum only the even ones; odd values are
# ignored (though still prompted for). Prints how many evens were entered
# and their total. (Prompt/output strings are intentionally in Portuguese.)
soma=0
cont=0
for c in range(1, 7):
    n = int(input('Digite o {}º valor: '.format(c)))
    if n % 2 == 0:
        # Even value: accumulate it and count it.
        soma += n
        cont += 1
print('Foram informado {} numeros PARES e a soma foi {}'.format(cont, soma))
from music21 import *
import os
import random
def getMidisToCombine(source_location, count=4):
    """Pick `count` distinct midi filenames at random from `source_location`.

    Parameters:
        source_location: directory to list (string path).
        count: how many files to pick (default 4, matching the original
            hard-coded behaviour).

    Returns a list of bare filenames (no directory prefix).
    Raises ValueError (via random.sample) if the directory holds fewer
    than `count` entries, and OSError if the directory cannot be listed.
    """
    midisList = os.listdir(source_location)
    # sample() picks without replacement, so the result has no duplicates.
    midisToCombine = random.sample(midisList, count)
    return midisToCombine
def combineMidis(source_location, write_location, midisToCombine):
    """Merge the given midi files into one stream and write it to disk.

    Returns the path of the combined file, or None when `midisToCombine`
    is empty. The output name is the concatenation of every input name
    with its ".mid" extension stripped, plus a final ".mid".
    """
    if not midisToCombine:
        return
    # Parse the first file, then layer every remaining part on top of it
    # at offset 0 so all parts play simultaneously.
    combined = converter.parse(source_location + midisToCombine[0])
    for name in midisToCombine[1:]:
        combined.insert(0, converter.parse(source_location + name))
    # Build a unique-ish output name from the inputs (drop each ".mid").
    out_path = write_location + "".join(name[:-4] for name in midisToCombine) + ".mid"
    combined.write('midi', out_path)
    return out_path
# Driver: pick random beat fragments and combine them into one new song file.
source_location = './new_beats/'
write_location = './new_songs/'
midisToCombine = getMidisToCombine(source_location)
print(midisToCombine)
new_file_name = combineMidis(source_location, write_location, midisToCombine)
def getFeatures(source_location, midi_name):
    """Extract a feature dictionary from a midi file.

    Parameters:
        source_location: directory containing the midi (string path).
        midi_name: filename of the midi within that directory.

    Returns a dict with the analyzed 'key' (tonic name) and 'mode', plus
    occurrence counts keyed by note name, "octave-<n>" and
    "noteLength-<quarterLength>".
    """
    score = converter.parse(source_location + midi_name)
    features = {}
    key = score.analyze('key')
    features['key'] = key.tonic.name
    features['mode'] = key.mode
    for part in score:
        print(part)
        for element in part:
            # Original compared str(type(...)) against a freshly-built
            # Note("F5") on every element; isinstance is the idiom and
            # avoids constructing a throwaway Note per iteration.
            if isinstance(element, note.Note):
                # dict.get collapses the if-in/else increment pattern.
                features[element.name] = features.get(element.name, 0) + 1
                octave = "octave-" + str(element.octave)
                features[octave] = features.get(octave, 0) + 1
                length = "noteLength-" + str(element.quarterLength)
                features[length] = features.get(length, 0) + 1
    return features
# Driver: extract and print the feature dictionary for one sample midi.
source_location = './old_songs/'
midi_name = 'Piano man - Bridge.midi'
features = getFeatures(source_location, midi_name)
print(features)
"""
Часть 1. Численной дифференцирование
1. односторонние разности
2. центральная разность
3. повышенная точность в граничных точках
4. формулы Рунге
5. Выравнивающие переменные (для экспоненты)
Задается х, для которого необходимо найти производную
"""
import numpy as np
import pandas as pd
from math import exp, log
def f(x):
    """Function under study: the exponential e**x."""
    return exp(x)
def generate_table(start, end, step):
    """Tabulate f on [start, end] with the given step.

    Returns an (n, 2) numpy array of [x, f(x)] rows. The loop condition
    deliberately runs to `end + step` so the endpoint is included despite
    float accumulation.
    """
    points = []
    x = start
    while x < end + step:
        points.append([x, f(x)])
        x += step
    return np.array(points)
def get_table(filename):
    """Read a two-column table of "x y" float pairs from `filename`.

    Returns an (n, 2) numpy array. Blank lines are skipped.
    Raises ValueError on malformed rows and OSError if the file cannot
    be opened.
    """
    data = []
    # `with` guarantees the file is closed even if a row fails to parse
    # (the original leaked the handle on a parse error).
    with open(filename, 'r') as infile:
        for line in infile:
            # The original tested `if line:`, which is always true for
            # iterated lines (they keep their "\n"), so a blank line
            # crashed the unpack. strip() actually skips blanks.
            if line.strip():
                a, b = map(float, line.split())
                data.append([a, b])
    return np.array(data)
#1. one-sided differences
def diff_one_side(table):
    """Forward one-sided difference derivative at every node.

    `table` is an (n, 2) array of [x, y] rows. Returns an object array
    of length n: the last entry is None (no forward neighbour), and None
    is also stored wherever two consecutive x values coincide.
    """
    n = table.shape[0]
    result = []
    for i in range(n - 1):
        dx = table[i + 1][0] - table[i][0]
        dy = table[i + 1][1] - table[i][1]
        result.append(dy / dx if dx != 0 else None)
    result.append(None)
    return np.array(result)
#2. central difference
def diff_central(table):
    """Central difference derivative at interior nodes.

    Returns an object array of length n with None at both boundary nodes
    (and wherever the surrounding x values coincide).
    """
    n = table.shape[0]
    result = [None]
    for i in range(1, n - 1):
        dx = table[i + 1][0] - table[i - 1][0]
        dy = table[i + 1][1] - table[i - 1][1]
        result.append(dy / dx if dx != 0 else None)
    result.append(None)
    return np.array(result)
#3. increased accuracy at the boundary points
def border_derevative(table):
    """Second-order one-sided derivatives at the two boundary nodes only.

    Interior entries stay None. Uses the three-point formulas
    (-3y0 + 4y1 - y2) / (x2 - x0) on the left and its mirror on the right.
    """
    n = table.shape[0]
    result = [None] * n
    span_left = table[2][0] - table[0][0]        # equals 2h on a uniform grid
    span_right = table[n - 1][0] - table[n - 3][0]
    if span_left != 0:
        result[0] = (-3 * table[0][1] + 4 * table[1][1] - table[2][1]) / span_left
    if span_right != 0:
        result[n - 1] = (3 * table[n - 1][1] - 4 * table[n - 2][1] + table[n - 3][1]) / span_right
    return np.array(result)
# Runge correction applied to central differences
def Runge_central(table):
    """Runge-corrected central differences (step h vs 2h).

    Assumes a uniform grid; h is taken as x2 - x0. The two nodes at each
    edge have no 2h-stencil and are returned as None.
    """
    n = table.shape[0]
    h = table[2][0] - table[0][0]
    result = [None, None]
    for i in range(2, n - 2):
        est_h = (table[i + 1][1] - table[i - 1][1]) / h
        est_2h = (table[i + 2][1] - table[i - 2][1]) / (2 * h)
        # Richardson/Runge correction for a second-order scheme: /(2**2 - 1)
        result.append(est_h + (est_h - est_2h) / 3)
    result.extend([None, None])
    return np.array(result)
#4. Runge formulas (applied to one-sided differences)
def Runge(table):
    """Runge-corrected one-sided differences over a uniform grid.

    Forward differences are used for the first n-2 nodes and backward
    differences for the last two (which lack a forward 2h stencil).
    The correction order is p = 1, so the factor 2**p - 1 equals 1.
    """
    n = table.shape[0]
    h = table[1][0] - table[0][0]
    p = 1.0
    factor = 2 ** p - 1
    result = []
    for i in range(n - 2):
        est_h = (table[i + 1][1] - table[i][1]) / h
        est_2h = (table[i + 2][1] - table[i][1]) / (2 * h)
        result.append(est_h + (est_h - est_2h) / factor)
    for i in range(n - 2, n):
        est_h = (table[i][1] - table[i - 1][1]) / h
        est_2h = (table[i][1] - table[i - 2][1]) / (2 * h)
        result.append(est_h + (est_h - est_2h) / factor)
    return np.array(result)
#5. Leveling variables (for the exponential)
def ksi(x):
    """Leveling abscissa transform: the identity for the exponential case."""
    return x
def eta(y):
    """Leveling ordinate transform: ln(y) linearizes the exponential."""
    return log(y)
def leveling_variables(table):
    # Differentiate in leveling coordinates (ksi, eta) = (x, ln y), where
    # the exponential becomes a straight line, then map back via the chain
    # rule: y' = (ln y)' * y.
    new_table = np.array([[ksi(i[0]), eta(i[1])] for i in table])
    a = diff_one_side(new_table) #eta'ksi, i.e. d(ln y)/dx; last entry is None
    # Temporarily replace the trailing None so the elementwise multiply
    # below doesn't fail on None * float (a is an object array).
    a[-1] = 0
    #print(a)
    #print(table[:,1])
    a = a * table[:,1]
    # Restore the "no derivative at the last node" marker.
    a[-1] = None
    return a
# Driver: tabulate f on [0, 3] with step 0.2 and compare every estimator.
table = generate_table(0, 3, 0.2)
#table = get_table("der_table.txt") # alternatively: read the table from a file
one_side = diff_one_side(table)
central = diff_central(table)
border = border_derevative(table)
runge = Runge(table)
leveling = leveling_variables(table)
# Stack x, f(x) and all derivative estimates side by side for display.
res = np.column_stack((table, one_side, central, border, runge, leveling))
s = pd.DataFrame(res, columns=['x', 'f(x)', 'одностор.разности', 'центр.разности', 'на границах', 'Рунге', 'выр.перем.'])
print(s)
|
# Ask for an age and report whether the person has come of age (18+).
age = int(input("你的年龄是:"))
if age < 18:
    # Still a minor: report how many years remain until 18.
    print("要年满18岁才成年,你还差 " + str(18 - age) + " 岁")
else:
    print("恭喜!你成年了。")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.