#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os.path
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class VTKFrame(QtGui.QFrame):
def __init__(self, parent = None):
super(VTKFrame, self).__init__(parent)
self.vtkWidget = QVTKRenderWindowInteractor(self)
vl = QtGui.QVBoxLayout(self)
vl.addWidget(self.vtkWidget)
vl.setContentsMargins(0, 0, 0, 0)
self.ren = vtk.vtkRenderer()
self.ren.SetBackground(0.1, 0.2, 0.4)
self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
# Create the quadric function definition
quadric = vtk.vtkQuadric()
quadric.SetCoefficients(0.5, 1, 0.2, 0, 0.1, 0, 0, 0.2, 0, 0)
#quadric.SetCoefficients(0.5, 0, 0.2, 0, 0, 0, 0, 0, 0, -0.1)
# Sample the quadric function
sample = vtk.vtkSampleFunction()
sample.SetSampleDimensions(50, 50, 50)
sample.SetImplicitFunction(quadric)
sample.SetModelBounds(-1, 1, -1, 1, -1, 1)
contourFilter = vtk.vtkContourFilter()
contourFilter.SetInputConnection(sample.GetOutputPort())
#contourFilter.GenerateValues(1, 1, 1)
contourFilter.Update()
# Create a mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(contourFilter.GetOutputPort())
# Create an actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self.ren.AddActor(actor)
self.ren.ResetCamera()
self._initialized = False
def showEvent(self, evt):
if not self._initialized:
self.iren.Initialize()
self.startTimer(30)
self._initialized = True
def timerEvent(self, evt):
self.ren.GetActiveCamera().Azimuth(1)
self.vtkWidget.GetRenderWindow().Render()
class MainPage(QtGui.QMainWindow):
def __init__(self, parent = None):
super(MainPage, self).__init__(parent)
self.setCentralWidget(VTKFrame())
self.setWindowTitle("Quadrctic Suface example")
def categories(self):
return ['Implicit Function', 'Filters']
def mainClasses(self):
return ['vtkQuadric', 'vtkSampleFunction', 'vtkContourFilter']
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
w = MainPage()
w.show()
sys.exit(app.exec_())
|
#!/usr/bin/python
# -*-coding:gbk-*-
import random
import demjson
import os
def randomProxy(isPrint=True):
curr_dir = os.path.dirname(os.path.realpath(__file__))
file = open(curr_dir + os.sep + ".." + os.sep + "conf" + os.sep + "ip.json")
jsontext = file.read()
file.close()
ip_pool = demjson.decode(jsontext)
ip = ip_pool[random.randrange(0, len(ip_pool))]
proxy_ip = "{proxy}://{ip}:{port}"
proxy = "http"
proxy_ip = proxy_ip.format(proxy=proxy, ip=ip["ip"], port=ip["port"])
proxies = {proxy: proxy_ip}
if isPrint:
print(proxies)
return proxies
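# A minimal usage sketch (an addition, assuming requests is installed and
# conf/ip.json exists one level above this file); the returned dict plugs
# straight into requests:
# import requests
# resp = requests.get("http://example.com", proxies=randomProxy(isPrint=False), timeout=10)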
|
"""
This module demonstrates the WAIT-FOR-EVENT pattern implemented
by using a WHILE loop using the ITCH pattern:
Initialize as needed so that the CONDITION can be TESTED.
while <some CONDITION>: # Test the CONDITION, continue WHILE it is true.
...
...
CHange something that (eventually) affects the CONDITION.
(otherwise you will be in an infinite loop).
A small sketch of this pattern appears right after the import below.
Additionally, it provides practice at implementing classes.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Muqing Zheng. September 2015.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
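# ----------------------------------------------------------------------
# A minimal sketch (not part of the assignment) of the ITCH pattern from
# the docstring above, applied to counting decimal digits:
# ----------------------------------------------------------------------
def itch_example_count_digits(n):
    """ Returns the number of decimal digits in the positive integer n. """
    digits = 0  # Initialize so that the CONDITION can be tested.
    while n > 0:  # Test the CONDITION, continue WHILE it is true.
        digits = digits + 1
        n = n // 10  # CHange something that (eventually) affects the CONDITION.
    return digits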
# ----------------------------------------------------------------------
# DONE: 2. Your instructor will explain the specification of
# the PrimeGenerator class that you will implement. In doing so,
# she will explain:
# -- What an OPTIONAL parameter is.
# (Just how to READ it. It does not affect the code you WRITE.)
# -- What a STATIC method is and how to call it.
#
# When you believe you understand those concepts (at least mostly),
# put your name here: PUT_YOUR_NAME_HERE.
# ----------------------------------------------------------------------
def main():
""" Calls the TEST functions in this module. """
test_PrimeGenerator_init()
test_PrimeGenerator_next_prime()
test_PrimeGenerator_init_again()
test_PrimeGenerator_previous_prime()
test_smallest_prime_with_gap_2()
test_prime_gap_at_least()
def test_PrimeGenerator_init():
""" Tests the __init__ method of the PrimeGenerator class. """
# ------------------------------------------------------------------
# DONE: 3. Read these TESTS for the __init__ method of the
# PrimeGenerator class (which is defined below). Also read
# the specification of the __init__ method (below).
#
# Then ADD TWO MORE TESTS ** that you UNDERSTAND completely. **
# If you do NOT understand these tests, ASK QUESTIONS NOW.
#
# Then implement and test the __init__ method using the tests
# given in this function (plus any others that you choose to add).
#
# *** ASSUME (for now) that the argument to the __init__
# method is a PRIME number (per the tests in this function).
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the __init__ method of the PrimeGenerator class.')
print('-----------------------------------------------------------')
generator = PrimeGenerator()
print('Should be 2:', generator.current_prime)
generator = PrimeGenerator(2)
print('Should be 2:', generator.current_prime)
generator = PrimeGenerator(59)
print('Should be 59:', generator.current_prime)
generator = PrimeGenerator(71)
print('Should be 71:', generator.current_prime)
generator = PrimeGenerator(17)
print('Should be 17:', generator.current_prime)
def test_PrimeGenerator_next_prime():
""" Tests the next_prime method of the PrimeGenerator class. """
# ------------------------------------------------------------------
# DONE: 4. Read these TESTS for the next_prime method of the
# PrimeGenerator class (which is defined below). Also read
# the specification of the next_prime method (below).
#
# Then ADD TWO MORE TESTS ** that you UNDERSTAND completely. **
# If you do NOT understand these tests, ASK QUESTIONS NOW.
#
# Then implement and test the next_prime method.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the next_prime method of the PrimeGenerator class.')
print('-----------------------------------------------------------')
generator1 = PrimeGenerator()
print('Should be 2:', generator1.current_prime)
print('Should be 3:', generator1.next_prime())
print('Should be 5:', generator1.next_prime())
generator2 = PrimeGenerator(103)
print('Should be 103:', generator2.current_prime)
print('Should be 107:', generator2.next_prime())
print('Should be 109:', generator2.next_prime())
print('Should be 7:', generator1.next_prime())
for _ in range(100):
generator2.next_prime()
print('Should be 727:', generator2.current_prime)
print('Should be 733:', generator2.next_prime())
print('Should be 11:', generator1.next_prime())
generator2 = PrimeGenerator(71)
print('Should be 71:', generator2.current_prime)
print('Should be 73:', generator2.next_prime())
print('Should be 79:', generator2.next_prime())
generator2 = PrimeGenerator(2)
print('Should be 2:', generator2.current_prime)
print('Should be 3:', generator2.next_prime())
print('Should be 5:', generator2.next_prime())
def test_PrimeGenerator_init_again():
""" Tests the __init__ method of the PrimeGenerator class. """
# ------------------------------------------------------------------
# DONE: 5. Read these ** AUGMENTED ** TESTS for the __init__
# method of the PrimeGenerator class (which is defined below).
#
# These tests REMOVE the assumption that the argument to the
# __init__ method is a PRIME number.
#
# Then ADD TWO MORE TESTS ** that you UNDERSTAND completely. **
# If you do NOT understand these tests, ASK QUESTIONS NOW.
# Then RE-implement the __init__ method so that it passes
# these tests as well as the original ones.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing enhanced __init__ method of the PrimeGenerator class.')
print('-----------------------------------------------------------')
for k in range(-5, 2):
generator = PrimeGenerator(k)
print('Should print 2:', generator.current_prime)
generator = PrimeGenerator(60)
print('Should print 61:', generator.current_prime)
generator = PrimeGenerator(4)
print('Should print 5:', generator.current_prime)
generator = PrimeGenerator(70)
print('Should print 71:', generator.current_prime)
# Repeat the previous tests to be sure that they STILL work,
# since you have changed the code. This is called REGRESSION testing.
test_PrimeGenerator_init()
def test_PrimeGenerator_previous_prime():
""" Tests the previous_prime method of the PrimeGenerator class. """
# ------------------------------------------------------------------
# DONE: 6. Read these TESTS for the previous_prime method of the
# PrimeGenerator class (which is defined below). Also read
# the specification of the previous_prime method (below).
#
# Then ADD TWO MORE TESTS ** that you UNDERSTAND completely. **
# If you do NOT understand these tests, ASK QUESTIONS NOW.
#
# Then implement and test the previous_prime method.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------------')
print('Testing the previous_prime method of the PrimeGenerator class.')
print('-----------------------------------------------------------')
generator1 = PrimeGenerator(59)
print('Should be 59:', generator1.current_prime)
print('Should be 53:', generator1.previous_prime())
print('Should be 47:', generator1.previous_prime())
generator2 = PrimeGenerator(5)
print('Should be 5:', generator2.current_prime)
print('Should be 3:', generator2.previous_prime())
print('Should be 2:', generator2.previous_prime())
print('Should be None 2:',
generator2.previous_prime(), generator2.current_prime)
print('Should be None 2:',
generator2.previous_prime(), generator2.current_prime)
print('Should be 3:', generator2.next_prime())
generator3 = PrimeGenerator(733)
for _ in range(100):
generator3.previous_prime()
print('Should be 113:', generator3.current_prime)
print('Should be 109:', generator3.previous_prime())
for _ in range(100):
generator3.previous_prime()
print('Should be None 2:',
generator3.previous_prime(), generator3.current_prime)
print('Should be None 2:',
generator3.previous_prime(), generator3.current_prime)
print('Should be 3:', generator3.next_prime())
print('Should be 3:', generator3.current_prime)
generator1 = PrimeGenerator(71)
print('Should be 71:', generator1.current_prime)
print('Should be 67:', generator1.previous_prime())
print('Should be 61:', generator1.previous_prime())
generator1 = PrimeGenerator(11)
print('Should be 11:', generator1.current_prime)
print('Should be 7:', generator1.previous_prime())
print('Should be 5:', generator1.previous_prime())
class PrimeGenerator(object):
""" """
def __init__(self, starting_number=2):
"""
Precondition: the argument starting_number is an integer.
Side-effect: sets the instance variable
self.current_prime
to the smallest prime number greater than or equal to
the given argument. See the TESTING code for examples.
"""
# --------------------------------------------------------------
# DONE 3 (continued): Implement and test this method,
# using the tests supplied per TODO 3.
# IMPORTANT:
# At this point, ** ASSUME the starting_number is a PRIME. **
# --------------------------------------------------------------
self.starting_number = starting_number
if type(starting_number) == int:
if self.is_prime(starting_number):
self.current_prime = starting_number
elif starting_number < 2:
self.current_prime = 2
else:
self.current_prime = self.starting_number + 1
while not self.is_prime(self.current_prime):
self.current_prime += 1
# --------------------------------------------------------------
# DONE 5 (continued): Implement and test this method,
# using the tests supplied per TODO 5. That is,
# now REMOVE the assumption that the starting_number is a PRIME.
#
# IMPORTANT:
# Do TODO ** 4 ** (i.e., implement the next_prime method)
# BEFORE doing this TODO. It makes this TODO easier.
#
# REMINDER:
# CALL functions/methods rather than COPY-PASTING from them.
# --------------------------------------------------------------
@staticmethod
def is_prime(n):
"""
Argument: an integer.
Side effects: None.
Returns True if the given integer is prime, else returns False.
Note: since the smallest prime is 2,
this method returns False on all integers < 2.
Note: The algorithm used here is simple and clear but slow.
"""
# This method is complete - do not change it. It has no TODO.
if n < 2:
return False
for k in range(2, int(math.sqrt(n) + 0.1) + 1):
if n % k == 0:
return False
return True
def next_prime(self):
"""
Precondition: The instance variable
self.current_prime
is an integer >= 2.
Side-effect: Sets the instance variable
self.current_prime
to the smallest prime strictly greater than its current value.
Returns: the updated value of self.current_prime
"""
# --------------------------------------------------------------
# DONE 4 (continued): Implement and test this method,
# using the tests supplied per TODO 4.
# --------------------------------------------------------------
if self.current_prime >= 2:
self.current_prime = self.current_prime + 1
while not self.is_prime(self.current_prime):
self.current_prime += 1
return self.current_prime
def previous_prime(self):
"""
Precondition: the instance variable
self.current_prime
is a prime integer >= 2.
Side-effect:
-- If the current value of self.current_prime is 2,
this method leaves self.current_prime unchanged (i.e., 2).
-- Otherwise, this method sets the instance variable
self.current_prime
to the smallest prime strictly less than its current value.
Returns:
-- If the current value of self.current_prime is 2,
returns the keyword None.
-- Otherwise, returns the updated value of self.current_prime.
"""
# --------------------------------------------------------------
# DONE 6 (continued): Implement and test this method,
# using the tests supplied per TODO 6.
# --------------------------------------------------------------
if self.current_prime == 2:
return None
elif self.current_prime > 2:
self.current_prime -= 1
while not self.is_prime(self.current_prime):
self.current_prime -= 1
return self.current_prime
def test_smallest_prime_with_gap_2():
""" Tests the smallest_prime_with_gap_2 function. """
# ------------------------------------------------------------------
# DONE: 7. Read these TESTS for the smallest_prime_with_gap_2
# function. Add ** TWO ** more tests of your own.
#
# Then implement and test the smallest_prime_with_gap_2 function.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the smallest_prime_with_gap_2 function:')
print('--------------------------------------------------')
expected = 11
actual = smallest_prime_with_gap_2(10)
print('Expected, actual:', expected, actual)
expected = 11
actual = smallest_prime_with_gap_2(11)
print('Expected, actual:', expected, actual)
expected = 17
actual = smallest_prime_with_gap_2(13)
print('Expected, actual:', expected, actual)
expected = 137
actual = smallest_prime_with_gap_2(109)
print('Expected, actual:', expected, actual)
expected = 1019
actual = smallest_prime_with_gap_2(1000)
print('Expected, actual:', expected, actual)
expected = 101
actual = smallest_prime_with_gap_2(100)
print('Expected, actual:', expected, actual)
expected = 29
actual = smallest_prime_with_gap_2(20)
print('Expected, actual:', expected, actual)
def smallest_prime_with_gap_2(m):
"""
Returns the smallest prime P >= m for which P + 2 is also prime, that is,
the smallest prime P >= m whose gap (to the next prime) is 2.
Precondition: m is an integer.
"""
# ------------------------------------------------------------------
# DONE 7 (continued). AFTER adding YOUR tests to the TESTING code
# above, implement and test this smallest_prime_with_gap_2 function.
#
# IMPORTANT: Use a PrimeGenerator to help solve this problem!
# -----------------------------------------------------------------
if type(m) == int:
generator = PrimeGenerator(m)
while not generator.is_prime(generator.current_prime + 2):
generator.next_prime()
return generator.current_prime
def test_prime_gap_at_least():
""" Tests the prime_gap_at_least function. """
# ------------------------------------------------------------------
# DONE: 8. Read these TESTS for the prime_gap_at_least function.
# Add ** TWO ** more tests of your own.
#
# Then implement and test the prime_gap_at_least function.
#
# IMPORTANT: Use a PrimeGenerator to help solve this problem!
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the prime_gap_at_least function:')
print('--------------------------------------------------')
expected = 2
actual = prime_gap_at_least(2, 1)
print('Expected, actual:', expected, actual)
expected = 5
actual = prime_gap_at_least(5, 1)
print('Expected, actual:', expected, actual)
expected = 5
actual = prime_gap_at_least(5, 2)
print('Expected, actual:', expected, actual)
expected = 7
actual = prime_gap_at_least(5, 4)
print('Expected, actual:', expected, actual)
expected = 23
actual = prime_gap_at_least(5, 6)
print('Expected, actual:', expected, actual)
expected = 15683
actual = prime_gap_at_least(5, 44)
print('Expected, actual:', expected, actual)
expected = 151
actual = prime_gap_at_least(150, 4)
print('Expected, actual:', expected, actual)
expected = 113
actual = prime_gap_at_least(100, 5)
print('Expected, actual:', expected, actual)
def prime_gap_at_least(m, gap_size):
"""
Returns the smallest P such that:
-- P is a prime number >= m
-- The smallest prime strictly bigger than P
is at least P + gap_size.
That is, returns the smallest prime that is at least m for which
the "gap" to the next prime is at least the given gap_size.
Preconditions: m and gap_size are positive integers.
"""
# ------------------------------------------------------------------
# DONE 8 (continued). AFTER adding YOUR tests to the TESTING code
# above, implement and test this prime_gap_at_least function.
#
# IMPORTANT: Use a PrimeGenerator to help solve this problem!
# ----------------------------------------------------------------
generator = PrimeGenerator(m)
previous = generator.current_prime
while generator.next_prime() < previous + gap_size:
previous = generator.current_prime
return previous
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
import requests
from bs4 import BeautifulSoup
print "THIS IS FROM TOI- BUSINESS" + '\n'
r = requests.get("http://timesofindia.indiatimes.com/business")
soup = BeautifulSoup(r.content)
linksTOImain = soup.find_all("div",{"class":"ct1stry"})
for item in linksTOImain:
for kuch in item.contents:
print kuch.text.split('|')[0]
print kuch.text.split('|')[-1].split(';')[-1] + '\n'
print "THIS IS FROM TOI- BUSINESS - INDIAN BUSINESS" + '\n'
r = requests.get("http://timesofindia.indiatimes.com/business/india-business")
soup = BeautifulSoup(r.content)
linksTOImain = soup.find_all("div",{"class":"ct1stry"})
for item in linksTOImain:
for kuch in item.contents:
print kuch.text.split('|')[0]
print kuch.text.split('|')[-1].split(';')[-1] + '\n'
print "This is from TOI- BUSINESS - INTERNATIONAL" + '\n'
r = requests.get("http://timesofindia.indiatimes.com/business/international-business")
soup = BeautifulSoup(r.content)
linksTOImain = soup.find_all("div",{"class":"ct1stry"})
for item in linksTOImain:
for kuch in item.contents:
print kuch.text.split('|')[0]
print kuch.text.split('|')[-1].split(';')[-1] + '\n'
r = requests.get("http://www.thehindu.com/sci-tech/")
soup = BeautifulSoup(r.content)
latest = soup.find_all("div",{"class":"headlines"})
print "latest Headlines from the Hindu"
for item in latest:
print item.text
print "most popular Headlines from the Hindu"
mostpopular = soup.find_all('div',{"class":"tab1 tab"})
for item in mostpopular:
print item.text
print "most commented Headlines from the Hindu"
mostcommented =soup.find_all('div',{"class":"tab2 tab"})
for item in mostcommented:
print item.text
print '\n'+ "technology column " + '\n'
technology = soup.find_all('div',{"class":"smltitle1"})
for item in technology:
print item.text
|
"""
子图:
"""
from matplotlib import pyplot as plt
import numpy as np
x = np.arange(1, 4)
# Method 1: the add_subplot() method
fig1 = plt.figure()
# Creates a 2x2 grid of subplots; index 1 is the top-left cell, index 2 the top-right
ax1 = fig1.add_subplot(2, 2, 1)
# ax1 = fig1.add_subplot(221)  # equivalent when every index number is less than 10
ax1.plot(x, x**2)
# Method 2: plt.subplot(), similar to method 1 (see the brief sketch below)
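# A brief sketch of method 2 (an addition, not in the original script):
# plt.subplot() addresses the current pyplot figure directly instead of an
# explicit Figure object.
# plt.subplot(2, 2, 2)
# plt.plot(x, x**3)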
# Method 3: the plt.axes() function
fig2 = plt.figure()
# ax2 = plt.axes()  # fills the entire fig2 figure
# ax2 = plt.axes((0.1, 0.1, 0.5, 0.5), facecolor='k')
# Specifies the exact position; facecolor sets the background color
# Method 4: the fig2.add_axes() function
ax3 = fig2.add_axes((0.1, 0.1, 0.5, 0.5), facecolor='w')
plt.show()
|
__author__ = 'Elisabetta Ronchieri'
VERSION = (2, 0, 1, 13)
def get_version():
version = '%s.%s.%s-%s' % (VERSION[0], VERSION[1], VERSION[2], VERSION[3])
from tstorm.utils.version import get_svn_revision
svn_rev = get_svn_revision()
if svn_rev != u'SVN-unknown':
version = "%s %s" % (version, svn_rev)
return version
def get_release():
release = VERSION[3]
return release
def get_storm_release():
return '1.11.0-1'
|
import urllib.request
import requests
import bs4
proxies=[]
def get(url: str, header: dict = None):
"""
Perform an HTTP GET request.
:param url: URL to fetch
:param header: HTTP headers
:return: raw response content
"""
if header is None:
header = {}
header['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
header['Accept-Language'] = 'zh-CN,zh;q=0.8,en;q=0.6'
header['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2883.103 Safari/537.36'
content = requests.get(url, headers=header).content
#req = urllib.request.Request(url, headers=header)
#return urllib.request.urlopen(req).read()
return content
def getProxyIps():
url = 'http://www.xicidaili.com/'
headers = {
"User-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
}
content = requests.get(url, headers=headers).content
soup = bs4.BeautifulSoup(content, "html.parser")
List = soup.findAll('tr')
# print(content)
for item in List:
Th = item.findAll('td')
# ip=Th[2]
if len(Th) > 0:
proxies.append(Th[1].text)
return proxies
|
import sys
import json
import queue
f = open(sys.argv[1], 'r')
lines = f.read()
lines = lines.replace('\n', '')
lines = lines.split(';')
pipt = []
popt = []
assign = []
submodule = {}
jsobj = json.load(open(sys.argv[2]))
sdfobj = json.load(open(sys.argv[3]))
ipt = {}
wire_connect = {}
que = queue.Queue()
inv_que = queue.Queue()
for rawline in lines[1:-1]:
line = rawline.strip(' ')
if line.startswith('input'):
x = line.lstrip('input')
x = x.replace(' ', '')
x = x.split(',')
for element in x:
xx = element
if element.startswith('['):
t, s = element.split(']')[0].split(':')
xx = element.split(']')[1]
for i in range(int(s), int(t[1:])+1):
ipt[(xx+'['+str(i)+']')] = 'primary_input:'+(xx+'['+str(i)+']')
pipt.append(xx+'['+str(i)+']')
else:
ipt[xx] = 'primary_input:'+xx+'[0]'
pipt.append(xx+'[0]')
elif line.startswith('output'):
x = line.lstrip('output')
x = x.replace(' ', '')
x = x.split(',')
for element in x:
xx = element
if element.startswith('['):
t, s = element.split(']')[0].split(':')
xx = element.split(']')[1]
for i in range(int(s), int(t[1:])+1):
popt.append(xx+'['+str(i)+']')
else:
popt.append(xx)
elif line.startswith('wire'):
continue
elif line.startswith('assign'):
x = line.lstrip('assign')
x = x.replace(' ', '')
x = x.split('=')
assign.append(x)
ipt[x[0]] = 'assign:'+x[1]
else:
module, inout = line.split('(', 1)
module = module[:-1].split(' ')
if jsobj[module[0]]['seq'] == 0:
cur = {}
cur['motype'] = module[0]
inout = inout[:-1]
inout = inout.replace(' ', '')
inout = inout.split(',')
in_sp, out_sp = [], []
for x in inout:
tmp = x[1:-1]
tmp = tmp.split('(')
if len(tmp) == 1:
continue
if tmp[1].startswith('{'):
continue
cur[tmp[1]] = tmp[0]
if jsobj[module[0]][tmp[0]] == 'input':
if tmp[1] in wire_connect.keys():
wire_connect[tmp[1]].append(module[1])
else:
wire_connect[tmp[1]] = [module[1]]
in_sp.append(tmp[1])
elif jsobj[module[0]][tmp[0]] == 'output':
ipt[tmp[1]] = module[1]+':'+tmp[0]
out_sp.append(tmp[1])
else:
print('err')
cur['input'] = in_sp
cur['output'] = out_sp
submodule[module[1]] = cur
for x in submodule.keys():
inp = submodule[x]['input']
cnt = 0
for y in inp:
if "'b" not in y and y not in ipt.keys():
if y.endswith(']'):
ipt[y] = 'pseudo_primary_input:'+y
pipt.append(y)
else:
ipt[y] = 'pseudo_primary_input:'+y+'[0]'
pipt.append(y+'[0]')
if "'b" not in y and not ipt[y].startswith('primary_input') and not ipt[y].startswith('pseudo_primary_input') and not ipt[y].startswith('assign'):
cnt += 1
submodule[x]['in_order'] = cnt
if cnt == 0:
submodule[x]['lev'] = 1
que.put(x)
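# Modules whose inputs all come from primary/pseudo-primary inputs or assigns start at
# level 1; the loop below then levels the rest topologically: each time a module is
# written out, the in_order count of every module fed by its outputs is decremented,
# and modules that reach zero are enqueued one level deeper.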
f.close()
f = open(sys.argv[4], 'w')
# for line in pipt:
# f.write(line+'\n')
f.write("*****************\n")
for line in popt:
f.write(line+'\n')
f.write("*****************\n")
for line in assign:
res = " ".join(line)
f.write(res+'\n')
f.write("*****************\n")
cur_lev, split_group = 1, 0
while not que.empty():
cur_mod = que.get()
if submodule[cur_mod]['lev'] > cur_lev or split_group == 8:
cur_lev = submodule[cur_mod]['lev']
split_group = 0
f.write('\n')
res = cur_mod + " " + submodule[cur_mod]['motype'] + " " + str(submodule[cur_mod]['lev']) + " "
inp = submodule[cur_mod]['input']
for s, i in enumerate(inp):
if s > 0:
res += ","
res += submodule[cur_mod][i]
if i in ipt.keys():
res += "("+ipt[i]+")"
else:
res += "("+i+")"
res += " "
out = submodule[cur_mod]['output']
for s, o in enumerate(out):
if s > 0:
res += ","
res += (submodule[cur_mod][o]+"("+o)
if not o.endswith(']'):
res += '[0]'
res += ")"
if o in popt:
continue
if o not in wire_connect.keys():
continue
for m in wire_connect[o]:
submodule[m]['in_order'] -= 1
if submodule[m]['in_order'] == 0:
submodule[m]['lev'] = submodule[cur_mod]['lev']+1
que.put(m)
delay_dict = sdfobj[cur_mod]
res += ' '
for pin, delay in delay_dict.items():
pin_in, pin_out = pin.split(',')
res += ('['+pin_in+'-'+pin_out+':')
if type(delay) is list:
res += (str(delay[0])+'-'+str(delay[1]))
else:
res += ('"0"'+str(delay["0"][0])+'-'+str(delay["0"][1])+',"1"'+str(delay["1"][0])+'-'+str(delay["1"][1]))
res += ']'
res += "\n"
if jsobj[submodule[cur_mod]['motype']]['seq'] == 0:
split_group += 1
f.write(res)
f.close()
|
# Strategies (rows) of the payoff matrix
N = 3
A1 = [30 + N, 10, 20, 25 + N/2]
A2 = [50, 70 - N, 10 + N/2, 25]
A3 = [25 - N/2, 35, 40, 60 - N/2]
matrix_A = [A1, A2, A3]
# Probabilities of the opponent's outcomes
q1 = 0.3
q2 = 0.2
q3 = 0.4
q4 = 0.1
q = [q1, q2, q3, q4]
# Function that finds the optimal strategy
def find_strategy(strategies):
maximum = strategies[0]
ind = 0
for i in range(1, len(strategies)):
if strategies[i] > maximum:
maximum = strategies[i]
ind = i
return ind + 1, maximum
# Bayes criterion
def for_Bayes_and_Lapl_cr(A, q=q):
summ = 0
i = 0
for element in A:
summ += element*q[i]
i += 1
return summ
strategies_bayes = [for_Bayes_and_Lapl_cr(A1), for_Bayes_and_Lapl_cr(A2), for_Bayes_and_Lapl_cr(A3)]
print(f'By the Bayes criterion we choose strategy {find_strategy(strategies_bayes)[0]} with value', find_strategy(strategies_bayes)[1])
# Laplace criterion
P = 4
q2 = [1/P for i in range(P)]
strategies_laplas = [for_Bayes_and_Lapl_cr(A1, q2), for_Bayes_and_Lapl_cr(A2, q2), for_Bayes_and_Lapl_cr(A3, q2)]
print(f'By the Laplace criterion we choose strategy {find_strategy(strategies_laplas)[0]} with value', find_strategy(strategies_laplas)[1])
# Wald criterion (maximin): take the worst payoff of each strategy, then pick the strategy with the best of these
def Vald_cr(matrix_A):
row_minima = [min(row) for row in matrix_A]
return find_strategy(row_minima)
print(f'By the Wald criterion we choose strategy {Vald_cr(matrix_A)[0]} with value', Vald_cr(matrix_A)[1])
# Savage criterion
# Build the risk (regret) matrix
# Columns
def columns(n, matrix_A=matrix_A, A2=A2):
return [A2[n] - matrix_A[i][n] for i in range(len(matrix_A))]
ri1 = columns(0)
ri2 = columns(1)
ri3 = columns(2, A2=A3)
ri4 = columns(3, A2=A3)
matr = [ri1, ri2, ri3, ri4]
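# A general alternative sketch (an addition): the regret matrix can be built column by
# column as r_ij = max_i(a_ij) - a_ij, without hardcoding which row holds each column maximum.
matr_general = [[max(row[j] for row in matrix_A) - matrix_A[i][j] for i in range(len(matrix_A))] for j in range(len(matrix_A[0]))]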
def Sev_cr(n, matr=matr):
row = []
for i in range(len(matr)):
row.append(matr[i][n])
return max(row)
Sev = [Sev_cr(0), Sev_cr(1), Sev_cr(2)]
minim = min(Sev)
index_min = min(range(len(Sev)), key=Sev.__getitem__)
print(f'By the Savage criterion we choose strategy {index_min+1} with value', minim)
# Hurwicz criterion
# Choose y = 0.5
def Gurv_cr(A, y=0.5):
return y*min(A) + (1-y)*max(A)
Gurv = [Gurv_cr(A1), Gurv_cr(A2), Gurv_cr(A3)]
index_max = max(range(len(Gurv)), key=Gurv.__getitem__)
print(f'By the Hurwicz criterion we choose strategy {index_max+1} with value', max(Gurv))
|
import os
import cv2
def generate_video_labels():
video_content = '''
<video id="video" controls="controls" preload="none" width="300" height="300" poster='{}'>
<source id="mp4" src="{}" type="video/mp4">
<p>Your user agent does not support the HTML5 Video element.</p>
</video>
'''
content = ''
for fn in os.listdir('./'):
if fn.endswith('_scaled.mp4'):
content += video_content.format(fn.replace('.mp4', '.jpg'), fn)
with open('temp.txt', 'w+') as f:
f.writelines(content)
def extract_poster():
for fn in os.listdir('./'):
if fn.endswith('_scaled.mp4'):
cap = cv2.VideoCapture(fn)
ret, frame = cap.read()
cv2.imwrite(fn.replace('.mp4', '.jpg'), frame)
print(frame.shape)
def scale_video():
for fn in os.listdir('./'):
if fn.endswith('.mp4') and not fn.endswith('_scaled.mp4'):
#ffmpeg -i video--7T50tAIrg_epoch_0.mp4 -vf scale=300:300 -aspect 1:1 -acodec aac -vcodec h264 -max_muxing_queue_size 1024 out.mp4
cmd = 'ffmpeg -i {} -vf scale=300:300 -aspect 1:1 -acodec aac -vcodec h264 -max_muxing_queue_size 1024 {}'.format(fn, fn.replace('.mp4', '_scaled.mp4'))
os.system(cmd)
#scale_video()
generate_video_labels()
extract_poster()
|
# from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.urls import reverse_lazy
# from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import (ListView, TemplateView, DetailView, CreateView, UpdateView, DeleteView)
from .models import Post, Comment
from .forms import PostForm, CommentForm
# Create your views here.
class IndexView(TemplateView):
template_name = 'index.html'
class AboutView(TemplateView):
template_name = 'about.html'
class PostListView(ListView):
model = Post
def get_queryset(self):
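# List only posts whose published_date has already passed; unpublished drafts
# (null published_date) are excluded by the __lte filter.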
return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
class PostDetailView(DetailView):
model = Post
class CreatePostView(LoginRequiredMixin, CreateView):
# Not logged in
login_url = '/login/'
# Logged in redirect to
redirect_field_name = 'blog/post_detail.html'
form_class = PostForm
model = Post
class PostUpdateView(LoginRequiredMixin, UpdateView):
login_url = '/login/'
redirect_field_name = 'blog/post_detail.html'
form_class = PostForm
model = Post
class PostDeleteView(LoginRequiredMixin, DeleteView):
model = Post
success_url = reverse_lazy('blog:post_list')
class DraftListView(LoginRequiredMixin, ListView):
login_url = '/login/'
redirect_field_name = 'blog/post_list.html'
template_name = 'blog/post_draft_list.html'
model = Post
def get_queryset(self):
return Post.objects.filter(published_date__isnull=True).order_by('created_date')
###### FUNCTION VIEWS FOR THE COMMENTS ######
@login_required
def post_publish(request, pk):
post = get_object_or_404(Post, pk = pk)
post.publish()
return redirect('blog:post_detail', pk = pk)
@login_required
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk = pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
# Connect the comment to the post object
comment.post = post
comment.save()
return redirect('blog:post_detail', pk = post.pk)
else:
form = CommentForm()
return render(request, 'blog/comment_form.html', {'form': form})
@login_required
def comment_approve(request, pk):
comment = get_object_or_404(Comment, pk = pk)
# The approve is from the comment module file
comment.approve()
return redirect('blog:post_detail', pk = comment.post.pk)
@login_required
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk = pk)
post_pk = comment.post.pk
# The post PK was saved above, so we can still redirect to the post after deleting the comment
comment.delete()
return redirect('blog:post_detail', pk = post_pk)
|
def prime_number():
for num in range(1,101):
if num>1:
for i in range(2,num):
if num % i==0:
break
else:
print(num)
def palindrome_checker():
string=input('Enter a String. ')
new_str=string[::-1]
if string==new_str:
print('{} is a palindrome string.'.format(string))
else:
print('{} is not a palindrome string.'.format(string))
def character_counter():
string=input('Enter a String.')
letter_count={}
for i in string:
count=0
for j in range(len(string)):
if i==string[j]:
count+=1
letter_count[i]=count
print(letter_count)
def main():
while (1):
print('1. Print the prime numbers from 1 to 100')
print('2. Check whether a string is a palindrome.')
print('3. Count the number of characters in a string')
print('4. Exit')
num=int(input('Enter a choice: '))
if num==1:
prime_number()
elif num==2:
palindrome_checker()
elif num==3:
character_counter()
elif num==4:
break
else:
print('*** Invalid Choice ***')
if __name__=='__main__':
main()
|
# coding: utf-8
import datetime
import os
import numpy as np
import tensorflow as tf
import time
import cfg
from word2vec import W2VModelManager
from data_helpers import load_csv
from source.text_cnn.text_cnn import TextCNN
# Parameters
tf.flags.DEFINE_float('dev_sample_percentage', .005, 'Fraction of the data used as the dev set')
tf.flags.DEFINE_string('train_words_file', cfg.DATA_PATH + 'train_words_clean.csv', 'CSV file with the training word lists')
tf.flags.DEFINE_string('w2v_model', cfg.MODEL_PATH + 'word2vec_model/' + 'sg.w2v', 'word2vec model')
tf.flags.DEFINE_integer('num_classes', 2, 'Number of classes')
tf.flags.DEFINE_integer('sequence_length', 256, 'Number of words per document')
tf.flags.DEFINE_integer('embedding_size', 200, 'Word embedding dimensionality')
tf.flags.DEFINE_string('filter_sizes', '2,3,4,5,6,7,8', 'Filter sizes')
tf.flags.DEFINE_integer('num_filters', 256, 'Number of filters per filter size')
tf.flags.DEFINE_float('dropout_keep_prob', 0.5, 'Dropout keep probability')
tf.flags.DEFINE_float('l2_reg_lambda', 0.0, 'L2 regularization lambda')
tf.flags.DEFINE_float('learning_rate', 0.0005, 'learning rate')
tf.app.flags.DEFINE_integer("decay_steps", 6000, "how many steps before decay learning rate.")
tf.app.flags.DEFINE_float("decay_rate", 0.65, "Rate of decay for learning rate.")
tf.flags.DEFINE_integer('batch_size', 32, 'Train Batch Size')
tf.flags.DEFINE_integer('evl_batch_size', 256, 'Evl Batch Size')
tf.flags.DEFINE_integer('num_epochs', 200, 'epoch')
tf.flags.DEFINE_integer('evaluate_every', 300, 'Evaluate the model after this many steps')
tf.flags.DEFINE_integer('checkpoint_every', 300, 'Save a checkpoint after this many steps')
tf.flags.DEFINE_integer('num_checkpoints', 5, 'Number of checkpoints to store')
# Misc Parameters
tf.flags.DEFINE_boolean('allow_soft_placement', True, 'Allow device soft device placement')
tf.flags.DEFINE_boolean('log_device_placement', False, 'Log placement of ops on devices')
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print('\nParameters:')
for attr, value in sorted(FLAGS.__flags.items()):
print('{}={}'.format(attr.upper(), value))
print('='*120)
words_df = load_csv(FLAGS.train_words_file)[:200000]
words_df = words_df.sample(frac=1) # shuffle
TRAIN_WORDS_DF = words_df[0:int(words_df.shape[0] * (1 - FLAGS.dev_sample_percentage))]
EVL_WORDS_DF = words_df[int(words_df.shape[0] * (1 - FLAGS.dev_sample_percentage)):]
print('Total examples (train + dev):', words_df.shape[0])
print('Training set examples:', TRAIN_WORDS_DF.shape[0])
print('Dev set examples:', EVL_WORDS_DF.shape[0])
print('='*120)
w2vm = W2VModelManager()
w2v = w2vm.load_model()
print('word2vec model info:', w2v)
def train_batch_iter(train_words_df):
data_size = train_words_df.shape[0]
num_batches_per_epoch = int((data_size - 1) / FLAGS.batch_size) + 1
print('Training set examples:', data_size)
print('Batches per training epoch:', num_batches_per_epoch)
for epoch in range(FLAGS.num_epochs):
# Reshuffle the data at every epoch
train_words_df = train_words_df.sample(frac=1)
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * FLAGS.batch_size
end_index = min((batch_num + 1) * FLAGS.batch_size, data_size)
X = []
y = []
for n in range(start_index, end_index):
words = []
word_num = 0
words.extend(train_words_df.iloc[n]['content'].split())
for word in words:
try:
X.extend(w2v[word])
except:
X.extend([0] * FLAGS.embedding_size)
word_num += 1
if word_num == FLAGS.sequence_length:
break
X.extend([0] * (FLAGS.embedding_size * (FLAGS.sequence_length - word_num)))
if train_words_df.iloc[n]['label'] == 'POSITIVE':
y.append(1)
y.append(0)
else:
y.append(0)
y.append(1)
X_train = np.array(X).reshape(-1, FLAGS.sequence_length, FLAGS.embedding_size, 1)
y_train = np.array(y).reshape(-1, 2)
yield X_train, y_train
def evl_batch_iter(evl_words_df):
data_size = evl_words_df.shape[0]
num_batches = int((data_size - 1) / FLAGS.evl_batch_size) + 1
print('Dev set examples:', data_size)
print('Dev set batches:', num_batches)
# evl_words_df = evl_words_df.sample(frac=1)
for batch_num in range(num_batches):
start_index = batch_num * FLAGS.evl_batch_size
end_index = min((batch_num + 1) * FLAGS.evl_batch_size, data_size)
X = []
y = []
for n in range(start_index, end_index):
words = []
word_num = 0
words.extend(evl_words_df.iloc[n]['content'].split())
for word in words:
try:
X.extend(w2v[word])
except:
X.extend([0] * FLAGS.embedding_size)
word_num += 1
if word_num == FLAGS.sequence_length:
break
X.extend([0] * (FLAGS.embedding_size * (FLAGS.sequence_length - word_num)))
if evl_words_df.iloc[n]['label'] == 'POSITIVE':
y.append(1)
y.append(0)
else:
y.append(0)
y.append(1)
X_evl = np.array(X).reshape(-1, FLAGS.sequence_length, FLAGS.embedding_size, 1)
y_evl = np.array(y).reshape(-1, 2)
yield X_evl, y_evl
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
sequence_length=FLAGS.sequence_length,
num_classes=FLAGS.num_classes,
embedding_size=FLAGS.embedding_size,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(','))),
num_filters=FLAGS.num_filters,
l2_reg_lambda=FLAGS.l2_reg_lambda)
# Define the training procedure
global_step = tf.Variable(0, name='global_step', trainable=False)  # total number of training steps taken
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram('{}/grad/hist'.format(v.name), g)
sparsity_summary = tf.summary.scalar('{}/grad/sparsity'.format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
timestamp = str(int(time.time()))
# out_dir = os.path.abspath(os.path.join(cfg.TEXT_CNN_PATH, 'runs', timestamp))
out_dir = cfg.TEXT_CNN_PATH
print('Writing to {}\n'.format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar('loss', cnn.loss)
acc_summary = tf.summary.scalar('accuracy', cnn.accuracy)
# Train Summaries
train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, 'summaries', 'train')
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev')
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Create the checkpoint directory if it does not already exist
checkpoint_dir = os.path.abspath(os.path.join(out_dir, 'checkpoints'))
checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
}
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
feed_dict,
)
time_str = datetime.datetime.now().isoformat()
print('{}: step {}, loss {:g}, acc {:g}'.format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy = sess.run(
[global_step, dev_summary_op, cnn.loss, cnn.accuracy],
feed_dict
)
time_str = datetime.datetime.now().isoformat()
print('{}: step {}, loss {:g}, acc {:g}'.format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
return accuracy, loss
batches = train_batch_iter(TRAIN_WORDS_DF)
for batch_train in batches:
x_batch, y_batch = batch_train[0], batch_train[1]
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0: # check whether it is time to evaluate the model
print('\nEvaluation:')
accuracy_total = 0
loss_total = 0
num = 0
for batch_evl in evl_batch_iter(EVL_WORDS_DF):
accuracy, loss = dev_step(batch_evl[0], batch_evl[1], writer=dev_summary_writer)
accuracy_total += accuracy
loss_total += loss
num += 1
print('loss mean: %s, accuracy mean: %s' % (loss_total/float(num), accuracy_total/float(num)))
print('')
if current_step % FLAGS.checkpoint_every == 0: # check whether it is time to save a checkpoint
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print('Saved model checkpoint to {}\n'.format(path))
|
from datetime import datetime as dt
from utils.objects import Map
def is_api_available(data: Map) -> bool:
""" Функция проверяет доступность АПИ по временному интервалу
:param data: Объект Map с настройками
:return: bool
"""
dt_from = data.get('unavailable_from_dt', '')
if dt_from:
dt_to = data.get('unavailable_to_dt', '3000-01-01 00:00:00')
dt_from = dt.strptime(dt_from, '%Y-%m-%d %H:%M:%S')
dt_to = dt.strptime(dt_to, '%Y-%m-%d %H:%M:%S')
if dt_from < dt.now() < dt_to:
return False
return True
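# A minimal usage sketch (an addition; assumes Map wraps a plain dict of settings and exposes .get()):
# settings = Map({'unavailable_from_dt': '2024-01-01 00:00:00', 'unavailable_to_dt': '2024-01-02 00:00:00'})
# print(is_api_available(settings))  # False while now() falls inside the window, True otherwise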
|
# encoding=utf8
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import dataflow_pipeline.claro.claro_campanas_beam as claro_campanas_beam
import dataflow_pipeline.claro.claro_seguimiento_beam as claro_seguimiento_beam
import os
import socket
claro_api = Blueprint('claro_api', __name__)
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@claro_api.route("/archivos_campana")
def archivos_campana():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Claro/Campanas/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
id_campana = archivo[9:-4]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-telefonia')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('claro_campanas/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is uploaded to Cloud Storage, delete the matching records from BigQuery
# deleteQuery = "DELETE FROM `contento-bi.telefonia.claro_campanas` WHERE fecha = '" + mifecha + "'"
# # First delete every record that contains that date
# client = bigquery.Client()
# query_job = client.query(deleteQuery)
#result = query_job.result()
# query_job.result() # Run the BigQuery data-deletion job
# With the BigQuery deletion and the Cloud Storage upload done, run the Dataflow job
mensaje = claro_campanas_beam.run('gs://ct-telefonia/claro_campanas/' + archivo, mifecha, id_campana)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Claro/Campanas/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
##############################################################################################################################################################################################################################################################################
@claro_api.route("/archivos_seguimiento")
def archivos_seguimiento():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Claro/result/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:21]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-telefonia')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('seguimiento_claro/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is uploaded to Cloud Storage, delete the matching records from BigQuery
# deleteQuery = "DELETE FROM `contento-bi.telefonia.claro_campanas` WHERE fecha = '" + mifecha + "'"
# # First delete every record that contains that date
# client = bigquery.Client()
# query_job = client.query(deleteQuery)
#result = query_job.result()
# query_job.result() # Run the BigQuery data-deletion job
# With the BigQuery deletion and the Cloud Storage upload done, run the Dataflow job
mensaje = claro_seguimiento_beam.run('gs://ct-telefonia/seguimiento_claro/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Claro/result/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
#####################################################################################################################
@claro_api.route("/cdr_claro_tabla")
def cdr_claro_tabla():
mi_query = '''
INSERT INTO `contento-bi.Claro.cdr_claro_tabla`
(SELECT A.* FROM `contento-bi.telefonia.cdr_partition` A
LEFT JOIN `contento-bi.Claro.cdr_claro_tabla` B ON A.id_call = B.id_call
WHERE A.fecha_partition = CURRENT_DATE ('America/Guayaquil') AND A.operation IN ( 'intcob-claro-movil2' , 'intcob-claro-fijas' )
AND B.id_call IS NULL
)
'''
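# The LEFT JOIN ... IS NULL condition acts as an anti-join: only rows from the partitioned
# source table whose id_call is not already present in the target table get inserted.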
# Procedure that runs the queries.
client = bigquery.Client()
query_job = client.query(mi_query)
query_job.result() # Execute in BigQuery
return "innserta-a-cdr-claro-tabla!"
#####################################################################################################################
@claro_api.route("/llamadas_report_claro_tabla")
def llamadas_report_tabla():
mi_query = '''
INSERT INTO `contento-bi.Claro.llamadas_report_claro_tabla`
(SELECT A.* FROM `contento-bi.telefonia.llamadas_report_partition` A
LEFT JOIN `contento-bi.Claro.llamadas_report_claro_tabla` B ON A.id_call = B.id_call
WHERE A.fecha_partition = CURRENT_DATE ('America/Guayaquil') AND A.operation IN ( 'intcob-claro-movil2' , 'intcob-claro-fijas' )
AND B.id_call IS NULL
)
'''
# Procedure that runs the queries.
client = bigquery.Client()
query_job = client.query(mi_query)
query_job.result() # Execute in BigQuery
return "innserta-a-llamadas_report-claro-tabla!"
#####################################################################################################################
@claro_api.route("/detalle_predictivo_claro_tabla")
def detalle_predictivo_claro_tabla():
mi_query = '''
INSERT INTO `contento-bi.Claro.detalle_predictivo_claro_tabla`
(SELECT A.* FROM `contento-bi.telefonia.detalle_predictivo_partition` A
LEFT JOIN `contento-bi.Claro.detalle_predictivo_claro_tabla` B ON A.id_call = B.id_call
WHERE CAST(concat(substr(A.fecha,0,4),'-',substr(A.fecha,5,2),'-',substr(A.fecha,7,2)) AS DATE) = CURRENT_DATE ('America/Guayaquil') AND A.IPDIAL_CODE IN ( 'intcob-claro-movil2' , 'intcob-claro-fijas' )
AND B.id_call IS NULL
)
'''
# Procedure that runs the queries.
client = bigquery.Client()
query_job = client.query(mi_query)
query_job.result() # Execute in BigQuery
return "innserta-a-detalle_predictivo-claro-tabla!"
#####################################################################################################################
@claro_api.route("/cdr_unconnected_claro_tabla")
def cdr_unconnected_tabla():
mi_query = '''
INSERT INTO `contento-bi.Claro.cdr_unconnected_claro_tabla`
(SELECT A.* FROM `contento-bi.telefonia.cdr_unconnected_partition` A
LEFT JOIN `contento-bi.Claro.cdr_unconnected_claro_tabla` B ON A.tel = B.tel AND A.date = B.date
WHERE A.fecha_partition = CURRENT_DATE ('America/Guayaquil') AND A.ipdial_code IN ( 'intcob-claro-movil2' , 'intcob-claro-fijas' )
AND B.tel IS NULL
)
'''
# Procedure that runs the queries.
client = bigquery.Client()
query_job = client.query(mi_query)
query_job.result() # Execute in BigQuery
return "innserta-a-llamadas_report-claro-tabla!"
|
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#!/usr/bin/env python
#
import re, base64, logging, pickle, httplib2, time, urlparse, urllib2, urllib, StringIO, gzip, zipfile
from google.appengine.ext import webapp, db
from google.appengine.api import taskqueue, urlfetch, memcache, images, users
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp import template
from django.utils import simplejson as json
from django.utils.html import strip_tags
from oauth2client.appengine import CredentialsProperty
from oauth2client.client import OAuth2WebServerFlow
import encoder
# TODO(jimhug): Allow client to request desired thumb size.
THUMB_SIZE = (57, 57)
READER_API = 'http://www.google.com/reader/api/0'
MAX_SECTIONS = 5
MAX_ARTICLES = 20
class UserData(db.Model):
credentials = CredentialsProperty()
sections = db.ListProperty(db.Key)
def getEncodedData(self, articleKeys=None):
enc = encoder.Encoder()
# TODO(jimhug): Only return initially visible section in first reply.
maxSections = min(MAX_SECTIONS, len(self.sections))
enc.writeInt(maxSections)
for section in db.get(self.sections[:maxSections]):
section.encode(enc, articleKeys)
return enc.getRaw()
class Section(db.Model):
title = db.TextProperty()
feeds = db.ListProperty(db.Key)
def fixedTitle(self):
return self.title.split('_')[0]
def encode(self, enc, articleKeys=None):
# TODO(jimhug): Need to optimize format and support incremental updates.
enc.writeString(self.key().name())
enc.writeString(self.fixedTitle())
enc.writeInt(len(self.feeds))
for feed in db.get(self.feeds):
feed.ensureEncodedFeed()
enc.writeRaw(feed.encodedFeed3)
if articleKeys is not None:
articleKeys.extend(feed.topArticles)
class Feed(db.Model):
title = db.TextProperty()
iconUrl = db.TextProperty()
lastUpdated = db.IntegerProperty()
encodedFeed3 = db.TextProperty()
topArticles = db.ListProperty(db.Key)
def ensureEncodedFeed(self, force=False):
if force or self.encodedFeed3 is None:
enc = encoder.Encoder()
articleSet = []
self.encode(enc, MAX_ARTICLES, articleSet)
logging.info('articleSet length is %s' % len(articleSet))
self.topArticles = articleSet
self.encodedFeed3 = enc.getRaw()
self.put()
def encode(self, enc, maxArticles, articleSet):
enc.writeString(self.key().name())
enc.writeString(self.title)
enc.writeString(self.iconUrl)
logging.info('encoding feed: %s' % self.title)
encodedArts = []
for article in self.article_set.order('-date').fetch(limit=maxArticles):
encodedArts.append(article.encodeHeader())
articleSet.append(article.key())
enc.writeInt(len(encodedArts))
enc.writeRaw(''.join(encodedArts))
class Article(db.Model):
feed = db.ReferenceProperty(Feed)
title = db.TextProperty()
author = db.TextProperty()
content = db.TextProperty()
snippet = db.TextProperty()
thumbnail = db.BlobProperty()
thumbnailSize = db.TextProperty()
srcurl = db.TextProperty()
date = db.IntegerProperty()
def ensureThumbnail(self):
# If our desired thumbnail size has changed, regenerate it and cache.
if self.thumbnailSize != str(THUMB_SIZE):
self.thumbnail = makeThumbnail(self.content)
self.thumbnailSize = str(THUMB_SIZE)
self.put()
def encodeHeader(self):
# TODO(jmesserly): for now always unescape until the crawler catches up
enc = encoder.Encoder()
enc.writeString(self.key().name())
enc.writeString(unescape(self.title))
enc.writeString(self.srcurl)
enc.writeBool(self.thumbnail is not None)
enc.writeString(self.author)
enc.writeInt(self.date)
enc.writeString(unescape(self.snippet))
return enc.getRaw()
class HtmlFile(db.Model):
content = db.BlobProperty()
compressed = db.BooleanProperty()
filename = db.StringProperty()
author = db.UserProperty(auto_current_user=True)
date = db.DateTimeProperty(auto_now_add=True)
class UpdateHtml(webapp.RequestHandler):
def post(self):
upload_files = self.request.POST.multi.__dict__['_items']
version = self.request.get('version')
logging.info('files: %r' % upload_files)
for data in upload_files:
if data[0] != 'files': continue
file = data[1]
filename = file.filename
if version:
filename = '%s-%s' % (version, filename)
logging.info('upload: %r' % filename)
htmlFile = HtmlFile.get_or_insert(filename)
htmlFile.filename = filename
# If text > (1MB - 1KB) then gzip text to fit in 1MB space
text = file.value
if len(text) > 1024 * 1023:
data = StringIO.StringIO()
gz = gzip.GzipFile(str(filename), 'wb', fileobj=data)
gz.write(text)
gz.close()
htmlFile.content = data.getvalue()
htmlFile.compressed = True
else:
htmlFile.content = text
htmlFile.compressed = False
htmlFile.put()
self.redirect('/')
class TopHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
prefs = UserData.get_by_key_name(user.user_id())
if prefs is None:
self.redirect('/update/user')
return
params = {'files': HtmlFile.all().order('-date').fetch(limit=30)}
self.response.out.write(template.render('top.html', params))
class MainHandler(webapp.RequestHandler):
@login_required
def get(self, name):
if name == 'dev':
return self.handleDev()
elif name == 'login':
return self.handleLogin()
elif name == 'upload':
return self.handleUpload()
user = users.get_current_user()
prefs = UserData.get_by_key_name(user.user_id())
if prefs is None:
return self.handleLogin()
html = HtmlFile.get_by_key_name(name)
if html is None:
self.error(404)
return
self.response.headers['Content-Type'] = 'text/html'
if html.compressed:
# TODO(jimhug): This slightly sucks ;-)
# Can we write directly to the response.out?
gz = gzip.GzipFile(name,
'rb',
fileobj=StringIO.StringIO(html.content))
self.response.out.write(gz.read())
gz.close()
else:
self.response.out.write(html.content)
# TODO(jimhug): Include first data packet with html.
def handleLogin(self):
user = users.get_current_user()
# TODO(jimhug): Manage secrets for dart.googleplex.com better.
# TODO(jimhug): Confirm that we need client_secret.
flow = OAuth2WebServerFlow(
client_id='267793340506.apps.googleusercontent.com',
client_secret='5m8H-zyamfTYg5vnpYu1uGMU',
scope=READER_API,
user_agent='swarm')
callback = self.request.relative_url('/oauth2callback')
authorize_url = flow.step1_get_authorize_url(callback)
memcache.set(user.user_id(), pickle.dumps(flow))
content = template.render('login.html', {'authorize': authorize_url})
self.response.out.write(content)
def handleDev(self):
user = users.get_current_user()
content = template.render('dev.html', {'user': user})
self.response.out.write(content)
def handleUpload(self):
user = users.get_current_user()
content = template.render('upload.html', {'user': user})
self.response.out.write(content)
class UploadFeed(webapp.RequestHandler):
def post(self):
upload_files = self.request.POST.multi.__dict__['_items']
version = self.request.get('version')
logging.info('files: %r' % upload_files)
for data in upload_files:
if data[0] != 'files': continue
file = data[1]
logging.info('upload feed: %r' % file.filename)
data = json.loads(file.value)
feedId = file.filename
feed = Feed.get_or_insert(feedId)
# Find the section to add it to.
sectionTitle = data['section']
section = findSectionByTitle(sectionTitle)
if section != None:
if feed.key() in section.feeds:
logging.warn('Already contains feed %s, replacing' % feedId)
section.feeds.remove(feed.key())
# Add the feed to the section.
section.feeds.insert(0, feed.key())
section.put()
# Add the articles.
collectFeed(feed, data)
else:
logging.error('Could not find section %s to add the feed to' %
sectionTitle)
self.redirect('/')
# TODO(jimhug): Batch these up and request them more aggressively.
class DataHandler(webapp.RequestHandler):
def get(self, name):
if name.endswith('.jpg'):
# Must be a thumbnail
key = urllib2.unquote(name[:-len('.jpg')])
article = Article.get_by_key_name(key)
self.response.headers['Content-Type'] = 'image/jpeg'
# cache images for 10 hours
self.response.headers['Cache-Control'] = 'public,max-age=36000'
article.ensureThumbnail()
self.response.out.write(article.thumbnail)
elif name.endswith('.html'):
# Must be article content
key = urllib2.unquote(name[:-len('.html')])
article = Article.get_by_key_name(key)
self.response.headers['Content-Type'] = 'text/html'
if article is None:
content = '<h2>Missing article</h2>'
else:
content = article.content
# cache article content for 10 hours
self.response.headers['Cache-Control'] = 'public,max-age=36000'
self.response.out.write(content)
elif name == 'user.data':
self.response.out.write(self.getUserData())
elif name == 'CannedData.dart':
self.canData()
elif name == 'CannedData.zip':
self.canDataZip()
else:
self.error(404)
def getUserData(self, articleKeys=None):
user = users.get_current_user()
user_id = user.user_id()
key = 'data_' + user_id
# need to flush memcache fairly frequently...
data = memcache.get(key)
if data is None:
prefs = UserData.get_or_insert(user_id)
if prefs is None:
# TODO(jimhug): Graceful failure for unknown users.
pass
data = prefs.getEncodedData(articleKeys)
# TODO(jimhug): memcache.set(key, data)
return data
def canData(self):
def makeDartSafe(data):
return repr(unicode(data))[1:].replace('$', '\\$')
lines = [
'// TODO(jimhug): Work out correct copyright for this file.',
'class CannedData {'
]
user = users.get_current_user()
prefs = UserData.get_by_key_name(user.user_id())
articleKeys = []
data = prefs.getEncodedData(articleKeys)
lines.append(' static const Map<String,String> data = const {')
for article in db.get(articleKeys):
key = makeDartSafe(urllib.quote(article.key().name()) + '.html')
lines.append(' %s:%s, ' % (key, makeDartSafe(article.content)))
lines.append(' "user.data":%s' % makeDartSafe(data))
lines.append(' };')
lines.append('}')
self.response.headers['Content-Type'] = 'application/dart'
self.response.out.write('\n'.join(lines))
# Get canned static data
def canDataZip(self):
# We need to zip into an in-memory buffer to get the right string encoding
# behavior.
data = StringIO.StringIO()
result = zipfile.ZipFile(data, 'w')
articleKeys = []
result.writestr('data/user.data',
self.getUserData(articleKeys).encode('utf-8'))
logging.info(' adding articles %s' % len(articleKeys))
images = []
for article in db.get(articleKeys):
article.ensureThumbnail()
path = 'data/' + article.key().name() + '.html'
result.writestr(path.encode('utf-8'),
article.content.encode('utf-8'))
if article.thumbnail:
path = 'data/' + article.key().name() + '.jpg'
result.writestr(path.encode('utf-8'), article.thumbnail)
result.close()
logging.info('writing CannedData.zip')
self.response.headers['Content-Type'] = 'multipart/x-zip'
disposition = 'attachment; filename=CannedData.zip'
self.response.headers['Content-Disposition'] = disposition
self.response.out.write(data.getvalue())
data.close()
class SetDefaultFeeds(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
prefs = UserData.get_or_insert(user.user_id())
prefs.sections = [
db.Key.from_path('Section', 'user/17857667084667353155/label/Top'),
db.Key.from_path('Section',
'user/17857667084667353155/label/Design'),
db.Key.from_path('Section', 'user/17857667084667353155/label/Eco'),
db.Key.from_path('Section', 'user/17857667084667353155/label/Geek'),
db.Key.from_path('Section',
'user/17857667084667353155/label/Google'),
db.Key.from_path('Section',
'user/17857667084667353155/label/Seattle'),
db.Key.from_path('Section', 'user/17857667084667353155/label/Tech'),
db.Key.from_path('Section', 'user/17857667084667353155/label/Web')
]
prefs.put()
self.redirect('/')
class SetTestFeeds(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
prefs = UserData.get_or_insert(user.user_id())
sections = []
for i in range(3):
s1 = Section.get_or_insert('Test%d' % i)
s1.title = 'Section %d' % (i + 1)
feeds = []
for j in range(4):
label = '%d_%d' % (i, j)
f1 = Feed.get_or_insert('Test%s' % label)
f1.title = 'Feed %s' % label
f1.iconUrl = getFeedIcon('http://google.com')
f1.lastUpdated = 0
f1.put()
feeds.append(f1.key())
for k in range(8):
label = '%d_%d_%d' % (i, j, k)
a1 = Article.get_or_insert('Test%s' % label)
if a1.title is None:
a1.feed = f1
a1.title = 'Article %s' % label
a1.author = 'anon'
a1.content = 'Lorem ipsum something or other...'
a1.snippet = 'Lorem ipsum something or other...'
a1.thumbnail = None
a1.srcurl = ''
a1.date = 0
s1.feeds = feeds
s1.put()
sections.append(s1.key())
prefs.sections = sections
prefs.put()
self.redirect('/')
class UserLoginHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
prefs = UserData.get_or_insert(user.user_id())
if prefs.credentials:
http = prefs.credentials.authorize(httplib2.Http())
response, content = http.request(
'%s/subscription/list?output=json' % READER_API)
self.collectFeeds(prefs, content)
self.redirect('/')
else:
self.redirect('/login')
def collectFeeds(self, prefs, content):
data = json.loads(content)
queue_name = self.request.get('queue_name', 'priority-queue')
sections = {}
for feedData in data['subscriptions']:
feed = Feed.get_or_insert(feedData['id'])
feed.put()
category = feedData['categories'][0]
categoryId = category['id']
if not sections.has_key(categoryId):
sections[categoryId] = (category['label'], [])
# TODO(jimhug): Use Reader preferences to sort feeds in a section.
sections[categoryId][1].append(feed.key())
# Kick off a high priority feed update
taskqueue.add(url='/update/feed',
queue_name=queue_name,
params={'id': feed.key().name()})
sectionKeys = []
for name, (title, feeds) in sections.items():
section = Section.get_or_insert(name)
section.feeds = feeds
section.title = title
section.put()
# Forces Top to be the first section
if title == 'Top': title = '0Top'
sectionKeys.append((title, section.key()))
# TODO(jimhug): Use Reader preferences API to get users true sort order.
prefs.sections = [key for t, key in sorted(sectionKeys)]
prefs.put()
class AllFeedsCollector(webapp.RequestHandler):
'''Ensures that a given feed object is locally up to date.'''
def post(self):
return self.get()
def get(self):
queue_name = self.request.get('queue_name', 'background')
for feed in Feed.all():
taskqueue.add(url='/update/feed',
queue_name=queue_name,
params={'id': feed.key().name()})
UPDATE_COUNT = 4 # The number of articles to request on periodic updates.
INITIAL_COUNT = 40 # The number of articles to get first for a new queue.
SNIPPET_SIZE = 180 # The length of plain-text snippet to extract.
class FeedCollector(webapp.RequestHandler):
def post(self):
return self.get()
def get(self):
feedId = self.request.get('id')
feed = Feed.get_or_insert(feedId)
if feed.lastUpdated is None:
self.fetchn(feed, feedId, INITIAL_COUNT)
else:
self.fetchn(feed, feedId, UPDATE_COUNT)
self.response.headers['Content-Type'] = "text/plain"
def fetchn(self, feed, feedId, n, continuation=None):
# basic pattern is to read by ARTICLE_COUNT until we hit existing.
if continuation is None:
apiUrl = '%s/stream/contents/%s?n=%d' % (READER_API, feedId, n)
else:
apiUrl = '%s/stream/contents/%s?n=%d&c=%s' % (READER_API, feedId, n,
continuation)
logging.info('fetching: %s' % apiUrl)
result = urlfetch.fetch(apiUrl)
if result.status_code == 200:
data = json.loads(result.content)
collectFeed(feed, data, continuation)
elif result.status_code == 401:
self.response.out.write('<pre>%s</pre>' % result.content)
else:
self.response.out.write(result.status_code)
def findSectionByTitle(title):
for section in Section.all():
if section.fixedTitle() == title:
return section
return None
def collectFeed(feed, data, continuation=None):
'''
Reads a feed from the given JSON object and populates the given feed object
in the datastore with its data.
'''
if continuation is None:
if 'alternate' in data:
feed.iconUrl = getFeedIcon(data['alternate'][0]['href'])
feed.title = data['title']
feed.lastUpdated = data['updated']
articles = data['items']
logging.info('%d new articles for %s' % (len(articles), feed.title))
for articleData in articles:
if not collectArticle(feed, articleData):
feed.put()
return False
if len(articles) > 0 and data.has_key('continuation'):
logging.info('would have looked for more articles')
# TODO(jimhug): Enable this continuation check when more robust
#self.fetchn(feed, feedId, data['continuation'])
feed.ensureEncodedFeed(force=True)
feed.put()
return True
def collectArticle(feed, data):
'''
Reads an article from the given JSON object and populates the datastore with
it.
'''
if not 'title' in data:
    # Skip articles that have no title
return True
articleId = data['id']
article = Article.get_or_insert(articleId)
  # TODO(jimhug): This aborts too early - at least for one adafruit case.
if article.date == data['published']:
logging.info('found existing, aborting: %r, %r' %
(articleId, article.date))
return False
if data.has_key('content'):
content = data['content']['content']
elif data.has_key('summary'):
content = data['summary']['content']
else:
content = ''
#TODO(jimhug): better summary?
article.content = content
article.date = data['published']
article.title = unescape(data['title'])
article.snippet = unescape(strip_tags(content)[:SNIPPET_SIZE])
article.feed = feed
# TODO(jimhug): make this canonical so UX can change for this state
article.author = data.get('author', 'anonymous')
article.ensureThumbnail()
article.srcurl = ''
if data.has_key('alternate'):
for alt in data['alternate']:
if alt.has_key('href'):
article.srcurl = alt['href']
return True
def unescape(html):
"Inverse of Django's utils.html.escape function"
if not isinstance(html, basestring):
html = str(html)
html = html.replace(''', "'").replace('"', '"')
return html.replace('>', '>').replace('<', '<').replace('&', '&')
def getFeedIcon(url):
url = urlparse.urlparse(url).netloc
return 'http://s2.googleusercontent.com/s2/favicons?domain=%s&alt=feed' % url
def findImage(text):
img = findImgTag(text, 'jpg|jpeg|png')
if img is not None:
return img
img = findVideoTag(text)
if img is not None:
return img
img = findImgTag(text, 'gif')
return img
def findImgTag(text, extensions):
m = re.search(r'src="(http://\S+\.(%s))(\?.*)?"' % extensions, text)
if m is None:
return None
return m.group(1)
def findVideoTag(text):
# TODO(jimhug): Add other videos beyond youtube.
m = re.search(r'src="http://www.youtube.com/(\S+)/(\S+)[/|"]', text)
if m is None:
return None
return 'http://img.youtube.com/vi/%s/0.jpg' % m.group(2)
def makeThumbnail(text):
url = None
try:
url = findImage(text)
if url is None:
return None
return generateThumbnail(url)
except:
logging.info('error decoding: %s' % (url or text))
return None
def generateThumbnail(url):
logging.info('generating thumbnail: %s' % url)
thumbWidth, thumbHeight = THUMB_SIZE
result = urlfetch.fetch(url)
img = images.Image(result.content)
w, h = img.width, img.height
aspect = float(w) / h
thumbAspect = float(thumbWidth) / thumbHeight
if aspect > thumbAspect:
# Too wide, so crop on the sides.
normalizedCrop = (w - h * thumbAspect) / (2.0 * w)
img.crop(normalizedCrop, 0., 1. - normalizedCrop, 1.)
elif aspect < thumbAspect:
# Too tall, so crop out the bottom.
normalizedCrop = (h - w / thumbAspect) / h
img.crop(0., 0., 1., 1. - normalizedCrop)
img.resize(thumbWidth, thumbHeight)
# Chose JPEG encoding because informal experiments showed it generated
# the best size to quality ratio for thumbnail images.
nimg = img.execute_transforms(output_encoding=images.JPEG)
logging.info(' finished thumbnail: %s' % url)
return nimg
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id()))
if flow:
prefs = UserData.get_or_insert(user.user_id())
prefs.credentials = flow.step2_exchange(self.request.params)
prefs.put()
self.redirect('/update/user')
else:
pass
def main():
application = webapp.WSGIApplication(
[
('/data/(.*)', DataHandler),
# This is called periodically from cron.yaml.
('/update/allFeeds', AllFeedsCollector),
('/update/feed', FeedCollector),
('/update/user', UserLoginHandler),
('/update/defaultFeeds', SetDefaultFeeds),
('/update/testFeeds', SetTestFeeds),
('/update/html', UpdateHtml),
('/update/upload', UploadFeed),
('/oauth2callback', OAuthHandler),
('/', TopHandler),
('/(.*)', MainHandler),
],
debug=True)
webapp.util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
# uses the previous homework written by Zhiwang Wang
import math, random
random.seed(0)
## ================================================================
# calculate a random number a <= rand < b
def rand(a, b):
return (b - a) * random.random() + a
def make_matrix(I, J, fill=0.0):
m = []
for i in range(I):
m.append([fill] * J)
return m
def sigmoid(x):
return math.tanh(x)
# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
return 1.0 - y ** 2
## ================================================================
class NeuralNetwork:
def __init__(self, inputNodes, hiddenNodes, outputNodes):
# number of input, hidden, and output nodes
self.inputNodes = inputNodes + 1 # +1 for bias node
self.hiddenNodes = hiddenNodes
self.outputNodes = outputNodes
# activations for nodes
self.inputActivation = [1.0] * self.inputNodes
self.hiddenActivation = [1.0] * self.hiddenNodes
self.outputActivation = [1.0] * self.outputNodes
# create weights
self.inputWeight = make_matrix(self.inputNodes, self.hiddenNodes)
self.outputWeight = make_matrix(self.hiddenNodes, self.outputNodes)
        # set them to random values
for i in range(self.inputNodes):
for j in range(self.hiddenNodes):
self.inputWeight[i][j] = rand(-0.2, 0.2)
for j in range(self.hiddenNodes):
for k in range(self.outputNodes):
self.outputWeight[j][k] = rand(-2.0, 2.0)
# last change in weights for momentum
self.ci = make_matrix(self.inputNodes, self.hiddenNodes)
self.co = make_matrix(self.hiddenNodes, self.outputNodes)
def update(self, inputs):
if len(inputs) != self.inputNodes - 1:
raise ValueError('wrong number of inputs')
# input activations
for i in range(self.inputNodes - 1):
self.inputActivation[i] = inputs[i]
# hidden activations
for j in range(self.hiddenNodes):
sum = 0.0
for i in range(self.inputNodes):
sum = sum + self.inputActivation[i] * self.inputWeight[i][j]
self.hiddenActivation[j] = sigmoid(sum)
# output activations
for k in range(self.outputNodes):
sum = 0.0
for j in range(self.hiddenNodes):
sum = sum + self.hiddenActivation[j] * self.outputWeight[j][k]
self.outputActivation[k] = sigmoid(sum)
return self.outputActivation[:]
def backPropagate(self, targets, N, M):
if len(targets) != self.outputNodes:
raise ValueError('wrong number of target values')
# calculate error terms for output
output_deltas = [0.0] * self.outputNodes
for k in range(self.outputNodes):
error = targets[k] - self.outputActivation[k]
output_deltas[k] = dsigmoid(self.outputActivation[k]) * error
# calculate error terms for hidden
hidden_deltas = [0.0] * self.hiddenNodes
for j in range(self.hiddenNodes):
error = 0.0
for k in range(self.outputNodes):
error = error + output_deltas[k] * self.outputWeight[j][k]
hidden_deltas[j] = dsigmoid(self.hiddenActivation[j]) * error
# update output weights
for j in range(self.hiddenNodes):
for k in range(self.outputNodes):
change = output_deltas[k] * self.hiddenActivation[j]
self.outputWeight[j][k] = self.outputWeight[j][k] + N * change + M * self.co[j][k]
self.co[j][k] = change
# update input weights
for i in range(self.inputNodes):
for j in range(self.hiddenNodes):
change = hidden_deltas[j] * self.inputActivation[i]
self.inputWeight[i][j] = self.inputWeight[i][j] + N * change + M * self.ci[i][j]
self.ci[i][j] = change
# calculate error
error = 0.0
for k in range(len(targets)):
error = error + 0.5 * (targets[k] - self.outputActivation[k]) ** 2
return error
    def test(self, inputNodes):
        # run the forward pass once and reuse the result
        outputs = self.update(inputNodes)
        print(inputNodes, '->', outputs)
        return outputs[0]
def weights(self):
print('Input weights:')
for i in range(self.inputNodes):
print(self.inputWeight[i])
print()
print('Output weights:')
for j in range(self.hiddenNodes):
print(self.outputWeight[j])
def train(self, patterns, iterations=1000, N=0.5, M=0.1):
# N: learning rate, M: momentum factor
for i in range(iterations):
error = 0.0
for p in patterns:
inputs = p[0]
targets = p[1]
self.update(inputs)
error = error + self.backPropagate(targets, N, M)
# if i % 100 == 0:
# print('error %-.5f' % error)
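# Illustrative usage sketch (not part of the original homework code): train the
# network on the XOR problem and print its predictions. The pattern format
# [[inputs], [targets]] matches what train()/update() above expect.
if __name__ == '__main__':
    xor_patterns = [
        [[0, 0], [0]],
        [[0, 1], [1]],
        [[1, 0], [1]],
        [[1, 1], [0]],
    ]
    # two inputs, a small hidden layer, one output
    net = NeuralNetwork(2, 3, 1)
    net.train(xor_patterns, iterations=1000, N=0.5, M=0.1)
    for inputs, _ in xor_patterns:
        net.test(inputs)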
|
# parser.py
import requests
from bs4 import BeautifulSoup as bs
import sys
import os
import datetime
from pymongo import MongoClient
new = []
def scrape_html(url):
req = requests.get(url)
return bs(req.text, 'html.parser')
def mongoConnection(doc):
client = MongoClient('localhost', 27017)
db = client.test
col = db.ActorRecruit
    col.insert_one(doc)
# result = col.find({})
def check_time():
utcnow = datetime.datetime.utcnow()
time_gap = datetime.timedelta(hours=9)
now = utcnow + time_gap
nowday = now.strftime('%Y-%m-%d')
nowtime = now.strftime('%H:%M:%S')
return [str(nowday),str(nowtime)]
def logging(txt):
f = open("log.txt",'a') #로그 file open
curr_time = check_time()
f.write(curr_time[0] + ' ' + curr_time[1] + ' ' + txt + '\n')
f.close()
def latest_Data_ex():
res = []
with open('latest.txt', 'r+') as f_read:
for _ in range(10):
before = f_read.readline()
before = before.rstrip('\n')
res.append(before)
f_read.close()
return res
def latest_Data_in(newlat):
with open('latest.txt', 'w+') as f_write:
for val in newlat :
f_write.write(str(val)+'\n')
f_write.close()
# for v in result:
# print(v)
# sys.exit()
# init_count = 3
# for i in range(init_count,0,-1):
def parse_flim_web(page,lat):
global new
soup = scrape_html('https://www.filmmakers.co.kr/index.php?mid=actorsAudition&page='+str(page))
my_titles = soup.select(
'#board > tbody > tr > td.title > a'
)
# print(my_titles)
my_titles.pop(0)
my_titles.pop(0)
    # my_titles is a list object
for oneTitle in my_titles:
dicTemp = {
'title':'',
'making':'',
'name':'',
'director':'',
'part':'',
'preproduction':'',
'castingStar':'',
'period':'',
'pay':'',
'numRecruit':'',
'sexRecruit':'',
'chargeMan':'',
'tel':'',
'mail':'',
'closingDate':'',
'article':'',
'srl':'',
'type':'',
'date':'',
'time':'',
'img':''
}
        # text inside the tag
# print(oneTitle.text)
        # # get the tag's attribute (e.g. the href attribute)
# print(oneTitle.get('href'))
# print(oneTitle.get('href').split('document_srl=')[1])
ch_srl = oneTitle.get('href').split('document_srl=')[1]
for val in lat:
if str(ch_srl) == val :
return 0
soup2 = scrape_html(oneTitle.get('href'))
# soup2 = scrape_html('https://www.filmmakers.co.kr/actorsAudition/4340780')
#####
tmp = soup2.find('div',{'id':'board'})#.find('div',{'class':'col-md-8 col-lg-8 padding-0'})#.find('div',{'class':'container-fluid padding-0'})
# .find('div',{'class':'padding-0 margin-top-5 margin-bottom-0'}).find('tbody').find('tr'))
tmp = tmp.find_all('div')
tmp = tmp[1].find('div',{'class':'container-fluid padding-0'})
if tmp.find('form') != None :
continue
itime = tmp.find('table').find_all('td')[7].find('span').text
itime = itime.replace('년 ','-').replace('월 ','-').replace('일 ',' ').replace('시 ',':').replace('분 ',':').replace('초','').split()
# print(itime)
# print(str(nowday),str(nowtime))
# sys.exit()
tmp = tmp.find_all('div')
# print(tmp)
art = tmp[1].find_all('p')
arTemp = ''
for value in art:
arTemp += value.text + '\n'
imgart = tmp[1].find_all('img')
# print(imgart)
if len(imgart) != 0:
imTemp = '{'
for index in range(len(imgart)) :
if len(imgart)-1 == index :
imTemp += str(imgart[index].get('src')) + '}'
else :
imTemp += str(imgart[index].get('src')) + ','
dicTemp['img'] = imTemp
tbl = tmp[0].find_all('td')
# print(tbl)
# for value in tbl:
# print(value.text)
# print()
# sys.exit()
# print(oneTitle.find('strong').text.strip().replace('[','').replace(']',''))
# print(oneTitle.find('span').text)
dicTemp['srl'] = ch_srl
try:
dicTemp['type'] = oneTitle.find('strong').text.strip().replace('[','').replace(']','')
except :
dicTemp['type'] = ''
dicTemp['title'] = oneTitle.find('span').text.strip()
dicTemp['making'] = tbl[0].text
dicTemp['name'] = tbl[1].text
dicTemp['director'] = tbl[2].text
dicTemp['part'] = tbl[3].text
dicTemp['preproduction'] = tbl[4].text
dicTemp['castingStar'] = tbl[5].text
dicTemp['period'] = tbl[6].text
dicTemp['pay'] = tbl[7].text
dicTemp['numRecruit'] = tbl[8].text
dicTemp['sexRecruit'] = tbl[9].text
dicTemp['chargeMan'] = tbl[10].text
dicTemp['tel'] = tbl[11].text
dicTemp['mail'] = tbl[12].text
dicTemp['closingDate'] = tbl[13].text
dicTemp['article'] = arTemp
dicTemp['date'] = itime[0]
dicTemp['time'] = itime[1]
# print(dicTemp)
# sys.exit()
mongoConnection(dicTemp)
if len(new) < 10 :
new.append(dicTemp['srl'])
# print(ch_srl,new)
# logging('index = '+str(page)+' inner srl = '+dicTemp['srl'])
print('index = ',page)
return 1
|
import pyglet
import pyglet.gl as gl
import numpy as np  # needed for the lighting direction computation below
class Visualizer(pyglet.window.Window):
def __init__(self,*args,**kwargs):
super().__init__(*args, **kwargs)
#pyglet.app.run()
def on_draw(self):
        self.clear()  # Visualizer itself is the pyglet window
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glEnable(gl.GL_LINE_SMOOTH)
width, height = self.get_size()
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.gluPerspective(60, width / float(height), 0.01, 20)
gl.glMatrixMode(gl.GL_TEXTURE)
gl.glLoadIdentity()
# texcoords are [0..1] and relative to top-left pixel corner, add 0.5 to center
gl.glTranslatef(0.5 / image_data.width, 0.5 / image_data.height, 0)
image_texture = image_data.get_texture()
# texture size may be increased by pyglet to a power of 2
tw, th = image_texture.owner.width, image_texture.owner.height
gl.glScalef(image_data.width / float(tw),
image_data.height / float(th), 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)
gl.glTranslatef(0, 0, state.distance)
gl.glRotated(state.pitch, 1, 0, 0)
gl.glRotated(state.yaw, 0, 1, 0)
if any(state.mouse_btns):
axes(0.1, 4)
gl.glTranslatef(0, 0, -state.distance)
gl.glTranslatef(*state.translation)
gl.glColor3f(0.5, 0.5, 0.5)
gl.glPushMatrix()
gl.glTranslatef(0, 0.5, 0.5)
grid()
gl.glPopMatrix()
        psz = max(self.get_size()) / float(max(w, h)) if state.scale else 1
gl.glPointSize(psz)
distance = (0, 0, 1) if state.attenuation else (1, 0, 0)
gl.glPointParameterfv(gl.GL_POINT_DISTANCE_ATTENUATION,
(gl.GLfloat * 3)(*distance))
if state.lighting:
ldir = [0.5, 0.5, 0.5] # world-space lighting
ldir = np.dot(state.rotation, (0, 0, 1)) # MeshLab style lighting
ldir = list(ldir) + [0] # w=0, directional light
gl.glLightfv(gl.GL_LIGHT0, gl.GL_POSITION, (gl.GLfloat * 4)(*ldir))
gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,
(gl.GLfloat * 3)(1.0, 1.0, 1.0))
gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,
(gl.GLfloat * 3)(0.75, 0.75, 0.75))
gl.glEnable(gl.GL_LIGHT0)
gl.glEnable(gl.GL_NORMALIZE)
gl.glEnable(gl.GL_LIGHTING)
gl.glColor3f(1, 1, 1)
texture = image_data.get_texture()
gl.glEnable(texture.target)
gl.glBindTexture(texture.target, texture.id)
gl.glTexParameteri(
gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
# comment this to get round points with MSAA on
gl.glEnable(gl.GL_POINT_SPRITE)
if not state.scale and not state.attenuation:
gl.glDisable(gl.GL_MULTISAMPLE) # for true 1px points with MSAA on
vertex_list.draw(gl.GL_POINTS)
gl.glDisable(texture.target)
if not state.scale and not state.attenuation:
gl.glEnable(gl.GL_MULTISAMPLE)
gl.glDisable(gl.GL_LIGHTING)
gl.glColor3f(0.25, 0.25, 0.25)
frustum(depth_intrinsics)
axes()
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
gl.glMatrixMode(gl.GL_TEXTURE)
gl.glLoadIdentity()
gl.glDisable(gl.GL_DEPTH_TEST)
fps_display.draw()
|
#!/usr/bin/env python3
from argparse import ArgumentParser
import os
from .lib import utils
from . import detector
from . import analyzer
from . import doc_collector
from . import doc_analyzer
from . import builder
from . import evaluator
modules = {
'build': builder.build,
'generate-bc': builder.generate_bc,
'analyze': analyzer.analyzer,
'doc-collect': doc_collector.doc_collector,
'doc-analyze': doc_analyzer.doc_analyzer,
'occurrence': analyzer.occurrence,
'detect': detector.main,
'evaluate': evaluator.evaluate
}
def arg_parser():
parser = ArgumentParser()
parser.add_argument('-codebase', type=str, default=None, help='the codebase directory, default is ./')
subparsers = parser.add_subparsers(dest="cmd")
subparsers.required = True
for (_, module) in modules.items():
module.setup_parser(subparsers)
return parser
def main():
# parse the arguments
parser = arg_parser()
args = parser.parse_args()
    if args.codebase is None:
args.codebase = os.getcwd()
else:
args.codebase = os.path.abspath(args.codebase)
if utils.path_exist(args.codebase) != 0:
return
# Execute the main function of that module
if args.cmd:
modules[args.cmd].main(args)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
Arreglo = input("Enter the real numbers of the array (separated by spaces): ").split()
for i in range(len(Arreglo)):
Arreglo[i]=float(Arreglo[i])
promedio=sum(Arreglo)/len(Arreglo)
print("El promedio del arreglo de reales es de:",promedio)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-28 04:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Affiliation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.TextField()),
],
options={
'db_table': 'affliation',
},
),
migrations.CreateModel(
name='UserAffliation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('affiliation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Affiliation')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'user_affiliation',
},
),
migrations.CreateModel(
name='UserStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=30)),
('status', models.CharField(max_length=30, unique=True)),
],
),
]
|
# Import necessary classes
from django.conf.urls import url
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from mysiteS20 import settings
from .forms import OrderForm, InterestForm, RegisterForm
from .models import Topic, Course, Student, Order
import datetime
# Create your views here.
@login_required
def index(request):
if request.session.test_cookie_worked():
print('Test Cookie Worked. Delete it')
        request.session.delete_test_cookie()
top_list = Topic.objects.all().order_by('id')[:10]
# get current user in request object
current_user = request.user
return render(request, 'myapp/index.html', {'top_list': top_list, 'user': current_user})
def about(request):
about_visits = request.COOKIES.get('about_visits', 'default')
if about_visits == 'default':
response = render(request, 'myapp/about.html', {'about_visits': '1'})
response.set_cookie('about_visits', 1, 5 * 60)
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie
return response
else:
about_visits = int(about_visits) + 1
response = render(request, 'myapp/about.html', {'about_visits': about_visits})
response.set_cookie('about_visits', about_visits)
return response
@login_required
def detail(request, top_no):
topic = get_object_or_404(Topic, pk=top_no)
course_list = Course.objects.all().filter(topic=top_no)
return render(request, 'myapp/detail.html', {'topic': topic, 'course_list': course_list})
@login_required
def courses(request):
courlist = Course.objects.all().order_by('id')
return render(request, 'myapp/courses.html', {'courlist': courlist})
@login_required
def place_order(request):
msg = ''
courlist = Course.objects.all()
if request.method == 'POST':
form = OrderForm(request.POST)
if form.is_valid():
order = form.save(commit=True)
if order.levels <= order.courses.first().stages:
order.save()
msg = 'Your course has been ordered successfully.'
# Update course price if it is greater than 150.00
if order.courses.first().price > 150.00:
order.courses.first().discount()
else:
msg = 'You exceeded the number of levels for this course.'
return render(request, 'myapp/order_response.html', {'msg': msg})
else:
form = OrderForm()
return render(request, 'myapp/place_order.html', {'form': form, 'msg': msg, 'courlist': courlist})
@login_required
def course_detail(request, cour_id):
course = get_object_or_404(Course, pk=cour_id)
if request.method == 'POST':
form = InterestForm(request.POST)
if form.is_valid():
interest = form.cleaned_data['interested']
if interest == '1':
course.interested += 1
course.save()
return index(request)
else:
form = InterestForm()
return render(request, 'myapp/course_detail.html', {'form': form, 'course': course})
def user_login(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
current_login_time = str(datetime.datetime.now())
# session parameter last_login
request.session['last_login'] = current_login_time
request.session['username'] = username
# set session expiry to 1 hour
# request.session.set_expiry(3600)
# set session expiry to 0 to expire session at browser close
request.session.set_expiry(0)
login(request, user)
return HttpResponseRedirect(reverse('myapp:myaccount'))
else:
return HttpResponse('Your account is disabled.')
else:
return render(request, 'myapp/login.html')
else:
# set a test cookie
print('Set a test cookie')
request.session.set_test_cookie()
return render(request, 'myapp/login.html')
@login_required
def user_logout(request):
try:
del request.session['last_login']
del request.session['username']
request.session.flush()
except KeyError:
pass
return HttpResponseRedirect(reverse('myapp:user_login'))
@login_required
def myaccount(request):
student = Student.objects.filter(username=request.user.username)
if len(student) == 1:
topics = Student.objects.filter(username=request.user.username).first().interested_in.all()
ordered_courses = Order.objects.filter(Student__username=request.user.username, order_status=1).values_list(
'courses__id', 'courses__name')
return render(request, 'myapp/myaccount.html',
{'student': student.first(), 'courses': ordered_courses, 'isStudent': 1, 'topics': topics})
else:
return render(request, 'myapp/myaccount.html', {'isStudent': 0})
# Register custom view
def register(request):
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
student = form.save(commit=False)
student.save() # save student
form.save_m2m() # save topics
return redirect('myapp:user_login')
else:
form = RegisterForm()
return render(request, 'myapp/register.html', {'form': form})
|
import os
import pathlib
from typing import List
import boto3
import botocore
def s3_bucket_exists(name: str) -> bool:
s3 = boto3.client("s3")
try:
s3.head_bucket(Bucket=name)
except botocore.exceptions.ClientError as e:
print(e)
return False
return True
def s3_get_object_names_from_dir(
bucket_name: str, dir_name: str, file_type: str = None
) -> List[str]:
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucket_name) # pylint: disable=no-member
object_names = [
object_summary.key for object_summary in bucket.objects.filter(Prefix=dir_name)
]
if file_type is not None:
object_names = [
object_name
for object_name in object_names
if object_name.lower().endswith(file_type.lower())
]
return object_names
def s3_download_files(
bucket_name: str,
s3_object_paths: List[str],
destination_dir: str,
notify_if_exists: bool = False,
) -> None:
s3_client = boto3.client("s3")
s3_resource = boto3.resource("s3")
object_summary_list = [
s3_resource.ObjectSummary( # pylint: disable=no-member
bucket_name, s3_object_path
)
for s3_object_path in s3_object_paths
]
if not os.path.isdir(destination_dir):
pathlib.Path(destination_dir).mkdir(parents=True, exist_ok=True)
for object_index, object_summary in enumerate(object_summary_list):
destination_file_path = os.path.join(
destination_dir, os.path.basename(object_summary.key)
)
if not os.path.isfile(destination_file_path):
try:
s3_client.download_file( # pylint: disable=no-member
object_summary.bucket_name,
object_summary.key,
destination_file_path,
)
except botocore.exceptions.ClientError as e:
print(e)
print(
"Downloading file from %s:%s, %i/%i"
% (
object_summary.bucket_name,
object_summary.key,
object_index + 1,
len(object_summary_list),
)
)
else:
if notify_if_exists:
print(
"File already downloaded: %s:%s, %i/%i"
% (
object_summary.bucket_name,
object_summary.key,
object_index + 1,
len(object_summary_list),
)
)
def file_exists(bucket_name: str, s3_object_path: str) -> bool:
s3 = boto3.resource("s3")
try:
s3.Object(bucket_name, s3_object_path).load() # pylint: disable=no-member
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "404":
return False
else:
raise
else:
return True
def upload_files(
bucket_name,
files_to_send: List[str],
s3_destination_object_dir: str,
notify_if_exists: bool = False,
) -> None:
s3 = boto3.client("s3")
for file_index, file_to_send in enumerate(files_to_send):
s3_destination_object_path = os.path.join(
s3_destination_object_dir, os.path.basename(file_to_send)
)
try:
if file_exists(bucket_name, s3_destination_object_path):
if notify_if_exists:
print(
"S3 object already exists %s:%s, %i/%i"
% (
bucket_name,
s3_destination_object_dir,
file_index + 1,
len(files_to_send),
)
)
continue
s3.upload_file(file_to_send, bucket_name, s3_destination_object_path)
except botocore.exceptions.ClientError as e:
print(e)
continue
print(
"Uploading file to %s:%s, %i/%i"
% (
bucket_name,
s3_destination_object_path,
file_index + 1,
len(files_to_send),
)
)
|
#!/usr/bin/python
#!/nasa/python/2.7.3/bin/python
# graphPosterLine.py
# by: Mike Pozulp
# same as graphLine, but make
# the plot and font bigger for
# purposes of legibility
import matplotlib.pyplot as plt
import pylab
import csv
import sys
import numpy
# allow for assignment of
# different color to each line
count = 0
linecolor = ['r', 'b', 'm', 'g']
# track max values for setting
# aesthetic plotting boundaries
xmax = 0
ymax = 0
def printUsage():
print ('Usage: graphLine.py {-x | -m} [npbid_san] '
'[npbid_har] [npbid_wes] [npbid_neh] \n'
'\t-x to graph max instead of median '
'\t-m to graph median (default) ')
def draw(scaledata, flag):
# acknowledge global counter, xmax, ymax
global count, xmax, ymax
with open(scaledata, 'rb') as f:
recordlist = csv.reader(f)
next(recordlist) #skip first line
# sort by nproc into dictionary
pdict = {}
for row in recordlist:
nprocs = row[1]
# convert to gigaflops
speed = float(row[2])/1000
if nprocs in pdict:
pdict[nprocs].append(speed)
else:
pdict[nprocs] = [speed]
if flag == '-x':
x, y = findMaximums(pdict)
else:
x, y = findMedians(pdict)
if xmax < numpy.max(x):
xmax = numpy.max(x)
if ymax < numpy.max(y):
ymax = numpy.max(y)
procid = (scaledata.split("_")[1]).split(".")[0]
if procid == 'san':
proc = 'Sandybridge'
elif procid == 'wes':
proc = 'Westmere'
elif procid == 'neh':
proc = 'Nehalem'
elif procid == 'har':
proc = 'Harpertown'
else:
print 'Unidentified proc type ' + procid + '; exiting'
sys.exit()
# draw the line and scatter plots
print 'drawing ' + scaledata
plot = plt.plot(x,y, linecolor[count]+'--', linewidth=7, label=proc)
plt.scatter(x,y, color=linecolor[count], s=200)
#next color for next line
count += 1
return plot, proc
def findMedians(pdict):
tuplist = []
for xkey in pdict:
med = numpy.median(pdict[xkey])
tuplist.append( (int(xkey), med) )
# sort the tuples on nprocs
tuplist.sort()
return zip(*tuplist)[0], zip(*tuplist)[1]
def findMaximums(pdict):
tuplist = []
for xkey in pdict:
ymax = numpy.max(pdict[xkey])
tuplist.append( (int(xkey), ymax) )
#sort the tuples on nprocs
tuplist.sort()
return zip(*tuplist)[0], zip(*tuplist)[1]
def main():
# make the scatter plot big
plt.figure(figsize=(16,13))
# make the fonts big
font = { 'family' : 'helvetica',
'weight' : 'bold',
'size' : 30}
plt.rc('font', **font)
if len(sys.argv) < 2 or len(sys.argv) > 6:
printUsage(); sys.exit()
plist = [] # keep track of lines and
labels = [] # labels for the legend
if sys.argv[1] == '-x' or sys.argv[1] == '-m':
start = 2
else:
start = 1
for scaledata in sys.argv[start:]:
plot, label = draw(scaledata, sys.argv[1])
plist.append(plot)
labels.append(label)
#get title
npbid = (scaledata.split("_")[0]).split("/")[3]
if sys.argv[1] == '-x':
stat = 'Max'
else:
        stat = 'Median'
title = (npbid.upper() + ' Class C Benchmark ' +
stat + ' Results by Processor Type')
#find appropriate max and min values
p = 0.05
x_min = 0 - p * xmax
x_max = xmax + xmax * p
y_min = 0 - p * ymax
y_max = ymax + ymax * p
plt.ylabel('Giga FLOPS')
plt.xlabel('Processors')
#plt.title(title)
plt.legend(plist, labels, loc=2,
borderaxespad=0.1, prop={'size':26})
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
#plt.show()
    # save our plot under a name derived from the scaledata file name
pt = 'max' if sys.argv[1] == '-x' else 'med'
fname = 'npbGraphs/poster/line/' + \
(sys.argv[start].split("/")[3]).split("_")[0] \
+ '_Line_' + pt + '.png'
print 'saving ' + fname
plt.savefig(fname)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 4 20:12:40 2019
@author: Zackerman24
"""
import numpy as np
import game_setup as gs
"""There's probably a way of creating six separate lists for each letter
and then combining them into one array. Look into this after?"""
base_array = np.array([['A1', 'A2', 'A3', 'A4', 'A5', 'A6'],
['B1', 'B2', 'B3', 'B4', 'B5', 'B6'],
['C1', 'C2', 'C3', 'C4', 'C5', 'C6'],
['D1', 'D2', 'D3', 'D4', 'D5', 'D6'],
['E1', 'E2', 'E3', 'E4', 'E5', 'E6'],
['F1', 'F2', 'F3', 'F4', 'F5', 'F6']])
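# A sketch of how the same 6x6 grid could be built programmatically instead of
# being written out by hand (illustrative only; the explicit literal above
# remains the one the game uses, and the assert just confirms equivalence).
generated_array = np.array([[letter + str(num) for num in range(1, 7)]
                            for letter in 'ABCDEF'])
assert (generated_array == base_array).all()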
player_one_ships_array = np.copy(base_array)
player_two_ships_array = np.copy(base_array)
player_one_placements = []
player_two_placements = []
print("\nAt any point in play, enter 'Exit' to exit the game.")
print("\nPlayer One select first. Place coordinates on your map.")
print(player_one_ships_array)
gs.create_ships(base_array,player_one_ships_array,player_one_placements)
print("\nPlayer Two now selects. Place coordinates on your map.")
print(player_two_ships_array)
gs.create_ships(base_array,player_two_ships_array,player_two_placements)
while True:
print("\nPlayer One's turn.")
gs.player_move(base_array,player_two_ships_array,player_two_placements)
print("\nPlayer Two's turn.")
gs.player_move(base_array,player_one_ships_array,player_one_placements)
if not player_one_placements:
break
elif not player_two_placements:
break
else:
continue
exit()
"""
Ships will be represented in each player's array as X's
Each player will also have a 'targets' array, tracking their moves
When a player enters a move, it will reference the original array
to identify the index location of the guess, and then search
the opposing player's array at that spot to see if it has struck
"""
|
import os
import sys
#
# Complete the timeConversion function below.
#
def timeConversion(s):
a = s[-2:] #PM
s1 = s[:-2]
hour = s1[:2] #07
rest = s1[2:] #:09:45
check = hour+rest
conversion = int(hour)
#print(int(h)-12) #prints everything except last two letters 12:09:45
#print(a) #prints Last two letters PM
if a.upper()=="PM":
if conversion == 12:
return(s[:-2])
else:
conversion2 = conversion + 12
conversion1 = str(conversion2)
return(conversion1+rest)
else:
if conversion ==12:
hour ='00'
return(hour+rest)
else:
return(s[:-2])
if __name__ == '__main__':
f = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = timeConversion(s)
f.write(result + '\n')
f.close()
|
#B
men3=[int(q) for q in input().split()]
print(abs(men3[0]-men3[1]))
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
import numpy as np
img = plt.imread("test.png")
rgb_weights = [0.2989, 0.5870, 0.1140]
#plt.imshow(img)
grayscale_image = np.dot(img[...,:3], rgb_weights)
plt.imshow(grayscale_image, cmap = plt.get_cmap("gray"))
|
"""
A binary gap within a positive integer N is any maximal sequence of consecutive zeros that is surrounded by ones at both ends in the binary representation of N.
For example, number 9 has binary representation 1001 and contains a binary gap of length 2. The number 529 has binary representation 1000010001 and contains two binary gaps: one of length 4 and one of length 3. The number 20 has binary representation 10100 and contains one binary gap of length 1. The number 15 has binary representation 1111 and has no binary gaps. The number 32 has binary representation 100000 and has no binary gaps.
Write a function:
class Solution { public int solution(int N); }
that, given a positive integer N, returns the length of its longest binary gap. The function should return 0 if N doesn't contain a binary gap.
For example, given N = 1041 the function should return 5, because N has binary representation 10000010001 and so its longest binary gap is of length 5. Given N = 32 the function should return 0, because N has binary representation '100000' and thus no binary gaps.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..2,147,483,647].
"""
def countBinaryGap(num):
# convert num to binary
binary = format(num , "b")
print("Longest Binary Gap in :" + binary)
gap = False # to indicate if there is a gap
counter = 0 # to count zeros in each gap
last = '' # to store last iteration value
binary_gap = 0 # to store longest gap value
for s in binary:
if last == '':
last = str(s)
continue
else :
if s == '0':
if last == '1':
counter += 1
last = '0'
gap = True
continue
if last == '0':
if counter > 0 :
counter += 1
last = '0'
continue
if counter == 0:
last = '0'
continue
if s == '1':
if gap == True :
if last == '0':
if counter > binary_gap:
binary_gap = counter
counter = 0
last = '1'
                        gap = False
continue
if gap == False:
if last == '0':
last = '1'
continue
if last == '1':
last = '1'
continue
return binary_gap
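# An equivalent, more compact way to compute the same result (a sketch added
# for comparison with the step-by-step version above): strip zeros that are
# not enclosed by ones, then measure the longest remaining run of zeros.
def countBinaryGapCompact(num):
    runs = format(num, "b").strip("0").split("1")
    return max(len(run) for run in runs) if runs else 0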
print(countBinaryGap(15))
print(countBinaryGap(300))
print(countBinaryGap(250))
print(countBinaryGap(17))
print(countBinaryGap(44))
print(countBinaryGap(69))
print(countBinaryGap(123123))
|
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import warnings
import pytest
import numpy as np
from copy import deepcopy
import os
import sys
import shutil
from scipy import constants, interpolate
from pyuvdata import UVCal, UVData
from hera_sim import noise
from uvtools import dspec
from hera_cal import io, datacontainer
from hera_cal import vis_clean
from hera_cal.vis_clean import VisClean
from hera_cal.data import DATA_PATH
from hera_cal import xtalk_filter as xf
import glob
import copy
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
class Test_VisClean(object):
def test_init(self):
# test basic init
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(24, 25, 'ee')])
assert hasattr(V, 'data')
assert hasattr(V, 'antpos')
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test basic init w/ uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(13, 14, 'ee')])
assert set(V.hd.ant_1_array) == set([13])
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test input cal
fname = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
uvc = io.HERACal(os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA.abs.calfits'))
gains, _, _, _ = uvc.read()
V1 = VisClean(fname, filetype='miriad')
bl = (52, 53, 'ee')
V1.read(bls=[bl])
V2 = VisClean(fname, filetype='miriad', input_cal=uvc)
V2.read(bls=[bl])
g = gains[(bl[0], 'Jee')] * gains[(bl[1], 'Jee')].conj()
assert np.allclose((V1.data[bl] / g)[30, 30], V2.data[bl][30, 30])
V2.apply_calibration(V2.hc, unapply=True)
assert np.allclose(V1.data[bl][30, 30], V2.data[bl][30, 30], atol=1e-5)
# test soft copy
V1.hello = 'hi'
V1.hello_there = 'bye'
V1.foo = 'bar'
V3 = V1.soft_copy(references=["hello*"])
assert hex(id(V1.data[(52, 53, 'ee')])) == hex(id(V3.data[(52, 53, 'ee')]))
assert hasattr(V3, 'hello')
assert hasattr(V3, 'hello_there')
assert not hasattr(V3, 'foo')
assert V3.__class__ == VisClean
# test clear
V1.clear_containers()
assert np.all([len(getattr(V1, c)) == 0 for c in ['data', 'flags', 'nsamples']])
V2.clear_calibration()
assert not hasattr(V2, 'hc')
@pytest.mark.filterwarnings("ignore:Selected polarization values are not evenly spaced")
def test_read_write(self):
# test read data can be turned off for uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
V.read(read_data=False)
assert set(V.hd.ant_1_array) == set([1, 11, 12, 13, 14])
# test read-write-read
V.read()
V.write_data(V.data, "./ex.uvh5", overwrite=True, filetype='uvh5', extra_attrs=dict(vis_units='Jy'))
V2 = VisClean("./ex.uvh5", filetype='uvh5')
V2.read()
assert V2.hd.vis_units == 'Jy'
assert 'Thisfilewasproducedbythefunction' in V2.hd.history.replace('\n', '').replace(' ', '')
V.hd.history, V2.hd.history, V2.hd.vis_units = '', '', V.hd.vis_units
assert V.hd == V2.hd
os.remove("./ex.uvh5")
# exceptions
pytest.raises(ValueError, V.write_data, V.data, 'foo', filetype='what')
# test write on subset of data
V.read(read_data=True)
data = datacontainer.DataContainer(dict([(k, V.data[k]) for k in list(V.data.keys())[:2]]))
V.write_data(data, "ex.uvh5", overwrite=True, filetype='uvh5')
assert os.path.exists("ex.uvh5")
os.remove('ex.uvh5')
def test_fourier_filter(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# test arg errors
k = (24, 25, 'ee')
fc = [0.]
fw = [100e-9]
ff = [1e-9]
fwt = [1e-3]
assert pytest.raises(ValueError, V.fourier_filter, keys=[k], overwrite=True,
filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='height', mode='dayenu', fitting_options=None)
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='freq', mode='dayenu', output_prefix='clean', zeropad=10, overwrite=True, max_contiguous_edge_flags=20)
# this line is repeated to cover the overwrite skip
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff, max_contiguous_edge_flags=20,
ax='freq', mode='dayenu', zeropad=10, output_prefix='clean', overwrite=False)
assert np.all([V.clean_info[k]['status']['axis_1'][i] == 'success' for i in V.clean_info[k]['status']['axis_1']])
# now do a time filter
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fwt, suppression_factors=ff, overwrite=True,
ax='time', mode='dayenu', zeropad=10, max_contiguous_edge_flags=20)
assert V.clean_info[k]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-16))
# raise errors.
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff],
mode='dayenu', zeropad=0, overwrite=True)
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff], overwrite=True,
mode='dayenu', zeropad=['Mathematical Universe', 'Crazy Universe'])
# check 2d filter.
V.fourier_filter(filter_centers=[fc, fc],
filter_half_widths=[fwt, fw],
suppression_factors=[ff, ff],
mode='dayenu', overwrite=True,
zeropad=[20, 10], ax='both', max_contiguous_edge_flags=100)
assert V.clean_info[k]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-16))
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dayenu(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dayenu')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
# had to set atol=1e-6 here so it won't fail on travis (it runs fine on my laptop). There are some funny
# numpy issues.
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-15))
assert np.all([V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', mode='dayenu')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dayenu')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dayenu')
assert V.clean_info[(24, 25, 'ee')]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-15))
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
assert V.clean_info[(24, 25, 'ee')]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-15))
# check whether dayenu filtering axis 1 and then axis 0 is the same as dayenu filtering axis 1 and then filtering the resid.
# note that filtering axis orders do not commute, we filter axis 1 (foregrounds) before filtering cross-talk.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, max_frate=1.0, output_prefix='clean1', mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, data=V.clean1_resid, output_prefix='clean0', mode='dayenu')
assert np.all(np.isclose(V.clean_resid[(24, 25, 'ee')], V.clean0_resid[(24, 25, 'ee')]))
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dpss(self):
# Relax atol=1e-6 for clean_data and data equalities. there may be some numerical
# issues going on. Notebook tests show that distributing minus signs has
# consequences.
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dpss_leastsq')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-6))
assert np.all([V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', mode='dpss_leastsq')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dpss_leastsq')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dpss_leastsq')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert V.clean_info[(24, 25, 'ee')]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-6))
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-6))
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# just need to make sure various kwargs run through
# actual code unit-testing coverage has been done in uvtools.dspec
# basic freq clean
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True)
assert np.all([V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-16))
# basic time clean
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='time', max_frate=10., overwrite=True)
assert 'skipped' == V.clean_info[(24, 25, 'ee')]['status']['axis_0'][0]
assert 'success' == V.clean_info[(24, 25, 'ee')]['status']['axis_0'][3]
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]]))
# basic 2d clean
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', max_frate=10., overwrite=True,
filt2d_mode='plus')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')]['status']['axis_0'][i] for i in V.clean_info[(24, 25, 'ee')]['status']['axis_0']])
assert np.all(['success' == V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=1e-16))
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', flags=V.flags + True, max_frate=10.,
overwrite=True, filt2d_mode='plus')
assert np.all([V.clean_info[(24, 25, 'ee')]['status']['axis_1'][i] == 'skipped' for i in V.clean_info[(24, 25, 'ee')]['status']['axis_1']])
assert np.all([V.clean_info[(24, 25, 'ee')]['status']['axis_0'][i] == 'skipped' for i in V.clean_info[(24, 25, 'ee')]['status']['axis_0']])
# test fft data
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', max_frate=10., overwrite=True,
filt2d_mode='rect')
# assert foreground peak is at 0 delay bin
V.fft_data(data=V.clean_model, keys=[(24, 25, 'ee')], ax='freq', window='hann', edgecut_low=10, edgecut_hi=10, overwrite=True)
assert np.argmax(np.mean(np.abs(V.dfft[(24, 25, 'ee')]), axis=0)) == 32
# assert foreground peak is at 0 FR bin (just due to FR resolution)
V.fft_data(data=V.clean_model, keys=[(24, 25, 'ee')], ax='time', window='hann', edgecut_low=10, edgecut_hi=10, overwrite=True)
assert np.argmax(np.mean(np.abs(V.dfft[(24, 25, 'ee')]), axis=1)) == 30
# assert foreground peak is at both 0 FR and 0 delay bin
V.fft_data(data=V.clean_model, keys=[(24, 25, 'ee')], ax='both', window='tukey', alpha=0.5, edgecut_low=10, edgecut_hi=10, overwrite=True)
assert np.argmax(np.mean(np.abs(V.dfft[(24, 25, 'ee')]), axis=0)) == 32
assert np.argmax(np.mean(np.abs(V.dfft[(24, 25, 'ee')]), axis=1)) == 30
# check various kwargs
V.fft_data(keys=[(24, 25, 'ee')], assign='foo', ifft=True, fftshift=True)
delays = V.delays
assert hasattr(V, 'foo')
V.fft_data(keys=[(24, 25, 'ee')], assign='foo', overwrite=True, ifft=False, fftshift=False)
np.testing.assert_array_almost_equal(delays, np.fft.fftshift(V.delays))
# test flag factorization
flags = V.factorize_flags(inplace=False, time_thresh=0.05)
assert np.all(flags[(24, 25, 'ee')][45, :])
assert np.all(flags[(24, 25, 'ee')][:, 5])
def test_fft_data(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# fft
V.fft_data(zeropad=30, ifft=False)
assert V.dfft[(24, 25, 'ee')].shape == (60, 124)
# exceptions
pytest.raises(ValueError, V.fft_data, ax='foo')
pytest.raises(ValueError, V.fft_data, keys=[])
pytest.raises(ValueError, V.fft_data, keys=[('foo')])
# THIS UNIT TEST IS BROKEN!!!
# See https://github.com/HERA-Team/hera_cal/issues/603
def test_trim_model(self):
# load data
V = VisClean(os.path.join(DATA_PATH, "PyGSM_Jy_downselect.uvh5"))
V.read(bls=[(23, 23, 'ee'), (23, 24, 'ee')])
# interpolate to 768 frequencies
freqs = np.linspace(120e6, 180e6, 768)
for k in V.data:
V.data[k] = interpolate.interp1d(V.freqs, V.data[k], axis=1, fill_value='extrapolate', kind='cubic')(freqs)
V.flags[k] = np.zeros_like(V.data[k], dtype=np.bool)
V.freqs = freqs
# the old unit test was using the wrong dnu (for the original frequencies) which means that it was actually cleaning
# out to 1250 ns. I've fixed this dnu bug and used a larger min_dly below.
V.Nfreqs = len(V.freqs)
# dnu should have also been set here to be np.diff(np.median(freqs))
# but it wasn't. Because of this, the old version of vis_clean was cleaning with a
# delay width = intended delay width x (manually set dnu / original dnu of the attached data)
np.random.seed(0)
k = (23, 24, 'ee')
Op = noise.bm_poly_to_omega_p(V.freqs / 1e9)
V.data[k] += noise.sky_noise_jy(V.data[(23, 23, 'ee')], V.freqs / 1e9, V.lsts, Op, inttime=50)
# add lots of random flags
f = np.zeros(V.Nfreqs, dtype=np.bool)[None, :]
f[:, 127:156] = True
f[:, 300:303] = True
f[:, 450:455] = True
f[:, 625:630] = True
V.flags[k] += f
# Note that the intended delay width of this unit test was 300 ns but because of the dnu bug, the delay width was
# actually 300 x V.dnu / np.mean(np.diff(V.freqs))
# the new vis_clean never explicitly references V.dnu so it doesn't have problems and uses the correct delay width.
# however, using the correct delay width causes this unit test to fail.
# so we need to fix it. SEP (Somebody Else's PR).
V.vis_clean(data=V.data, flags=V.flags, keys=[k], tol=1e-6, min_dly=300. * (V.dnu / np.mean(np.diff(V.freqs))), ax='freq', overwrite=True, window='tukey', alpha=0.2)
V.fft_data(V.data, window='bh', overwrite=True, assign='dfft1')
V.fft_data(V.clean_data, window='bh', overwrite=True, assign='dfft2')
# trim model
mdl, n = vis_clean.trim_model(V.clean_model, V.clean_resid, V.dnu, noise_thresh=3.0, delay_cut=500,
kernel_size=21, polyfit_deg=None)
clean_data2 = deepcopy(V.clean_data)
clean_data2[k][V.flags[k]] = mdl[k][V.flags[k]]
V.fft_data(clean_data2, window='bh', overwrite=True, assign='dfft3')
# get averaged spectra
n1 = vis_clean.noise_eq_bandwidth(dspec.gen_window('bh', V.Nfreqs))
n2 = vis_clean.noise_eq_bandwidth(dspec.gen_window('bh', V.Nfreqs) * ~V.flags[k][0])
d1 = np.mean(np.abs(V.dfft1[k]), axis=0) * n1
d2 = np.mean(np.abs(V.dfft2[k]), axis=0) * n2
d3 = np.mean(np.abs(V.dfft3[k]), axis=0) * n2
# confirm that dfft3 and dfft1 match while dfft2 and dfft1 do not near CLEAN boundary
select = (np.abs(V.delays) < 300) & (np.abs(V.delays) > 100)
assert np.isclose(np.mean(np.abs(d1)[select]), np.mean(np.abs(d3)[select]), atol=10)
assert not np.isclose(np.mean(np.abs(d1)[select]), np.mean(np.abs(d2)[select]), atol=10)
# test that polynomial fitting is a good fit
_, n1 = vis_clean.trim_model(V.clean_model, V.clean_resid, V.dnu, noise_thresh=3.0, delay_cut=500,
kernel_size=None, polyfit_deg=None)
_, n2 = vis_clean.trim_model(V.clean_model, V.clean_resid, V.dnu, noise_thresh=3.0, delay_cut=500,
kernel_size=None, polyfit_deg=5)
assert (np.std(n1[k] - n2[k]) / np.mean(n2[k])) < 0.1 # assert residual is below 10% of fit
# test well-conditioned check takes effect
V2 = deepcopy(V)
V2.clean_resid[k][:-2] = 0.0 # zero all the data except last two integrations
_, n2 = vis_clean.trim_model(V2.clean_model, V2.clean_resid, V2.dnu, noise_thresh=3.0, delay_cut=500,
kernel_size=None, polyfit_deg=5)
assert np.all(np.isclose(n2[k][-1], n1[k][-1])) # assert non-zeroed output are same as n1 (no polyfit)
def test_neb(self):
n = vis_clean.noise_eq_bandwidth(dspec.gen_window('blackmanharris', 10000))
assert np.isclose(n, 1.9689862471203075)
def test_zeropad(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# test basic zeropad
d, _ = vis_clean.zeropad_array(V.data[(24, 25, 'ee')], zeropad=30, axis=-1, undo=False)
assert d.shape == (60, 124)
assert np.allclose(d[:, :30], 0.0)
assert np.allclose(d[:, -30:], 0.0)
d, _ = vis_clean.zeropad_array(d, zeropad=30, axis=-1, undo=True)
assert d.shape == (60, 64)
# test zeropad with bool
f, _ = vis_clean.zeropad_array(V.flags[(24, 25, 'ee')], zeropad=30, axis=-1, undo=False)
assert f.shape == (60, 124)
assert np.all(f[:, :30])
assert np.all(f[:, -30:])
# zeropad with binvals
d, bval = vis_clean.zeropad_array(V.data[(24, 25, 'ee')], zeropad=30, axis=0, binvals=V.times)
assert np.allclose(np.median(np.diff(V.times)), np.median(np.diff(bval)))
assert len(bval) == 120
# 2d zeropad
d, bval = vis_clean.zeropad_array(V.data[(24, 25, 'ee')], zeropad=(30, 10), axis=(0, 1), binvals=[V.times, V.freqs])
assert d.shape == (120, 84)
assert (bval[0].size, bval[1].size) == (120, 84)
# un-pad with bval
d, bval = vis_clean.zeropad_array(d, zeropad=(30, 10), axis=(0, 1), binvals=bval, undo=True)
assert d.shape == (60, 64)
assert (bval[0].size, bval[1].size) == (60, 64)
# test VisClean method
V.zeropad_data(V.data, binvals=V.times, zeropad=10, axis=0, undo=False)
assert V.data[(24, 25, 'ee')].shape == (80, 64)
assert V.data.binvals.size == 80
# exceptions
pytest.raises(ValueError, vis_clean.zeropad_array, V.data[(24, 25, 'ee')], axis=(0, 1), zeropad=0)
pytest.raises(ValueError, vis_clean.zeropad_array, V.data[(24, 25, 'ee')], axis=(0, 1), zeropad=(0,))
def test_filter_argparser(self):
sys.argv = [sys.argv[0], 'a', '--clobber', '--spw_range', '0', '20']
parser = vis_clean._filter_argparser()
a = parser.parse_args()
assert a.infilename == 'a'
assert a.clobber is True
assert a.spw_range[0] == 0
assert a.spw_range[1] == 20
assert a.time_thresh == 0.05
assert not a.factorize_flags
def test_filter_argparser_multifile(self):
# test multifile functionality of _filter_argparser
sys.argv = [sys.argv[0], 'a', '--clobber', '--spw_range', '0', '20', '--calfilelist', 'cal1', 'cal2', 'cal3',
'--datafilelist', 'a', 'b', 'c']
parser = vis_clean._filter_argparser(multifile=True)
a = parser.parse_args()
assert a.datafilelist == ['a', 'b', 'c']
assert a.calfilelist == ['cal1', 'cal2', 'cal3']
assert a.infilename == 'a'
assert a.clobber is True
assert a.spw_range[0] == 0
assert a.spw_range[1] == 20
assert a.time_thresh == 0.05
assert not a.factorize_flags
def test_time_chunk_from_baseline_chunks_argparser(self):
sys.argv = [sys.argv[0], 'a', '--clobber', '--baseline_chunk_files', 'a', 'b', 'c', 'd', '--outfilename', 'a.out']
parser = vis_clean.time_chunk_from_baseline_chunks_argparser()
a = parser.parse_args()
assert a.clobber
for char in ['a', 'b', 'c', 'd']:
assert char in a.baseline_chunk_files
assert a.time_chunk_template == 'a'
assert a.outfilename == 'a.out'
def test_time_chunk_from_baseline_chunks(self, tmp_path):
# First, construct some cross-talk baseline files.
datafiles = [os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5"),
os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5")]
cals = [os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only.part1"),
os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only.part2")]
# make a cache directory
cdir = tmp_path / "cache_temp"
cdir.mkdir()
# cross-talk filter chunked baselines
for filenum, file in enumerate(datafiles):
baselines = io.baselines_from_filelist_position(file, datafiles, polarizations=['ee'])
fname = 'temp.fragment.part.%d.h5' % filenum
fragment_filename = tmp_path / fname
xf.load_xtalk_filter_and_write_baseline_list(datafiles, baseline_list=baselines, calfile_list=cals,
spw_range=[0, 20], cache_dir=cdir, read_cache=True, write_cache=True,
res_outfilename=fragment_filename, clobber=True)
# load in fragment and make sure the number of baselines is equal to the length of the baseline list
hd_fragment = io.HERAData(str(fragment_filename))
assert len(hd_fragment.bls) == len(baselines)
assert hd_fragment.Ntimes == 60
assert hd_fragment.Nfreqs == 20
fragments = glob.glob(DATA_PATH + '/test_output/temp.fragment.h5.part*')
# reconstitute the filtered data
for filenum, file in enumerate(datafiles):
# reconstitute
fname = 'temp.reconstituted.part.%d.h5' % filenum
vis_clean.time_chunk_from_baseline_chunks(time_chunk_template=file,
baseline_chunk_files=glob.glob(str(tmp_path / 'temp.fragment.part.*.h5')), clobber=True,
outfilename=str(tmp_path / fname))
# load in the reconstituted files.
hd_reconstituted = io.HERAData(glob.glob(str(tmp_path / 'temp.reconstituted.part.*.h5')))
hd_reconstituted.read()
# compare to xtalk filtering the whole file.
xf.load_xtalk_filter_and_write(infilename=os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5"),
calfile=os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only"),
res_outfilename=str(tmp_path / 'temp.h5'), clobber=True, spw_range=[0, 20])
hd = io.HERAData(str(tmp_path / 'temp.h5'))
hd.read()
assert np.all(np.isclose(hd.data_array, hd_reconstituted.data_array))
assert np.all(np.isclose(hd.flag_array, hd_reconstituted.flag_array))
assert np.all(np.isclose(hd.nsample_array, hd_reconstituted.nsample_array))
# Do the same thing with time-bounds mode.
for filenum, file in enumerate(datafiles):
# reconstitute
fname = 'temp.reconstituted.part.%d.h5' % filenum
vis_clean.time_chunk_from_baseline_chunks(time_chunk_template=file,
baseline_chunk_files=glob.glob(str(tmp_path / 'temp.fragment.part.*.h5')), clobber=True,
outfilename=str(tmp_path / fname), time_bounds=True)
# load in the reconstituted files.
hd_reconstituted = io.HERAData(glob.glob(str(tmp_path / 'temp.reconstituted.part.*.h5')))
hd_reconstituted.read()
# compare to xtalk filtering the whole file.
xf.load_xtalk_filter_and_write(infilename=os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.uvh5"),
calfile=os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only"),
res_outfilename=str(tmp_path / 'temp.h5'), clobber=True, spw_range=[0, 20])
hd = io.HERAData(str(tmp_path / 'temp.h5'))
hd.read()
assert np.all(np.isclose(hd.data_array, hd_reconstituted.data_array))
assert np.all(np.isclose(hd.flag_array, hd_reconstituted.flag_array))
assert np.all(np.isclose(hd.nsample_array, hd_reconstituted.nsample_array))
|
from django.contrib import admin
from .models import Service, Position, Employee, Feature, Plan, Client
@admin.register(Position)
class PositionAdmin(admin.ModelAdmin):
list_display = ('position', 'active', 'modified')
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
list_display = ('service', 'icon', 'active', 'modified')
@admin.register(Employee)
class EmployeeAdmin(admin.ModelAdmin):
list_display = ('name', 'position', 'active', 'modified')
@admin.register(Feature)
class FeatureAdmin(admin.ModelAdmin):
list_display = ('title', 'icon', 'active', 'modified')
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
list_display = ('name', 'active', 'modified')
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = ('name', 'occupation', 'rating', 'active', 'modified')
|
import keras as k
from keras.models import Graph
from keras.layers.core import *
from keras.layers.convolutional import *
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from data_utils import *
from collections import defaultdict
import random
#########################
### Utility Functions ###
#########################
def chopra_loss(y_true, y_pred):
''' Contrastive loss. The Chopra '05 form, (1-Y)(2/Q)(Ew)^2 + (Y) 2 Q e^(-2.77/Q * Ew),
is kept in the commented-out lines below; the active version is the margin-based
contrastive loss. Must be written with keras.backend (K) ops. '''
#Q = 500.
#return (1 - y_true) * 2 / Q * K.square(y_pred) + y_true * 2 * Q * K.exp(-2.77 / Q * y_pred)
margin = 1
loss = K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
return loss
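# A hedged worked example of the loss above (not from the original file): with
# margin = 1 and this file's pair labels (0 = genuine, 1 = impostor), the
# y_true = 1 branch contributes d**2 and the y_true = 0 branch contributes
# max(1 - d, 0)**2 for a predicted distance d; e.g. d = 0.3 gives 0.09 and 0.49
# respectively, and the batch loss is the mean of the per-pair terms.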
def l2dist(x):
assert len(x) == 2
y, z = x.values()
return K.sqrt(K.sum(K.square(y - z), axis=1, keepdims=True))
def l1dist(x):
''' Chopra '05 computes output = || G(X_1) - G(X_2) ||
y is G(X_1)
z is G(X_2) '''
y, z = x.values()
return K.sum(K.abs(y - z), axis=1, keepdims=True)
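# A minimal numpy sketch (an illustration, not part of the original pipeline) of
# what l2dist/l1dist compute for a batch of embedding pairs, using plain arrays
# instead of Keras backend tensors. The helper name is ours, not the project's.
def _numpy_pair_distances(left, right):
    import numpy as np
    left = np.asarray(left, dtype=float)    # shape (batch, features)
    right = np.asarray(right, dtype=float)  # shape (batch, features)
    l2 = np.sqrt(np.sum(np.square(left - right), axis=1, keepdims=True))
    l1 = np.sum(np.abs(left - right), axis=1, keepdims=True)
    return l2, l1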
def generate_data(d, examples_per_image=1):
''' Generates 50% genuine and 50% impostor pairs
Returns a ([left_x, right_x], y_target) tuple. '''
print 'Generating data...'
(x_genuine_1, x_genuine_2), y_genuine = generate_genuine_data(d, examples_per_image=examples_per_image)
(x_impostor_1, x_impostor_2), y_impostor = generate_impostor_data(d, examples_per_image=examples_per_image)
index_permutation = np.random.permutation(np.arange(x_genuine_1.shape[0] + x_impostor_1.shape[0]))
left_x = np.concatenate((x_genuine_1, x_impostor_1), axis=0)[index_permutation,:]
right_x = np.concatenate((x_genuine_2, x_impostor_2), axis=0)[index_permutation,:]
y_target = np.concatenate((y_genuine, y_impostor), axis=0)[index_permutation]
print 'Done generating data'
return [left_x, right_x], y_target
def generate_genuine_data(d, examples_per_image=1):
left_x, right_x = [], []
for label in d:
images = d[label]
num_images = len(images)
for i in xrange(num_images):
for j in xrange(examples_per_image): # every image will have examples_per_image genuine matches
left_x.append(images[i])
right_x.append(images[random.randint(0, num_images - 1)])
return [np.array(left_x), np.array(right_x)], np.zeros(len(left_x))
def generate_impostor_data(d, examples_per_image=1):
left_x, right_x = [], []
for label in d:
images = d[label]
num_images = len(images)
different_labels = [z for z in xrange(len(d)) if z != label]
for i in xrange(num_images):
for j in xrange(examples_per_image):
left_x.append(images[i])
right_x.append(random.choice(d[random.choice(different_labels)]))
return [np.array(left_x), np.array(right_x)], np.ones(len(left_x))
def invert_dataset(x, y):
d = defaultdict(lambda : [])
for i, label in enumerate(y):
d[label].append(x[i,:,:,:])
return d
##########################
### Siamese Net Object ###
##########################
class SiameseNet:
''' A rough implementation of Chopra et al. 2005's Siamese network.
Essentially a wrapper of a Sequential which takes inputs from a Siamese portion
and adds one more layer that merges the two identical legs (with a custom merge function). '''
# Defaults
TRAINING_BATCH_SIZE = 64
TRAINING_NB_EPOCHS = 2
VALIDATION_BATCH_SIZE = 1
PREDICT_BATCH_SIZE = 1
INPUT_LEFT = 'left'
INPUT_RIGHT = 'right'
OUTPUT = 'output'
def __init__(self, structure, input_shape, verbose=True):
self.input_shape = input_shape
self.verbose = verbose
self.construct(structure)
def construct(self, structure):
''' structure - a list of (is_shared, layer_fn) tuples detailing the structure
of the Siamese part of the network
is_shared - boolean, whether or not the layer is shared
layer_fn - a generator function for a layer '''
self.graph = Graph()
input_left = self.INPUT_LEFT
input_right = self.INPUT_RIGHT
self.graph.add_input(name=input_left, input_shape=self.input_shape)
self.graph.add_input(name=input_right, input_shape=self.input_shape)
unique_name = 'name'
for is_shared, layer_fn in structure:
if is_shared:
self.graph.add_shared_node(
layer_fn(),
name=unique_name,
inputs=[input_left, input_right],
outputs=[input_left+'\'', input_right+'\''])
else:
self.graph.add_node(
layer_fn(),
input=input_left,
name=input_left+'\'')
self.graph.add_node(
layer_fn(),
input=input_right,
name=input_right+'\'')
input_left += '\''
input_right += '\''
unique_name += '0'
self.graph.add_node(Lambda(l2dist),
inputs=[input_left, input_right],
merge_mode='join',
name='dist')
self.graph.add_output(name=self.OUTPUT, input='dist')
if self.verbose:
print 'Constructed a SiameseNet.'
def compile(self):
self.graph.compile(loss={'output': chopra_loss}, optimizer='adam')
if self.verbose:
print 'Successfully compiled the SiameseNet.'
def _transform_data(self, x, y=None):
data = {
self.INPUT_LEFT: x[0],
self.INPUT_RIGHT: x[1]
}
if y is not None:
data[self.OUTPUT] = y
return data
def fit(self, x, y, validation_data=None, nb_epoch=TRAINING_NB_EPOCHS,
batch_size=TRAINING_BATCH_SIZE, shuffle=True):
''' Train it. '''
self.graph.fit(self._transform_data(x, y), nb_epoch=nb_epoch, batch_size=batch_size)
if self.verbose:
print 'Done training the SiameseNet.'
def evaluate(self, x, y, batch_size=VALIDATION_BATCH_SIZE):
''' Validate it. '''
validation_loss = self.graph.evaluate(self._transform_data(x, y), batch_size=batch_size)
if self.verbose:
print 'Validation loss is', validation_loss
return validation_loss
def predict(self, x, batch_size=PREDICT_BATCH_SIZE):
''' Predict it. (Not sure if this is helpful) '''
prediction = self.graph.predict(self._transform_data(x), batch_size=batch_size)
if self.verbose:
print 'Predicted probabilities are', prediction
return prediction
def save(self, filepath):
self.graph.save_weights(filepath)
def load(self, filepath):
self.graph.load_weights(filepath)
def similarity(self, x1, x2):
pass # The crux of this project
############
### Main ###
############
def _train_sn(sn, x_train, y_train, filepath):
d_train = invert_dataset(x_train, y_train)
sn.fit(*generate_data(d_train, examples_per_image=1)) #, validation_data=generate_data(x_val, y_val))
#sn.save(filepath)
def main():
# Prepare data
print 'Getting CIFAR10 data...'
data = get_CIFAR10_data()
x_train, y_train = data['X_train'], data['y_train']
x_val, y_val = data['X_val'], data['y_val']
N = x_train.shape[0]
# Specify structure of Siamese part of SiameseNet
# This part needs to be improved. I'm kind of just using random layers.
init = 'glorot_uniform'
layers = []
layers.append((
False,
lambda : BatchNormalization(
epsilon=1e-6,
mode=0,
axis=1,
momentum=0.9,
weights=None)
)) # Not-yet-tuned batch norm without shared weights
layers.append((True, lambda : Convolution2D(10, 3, 3, init=init, border_mode='same')))
for _ in xrange(1):
layers.append((True, lambda : Convolution2D(10, 3, 3, init=init, border_mode='same')))
layers.append((False, lambda : Activation('relu'))) # ReLU activation without shared weights
layers.append((False, lambda : Flatten()))
layers.append((False, lambda : Dense(100)))
sn = SiameseNet(layers, input_shape=(3, 32, 32), verbose=True)
sn.compile()
_train_sn(sn, x_train, y_train, filepath='weights.h5')
#sn.load(filepath='weights.h5')
d_val = invert_dataset(x_val, y_val)
loss = sn.evaluate(*generate_data(d_val, examples_per_image=5))
val_x_dat, val_y_dat = generate_data(d_val, examples_per_image=5)
prediction = sn.predict(val_x_dat)[SiameseNet.OUTPUT]
preds = [0,0]
for i,p in enumerate(prediction):
if val_y_dat[i] > .5:
preds[1] += p[0]
else:
preds[0] += p[0]
print preds
if __name__ == '__main__':
main()
|
import pyscreenshot
import numpy as np
import cv2
import os
class DistanceDetector:
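"""Screen-scraping distance estimator (this summary is a reading of the code below).
Grabs a fixed screen region, finds the player piece with a colour-filtered Hough
circle search (offset down to its base), locates the centre of the next platform by
scanning for the first non-background pixel, and get_distance() returns the pixel
distance between the two. save_im() writes annotated debug images."""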
x1 = 66
x2 = 760 + x1
y1 = 450
y2 = 800 + y1
def __init__(self):
self.step = 0
def screen_shoot(self):
im = pyscreenshot.grab(bbox=(self.x1, self.y1, self.x2, self.y2))
self.im = np.array(im)
self.im_gray = cv2.cvtColor(self.im, cv2.COLOR_BGR2GRAY)
# getting background color range
left_top = self.im[0][0]
self.b_range_background = (left_top[0] - 10, left_top[0] + 10)
self.g_range_background = (left_top[1] - 10, left_top[1] + 10)
self.r_range_background = (left_top[2] - 40, left_top[2] + 10)
def filter_foot_color(self, c):
try:
color = self.im[int(c[1]), int(c[0])]
except IndexError:
return False
return abs(color[0] - 88) < 20 and abs(color[1] - 81) < 20 and abs(color[2] - 128) < 20
def foot_detection(self):
circles = cv2.HoughCircles(self.im_gray,cv2.HOUGH_GRADIENT,1.2,100,
param1=50, param2=20, minRadius=20, maxRadius=25)[0]
circles = list(filter(self.filter_foot_color, circles))
head = circles[0]
self.foot = head
self.foot[1] += 114
#cv2.circle(im, (foot[0], foot[1]), 2, (0, 0, 255), 3)
def is_background(self, color):
return color[0] >= self.b_range_background[0] and color[0] <= self.b_range_background[1] and\
color[1] >= self.g_range_background[0] and color[1] <= self.g_range_background[1] and\
color[2] >= self.r_range_background[0] and color[2] <= self.r_range_background[1]
def check_color_sim(self, color1, color2):
if abs(int(color1[0]) - int(color2[0])) > 15 or abs(int(color1[1]) - int(color2[1])) > 15 or\
abs(int(color1[2]) - int(color2[2])) > 15:
return False
return True
def find_first_point(self, im):
for y in range(im.shape[0]):
for x in range(im.shape[1]):
color = im[y][x]
color_down = im[y+1][x]
if abs(x - self.foot[0]) > 25 and not self.is_background(color) and\
self.check_color_sim(color, color_down):
return (y, x)
def find_left_right_point(self, im, first_point, foot):
y_top, x_top = first_point
x_range_left = range(x_top, max(int(foot[0]), x_top - 300), -1) if foot[0] < x_top else \
range(x_top, x_top - 300, -1)
x_range_right = range(x_top, min(int(foot[0]), x_top + 300)) if foot[0] > x_top else \
range(x_top, min(x_top + 300, im.shape[1] - 1))
y_range = range(y_top, min(y_top + 300, int(foot[1])))
sim_colors = []
for y in y_range:
x_left_max = first_point[1]
x_right_max = first_point[1]
if self.is_background(im[y][x_top]):
break
for x in x_range_left:
if self.is_background(im[y][x]):
break
if self.check_color_sim(im[y][x], im[first_point[0]][first_point[1]]) and x < x_left_max:
sim_colors.append((y, x))
x_left_max = x
for x in x_range_right:
if self.is_background(im[y][x]):
break
if self.check_color_sim(im[y][x], im[first_point[0]][first_point[1]]) and x > x_right_max:
sim_colors.append((y, x))
x_right_max = x
if foot[0] < x_top:
return max(sim_colors, key = lambda x: x[1])
return min(sim_colors, key=lambda x: x[1])
def find_center(self):
self.first_point = self.find_first_point(self.im)
self.another_point = self.find_left_right_point(self.im, self.first_point, self.foot)
self.center = (self.first_point[1], self.another_point[0])
def get_distance(self):
return np.math.hypot(self.foot[0] - self.center[0], self.foot[1] - self.center[1])
def save_im(self):
self.step += 1
cv2.circle(self.im, (self.foot[0], self.foot[1]), 2, (0, 0, 255), 3)
cv2.circle(self.im, (self.first_point[1], self.first_point[0]), 2, (255, 0, 0), 3)
cv2.circle(self.im, (self.another_point[1], self.another_point[0]), 2, (0, 255, 0), 3)
os.makedirs("./debug_images/", exist_ok=True)
cv2.imwrite('./debug_images/step_'+str(self.step)+'.png', self.im)
|
import npyscreen
import argparse
ADDRESS=''
class TCUMonitorForm(npyscreen.Form):
def afterEditing(self):
self.parentApp.setNextForm(None)
def create(self):
self.keypress_timeout = 10 # refresh period in 100ms (10 = 1s)
self.text_address = self.add(npyscreen.TitleText, name='IP Address', editable=False, value=ADDRESS)
self.text_connection = self.add(npyscreen.TitleText, name='Connection', editable=False, value='Disconnected')
self.text_state = self.add(npyscreen.TitleText, name='State', editable=False, value='?')
self.text_num_pulses = self.add(npyscreen.TitleText, name='Pulses', editable=False, value='?')
self.text_num_repeats = self.add(npyscreen.TitleText, name='Repeats', editable=False, value='?')
self.text_pre_pulse = self.add(npyscreen.TitleText, name='Pre Pulse', editable=False, value='?')
self.text_x_amp_delay = self.add(npyscreen.TitleText, name='X Amp Delay', editable=False, value='?')
self.text_l_amp_delay = self.add(npyscreen.TitleText, name='L Amp Delay', editable=False, value='?')
self.text_rex_delay = self.add(npyscreen.TitleText, name='Rex Delay', editable=False, value='?')
self.grid_pulses = self.add(npyscreen.GridColTitles, name='Pulses', editable=False, column_width=10, height=7, max_height=10)
self.grid_pulses.col_titles = ['Pulse', 'Pulse Width', 'PRI', 'Mode', 'Frequency']
self.grid_pulses.values = [
['0', '10.0', '1000', '0', '1300'],
['1', '10.0', '1000', '1', '1300'],
['2', '10.0', '1000', '2', '1300'],
['3', '10.0', '1000', '3', '1300'],
['4', '10.0', '1000', '4', '8500'],
['5', '10.0', '1000', '5', '8500'],
]
# self.button_arm = self.add(npyscreen.ButtonPress, name='Arm')
# self.button_arm.whenPressed = self.when_pressed_arm
def when_pressed_arm(self):
self.button_arm.name = 'disarm'
def while_waiting(self):
# called every keypress_timeout period when user not interacting
pass
class TCUMonitorApplication(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm('MAIN', TCUMonitorForm, name='TCU MONITOR')
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage='monitor [address]',
description='Monitor program for '
'NeXtRAD\'s Timing Control Unit')
parser.add_argument('address', help='IP address of TCU')
args = parser.parse_args()
ADDRESS = args.address
app = TCUMonitorApplication()
app.run()
|
import unittest
from katas.kyu_6.take_a_num_and_sum_digits_to_consecutive_powers import \
sum_dig_pow
class SumDigitsToConsecutivePowersTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(sum_dig_pow(1, 10), [1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_equals_2(self):
self.assertEqual(sum_dig_pow(1, 100), [1, 2, 3, 4, 5, 6, 7, 8, 9, 89])
def test_equals_3(self):
self.assertEqual(sum_dig_pow(10, 89), [89])
def test_equals_4(self):
self.assertEqual(sum_dig_pow(10, 100), [89])
def test_equals_5(self):
self.assertEqual(sum_dig_pow(90, 100), [])
def test_equals_6(self):
self.assertEqual(sum_dig_pow(89, 135), [89, 135])
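# A hedged reference sketch of the behaviour exercised above (not the imported
# implementation): n in [a, b] qualifies when the sum of its digits, each raised
# to consecutive powers starting at 1, equals n (e.g. 89 = 8**1 + 9**2 and
# 135 = 1**1 + 3**2 + 5**3). The helper name is illustrative only.
def _sum_dig_pow_sketch(a, b):
    return [n for n in range(a, b + 1)
            if sum(int(d) ** p for p, d in enumerate(str(n), start=1)) == n]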
|
import os
def bad_filename(filename):
    return repr(filename)[1:-1]
# Assumes the filenames come from a directory listing; print falls back to a
# repr-based form when the name cannot be encoded for the terminal.
for filename in os.listdir('.'):
    try:
        print filename
    except UnicodeEncodeError:
        print bad_filename(filename)
|
import subprocess
import os
import sys
import re
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "families"))
import simulations_common
import experiments as exp
import fam
datasets = []
cores = 40
subst_model = "GTR+G"
gene_trees = ["raxml-ng"]
launch_mode = "normald"
replicates = range(3000, 3010)
varying_params = []
varying_params.append((None, ["none"]))
#varying_params.append(("sites", ["sites50", "sites200", "sites300"]))
#varying_params.append(("transfer_rate", ["t0.0", "t0.5", "t2.0", "t5.0"]))
#varying_params.append(("dup_rate", ["d0.5_l0.5_t0.5", "d2.0_l2.0_t2.0"]))
varying_params.append(("species", ["s15", "s35", "s50"]))
#varying_params.append(("families", ["f50", "f200", "f500", "f1000"]))
#varying_params.append(("population", ["pop10000000", "pop100000000", "pop1000000000"]))
tag = "varydtlunif"
fixed_point = "ssim_" + tag + "_s25_f100_sites100_GTR_bl1.0_d1.0_l1.0_t1.0_p0.0_pop10_mu1.0_theta0.0_seed20"
# run GeneRax on every dataset
def run_generax(dataset, subst_model, cores, launch_mode, model, radius):
command = []
command.append(exp.python())
command.append(os.path.join(exp.scripts_root, "generax/launch_generax.py"))
command.append(dataset)
command.append(subst_model)
command.append("SPR")
command.append("true")
command.append("raxml-ng")
command.append(launch_mode)
command.append(str(cores))
command.append("--rec-model")
command.append(model)
command.append("--max-spr-radius")
command.append(str(radius))
command.append("--analyze")
command.append("no")
print("-> Running " + " ".join(command))
subprocess.check_call(command)
for entry in varying_params:
datasets = simulations_common.get_dataset_list(fixed_point, entry[1], replicates)
for dataset in datasets:
run_generax(dataset, subst_model, cores, launch_mode, "UndatedDTL", 0)
run_generax(dataset, subst_model, cores, launch_mode, "UndatedDTL", 5)
|
from django.shortcuts import render
from django.views import View
from db.login_mixin import LoginRequiredMixin
from .models import Position
from enterprice.models import EnterPrice
# Create your views here.
class JobCustomizeView(LoginRequiredMixin,View):
'''Job subscription page'''
def get(self,request):
return render(request, 'job_customize.html')
class JobListView(View):
'''Job listing'''
def get(self,request):
position_list = Position.objects.all()
return render(request, 'index.html',{"position_list":position_list})
class JobView(View):
'''Job detail page'''
def get(self,request,job_id):
job = Position.objects.get(id=job_id)
return render(request, 'job.html',{"job":job})
class PositionView(View):
'''Position information (search) page'''
def get(self,request):
return render(request, 'search.html')
class RecuiterView(LoginRequiredMixin,View):
'''Recruitment posting page'''
def get(self,request):
return render(request, 'recruiter.html')
def post(self,request):
name = request.POST.get("position","")
city = request.POST.get("city", "")
degree_required = request.POST.get("degree_required","")
attractive_desc = request.POST.get("attractive_desc","")
salary = request.POST.get("salary","")
experience_required = request.POST.get("experience_required", "")
position_type = request.POST.get("position_type", "")
effect_days = request.POST.get("effect_days", "")
position_desc = request.POST.get("position_desc","")
if not all([name,city,degree_required,attractive_desc,salary,experience_required,position_type,effect_days,position_desc]):
message="信息输入不完整"
return render(request, "enterpriceinfo.html", {"message": message})
else:
print([name,city,degree_required,attractive_desc,salary,experience_required,position_type,effect_days,position_desc])
position = Position()
position.name = name
position.city = city
position.degree_required = degree_required
position.attractive_desc = attractive_desc
position.salary = salary
position.experience_required = experience_required
position.position_type = position_type
position.effect_days = effect_days
position.position_desc = position_desc
position.enterprice_id = EnterPrice.objects.get(user_id=request.user.id,is_delete=0).id
position.save()
message = "招聘信息发布完成"
return render(request,"successful.html",{"message":message})
|
# Generated by Django 2.2.1 on 2019-05-18 02:01
from django.db import migrations, models
import django.utils.timezone
import filebrowser.fields
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20190518_0117'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'categories'},
),
migrations.RemoveField(
model_name='post',
name='title',
),
migrations.AddField(
model_name='post',
name='header_title',
field=models.CharField(default=django.utils.timezone.now, max_length=100, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='meta_title',
field=models.CharField(default=django.utils.timezone.now, max_length=100, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='modification_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='thumbnail_image',
field=filebrowser.fields.FileBrowseField(blank=True, max_length=500, verbose_name='Thumbnail image'),
),
migrations.AlterField(
model_name='category',
name='category_image',
field=filebrowser.fields.FileBrowseField(blank=True, max_length=500, verbose_name='Category image'),
),
migrations.AlterField(
model_name='category',
name='category_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='post',
name='featured_image',
field=filebrowser.fields.FileBrowseField(blank=True, max_length=500, verbose_name='Featured image'),
),
migrations.AlterField(
model_name='post',
name='is_published',
field=models.BooleanField(default=True, help_text='Check the box to publish post.'),
),
migrations.AlterField(
model_name='tag',
name='tag_name',
field=models.CharField(max_length=50),
),
]
|
"""
Greedily prune a set of conformers.
Usage:
python main.py input 0.1 10
"""
def ignore_smiles(systems, patterns, align_dir):
"""
"""
from openbabel.openbabel import OBSmartsPattern, OBAtomBondIter
remove_dict = {x: [] for x in systems}
# Find the atoms to remove using the smart patterns.
for smi in patterns:
pat = OBSmartsPattern()
pat.Init(smi)
for name, mol in systems.items():
pat.Match(mol)
for match in pat.GetMapList():
for atom_id in match:
remove_dict[name].append(atom_id)
at = mol.GetAtom(atom_id)
for bond in OBAtomBondIter(at):
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
if mol.GetAtom(idx1).GetAtomicNum() == 1:
remove_dict[name].append(idx1)
if mol.GetAtom(idx2).GetAtomicNum() == 1:
remove_dict[name].append(idx2)
remove_dict[name] = sorted(list(set(remove_dict[name])),
reverse=True)
# Remove those atoms.
for name, mol in systems.items():
for idx in remove_dict[name]:
at = mol.GetAtom(idx)
mol.DeleteAtom(at)
return systems
def read_babel(f):
"""
Read in an xyz file into an openbabel molecule.
"""
from openbabel.openbabel import OBMol, OBConversion
conv = OBConversion()
conv.SetInFormat("xyz")
with open(f) as ifile:
sval = "".join([x for x in ifile])
mol = OBMol()
conv.ReadString(mol, sval)
return mol
def write_babel(mol, fname):
"""
Write an xyz file from an openbabel molecule
"""
from openbabel.openbabel import OBConversion
conv = OBConversion()
conv.SetOutFormat("xyz")
sval = conv.WriteString(mol)
with open(fname, "w") as ofile:
ofile.write(sval)
def energy(mol):
"""
Compute the energy of a molecule using GAFF.
"""
from openbabel.openbabel import OBForceField
ff = OBForceField.FindForceField("gaff")
ff.Setup(mol)
return ff.Energy()
def max_deviation(mol1, mol2):
"""
Return the furthest atom distance.
"""
from numpy import array
from numpy.linalg import norm
plist = []
numlist = []
for i in range(mol2.NumAtoms()):
at = mol2.GetAtom(i+1)
plist.append([at.GetX(), at.GetY(), at.GetZ()])
numlist.append(at.GetAtomicNum())
minlist = []
for i in range(mol1.NumAtoms()):
at = mol1.GetAtom(i+1)
pos = array([at.GetX(), at.GetY(), at.GetZ()])
anum = at.GetAtomicNum()
minlist.append(min([norm(pos - pos2)
for pos2, num2 in zip(plist, numlist)
if anum == num2]))
return max(minlist)
def filter(systems, energies, rmsd_cutoff, max_cutoff, energy_cutoff,
align_dir):
"""
Filter the systems based on an RMSD criteria.
"""
from openbabel.openbabel import OBAlign, OBMol
failures = {}
passing = {}
align = OBAlign(True, True)
for f1 in tqdm(list(systems)):
mol1 = systems[f1]
found = False
align.SetRefMol(mol1)
for f2, mol2 in passing.items():
if f1 == f2:
continue
ediff = 100 * abs(energies[f1] - energies[f2]) / abs(energies[f1])
if ediff > energy_cutoff:
continue
# Three times appears to be enough to converge
align.SetTargetMol(mol2)
align.Align()
rmsd_first = align.GetRMSD()
align_mol = OBMol(mol2)
align.UpdateCoords(align_mol)
align.SetTargetMol(align_mol)
align.Align()
rmsd = align.GetRMSD()
align.UpdateCoords(align_mol)
align.SetTargetMol(align_mol)
align.Align()
rmsd = align.GetRMSD()
if rmsd < rmsd_cutoff:
maxdev = max_deviation(mol1, align_mol)
if maxdev < max_cutoff:
failures[(f1, f2)] = {"rmsd": rmsd, "maxdev": maxdev,
"ediff": ediff}
write_babel(align_mol, join(align_dir, f2))
found = True
break
if not found:
passing[f1] = mol1
return passing, failures
if __name__ == "__main__":
from sys import argv
from glob import glob
from os.path import join, basename, exists
from tqdm import tqdm
from yaml import load, SafeLoader
# Get the input path
if len(argv) < 2:
raise Exception("Please specify the configure file.")
with open(argv[1]) as ifile:
parameters = load(ifile, Loader=SafeLoader)
if not exists(parameters["aligned_directory"]):
raise Exception("directory", parameters["aligned_directory"],
"does not exist.")
flist = glob(join(parameters["input"], "*"))
# Read in the systems
print("Reading in systems")
systems = {}
for i in tqdm(range(len(flist))):
f = flist[i]
systems[basename(f)] = read_babel(f)
# Compute the energies
print("Computing Energies")
energies = {}
for f in tqdm(list(systems)):
energies[f] = energy(systems[f])
# Ignore smiles.
ignored = ignore_smiles(systems, parameters["ignore_smiles"],
parameters["aligned_directory"])
# Filter
print("Filtering")
passing, failures = filter(systems, energies,
float(parameters["rmsd_cutoff"]),
float(parameters["max_distance_cutoff"]),
float(parameters["energy_cutoff"]),
parameters["aligned_directory"])
print("Writing Output")
# Write out the failures
with open(join("failures.txt"), "w") as ofile:
ofile.write("System 1\tSystem 2\t")
ofile.write("RMSD\tMax Dist\tEnergy Difference (%)\n")
for k, v in failures.items():
ofile.write(k[0] + "\t" + k[1] + "\t")
ofile.write(str(v["rmsd"]) + "\t")
ofile.write(str(v["maxdev"]) + "\t")
ofile.write(str(v["ediff"]) + "\n")
# Write out the successes
with open(join("successes.txt"), "w") as ofile:
ofile.write("\n".join(list(passing)))
# Write out the energies
with open(join("energies.txt"), "w") as ofile:
for k, ene in energies.items():
ofile.write(k + "\t" + str(ene) + "\n")
|
from __future__ import print_function
import numpy as np
import cv2
import sys
import glob
import os
if __name__ == '__main__':
min_size = (30,30)
max_size = (60,60)
haar_scale = 1.2
min_neighbors = 3
haar_flags = 0
face_cascade = cv2.CascadeClassifier('/opt/ros/kinetic/share/OpenCV-3.3.1-dev/haarcascades/haarcascade_frontalface_default.xml')
path = os.path.dirname(os.path.realpath(__file__)) + '/'
for directory in glob.glob(path + '*'):
index = 0
dir_name = directory.split('/')
dir_name = dir_name[len(dir_name)-1]
print(dir_name, ':')
for file in glob.glob(directory + '/*'):
file_name = file.split('/')
file_name = file_name[len(file_name)-1]
print(file_name[:4])
if file_name[:4] == 'face':
continue
print(file)
image = cv2.imread(file)
# Preprocessing
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY, 0)
# Detecting faces
# todo: add min and max_size parameters
faces = face_cascade.detectMultiScale(gray, haar_scale, min_neighbors, haar_flags)
for (x,y,w,h) in faces:
dh = int(h/3.5)
dw = int(w/3.5)
crop = image[y-dh:y+h+dh, x-dw:x+w+dw]
cv2.imwrite(directory + '/' + dir_name + '_' + str(index) + '.jpg', crop)
index += 1
|
import logging
class numList:
"""This is a numList class.
Attributes:
:maxMin (tuple): tuple of the Max and Min values in the list
:max_diff (list): list of the highest diff between 2 adj values in list
:list_add (int): sum of all the values in the list
"""
def __init__(self, myList=[]):
self.list = myList
self.maxMin = None
self.max_diff = None
self.list_add = None
self.max_Min()
self.find_diff()
self.find_sum()
def max_Min(self):
"""
Finds the max and min in a list of positive values and returns a tuple
:param inputList: Is a list of positive values
:returns: Tuple of the max and min values
:raises ImportError: If numpy is not installed in the env
:raises ValueError: If there are values less than 0
:raises TypeError: If the inputList is not an actual list
"""
inputList = self.list
logging.basicConfig(filename='log.txt', level=logging.DEBUG)
try:
import numpy
except ImportError:
logging.error("missing a module!")
raise ImportError("missing a module!")
for i in inputList:
if i < 0:
logging.warning("Negative value detected")
raise ValueError('Negative value detected')
if not isinstance(inputList, list):
logging.warning('Input is not a list')
raise TypeError('Input is not a list')
myMin = min(inputList)
myMax = max(inputList)
logging.debug(inputList)
logging.debug('Min value: %s', myMin)
logging.debug('Max value: %s', myMax)
maxMinTuple = (myMin, myMax)
logging.info(maxMinTuple)
self.maxMin = maxMinTuple
def find_diff(self):
"""
Finds maximum difference between two adjacent numbers in a list
:param my_list: Is a list of numbers
:returns: Largest difference between two adjacent numbers
:raises ValueError: If my_list has 0 or 1 elements
:raises ImportError: If numpy is not installed in environment
:raises TypeError: If element in my_list is not an int, float, complex
"""
my_list = self.list
logging.basicConfig(filename='log.txt', level=logging.DEBUG)
logging.info('Finding max difference between adjacent values in list')
logging.debug('Printing %s', str(my_list))
n = 0
if len(my_list) < 2:
logging.warning('Not enough values to calculate difference')
raise ValueError('List too small, no difference to compare!')
for i in range(len(my_list)-1):
if(isinstance(my_list[i], (int, float, complex)) and
isinstance(my_list[i+1], (int, float, complex))):
diff = abs(my_list[i+1] - my_list[i])
if diff > n:
n = diff
else:
raise TypeError('List elements must be int, float, complex!')
logging.debug('Returns %s', str(n))
self.max_diff = n
def find_sum(self):
"""
Adds a list of numbers
:param list_var: Is a list of numbers (int, float, complex)
:returns: Addition of values in list
:raises ValueError: If list_var is empty
:raises ImportError: If numpy or numbers not installed in environment
:raises TypeError: If element in list_var is not an int, float,complex
"""
list_var = self.list
try:
import logging
except ImportError:
logging.warning('ImportError Logging')
raise ImportError('Module Logging not found.')
logging.basicConfig(filename='log.txt', level=logging.DEBUG)
try:
import numpy as np
except ImportError:
logging.warning('ImportError Numpy')
raise ImportError('Module Numpy not found.')
if len(list_var) == 0:
raise ValueError('Input list is empty')
try:
import numbers
except ImportError:
logging.warning('ImportError Numbers')
raise ImportError('Module Numbers not found.')
if not isinstance(list_var, list):
logging.warning('Input is not a list')
for x in list_var:
if isinstance(x, (int, float, complex)):
continue
else:
logging.warning('List elements must be int, float or complex')
raise TypeError('List elements must be int, float, or complex')
logging.debug(list_var)
value = np.sum(list_var)
logging.info(value)
self.list_add = value
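# A hedged usage sketch (not part of the original module), exercising numList on a
# short list of positive numbers.
if __name__ == '__main__':
    nl = numList([1, 5, 2, 9])
    print(nl.maxMin)    # (1, 9)
    print(nl.max_diff)  # 7
    print(nl.list_add)  # 17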
|
# Generated by Django 3.1.4 on 2021-01-03 02:54
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Blog', '0004_remove_starred_save'),
]
operations = [
migrations.AddField(
model_name='starred',
name='saved',
field=models.ManyToManyField(related_name='savepost', to=settings.AUTH_USER_MODEL),
),
]
|
import re
import tkinter
from tkinter import filedialog
import threading
import src
from src import CNF
from src.CNF import ClauseSet, Clause
class gui_window:
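"""Top-level Tk window for editing a CNF ClauseSet (summary inferred from the code
below): builds the File menu (New / Open / Save / Exit without saving) and hosts a
clauses_frame that lists clauses and accepts new ones."""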
def __init__(self, name):
self.mainframe = tkinter.Tk()
self.mainframe.title(name)
self.mainframe.geometry("200x200")
self.clauseframe = clauses_frame(self)
self.menubar = tkinter.Menu(self.mainframe)
self.menus = list()
file_menu = tkinter.Menu(self.menubar, title="File")
file_menu.add_command(label="New", command=self.new)
file_menu.add_command(label="Open", command=self.load)
file_menu.add_command(label="Save", command=self.save)
file_menu.add_separator()
file_menu.add_command(label="Exit without saving", command=self.mainframe.destroy)
self.menubar.add_cascade(label="File", menu=file_menu)
self.mainframe.config(menu=self.menubar)
self.active = False
return
def run(self):
self.clauseframe.update()
self.active = True
self.mainframe.mainloop()
self.active = False
return
def new(self, cs=ClauseSet()):
return self.clauseframe.new_clauseset(cs, True)
def load(self):
filename = filedialog.askopenfilename()
return self.clauseframe.load_clauseset(filename)
def save(self):
filename = filedialog.asksaveasfilename()
return self.clauseframe.save_clauseset(filename)
class clauses_frame:
def __init__(self, root: gui_window):
self.root = root
self.frame = tkinter.Frame(self.root.mainframe)
self.frame.pack(fill=tkinter.X)
self.list = tkinter.Listbox(self.frame, selectmode=tkinter.EXTENDED)
self.list.pack(fill=tkinter.X)
self.input = tkinter.Entry(self.frame)
self.input.pack(fill=tkinter.X)
self.addbutton = tkinter.Button(self.frame, text="Add", command=lambda: self.input_clauses())
self.addbutton.pack(fill=tkinter.X)
self.clauseset = ClauseSet()
return
def update(self, force=False):
if not (self.root.active or force):
return
self.list.delete(0, tkinter.END)
if self.clauseset.isContradictionDetected():
self.list.insert(tkinter.END, "~")
return
for e in self.clauseset.clauses:
self.list.insert(tkinter.END, e)
return
def new_clauseset(self, cs=ClauseSet(), forceUpdate=False):
self.clauseset = cs
self.update(forceUpdate)
return
def load_clauseset(self, filename, overwrite=True):
F = open(filename, "r")
s = F.read()
F.close()
L = re.findall(r"[\w|~]+", s)
if overwrite:
self.new_clauseset()
for e in L:
self.clauseset.add(Clause(e))
self.update()
return
def save_clauseset(self, filename):
lines: list[str] = [str(e) for e in self.clauseset.clauses]
lines.sort()
F = open(filename, "w")
F.writelines(lines)
F.close()
return
def input_clauses(self):
raw = self.input.get()
if len(raw) == 0:
return
elif raw == "~":
raw = ""
clauses = raw.split(",")
for e in clauses:
self.clauseset.add(Clause(e))
self.update()
return
def T(gui: gui_window):
while not gui.active:
pass
gui.clauseframe.load_clauseset("testCS_1")
gui.clauseframe.save_clauseset("testCS_2")
return
if __name__ == "__main__":
X = gui_window("name")
X.new(ClauseSet([]))
X.run()
|
print(6*(1-2))
|
import sdl2.ext
class MenuRenderer(sdl2.ext.SoftwareSpriteRenderSystem):
def __init__(self, window):
super(MenuRenderer, self).__init__(window)
def render(self, components, x=None, y=None):
sdl2.ext.fill(self.surface, sdl2.ext.Color(128, 128, 128))
super(MenuRenderer, self).render(components)
|
rule hisat2:
input:fwd="outData/trimmed/{sample}_clean_R1.fq.gz",
rev="outData/trimmed/{sample}_clean_R2.fq.gz"
output:sam="outData/hisat2/{sample}.sam",
log="outData/hisat2/{sample}.log"
params:index=config["ref"]["index"]
threads:config["threads"]
message:"""--- Hisat2 Mapping.---"""
shell:"""
hisat2 -p {threads} -x {params.index} -1 {input.fwd} -2 {input.rev} -S {output.sam} > {output.log} 2>&1
"""
rule sam_sort:
input:"outData/hisat2/{sample}.sam"
output:"outData/sorted_bam/{sample}_sorted.bam"
threads:config["threads"]
shell:"""
samtools view -u {input} | samtools sort -@ {threads} - > {output}
"""
rule htseq:
input:"outData/hisat2/{sample}.sam"
output:"outData/htseq/{sample}_CountNum.txt"
params:gtf=config["ref"]["gtf"]
message:"""---htseq count---"""
shell:"""
htseq-count -f sam {input} {params.gtf} -q > {output}
"""
|
# RachelPotterCH7P1.py
# A program that calculates a person's BMI and determines if it is in a healthy range
def get_bmi():
weight = float(input("Enter your weight in pounds: "))
height = float(input("Enter your height in inches: "))
bmi = (weight * 720)/(height**2)
print("Your BMI is", round(bmi, 2)) # Round to 2 decimal places to avoid super long integers
if 19 <= bmi <= 25:
print("Your BMI is in the healthy range!")
elif bmi < 19:
print("Your BMI is below the healthy range.")
else: # If BMI > 25
print("Your BMI is above the healthy range.")
get_bmi()
# I pledge my honor that I have abided by the Stevens Honor System
# Rachel Potter
|
#-*- coding: UTF-8 -*-
import numpy as np
import operator
import os
from os import listdir
# Doses begin
# Read the data file into a numpy feature matrix and a label list
def file2matrix(filename):
fp = open(filename)
lines = fp.readlines()
num_of_lines = len(lines)
ret_mat = np.zeros((num_of_lines, 3))
label = []
index = 0
for line in lines:
line = line.strip()
l = line.split('\t')
ret_mat[index] = l[0:3]
label.append(l[-1])
index += 1
return ret_mat,label
# kNN classification
def classify0(in_x,dataset,labels,k):
dataset_size = dataset.shape[0]
diff_mat = in_x - dataset # numpy Broadcasting
sq_mat = diff_mat**2
sq_distance = sq_mat.sum(axis=1)
distances = sq_distance**0.5
sort_distance_index = distances.argsort()
class_count = {}
for i in range(k):
cur_label = labels[sort_distance_index[i]]
class_count[cur_label] = class_count.get(cur_label,0) + 1
sort_class_cout = sorted(class_count.iteritems(), key=operator.itemgetter(1), reverse=True)
return sort_class_cout[0][0]
# Min-max normalization
def normal(dataset):
minval = dataset.min(0)
maxval = dataset.max(0)
ranges = maxval - minval
normal_dataset = dataset - minval
normal_dataset = normal_dataset/ranges
return normal_dataset
# Test: hold out the first 10% of samples and report accuracy
def knn_doses():
dataset,labels = file2matrix(os.path.dirname(__file__) +'/datingTestSet.txt')
dataset = normal(dataset)
data_size = dataset.shape[0]
test_data = dataset[0:data_size/10:1,0:]
test_labels = labels[0:data_size/10:1]
dataset = dataset[data_size/10:data_size:1,0:]
labels = labels[data_size/10:data_size:1]
index = 0
true_count = 0
for in_x in test_data:
label = classify0(in_x,dataset,labels,5)
if(label == test_labels[index]):
true_count += 1
index += 1
print true_count,'/',index
# Doses end
knn_doses()
|
from ui.AddSiteWindow_ui import Ui_AddSiteWindow
from PyQt5.QtWidgets import QDialog
class AddSiteWindow(QDialog, Ui_AddSiteWindow):
def __init__(self, parent):
super().__init__(parent)
self.setupUi(self)
self.buttonBox.accepted.connect(self.inputSiteName)
self.buttonBox.rejected.connect(self.cancel)
def inputSiteName(self):
self.newSiteName = self.lineEditSiteName.text()
self.close()
def cancel(self):
self.newSiteName = None
self.close()
|
# Generated by Django 2.0.3 on 2018-03-16 05:26
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('static_pages', '0003_auto_20180313_0411'),
]
operations = [
migrations.AlterField(
model_name='page',
name='description',
field=ckeditor.fields.RichTextField(blank=True, default=None, verbose_name='описание'),
),
]
|
from django import forms
from orders.models import ConatacForm
class ContactForm(forms.ModelForm):
class Meta:
model = ConatacForm
fields = ['name', 'telephone']
|
# Generated by Django 2.0.7 on 2020-08-10 09:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('is_show', models.BooleanField(default=True, verbose_name='是否展示')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_time', models.DateTimeField(auto_now=True, verbose_name='最后更新时间')),
('orders', models.IntegerField()),
('username', models.CharField(max_length=32, verbose_name='评论用户')),
('usericon', models.ImageField(default='icon/default.png', upload_to='icon', verbose_name='用户头像')),
('content', models.CharField(max_length=255, verbose_name='评论内容')),
('cmt_time', models.DateTimeField(auto_now_add=True, verbose_name='评论时间')),
('cmt_rated', models.IntegerField(verbose_name='评分')),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='zixun.comment')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='news',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('is_show', models.BooleanField(default=True, verbose_name='是否展示')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_time', models.DateTimeField(auto_now=True, verbose_name='最后更新时间')),
('orders', models.IntegerField()),
('title', models.CharField(max_length=128, verbose_name='资讯标题')),
('zixun_img', models.ImageField(blank=True, max_length=255, null=True, upload_to='courses', verbose_name='封面图片')),
('checked', models.IntegerField(verbose_name='阅读量')),
('catagorys', models.SmallIntegerField(choices=[(0, '营养'), (1, '疾病'), (2, '知道'), (3, '两性'), (4, '运动'), (5, '减肥'), (6, '母婴'), (7, '中医'), (8, '心理')], default=0, verbose_name='资讯分类')),
('recommended', models.BooleanField(default=False, verbose_name='是否推荐')),
],
options={
'verbose_name': '资讯',
'verbose_name_plural': '资讯',
'db_table': 'news',
},
),
migrations.CreateModel(
name='news_detail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_delete', models.BooleanField(default=False, verbose_name='是否删除')),
('is_show', models.BooleanField(default=True, verbose_name='是否展示')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_time', models.DateTimeField(auto_now=True, verbose_name='最后更新时间')),
('orders', models.IntegerField()),
('title', models.CharField(max_length=128, verbose_name='文章标题')),
('author', models.CharField(max_length=32, verbose_name='作者名')),
('article', models.TextField(verbose_name='文章内容')),
('date', models.DateTimeField(verbose_name='发布时间')),
('thumbup', models.IntegerField(verbose_name='点赞数')),
],
options={
'abstract': False,
},
),
]
|
def knight_or_knave(said):
return 'Knight!' if eval(str(said)) else 'Knave! Do not trust.'
|
import unittest
import logging
import sys
VERBOSITY = 0
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
# Adding some niceness to the default TextTestRunner, test totals, etc
class CustomRunner(unittest.TextTestRunner):
def run(self, test):
ran_count = 0
errors = []
failures = []
for suite in test:
for test_suite in suite:
unique_tests = test_suite._tests
if unique_tests is not None and len(unique_tests) > 0:
for test_class in unique_tests:
logging.info('Running: ' + str(test_class) + '\n')
else:
logging.info(('Skipping empty test group..\n'))
continue
output = unittest.TextTestRunner.run(self, test_suite)
ran_count += output.testsRun
errors.extend(output.errors)
failures.extend(output.failures)
total_errors = len(errors)
total_failures = len(failures)
        if (total_errors == 0 and total_failures == 0):
logging.info(('=== SUCCESS total ran: %d, errors: %d, failures: %d ===\n' % (ran_count, total_errors, total_failures)))
else:
logging.info(('=== FAILED total ran: %d, errors: %d, failures: %d ===\n' % (ran_count, total_errors, total_failures)))
def __init__(self, *args, **kwargs):
super(CustomRunner, self).__init__(*args, **kwargs)
if __name__ == "__main__":
suite = unittest.defaultTestLoader.discover('.')
# runner = unittest.TextTestRunner(verbosity=VERBOSITY)
runner = CustomRunner(verbosity=VERBOSITY)
runner.run(suite)
|
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import audio
import effects
def plot_io(xn: list, yn: list, fs: float, n: int = -1):
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
xn_l = xn if n < 1 else xn[0:n]
yn_l = yn if n < 1 else yn[0:n]
Xn = np.fft.fft(xn_l)
Yn = np.fft.fft(yn_l)
N = len(Xn)
k = np.arange(0, N, dtype=np.float32)
fa = fs * k / N
ax1.plot(k, xn_l)
ax2.plot(k, yn_l)
ax3.plot(fa, np.abs(Xn))
ax4.plot(fa, np.abs(Yn))
plt.show()
if __name__ == '__main__':
fs = 44100
xn = audio.sinusoid(amp=10, freq=17000, phs=0, fs=fs, duration=1)
#yn = effects.soft_clip(xn, k=8, gain=8)
yn = effects.wah(xn)
#yn = effects.tremolo(yn)
#yn = effects.asym_clip(yn, high=1, low=-0.5, gain=2)
#yn = effects.triangular(yn, gain=1)
#plot_io(xn, yn, fs)#, n=10000)
#audio.play(xn, fs)
#audio.play(yn, fs)
|
n1 = int(input('Type a number'))
n2 = int(input('Type another number'))
n3 = int(input('Type a third number'))
if n1>n2 and n1>n3:
biggest = n1
if n2>n1 and n2>n3:
biggest = n2
if n3>n1 and n3>n2:
biggest = n3
if n1<n2 and n1<n3:
smaller = n1
if n2<n1 and n2<n3:
smaller = n2
if n3<n1 and n3<n2:
smaller = n3
print('The biggest number is {}'.format(biggest))
print('The smallest number is {}'.format(smaller))
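# A more concise alternative (sketch) using Python's built-in functions:
# biggest = max(n1, n2, n3)
# smaller = min(n1, n2, n3)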
|
text = "I am from Chennai"
for i in range(len(text)):
if(text[i] == 'f'):
print("position of f is ",i)
break
|
"""Mealy Machine : md-réduction, dualisation, inversion, produit, factorisation..."""
from copy import deepcopy
from graphviz import Digraph
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics import Permutation
import igraph
Permutation.print_cyclic = True
class MealyMachine:
"""Machine de Mealy : dualisation, inversion, minimisation, md-réduction,
test d'inversibilité, test de réversibilité"""
def __init__(self, delta, rho, states=None, letters=None, name=None):
self.delta = deepcopy(delta)
self.rho = deepcopy(rho)
self.states = states if states is not None else [
str(i) for i in range(len(delta))]
self.letters = letters if letters is not None else [
str(i) for i in range(len(delta[0]))]
self.name = name if name is not None else None
self.nb_states = len(self.states)
self.nb_letters = len(self.letters)
def __eq__(self, other):
if other is None:
return False
if other.nb_states != self.nb_states or other.nb_letters != self.nb_letters:
return False
for p in range(self.nb_states):
for x in range(self.nb_letters):
if other.delta[p][x] != self.delta[p][x]:
return False
for p in range(self.nb_states):
for x in range(self.nb_letters):
if other.rho[p][x] != self.rho[p][x]:
return False
return True
# return other and self.delta == other.delta and self.rho == other.rho
def __str__(self):
return "(delta : " + str(self.delta) + ",\n rho : " + str(self.rho) + ")"
def __hash__(self):
return hash(str(self.states).join(str(self.rho)))
def execute(self, entry, state):
out = []
for w in entry:
out.append(self.rho[state][w])
state = self.delta[state][w]
return out
def dual(self):
"""Renvoie l'automate dual si l'automate est dualisable, renvoie Faux sinon"""
delta = [[None for i in range(self.nb_states)]
for j in range(self.nb_letters)]
rho = [[None for i in range(self.nb_states)]
for j in range(self.nb_letters)]
for x in range(self.nb_letters):
S = [False] * self.nb_states
for p in range(self.nb_states):
q, y = self.delta[p][x], self.rho[p][x]
if S[q]:
return False
delta[x][p] = y
rho[x][p] = q
S[q] = True
name_dual = self.name + "_dual" if self.name is not None else None
return MealyMachine(delta, rho, list(self.letters), list(self.states), name_dual)
def inverse(self):
"""Renvoie l'automate inverse si l'automate est inversible, renvoie Faux sinon"""
new_delta = [[None for i in range(self.nb_letters)]
for j in range(self.nb_states)]
new_rho = [[None for i in range(self.nb_letters)]
for j in range(self.nb_states)]
for p in range(self.nb_states):
for x in range(self.nb_letters):
e, y = self.delta[p][x], self.rho[p][x]
if new_rho[p][y] is not None:
return False
new_delta[p][y] = e
new_rho[p][y] = x
new_states = [p + "*" for p in self.states]
name_inverse = self.name + "_inverse" if self.name is not None else None
return MealyMachine(new_delta, new_rho, new_states, list(self.letters), name_inverse)
def is_reversible(self):
for x in range(self.nb_letters):
out = [False] * self.nb_states
for p in range(self.nb_states):
if out[self.delta[p][x]]:
return False
out[self.delta[p][x]] = True
return True
def bireversible(self):
inv = self.inverse()
if not inv:
return False
return self.is_reversible() and inv.is_reversible()
def __init_nerode_class(self):
cl = [None for i in range(self.nb_states)]
for p1 in range(self.nb_states):
if cl[p1] is None:
cl[p1] = p1
for p2 in range(p1+1, self.nb_states):
if cl[p2] is None:
equivalent = True
for x in range(self.nb_letters):
if self.rho[p1][x] != self.rho[p2][x]:
equivalent = False
if equivalent:
cl[p2] = cl[p1]
return cl
def __next_nerode_class(self, cl):
new_cl = [None for i in range(self.nb_states)]
for p1 in range(self.nb_states):
if new_cl[p1] is None:
new_cl[p1] = p1
for p2 in range(p1+1, self.nb_states):
if new_cl[p2] is None and cl[p1] == cl[p2]:
equivalent = True
for x in range(self.nb_letters):
if cl[self.delta[p1][x]] != cl[self.delta[p2][x]]:
equivalent = False
if equivalent:
new_cl[p2] = new_cl[p1]
return new_cl
def __fusion(self, cl):
new_delta, new_rho = [], []
for p in range(len(self.delta)):
if p == cl[p]:
new_delta.append(list(self.delta[p]))
new_rho.append(list(self.rho[p]))
new_id = {}
states = []
compteur = 0
for i in range(self.nb_states):
if not cl[i] in new_id:
new_id[cl[i]] = compteur
states.append(self.states[i])
compteur += 1
else:
states[new_id[cl[i]]] += self.states[i]
for p in range(len(new_delta)):
for x in range(len(new_delta[0])):
new_delta[p][x] = new_id[cl[new_delta[p][x]]]
return MealyMachine(new_delta, new_rho, states, self.letters)
def minimize(self):
stop = False
cl = self.__init_nerode_class()
while not stop:
new_cl = self.__next_nerode_class(cl)
if new_cl == cl:
stop = True
else:
cl = new_cl
return self.__fusion(cl)
def expansion(self, exp):
"Renvoie un automate dans lequel l'état i de self apparait exp[i] > 0 fois"
nb_states = 0
for i in range(self.nb_states):
if exp[i] < 1:
print("Erreur dans exp, exp[{}] < 1".format(i))
nb_states += exp[i]
delta = [[None for _ in range(self.nb_letters)]
for _ in range(nb_states)]
rho = [[None for _ in range(self.nb_letters)]
for _ in range(nb_states)]
print(len(delta))
for i in range(self.nb_states):
for j in range(self.nb_letters):
delta[i][j] = self.delta[i][j]
rho[i][j] = self.rho[i][j]
indice = self.nb_states
for i in range(self.nb_states):
for _ in range(1, exp[i]):
for j in range(self.nb_letters):
delta[indice][j] = self.delta[i][j]
rho[indice][j] = self.rho[i][j]
indice += 1
return MealyMachine(delta, rho)
def md_reduce(self):
prev, current = None, self
while prev != current:
prev, current = current, current.minimize()
if current == prev: # automaton is minimal
prev = prev.dual()
current = prev.minimize()
return current
def is_trivial(self):
# is trivial iff one state and rho is identity
return self.nb_states == 1 and self.rho[0] == [i for i in range(self.nb_letters)]
def is_md_trivial(self):
red = self.md_reduce()
return red.is_trivial() or red.dual().is_trivial()
def show(self, view=True, destfile=None):
"""Affiche l'automate"""
graph = Digraph(comment="Mealy Machine")
graph.attr(rankdir='LR')
for i in range(len(self.delta)):
graph.node(self.states[i])
graph.attr('node', shape='circle')
for p in range(self.nb_states):
edges = {}
for x in range(self.nb_letters):
key = self.states[self.delta[p][x]]
if key in edges:
edges[key] = edges[key] + "\n" + \
self.letters[x] + " | " + self.letters[self.rho[p][x]]
else:
edges[key] = self.letters[x] + " | " + \
self.letters[self.rho[p][x]]
for key in edges:
graph.edge(self.states[p], key, label=edges[key])
if destfile:
graph.render('outputs/' + destfile, view=view)
elif self.name is not None:
graph.render('outputs/' + self.name, view=view)
else:
graph.render('outputs/default', view=view)
def helix_graph(self):
adj = {}
for p in range(self.nb_states):
for x in range(self.nb_letters):
adj[p, x] = self.delta[p][x], self.rho[p][x]
return adj
def show_helix_graph(self, view=True, destfile=None):
"""Affiche le graphe en hélice de l'automate"""
graph = Digraph(comment="Helix Graph")
graph.attr(rankdir='LR')
graph.attr('node', shape='circle')
H = self.helix_graph()
for i in H:
p, x = i
q, y = H[i]
graph.node(self.states[p] + ", " + self.letters[x])
graph.edge(self.states[p] + ", " + self.letters[x],
self.states[q] + ", " + self.letters[y])
if destfile:
graph.render('outputs/' + destfile, view=view)
elif self.name is not None:
graph.render('outputs/' + self.name + "_helix", view=view)
else:
graph.render('outputs/default', view=view)
def cycles(self):
"""Renvoie un tableau contenant la longueur de chaque
cycle du graphe en hélix de l'automate"""
H = self.helix_graph()
cycles = []
done = set()
for i in H:
if i in done:
continue
done.add(i)
j = H[i]
length = 1
while j != i:
done.add(j)
j = H[j]
length += 1
cycles.append(length)
        cycles.sort()
return cycles
def augmented_helix_graph(self):
"""Renvoie le graphe en hélice augmenté avec igraph,
utilisé pour le calcul des automorphismes"""
# construction of helix graph using igraph
H = igraph.Graph(directed=True)
M = self.nb_letters
S = self.nb_states * self.nb_letters
ST = S + self.nb_states
SL = ST + self.nb_letters
H.add_vertices(SL + 3)
H.add_edge(SL + 1, SL + 2)
for x in range(self.nb_letters):
H.add_edge(ST + x, SL + 1)
for p in range(self.nb_states):
H.add_edge(S + p, SL)
for x in range(self.nb_letters):
st = p * M + x
H.add_edge(st, self.delta[p][x] * M + self.rho[p][x])
H.add_edge(st, S + p)
H.add_edge(st, ST + x)
return H
def automorphisms(self):
"""Renvoie le groupe d'automorphisme de l'automate"""
H = self.augmented_helix_graph()
S = self.nb_states * self.nb_letters
ST = S + self.nb_states
SL = ST + self.nb_letters
aut = H.get_automorphisms_vf2()
base = []
for f in aut:
ps = Permutation(list(map(lambda s: s - S, f[S:ST])))
pl = Permutation(list(map(lambda s: s - ST, f[ST:SL])))
p = Permutation(list(map(
lambda s: s - S, f[S:ST])) + list(map(lambda s: s - ST + self.nb_states, f[ST:SL])))
print(ps, 'x', pl)
base.append(p)
return PermutationGroup(base)
def canonical_graph(self):
H = self.augmented_helix_graph()
return H.permute_vertices(H.canonical_permutation())
def isomorphic(self, m2):
H1 = self.augmented_helix_graph()
H2 = m2.augmented_helix_graph()
return H1.isomorphic(H2)
def pretty_print_perm(self, p):
for cycle in p.cyclic_form:
print('(', ' '.join(map(
lambda x: self.states[x] if x < self.nb_states
else self.letters[x - self.nb_states], cycle)), ')', end='')
def product(m1, m2):
"""Renvoie le produit des automates m1 et m2"""
if m1.nb_letters != m2.nb_letters:
return None
nb_letters = m1.nb_letters
nb_states = m1.nb_states * m2.nb_states
delta = [[None for i in range(nb_letters)]
for i in range(nb_states)]
rho = [[None for i in range(nb_letters)]
for i in range(nb_states)]
states = [None for i in range(nb_states)]
for p in range(m1.nb_states):
for x in range(nb_letters):
q, y = m1.delta[p][x], m1.rho[p][x]
for r in range(m2.nb_states):
delta[p * m2.nb_states + r][x] = q * \
m2.nb_states + m2.delta[r][y]
rho[p * m2.nb_states + r][x] = m2.rho[r][y]
states[p * m2.nb_states + r] = m1.states[p] + m2.states[r]
return MealyMachine(delta, rho, states, list(m1.letters))
def mass_decide(m, states_limit):
current = m.minimize()
    # size = []
    while current.nb_states > states_limit:
        last_size = current.nb_states
        current = product(current, current)
        current = current.minimize()
        if current.nb_states == last_size:
            return True # finite: the minimized size has stopped growing
    return False # infinite?
def mass(m, n):
current = m
size = []
for i in range(n):
current = current.minimize()
if i > 0 and current.nb_states == size[-1]:
size.extend([current.nb_states] * (n - i))
return size
size.append(current.nb_states)
current = product(current, m)
return size
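# A minimal usage sketch; the 2-state, 2-letter machine below is an assumed example,
# not one taken from the original code:
# m = MealyMachine(delta=[[1, 0], [0, 1]], rho=[[1, 0], [0, 1]], name="example")
# m.execute([0, 1, 1], 0)   # run the machine on an input word from state 0
# m.dual()                  # dual automaton, or False if the machine is not dualizable
# m.inverse()               # inverse automaton, or False if the machine is not invertible
# m.md_reduce()             # alternate minimization and dualization until stable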
|
from rest_framework import viewsets
from .serializers import (MovieSerializer, ShowingRoomSerializer, ShowingSerializer, OrderSerializer)
from .models import (Movie, ShowingRoom, Showing, Order, Status)
class MovieViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows you to create new Movie and get all the Movies available.
"""
queryset = Movie.objects.all().order_by('id')
serializer_class = MovieSerializer
class ShowingRoomViewSet(viewsets.ModelViewSet):
"""
This is for the access to the Rooms: create one or get list of existing
"""
queryset = ShowingRoom.objects.all().order_by('id')
serializer_class = ShowingRoomSerializer
class ShowingViewSet(viewsets.ModelViewSet):
"""
This endpoint will return those showings which are not sold out and relevant
"""
queryset = Showing.objects.filter(status=Status.ACTIVE.value).order_by('id')\
.select_related('movie').select_related('showing_room') # a good way to reduce identical requests to db
serializer_class = ShowingSerializer
class OrderViewSet(viewsets.ModelViewSet):
"""
Here we can create new orders and get the list of the past orders
"""
queryset = Order.objects.all()
serializer_class = OrderSerializer
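# A minimal sketch (hypothetical urls.py and URL prefixes) of how these viewsets
# would typically be exposed through a DRF router:
# from rest_framework.routers import DefaultRouter
# from .views import MovieViewSet, ShowingRoomViewSet, ShowingViewSet, OrderViewSet
# router = DefaultRouter()
# router.register(r'movies', MovieViewSet)
# router.register(r'rooms', ShowingRoomViewSet)
# router.register(r'showings', ShowingViewSet)
# router.register(r'orders', OrderViewSet)
# urlpatterns = router.urls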
|
"""
CCT modeling and optimization code
Utility collection
Author: 赵润晓
Date: June 12, 2021
"""
# key points of the superconducting wire's critical current (field in T, current in A)
magnet_field_critical_points = [6, 7, 8, 9]
current_critical_points = [795, 620, 445, 275]
# CCT single-wire current and maximum surface magnetic field
current = 444 # 588
max_magnet_field = 4.39 # 4.00
# ---------------------------------------------------------------------------------------- #
import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
PathProject = os.path.split(rootPath)[0]
sys.path.append(rootPath)
sys.path.append(PathProject)
from cctpy import *
# fit the critical-current key points with a polynomial
coefficient_list = BaseUtils.polynomial_fitting(
xs = magnet_field_critical_points,
ys = current_critical_points,
order = 2
)
fitted_func = BaseUtils.polynomial_fitted_function(coefficient_list)
Plot2.plot_function(fitted_func,2,10,describe="g--")
Plot2.plot_xy_array(magnet_field_critical_points,current_critical_points,"kx")
Plot2.plot_p2s([P2.origin(),P2(max_magnet_field,current).change_length(100000)],"r")
Plot2.plot_p2(P2(max_magnet_field,current),"ro")
Plot2.xlim(0,10)
Plot2.ylim(0,1200)
Plot2.info("B/T","I/A")
Plot2.show()
|
from flask_wtf import FlaskForm
from wtforms import TextAreaField, TextField
from wtforms.validators import DataRequired
from widgets import SubmitButtonField
class NewCommentForm(FlaskForm):
comment = TextAreaField("comment", validators=[DataRequired("You need to enter a comment.")], render_kw={
"placeholder": "Commnemt"})
submit = SubmitButtonField("Submit", render_kw={"class": "uk-button uk-button-primary"})
class WriteNewsForm(FlaskForm):
title = TextField("title", validators=[DataRequired("You need to enter a title.")], render_kw={
"placeholder": "Title"})
lead = TextField("lead", render_kw={"placeholder": "Lead"})
text = TextAreaField("text", validators=[DataRequired("You need to enter a text.")], render_kw={
"placeholder": "Text"})
submit = SubmitButtonField("Submit", render_kw={"class": "uk-button uk-button-primary"})
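# A minimal sketch (hypothetical app object, route, endpoint, and template names) of how
# WriteNewsForm would typically be handled in a Flask view:
# from flask import redirect, render_template, url_for
# @app.route("/news/write", methods=["GET", "POST"])
# def write_news():
#     form = WriteNewsForm()
#     if form.validate_on_submit():
#         ...  # persist form.title.data, form.lead.data, form.text.data
#         return redirect(url_for("index"))
#     return render_template("write_news.html", form=form)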
|
class Solution:
def reorderLogFiles(self, logs: List[str]) -> List[str]:
n = len(logs)
res1, res2 = [], []
for sub in logs:
tmp = sub.strip().split(' ')
if tmp[1].isdigit():
res1.append(sub)
else:
res2.append(sub)
res2.sort(key=lambda x : (x.split(' ')[1:], x.split(' ')[0]))
res2.extend(res1)
return res2
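# Example from the original LeetCode problem (937. Reorder Data in Log Files):
# logs = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig", "let3 art zero"]
# Solution().reorderLogFiles(logs)
# # -> ["let1 art can", "let3 art zero", "let2 own kit dig", "dig1 8 1 5 1", "dig2 3 6"]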
|
import read
import facts_and_rules
facts, rules = read.read_tokenize("statements_backup.txt")
global KB
KB = []
global RB
RB = []
def assert_rule(rule):
if rule not in RB:
RB.append(rule)
infer_from_rule(rule)
def assert_fact(fact):
if fact not in KB:
KB.append(fact)
infer_from_fact(fact)
def infer_from_fact(fact):
for r in RB:
bindings = facts_and_rules.match(r.lhs[0], fact)
if bindings != False:
if len(r.lhs) == 1:
new_statement = facts_and_rules.statement(facts_and_rules.instantiate(r.rhs.full, bindings))
if r.type == "Assert":
fact.add_fact(new_statement)
assert_fact(new_statement)
print "adding inference: " + str(new_statement.full)
elif r.type == "Retract":
retract(new_statement)
print "retracting: " + str(new_statement.full)
else:
                tests = list(map(lambda x: facts_and_rules.instantiate(x.full, bindings), r.lhs[1:]))
rhs = facts_and_rules.instantiate(r.rhs.full, bindings)
new_rule = facts_and_rules.rule(tests, rhs)
fact.add_rule(new_rule)
assert_rule(new_rule)
def infer_from_rule(rule):
for f in KB:
bindings = facts_and_rules.match(rule.lhs[0], f)
if bindings != False:
if len(rule.lhs) == 1:
new_statement = facts_and_rules.statement(facts_and_rules.instantiate(rule.rhs.full, bindings))
if rule.type == "Assert":
f.add_fact(new_statement)
assert_fact(new_statement)
print "adding inference: " + str(new_statement.full)
elif rule.type == "Retract":
retract(new_statement)
else:
                tests = list(map(lambda x: facts_and_rules.instantiate(x.full, bindings), rule.lhs[1:]))
rhs = facts_and_rules.instantiate(rule.rhs.full, bindings)
new_rule = facts_and_rules.rule(tests, rhs)
rule.add_rule(new_rule)
assert_rule(new_rule)
def retract(item):
for fact in KB:
if facts_and_rules.match(item, fact) != False:
remove_supports(fact)
def remove_supports(fact):
if fact in KB:
print "Retracting: " + fact.pretty()
for f in fact.facts:
remove_supports(f)
KB.remove(fact)
def ask(pattern):
for fact in KB:
bindings_lists = []
bindings = facts_and_rules.match(pattern, fact)
if bindings != False:
bindings_lists.append(bindings)
for b in bindings_lists:
print "This is true: \t",
print facts_and_rules.statement(facts_and_rules.instantiate(pattern.full, b)).pretty()
for new_fact in facts:
assert_fact(facts_and_rules.statement(new_fact))
for new_rule in rules:
assert_rule(facts_and_rules.rule(new_rule[0], new_rule[1]))
ask(facts_and_rules.statement(["flat", "?x"]))
for f in KB:
    print(f.pretty())
|
from setuptools import setup
setup(
name='overpass',
packages=['overpass'],
version='0.1.0',
description='Python wrapper for the OpenStreetMap Overpass API',
author='Martijn van Exel',
author_email='m@rtijn.org',
url='https://github.com/mvexel/overpass-api-python-wrapper',
download_url='https://github.com/mvexel/overpass-api-python-wrapper/tarball/0.0.1',
keywords=['openstreetmap', 'overpass', 'wrapper'],
classifiers=[],
install_requires=['requests>=2.3.0', 'geojson>=1.0.9'],
)
|
def foo():
    # hmps!
    pass
|
#activation function
#gradient of the activation function
#add bias
#feed forward
#encoding the labels
#calculate the cost function
#gradient with respect to the weights
#initialize those weights
import struct
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.special import expit
def load_data():
with open('train-labels.idx1-ubyte','rb') as labels:
magic, n = struct.unpack('>II',labels.read(8))
train_labels = np.fromfile(labels, dtype=np.uint8)
with open('train-images.idx3-ubyte', 'rb') as images:
magic,num, nrows, ncols = struct.unpack('>IIII',images.read(16))
train_images = np.fromfile(images, dtype=np.uint8).reshape(num,784)
with open('t10k-labels.idx1-ubyte','rb') as labels:
magic, n = struct.unpack('>II',labels.read(8))
test_labels = np.fromfile(labels, dtype=np.uint8)
with open('t10k-images.idx3-ubyte', 'rb') as images:
magic,num, nrows, ncols = struct.unpack('>IIII',images.read(16))
test_images = np.fromfile(images, dtype=np.uint8).reshape(num,784)
return train_images, train_labels, test_images, test_labels
def visualize_data(img_arr,label_arr):
fig, ax = plt.subplots(nrows=8, ncols=8, sharex=True, sharey=True)
ax=ax.flatten()
for i in range(64):
img=img_arr[label_arr==2][i].reshape(28,28)
ax[i].imshow(img, cmap='Greys',interpolation='nearest')
plt.show()
#train_x,train_y, test_x, test_y = load_data()
#visualize_data(train_x, train_y)
def enc_one_hot(y, num_labels=10):
one_hot = np.zeros((num_labels, y.shape[0]))
for i, val in enumerate(y):
one_hot[val,i] =1.0
return one_hot
def sigmoid(z):
return expit(z)
#return (1/(1+np.exp(-z)))
def visualize_sigmoid():
x = np.arange(-10,10,0.1)
y=sigmoid(x)
fig, ax = plt.subplots()
ax.plot(x,y)
plt.show()
def cal_cost(y_enc, output):
t1 = -y_enc*np.log(output)
t2 = (1-y_enc) *np.log(1-output)
cost = np.sum(t1-t2)
return cost
def add_bias_unit(x, where):
    #where do you want to add bias, to row or column
if where=='column':
x_new = np.ones((x.shape[0], x.shape[1]+1))
x_new[:, 1: ]=x
elif where=='row':
x_new = np.ones((x.shape[0] + 1, x.shape[1]))
x_new[1:,:]=x
return x_new
def init_weights(n_features, n_hidden, n_output):
w1 = np.random.uniform(-1.0, 1.0, size=n_hidden*(n_features+1))
w1 = w1.reshape(n_hidden, n_features+1)
w2 = np.random.uniform(-1.0, 1.0, size=n_hidden*(n_hidden+1))
w2 = w2.reshape(n_hidden, n_hidden+1)
w3 = np.random.uniform(-1.0, 1.0, size=n_output*(n_hidden+1))
w3 = w3.reshape(n_output, n_hidden+1)
return w1, w2, w3
def feed_forward(x, w1, w2, w3):
#add bias unit to the input
    #each row of x is one flattened input sample,
    #so we need to add the bias as a column vector of ones
a1= add_bias_unit(x,where='column')
z2= w1.dot(a1.T)
a2 = sigmoid(z2)
#since we transposed we have to add bias unit as row
a2 = add_bias_unit(a2, where='row')
z3 = w2.dot(a2)
a3 = sigmoid(z3)
a3=add_bias_unit(a3, where='row')
z4 = w3.dot(a3)
a4 = sigmoid(z4)
return a1, z2, a2, z3, a3, z4, a4
def predict(x, w1, w2,w3):
a1, z2, a2, z3, a3, z4, a4 = feed_forward(x,w1, w2, w3)
y_pred =np.argmax(a4,axis=0)
return y_pred
def calc_grad(a1, a2, a3, a4, z2, z3, z4, y_enc, w1, w2, w3):
delta4 = a4-y_enc
z3 = add_bias_unit(z3, where='row')
delta3 = w3.T.dot(delta4)*sigmoid(z3)
delta3 =delta3[1:,:]
z2 = add_bias_unit(z2, where='row')
delta2 = w2.T.dot(delta3)*sigmoid(z2)
delta2 =delta2[1:,:]
grad1 = delta2.dot(a1)
grad2 = delta3.dot(a2.T)
grad3 = delta4.dot(a3.T)
return grad1, grad2, grad3
def run_model(X,y,x_t,y_t):
X_copy, y_copy = X.copy(), y.copy()
y_enc= enc_one_hot(y)
epochs = 1000
batch = 50
w1, w2, w3= init_weights(784,75,10)
alpha = 0.001
eta = 0.001
dec = 0.00001 #decrease constant
delta_w1_prev = np.zeros(w1.shape)
delta_w2_prev = np.zeros(w2.shape)
delta_w3_prev = np.zeros(w3.shape)
total_cost=[]
pred_acc = np.zeros(epochs)
for i in range(epochs):
shuffle = np.random.permutation(y_copy.shape[0])
X_copy, y_enc = X_copy[shuffle], y_enc[:,shuffle]
eta/=(1+dec*i)
mini = np.array_split(range(y_copy.shape[0]),batch)
for step in mini:
#feed foward the model
a1, z2, a2, z3, a3, z4, a4 = feed_forward(X_copy[step], w1,w2,w3)
cost =cal_cost(y_enc[:,step],a4)
total_cost.append(cost)
#back propagate the gradient
grad1, grad2,grad3 = calc_grad(a1, a2, a3, a4,z2,z3,z4, y_enc[:,step], w1, w2, w3)
delta_w1, delta_w2, delta_w3 = eta*grad1, eta*grad2, eta*grad3
w1-=delta_w1+alpha*delta_w1_prev
w2-=delta_w2+alpha*delta_w2_prev
w3-= delta_w3+alpha*delta_w3_prev
            delta_w1_prev, delta_w2_prev, delta_w3_prev = delta_w1, delta_w2, delta_w3
y_pred =predict(x_t, w1, w2, w3)
pred_acc[i] = 100*np.sum(y_t == y_pred, axis=0)/x_t.shape[0]
print('Epoch #', i)
return total_cost, pred_acc
train_x, train_y, test_x, test_y = load_data()
cost, acc = run_model(train_x, train_y, test_x, test_y)
x_a = [i for i in range(acc.shape[0])]
x_c = [i for i in range(len(cost))]
plt.subplot(221)
plt.plot(x_c,cost)
plt.subplot(222)
plt.plot(x_a,acc)
plt.show()
|
import RPi.GPIO as GPIO
import time
x = 0.1
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.OUT)
GPIO.output(4, GPIO.LOW)
try:
    while True:
        GPIO.output(4, GPIO.HIGH)
        time.sleep(x)
        GPIO.output(4, GPIO.LOW)
        time.sleep(x)
except KeyboardInterrupt:
    pass
finally:
    # release the GPIO pins whichever way the loop ends
    GPIO.cleanup()
|
import scrapy
import sys
import string
import urllib
from urllib.request import urlopen
from urllib.parse import urlparse
import socket
from datetime import datetime
from bs4 import BeautifulSoup
import requests
from ..items import SihItem
def get_last_modified(url):
result = urlparse(url)
    if all([result.scheme, result.netloc, result.path]):
header = requests.head(url).headers
if 'Last-Modified' in header:
return header['Last-Modified']
print ("Data is not available")
return -1
else:
return -1
class simple(scrapy.Spider):
name='implication3'
i=0;
start_urls=["https://www.sih.gov.in"]
def parse(self,response):
items=SihItem()
# r = requests.get("https://www.datacamp.com/community/tutorials/text-analytics-beginners-nltk")
# soup = BeautifulSoup(r.content)
#l=[]
# for tag in soup.find_all():
#l.append(tag.name);
#l= list(dict.fromkeys(l))
#print("this is L",l)
#for q1 in response.css('body'):
#a=input("enter the url")
#self.start_urls.append(a)
e=""
a=self.start_urls[self.i]
j=len(self.start_urls[self.i])-1
k=0
flag=0
print("in")
while(k<=j):
k=k+1
if(a[k-1]=='/'):
flag=flag+1
continue;
if(flag>=2 and flag<3):
e=e+a[k-1]
if(flag==3):
break;
a=e
print("out")
s=response.xpath("//title/text()").extract()
now=datetime.now()
cs=now.strftime("%d/%m/%y %H:%M:%S")
items["Time"]=cs
try:
items["IP_Address"]=socket.gethostbyname(a)
w=socket.gethostbyname(a)
print(socket.gethostbyname(a))
ress=urllib.request.urlopen("http://api.hostip.info/get_html.php?ip={}&position=true".format(w)).read()
print(ress)
items["Country"]=ress
except:
print("Invalid url")
items["URL"]=self.start_urls[self.i]
ww=self.start_urls[self.i]
we=get_last_modified(ww)
items["Updated_Time"]=we
items["Snippet"]=s
self.i=self.i+1
yield items;
next_page=response.xpath("//a/@href").extract()
if next_page is not None:
for r1 in next_page:
self.start_urls.append(r1)
yield response.follow(r1,callback=self.parse) #print("this is nextpage",next_page)
#if next_page is not None
|
from msvcrt import getch
key = ord(getch())  # getch() returns a bytes object; ord() gives the key code
if key == 32:
print('you pressed space')
|
from DominoExceptions import EndGameException
from Solitaire import Solitaire
class AutoPlaySolitaire(Solitaire):
TESTED_SOLUTION = {}
MATCH_COUNT = 0
def auto_play(self):
"""TODO Returns a solution, if any. None otherwise"""
try:
self.auto_play_helper()
except EndGameException:
return True
return False
def auto_play_helper(self):
"""Raise an EndGameException if a solution exists."""
# discard_sets = self.get_discard_possibilities()
discard_sets = self.get_discard_possibilities_dyn()
# We make a copy of the deck
tmp = self.hand[:]
# try every possible discard
for discard_set in discard_sets:
self.play_turn(discard_set, check=False)
if self.victory:
raise EndGameException
if self.current_state.state_size not in AutoPlaySolitaire.TESTED_SOLUTION:
AutoPlaySolitaire.TESTED_SOLUTION[self.current_state.state_size] = {}
current_state_size = self.current_state.state_size
current_state_hash = self.current_state.__hash__()
if current_state_hash not in AutoPlaySolitaire.TESTED_SOLUTION[current_state_size]:
try:
self.auto_play_helper()
except EndGameException:
AutoPlaySolitaire.TESTED_SOLUTION[current_state_size][current_state_hash] = True
raise EndGameException
AutoPlaySolitaire.TESTED_SOLUTION[current_state_size][current_state_hash] = False
else:
AutoPlaySolitaire.MATCH_COUNT += 1
if AutoPlaySolitaire.TESTED_SOLUTION[current_state_size][current_state_hash]:
raise EndGameException
self.hand = tmp[:]
def get_discard_possibilities(self):
"""Returns a list containing the sets that can be discarded."""
result = []
self.get_discard_possibilities_rec(self.hand, [], self.number_point, result)
return result
def get_discard_possibilities_rec(self, deck_rest, deck_used, total_value, results_set):
"""Make the recursive computation of the function above."""
# Initialisation
if total_value == 0:
results_set += [deck_used]
return
if not deck_rest or total_value < 0:
return
# Then there are two possibilities whether
# we use the last number in the list or not
# the recursion stop when the sum is null
# or when there is no more number.
self.get_discard_possibilities_rec(deck_rest[:-1], deck_used + [deck_rest[-1]],
total_value - deck_rest[-1].get_value, results_set)
self.get_discard_possibilities_rec(deck_rest[:-1], deck_used, total_value, results_set)
def get_discard_possibilities_dyn(self):
# We begin with the memoisation
memoization = [[False for i in range (self.number_point + 1)] for i in range(len(self.hand) + 1)]
# Initialization
for i in range(len(self.hand)):
memoization[i][0] = True
for i in range(1, len(self.hand) + 1):
for j in range(1, self.number_point + 1):
if j < self.hand[i - 1].get_value:
memoization[i][j] = memoization[i][j - 1]
else:
memoization[i][j] = memoization[i][j - 1] or memoization[i][j - self.hand[i - 1].get_value]
def solution_rec(i, j, hand):
if i == 0:
return [[]]
results = []
if memoization[i - 1][j]:
                # the target sum j can already be reached without domino i
results += solution_rec(i - 1, j, hand)
if j >= hand[i - 1].get_value and memoization[i - 1][j - hand[i - 1].get_value]:
                # the problem can also be solved by taking this domino's value
tmp_res = solution_rec(i - 1, j - hand[i - 1].get_value, hand)
for res in tmp_res:
res += [hand[i - 1]]
results += tmp_res
return results
return solution_rec(len(self.hand), self.number_point, self.hand)
|
#!/usr/bin/env python
import sys
import time
import os.path
import datetime
import logging
from operator import attrgetter
from functools import partial
import click
from click_datetime import Datetime
from finam import (Exporter,
Timeframe,
Market,
FinamExportError,
FinamObjectNotFoundError)
from finam.utils import click_validate_enum
"""
Helper script to download a set of assets
"""
logger = logging.getLogger(__name__)
def _arg_split(ctx, param, value):
if value is None:
return value
try:
items = value.split(',')
except ValueError:
raise click.BadParameter('comma-separated {} is required, got {}'
.format(param, value))
return items
@click.command()
@click.option('--contracts',
help='Contracts to lookup',
required=False,
callback=_arg_split)
@click.option('--market',
help='Market to lookup',
callback=partial(click_validate_enum, Market),
required=False)
@click.option('--timeframe',
help='Timeframe to use (DAILY, HOURLY, MINUTES30 etc)',
default=Timeframe.DAILY.name,
callback=partial(click_validate_enum, Timeframe),
required=False)
@click.option('--destdir',
help='Destination directory name',
required=True,
type=click.Path(exists=True, file_okay=False, writable=True,
resolve_path=True))
@click.option('--skiperr',
help='Continue if a download error occurs. False by default',
required=False,
default=True,
type=bool)
@click.option('--lineterm',
help='Line terminator',
default='\r\n')
@click.option('--delay',
help='Seconds to sleep between requests',
type=click.IntRange(0, 600),
default=1)
@click.option('--startdate', help='Start date',
type=Datetime(format='%Y-%m-%d'),
default='2007-01-01',
required=False)
@click.option('--enddate', help='End date',
type=Datetime(format='%Y-%m-%d'),
default=datetime.date.today().strftime('%Y-%m-%d'),
required=False)
@click.option('--ext',
help='Resulting file extension',
default='csv')
def main(contracts, market, timeframe, destdir, lineterm,
delay, startdate, enddate, skiperr, ext):
exporter = Exporter()
if not any((contracts, market)):
raise click.BadParameter('Neither contracts nor market is specified')
market_filter = dict()
if market:
market_filter.update(market=Market[market])
if not contracts:
contracts = exporter.lookup(**market_filter)['code'].tolist()
for contract_code in contracts:
logging.info('Handling {}'.format(contract_code))
try:
contracts = exporter.lookup(code=contract_code, **market_filter)
except FinamObjectNotFoundError:
logger.error('unknown contract "{}"'.format(contract_code))
sys.exit(1)
else:
contract = contracts.reset_index().iloc[0]
logger.info(u'Downloading contract {}'.format(contract))
try:
data = exporter.download(contract.id,
start_date=startdate,
end_date=enddate,
timeframe=Timeframe[timeframe],
market=Market(contract.market))
except FinamExportError as e:
if skiperr:
logger.error(repr(e))
continue
else:
raise
destpath = os.path.join(destdir, '{}-{}.{}'
.format(contract.code, timeframe, ext))
data.to_csv(destpath, index=False, line_terminator=lineterm)
if delay > 0:
logger.info('Sleeping for {} second(s)'.format(delay))
time.sleep(delay)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
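# Example invocation (the script name, market and contract codes are assumptions for illustration):
# python download.py --market SHARES --contracts SBER,GAZP --timeframe DAILY --destdir ./data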
|
import json
import pickle
from bisect import bisect_left, bisect_right
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset, DataLoader
def prepare_data(party_list, mode="train"):
"""
    This function defines the pipeline for creating the train and validation samples.
    We consider each client in party_list. For each client we take each
    predict_period_start from the predict_dates list. All of the client's
    transactions before this date form our features. Next, we look at the
    client's transactions over the following two months; these transactions
    should be predicted, and they form our label vector.
"""
data_sum = []
data_trans_type = []
data_merchant_type = []
data_labels = []
data_fin_type = []
data_trans_cat = []
data_day = []
data_gender = []
data_age = []
data_marital_status = []
data_child_cnt = []
data_like_cat = []
data_fav_cat = []
data_disl_cat = []
data_prod_sum = []
data_popular_prod = []
for party_rk in tqdm(party_list):
date_series = party2dates[party_rk]
sum_series = party2sum[party_rk]
merch_type_series = party2merchant_type[party_rk]
trans_type_series = party2trans_type[party_rk]
fin_type_series = party2fin_type[party_rk]
trans_cat_series = party2trans_cat[party_rk]
day_series = party2day[party_rk]
gender_series = party2gender[party_rk]
age_series = party2age[party_rk]
marital_status_series = party2marital_status[party_rk]
child_cnt_series = party2child_cnt[party_rk]
like_cat_series = party2like_cat[party_rk]
fav_cat_series = party2fav_cat[party_rk]
disl_cat_series = party2disl_cat[party_rk]
prod_sum_series = party2prod_sum[party_rk]
popular_prod_series = party2popular_prod[party_rk]
if mode == "train":
predict_dates = train_predict_dates
elif mode == "valid":
predict_dates = valid_predict_dates
elif mode == "submission":
predict_dates = submission_predict_dates
else:
raise Exception("Unknown mode")
for predict_period_start in predict_dates:
predict_period_end = datetime.strftime(
datetime.strptime(predict_period_start, "%Y-%m-%d")
+ timedelta(days=predict_period_len),
"%Y-%m-%d",
)
l, r = (
bisect_left(date_series, predict_period_start),
bisect_right(date_series, predict_period_end),
)
history_merch_type = merch_type_series[:l]
history_sum = sum_series[:l]
history_trans_type = trans_type_series[:l]
predict_merch = merch_type_series[l:r]
history_fin_type = fin_type_series[:l]
history_trans_cat = trans_cat_series[:l]
history_day = day_series[:l]
history_gender = gender_series[:l]
history_age = age_series[:l]
history_marital_status = marital_status_series[:l]
history_child_cnt = child_cnt_series[:l]
history_like_cat = like_cat_series[:l]
history_fav_cat = fav_cat_series[:l]
history_disl_cat = disl_cat_series[:l]
history_prod_sum = prod_sum_series[:l]
history_popular_prod = popular_prod_series[:l]
if predict_merch and l or mode not in ("train", "valid"):
data_sum.append(history_sum)
data_trans_type.append(history_trans_type)
data_merchant_type.append(history_merch_type)
data_labels.append(predict_merch)
data_fin_type.append(history_fin_type)
data_trans_cat.append(history_trans_cat)
data_day.append(history_day)
data_gender.append(history_gender)
data_age.append(history_age)
data_marital_status.append(history_marital_status)
data_child_cnt.append(history_child_cnt)
data_like_cat.append(history_like_cat)
data_fav_cat.append(history_fav_cat)
data_disl_cat.append(history_disl_cat)
data_prod_sum.append(history_prod_sum)
data_popular_prod.append(history_popular_prod)
return data_sum, data_trans_type, data_merchant_type, data_labels, \
data_fin_type, data_trans_cat, data_day, data_gender, data_age, \
data_marital_status, data_child_cnt, data_like_cat, data_fav_cat, \
data_disl_cat, data_prod_sum, data_popular_prod
class RSDataset(Dataset):
def __init__(self, data_sum, data_trans_type, data_merchant_type, labels,
data_fin_type, data_trans_cat, data_day, data_gender,
data_age, data_marital_status, data_child_cnt, data_like_cat,
data_fav_cat, data_disl_cat, data_prod_sum, data_popular_prod):
super(RSDataset, self).__init__()
self.data_sum = data_sum
self.data_trans_type = data_trans_type
self.data_merchant_type = data_merchant_type
self.labels = labels
self.data_fin_type = data_fin_type
self.data_trans_cat = data_trans_cat
self.data_day = data_day
self.data_gender = data_gender
self.data_age = data_age
self.data_marital_status = data_marital_status
self.data_child_cnt = data_child_cnt
self.data_like_cat = data_like_cat
self.data_fav_cat = data_fav_cat
self.data_disl_cat = data_disl_cat
self.data_prod_sum = data_prod_sum
self.data_popular_prod = data_popular_prod
def __len__(self):
return len(self.data_sum)
def __getitem__(self, idx):
targets = np.zeros((MERCH_TYPE_NCLASSES - 1,), dtype=np.float32)
for m in self.labels[idx]:
if m: # skip UNK, UNK-token should not be predicted
targets[m - 1] = 1.0
item = {
"features": {},
"targets": targets,
}
sum_feature = np.array(self.data_sum[idx][-PADDING_LEN:])
sum_feature = np.vectorize(lambda s: np.log(1 + s))(sum_feature)
if sum_feature.shape[0] < PADDING_LEN:
pad = np.zeros(
(PADDING_LEN - sum_feature.shape[0],), dtype=np.float32
)
sum_feature = np.hstack((sum_feature, pad))
item["features"]["sum"] = torch.from_numpy(sum_feature).float()
for feature_name, feature_values in zip(
["trans_type", "merchant_type", "financial_account_type_cd",
"transaction_category", "day_of_week", "gender_cd", "age",
"marital_status_desc", "children_cnt", "most_popular_like_category",
"most_popular_favorite_category", "most_popular_dislike_category",
"products_sum", "most_popular_product_chosen"
],
[self.data_trans_type[idx], self.data_merchant_type[idx],
self.data_fin_type[idx], self.data_trans_cat[idx],
self.data_day[idx], self.data_gender[idx], self.data_age[idx],
self.data_marital_status[idx], self.data_child_cnt[idx],
self.data_like_cat[idx], self.data_fav_cat[idx],
self.data_disl_cat[idx], self.data_prod_sum[idx],
self.data_popular_prod[idx]],
):
feature_values = np.array(feature_values[-PADDING_LEN:])
mask = np.ones(feature_values.shape[0], dtype=np.float32)
if feature_values.shape[0] < PADDING_LEN:
feature_values = np.append(
feature_values,
np.zeros(
PADDING_LEN - feature_values.shape[0], dtype=np.int64
),
)
mask = np.append(
mask,
np.zeros(PADDING_LEN - mask.shape[0], dtype=np.float32),
)
item["features"][feature_name] = torch.from_numpy(feature_values).long()
item["features"][f"{feature_name}_mask"] = torch.from_numpy(mask).float()
return item
if __name__ == '__main__':
transactions_path = f"{DATADIR}/avk_hackathon_data_transactions.csv"
DATADIR = "/content/drive/My Drive/datasets/2009-skoltech-hack_data/"
create_party2 = True
create_mappings = True
data_1 = pd.read_csv(transactions_path)
data = pd.read_csv(DATADIR + 'overall_df_transactions.csv')
data = data.drop("log_transaction_amt_rur", axis=1)
data = data.join(data_1["transaction_amt_rur"])
data.to_csv(f"{DATADIR}/data.csv")
data_path = f"{DATADIR}/data.csv"
if create_mappings:
mappings = defaultdict(dict)
unk_token = "<UNK>"
def create_mapping(values):
mapping = {unk_token: 0}
for v in values:
if not pd.isna(v):
mapping[str(v)] = len(mapping)
return mapping
for col in tqdm(
[
"transaction_type_desc",
"merchant_rk",
"merchant_type",
"merchant_group_rk",
"transaction_category",
"financial_account_type_cd",
]
):
col_values = (
pd.read_csv(data_path, usecols=[col])[col]
.fillna(unk_token)
.astype(str)
)
mappings[col] = create_mapping(col_values.unique())
del col_values
with open(f"{DATADIR}/mappings.json", "w") as f:
json.dump(mappings, f)
else:
with open(f"{DATADIR}/mappings.json", 'r') as f:
mappings = json.load(f)
if create_party2:
# Prepare & save client data
party2dates = defaultdict(list) # for each party save a series of the transaction dates
party2sum = defaultdict(list) # for each party save a series of the transaction costs
        party2merchant_type = defaultdict(list) # for each party save a series of the merchant_type
        party2trans_type = defaultdict(list) # for each party save a series of the transaction_type
party2fin_type = defaultdict(list)
party2trans_cat = defaultdict(list)
party2day = defaultdict(list)
party2gender = defaultdict(list)
party2age = defaultdict(list)
party2marital_status = defaultdict(list)
party2child_cnt = defaultdict(list)
party2like_cat = defaultdict(list)
party2fav_cat = defaultdict(list)
party2disl_cat = defaultdict(list)
party2prod_sum = defaultdict(list)
party2popular_prod = defaultdict(list)
usecols = [
"party_rk",
"transaction_dttm_datetime",
"transaction_amt_rur",
"merchant_type",
"transaction_type_desc",
"financial_account_type_cd", # new
"transaction_category", # new
"day_of_week", # new
"gender_cd", # new
"age", # new
"marital_status_desc", # new
"children_cnt", # new
"most_popular_like_category", # new
"most_popular_favorite_category", # new
"most_popular_dislike_category", # new
"products_sum", # new
"most_popular_product_chosen" # new
]
for chunk in tqdm(
pd.read_csv(data_path, usecols=usecols, chunksize=100_000)
):
chunk["merchant_type"] = (
chunk["merchant_type"].fillna(unk_token).astype(str)
)
chunk["transaction_type_desc"] = (
chunk["transaction_type_desc"].fillna(unk_token).astype(str)
)
chunk["transaction_amt_rur"] = chunk["transaction_amt_rur"].fillna(0)
for i, row in chunk.iterrows():
party2dates[row.party_rk].append(row.transaction_dttm_datetime)
party2sum[row.party_rk].append(row.transaction_amt_rur)
party2merchant_type[row.party_rk].append(
mappings["merchant_type"][row.merchant_type]
)
party2trans_type[row.party_rk].append(
mappings["transaction_type_desc"][row.transaction_type_desc]
)
party2trans_cat[row.party_rk].append(
mappings["transaction_category"][row.transaction_category]
)
party2fin_type[row.party_rk].append(row.financial_account_type_cd)
# party2trans_cat[row.party_rk].append(row.transaction_category)
party2day[row.party_rk].append(row.day_of_week)
party2gender[row.party_rk].append(row.gender_cd)
party2age[row.party_rk].append(row.age)
party2marital_status[row.party_rk].append(row.marital_status_desc)
party2child_cnt[row.party_rk].append(row.children_cnt)
party2like_cat[row.party_rk].append(row.most_popular_like_category)
party2fav_cat[row.party_rk].append(row.most_popular_favorite_category)
party2disl_cat[row.party_rk].append(row.most_popular_dislike_category)
party2prod_sum[row.party_rk].append(row.products_sum)
party2popular_prod[row.party_rk].append(row.most_popular_product_chosen)
del chunk
pickle.dump(party2dates, open(f"{DATADIR}/party2dates.pkl", "wb"))
pickle.dump(party2sum, open(f"{DATADIR}/party2sum.pkl", "wb"))
pickle.dump(party2merchant_type, open(f"{DATADIR}/party2merchant_type.pkl", "wb"))
pickle.dump(party2trans_type, open(f"{DATADIR}/party2trans_type.pkl", "wb"))
pickle.dump(party2fin_type, open(f"{DATADIR}/party2fin_type.pkl", "wb"))
pickle.dump(party2trans_cat, open(f"{DATADIR}/party2trans_cat.pkl", "wb"))
pickle.dump(party2day, open(f"{DATADIR}/party2day.pkl", "wb"))
pickle.dump(party2gender, open(f"{DATADIR}/party2gender.pkl", "wb"))
pickle.dump(party2age, open(f"{DATADIR}/party2age.pkl", "wb"))
pickle.dump(party2marital_status, open(f"{DATADIR}/party2marital_status.pkl", "wb"))
pickle.dump(party2child_cnt, open(f"{DATADIR}/party2child_cnt.pkl", "wb"))
pickle.dump(party2like_cat, open(f"{DATADIR}/party2like_cat.pkl", "wb"))
pickle.dump(party2fav_cat, open(f"{DATADIR}/party2fav_cat.pkl", "wb"))
pickle.dump(party2disl_cat, open(f"{DATADIR}/party2disl_cat.pkl", "wb"))
pickle.dump(party2prod_sum, open(f"{DATADIR}/party2prod_sum.pkl", "wb"))
pickle.dump(party2popular_prod, open(f"{DATADIR}/party2popular_prod.pkl", "wb"))
else:
party2dates = pickle.load(open(f"{DATADIR}/party2dates.pkl", 'rb'))
party2sum = pickle.load(open(f"{DATADIR}/party2sum.pkl", 'rb'))
party2merchant_type = pickle.load(open(f"{DATADIR}/party2merchant_type.pkl", 'rb'))
party2trans_type = pickle.load(open(f"{DATADIR}/party2trans_type.pkl", 'rb'))
party2fin_type = pickle.load(open(f"{DATADIR}/party2fin_type.pkl", 'rb'))
party2trans_cat = pickle.load(open(f"{DATADIR}/party2trans_cat.pkl", 'rb'))
party2day = pickle.load(open(f"{DATADIR}/party2day.pkl", 'rb'))
party2gender = pickle.load(open(f"{DATADIR}/party2gender.pkl", 'rb'))
party2age = pickle.load(open(f"{DATADIR}/party2age.pkl", 'rb'))
party2marital_status = pickle.load(open(f"{DATADIR}/party2marital_status.pkl", 'rb'))
party2child_cnt = pickle.load(open(f"{DATADIR}/party2child_cnt.pkl", 'rb'))
party2like_cat = pickle.load(open(f"{DATADIR}/party2like_cat.pkl", 'rb'))
party2fav_cat = pickle.load(open(f"{DATADIR}/party2fav_cat.pkl", 'rb'))
party2disl_cat = pickle.load(open(f"{DATADIR}/party2disl_cat.pkl", 'rb'))
party2prod_sum = pickle.load(open(f"{DATADIR}/party2prod_sum.pkl", 'rb'))
party2popular_prod = pickle.load(open(f"{DATADIR}/party2popular_prod.pkl", 'rb'))
train_party, valid_party = train_test_split(
pd.read_csv(transactions_path, usecols=['party_rk']).party_rk.unique(),
train_size=0.8, random_state=42
)
predict_period_len = 60 # -- days
train_predict_dates = (
pd.date_range("2019-03-01", "2019-10-31", freq="MS")
.strftime("%Y-%m-%d")
.tolist()
)
valid_predict_dates = (
pd.date_range("2019-11-01", "2019-12-31", freq="MS")
.strftime("%Y-%m-%d")
.tolist()
)
submission_predict_dates = (
pd.date_range("2020-01-01", "2020-02-28", freq="2MS")
.strftime("%Y-%m-%d")
.tolist()
)
train_sum, train_trans_type, train_merchant_type, train_labels, train_fin_type, train_trans_cat, train_day, \
train_gender, train_age, train_marital_status, train_child_cnt, train_like_cat, train_fav_cat, train_disl_cat, \
train_prod_sum, train_popular_prod = prepare_data(train_party, mode="train")
valid_sum, valid_trans_type, valid_merchant_type, valid_labels, valid_fin_type, valid_trans_cat, valid_day, \
valid_gender, valid_age, valid_marital_status, valid_child_cnt, valid_like_cat, valid_fav_cat, valid_disl_cat, \
valid_prod_sum, valid_popular_prod = prepare_data(valid_party, mode="valid")
MERCH_TYPE_NCLASSES = len(mappings['merchant_type'])
TRANS_TYPE_NCLASSES = len(mappings['transaction_type_desc'])
CAT_TYPE_NCLASSES = len(mappings['transaction_category'])
PADDING_LEN = 300
train_dataset = RSDataset(
train_sum, train_trans_type, train_merchant_type, train_labels,
train_fin_type, train_trans_cat, train_day, train_gender, train_age,
train_marital_status, train_child_cnt, train_like_cat, train_fav_cat,
train_disl_cat, train_prod_sum, train_popular_prod
)
valid_dataset = RSDataset(
valid_sum, valid_trans_type, valid_merchant_type, valid_labels,
valid_fin_type, valid_trans_cat, valid_day, valid_gender, valid_age,
valid_marital_status, valid_child_cnt, valid_like_cat, valid_fav_cat,
valid_disl_cat, valid_prod_sum, valid_popular_prod
)
train_loader = DataLoader(
train_dataset, batch_size=64, shuffle=True, num_workers=2
)
valid_loader = DataLoader(
valid_dataset, batch_size=64, shuffle=False, num_workers=2
)
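    # A minimal sketch of consuming these loaders (the model itself is assumed to be defined elsewhere):
    # for batch in train_loader:
    #     features, targets = batch["features"], batch["targets"]
    #     ...  # forward pass on features, loss against targets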
|
from django.contrib import admin
from .models import Realtor, Immo
# Register your models here.
@admin.register(Realtor)
class RealtorAdmin(admin.ModelAdmin):
pass
@admin.register(Immo)
class ImmoAdmin(admin.ModelAdmin):
pass
|
print("Master")
print("Check Command")
|
from flask_wtf import FlaskForm
from wtforms import SubmitField, IntegerField
from wtforms.validators import DataRequired
class PrintLabelsForm(FlaskForm):
start_from = IntegerField('Start from', validators=[DataRequired()])
page_count = IntegerField('Page count', validators=[DataRequired()])
submit = SubmitField('Download')
|
from selenium import webdriver
import random
from selenium.webdriver.common.keys import Keys
from time import sleep
from easygui import passwordbox
# Note: the chromedriver executable must sit next to this script for the program to run
x = 'yes'
while x == 'yes' :
    # 1.1 Enter the Facebook username and password
username = input("USERNAME :")
pass_fb = passwordbox("PASSWORD:")
    # 1.2 Start a simulated Chrome browser
browser = webdriver.Chrome(executable_path="chromedriver.exe")
browser.get("https://vi-vn.facebook.com/")
    # 1.3 Send the credentials to the username and password fields
    username_fb = browser.find_element_by_id("email") # the account input has the id "email"
    username_fb.send_keys(username) # type the username into that element
pass_work_fb = browser.find_element_by_id("pass")
pass_work_fb.send_keys(pass_fb)
sleep(random.randint(2,5))
    pass_work_fb.send_keys(Keys.ENTER) # log in: press ENTER automatically
sleep(random.randint(3,5))
    # the input() below pauses the program until you finish interacting with the browser
    start = input('press any key to continue :')
comment_list = browser.find_elements_by_xpath("//div[@class='b3i9ofy5 e72ty7fz qlfml3jp inkptoze qmr60zad rq0escxv oo9gr5id q9uorilb kvgmc6g5 cxmmr5t8 oygrvhab hcukyx3x d2edcug0 jm1wdb64 l9j0dhe7 l3itjdph qv66sw1b']")
print(comment_list)
    # 2. Loop over all the comments and print their contents
for comment in comment_list:
        # print the poster's name and the content, separated by ":"
poster = comment.find_element_by_class_name("nc684nl6")
content = comment.find_element_by_xpath("/html/body/div[1]/div/div[1]/div/div[3]/div/div/div[1]/div[1]/div[4]/div/div/div/div/div/div[1]/div/div/div/div/div/div/div/div/div/div/div[2]/div/div[4]/div/div/div[2]/ul/li[2]/div[1]/div/div[2]/div/div[1]/div/div/div/div/div/span/div")
print("<>", poster.text, ":", content.text)
    x = input('Do you want to continue crawling? (yes/no) : ')
    # close the browser
browser.close()
|
from pathlib import Path
from dotenv import load_dotenv
import os
load_dotenv()
BASE_DIR = Path(__file__).resolve().parent.parent
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'ui/'), ]
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SECRET_KEY = os.environ.get('DJANGO_SK')
DEBUG = False
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'api',
'recipes',
'users',
'rest_framework',
'sorl.thumbnail',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
SITE_ID = 1 # foodgram
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if DEBUG:
INSTALLED_APPS += [
'debug_toolbar',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'root': {
'handlers': ['console'],
'level': 'WARNING',
},
}
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'recipes.context_processors.shoplist',
'recipes.context_processors.tags',
'recipes.context_processors.is_following'
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
DATABASES = {
'default': {
'ENGINE': os.environ.get('DB_ENGINE'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('DB_HOST'),
'PORT': os.environ.get('DB_PORT'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = 'index'
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'sent_emails')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
ns = []
try:
while True:
n = float(input())
ns.append( [n] )
except (EOFError, ValueError):
pass
for i in range (0,len(ns) - 1):
for j in range (0,len(ns) - i - 1):
ns[j].append( (pow(2, i + 1) * ns[j + 1][i] - ns[j][i]) / (pow(2, i + 1) - 1) )
print('N:', ns[0][len(ns[0]) - 1])
|
import tensorflow as tf
import os
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
import argparse
def decode_labels(image, label):
""" store label data to colored images """
layer1 = [255, 0, 0]
layer2 = [255, 165, 0]
layer3 = [255, 255, 0]
layer4 = [0, 255, 0]
layer5 = [0, 127, 255]
layer6 = [0, 0, 255]
layer7 = [127, 255, 212]
layer8 = [139, 0, 255]
r = image.copy()
g = image.copy()
b = image.copy()
label_colours = np.array([layer1, layer2, layer3, layer4, layer5, layer6, layer7, layer8])
for l in range(0, 7):
r[label == l+1] = label_colours[l, 0]
g[label == l+1] = label_colours[l, 1]
b[label == l+1] = label_colours[l, 2]
r[label == 9] = label_colours[7, 0]
g[label == 9] = label_colours[7, 1]
b[label == 9] = label_colours[7, 2]
rgb = np.zeros((image.shape[0], image.shape[1], 3))
rgb[:, :, 0] = r/255.0
rgb[:, :, 1] = g/255.0
rgb[:, :, 2] = b/255.0
return rgb
def predict(checkpoint_dir, image_file):
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph(latest_checkpoint + '.meta')
new_saver.restore(sess, latest_checkpoint)
graph = tf.get_default_graph()
pred_tensor = tf.get_collection('prediction')[0]
image_placeholder = graph.get_operation_by_name('image').outputs[0]
        is_training = graph.get_tensor_by_name('Placeholder:0')  # assumed to be the is_training flag; feed the placeholder itself rather than a slice of it
image_path = os.path.join('images', image_file)
image = io.imread(image_path, as_gray=True)
image = image[np.newaxis, ..., np.newaxis]
image = image / 127.5 - 1.0
fetches = [pred_tensor]
feed_dict = {image_placeholder: image, is_training: False}
preds = sess.run(fetches, feed_dict=feed_dict)
preds = np.squeeze(preds)
image = np.squeeze(image)
image = np.squeeze((image + 1) * 127.5)
labeled_image = decode_labels(image, preds)
plt.figure(1)
ax1 = plt.subplot(121)
plt.imshow(np.uint8(image), cmap='gray')
ax1.set_axis_off()
ax2 = plt.subplot(122)
plt.imshow(labeled_image)
ax2.set_axis_off()
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--save_path', '-s', type=str, default='outputs')
parser.add_argument('--checkpoint_dir', '-c', type=str, default='model')
parser.add_argument('--image_file', '-i', type=str, default='image_1.png')
args = parser.parse_args()
wd = os.path.dirname(os.path.realpath(__file__))
args.save_path = os.path.join(wd, args.save_path)
args.checkpoint_dir = os.path.join(wd, args.checkpoint_dir)
predict(args.checkpoint_dir, args.image_file)
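    # Example invocation (assumed layout: a TF1 checkpoint under ./model and the
    # input image under ./images/):
    #   python <this script>.py -c model -i image_1.png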
|
import pytest
pytest_plugins = ['session_log_fixture', 'current_stack_fixture']
|
import xlrd
from collections import Counter
from matplotlib import pyplot as plt
colors = ["red","coral","green","yellow","orange","purple","Indigo"]
data = xlrd.open_workbook('dataFood.xlsx')
table = data.sheet_by_name(u'愛評網')
cityCounter = Counter()
for x in table.col_values(2):
cityCounter.update([x])
topsixLable = []
topsixValue = []
count = 0
for x in cityCounter.most_common():
    if count > 5:
        if count == 6:
            # open an "other areas" bucket for everything beyond the top six
            topsixLable.append("其他區域")
            topsixValue.append(x[1])
        else:
            # fold the remaining areas into the "other" bucket
            topsixValue[6] = topsixValue[6] + x[1]
    else:
        topsixLable.append(x[0])
        topsixValue.append(x[1])
    count += 1
labels = [x for x in cityCounter]
allValue = sum([cityCounter[x] for x in cityCounter])
explode = [0 for x in range(len(topsixLable))]
sizes = [x/allValue * 100 for x in topsixValue]
import random
#colors = cm.Set1(np.arange(len(labels)))
plt.rcParams['font.sans-serif'] = ['DFKai-SB']
plt.figure(figsize=(7,9))
plt.pie(sizes,labels=topsixLable,explode=explode,labeldistance=1.2,autopct='%3.1f%%',shadow=False,startangle =90,pctdistance = 1.1)
plt.axis('equal')
plt.legend(bbox_to_anchor=(-0.1, 1.1), loc=2, borderaxespad=0.)
plt.savefig("city.png", format="png")  # save before show(); the figure is torn down once the window closes
plt.show()
|
# Purpose: Manages the QActions that are bound to hotkeys
from .KeyBind import KeyBind
from .KeyBinds import KeyBindsByID
from bsp.leveleditor import LEGlobals
from PyQt5 import QtWidgets, QtGui, QtCore
class EditorAction(QtWidgets.QAction):
def __init__(self, text, parent, checkable, keyBindID):
QtWidgets.QAction.__init__(self, text, parent)
self.keyBindID = keyBindID
keyBind = KeyBindsByID[keyBindID]
self.keyBind = keyBind
def connect(self, func):
self.triggered.connect(func)
def disconnect(self, func):
self.triggered.disconnect(func)
def enable(self):
self.setEnabled(True)
def disable(self):
self.setEnabled(False)
class MenuManager:
def __init__(self):
# Key bind ID -> QAction
self.actions = {}
def action(self, id):
return self.actions[id]
def connect(self, keyBindID, func):
self.action(keyBindID).connect(func)
def disconnect(self, keyBindID, func):
self.action(keyBindID).disconnect(func)
def enableAction(self, keyBindID):
action = self.actions.get(keyBindID, None)
if action:
action.setEnabled(True)
def disableAction(self, keyBindID):
action = self.actions.get(keyBindID, None)
if action:
action.setEnabled(False)
def addAction(self, keyBindID, text, desc, icon = None, toolBar = False, checkable = False, menu = None, enabled = True):
action = EditorAction(text, base.qtWindow, checkable, keyBindID)
if icon is not None:
icon = QtGui.QIcon(icon)
action.setIcon(icon)
action.setCheckable(checkable)
action.setToolTip(desc)
action.setStatusTip(desc)
action.setIconVisibleInMenu(False)
action.setShortcutVisibleInContextMenu(True)
action.setEnabled(enabled)
if toolBar:
if isinstance(toolBar, bool):
base.topBar.addAction(action)
else:
toolBar.addAction(action)
if menu:
menu.addAction(action)
action.setShortcut(action.keyBind.shortcut)
self.actions[keyBindID] = action
return action
def createToolBar(self, name):
toolBar = base.qtWindow.addToolBar(name)
toolBar.setIconSize(QtCore.QSize(24, 24))
toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
label = QtWidgets.QLabel(name)
label.setAlignment(QtCore.Qt.AlignCenter)
toolBar.addWidget(label)
return toolBar
def createMenu(self, name):
menu = QtWidgets.QMenu(name, base.menuBar)
base.menuBar.addMenu(menu)
return menu
def addMenuItems(self):
editToolBar = self.createToolBar("Editing:")
fileMenu = self.createMenu("File")
self.addAction(KeyBind.FileNew, "New", "Create a new map", menu=fileMenu)
self.addAction(KeyBind.FileOpen, "Open...", "Open an existing map", menu=fileMenu)
self.addAction(KeyBind.FileClose, "Close", "Close the map", toolBar=editToolBar, menu=fileMenu, enabled=False,
icon="resources/icons/editor-close.png")
self.addAction(KeyBind.FileCloseAll, "Close All", "Close all open maps", menu=fileMenu, enabled=False)
fileMenu.addSeparator()
self.addAction(KeyBind.FileSave, "Save", "Save the map", toolBar=editToolBar, menu=fileMenu, enabled=False,
icon="resources/icons/editor-save.png")
self.addAction(KeyBind.FileSaveAs, "Save As...", "Save the map as", menu=fileMenu, enabled=False)
self.addAction(KeyBind.FileSaveAll, "Save All", "Save all maps", menu=fileMenu, enabled=False)
fileMenu.addSeparator()
self.addAction(KeyBind.Run, "Run...", "Run the map", menu=fileMenu, enabled=False)
fileMenu.addSeparator()
self.addAction(KeyBind.Exit, "Exit", "Exit %s" % LEGlobals.AppName, menu=fileMenu)
editMenu = self.createMenu("Edit")
self.addAction(KeyBind.Undo, "Undo", "Undo the previous action", menu=editMenu, toolBar=editToolBar, enabled=False,
icon="resources/icons/editor-undo.png")
self.addAction(KeyBind.Redo, "Redo", "Redo the previous action", menu=editMenu, toolBar=editToolBar, enabled=False,
icon="resources/icons/editor-redo.png")
editMenu.addSeparator()
self.addAction(KeyBind.Delete, "Delete", "Delete the selected objects", menu=editMenu, enabled=False)
self.addAction(KeyBind.Cut, "Cut", "Cut the selected objects", menu=editMenu, enabled=False)
self.addAction(KeyBind.Copy, "Copy", "Copy the selected objects", menu=editMenu, enabled=False)
self.addAction(KeyBind.Paste, "Paste", "Paste objects", menu=editMenu, enabled=False)
editMenu.addSeparator()
self.addAction(KeyBind.GroupSelected, "Group", "Group the selected objects", menu=editMenu, enabled=False)
self.addAction(KeyBind.UngroupSelected, "Ungroup", "Ungroup the selected objects", menu=editMenu, enabled=False)
editMenu.addSeparator()
self.addAction(KeyBind.TieToWorld, "Tie to World", "Tie selected objects to world", menu=editMenu, enabled=False)
self.addAction(KeyBind.TieToEntity, "Tie to Entity", "Tie selected objects to entity", menu=editMenu, enabled=False)
editMenu.addSeparator()
self.addAction(KeyBind.ToggleGridSnap, "Grid Snap", "Toggle snap to grid", menu=editMenu, enabled=False, toolBar=editToolBar, checkable=True,
icon="resources/icons/editor-grid-snap.png")
self.addAction(KeyBind.IncGridSize, "Increase Grid Size", "Increase grid size", menu=editMenu, enabled=False, toolBar=editToolBar,
icon="resources/icons/editor-inc-grid.png")
self.addAction(KeyBind.DecGridSize, "Decrease Grid Size", "Decrease grid size", menu=editMenu, enabled=False, toolBar=editToolBar,
icon="resources/icons/editor-dec-grid.png")
viewMenu = self.createMenu("View")
self.addAction(KeyBind.ViewQuads, "Quad View", "Arrange viewports in quad splitter", menu=viewMenu, enabled=False)
self.addAction(KeyBind.View3D, "3D Perspective", "Focus 3D Perspective", menu=viewMenu, enabled=False)
self.addAction(KeyBind.ViewXY, "2D Top", "Focus 2D Top", menu=viewMenu, enabled=False)
self.addAction(KeyBind.ViewYZ, "2D Side", "Focus 2D Side", menu=viewMenu, enabled=False)
self.addAction(KeyBind.ViewXZ, "2D Front", "Focus 2D Front", menu=viewMenu, enabled=False)
viewMenu.addSeparator()
self.addAction(KeyBind.Toggle2DGrid, "2D Grid", "Toggle 2D grid", menu=viewMenu, toolBar=editToolBar, enabled=False, checkable=True,
icon="resources/icons/editor-grid-2d.png")
self.addAction(KeyBind.Toggle3DGrid, "3D Grid", "Toggle 3D grid", menu=viewMenu, toolBar=editToolBar, enabled=False, checkable=True,
icon="resources/icons/editor-grid-3d.png")
self.editMenu = editMenu
|
# This file is part of beets.
# Copyright 2019, Joris Jensen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
List all files in the library folder which are not listed in the
beets library database, including art files
"""
import os
from beets import util
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
__author__ = 'https://github.com/MrNuggelz'
class Unimported(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
'ignore_extensions': [],
'ignore_subdirectories': []
}
)
def commands(self):
def print_unimported(lib, opts, args):
ignore_exts = [
('.' + x).encode()
for x in self.config["ignore_extensions"].as_str_seq()
]
ignore_dirs = [
os.path.join(lib.directory, x.encode())
for x in self.config["ignore_subdirectories"].as_str_seq()
]
in_folder = {
os.path.join(r, file)
for r, d, f in os.walk(lib.directory)
for file in f
if not any(
[file.endswith(ext) for ext in ignore_exts]
+ [r in ignore_dirs]
)
}
in_library = {x.path for x in lib.items()}
art_files = {x.artpath for x in lib.albums()}
for f in in_folder - in_library - art_files:
print_(util.displayable_path(f))
unimported = Subcommand(
'unimported',
help='list all files in the library folder which are not listed'
' in the beets library database')
unimported.func = print_unimported
return [unimported]
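# Example (assuming the plugin is enabled in the beets config, e.g. `plugins: unimported`):
#   beet unimported
# prints every file under the library directory that the beets database does not track.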
|
#!/usr/bin/env python
import rospy
import tf
# from nav_msgs.msg import *
# from geometry_msgs.msg import *
from gazebo_msgs.srv import GetLinkState
if __name__ == '__main__':
rospy.init_node('tf_X_broadcaster')
botframe1 = 'X1/base_link'
botframe2 = 'X2/base_link'
br = tf.TransformBroadcaster()
rospy.wait_for_service('/gazebo/get_link_state')
gazebo_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
rate = rospy.Rate(120) # 120hz
while not rospy.is_shutdown():
try:
X1_state = gazebo_link_state('X1::X1/base_link', 'world')
# print(X1_state.link_state.pose.position)
br.sendTransform((X1_state.link_state.pose.position.x,
X1_state.link_state.pose.position.y,
X1_state.link_state.pose.position.z),
(X1_state.link_state.pose.orientation.x,
X1_state.link_state.pose.orientation.y,
X1_state.link_state.pose.orientation.z,
X1_state.link_state.pose.orientation.w),
rospy.Time.now(),
botframe1,
"world")
X2_state = gazebo_link_state('X2::X2/base_link', 'world')
br.sendTransform((X2_state.link_state.pose.position.x,
X2_state.link_state.pose.position.y,
X2_state.link_state.pose.position.z),
(X2_state.link_state.pose.orientation.x,
X2_state.link_state.pose.orientation.y,
X2_state.link_state.pose.orientation.z,
X2_state.link_state.pose.orientation.w),
rospy.Time.now(),
botframe2,
"world")
rate.sleep()
        except Exception:
            print('Could not send base_link to world transform')
break
rospy.spin()
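    # Running this node (assumed setup): a Gazebo world containing models named X1 and X2,
    # so that /gazebo/get_link_state can resolve 'X1::X1/base_link' and 'X2::X2/base_link'.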
|
# Author: Andrés Reyes Rangel
# Compute the side-view area of an ice cream: a semicircle on top of a triangle of base 2*radio and height altura
radio = int(input("Enter the radius: "))
altura = int(input("Enter the height: "))
area = (3.141592 * radio ** 2) / 2 + radio * altura
print("Area =", area)
|
class Solution:
    def candy(self, ratings):  # greedy as well: two passes, left-to-right then right-to-left
candy_list = [1]*len(ratings)
if len(ratings) == 1:
return 1
if len(ratings) == 0:
return 0
for i in range(1,len(ratings)):
if ratings[i] > ratings[i-1] and candy_list[i]<=candy_list[i-1]:
candy_list[i] = candy_list[i-1] + 1
print(candy_list)
print("------")
for i in range(len(ratings)-2,-1,-1):
if ratings[i] > ratings[i+1] and candy_list[i]<=candy_list[i+1]:
candy_list[i] = candy_list[i+1] + 1
print(candy_list)
all_candy = 0
for candy in candy_list:
all_candy += candy
return all_candy
    def candy_old(self, ratings):  # this version timed out when submitted (kept for reference)
candy_list = [1]*len(ratings)
change = True
if len(ratings) == 1:
return 1
if len(ratings) == 0:
return 0
while change:
change = False
for i in range(len(ratings)):
if i == 0:
if ratings[i] > ratings[i+1] and candy_list[i]<=candy_list[i+1]:
candy_list[i] += 1
change = True
elif ratings[i] < ratings[i+1] and candy_list[i]>=candy_list[i+1]:
candy_list[i+1] += 1
change = True
elif i == len(ratings)-1:
if ratings[i] > ratings[i-1] and candy_list[i]<=candy_list[i-1]:
candy_list[i] += 1
change = True
elif ratings[i] < ratings[i-1] and candy_list[i]>=candy_list[i-1]:
candy_list[i-1] += 1
change = True
else:
if ratings[i] > ratings[i+1] and candy_list[i]<=candy_list[i+1]:
candy_list[i] += 1
change = True
elif ratings[i] < ratings[i+1] and candy_list[i]>=candy_list[i+1]:
candy_list[i+1] += 1
change = True
if ratings[i] > ratings[i-1] and candy_list[i]<=candy_list[i-1]:
candy_list[i] += 1
change = True
elif ratings[i] < ratings[i-1] and candy_list[i]>=candy_list[i-1]:
candy_list[i-1] += 1
change = True
all_candy = 0
for candy in candy_list:
all_candy += candy
return all_candy
if __name__ == '__main__':
mysol = Solution()
ratings = [0]
ratings1 = [1,0,2]
ratings2 = [1,2,2]
ratings3 = [1,2,87,87,87,2,1]
print(mysol.candy(ratings1))
|
# Generated by Django 2.2.1 on 2019-05-19 02:14
from django.db import migrations, models
import filebrowser.fields
class Migration(migrations.Migration):
dependencies = [
('posts', '0007_auto_20190519_0009'),
]
operations = [
migrations.AlterField(
model_name='category',
name='category_slug',
field=models.SlugField(blank=True, help_text='Text that shows in URL. Will automatically populate when object is saved.', unique=True),
),
migrations.AlterField(
model_name='category',
name='is_published',
field=models.BooleanField(default=True, help_text='Check the box to publish category.'),
),
migrations.AlterField(
model_name='post',
name='featured_image',
field=filebrowser.fields.FileBrowseField(blank=True, help_text='Image featured in post. Must be at least 1,000px X 1,000px', max_length=500, verbose_name='Featured image'),
),
migrations.AlterField(
model_name='post',
name='post_slug',
field=models.SlugField(blank=True, help_text='Text that shows in URL. Will automatically populate when object is saved.', unique=True),
),
migrations.AlterField(
model_name='post',
name='thumbnail_image',
field=filebrowser.fields.FileBrowseField(blank=True, help_text='Thumbnail image used across site. Must be at least 1,000px X 1,000px', max_length=500, verbose_name='Thumbnail image'),
),
migrations.AlterField(
model_name='tag',
name='tag_name',
field=models.CharField(help_text='Tag should be short. 1 or 2 words and less than 50 characters.', max_length=50, unique=True),
),
migrations.AlterField(
model_name='tag',
name='tag_slug',
field=models.SlugField(blank=True, help_text='Text that shows in URL. Will automatically populate when object is saved.', unique=True),
),
]
|
# Generated by Django 2.1.2 on 2018-10-06 10:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20181006_0136'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('description', models.CharField(max_length=300)),
('proposal', models.FileField(upload_to='projects/proposals/')),
('is_accepted', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('supervisor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Faculty')),
],
),
]
|
import math
from typing import Union
import pygame as pg
import numpy as np
class Player:
    def __init__(self, start_position: Union[list, tuple], start_angle: int, movement_speed: int, angle_speed: int, use_mouse=False, rotational_control=False, strait_control=False):
self.pos = np.array(start_position, dtype=float)
self.angle = start_angle
self.movement_speed = movement_speed
self.angle_speed = angle_speed
self.mouse_control = use_mouse
self.rotational_control = rotational_control
self.strait_control = strait_control
self.movement_control = lambda x: None
if self.mouse_control:
pg.mouse.set_visible(False)
pg.event.set_grab(True)
self.movement_control = self.mouse_movement
elif self.rotational_control:
self.movement_control = self.rotational_movement
elif self.strait_control:
self.movement_control = self.strait_movement
def update(self):
pressed_key = pg.key.get_pressed()
if pressed_key[pg.K_ESCAPE]:
exit()
self.movement_control(pressed_key)
def mouse_movement(self, pressed_key):
pg.mouse.set_pos(200, 200)
self.angle += pg.mouse.get_rel()[0] / 10
self.angle %= 360
sin_a = math.sin(self.angle / 180 * math.pi)
cos_a = math.cos(self.angle / 180 * math.pi)
if pressed_key[pg.K_w]:
self.pos[0] += self.movement_speed * cos_a
self.pos[1] += self.movement_speed * sin_a
if pressed_key[pg.K_s]:
self.pos[0] -= self.movement_speed * cos_a
self.pos[1] -= self.movement_speed * sin_a
if pressed_key[pg.K_a]:
self.pos[0] += self.movement_speed * sin_a
self.pos[1] -= self.movement_speed * cos_a
if pressed_key[pg.K_d]:
self.pos[0] -= self.movement_speed * sin_a
self.pos[1] += self.movement_speed * cos_a
def rotational_movement(self, pressed_key):
sin_a = math.sin(self.angle / 180 * math.pi)
cos_a = math.cos(self.angle / 180 * math.pi)
if pressed_key[pg.K_LEFT]:
self.angle -= self.angle_speed
if pressed_key[pg.K_RIGHT]:
self.angle += self.angle_speed
if pressed_key[pg.K_w]:
self.pos[0] += self.movement_speed * cos_a
self.pos[1] += self.movement_speed * sin_a
if pressed_key[pg.K_s]:
self.pos[0] -= self.movement_speed * cos_a
self.pos[1] -= self.movement_speed * sin_a
if pressed_key[pg.K_a]:
self.pos[0] += self.movement_speed * sin_a
self.pos[1] -= self.movement_speed * cos_a
if pressed_key[pg.K_d]:
self.pos[0] -= self.movement_speed * sin_a
self.pos[1] += self.movement_speed * cos_a
def strait_movement(self, pressed_key):
if pressed_key[pg.K_LEFT]:
self.angle -= self.angle_speed
if pressed_key[pg.K_RIGHT]:
self.angle += self.angle_speed
if pressed_key[pg.K_w]:
self.pos[1] -= self.movement_speed
if pressed_key[pg.K_s]:
self.pos[1] += self.movement_speed
if pressed_key[pg.K_a]:
self.pos[0] -= self.movement_speed
if pressed_key[pg.K_d]:
self.pos[0] += self.movement_speed
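# Minimal usage sketch (not part of the original module): drive the Player in a small pygame
# loop. The window size, speeds, and drawing are illustrative assumptions.
if __name__ == '__main__':
    pg.init()
    screen = pg.display.set_mode((400, 400))
    clock = pg.time.Clock()
    player = Player([200, 200], start_angle=0, movement_speed=3, angle_speed=2, rotational_control=True)
    while True:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                exit()
        player.update()  # reads the keyboard and moves/rotates the player
        screen.fill((0, 0, 0))
        pg.draw.circle(screen, (255, 255, 255), (int(player.pos[0]), int(player.pos[1])), 5)
        pg.display.flip()
        clock.tick(60)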
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-13 10:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0010_auto_20170413_1526'),
]
operations = [
migrations.AlterField(
model_name='album',
name='cover_image',
field=models.TextField(),
),
]
|
from collections import Counter
import matplotlib.pyplot as plt
import networkx as nx
import sys
import os
import re
from scipy.sparse import csr_matrix
from sklearn.cross_validation import KFold  # old-style KFold API; requires scikit-learn < 0.20
from sklearn.linear_model import LogisticRegression
import string
import pickle
import time
from TwitterAPI import TwitterAPI
import numpy as np
from numpy import array
from collections import Counter, defaultdict
from itertools import chain, combinations
import glob
import numpy.lib.arraysetops as aso
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
def read_data(path):
"""
Walks all subdirectories of this path and reads all
the text files and labels.
DONE ALREADY.
Params:
path....path to files
Returns:
docs.....list of strings, one per document
labels...list of ints, 1=positive, 0=negative label.
Inferred from file path (i.e., if it contains
'pos', it is 1, else 0)
"""
fnames = sorted([f for f in glob.glob(os.path.join(path, 'pos', '*.txt'))])
data = [(1, open(f).readlines()[0]) for f in sorted(fnames)]
fnames = sorted([f for f in glob.glob(os.path.join(path, 'neg', '*.txt'))])
data += [(0, open(f).readlines()[0]) for f in sorted(fnames)]
data = sorted(data, key=lambda x: x[1])
return np.array([d[1] for d in data]), np.array([d[0] for d in data])
def tokenize(doc, keep_internal_punct=False):
"""
Tokenize a string.
The string should be converted to lowercase.
If keep_internal_punct is False, then return only the alphanumerics (letters, numbers and underscore).
If keep_internal_punct is True, then also retain punctuation that
is inside of a word. E.g., in the example below, the token "isn't"
is maintained when keep_internal_punct=True; otherwise, it is
split into "isn" and "t" tokens.
Params:
doc....a string.
keep_internal_punct...see above
Returns:
a numpy array containing the resulting tokens.
>>> tokenize(" Hi there! Isn't this fun?", keep_internal_punct=False)
array(['hi', 'there', 'isn', 't', 'this', 'fun'],
dtype='<U5')
>>> tokenize("Hi there! Isn't this fun? ", keep_internal_punct=True)
array(['hi', 'there', "isn't", 'this', 'fun'],
dtype='<U5')
"""
###TODO
l1=[]
if keep_internal_punct == False:
val=re.sub('[^a-zA-Z0-9_]+', ' ',doc.lower()).split()
arr1= np.array(val)
return arr1
else:
for x in doc.lower().split():
val=re.sub('[^a-zA-Z0-9_*-]+$','',re.sub('^[^a-zA-Z0-9_*-]+','',x))
l1.append(val)
return np.array(l1)
pass
def token_features(tokens, feats):
"""
Add features for each token. The feature name
is pre-pended with the string "token=".
Note that the feats dict is modified in place,
so there is no return value.
Params:
tokens...array of token strings from a document.
feats....dict from feature name to frequency
Returns:
nothing; feats is modified in place.
>>> feats = defaultdict(lambda: 0)
>>> token_features(['hi', 'there', 'hi'], feats)
>>> sorted(feats.items())
[('token=hi', 2), ('token=there', 1)]
"""
###TODO
new_tokens =[]
for i in range(len(tokens)):
new_tokens.append("token="+tokens[i])
feats1 = Counter(new_tokens)
for k,v in feats1.items():
feats[k]=v
sorted(feats.items())
pass
def token_pair_features(tokens, feats, k=3):
"""
Compute features indicating that two words occur near
each other within a window of size k.
For example [a, b, c, d] with k=3 will consider the
windows: [a,b,c], [b,c,d]. In the first window,
a_b, a_c, and b_c appear; in the second window,
b_c, c_d, and b_d appear. This example is in the
doctest below.
Note that the order of the tokens in the feature name
matches the order in which they appear in the document.
(e.g., a__b, not b__a)
Params:
tokens....array of token strings from a document.
feats.....a dict from feature to value
k.........the window size (3 by default)
Returns:
nothing; feats is modified in place.
>>> feats = defaultdict(lambda: 0)
>>> token_pair_features(np.array(['a', 'b', 'c', 'd']), feats)
>>> sorted(feats.items())
[('token_pair=a__b', 1), ('token_pair=a__c', 1), ('token_pair=b__c', 2), ('token_pair=b__d', 1), ('token_pair=c__d', 1)]
"""
###TODO
token_w=[]
pair_t=[]
new_l =[]
val = tokens.tolist()
for i in range(len(val)):
token_w.append(val[i:k+i])
for vv in token_w:
if len(vv)==k:
new_l.append(vv)
#print(vv)
for i in range(len(new_l)):
for j in range(len(new_l[i])):
for k in range(j+1,len(new_l[i])):
pair ="token_pair="+new_l[i][j]+"__"+new_l[i][k]
pair_t.append(pair)
feat1= Counter(pair_t)
for k,v in feat1.items():
feats[k]=v
return sorted(feats.items())
pass
neg_words=set(pickle.load(open("neg_words.txt","rb")))
pos_words=set(pickle.load(open("pos_words.txt","rb")))
def lexicon_features(tokens, feats):
"""
Add features indicating how many time a token appears that matches either
the neg_words or pos_words (defined above). The matching should ignore
case.
Params:
tokens...array of token strings from a document.
feats....dict from feature name to frequency
Returns:
nothing; feats is modified in place.
In this example, 'LOVE' and 'great' match the pos_words,
and 'boring' matches the neg_words list.
>>> feats = defaultdict(lambda: 0)
>>> lexicon_features(np.array(['i', 'LOVE', 'this', 'great', 'boring', 'movie']), feats)
>>> sorted(feats.items())
[('neg_words', 1), ('pos_words', 2)]
"""
###TODO
count1=0
count2=0
for vv in tokens:
val=vv.lower()
#print(vv.lower())
if val in neg_words:
#print (vv)
count1+=1
elif val in pos_words:
#print (vv+"sdaa")
count2+=1
feats['neg_words'] = count1
feats['pos_words'] = count2
sorted(feats.items())
pass
def featurize(tokens, feature_fns):
"""
Compute all features for a list of tokens from
a single document.
Params:
tokens........array of token strings from a document.
feature_fns...a list of functions, one per feature
Returns:
list of (feature, value) tuples, SORTED alphabetically
by the feature name.
>>> feats = featurize(np.array(['i', 'LOVE', 'this', 'great', 'movie']), [token_features, lexicon_features])
>>> feats
[('neg_words', 0), ('pos_words', 2), ('token=LOVE', 1), ('token=great', 1), ('token=i', 1), ('token=movie', 1), ('token=this', 1)]
"""
###TODO
val=[]
feats = defaultdict(lambda:0)
for f in feature_fns:
f(tokens,feats)
return sorted(list(feats.items()))
pass
def vectorize(tokens_list, feature_fns, min_freq, vocab=None):
"""
Given the tokens for a set of documents, create a sparse
feature matrix, where each row represents a document, and
each column represents a feature.
Params:
tokens_list...a list of lists; each sublist is an
array of token strings from a document.
feature_fns...a list of functions, one per feature
min_freq......Remove features that do not appear in
at least min_freq different documents.
Returns:
- a csr_matrix: See https://goo.gl/f5TiF1 for documentation.
This is a sparse matrix (zero values are not stored).
- vocab: a dict from feature name to column index. NOTE
that the columns are sorted alphabetically (so, the feature
"token=great" is column 0 and "token=horrible" is column 1
because "great" < "horrible" alphabetically),
>>> docs = ["Isn't this movie great?", "Horrible, horrible movie"]
>>> tokens_list = [tokenize(d) for d in docs]
>>> feature_fns = [token_features]
>>> X, vocab = vectorize(tokens_list, feature_fns, min_freq=1)
>>> type(X)
<class 'scipy.sparse.csr.csr_matrix'>
>>> X.toarray()
array([[1, 0, 1, 1, 1, 1],
[0, 2, 0, 1, 0, 0]], dtype=int64)
>>> sorted(vocab.items(), key=lambda x: x[1])
[('token=great', 0), ('token=horrible', 1), ('token=isn', 2), ('token=movie', 3), ('token=t', 4), ('token=this', 5)]
"""
###TODO
list_n=[]
col_data=[]
col_data1=[]
feats = defaultdict(lambda:0)
for i in range(len(tokens_list)):
feats=featurize(np.array(tokens_list[i]),feature_fns)
list_n.append(dict(feats))
if vocab ==None:
mydict1 = defaultdict(lambda:0)
mydict2=defaultdict(lambda:0)
myval=[]
for dic in list_n:
for k,v in dic.items():
if dic[k]>0:
mydict1[k]=mydict1[k]+1
if (k not in mydict2) and (mydict1[k]>=min_freq):
myval.append(k)
mydict2[k]=0
myval = sorted(myval)
f=0
vocab = defaultdict(lambda:0)
for m in myval:
vocab[m]=f
f+=1
row=[]
column=[]
data=[]
counter=0
for dic in list_n:
for k,v in dic.items():
if k in vocab:
row.append(counter)
column.append(vocab[k])
data.append(v)
counter+=1
x1 = np.array(data,dtype='int64')
x2= np.array(row,dtype='int64')
x3=np.array(column,dtype='int64')
mat1=csr_matrix( (x1,(x2,x3)), shape=(counter,len(vocab)) )
return mat1 , vocab
else:
row=[]
column=[]
data=[]
counter=0
for dic in list_n:
for k,v in dic.items():
if k in vocab:
row.append(counter)
column.append(vocab[k])
data.append(v)
counter+=1
x1 = np.array(data,dtype='int64')
x2= np.array(row,dtype='int64')
x3=np.array(column,dtype='int64')
mat1=csr_matrix( (x1,(x2,x3)), shape=(counter,len(vocab)) )
return mat1 , vocab
pass
def accuracy_score(truth, predicted):
""" Compute accuracy of predictions.
DONE ALREADY
Params:
truth.......array of true labels (0 or 1)
predicted...array of predicted labels (0 or 1)
"""
return len(np.where(truth==predicted)[0]) / len(truth)
def cross_validation_accuracy(clf, X, labels, k):
"""
Compute the average testing accuracy over k folds of cross-validation. You
can use sklearn's KFold class here (no random seed, and no shuffling
needed).
Params:
clf......A LogisticRegression classifier.
X........A csr_matrix of features.
labels...The true labels for each instance in X
k........The number of cross-validation folds.
Returns:
The average testing accuracy of the classifier
over each fold of cross-validation.
"""
###TODO
model =clf
le = len(X.toarray())
cv = KFold(len(labels),k)
accuracies =[]
Y =labels
for train_ind ,test_ind in cv:
model.fit(X[train_ind],Y[train_ind])
predictions = model.predict(X[test_ind])
accuracies.append(accuracy_score(Y[test_ind],predictions))
return np.mean(accuracies)
pass
def eval_all_combinations(docs, labels, punct_vals, feature_fns, min_freqs):
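    """
    Enumerate all combinations of tokenization setting (punct_vals), feature
    function subsets of size 1, 2 and 3, and min_freq values, and evaluate each
    with 5-fold cross-validation of a LogisticRegression classifier.
    Params:
        docs..........list of training document strings.
        labels........true label (0 or 1) for each document.
        punct_vals....list of booleans passed to tokenize(keep_internal_punct=...).
        feature_fns...list of feature functions to combine.
        min_freqs.....list of min_freq values passed to vectorize.
    Returns:
        list of dicts with keys 'punct', 'features', 'min_freq', 'accuracy',
        sorted by accuracy (descending), ties broken by min_freq (descending).
    """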
data_true=[]
data_false=[]
X = docs
Y = labels
k = len(docs)
final_feat={}
model = LogisticRegression()
for val in punct_vals:
for i in range(len(docs)):
if val == False:
text= tokenize(docs[i],val)
data_false.append(text)
else:
text= tokenize(docs[i],val)
data_true.append(text)
#print(data_false)
feat_pair1=[]
for i in range(len(feature_fns)):
feat_pair1.append(feature_fns[i:i+1])
#print(feat_pair1)
feat_pair2 = list(combinations(feature_fns,2))
#print(feat_pair2[0])
feat_pair3 = list(combinations(feature_fns,3))
final_list=[]
for val in punct_vals:
for i in range(len(min_freqs)):
            for j in range(len(feat_pair1)):
                my_dict_pair = {}  # fresh dict per combination so earlier appended results are not overwritten
if val==False:
mat1,vocab = vectorize(data_false,feat_pair1[j],min_freqs[i])
if mat1!=None and vocab!=None:
accu = cross_validation_accuracy(model,mat1,labels,5)
my_dict_pair['features']=tuple(feat_pair1[j])
my_dict_pair['punct'] = val
my_dict_pair['accuracy'] = accu
my_dict_pair['min_freq']=min_freqs[i]
final_list.append(my_dict_pair)
else:
mat1,vocab = vectorize(data_true,feat_pair1[j],min_freqs[i])
if mat1!=None and vocab!=None:
accu = cross_validation_accuracy(model,mat1,labels,5)
my_dict_pair['features']=tuple(feat_pair1[j])
my_dict_pair['punct'] = val
my_dict_pair['accuracy'] = accu
my_dict_pair['min_freq']=min_freqs[i]
final_list.append(my_dict_pair)
for val in punct_vals:
for i in range(len(min_freqs)):
            for j in range(len(feat_pair2)):
                my_dict_pair = {}  # fresh dict per combination so earlier appended results are not overwritten
if val==False:
mat1,vocab = vectorize(data_false,list(feat_pair2[j]),min_freqs[i])
if mat1!=None and vocab!=None:
accu = cross_validation_accuracy(model,mat1,labels,5)
my_dict_pair['features']=feat_pair2[j]
my_dict_pair['punct'] = val
my_dict_pair['accuracy'] = accu
my_dict_pair['min_freq']=min_freqs[i]
final_list.append(my_dict_pair)
else:
mat1,vocab = vectorize(data_true,list(feat_pair2[j]),min_freqs[i])
if mat1!=None and vocab!=None:
accu = cross_validation_accuracy(model,mat1,labels,5)
my_dict_pair['features']=feat_pair2[j]
my_dict_pair['punct'] = val
my_dict_pair['accuracy'] = accu
my_dict_pair['min_freq']=min_freqs[i]
final_list.append(my_dict_pair)
for val in punct_vals:
for i in range(len(min_freqs)):
            for j in range(len(feat_pair3)):
                my_dict_pair = {}  # fresh dict per combination so earlier appended results are not overwritten
if val==False:
mat1,vocab = vectorize(data_false,list(feat_pair3[j]),min_freqs[i])
if mat1!=None and vocab!=None:
accu = cross_validation_accuracy(model,mat1,labels,5)
my_dict_pair['features']=feat_pair3[j]
my_dict_pair['punct'] = val
my_dict_pair['accuracy'] = accu
my_dict_pair['min_freq']=min_freqs[i]
final_list.append(my_dict_pair)
else:
mat1,vocab = vectorize(data_true,list(feat_pair3[j]),min_freqs[i])
if mat1!=None and vocab!=None:
accu = cross_validation_accuracy(model,mat1,labels,5)
my_dict_pair['features']=feat_pair3[j]
my_dict_pair['punct'] = val
my_dict_pair['accuracy'] = accu
my_dict_pair['min_freq']=min_freqs[i]
final_list.append(my_dict_pair)
return sorted(final_list,key=lambda x:(-x['accuracy'],-x['min_freq']))
pass
def fit_best_classifier(docs, labels, best_result):
"""
Using the best setting from eval_all_combinations,
re-vectorize all the training data and fit a
LogisticRegression classifier to all training data.
(i.e., no cross-validation done here)
Params:
docs..........List of training document strings.
labels........The true labels for each training document (0 or 1)
best_result...Element of eval_all_combinations
with highest accuracy
Returns:
clf.....A LogisticRegression classifier fit to all
training data.
vocab...The dict from feature name to column index.
"""
###TODO
punct=0
min_freq=0
feat_fn=0
len1= len(docs)
for k,v in best_result.items():
if k=='punct':
punct=best_result.get(k)
if k=='min_freq':
min_freq = best_result.get(k)
if k=="features":
feat_fn=best_result.get(k)
data=[]
for i in range(len(docs)):
val=tokenize(docs[i],punct)
data.append(val)
mat1,vocab = vectorize(data,list(feat_fn),min_freq)
clf = LogisticRegression()
clf.fit(mat1[:len1],labels)
return clf,vocab
pass
def get_unique_tweets(tweets,best_result,vocab):
unique_tweets=[]
test_tokens=[]
for x in tweets:
if x not in unique_tweets:
unique_tweets.append(x)
for val in unique_tweets:
data1=tokenize(val,best_result['punct'])
test_tokens.append(data1)
#print(best_result['min_freq'])
X_test,vocab1 = vectorize(test_tokens,list(best_result['features']),best_result['min_freq'],vocab)
#print(X_test)
return X_test, vocab1, test_tokens
def my_predict(clf,X_test,tweets1):
neg_tweets=[]
pos_tweets=[]
tweets=[]
for x in tweets1:
if x not in tweets:
tweets.append(x)
predict = clf.predict(X_test)
for i, val in enumerate(predict):
if val == 0:
neg_tweets.append(tweets[i])
if val==1:
pos_tweets.append(tweets[i])
return neg_tweets,pos_tweets
def classify():
feature_fns = [token_features, token_pair_features, lexicon_features]
docs, labels = read_data(os.path.join('data', 'train'))
#print(docs[0])
tweets= pickle.load(open("tweets_text.txt","rb"))
tweets1=tweets
results = eval_all_combinations(docs, labels,[True,False],feature_fns,[2,5,10])
best_result = results[0]
clf, vocab = fit_best_classifier(docs, labels, results[0])
X_test,vocab,tweets = get_unique_tweets(tweets,best_result,vocab)
#print(clf)
neg_tweets,pos_tweets=my_predict(clf,X_test,tweets1)
pickle.dump(neg_tweets,open("final_neg_tweets.txt","wb"))
pickle.dump(pos_tweets,open("final_pos_tweets.txt","wb"))
#print("Negative Tweets",neg_tweets[:5])
#print("Positive Tweets",pos_tweets[:5])
pass
def main():
classify()
pass
if __name__ == "__main__":
main()
|
"""
LCS
https://www.acmicpc.net/problem/9251
"""
import sys
sys.setrecursionlimit(10**5)
first_word = input()
first_word_len = len(first_word)
second_word = input()
second_word_len = len(second_word)
data = [[0] * second_word_len for _ in range(first_word_len)]
max_data = []
for i, first in enumerate(first_word):
for j, second in enumerate(second_word):
if first == second:
data[i][j] = 1
def dd(x,x_max,check_x, y, y_max, check_y, data, result):
if data[x][y] == 1:
check_x = x
check_y = y
result+=1
cal_x = x+1
cal_y = y+1
        # handle the final comparison against the x_max, y_max bounds
if cal_x== x_max and cal_y == y_max:
if data[cal_x-1][cal_y-1] == 1:
return result+1
else:
return result
if cal_x == x_max:
cal_x = x_max-1
if cal_y == y_max:
cal_y = y_max-1
return dd(cal_x,x_max,check_x, cal_y, y_max, check_y, data, result)
else:
cal_x = x
cal_y = y+1
if cal_y == y_max:
cal_x = x + 1
if cal_x == x_max and cal_y == y_max:
if data[cal_x - 1][cal_y - 1] == 1:
return result + 1
else:
return result
cal_y = check_y+1
if cal_x == x_max:
cal_x = x_max-1
if cal_y == y_max:
cal_y = y_max-1
return dd(cal_x, x_max, check_x, cal_y, y_max, check_y, data, result)
for i in range(first_word_len):
for j in range(second_word_len):
max_data.append(dd(i,first_word_len,0, j, second_word_len, 0, data, 0))
print(max(max_data))
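# For reference, the standard O(n*m) dynamic-programming LCS (not part of the original
# submission; shown only as a comparison with the recursive approach above):
# dp = [[0] * (second_word_len + 1) for _ in range(first_word_len + 1)]
# for i in range(1, first_word_len + 1):
#     for j in range(1, second_word_len + 1):
#         if first_word[i - 1] == second_word[j - 1]:
#             dp[i][j] = dp[i - 1][j - 1] + 1
#         else:
#             dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
# print(dp[first_word_len][second_word_len])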
|
import dash_bootstrap_components as dbc
from dash import html
popovers = html.Div(
[
dbc.Button(
"Hidden Arrow",
id="hide-arrow-target",
className="me-1",
n_clicks=0,
),
dbc.Popover(
"I am a popover without an arrow!",
target="hide-arrow-target",
trigger="legacy",
hide_arrow=True,
),
dbc.Button(
"Offset Popover",
id="offset-target",
n_clicks=0,
),
dbc.Popover(
"I am a popover that's been offset!",
target="offset-target",
trigger="legacy",
hide_arrow=True,
offset="50,20",
),
]
)
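# To preview this layout on its own (illustrative; the original file only defines `popovers`):
#   import dash
#   app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
#   app.layout = popovers
#   app.run_server(debug=True)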
|
from epidemioptim.environments.cost_functions.costs.death_toll_cost import DeathToll
from epidemioptim.environments.cost_functions.costs.gdp_recess_cost import GdpRecess
|
from __future__ import unicode_literals
from django.apps import AppConfig
class UserOtpConfig(AppConfig):
name = 'phoneuser'
|
# a-priori fingerprint database (known matches)
matches = {
}
|
# Generated by Django 3.2.5 on 2021-07-28 15:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clinic_app', '0013_prescription_dose'),
]
operations = [
migrations.AddField(
model_name='medicine',
name='date',
field=models.DateTimeField(auto_now_add=True, default='2021-07-27 22:58:51.049087+03'),
preserve_default=False,
),
migrations.AddField(
model_name='medicine',
name='description',
field=models.TextField(default='Medicine'),
preserve_default=False,
),
]
|
from .hpgl2_elm_classes import cHpgl2ElmCommand, \
cHpgl2IN, cHpgl2PG, cHpgl2RO, cHpgl2AA, cHpgl2CI, \
cHpgl2PA, cHpgl2PD, cHpgl2PU, cHpgl2LT, cHpgl2PW, \
cHpgl2SP, cHpgl2SD, cHpgl2SS, cHpgl2BP, cHpgl2PS, \
cHpgl2NP
|