text stringlengths 8 6.05M |
|---|
def add(a=35, b=65):
    """Print and return the sum of a and b (defaults reproduce the original demo)."""
    c = a + b
    print(c)
    return c

add()

def multiply(d=2, e=2):
    """Print and return the product of d and e."""
    f = d * e
    print(f)
    return f

multiply()

def divison(x=26, y=2):
    """Print and return x / y (true division, so the result is a float).

    NOTE: the name keeps the original 'divison' (sic) spelling so any
    existing callers are unaffected.
    """
    z = x / y
    print(z)
    return z

divison()
# Puzzle input: inclusive password range "low-high".
# BUG FIX: the original bound this to the name `input`, shadowing the builtin.
PUZZLE_INPUT = '109165-576723'
pass_limits = [int(part) for part in PUZZLE_INPUT.split('-')]
# range() is half-open, so +1 makes the upper bound inclusive.
pass_range = range(pass_limits[0], pass_limits[1] + 1)
def is_valid(candidate):
    """Return True when the digits of *candidate* never decrease from left
    to right AND at least one adjacent pair of digits is equal."""
    digits = str(candidate)
    adjacent = list(zip(digits, digits[1:]))
    # Any decreasing pair disqualifies the candidate immediately.
    if any(left > right for left, right in adjacent):
        return False
    # Otherwise valid only if some adjacent pair repeats.
    return any(left == right for left, right in adjacent)
# Count how many candidates in the range satisfy the password rules.
valid_pass = [candidate for candidate in pass_range if is_valid(candidate)]
print(len(valid_pass))
|
# Django admin configuration: register the Atom and Particle models so they
# are manageable through the admin site with default ModelAdmin options.
from django.contrib import admin
from .models import Atom, Particle
admin.site.register(Atom)
admin.site.register(Particle)
|
#!/usr/bin/python
# coding=utf-8
import urllib.request
# Build a request object with urllib.request.Request().
'''
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: max-age=0
Connection: keep-alive
Cookie: BAIDUID=6EF80C8B11D09EEB5959821488E62CA8:FG=1; BIDUPSID=6EF80C8B11D09EEB5959821488E62CA8; PSTM=1538138173; BD_UPN=123353; BDUSS=tWazZON0Z4NmRmcDR2cDJTR2pwaW9VcmQ2WVZZeHdsSmJ0RUdjUDFwZHpWLWxiQVFBQUFBJCQAAAAAAAAAAAEAAAAIPWgxeGM1MzQzNjExMjQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHPKwVtzysFbTU; MCITY=-75%3A; BD_HOME=1; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; delPer=0; BD_CK_SAM=1; PSINO=3; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; locale=zh; H_PS_PSSID=1454_21119_20691_27376_26350_27509; H_PS_645EC=d777hqh9TH%2Flz%2BPmj%2BGutYJzRwuxYfGD6ySoyR2peJGl4Y788WxYiHnXRzTjPQHBxwRc
Host: www.baidu.com
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36
'''
# Setting a User-Agent is the first step in the crawler vs. anti-crawler game.
ua_headers = {
"User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"
}
url = "http://www.baidu.com"
user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"
request = urllib.request.Request(url,headers=ua_headers)
# Add / overwrite a single HTTP header on the request.
request.add_header("User-Agent",user_agent)
# Send the request to the URL; returns a file-like response object.
response = urllib.request.urlopen(request)
# read() returns the entire response body (bytes in Python 3).
html = response.read()
# HTTP status code of the response.
code = response.getcode()
# The final URL actually fetched (accounts for redirects).
url = response.geturl()
# The HTTP headers sent back by the server.
info = response.info()
print(request.get_header("User-agent"))
|
"""
In this problem, a tree is an undirected graph that is connected and has no cycles.
You are given a graph that started as a tree with n nodes labeled from 1 to n, with one additional edge added. The added edge has two different vertices chosen from 1 to n, and was not an edge that already existed. The graph is represented as an array edges of length n where edges[i] = [ai, bi] indicates that there is an edge between nodes ai and bi in the graph.
Return an edge that can be removed so that the resulting graph is a tree of n nodes. If there are multiple answers, return the answer that occurs last in the input.
Input: edges = [[1,2],[1,3],[2,3]]
Output: [2,3]
"""
from typing import List
from collections import defaultdict
class Solution:
    def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
        """Return the edge whose removal turns the graph back into a tree.

        Union-Find: processing edges in input order, the first edge that
        joins two already-connected vertices is the redundant one — which
        is exactly the last such edge in the input, as the problem requires.
        (Replaces the original DFS that re-scanned a visited *list* per edge
        and left a stray debug print.)
        """
        parent = {}

        def find(x):
            # Path-halving find with lazy node creation.
            parent.setdefault(x, x)
            while parent[x] != x:
                parent[x] = parent.setdefault(parent[x], parent[x])
                x = parent[x]
            return x

        for u, v in edges:
            root_u, root_v = find(u), find(v)
            if root_u == root_v:
                # u and v are already connected: this edge closes the cycle.
                return [u, v]
            parent[root_u] = root_v
# Manual smoke test of findRedundantConnection on two sample graphs.
for edges in ([[1, 2], [1, 3], [2, 3]], [[1, 2], [2, 3], [3, 4], [1, 4], [1, 5]]):
    print("Input : {}".format(edges))
    ans = Solution().findRedundantConnection(edges)
    print("Solution : {}".format(ans))
|
def DNAtoRNA(dna):
    """Transcribe a DNA string to RNA: every 'T' becomes 'U'.

    (dna_to_rna would be PEP8; camelCase is forced by the CodeWars kata.)
    """
    transcription = str.maketrans('T', 'U')
    return dna.translate(transcription)
|
from base import *
import clsTestService
import enums
from selenium.webdriver.common.keys import Keys
class EntryPage(Base):
    """Page object for the KMS entry (media) page."""
    driver = None
    clsCommon = None

    def __init__(self, clsCommon, driver):
        self.driver = driver
        self.clsCommon = clsCommon

    #=============================================================================================================
    # Entry Page locators:
    #=============================================================================================================
    ENTRY_PAGE_ENTRY_TITLE = ('xpath', "//h3[@class='entryTitle' and contains(text(), 'ENTRY_NAME')]") # When using this locator, replace 'ENTRY_NAME' string with your real entry name
    ENTRY_PAGE_ACTIONS_DROPDOWNLIST = ('xpath', "//button[@id='entryActionsMenuBtn']")
    ENTRY_PAGE_ACTIONS_DROPDOWNLIST_EDIT_BUTTON = ('xpath', "//span[@id='tabLabel-Edit']")
    ENTRY_PAGE_DESCRIPTION = ('xpath', "//div[@class='row-fluid normalWordBreak']")
    ENTRY_PAGE_TAGS = ('class_name', "tagsWrapper")
    ENTRY_PAGE_ADDTOPLAYLIST_BUTTON = ('id', "Addtoplaylists")
    ENTRY_PAGE_PUBLISH_BUTTON = ('id', "tab-Publish")
    ENTRY_PAGE_ACTIONS_DROPDOWNLIST_DELETE_BUTTON = ('id', "tab-Delete")
    ENTRY_PAGE_CONFIRM_DELETE_BUTTON = ('xpath', "//a[contains(@id,'delete_button_') and @class='btn btn-danger']")
    ENTRY_PAGE_DOWNLOAD_TAB = ('xpath', "//a[contains(@class,'btn responsiveSizePhone tab-download-tab')]")
    ENTRY_PAGE_MEDIA_IS_BEING_PROCESSED = ('xpath', "//h3[@class='muted' and contains(text(), 'Media is being processed')]")
    ENTRY_PAGE_PLAYER_IFRAME = ('xpath',"//iframe[@id='kplayer_ifp' and @class='mwEmbedKalturaIframe']")
    ENTRY_PAGE_PLAYER_IFRAME1 = ('class_name','mwEmbedKalturaIframe')
    ENTRY_PAGE_PLAYER_IFRAME2 = ('id','kplayer_ifp')
    ENTRY_PAGE_CHAPTER_MENU_ON_THE_PLAYER = ('id', 'sideBarContainerReminderContainer') # This is the icon on the top left of the player that show all the slides that were added
    ENTRY_PAGE_SHARE_TAB = ('xpath', '//a[@id="tab-share-tab" and @class="btn responsiveSizePhone tab-share-tab"]')
    ENTRY_PAGE_SHARE_LINK_TO_MEDIA_OPTION = ('xpath', '//li[@id="directLink-tab" and @class="active"]')
    ENTRY_PAGE_SHARE_EMBED_OPTION = ('id', 'embedTextArea-pane-tab')
    ENTRY_PAGE_SHARE_EMAIL_OPTION = ('id', 'emailLink-tab')
    ENTRY_PAGE_LOADING = ('xpath', '//div[@class="message" and text()="Loading..."]')
    ENTRY_PAGE_EMBED_TEXT_AREA = ('id', 'embedTextArea')
    ENTRY_PAGE_COMMENT_TEXT_AREA = ('xpath', '//textarea[@id="commentsbox"]')
    ENTRY_PAGE_COMMENT_ADD_BUTTON = ('xpath', '//input[@id="add-comment"]')
    ENTRY_PAGE_COMMENTS_PANEL = ('xpath', "//div[@id='commentsWrapper']")
    ENTRY_PAGE_DETAILS_BUTTON = ('xpath', "//a[@id='tab-Details' and @class='btn responsiveSizePhone tabs-container__button tab-Details active']")
    ENTRY_PAGE_LIKE_BUTTON = ('xpath', "//span[@id='likes']")
    ENTRY_PAGE_COMMENT_SECTION = ('xpath', '//div[@class="commentText"]/p[text()="COMMENT_TEXT"]')
    ENTRY_PAGE_CLOSE_DISCUSSION_MSG = ('xpath', '//h4[@class="muted" and text()="Discussion is closed"]')
    ENTRY_PAGE_COMMENT_ID = ('xpath', '//div[@class="comment row-fluid "]')
    ENTRY_PAGE_REPLY_COMMENT = ('xpath', '//a[contains(@href, "/commentId/COMMENT_ID") and @data-track="Comment Reply"]')
    ENTRY_PAGE_REPLY_COMMENT_TEXT_AREA = ('xpath', '//textarea[@id="commentsbox" and @title="Add a Reply"]')
    ENTRY_PAGE_REPLY_COMMENT_ADD_BUTTON = ('xpath', '//form[@id="addComment_COMMENT_ID"]/div[@class="pull-right"]')
    ENTRY_PAGE_RELATED_MEDIA = ('xpath', '//div[@id="sideSelectWrap"]')
    ENTRY_PAGE_MY_MEDIA_OPTION = ('xpath', '//a[@id="tab-entrySideBarPane-Sidemymedia-3"]')
    ENTRY_PAGE_ELATED_MEDIA_OPTION = ('xpath', '//a[@id="tab-entrySideBarPane-Sidemymedia-2"]')
    ENTRY_PAGE_MY_MEDIA_SIDE_BAR_ENTRIES = ('xpath', '//div[@class="photo-group thumb_wrapper" and @title="ENTRY_NAME"]')
    ENTRY_PAGE_ATTACHMENTS_TAB = ('xpath', '//a[@id="tab-attachments-tab" and @class="btn responsiveSizePhone tab-attachments-tab"]')
    ENTRY_PAGE_DOWNLOAD_ATTACHMENTS_ICON = ('xpath', '//i[@class="icon-download icon-large"]')
    ENTRY_PAGE_RELATED_MEDIA_TABLE = ('xpath', '//table[@class="table table-hover table-bordered thumbnails table-condensed"]/tbody/tr')
    #=============================================================================================================
def navigateToEntryPageFromMyMedia(self, entryName):
    """Open an entry page by searching for it in My Media; True on success."""
    entry_title_locator = (self.ENTRY_PAGE_ENTRY_TITLE[0], self.ENTRY_PAGE_ENTRY_TITLE[1].replace('ENTRY_NAME', entryName))
    # Short-circuit when the entry page is already open.
    if self.wait_visible(entry_title_locator, 5) != False:
        writeToLog("INFO","Already in edit entry page, Entry name: '" + entryName + "'")
        return True
    # Search the entry in My Media, then click it.
    self.clsCommon.myMedia.searchEntryMyMedia(entryName)
    self.clsCommon.myMedia.clickEntryAfterSearchInMyMedia(entryName)
    # The entry title becoming visible is the page-loaded signal.
    if self.wait_visible(entry_title_locator, 30) == False:
        writeToLog("INFO","FAILED to enter entry page: '" + entryName + "'")
        return False
    return True
# Author: Michal Zomper
def navigateToEntryPageFromCategoryPage(self, entryName, categoryName):
    """Open an entry page by locating the entry inside a category page."""
    entry_title_locator = (self.ENTRY_PAGE_ENTRY_TITLE[0], self.ENTRY_PAGE_ENTRY_TITLE[1].replace('ENTRY_NAME', entryName))
    # Already on the entry page - nothing to do.
    if self.wait_visible(entry_title_locator, 5) != False:
        writeToLog("INFO","Already in entry page: '" + entryName + "'")
        return True
    if self.clsCommon.category.navigateToCategory(categoryName) == False:
        writeToLog("INFO","FAILED navigate to category:" + categoryName)
        return False
    sleep(2)
    if self.clsCommon.category.searchEntryInCategory(entryName) == False:
        writeToLog("INFO","FAILED to search entry'" + entryName + "' in category" + categoryName)
        return False
    # Open the entry from the search results.
    if self.clsCommon.category.clickOnEntryAfterSearchInCategory(entryName) == False:
        writeToLog("INFO","FAILED to click on entry " + entryName)
        return False
    if self.wait_visible(entry_title_locator, 15) == False:
        writeToLog("INFO","FAILED to enter entry page: '" + entryName + "'")
        return False
    return True
# @Author: Inbar Willman
def navigateToEntryPageFromMyHistory(self, entryName):
    """Open an entry page by searching for it in My History; True on success."""
    entry_title_locator = (self.ENTRY_PAGE_ENTRY_TITLE[0], self.ENTRY_PAGE_ENTRY_TITLE[1].replace('ENTRY_NAME', entryName))
    # Short-circuit when the entry page is already open.
    if self.wait_visible(entry_title_locator, 5) != False:
        writeToLog("INFO","Already in edit entry page, Entry name: '" + entryName + "'")
        return True
    # Search the entry in My History, then click it.
    self.clsCommon.myHistory.searchEntryMyHistory(entryName)
    self.clsCommon.myHistory.clickEntryAfterSearchInMyHistory(entryName)
    # The entry title becoming visible is the page-loaded signal.
    if self.wait_visible(entry_title_locator, 15) == False:
        writeToLog("INFO","FAILED to enter entry page: '" + entryName + "'")
        return False
    return True
# @Author: Inbar Willman
# Click on entry from home page playlist
def navigateToEntryPageFromHomePage(self, entryName):
    """Open an entry page by clicking its tile in the home-page playlist."""
    tmp_entry_name = (self.ENTRY_PAGE_ENTRY_TITLE[0], self.ENTRY_PAGE_ENTRY_TITLE[1].replace('ENTRY_NAME', entryName))
    # Check if we are already in the entry page
    if self.wait_visible(tmp_entry_name, 5) != False:
        writeToLog("INFO","Already in edit entry page, Entry name: '" + entryName + "'")
        return True
    tmp_home_entry_name = (self.clsCommon.home.HOME_PLAYLIST_ENTRY[0], self.clsCommon.home.HOME_PLAYLIST_ENTRY[1].replace('ENTRY_NAME', entryName))
    if self.click(tmp_home_entry_name) == False:
        writeToLog("INFO","FAILED to click on entry")
        return False
    if self.wait_visible(tmp_entry_name, 15) == False:
        writeToLog("INFO","FAILED to enter entry page: '" + entryName + "'")
        return False
    # BUG FIX: the original fell off the end here and returned None on success.
    return True
# Author: Michal Zomper
def verifyEntryMetadata(self, entryName, entryDescription, entryTags):
    """Verify entry title, description and tags on the entry page; True when all match."""
    # Verify entry name via its title locator.
    tmp_entry_name = (self.ENTRY_PAGE_ENTRY_TITLE[0], self.ENTRY_PAGE_ENTRY_TITLE[1].replace('ENTRY_NAME', entryName))
    if self.wait_visible(tmp_entry_name, 20) == False:
        writeToLog("INFO","FAILED to verify entry name: '" + entryName + "'")
        return False
    # Verify description: first make sure the description frame exists.
    parentEldescription = self.get_element(self.ENTRY_PAGE_DESCRIPTION)
    if parentEldescription == None:
        writeToLog("INFO","FAILED to find description frame in entry page")
        return False
    # Then check the frame shows the expected description text.
    if self.wait_for_text(self.ENTRY_PAGE_DESCRIPTION, entryDescription, 30, False) == False:
        writeToLog("INFO","FAILED to verify entry description: '" + entryName + "'")
        # BUG FIX: the original returned True on this failure path.
        return False
    # Verify tags: first make sure the tags frame exists.
    parentEltags = self.get_element(self.ENTRY_PAGE_TAGS)
    if parentEltags == None:
        writeToLog("INFO","FAILED to find tags frame in entry page")
        return False
    # Then check the frame shows the expected tags.
    if self.wait_for_text(self.ENTRY_PAGE_TAGS, entryTags, 30, True) == False:
        writeToLog("INFO","FAILED to verify entry tags: '" + entryTags + "'")
        return False
    writeToLog("INFO","Success, all entry '" + entryName + "' metadata are correct")
    return True
def navigateToEntry(self, entryName="", navigateFrom = enums.Location.MY_MEDIA, categoryName ="", channelName= ""):
    """Dispatch to the correct navigation helper based on *navigateFrom*."""
    if navigateFrom == enums.Location.MY_MEDIA:
        if self.navigateToEntryPageFromMyMedia(entryName) == False:
            writeToLog("INFO","FAILED navigate to entry '" + entryName + "' from " + str(enums.Location.MY_MEDIA))
            return False
    elif navigateFrom == enums.Location.CATEGORY_PAGE:
        if self.navigateToEntryPageFromCategoryPage(entryName, categoryName) == False:
            writeToLog("INFO","FAILED navigate to entry '" + entryName + "' from " + str(enums.Location.CATEGORY_PAGE))
            return False
    elif navigateFrom == enums.Location.CHANNEL_PAGE:
        if self.clsCommon.channel.naviagteToEntryFromChannelPage(entryName, channelName) == False:
            writeToLog("INFO","FAILED navigate to entry '" + entryName + "' from " + str(enums.Location.CHANNEL_PAGE))
            return False
    elif navigateFrom == enums.Location.UPLOAD_PAGE:
        if self.clsCommon.upload.click(self.clsCommon.upload.UPLOAD_GO_TO_MEDIA_BUTTON) == False:
            writeToLog("INFO","FAILED navigate to entry '" + entryName + "' from " + str(enums.Location.UPLOAD_PAGE))
            return False
    elif navigateFrom == enums.Location.MY_HISTORY:
        if self.navigateToEntryPageFromMyHistory(entryName) == False:
            writeToLog("INFO","FAILED navigate to entry '" + entryName + "' from " + str(enums.Location.MY_HISTORY))
            return False
    elif navigateFrom == enums.Location.HOME:
        if self.navigateToEntryPageFromHomePage(entryName) == False:
            # BUG FIX: the original logged MY_HISTORY here instead of HOME.
            writeToLog("INFO","FAILED navigate to entry '" + entryName + "' from " + str(enums.Location.HOME))
            return False
    sleep(2)
    return True
def deleteEntryFromEntryPage(self, entryName, deleteFrom= enums.Location.MY_MEDIA, categoryName="", channelName=""):
    """Delete an entry via the entry page Actions menu, then verify the deletion
    by checking that the page we came from is displayed again."""
    if self.navigateToEntry(entryName, deleteFrom, categoryName, channelName) == False:
        writeToLog("INFO","FAILED navigate to entry page")
        return False
    # Open Actions -> Delete, then confirm.
    if self.click(self.ENTRY_PAGE_ACTIONS_DROPDOWNLIST, 20) == False:
        writeToLog("INFO","FAILED to click on 'Actions' button")
        return False
    if self.click(self.ENTRY_PAGE_ACTIONS_DROPDOWNLIST_DELETE_BUTTON, 15) == False:
        writeToLog("INFO","FAILED to click on delete button")
        return False
    sleep(3)
    if self.click(self.ENTRY_PAGE_CONFIRM_DELETE_BUTTON, 20, multipleElements=True) == False:
        writeToLog("INFO","FAILED to click confirm delete button")
        # Close the dropdown so the page is left in a sane state.
        self.click(self.ENTRY_PAGE_ACTIONS_DROPDOWNLIST_DELETE_BUTTON, 15)
        return False
    sleep(5)
    # After deletion, KMS redirects back to the page the entry was opened from.
    if deleteFrom == enums.Location.MY_MEDIA or deleteFrom == enums.Location.ENTRY_PAGE:
        sleep(5)
        if self.verifyUrl(localSettings.LOCAL_SETTINGS_KMS_MY_MEDIA_URL, False, 30) == False:
            writeToLog("INFO","FAILED to verify that entry deleted")
            return False
    elif deleteFrom == enums.Location.CATEGORY_PAGE:
        category_title_locator = (self.clsCommon.category.CATEGORY_TITLE_IN_CATEGORY_PAGE[0], self.clsCommon.category.CATEGORY_TITLE_IN_CATEGORY_PAGE[1].replace('CATEGORY_NAME', categoryName))
        if self.wait_visible(category_title_locator, 30) == False:
            writeToLog("INFO","FAILED to verify that entry deleted")
            return False
    elif deleteFrom == enums.Location.CHANNEL_PAGE:
        channel_title_locator = (self.clsCommon.channel.CHANNEL_PAGE_TITLE[0], self.clsCommon.channel.CHANNEL_PAGE_TITLE[1].replace('CHANNEL_TITLE', channelName))
        if self.wait_visible(channel_title_locator, 30) == False:
            writeToLog("INFO","FAILED to verify that entry deleted")
            return False
    writeToLog("INFO","Verify that entry deleted")
    return True
# TODO:not finished
def downloadAFlavor(self, entryName, flavorName):
    """From the entry page Download tab, click the download button next to *flavorName*."""
    try:
        if self.navigateToEntryPageFromMyMedia(entryName) == False:
            writeToLog("INFO","FAILED to navigate to entry page, Entry name: " + entryName)
            return False
        if self.click(self.ENTRY_PAGE_DOWNLOAD_TAB, 30) == False:
            writeToLog("INFO","FAILED to click on download tab")
            return False
        sleep(2)
        # BUG FIX: flavorName must be quoted inside the XPath - the original
        # produced contains(text(), name) which is an invalid node test.
        asteriskElement = self.driver.find_element_by_xpath(".//tr[@class='download_flavors_item' and contains(text(), '" + flavorName + "')]")
        parentAsteriskElement = asteriskElement.find_element_by_xpath("..")
        downloadbutton = parentAsteriskElement.find_element_by_tag_name("a")
        downloadbutton.click()
    except NoSuchElementException:
        writeToLog("INFO","FAILED to click on download button, that located near: " + flavorName)
        return False
    return True
def waitTillMediaIsBeingProcessed(self, timeout=150):
    """Wait for the 'Media is being processed' banner to disappear, then for
    the player iframe to appear; True when the player is visible."""
    sleep(3)
    self.wait_while_not_visible(self.ENTRY_PAGE_MEDIA_IS_BEING_PROCESSED, timeout)
    if self.wait_visible(self.clsCommon.player.PLAYER_IFRAME, 60) == False:
        return False
    return True
def VerifySlidesonThePlayerInEntryPage(self, entryName):
    """Verify that the chapter/slides menu is shown on the player of *entryName*."""
    if self.navigateToEntry(entryName, navigateFrom = enums.Location.MY_MEDIA) == False:
        writeToLog("INFO","FAILED navigate to entry: " + entryName)
        return False
    # Switch into the player iframe before probing its internals.
    el = self.get_element(self.ENTRY_PAGE_PLAYER_IFRAME)
    self.driver.switch_to.frame(el)
    ch = self.driver.get_child_element(el, "//div[@id='sideBarContainerReminderContainer']")
    # Verify the chapter menu is displayed on the player.
    if self.is_visible(self.ENTRY_PAGE_CHAPTER_MENU_ON_THE_PLAYER) == False:
        writeToLog("INFO","FAILED to find chapter menu on the player")
        return False
    # BUG FIX: the original returned None on success; callers compare == False.
    return True
# @Author: Inbar Willman
def clickOnShareTab(self):
    """Click the entry page Share tab; True on success."""
    if self.click(self.ENTRY_PAGE_SHARE_TAB, 30) == False:
        # BUG FIX: message said "download tab" although this is the share tab.
        writeToLog("INFO","FAILED to click on share tab")
        return False
    # BUG FIX: the original returned None on success.
    return True
# @Author: Inbar Willman
def chooseShareOption(self, shareOption = enums.EntryPageShareOptions.EMBED):
    """Select one of the Share tab options: embed (default), link, or email."""
    if shareOption == enums.EntryPageShareOptions.EMBED:
        # NOTE(review): WebElement.click() returns None, so the `== False`
        # guard below can never fire; kept as-is to preserve behaviour.
        embed_tab = self.get_elements(self.ENTRY_PAGE_SHARE_EMBED_OPTION)[1]
        if embed_tab.click() == False:
            writeToLog("INFO","FAILED to click on embed tab option")
            return False
    elif shareOption == enums.EntryPageShareOptions.LINK_TO_MEDIA_PAGE:
        if self.click(self.ENTRY_PAGE_SHARE_LINK_TO_MEDIA_OPTION) == False:
            writeToLog("INFO","FAILED to click on link to media tab option")
            return False
    elif shareOption == enums.EntryPageShareOptions.EMAIL:
        email_tab = self.get_elements(self.ENTRY_PAGE_SHARE_EMAIL_OPTION)[1]
        if email_tab.click() == False:
            writeToLog("INFO","FAILED to click on link to media tab option")
            return False
    else:
        writeToLog("INFO","FAILED to get valid share option")
        return False
    return True
def getEmbedLink(self):
    """Return the embed code text from the Share tab, or False on failure."""
    if self.clickOnShareTab() == False:
        writeToLog("INFO","FAILED to click on share tab")
        return False
    if self.chooseShareOption() == False:
        writeToLog("INFO","FAILED to click on embed tab")
        return False
    sleep(3)
    # Scroll to the bottom so the embed area is in view, then wait for the
    # loading overlay to go away.
    self.clsCommon.sendKeysToBodyElement(Keys.END)
    if self.wait_while_not_visible(self.ENTRY_PAGE_LOADING) == False:
        writeToLog("INFO","FAILED - Loading message is still displayed")
        return False
    embed_text = self.get_element_text(self.ENTRY_PAGE_EMBED_TEXT_AREA)
    if embed_text is None:
        return False
    return embed_text
# Author: Michal Zomper
def addComment(self, comment):
    """Add *comment* to the entry via the comments box and verify it appears."""
    sleep(2)
    self.clsCommon.sendKeysToBodyElement(Keys.PAGE_DOWN)
    # Wait for comments module to load - wait for 'Add' button
    if self.wait_visible(self.ENTRY_PAGE_COMMENT_TEXT_AREA, 30, multipleElements=True) == False:
        writeToLog("INFO","FAILED to load Comments module")
        return False
    sleep(1)
    if self.click(self.ENTRY_PAGE_COMMENT_TEXT_AREA, 5) == False:
        writeToLog("INFO","FAILED to click in the comment text box area")
        return False
    if self.send_keys(self.ENTRY_PAGE_COMMENT_TEXT_AREA, comment + Keys.SPACE, multipleElements=True) == False:
        writeToLog("INFO","FAILED to add comment")
        return False
    sleep(2)
    if self.click(self.ENTRY_PAGE_COMMENT_ADD_BUTTON, 15) == False:
        writeToLog("INFO","FAILED to click on add comment button")
        return False
    self.clsCommon.general.waitForLoaderToDisappear()
    self.clsCommon.sendKeysToBodyElement(Keys.END)
    # Verify the comment was added to the comments panel.
    tmp_comments = self.get_element_text(self.ENTRY_PAGE_COMMENTS_PANEL)
    # BUG FIX: original `if comment in tmp_comments == False:` is a chained
    # comparison that is always False, so a missing comment was never caught.
    if comment not in tmp_comments:
        writeToLog("INFO","FAILED to find added comment")
        return False
    writeToLog("INFO","Success, comment: '" + comment +"' was added to entry")
    return True
# Author: Michal Zomper
def addComments(self, commentsList):
    """Add every comment in *commentsList*; stop and fail on the first error."""
    for single_comment in commentsList:
        if self.addComment(single_comment) == False:
            writeToLog("INFO","FAILED to add comment")
            return False
    writeToLog("INFO","Success, All comments were added successfully to entry")
    return True
# Author: Michal Zomper
def LikeUnlikeEntry(self, isLike):
    """Click the like/unlike button and verify the like counter moved the right way.

    isLike=True expects the counter to grow; isLike=False expects it to shrink.
    """
    self.clsCommon.sendKeysToBodyElement(Keys.HOME)
    # Read the like counter before clicking.
    prev_likeAmount = self.wait_visible(self.ENTRY_PAGE_LIKE_BUTTON, timeout=15)
    if prev_likeAmount == False:
        writeToLog("INFO","FAILED to find like button")
        return False
    sleep(2)
    prev_likeAmount = prev_likeAmount.text
    if self.click(self.ENTRY_PAGE_LIKE_BUTTON, 10) == False:
        writeToLog("INFO","FAILED to click on like button")
        return False
    self.clsCommon.general.waitForLoaderToDisappear()
    # Read the like counter after clicking.
    after_likeAmount = self.wait_visible(self.ENTRY_PAGE_LIKE_BUTTON, timeout=15)
    if after_likeAmount == False:
        writeToLog("INFO","FAILED to find like button")
        return False
    after_likeAmount = after_likeAmount.text
    # Like: the counter must increase.
    if isLike == True:
        if int(prev_likeAmount) >= int(after_likeAmount):
            # BUG FIX: original concatenated str + int here -> TypeError.
            writeToLog("INFO","FAILED to click on like button, the number of likes are: " + str(int(after_likeAmount)) + " and need to be: " + str(int(prev_likeAmount)))
            return False
        writeToLog("INFO","Success, entry was liked successfully")
        return True
    # Unlike: the counter must decrease.
    elif isLike == False:
        if int(prev_likeAmount) <= int(after_likeAmount):
            # BUG FIX: same str + int TypeError as above.
            writeToLog("INFO","FAILED to click on unlike button, the number of likes are: " + str(int(after_likeAmount)) + " and need to be: " + str(int(prev_likeAmount)))
            return False
        writeToLog("INFO","Success, entry was unlike successfully")
        return True
# @Author: Inbar Willman
def checkEntryCommentsSection(self, comment, isCommentsDisabled, isDiscussionClose):
    """Validate the comments section state for disabled-comments / closed-discussion entries."""
    # Scroll down to the comments section.
    self.clsCommon.sendKeysToBodyElement(Keys.PAGE_DOWN)
    comment_locator = (self.ENTRY_PAGE_COMMENT_SECTION[0], self.ENTRY_PAGE_COMMENT_SECTION[1].replace('COMMENT_TEXT', comment))
    # Comments disabled (covers the closed-discussion-with-disabled case too):
    # existing comments must be hidden.
    if isCommentsDisabled == True:
        if self.is_visible(comment_locator) == True:
            writeToLog("INFO","FAILED - comments still displayed")
            return False
    # Discussion closed but comments still enabled: the closed-discussion
    # banner must show, and existing comments remain visible.
    if isDiscussionClose == True and isCommentsDisabled == False:
        if self.wait_visible(self.ENTRY_PAGE_CLOSE_DISCUSSION_MSG, timeout=20) == False:
            writeToLog("INFO","FAILED to displayed close discussion message")
            return False
        if self.is_visible(comment_locator) == False:
            writeToLog("INFO","FAILED - comments isn't displayed")
            return False
    # In both modes, the add-comment box must be gone.
    if self.is_visible(self.ENTRY_PAGE_COMMENT_TEXT_AREA) == True:
        writeToLog("INFO","FAILED - add new comment box is still displayed")
        return False
    return True
# @Author: Inbar Willman
def replyComment(self, replyComment):
    """Reply to the first comment on the entry and verify the reply appears."""
    # Get the comment id so the reply widgets can be addressed.
    tmp_comment_id = self.get_element(self.ENTRY_PAGE_COMMENT_ID)
    comment_id = tmp_comment_id.get_attribute("data-comment-id")
    # Click on the reply button of that comment.
    tmp_replay_btn = (self.ENTRY_PAGE_REPLY_COMMENT[0], self.ENTRY_PAGE_REPLY_COMMENT[1].replace('COMMENT_ID', comment_id))
    if self.click(tmp_replay_btn) == False:
        writeToLog("INFO","FAILED to click on replay button")
        return False
    # Click on the reply text area.
    if self.click(self.ENTRY_PAGE_REPLY_COMMENT_TEXT_AREA, 5) == False:
        writeToLog("INFO","FAILED to click in the comment text box area")
        return False
    # Type the reply text.
    if self.send_keys(self.ENTRY_PAGE_REPLY_COMMENT_TEXT_AREA, replyComment + Keys.SPACE, multipleElements=True) == False:
        writeToLog("INFO","FAILED to add comment")
        return False
    sleep(2)
    # Click the reply's Add button.
    self.clsCommon.sendKeysToBodyElement(Keys.ARROW_DOWN)
    tmp_add_btn = (self.ENTRY_PAGE_REPLY_COMMENT_ADD_BUTTON[0], self.ENTRY_PAGE_REPLY_COMMENT_ADD_BUTTON[1].replace('COMMENT_ID', comment_id))
    if self.click(tmp_add_btn, 15) == False:
        writeToLog("INFO","FAILED to click on add comment button")
        return False
    self.clsCommon.general.waitForLoaderToDisappear()
    self.clsCommon.sendKeysToBodyElement(Keys.END)
    # Verify the reply was added to the comments panel.
    tmp_comments = self.get_element_text(self.ENTRY_PAGE_COMMENTS_PANEL)
    # BUG FIX: original `if replyComment in tmp_comments == False:` is a chained
    # comparison that is always False; also removed the unreachable duplicate
    # `return True` that followed the first one.
    if replyComment not in tmp_comments:
        writeToLog("INFO","FAILED to find added comment")
        return False
    writeToLog("INFO","Success, comment: '" + replyComment +"' was added to entry")
    return True
# @Author: Inbar Willman
# Choose option in related media drop down in media side bar
# Related media (default) or My Media
def selectRelatedMediaOption(self, relatedMediaOption=enums.ReleatedMedia.MY_MEDIA):
    """Pick an option from the related-media drop down in the entry side bar."""
    # Open the drop down.
    if self.click(self.ENTRY_PAGE_RELATED_MEDIA) == False:
        writeToLog("INFO","FAILED to click on related media drop down menu")
        return False
    # Choose the requested option.
    if relatedMediaOption == enums.ReleatedMedia.MY_MEDIA:
        if self.click(self.ENTRY_PAGE_MY_MEDIA_OPTION) == False:
            writeToLog("INFO","FAILED to click on My media option")
            return False
    elif relatedMediaOption == enums.ReleatedMedia.RELATED_MEDIA:
        # BUG FIX: this branch clicked ENTRY_PAGE_MY_MEDIA_OPTION, so the
        # Related Media option could never be selected.
        if self.click(self.ENTRY_PAGE_ELATED_MEDIA_OPTION) == False:
            writeToLog("INFO","FAILED to click on Related media option")
            return False
    else:
        writeToLog("INFO","FAILED to click on Related media drop down options - No valid option was given")
        return False
    return True
# @Author: Inbar Willman
# Verify that My Media entry are displayed in My Media side bar
def checkMyMediaSideBarEntries(self, entrisList):
    """Return True when every entry in *entrisList* is shown in the My Media side bar."""
    for entry_name in entrisList:
        sidebar_entry_locator = (self.ENTRY_PAGE_MY_MEDIA_SIDE_BAR_ENTRIES[0], self.ENTRY_PAGE_MY_MEDIA_SIDE_BAR_ENTRIES[1].replace('ENTRY_NAME', entry_name))
        if self.is_visible(sidebar_entry_locator) == False:
            writeToLog("INFO","FAILED to displayed My Media entry:" + entry_name)
            return False
    return True
# @Author: Inbar Willman
# Click on attachments tab
def clickOnAttachmentTab(self):
    """Click the entry page Attachments tab; True on success."""
    if self.click(self.ENTRY_PAGE_ATTACHMENTS_TAB) == False:
        writeToLog("INFO","FAILED to click on attachments tab")
        return False
    # BUG FIX: the original returned None on success, while callers
    # (downloadAttachmentFromEntryPage) compare the result with == False.
    return True
# @Author: Inbar Willman
# Download attachments file
def downloadAttachmentFromEntryPage(self, originalPath, downloadPath):
    """Download the entry attachment and binary-compare it with the uploaded original."""
    # Open the attachments tab.
    if self.clickOnAttachmentTab() == False:
        writeToLog("INFO","FAILED to click on attachments tab")
        return False
    # Trigger the download.
    if self.click(self.ENTRY_PAGE_DOWNLOAD_ATTACHMENTS_ICON) == False:
        writeToLog("INFO","FAILED to click on download attachments icon")
        return False
    # Verify the downloaded file matches the uploaded one byte-for-byte.
    if self.clsCommon.compareBetweenTwoFilesBinary(originalPath, downloadPath) == False:
        writeToLog("INFO","Failed to click on to download file correctly")
        return False
    return True
# @Author: Inbar Willman
# Verify that correct count of related media is displayed
# By default = 10
def verifyRelatedMediaCount(self, realtedLimit):
    """Return True when the related-media table shows exactly *realtedLimit* rows."""
    related_rows = self.get_elements(self.ENTRY_PAGE_RELATED_MEDIA_TABLE)
    if len(related_rows) != realtedLimit:
        writeToLog("INFO","Failed to displayed correct number of media in Related section")
        return False
    return True
# BUG FIX: the Python-2-only `exceptions` module was removed in Python 3;
# user-defined exceptions now derive from the builtin Exception directly.
class ConversionError(Exception):
    """Raised when a measurement cannot be converted."""
    pass

class Converter(object):
    """Base interface for measurement converters; subclasses implement do_conversion."""
    def do_conversion(self, measure, timestamp):
        """Convert *measure* taken at *timestamp*. Default implementation is a no-op."""
        pass
|
# --- String concatenation ---
print('I' + 'love' + 'Python')
first = 'I'
second = 'love'
third = 'Python.'
# join the three words with spaces, then append the final period
sentence = ' '.join((first, second, third)) + '.'
# --- Repeating strings ---
print('-' * 10)
happiness = 3 * 'happy'
print(happiness)
version = 3
print('I love Python ' + str(version) + '.')
# Console calculator (Portuguese prompts).  Loops until the user answers
# anything other than 's', appending every operation to datacalc.txt.
# BUG FIX: all four arq.write(...) calls were missing a closing parenthesis,
# which made the whole script a SyntaxError.
loop1 = 0
while loop1 < 1:
    print("\n******************* Python Calculator *******************")
    # --- operation menu: loop until a valid code (1-4) is chosen ---
    loop2 = 0
    while loop2 < 1:
        print('\n')
        print('Adição - 1')
        print('Subtração - 2')
        print('Multiplicação - 3')
        print('Divisão - 4')
        print('\n')
        NumberAction = input('Escolha sua operação: ')
        if NumberAction == '1' or NumberAction == '2' or NumberAction == '3' or NumberAction == '4':
            loop2 = 1
        else:
            print('Código invalido!')
            print('\n')
    # --- first operand: '.' means float, ',' is rejected, otherwise int ---
    loop4 = 0
    while loop4 == 0:
        n3 = input('Número inicial: ')
        if '.' in n3:
            n1 = float(n3)
            loop4 = 1
        elif ',' in n3:
            print('Favor utilizar pontos para demarcar o início das casas decimais.')
            print('\n')
        else:
            n1 = int(n3)
            loop4 = 1
    # --- second operand, same parsing rules ---
    loop5 = 0
    while loop5 == 0:
        n4 = input('Número final: ')
        if '.' in n4:
            n2 = float(n4)
            loop5 = 1
        elif ',' in n4:
            print('Favor utilizar pontos para demarcar o início das casas decimais.')
            print('\n')
        else:
            n2 = int(n4)
            loop5 = 1
    n10 = str(n1)
    n20 = str(n2)
    print('\n')
    # --- perform the chosen operation and append it to the log file ---
    if int(NumberAction) == 1:
        print(str(n1), '+', str(n2), '=', str(n1 + n2))
        with open('datacalc.txt', 'a') as arq:
            arq.write('%s + %s = %s\n' % (str(n1), str(n2), str(n1 + n2)))
    elif int(NumberAction) == 2:
        print(str(n1), '-', str(n2), '=', str(n1 - n2))
        with open('datacalc.txt', 'a') as arq:
            arq.write('%s - %s = %s\n' % (str(n1), str(n2), str(n1 - n2)))
    elif int(NumberAction) == 3:
        print(str(n1), '*', str(n2), '=', str(n1 * n2))
        with open('datacalc.txt', 'a') as arq:
            arq.write('%s * %s = %s\n' % (str(n1), str(n2), str(n1 * n2)))
    else:
        # NOTE(review): a zero second operand raises ZeroDivisionError here.
        print(str(n1), ':', str(n2), '=', str(n1 / n2))
        with open('datacalc.txt', 'a') as arq:
            arq.write('%s : %s = %s\n' % (str(n1), str(n2), str(n1 / n2)))
    print('\n')
    restart = input('Continuar usando a calculadora (s/n): ')
    print('\n')
    if restart.lower() == 's':
        pass
    else:
        loop1 = 1
print('Obrigado por usar!')
import pandas as pd
import numpy as np
import collections
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import balanced_accuracy_score, classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2, f_regression
from sklearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE, ADASYN
#====================================================================================================================
'''
PIPELINE TUNING:---
Hyper-parameters are parameters that are manually tuned by a human operator to maximize the model performance
against a validation set through a grid search.GridSearchCV module helps in this.
'''
# Candidate dimensionality-reduction sizes (used with PCA/NMF/SelectKBest).
n_components_to_test = [2, 4, 8, 16, 24, 32, 48, 64] # number of reduced features
#SVC classifier's parameters..
# Regularization strengths for SVC.
C_to_test = [0.1, 1, 10, 50, 100]
# Kernels to try ('sigmoid' deliberately commented out).
kernel_to_test = ['linear','rbf']#,'sigmoid']
# Kernel coefficient options (only 'scale'; numeric values commented out).
gamma_to_test = ['scale'] #1e-1, 1e-2, 1e-3, 1e-4, 'auto']
# Forest sizes for the RandomForest grid search.
n_estimators_to_test = [3, 5, 10, 30, 50, 80, 100, 200]
#====================================================================================================================
# function with evaluation measures to evaluate the regression problem
def calculateEvaluationMetrics(y_true, y_pred):
    """Print classification metrics for y_pred against y_true and draw a
    row-normalised confusion-matrix heat map."""
    balanced_acc = balanced_accuracy_score(y_true, y_pred) * 100
    cm = confusion_matrix(y_true, y_pred)
    overall_acc = accuracy_score(y_true, y_pred) * 100
    # normalize=False returns the raw count of correct predictions.
    correct_count = accuracy_score(y_true, y_pred, normalize= False)
    report = classification_report(y_true, y_pred)
    # Normalise each row so every true class sums to 1 for the heat map.
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    cm_frame = pd.DataFrame(cm_norm)
    print("\nBalanced accuracy score (takes care of imbalanced class) is: %.2f" % balanced_acc)
    print("Overall Accuracy of classification: %.2f" % overall_acc)
    print("The classifier correctly classify: ", correct_count," out of total", len(y_true), " samples.")
    print("The overall classification report is as following:\n", report)
    print("The confusion matrix is following.\n", cm)
    sns.heatmap(cm_frame, annot=True, linewidths=0.8, cmap="YlGnBu")
    plt.xlabel('Predicted class')
    plt.ylabel('True class')
    plt.show()
    return
#===================================================================================================================
# classifier with grid search over the parameters: SVM
def classifierSVC(X_train, Y_train, X_test):
    """Grid-search an SVC pipeline on the training data, then predict X_test.

    Hyper-parameters (C, kernel, gamma) come from the module-level grids;
    5-fold cross-validation selects the best combination.
    """
    model = Pipeline([('classifier', SVC(probability = False, max_iter=-1, class_weight = 'balanced'))])
    grid = {
        'classifier__C': C_to_test,
        'classifier__kernel': kernel_to_test,
        'classifier__gamma': gamma_to_test,
    }
    search = GridSearchCV(model, grid, verbose=1, cv=5)
    search.fit(X_train, Y_train)
    return search.predict(X_test)
#===================================================================================================================
# classifier with grid search over the parameters: randomForest
def classifierRandomForest(X_train, Y_train, X_test):
    """Grid-search a RandomForest pipeline on the training data, then predict X_test.

    Only n_estimators is tuned (module-level grid); 5-fold CV picks the best.
    """
    model = Pipeline([('classifier', RandomForestClassifier(class_weight = 'balanced'))])
    grid = {'classifier__n_estimators': n_estimators_to_test}
    search = GridSearchCV(model, grid, verbose=1, cv=5)
    search.fit(X_train, Y_train)
    return search.predict(X_test)
#===================================================================================================================
def main():
    """Load the lead-10 climate dataset, oversample the training split,
    and evaluate a grid-searched SVC on the untouched test split."""
    frame = pd.read_csv(".././DataSets/Lead_10_Hist.csv")
    data = np.asarray(frame)
    # Last column is the rainfall class label; the rest are predictors.
    X = data[:, :data.shape[1]-1]
    Y = data[:, data.shape[1]-1]
    print("\nSamples of each rainfall class in overall set: ", collections.Counter(Y))
    # Hold out the final 20% of rows as the test set (no shuffling).
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, shuffle=False)
    print("\nSamples in training set: ", collections.Counter(Y_train))
    # Oversample only the training data; the test set must stay untouched.
    X_smote, Y_smote = SMOTE().fit_resample(X_train, Y_train)
    print("\nSMOTE:", sorted(collections.Counter(Y_smote).items()))
    X_adasyn, Y_adasyn = ADASYN().fit_resample(X_train, Y_train)
    print("\nADASYN:", sorted(collections.Counter(Y_adasyn).items()))
    # Train/evaluate the SVC on the ADASYN-balanced training data.
    Y_pred = classifierSVC(X_adasyn, Y_adasyn, X_test)
    print("\nUp-sampled data using ADASYN..................")
    calculateEvaluationMetrics(Y_test, Y_pred)

if __name__ == '__main__':
    main()
import sys
import json
import MySQLdb

# Local rtlamr database credentials.
u="rtluser"
p="rtlpass"
conn = MySQLdb.connect(host='localhost', user=u, passwd=p, db='rtlamr')
conn.autocommit(True)
db = conn.cursor()
# Consume one JSON meter reading per line from stdin (rtlamr's output)
# until EOF or Ctrl-C.
while 1:
    try:
        line = sys.stdin.readline()
    except KeyboardInterrupt:
        break
    if not line:
        break
    rec = json.loads(line)
    print("Time: {0} Meter ID: {1} Consumption: {2}".format(rec["Time"], rec["Message"]["ID"], rec["Message"]["Consumption"]))
    # create table meters (t timestamp default current_timestamp, id varchar(20),type int, tamperphy int, tamperenc int, consumption int);
    v = rec["Message"]
    # Parameterised statement: the driver quotes/escapes the values, fixing
    # the SQL-injection / quoting bug in the old %-formatted query string.
    sql = 'insert into meters values (current_timestamp,%s,%s,%s,%s,%s)'
    args = (v["ID"], v["Type"], v["TamperPhy"], v["TamperEnc"], v["Consumption"])
    print(sql)
    db.execute(sql, args)
conn.close()
|
#!/usr/bin/env python
# coding: utf-8
# # SONALI PATIL
# # Task 1 - Prediction using Supervised ML (Level - Beginner)
#
# NOTE: this file is a Jupyter-notebook export; the In[..] markers are the
# original cell boundaries and the cells depend on sequential execution.
# In[25]:
# import all reuired libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[26]:
# Reading the file
url="http://bit.ly/w-data"
data=pd.read_csv(url)
# In[27]:
data.head()
# In[28]:
data.info()
# In[29]:
data.describe()
# # Visualizing the data
#
# In[30]:
## Plotting the distribution of scores
data.plot(x='Hours', y='Scores', style='*')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
# # From above graph we can clearly see there is a positive linear relation bet. no. of hours and percentage of the score
# # Preparing the data
# In[31]:
# X: all columns but the last (Hours); y: the second column (Scores).
X = data.iloc[:, :-1].values
y = data.iloc[:, 1].values
# In[46]:
# Split this data into a training and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
                            test_size=0.2, random_state=0)
# In[48]:
print("Shape of X_train",X_train.shape)
print("shape of y_train",y_train.shape)
print("Shape of X_test",X_test.shape)
print("Shape of y_test",y_test.shape)
# In[50]:
## After the spliting now we have to train our algorithm
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
print("Training complete!!!!!.")
# # Plotting Regression Line
# In[56]:
regressor.coef_
# In[57]:
regressor.intercept_
# In[58]:
# Regression line y = m*x + c evaluated over the full feature range.
line = regressor.coef_*X+regressor.intercept_
# Plotting for the test data
plt.scatter(X, y)
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.plot(X, line);
plt.show()
# # Predictions
# In[36]:
print(X_test) # Testing data - In Hours
y_pred = regressor.predict(X_test) # Predicting the scores
# In[37]:
# Comparing Actual vs Predicted
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
# In[59]:
# Predict the value by own data
# [hours] wraps the 1-element list into the 2-D shape sklearn expects.
hours = [9.25]
own_pred = regressor.predict([hours])
print("No of Hours = {}".format(hours))
print("Predicted Score = {}".format(own_pred[0]))
# # Evaluting the model
# #The final step is to evaluate the performance of algorithm.
# #This step is particularly important to compare how well different algorithms perform on a particular dataset. For simplicity #here, I have evaluted model using mean absolute error,mean squared error and root mean squared error
#
# In[63]:
from sklearn import metrics
print('Mean Absolute Error:',metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:',metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:',np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# In[ ]:
|
from django import forms
class LoginUsers(forms.Form):
    """Class for login of students, teachers...

    Fixed: the two labels were swapped — the username field was labelled
    'Contrasena' (password) and the password field 'Usuario' (user).
    """
    username = forms.CharField(label='Usuario')
    password = forms.CharField(widget=forms.PasswordInput, min_length=8, label='Contrasena')
class AsesorLaboral(forms.Form):
    """Employer evaluation form: each aspect is rated on a 1-10 scale."""
    # (value, label) pairs 1..10 — identical to writing the tuple by hand.
    choices_calificacion = tuple((n, str(n)) for n in range(1, 11))
    responsabilidad = forms.ChoiceField(choices=choices_calificacion, label='Responsabilidad', required=False)
    trabajoEquipo = forms.ChoiceField(choices=choices_calificacion, label='Trabajo en Equipo', required=False)
    asistenciaPuntualidad = forms.ChoiceField(choices=choices_calificacion, label='Asistencia y Puntualidad ', required=False)
    habilidadProblemas = forms.ChoiceField(choices=choices_calificacion, label='Habilidad ante Problemas', required=False)
    manejoHerramientas = forms.ChoiceField(choices=choices_calificacion, label='Manejo de Herramientas', required=False)
    disponibilidad = forms.ChoiceField(choices=choices_calificacion, label='Disponibilidad', required=False)
    respeto = forms.ChoiceField(choices=choices_calificacion, label='Respeto', required=False)
    imagenPersonal = forms.ChoiceField(choices=choices_calificacion, label='Imagen Personal', required=False)
# file létrehozva
def teglalapKerulet():
    """Read the two sides of a rectangle (cm) and return its perimeter."""
    side_a = float(input("Kérem a téglalap egyik oldalát[cm]:"))
    side_b = float(input("Kérem a téglalap másik oldalát[cm]:"))
    return 2.0 * (side_a + side_b)
def teglalapTerulet():
    """Read the two sides of a rectangle (cm) and return its area."""
    side_a = float(input("Kérem a téglalap egyik oldalát[cm]:"))
    side_b = float(input("Kérem a téglalap másik oldalát[cm]:"))
    return side_a * side_b
def nyolcszogKerulet():
    """Read the side of a regular octagon (cm) and return its perimeter."""
    a=float(input("Kérem a 8szög oldalát[cm]:"))
    return float(8*a)

# Fixed: this second definition was also named nyolcszogKerulet, so it
# silently replaced the perimeter function above and left
# nyolcszogTerulet() — called at the bottom of the script — undefined
# (NameError at runtime).
def nyolcszogTerulet():
    """Read side and apothem of a regular octagon (cm) and return its area."""
    a=float(input("Kérem a 8szög oldalát[cm]:"))
    r=float(input("Kérem a 8szög sugarát[cm]:"))
    return float(4*a*r)
def haromszogKerulet():
    """Read the three sides of a triangle and return its perimeter."""
    side_a = float(input("Kérem adja meg a háromszög egyik oldalát:"))
    side_b = float(input("Kérem adja meg a háromszög másik oldalát:"))
    side_c = float(input("Kérem adja meg a háromszög harmadik oldalát:"))
    return side_a + side_b + side_c
def haromszogTerulet():
    """Read one side and its height and return the triangle's area."""
    base = float(input("Kérem adja meg a háromszög adott oldalát:"))
    height = float(input("Kérem adja meg a háromszög adott oldalának a magasságát:"))
    return base * height / 2
def korKerulet():
    """Read a circle's radius and return its circumference (pi ~ 3.14)."""
    radius = float(input("Kérem adja meg a kör sugarát:"))
    return 2 * 3.14 * radius
def korTerulet():
    """Read a circle's radius and return its area (pi ~ 3.14)."""
    radius = float(input("Kérem adja meg a kör sugarát:"))
    return 3.14 * radius ** 2
# Interactive menu: pick a shape, then print its perimeter and area.
print("1 - Háromszög")
print("2 - Kör")
print("3 - Teglalap")
print("4 - Nyolcszög")
v = input("Milyen alakzattal szeretnél dolgozni?")
if v == "1":
    print(haromszogKerulet())
    print(haromszogTerulet())
elif v == "2":
    print(korKerulet())
    print(korTerulet())
elif v == "3":
    print(teglalapKerulet())
    print(teglalapTerulet())
else:
    # Any other answer falls through to the octagon branch.
    # NOTE(review): nyolcszogTerulet is undefined in the original file (the
    # area function was mis-named nyolcszogKerulet) — confirm the rename.
    print(nyolcszogKerulet())
    print(nyolcszogTerulet())
|
#! /usr/bin/python
import cgi
import cgitb
# Render rich tracebacks in the browser while debugging this CGI script.
cgitb.enable ()
# Parsed GET/POST parameters of the request.
form = cgi.FieldStorage ()
def fib (n):
    """Return the n-th Fibonacci number, with fib(0) == fib(1) == 1.

    Iterative instead of the original naive double recursion, which is
    exponential in n and exhausts the stack for modest inputs.  Values for
    n <= 1 (including negatives) stay 1, matching the old base case.
    """
    if n <= 1: return 1
    a, b = 1, 1
    for _ in range (n - 1):
        a, b = b, a + b
    return b
# CGI protocol: emit the Content-Type header and a blank line before any body.
print "Content-Type: text/html"
print
print """
<HTML>
<HEAD>
<TITLE>Python fibonacci</TITLE>
</HEAD>
<BODY>
"""
# 'num' arrives as a query-string/form field, e.g. ?num=10
if "num" not in form:
    print "<h2> Missing number </h2>"
else:
    print "<h3> The requested fibonacci number is : "
    # NOTE(review): int() raises on non-numeric input, producing a cgitb
    # error page instead of a friendly message — confirm this is acceptable.
    print fib (int (form["num"].value))
    print "</h3>"
print """
</BODY>
</HTML>
"""
|
import unittest
from value_objects import once
from value_objects.util.testing import eq
class OnceTestCase( unittest.TestCase ):
  """Tests for the @once memoising-property decorator."""

  def test_once_example( self ):
    '''
    make sure properties decorated with @once are only computed once
    '''
    class Person( object ):
      def __init__( s, age ):
        s.compute_count = 0
        s.age = age

      @once
      def double_age( s ):
        s.compute_count += 1
        return s.age * 2

    person = Person( age = 5 )
    eq( 5, person.age )
    eq( 0, person.compute_count )
    # First access computes and caches the value...
    eq( 10, person.double_age )
    eq( 1, person.compute_count )
    # ...subsequent accesses hit the cache, so the count stays at 1.
    eq( 10, person.double_age )
    eq( 1, person.compute_count )

  # Fixed: the parameter was misspelt 'sef'; it still worked positionally
  # but broke the unittest convention and any tooling expecting 'self'.
  def test_unnamed_lambdas( self ):
    '''
    make sure lambda with the same __name__ are cached separately
    '''
    class C( object ):
      x = once( lambda s: 'x' )
      y = once( lambda s: 'y' )

    obj = C()
    eq( obj.x, 'x' )
    eq( obj.y, 'y' )
    eq( obj.x, 'x' )
    eq( obj.y, 'y' )
|
"""basic ding-dong bot for the wechaty plugin"""
from typing import Union
from wechaty import Message, Contact, Room, FileBox
from wechaty.plugin import WechatyPlugin
class DingDongPlugin(WechatyPlugin):
    """Reply 'dong' (plus an image) whenever a message says '#ding'."""

    @property
    def name(self):
        """name of the plugin"""
        return 'ding-dong'

    async def on_message(self, msg: Message):
        """listen message event"""
        talker = msg.talker()
        body = msg.text()
        room = msg.room()
        if body != '#ding':
            return
        # Answer in the room when there is one, otherwise directly.
        conversation: Union[Room, Contact] = room if room is not None else talker
        await conversation.ready()
        await conversation.say('dong')
        image = FileBox.from_url(
            'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/'
            'u=1116676390,2305043183&fm=26&gp=0.jpg',
            name='ding-dong.jpg')
        await conversation.say(image)
|
from datetime import time

# Every 5-minute slot of the day: time(0, 0), time(0, 5), ..., time(23, 55).
lista_horarios = [time(h, m) for h in range(24) for m in range(0, 60, 5)]
# Django-style choices: (value, ISO-formatted human-readable label).
HORA = tuple((t, t.isoformat()) for t in lista_horarios)
DEVOLUCION = (
    ("Lleno", "Lleno"),
    ("Mitad", "Mitad"),
    ("Vacio", "Vacio"),
)
|
import numpy as np
import matplotlib.pyplot as plt
def distance(x1, y1, x2, y2):
    """Return the *squared* Euclidean distance between (x1, y1) and (x2, y2).

    The square root is omitted because callers only compare magnitudes.
    """
    dx = x2 - x1
    dy = y2 - y1
    return dx * dx + dy * dy
class Node(object):
    """A grid cell identified by its index; the four neighbour links are
    initially unset and get filled in later by Graph."""

    def __init__(self, i):
        self.index = i
        self.left = self.right = self.up = self.down = None
class Graph(object):
    """Reconstructs a 2-D grid from detected symbols.

    Each entry of *matrix* is [id, x, y, xmin, ymin, xmax, ymax] for one
    detection.  Nodes are linked to their nearest left/right/up/down
    neighbour, the bottom-left node becomes the start, and the grid's
    (rows, cols) shape is derived by walking right and up from it.
    """
    def __init__(self, matrix, max_l, mnist=False):
        # max_l: largest detection box edge, used as a row/column tolerance
        # in mnist mode.  Shape counters are private and derived below.
        self.__row = 0
        self.__col = 0
        self.graph = []
        self.max_l = max_l
        self.shape = (self.__row, self.__col)
        self.__mnist = mnist
        for i in range(len(matrix)):
            self.graph.append(Node(i))
        for i in range(len(matrix)):
            id1, x1, y1, xmin1, ymin1, xmax1, ymax1 = matrix[i]
            # Linear scan for the node with this index.
            # NOTE(review): self.graph[i] would be equivalent and O(1),
            # since nodes are appended in index order — confirm.
            for n in self.graph:
                if n.index == i:
                    node = n
            # Best (squared) distances and candidates in each direction.
            lmd, rmd, umd, dmd = float('inf'), float('inf'), float('inf'), float('inf')
            l, r, u, d = None, None, None, None
            for other in self.graph:
                j = other.index
                if i == j:
                    continue
                id2, x2, y2, xmin2, ymin2, xmax2, ymax2 = matrix[j]
                # left and right
                # Same row: centre-y within max_l/2 (mnist) or the boxes
                # mutually overlap vertically.
                if (abs(y1-y2) <= self.max_l/2 and mnist) or (ymin2 < y1 < ymax2 and ymin1 < y2 < ymax1):
                    #      *
                    #      |
                    # *--j--i
                    #      |
                    #      *
                    if x1 > x2:
                        if distance(x1, y1, x2, y2) < lmd:
                            lmd = distance(x1, y1, x2, y2)
                            l = other
                    #      *
                    #      |
                    # i--j--*
                    #      |
                    #      *
                    else:
                        if distance(x1, y1, x2, y2) < rmd:
                            rmd = distance(x1, y1, x2, y2)
                            r = other
                # up and down
                # Same column: symmetric test on x.
                if (abs(x2-x1) <= self.max_l/2 and mnist) or (xmin2 < x1 < xmax2 and xmin1 < x2 < xmax1):
                    #      i
                    #      |
                    # *--j--*
                    #      |
                    #      *
                    if y1 > y2:
                        if distance(x1, y1, x2, y2) < dmd:
                            dmd = distance(x1, y1, x2, y2)
                            d = other
                    #      *
                    #      |
                    # *--j--*
                    #      |
                    #      i
                    else:
                        if distance(x1, y1, x2, y2) < umd:
                            umd = distance(x1, y1, x2, y2)
                            u = other
            node.left, node.right, node.up, node.down = l, r, u, d
        # The start node is the corner with no left or down neighbour.
        self.start = None
        for node in self.graph:
            if node.left is None and node.down is None:
                self.start = node
        # Count columns walking right, rows walking up from the start.
        p = self.start
        while p:
            self.__col += 1
            p = p.right
        p = self.start
        while p:
            self.__row += 1
            p = p.up
        self.shape = (self.__row, self.__col)

    def print_graph(self, matrix):
        """Print each detection with the ids of its four linked neighbours."""
        for node in self.graph:
            i = node.index
            print(matrix[i], "left[{}],right[{}],up[{}],down[{}]".format(
                matrix[node.left.index][0] if node.left else None,
                matrix[node.right.index][0] if node.right else None,
                matrix[node.up.index][0] if node.up else None,
                matrix[node.down.index][0] if node.down else None,
            ))

    def plot_graph(self, matrix):
        """Scatter-plot the detection centres with (id, x, y) annotations."""
        plt.grid(True)
        for i, x, y, xmin, ymin, xmax, ymax in matrix:
            plt.plot(x, y, 'o')
            print('(%d,%d,%d)' % (i, x, y))
            plt.annotate('(%d,%d,%d)' % (i, x, y), xy=(x, y), xytext=(-20, 10), textcoords='offset points')
        plt.show()

    def get_matrix(self, matrix):
        """Walk the linked grid from start and return the ids as a 2-D
        numpy array of shape (rows, cols)."""
        Matrix1 = [[0] * self.__col for _ in range(self.__row)]
        p = self.start
        i, j = 0, 0
        while p:
            q = p
            while q:
                # print(i, j, matrix[q.index])
                Matrix1[i][j] = matrix[q.index][0]
                q = q.right
                j += 1
            p = p.up
            i += 1
            j = 0
        Matrix1 = np.array(Matrix1)
        return Matrix1
class calculator(object):
    """Turns detected symbols (digits, +, -, x, T) into a matrix expression
    and evaluates it: determinant of a single matrix, or add/subtract/
    multiply two matrices, with optional transposes."""

    def __init__(self, classes):
        self.operator = None
        self.matrixs = []
        self.points = {}     # label -> list of [id, x, y, xmin, ymin, xmax, ymax, score]
        self.T = []          # per-matrix transpose flags
        self.max_l = 0       # largest box edge seen; row/col tolerance for Graph
        for label in classes:
            self.points[label] = []

    def get_from_points(self, points, max_l):
        """Use an already-built detection dict instead of parsing a txt file."""
        self.points = points
        self.max_l = max_l

    def get_from_txt(self, txtPath):
        """Parse one detection per line, de-duplicating overlapping boxes."""
        txtFile = open(txtPath)
        txtList = txtFile.readlines()
        for oneline in txtList:
            idx, index, label, xmin, ymin, xmax, ymax, score = oneline.split(" ")
            xmin, ymin, xmax, ymax, score = int(xmin), int(ymin), int(xmax), int(ymax), float(score[:-1])
            self.max_l = max(self.max_l, xmax-xmin, ymax-ymin)
            x, y = (xmin + xmax) / 2, (ymin + ymax) / 2
            # Prevent overlapping digit boxes: keep the larger one.
            if label == 'number':
                drop = False
                for i, (idx2, x2, y2, xmin2, ymin2, xmax2, ymax2, score2) in enumerate(self.points['number']):
                    if (xmin2 < x < xmax2 and ymin2 < y < ymax2) or (xmin < x2 < xmax and ymin < y2 < ymax):
                        if (xmax - xmin) * (ymax - ymin) < (xmax2 - xmin2) * (ymax2 - ymin2):
                            drop = True
                            break
                        else:
                            self.points['number'][i] = [int(idx), x, y, xmin, ymin, xmax, ymax, score]
                            drop = True
                            break
                if not drop:
                    self.points[label].append([int(idx), x, y, xmin, ymin, xmax, ymax, score])
            # For other labels keep the higher-scoring overlapping detection.
            else:
                drop = False
                for l in self.points.keys():
                    for i, (idx2, x2, y2, xmin2, ymin2, xmax2, ymax2, score2) in enumerate(self.points[l]):
                        if (xmin2 < x < xmax2 and ymin2 < y < ymax2) or (xmin < x2 < xmax and ymin < y2 < ymax):
                            if score < score2:
                                drop = True
                                break
                            else:
                                # NOTE(review): deletes while enumerating the
                                # same list without breaking — confirm intent.
                                del self.points[l][i]
                if not drop:
                    self.points[label].append([int(idx), x, y, xmin, ymin, xmax, ymax, score])
        print(self.points)

    def get_operator(self):
        """Pick the single arithmetic operator; default to determinant."""
        for l in self.points.keys():
            if l == "add" or l == "multi" or l == "minus":
                if len(self.points[l]) == 1 and self.operator is None:
                    self.operator = l
                elif len(self.points[l]) > 1 or (len(self.points[l]) == 1 and self.operator is not None):
                    raise NotImplementedError("There are more than one operator in the picture!")
        # With a single matrix and no operator, compute its determinant.
        if self.operator is None:
            self.operator = "det"
        print("The operator is %s" % self.operator)

    def sort_matrix(self):
        """Split the digit detections into one or two matrices (by operator
        position) and record the transpose flag(s)."""
        if self.operator == "det":
            matrix = []
            for i, (id, x, y, xmin, ymin, xmax, ymax, _) in enumerate(self.points['number']):
                matrix.append([id, x, y, xmin, ymin, xmax, ymax])
            print("The len of matrix is %d" % len(matrix))
            # check_dim = True
            # for i in range(10):
            #     if len(matrix) == i ** 2:
            #         check_dim = False
            # if check_dim:
            #     raise NotImplementedError(
            #         "It's not a square, so cannot do the det operation!"
            #     )
            self.matrixs.append(matrix)
            if len(self.points["T"]) == 1:
                self.T.append(True)
            elif len(self.points["T"]) == 0:
                self.T.append(False)
            else:
                raise NotImplementedError(
                    "The number of the transpose operator is not match with the number of the matrix!"
                )
        else:
            _, operator_x, operator_y, _, _, _, _, _ = self.points.get(self.operator, None)[0]
            matrix1 = []
            matrix2 = []
            # Digits left of the operator belong to matrix1, right to matrix2.
            for i, (id, x, y, xmin, ymin, xmax, ymax, _) in enumerate(self.points['number']):
                if x < operator_x:
                    matrix1.append([id, x, y, xmin, ymin, xmax, ymax])
                else:
                    matrix2.append([id, x, y, xmin, ymin, xmax, ymax])
            print("The number of the elements in left matrix is %d" % len(matrix1))
            print("The number of the elements in right matrix is %d" % len(matrix2))
            self.matrixs = [matrix1, matrix2]
            if self.operator == "add" or self.operator == "minus":
                if len(matrix1) != len(matrix2):
                    raise NotImplementedError(
                        "The len of left matrix is not the same as the len of right matrix,",
                        " so they cannot do the %s operation" % self.operator
                    )
            self.T = [False, False]
            if len(self.points["T"]) > 2:
                raise NotImplementedError(
                    "The number of the transpose operator is not match with the number of the matrix!"
                )
            else:
                for i, (_, x, y, xmin, ymin, xmax, ymax, _) in enumerate(self.points["T"]):
                    if x < operator_x:
                        if self.T[0]:
                            raise NotImplementedError(
                                "The number of the transpose operator is not match with the number of the matrix!"
                            )
                        else:
                            self.T[0] = True
                    else:
                        if self.T[1]:
                            raise NotImplementedError(
                                "The number of the transpose operator is not match with the number of the matrix!"
                            )
                        else:
                            self.T[1] = True

    def __call__(self):
        """Evaluate the recognised expression and return the result."""
        self.get_operator()
        self.sort_matrix()
        if self.operator == "det":
            graph = Graph(self.matrixs[0], self.max_l)
            matrix = graph.get_matrix(self.matrixs[0])
            # print(self.matrixs[0])
            print("The shape of the matrix is ", graph.shape)
            # graph.plot_graph(matrix)
            if self.T[0]:
                matrix = matrix.T
            print(matrix)
            return int(np.linalg.det(matrix))
        else:
            # print(self.matrixs[0], self.max_l)
            graph1 = Graph(self.matrixs[0], self.max_l)
            graph2 = Graph(self.matrixs[1], self.max_l)
            matrix1 = graph1.get_matrix(self.matrixs[0])
            if self.T[0]:
                matrix1 = matrix1.T
            matrix2 = graph2.get_matrix(self.matrixs[1])
            if self.T[1]:
                matrix2 = matrix2.T
            print("The shape of the matrix1 is", graph1.shape)
            print("The shape of the matrix2 is", graph2.shape)
            matrix1 = np.array(matrix1)
            matrix2 = np.array(matrix2)
            print(matrix1)
            if self.operator == "add":
                print("+")
            elif self.operator == "minus":
                print("-")
            elif self.operator == "multi":  # fixed: was misspelt "mulit", so "x" never printed
                print("x")
            print(matrix2)
            print("=")
            if self.operator == "add":
                return matrix1 + matrix2
            elif self.operator == "minus":
                return matrix1 - matrix2
            elif self.operator == "multi":
                return np.dot(matrix1, matrix2)
|
from PIL import Image
import json
def conv(img):
    """Pack a 32x16 RGB image into per-row bitmasks.

    Returns (red_rows, green_rows): for each of the 16 rows, a 32-bit int
    whose most-significant bit is column 0; a bit is set when that channel
    is non-zero at the pixel.
    """
    pix = img.load()
    red_rows = []
    green_rows = []
    for row in range(16):
        red_bits = 0
        green_bits = 0
        for col in range(32):
            r, g, _b = pix[col, row]
            red_bits = (red_bits << 1) | (1 if r else 0)
            green_bits = (green_bits << 1) | (1 if g else 0)
        red_rows.append(red_bits)
        green_rows.append(green_bits)
    return (red_rows, green_rows)
def main(filename):
    """Slide a 32x16 window across *filename* pixel-by-pixel and dump every
    frame (as produced by conv) to stdout as JSON."""
    img = Image.open(filename).convert("RGB")
    frames = [conv(img.crop((offset, 0, offset + 32, 16)))
              for offset in range(img.size[0] - 32)]
    print(json.dumps(frames, indent=2))


if __name__ == "__main__":
    import sys
    main(sys.argv[1])
|
"""
Faça um programa que receba um número e retorne o fatorial dele. O fatorial de
um número qualquer n é: n*(n-1)*(n-2)*(n-3)*....*1.
Exemplo:
Entrada Saída
2 2*1 2
3 3*2*1 6
9 362880
"""
#Solução
# Read n and print n! (defined as 1 for n <= 1).
num = int(input())
fact = 1
for k in range(2, num + 1):
    fact *= k
print(fact)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* UMSE Antivirus Agent Example
* Author: David Alvarez Perez <dalvarezperez87[at]gmail[dot]com>
* Module: Intelligence Console client
* Description: This module implements Intelligence Console communication
*
* Copyright (c) 2019-2020. The UMSE Authors. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
"""
import requests
# Base URL of the local Intelligence Console server.
ENDPOINT = 'http://127.0.0.1:8080'
def upload_file(file_path, original_filename):
    '''
    This function submits an UMSE file to the Intelligence Server

    :param file_path: local path of the .umse sample to upload
    :param original_filename: file name reported to the server
    :return: the server's response body as text
    '''
    url = ENDPOINT+'/upload'
    params= {"original_filename" : original_filename}
    # 'with' closes the handle deterministically; the original leaked it.
    with open(file_path, 'rb') as sample:
        files = {'ufile': sample}
        r = requests.post(url, params=params, files=files)
    return r.text
def get_file_info(file_hash):
    '''
    This function retrieves UMSE file information from the Intelligence Server given the sample hash
    '''
    payload = {'sample_hash': file_hash}
    response = requests.post(ENDPOINT + '/intelligence', payload)
    return response.text
if __name__== "__main__":
    # Demo run: upload one sample, then query intelligence for another hash.
    # NOTE(review): both return values are discarded — presumably only the
    # server-side effect matters here; confirm.
    upload_file("samples\\b3d7aab8ad319dcf7050fe8ce67d69c8c59adc0d90c19a144c2d5c1f66c1babf.umse", "nombre.exe")
    get_file_info("ae8a8a81b218f2c80224fdd318f8e6a9e9bd8cea3f3630952916fe1454fead0b")
from lists.linked.linked_list import Node
from lists.linked.linked_list import LinkedList
import math
from trees.heap import *
#Sample Graph: http://techieme.in/breadth-first-traversal/
class Vertex:
    """A graph vertex: a value plus adjacency (val -> Vertex) and edge
    weights (val -> weight).  Rich comparisons order vertices by the
    attribute named in sort_prop (default: 'val')."""
    sort_prop = 'val'

    def __init__(self, val, edges = None, weights = None):
        self.val = val
        self.edges = {} if edges is None else edges
        self.weights = {} if weights is None else weights

    def degree(self):
        """Number of outgoing edges."""
        return len(self.edges)

    def __eq__(self, other):
        # Fixed: the original compared self.val == self.val, which is
        # always True, making every vertex "equal" to every other.
        return self.val == other.val

    def __lt__(self, other):
        return getattr(self, self.sort_prop) < getattr(other, self.sort_prop)

    def __le__(self, other):
        return getattr(self, self.sort_prop) <= getattr(other, self.sort_prop)

    def __gt__(self, other):
        return getattr(self, self.sort_prop) > getattr(other, self.sort_prop)

    def __ge__(self, other):
        return getattr(self, self.sort_prop) >= getattr(other, self.sort_prop)
class Adjacency_List_Graph():
    """Adjacency-list graph over Vertex-like objects, keyed by vertex .val."""

    def __init__(self, directed=False):
        self.directed = directed
        self.vertices = {}   # val -> vertex object

    def add_vertex(self, v):
        """Register v; in undirected graphs, mirror its pre-existing edges."""
        self.vertices[v.val] = v
        if not self.directed:
            for vertex in v.edges.values():
                if vertex.val not in self.vertices:
                    self.add_vertex(Vertex(vertex.val))
                vertex.edges[v.val] = v

    def add_vertices(self, *vertices):
        for vertex in vertices:
            self.add_vertex(vertex)

    def add_edge(self, a, b, weight = None):
        """Add edge a->b (and b->a when undirected), registering both ends."""
        if a.val not in self.vertices: self.vertices[a.val] = a
        if b.val not in self.vertices: self.vertices[b.val] = b
        a.edges[b.val] = b
        if weight: a.weights[b.val] = weight
        if not self.directed: b.edges[a.val] = a

    def delete_vertex(self, vertex):
        """Remove all edges of *vertex* (a val key), then null its slot."""
        for edge in list(self.vertices[vertex].edges):
            self.delete_edge(vertex, edge)
        # NOTE: keeps the key with a None placeholder (historical behaviour).
        self.vertices[vertex] = None

    def delete_edge(self, a, b):
        self.vertices[a].edges.pop(b, None)
        if not self.directed:
            self.vertices[b].edges.pop(a, None)

    def contract_vertex(self, v1, v2):
        pass

    def _start_vertex(self, source):
        """Resolve the traversal start: *source* as a val key when given,
        otherwise the first vertex ever inserted."""
        if source is not None and source in self.vertices:
            return self.vertices[source]
        # Fixed: dict.values() is a view and not subscriptable in Python 3;
        # the original self.vertices.values()[0] raised TypeError.
        return next(iter(self.vertices.values()))

    def depth_first_traversal(self, source=None, find=None):
        """Iterative DFS; returns the visited vals in visit order."""
        path = []
        stack = [self._start_vertex(source)]
        visited_or_in_stack = {stack[0].val: True}
        while stack:
            visiting = stack.pop()
            path.append(visiting.val)
            for vertex in visiting.edges.values():
                if vertex.val not in visited_or_in_stack:
                    stack.append(vertex)
                    visited_or_in_stack[vertex.val] = True
        return path

    def breadth_first_traversal(self, source=None, find=None):
        """BFS; returns the visited vals in visit order."""
        path = []
        queue = [self._start_vertex(source)]
        visited_or_in_stack = {queue[0].val: True}
        while queue:
            visiting = queue.pop(0)
            path.append(visiting.val)
            for vertex in visiting.edges.values():
                if vertex.val not in visited_or_in_stack:
                    queue.append(vertex)
                    visited_or_in_stack[vertex.val] = True
        return path

    def get_shortest_path_costs(self, source):
        pass

    def get_mst(self, root):
        pass
if __name__ == '__main__':
    # Smoke test lives in the ztesting package; run this module directly.
    import ztesting.graphs_test.adj_list_graph_test as test
    test.test_adj_list_graph()
# Generated by Django 3.2 on 2021-05-13 13:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Profil model, a
    # one-to-one extension of the auth user with contact details and a
    # five-image portfolio gallery.  Do not hand-edit generated fields.
    initial = True

    dependencies = [
        ('listy', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profil',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email_confirmed', models.BooleanField(default=False)),
                ('imie', models.CharField(blank=True, default='', max_length=50, null=True)),
                ('nazwisko', models.CharField(blank=True, default='', max_length=50, null=True)),
                ('profilimg', models.ImageField(blank=True, default='media/1.jpg', null=True, upload_to='media/profiles/')),
                ('opis', models.TextField(blank=True)),
                ('kod', models.DecimalField(blank=True, decimal_places=0, max_digits=6, null=True)),
                ('strona', models.CharField(blank=True, default='np www.mojadomena.pl', max_length=255, null=True)),
                ('telefon', models.CharField(blank=True, default='', max_length=50, null=True)),
                ('profesja', models.CharField(blank=True, default='', max_length=50, null=True)),
                ('miasto', models.CharField(blank=True, default='', max_length=50, null=True)),
                ('port1', models.ImageField(blank=True, default='media/1.jpg', null=True, upload_to='media/portfolio/', verbose_name='galeria zdjęcie nr 1')),
                ('port2', models.ImageField(blank=True, default='media/1.jpg', null=True, upload_to='media/portfolio/', verbose_name='galeria zdjęcie nr 2')),
                ('port3', models.ImageField(blank=True, default='media/1.jpg', null=True, upload_to='media/portfolio/', verbose_name='galeria zdjęcie nr 3')),
                ('port4', models.ImageField(blank=True, default='media/1.jpg', null=True, upload_to='media/portfolio/', verbose_name='galeria zdjęcie nr 4')),
                ('port5', models.ImageField(blank=True, default='media/1.jpg', null=True, upload_to='media/portfolio/', verbose_name='galeria zdjęcie nr 5')),
                ('kategoria', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='listy.category', verbose_name='Kategoria')),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Profil',
                'verbose_name_plural': 'Profile',
            },
        ),
    ]
|
#ISBNから書名、著者、出版時期を取得するモジュール
# Read an ISBN; pass ISBN-13 through, ISBN-10 conversion is still a WIP.
InputISBN = input()
length = len(InputISBN)
if length == 13:
    ISBN_13 = InputISBN
    print(ISBN_13)
elif length == 10:
    # TODO: prepend "978", recompute the check digit, and emit the ISBN-13.
    # A drafted implementation (even/odd digit sums * weights, check digit
    # = 10 - sum % 10) was sketched here but is not finished yet.
    print("WIP")
else:
    print("ERROR: Check your input.")
from open_pension_crawler.OpenPensionCrawlSpiderBase import OpenPensionCrawlSpiderBase
class PsagotSpider(OpenPensionCrawlSpiderBase):
    """Crawl Psagot's pension-savings reports page for quarterly Excel files."""
    name = 'psagot'
    allowed_domains = ['psagot.co.il']
    start_urls = ['https://www.psagot.co.il/heb/PensionSavings/GeneralInformation/Pages/gemelcompanyreports.aspx']
    # Matches quarterly report workbooks, e.g. "...gSUM_...01....xlsx".
    # Fixed: the dot before the extension is now escaped; as a bare '.'
    # it matched any character (e.g. "Zxlsx" would have passed).
    regex = r'(.*)gSUM_(.*)(01|02|03|04)(.*)\.(xlsx|xls)'
|
import json

dict1 = {'name': 'zengwenhai', 'age': 29}
# Write the dict as JSON; 'with' closes the handle (it used to leak).
with open('name.json', 'w') as out_file:
    json.dump(dict1, out_file)
# Read it back and print the decoded data.  The original printed the open
# file object itself (useless repr) and used a hard-coded absolute
# C:\Users\... path instead of the file just written.
with open('name.json', 'r', encoding='UTF-8') as in_file:
    print(json.load(in_file))
import RPi.GPIO as GPIO
import time
import network
# BCM pin number wired to the toggle switch.
SWITCH = 21
GPIO.setmode(GPIO.BCM)
# Plain digital input; no internal pull-up/down is configured here.
GPIO.setup(SWITCH, GPIO.IN)
def heard(phrase):
    """Network callback: for every payload character received, report the
    current switch state back ('1' when closed, '0' when open)."""
    print ("heard:" + phrase)
    for ch in phrase:
        if ch == "\r" or ch == "\n":
            continue  # strip line endings
        network.say("1" if GPIO.input(SWITCH) else "0")
# Serve forever: block until a client connects, then poll once a second
# until it disconnects; incoming phrases are handled by heard().
while True:
    print ("waiting for connection")
    network.wait(whenHearCall=heard)
    print ("connected")
    while network.isConnected():
        print ("server is running")
        time.sleep(1)
    print ("connection closed")
|
#!/usr/bin/env python
from setuptools import setup, find_packages

# Packaging metadata for pyfedid.
# Fixed: 'plateformes' -> 'platforms' and 'packages_dir' -> 'package_dir'.
# setuptools silently ignores unknown keyword arguments, so the originals
# had no effect at all.
setup(name='pyfedid',
      version='1.0.0',
      platforms='UNIX',
      description='Package for Decathlon FEDID',
      packages=find_packages(),
      package_dir={'': 'pyfedid'},
      author='Sylvain Lemoine',
      author_email='sylvain.lemoine@decathlon.com',
      url='https://github.com/decathloncanada/pyfedid',
      download_url='https://github.com/decathloncanada/pyfedid',
      license='GPL Version 3',
      zip_safe=False,
      long_description=open('README.md').read())
|
from flask import request
from flask import Flask
import flask
import hashlib
from libs.thumbnail import *
from libs.fs import FS
from flask import request, Blueprint
from libs.util import make_response
from .authorization import require_auth
import logging
# Flask blueprint grouping the image upload/download endpoints.
app = Blueprint('image', __name__)
def image_ext(content_type):
    """Map a supported image MIME type to a file extension ('' otherwise)."""
    extensions = {
        "image/png": ".png",
        "image/jpeg": ".jpg",
    }
    return extensions.get(content_type, "")
def upload_form_image():
    """Store a multipart-form image under its MD5 name; return src/src_url."""
    if 'file' not in request.files:
        return make_response(400)
    upload = request.files['file']
    ext = image_ext(upload.headers.get("Content-Type", ''))
    if not ext:
        return make_response(400, {"error":"can't get image extenstion"})
    body = upload.read()
    if not body:
        return make_response(400, {"error":"data is null"})
    filename = hashlib.md5(body).hexdigest() + ext
    if not FS.upload("/images/" + filename, body):
        return make_response(400, {"error":"upload file fail"})
    payload = {
        "src": "/images/" + filename,
        "src_url": request.url_root + "images/" + filename,
    }
    return make_response(200, data=payload)
def upload_image_v0():
    """Handle a legacy (v0) raw-body image upload, typed by Content-Type."""
    body = request.data
    if not body:
        return make_response(400)
    ext = image_ext(request.headers.get("Content-Type", ''))
    if not ext:
        return make_response(400)
    digest = hashlib.md5(body).hexdigest()
    stored_path = "/images/" + digest + ext
    if not FS.upload(stored_path, body):
        return make_response(400)
    obj = {
        "src": "/images/" + digest + ext,
        "src_url": request.url_root + "images/" + digest + ext,
    }
    return make_response(200, data=obj)
@app.route('/images', methods=['POST'])
@require_auth
def upload_image():
    """Dispatch an upload: legacy raw-body handler when the request has
    no API version, multipart-form handler otherwise."""
    legacy = request.version is None
    return upload_image_v0() if legacy else upload_form_image()
def download_thumbnail(path):
    """Return thumbnail bytes for *path*, generating and caching on demand.

    Checks the FS backend for a cached thumbnail first; on a miss it
    downloads the original image, renders the thumbnail, and uploads it
    back to the cache.  Returns "" on any failure.
    """
    tb_path = thumbnail_path(path)
    data = FS.download(tb_path)
    if not data:
        origin, params = parse_thumbnail_path(path)
        data = FS.download(origin)
        if not data:
            return ""
        # debug logging instead of the original print() so production
        # stdout stays clean
        logging.debug("thumbnail source len=%s type=%s", len(data), type(data))
        data = create_thumbnail(data, params)
        r = FS.upload(tb_path, data)
        if not r:
            return ""
    return data
@app.route('/images/<image_path>', methods=['GET'])
def download_image(image_path):
    """Serve an image (or a generated thumbnail) stored on the FS backend."""
    path = "/images/" + image_path
    fetch = download_thumbnail if is_thumbnail(path) else FS.download
    data = fetch(path)
    if not data:
        return flask.make_response("", 400)
    res = flask.make_response(data, 200)
    if image_path.endswith(".jpg"):
        res.headers['Content-Type'] = "image/jpeg"
    elif image_path.endswith(".png"):
        res.headers['Content-Type'] = "image/png"
    else:
        logging.info("invalid image type")
    return res
|
import re
import utils
import Dataset
import torch
import torch.utils.data
import torchvision
from engine import train_one_epoch, evaluate
from torch.utils.tensorboard import SummaryWriter
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# replace the classifier with a new one, that has
# num_classes which is user-defined
num_classes = 13 # 12 class + background
background = 'with_paper' # can choose from 'blank' , 'with_paper' , 'with_coin'
object = 'Mutter' # can choose from 'all' , 'Schraube' , 'Mutter'  # NOTE(review): shadows the builtin `object`
# set epochs
num_epochs = 15
def faster_rcnn(num_classes):
    """Build a COCO-pretrained Faster R-CNN (ResNet-50 FPN) whose box
    predictor head is replaced to output *num_classes* classes."""
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    head_in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(head_in_features, num_classes)
    return model
import transforms as T
def get_transform(train):
    """Build the augmentation pipeline.

    train=True adds color jitter, random rotation and horizontal flips
    around tensor conversion; evaluation only converts to tensor.
    """
    if train:
        steps = [
            T.ColorJitter(brightness=0.5, saturation=0.5),
            T.RandomRotation(),
            T.ToTensor(),
            T.RandomHorizontalFlip(0.5),
        ]
    else:
        steps = [T.ToTensor()]
    return T.Compose(steps)
if __name__ == '__main__':
    #def main():
    # TensorBoard run tagged with the dataset configuration
    writer = SummaryWriter(comment=background+'_'+object+'_'+str(num_epochs))
    dataset = Dataset.Schraubenerkennung(root = 'Datensatz_512',
        background= background, which_object = object, transforms = get_transform(train=True))
    dataset_test = Dataset.Schraubenerkennung(root='Datensatz_512',
        background= background, which_object= object, transforms=get_transform(train=False))
    #split the dataset in train and test set
    # fixed seed so the train/test split is reproducible
    torch.manual_seed(1)
    indices = torch.randperm(len(dataset)).tolist()
    #set ratio between dataset_train and dataset_test
    indices_ratio = 0.1
    len_indices = len(indices)
    num_test = int(indices_ratio*len_indices)
    # the same permutation indexes both Subsets, so the splits are disjoint
    dataset = torch.utils.data.Subset(dataset, indices[:-num_test])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-num_test:])
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=4, shuffle=True, num_workers=0,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False, num_workers=0,
        collate_fn=utils.collate_fn)
    # device = torch.device('cpu')
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    #choose the model
    model = faster_rcnn(num_classes)
    # move model to the right device
    model.to(device)
    # construct an optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=0.0002,
                                 weight_decay=0.0005)
    # momentum=0.9, # when SGD
    # and a learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    # TensorBoard scalar tags for the COCO AP metrics returned by evaluate()
    # NOTE(review): 'mudium' is a typo inside the tag string; left as-is
    # because changing it would rename the recorded metric
    titels = ['AP IOU=0.5:0.95', 'AP IOU=0.5', 'AP IOU=0.75',
              'AP IOU=0.5:0.95 small', 'AP IOU=0.5:0.95 mudium', 'AP IOU=0.5:0.95 large']
    for epoch in range(num_epochs):
        # For Training
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        results, maps = evaluate(model, data_loader_test, device=device)
        for titel, map in zip(titels, maps):
            # print('map is: ' ,map)
            # NOTE(review): loop variable `map` shadows the builtin
            writer.add_scalar(titel, map, epoch)
    writer.close()
    # torch.save(model.state_dict(), 'model')
    print("That's it!")
|
# -*- coding: utf-8 -*-
#
# Copyright 2015, 2016 Ramil Nugmanov <stsouko@live.ru>
# This file is part of PREDICTOR.
#
# PREDICTOR is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import gzip
from collections import defaultdict
from math import ceil
import dill
import hashlib
import os
import subprocess as sp
import pandas as pd
from functools import reduce
from io import StringIO
from MODtools.config import MOLCONVERT
from MODtools.consensus import ConsensusDragos
from MODtools.utils import chemaxpost
from MWUI.config import ModelType, ResultType
class Model(ConsensusDragos):
    """Consensus wrapper around a set of serialized sub-models.

    Loads a gzipped dill archive holding the sub-models plus their
    configuration, and aggregates their predictions with the
    applicability-domain / tolerance bookkeeping inherited from
    ConsensusDragos (self.errors, self.trust_desc).
    """
    def __init__(self, file):
        # archive layout: {'models': [...], 'config': {...}}
        tmp = dill.load(gzip.open(file, 'rb'))
        self.__models = tmp['models']
        self.__conf = tmp['config']
        self.__workpath = '.'
        # Nlim: minimum fraction of sub-models that must be in-domain;
        # TOL: tolerance used for both sigma and in/out-of-domain deltas
        self.Nlim = self.__conf.get('nlim', 1)
        self.TOL = self.__conf.get('tol', 1e10)
        self.__units = self.__conf.get('report_units')
        # NOTE(review): __show_structures is stored but not used in this chunk
        self.__show_structures = self.__conf.get('show_structures')
    def get_example(self):
        """Return the example structure from the model config, if any."""
        return self.__conf.get('example')
    def get_description(self):
        """Return the human-readable description from the model config."""
        return self.__conf.get('desc')
    def get_name(self):
        """Return the model's (required) name."""
        return self.__conf['name']
    def get_type(self):
        """Return the model kind as a ModelType enum member."""
        return ModelType(self.__conf['type'])
    def setworkpath(self, workpath):
        """Set the scratch directory for this model and all sub-models."""
        self.__workpath = workpath
        for m in self.__models:
            m.setworkpath(workpath)
    @property
    def __format(self):
        # reaction models consume RDF, molecular models consume SDF
        return "rdf" if self.get_type() == ModelType.REACTION_MODELING else "sdf"
    @staticmethod
    def __merge_wrap(x, y):
        # outer-join two prediction frames on their index
        return pd.merge(x, y, how='outer', left_index=True, right_index=True)
    @staticmethod
    def __report_atoms(atoms):
        # render the per-site atom suffix for report keys ('' when no atoms)
        return atoms and ' [Modeled site atoms: %s]' % ', '.join(atoms) or ''
    def get_results(self, structures):
        """Run every sub-model on *structures* and build a consensus report.

        Returns a list of per-structure result dicts, or False when input
        conversion fails or the report does not cover every structure.
        """
        # prepare input file
        if len(structures) == 1:
            # single structure: convert through the ChemAxon web service
            chemaxed = chemaxpost('calculate/molExport',
                                  dict(structure=structures[0]['data'],
                                       parameters=self.__format))
            if not chemaxed:
                return False
            additions = dict(pressure=structures[0]['pressure'], temperature=structures[0]['temperature'])
            for n, a in enumerate(structures[0]['additives'], start=1):
                additions['additive.%d' % n] = a['name']
                additions['amount.%d' % n] = '%f' % a['amount']
            data = chemaxed['structure']
        else:
            # batch: convert locally via the molconvert executable
            with sp.Popen([MOLCONVERT, self.__format],
                          stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=self.__workpath) as convert_mol:
                data = convert_mol.communicate(input=''.join(s['data'] for s in structures).encode())[0].decode()
                if convert_mol.returncode != 0:
                    return False
            additions = dict(pressure=[], temperature=[])
            for m, s in enumerate(structures):
                additions['pressure'].append(s['pressure'])
                additions['temperature'].append(s['temperature'])
                for n, a in enumerate(s['additives']):
                    additions.setdefault('additive.%d' % n, {})[m] = a['name']
                    additions.setdefault('amount.%d' % n, {})[m] = a['amount']
        print(additions)  # NOTE(review): debug leftover
        res = []
        for m in self.__models:
            with StringIO(data) as f:
                res.append(m.predict(f, **additions))
        err_report = defaultdict(dict)
        trust_report = defaultdict(dict)
        res_report = defaultdict(dict)
        # all_y_domains = reduce(merge_wrap, (x['y_domain'] for x in res))
        # combine per-model domain flags and predictions column-wise
        all_domains = reduce(self.__merge_wrap, (x['domain'] for x in res)).fillna(False)
        all_predictions = reduce(self.__merge_wrap, (x['prediction'] for x in res))
        # predictions masked to in-domain models only
        in_predictions = all_predictions.mask(all_domains ^ True)
        # trust starts at the best grade (5) and is decremented by penalties
        trust = pd.Series(5, index=all_predictions.index)
        # mean predicted property
        avg_all = all_predictions.mean(axis=1)
        sigma_all = all_predictions.var(axis=1)
        avg_in = in_predictions.mean(axis=1)
        sigma_in = in_predictions.var(axis=1)
        avg_diff = (avg_in - avg_all).abs()  # difference bt in AD and all predictions. NaN for empty in predictions.
        avg_diff_tol = avg_diff > self.TOL  # ignore NaN
        trust.loc[avg_diff_tol] -= 1
        for r, d in avg_diff.loc[avg_diff_tol].items():
            # index entries may be (structure, *atoms) tuples or plain ids
            s, *n = r if isinstance(r, tuple) else (r,)
            err_report[s].setdefault(self.__report_atoms(n), []).append(self.errors['diff'] % d)
        avg_in_nul = avg_in.isnull()
        trust.loc[avg_in_nul] -= 2  # totally not in domain
        for r in avg_in_nul.loc[avg_in_nul].index:
            s, *n = r if isinstance(r, tuple) else (r,)
            err_report[s].setdefault(self.__report_atoms(n), []).append(self.errors['zad'])
        avg_domain = all_domains.mean(axis=1)
        avg_domain_bad = (avg_domain < self.Nlim) ^ avg_in_nul  # ignore totally not in domain
        trust.loc[avg_domain_bad] -= 1
        for r, d in avg_domain.loc[avg_domain_bad].items():
            s, *n = r if isinstance(r, tuple) else (r,)
            err_report[s].setdefault(self.__report_atoms(n), []).append(self.errors['lad'] % ceil(100 * d))
        # update avg and sigma based on consensus
        good = avg_domain >= self.Nlim
        avg_all.loc[good] = avg_in.loc[good]
        sigma_all.loc[good] = sigma_in.loc[good]
        # penalize high prediction spread relative to the tolerance
        proportion = sigma_all / self.TOL
        proportion_bad = proportion > 1
        trust.loc[proportion_bad] -= 1
        for r, d in proportion.loc[proportion_bad].items():
            s, *n = r if isinstance(r, tuple) else (r,)
            err_report[s].setdefault(self.__report_atoms(n), []).append(self.errors['stp'] % (d * 100 - 100))
        for r, d in trust.items():
            s, *n = r if isinstance(r, tuple) else (r,)
            trust_report[s][self.__report_atoms(n)] = self.trust_desc[d]
        for (r, av), sg in zip(avg_all.items(), sigma_all.loc[avg_all.index]):
            s, *n = r if isinstance(r, tuple) else (r,)
            res_report[s][self.__report_atoms(n)] = '%.2f ± %.2f' % (av, sg)
        # assemble the per-structure report payloads
        report = []
        for s, res_val in res_report.items():
            tmp = []
            for atoms, value in res_val.items():
                tmp.append(dict(key='Predicted value ± sigma%s%s' % ((self.__units and ' (%s)' % self.__units or ''),
                                                                     atoms), value=value, type=ResultType.TEXT))
                tmp.append(dict(key='Trust of prediction%s' % atoms,
                                value=trust_report[s][atoms], type=ResultType.TEXT))
                tmp.append(dict(key='Distrust reason%s' % atoms, value='; '.join(err_report[s].get(atoms, [])),
                                type=ResultType.TEXT))
            report.append(dict(results=tmp))
        if len(structures) == len(report):
            return report
        return False
class ModelLoader(object):
    """Discovers ``*.model`` files and caches their metadata on disk.

    The cache (a dill file next to the models) avoids re-loading every
    model on startup; a model is re-read only when its size changed or,
    with ``fast_load=False``, when its md5 hash changed.
    """
    def __init__(self, fast_load=True):
        # fast_load skips md5 hashing and detects changes by size only
        self.__skip_md5 = fast_load
        self.__models_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'modelbuilder'))
        self.__cache_path = os.path.join(self.__models_path, '.cache')
        self.__models = self.__scan_models()
    @staticmethod
    def __md5(name):
        """Return the hex md5 digest of file *name*, read in 4 KiB chunks."""
        hash_md5 = hashlib.md5()
        with open(name, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()
    def __scan_models(self):
        """Scan the models directory, refresh the cache file, and return
        a name -> metadata dict for every loadable model."""
        if os.path.exists(self.__cache_path):
            # use a context manager so the cache handle is always closed
            with open(self.__cache_path, 'rb') as f:
                files = {x['file']: x for x in dill.load(f)}
        else:
            files = {}
        cache = {}
        for file in (os.path.join(self.__models_path, f) for f in os.listdir(self.__models_path)
                     if os.path.splitext(f)[-1] == '.model'):
            if file not in files or files[file]['size'] != os.path.getsize(file) or \
                    not self.__skip_md5 and self.__md5(file) != files[file]['hash']:
                try:
                    model = Model(file)
                    cache[model.get_name()] = dict(file=file, hash=self.__md5(file), example=model.get_example(),
                                                   description=model.get_description(),
                                                   size=os.path.getsize(file),
                                                   type=model.get_type(), name=model.get_name())
                except Exception:
                    # was a bare `except:`; narrowed so SystemExit and
                    # KeyboardInterrupt are no longer swallowed.  Files
                    # that fail to load are simply skipped.
                    pass
            else:
                cache[files[file]['name']] = files[file]
        with open(self.__cache_path, 'wb') as f:
            dill.dump(list(cache.values()), f)
        return cache
    def load_model(self, name):
        """Instantiate and return the named Model, or None if unknown."""
        if name in self.__models:
            return Model(self.__models[name]['file'])
    def get_models(self):
        """Return the metadata dicts of all discovered models."""
        return list(self.__models.values())
|
#print("hello")
def check(a):
    """Count palindromic substrings of *a* (length >= 2) and print the count.

    Prints "No Palindrome is present" when the count is zero.
    NOTE(review): the comparison is built from hand-rolled reverse slices
    with special cases for adjacent indices and i near 0 (a[j:i-1:-1]
    would wrap for i == 0); verify against a reference implementation
    before reusing.
    """
    ln = len(a)  # NOTE(review): unused
    pindex=0
    #print(a[::-1])
    for i in range(len(a)):
        for j in range(len(a)):
            if i<j:
                #print(a[i:j+1])
                front = a[i:j+1]
                #print(a[j:i-1:-1])
                # special-case: j directly follows i, or i-1 would make the
                # reverse slice wrap around the start of the string
                if i+1==j or i-1<=0:
                    if len(a[j::-1]) > len(a[i:j+1]):
                        # trim the reversed prefix to the candidate's length
                        op = len(a[j::-1]) - len(a[i:j+1])
                        str = (a[j::-1])  # NOTE(review): shadows the builtin `str`
                        #print(str[:-op])
                        back = str[:-op]
                        if front == back:
                            pindex = pindex+1
                    else:
                        back = a[j::-1]
                        if front == back:
                            pindex = pindex + 1
                        #print(a[j::-1])
                else:
                    # general case: reversed slice of a[i..j]
                    back = a[j:i-1:-1]
                    if front == back:
                        pindex = pindex + 1
                    #print(a[j:i-1:-1])
        #print("-----")
    if pindex==0:
        print("No Palindrome is present")
    else:
        print(pindex)
check("abcbacba") |
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
from scipy.optimize import curve_fit
from scipy import stats
from uncertainties import ufloat
#Monozelle
# Terminal voltage vs. load current for the mono cell
# (columns: index, U in V, I in mA); convert current to ampere.
# NOTE(review): unpack='True' passes a (truthy) string; unpack=True is intended
n1, U1 , I1 = np.genfromtxt('data/klemmspann_belastungsstrom.txt', unpack='True')#U in V, I in mA
I1 = I1*10**(-3)
def f1(x, m, b):
    """Linear model U(I) = m*I + b for curve_fit.

    scipy.optimize.curve_fit calls the model as f(xdata, *params); the
    original signature named the first (x-data) parameter ``m``, which
    only worked because multiplication is commutative.  Positional
    behaviour is unchanged.
    """
    return m*x + b
# Linear regression of U_K over I_B (current back in mA for the plot axis);
# slope = internal resistance R_i, intercept = open-circuit voltage U_0.
params1,cov1 = curve_fit(f1,I1*10**3,U1)
errors1 = np.sqrt(np.diag(cov1))
print('Monozelle','\nParameter: R_i:',params1[0],'\pm',errors1[0],'V/A','\nU_0: ',params1[1],'\pm',errors1[1],'V')
I_mono_plot=np.linspace(28,75,1000)
plt.plot(I1*10**3,U1,'kx',label='Messwerte')
plt.plot(I_mono_plot,f1(I_mono_plot,*params1),label='Lineare Regression')
plt.xlabel(r'$I_B \:/\: \si{\milli\ampere}$')
plt.ylabel(r'$U_K \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/plot.pdf')
plt.close()
#Leistung der Monozelle
# Delivered power and effective load resistance from the measurements
P_exp = U1 * I1
R_eff = U1 / I1
# R=np.linspace(0,50,1000)
# P= 1.79**2 * R /(R+18)**2
def g(x, a, b):
    # NOTE(review): the fit parameters ``a`` and ``b`` are deliberately
    # ignored; the theoretical power curve P(R) = U0^2 * R / (R_i + R)^2
    # is evaluated with U0 and R_i hard-coded from the earlier regression,
    # so the curve_fit call below effectively just draws the theory curve.
    return (1.7870181508176133**2)*x/(18.054097333727366 + x)**2#*10**3
# Plot measured power against the (fixed) theoretical power curve
paramsP, covP = curve_fit(g, R_eff, P_exp)
Pplot = np.linspace(0, 50)
plt.ylim(0, 0.05)
plt.plot(R_eff,P_exp,'kx',label='Messwerte')
# plt.plot(R,P,label='Plot')
plt.plot(Pplot, g(Pplot, *paramsP) , 'b-', label='Theorie')
plt.xlabel(r'$R \:/\: \si{\ohm}$')
plt.ylabel(r'$P \:/\: \si{\watt}$')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/plot4.pdf')
plt.close()
print('Leistung Monozelle: \n',P_exp,'\nR_eff: \n', R_eff)
#Gegenspannung
# Counter-voltage measurement series (U in V, I in mA)
n4, U4 , I4 = np.genfromtxt('data/gegenspannung.txt', unpack='True')#U in V, I in mA
def f4(x, m, b):
    """Linear model for the counter-voltage fit (see f1: the original
    named the x-data parameter ``m``; positional behaviour unchanged)."""
    return m*x + b
# Linear regression for the counter-voltage series (R_i printed in V/A)
params4,cov4 = curve_fit(f4,I4,U4)
errors4 = np.sqrt(np.diag(cov4))
print('Gegenspannung','\nParameter: R_i:',params4[0]*1000,'\pm',errors4[0]*1000,'V/A','\nU_0: ',params4[1],'\pm',errors4[1],'V')
I_gegen_plot=np.linspace(27,86,1000)
plt.plot(I4,U4,'kx',label='Messwerte')
plt.plot(I_gegen_plot,f4(I_gegen_plot,*params4),label='Lineare Regression')
plt.xlabel(r'$I_B \:/\: \si{\milli\ampere}$')
plt.ylabel(r'$U_K \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/plot3.pdf')
plt.close()
#Rechteckspannung
# Square-wave source measurement series (U in V, I in mA)
n2, U2 , I2 = np.genfromtxt('data/Rechteckspannung.txt', unpack='True')#U in V, I in mA
def f2(x, m, b):
    """Descending linear model -m*x + b for the square-wave fit (see f1:
    the original named the x-data parameter ``m``; positional behaviour
    unchanged because the product is commutative)."""
    return -m*x + b
# Linear regression for the square-wave series
params2,cov2 = curve_fit(f2,I2,U2)
errors2 = np.sqrt(np.diag(cov2))
print('Rechteck','\nParameter: R_i:',params2[0]*1000,'\pm',errors2[0]*1000,'V/A','\nU_0: ',params2[1],'\pm',errors2[1],'V')
I_rechteck_plot=np.linspace(0.6,0.95,1000)
plt.plot(I2,U2,'kx',label='Messwerte')
plt.plot(I_rechteck_plot,f2(I_rechteck_plot,*params2),label='Lineare Regression')
plt.xlabel(r'$I_B \:/\: \si{\milli\ampere}$')
plt.ylabel(r'$U_K \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/plot1.pdf')
plt.close()
#Sinusspannung
# Sine source measurement series (U in V, I in mA)
n3, U3 , I3 = np.genfromtxt('data/Sinusspannung.txt', unpack='True')#U in V, I in mA
def f3(x, m, b):
    """Descending linear model -m*x + b for the sine-source fit (see f1:
    the original named the x-data parameter ``m``; positional behaviour
    unchanged)."""
    return -m*x + b
# Linear regression for the sine-source series
params3,cov3 = curve_fit(f3,I3,U3)
errors3 = np.sqrt(np.diag(cov3))
print('Sinus','\nParameter: R_i:',params3[0]*1000,'\pm',errors3[0]*1000,'V/A','\nU_0: ',params3[1],'\pm',errors3[1],'V')
I_sinus_plot=np.linspace(0,0.75,1000)
plt.plot(I3,U3,'kx',label='Messwerte')
plt.plot(I_sinus_plot,f3(I_sinus_plot,*params3),label='Lineare Regression')
plt.xlabel(r'$I_B \:/\: \si{\milli\ampere}$')
plt.ylabel(r'$U_K \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.grid(True)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/plot2.pdf')
plt.close()
|
from collections import deque
from abc import ABC, abstractmethod
class Scheduler(ABC):
    """
    Describes a scheduling algorithm and keeps track of the statistics
    concerned for evaluation.
    """
    # Printed when schedule() is invoked on a non-fresh internal state
    msg_warn = 'Scheduler internal state not fresh - are you sure you \
performed a reset?'
    def __init__(self):
        # Number of processes seen by the last schedule() call
        self.processes = 0
        # Simulated clock (in ticks)
        self.current_time = 0
        # Accumulated ready-queue length over time (total waiting)
        self.waiting_time = 0
        # Ready Queue - can be any kind of queue
        self.q = deque()
        # Current running task
        self.active = None
        # Ordered tasks
        self.ordered = deque()
    def schedule(self, processes):
        """
        Given processes, schedule the order for processes to run. To be
        implemented as a concrete method by child classes.
        Returns a list of tuples: (time, Process)
        """
        assert isinstance(processes, list)
        if self.processes > 0 or self.current_time > 0 or self.waiting_time > 0:
            print(self.msg_warn)
        self.processes = len(processes)
        self.ordered, res = deque(processes), []
        # Simulate until no job is queued, pending or running
        while self.q or self.ordered or self.active:
            self.enqueue_new_jobs()
            if self.timer_interrupt():
                prev = self.active
                process = self.perform_schedule()
                # Record (time, pid) only when a different process is dispatched
                if process and process != prev:
                    res += [(self.current_time, process.id)]
            self.step()
        return res
    def reset(self):
        """Resets the scheduler's internal state."""
        self.processes = 0
        self.current_time = 0
        self.waiting_time = 0
        self.q = deque()
        self.active = None
        self.ordered = deque()
    def step(self):
        """Performs a single step in time."""
        # Increment time
        self.current_time += 1
        # Decrement burst time for the active task (if there is one)
        if self.active:
            self.active.burst_time -= 1
        # Update waiting time
        self.waiting_time += len(self.q)
    def enqueue_new_jobs(self):
        """Enqueues new jobs that just came in into the ready queue."""
        # `ordered` is assumed sorted by arrive_time, so only the head
        # needs to be checked each tick
        while self.ordered and self.ordered[0].arrive_time == self.current_time:
            self.q += [self.ordered.popleft()]
    def timer_interrupt(self):
        """
        Default only interrupts when a task has completed its execution
        time, or when a new tasks come into an idle CPU.
        """
        if not self.active:
            return True
        completed = self.active.burst_time == 0 if self.active else False
        return completed
    @abstractmethod
    def perform_schedule(self):
        """
        The main algorithm for scheduling should be implemented in this method.
        Returns a new process - the next process to be run.
        """
        pass
    @property
    def avg_waiting_time(self):
        """Returns the average waiting time of a schedule."""
        return round(float(self.waiting_time) / self.processes, 2)
    def __repr__(self):
        """Return the scheduler's statistics as a string."""
        s = ['# processes: {}'.format(self.processes),
             'current time: {}'.format(self.current_time),
             'waiting time: {}'.format(self.waiting_time),
             'avg waiting time: {}'.format(self.avg_waiting_time)]
        return '\n'.join(s)
|
#!/usr/bin/python2
from copy import copy
import math
import rospy
import baxter_interface
import actionlib
import sys
from baxter_interface import CHECK_VERSION
from baxter_interface import Gripper, Limb
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from sensor_msgs.msg import JointState
from std_msgs.msg import Header, Empty
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from baxter_core_msgs.srv import SolvePositionIK, SolvePositionIKRequest
from tf.transformations import quaternion_from_euler
from std_msgs.msg import (UInt16,)
class Trajectory(object):
    """Thin wrapper around the Baxter follow_joint_trajectory action
    client for one limb ('left' or 'right')."""
    def __init__(self, limb):
        ns = 'robot/limb/' + limb + '/'
        self._client = actionlib.SimpleActionClient(
            ns + "follow_joint_trajectory",
            FollowJointTrajectoryAction,
        )
        self._goal = FollowJointTrajectoryGoal()
        self._goal_time_tolerance = rospy.Time(0.1)
        self._goal.goal_time_tolerance = self._goal_time_tolerance
        # abort early if the trajectory action server is not running
        connected = self._client.wait_for_server(timeout=rospy.Duration(10.0))
        if not connected:
            rospy.logerr("Timed out waiting for Joint Trajectory"
                         " Action Server to connect. Start the action server"
                         " before running example.")
            rospy.signal_shutdown("Timed out waiting for Action Server")
            sys.exit(1)
        self.clear(limb)
    def add_point(self, positions, time):
        """Append a waypoint (joint positions reached *time* s after start)."""
        waypoint = JointTrajectoryPoint()
        waypoint.positions = copy(positions)
        waypoint.time_from_start = rospy.Duration(time)
        self._goal.trajectory.points.append(waypoint)
    def start(self):
        """Timestamp the goal and send it to the action server."""
        self._goal.trajectory.header.stamp = rospy.Time.now()
        self._client.send_goal(self._goal)
    def stop(self):
        """Cancel the in-flight trajectory goal."""
        self._client.cancel_goal()
    def wait(self, timeout=15.0):
        """Block until the trajectory finishes or *timeout* seconds pass."""
        self._client.wait_for_result(timeout=rospy.Duration(timeout))
    def result(self):
        """Return the action server's result for the last goal."""
        return self._client.get_result()
    def clear(self, limb):
        """Reset the goal and name the seven joints of *limb*."""
        self._goal = FollowJointTrajectoryGoal()
        self._goal.goal_time_tolerance = self._goal_time_tolerance
        suffixes = ['s0', 's1', 'e0', 'e1', 'w0', 'w1', 'w2']
        self._goal.trajectory.joint_names = [limb + '_' + j for j in suffixes]
global right_gripper
global right_limb
if __name__ == '__main__':
rospy.init_node("move_test")
ns = "ExternalTools/right/PositionKinematicsNode/IKService"
rospy.wait_for_message("/robot/sim/started", Empty)
iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
global right_gripper
pub_rate = rospy.Publisher('robot/joint_state_publish_rate',
UInt16, queue_size=10)
left_arm = baxter_interface.limb.Limb("left")
right_arm = baxter_interface.limb.Limb("right")
left_joint_names = left_arm.joint_names()
right_joint_names = right_arm.joint_names()
_rate = 500.0
#rospy.sleep(rospy.Duration(5,0))
rs = baxter_interface.RobotEnable(CHECK_VERSION)
rs.enable()
right_gripper = Gripper('right');
right_limb = Limb('right');
right_gripper.calibrate()
hdr = Header(stamp=rospy.Time.now(), frame_id='base')
print "opening gripper"
right_gripper.open()
current_angles = [right_limb.joint_angle(joint) for joint in right_limb.joint_names()]
orient_quaternion_components = quaternion_from_euler(math.pi, 0,math.pi/2)
orient_down = Quaternion()
orient_down.x = orient_quaternion_components[0]
orient_down.y = orient_quaternion_components[1]
orient_down.z = orient_quaternion_components[2]
orient_down.w = orient_quaternion_components[3]
twist_quaternion_components = quaternion_from_euler(math.pi, 0, 0)
twist_down = Quaternion()
twist_down.x = twist_quaternion_components[0]
twist_down.y = twist_quaternion_components[1]
twist_down.z = twist_quaternion_components[2]
twist_down.w = twist_quaternion_components[3]
highPose = PoseStamped(header=hdr, pose=Pose(position=Point(0, -0.428, -0.57), orientation=orient_down))
gripPose = PoseStamped(header=hdr, pose=Pose(position=Point(0, -0.435, -0.71), orientation=orient_down))
liftPose = PoseStamped(header=hdr, pose=Pose(position=Point(0, -0.61, -0.5), orientation=twist_down))
ikreq = SolvePositionIKRequest()
ikreq.pose_stamp.append(highPose)
ikreq.pose_stamp.append(gripPose)
ikreq.pose_stamp.append(liftPose)
seedstate = JointState()
seedstate.name=('right_e0', 'right_e1', 'right_s0', 'right_s1', 'right_w0', 'right_w1', 'right_w2')
seedstate.position=current_angles
#ikreq.seed_angles.append(seedstate)
#ikreq.seed_angles.append(seedstate)
try:
rospy.wait_for_service(ns, 5.0)
resp = iksvc(ikreq)
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed to IK service: %s" % (e))
print "Service call failed"
#print resp
# S0 S1 E0 E1 W0 W1 W2
reset_angles = [-0.2731084643631423, 1.047000013603367, -0.0030268105104838128, 0.4970772672879358, -0.08320234128402682, 0.026794408527332614, 0.026429631701369694]
#reset_angles = [-0.2731084643631423, 1, -0.0030268105104838128, 0.4970772672879358, -0.08320234128402682, 0.026794408527332614, 0.026429631701369694]
traj = Trajectory('right')
traj.add_point(current_angles, 0.0)
traj.add_point(resp.joints[0].position, 3.0)
traj.add_point(resp.joints[1].position, 6.0)
traj.start()
traj.wait(20.0)
rospy.sleep(2)
#print(current_angles)
print("object grabbed")
right_gripper.close()
current_angles = [right_limb.joint_angle(joint) for joint in right_limb.joint_names()]
traj.clear('right')
traj.add_point(current_angles, 0)
traj.add_point(resp.joints[2].position, 4)
# traj.start()
# traj.wait(5)
print('Object picked up')
print(rospy.get_rostime().secs)
#Get the arm ready for launch
traj.clear('right')
current_angles = [right_limb.joint_angle(joint) for joint in right_limb.joint_names()]
launch_ready = current_angles
print(current_angles)
launch_ready[1] = 0
launch_ready[2] = 0
launch_ready[3] = math.pi/2
launch_ready[4] = 0
launch_ready[5] = 0
launch_ready[6] = math.pi/2
traj.add_point(current_angles)
traj.add_point(launch_ready)
rospy.sleep(5)
# print(launch_ready)
# traj.add_point(current_angles, 0.0)
# traj.add_point(launch_ready, 10.0) # Go to launch prep slow for now
# launch_ready[1] = -0.3
# traj.add_point(launch_ready, 15.0)
# launch_ready[1] = 0
# traj.add_point(launch_ready, 20)
# traj.start()
# print(traj.result())
# traj.wait(25.0)
# print('At launch point')
# print(rospy.get_rostime().secs)
'''traj.clear('right')
current_angles = [right_limb.joint_angle(joint) for joint in right_limb.joint_names()]
launch_end = current_angles
launch_end[3] = math.pi/2-(0.9996*0.5)
launch_end[5] = (0.9996*0.5)
launch_end = [0, 0, 0, math.pi/2-(0.9996*0.5), math.pi/2-(0.9996), 0, 0]
traj.add_point(current_angles, 0.0)
traj.add_point(launch_end, 10.0) # Launch fast?
print('Launching in...')
for i in range(0,5):
print(5-i)
rospy.sleep(1.0)
print('Launching')
traj.start()
traj.wait(20)
right_gripper.open()'''
'''print('Launching in...')
for i in range(0,5):
print(5-i)
rospy.sleep(1.0)
print('Launching')'''
# Set the control mode
print("Getting robot state...")
# Set joint state publishing to 500Hz
pub_rate.publish(_rate)
# Set the arms to the neutral position
#left_arm.move_to_neutral()
#right_arm.move_to_neutral()
rate = rospy.Rate(_rate)
# Make function for velocities
def get_v_function(A_params):
def v_func(t):
return -1*(A_params[1] + 2*A_params[2]*t + 3*A_params[3]*t*t)
return v_func
#rjoint_vels = dict([(joint, 0.0) for i, joint in enumerate(right_joint_names)])
#ljoint_vels = dict([(joint, 0.0) for i, joint in enumerate(left_joint_names)])
#print(rjoint_vels)
rjoint_vels = {'right_e1':0, 'right_w1':0}
#rjoint_vels['right_e1'] = -1.75
#rjoint_vels['right_w1'] = -3.5
my_A_params = [0, 0, 3157729/999600, 0]
rospy.sleep(5)
print('Starting velocities')
start = rospy.Time.now().to_sec()
launchtime = 0.556
e1_fun = get_v_function(my_A_params)
w1_fun = get_v_function(my_A_params)
while rospy.Time.now().to_sec()-start < launchtime:
elapsed = rospy.Time.now().to_sec() - start
pub_rate.publish(_rate)
rjoint_vels['right_e1'] = 2*e1_fun(elapsed)
rjoint_vels['right_w1'] = 4*w1_fun(elapsed)
right_arm.set_joint_velocities(rjoint_vels)
rate.sleep()
right_gripper.open()
print('Gripper released')
# Spin the velocities at zero for a few seconds to help release
rjoint_vels = dict([(joint, 0.0) for i, joint in enumerate(right_joint_names)])
spintime = 1.0
print('Spin time')
while rospy.Time.now().to_sec()-start < spintime:
pub_rate.publish(_rate)
right_arm.set_joint_velocities(rjoint_vels)
rate.sleep()
print('Spin done')
|
from logger import Logger
from flask import Flask, render_template, request, make_response, Response, jsonify
import datetime
import json
import os
import shutil
# Application-wide logger and Flask app singletons
logger = Logger().get_logger()
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the chat page."""
    return render_template('index.html')
@app.route("/reset", methods=["POST"])
def reset():
    """Clear the stored chat log and recreate the download directory.

    Removed the stray ``os.defpath`` statement from the original -- it
    was a no-op attribute access with no effect.
    """
    with open('static/json/text_data.json', 'w') as file:
        file.write("")
    shutil.rmtree("./static/download")
    os.mkdir("./static/download")
    return ""
@app.route("/download/<file_name>", methods=["POST"])
def download(file_name):
    """Render the chat log as "<name>:<text>" lines and return the bytes.

    Fixes over the original: all file handles are closed via context
    managers (``open(...).read()`` leaked a handle) and the loop variable
    no longer shadows the builtin ``list``.
    NOTE(review): the transcript is opened in append mode, so repeated
    calls with the same *file_name* duplicate lines -- this matches the
    original behaviour.
    """
    messages = []
    # the JSON file may be empty (e.g. right after /reset); json.load
    # would fail on an empty file, so check the size first
    with open('static/json/text_data.json', 'r') as f:
        f.seek(0, 2)
        if f.tell() != 0:
            f.seek(0)
            messages = json.load(f)
    txt_path = 'static/download/' + file_name + '.txt'
    with open(txt_path, 'a', encoding='utf-8') as f:
        for entry in messages:
            f.write(entry['name'] + ":" + entry['text'] + "\n")
    # reopen in binary so the client gets the exact bytes
    with open(txt_path, "rb") as f:
        return f.read()
@app.route("/interval", methods=["POST"])
def interval():
    """Return the full chat log as a JSON response ([] when the log is empty)."""
    with open('static/json/text_data.json', 'r') as f:
        # an empty file is not valid JSON; detect it by seeking to the end
        f.seek(0, 2)
        if f.tell() == 0:
            payload = []
        else:
            f.seek(0)
            payload = json.load(f)
    return make_response(json.dumps(payload, ensure_ascii=False))
@app.route("/post", methods=["POST"])
def post():
    """Append one chat message to the JSON log and return the sorted log.

    The log is a JSON array on disk; new entries are spliced in before
    the closing ']' to avoid rewriting the whole file.  Fixes over the
    original: the re-read file handle is now closed (it was leaked) and
    the debug ``print`` of the whole log is removed.
    """
    if request.method == "POST":
        name = request.form['name']
        text = request.form['text']
        year = int(request.form['year'])
        month = int(request.form['month'])
        day = int(request.form['day'])
        hour = int(request.form['hour'])
        minute = int(request.form['minute'])
        second = int(request.form['second'])
        millisecond = int(request.form['millisecond'])
        # datetime takes microseconds, hence the * 1000
        time = datetime.datetime(year, month, day, hour, minute, second, millisecond * 1000).strftime("%Y-%m-%d %H:%M:%S.%f")
        write_data = {
            'name' : "Nameless" if name == "" else name,
            'text' : text,
            'time' : time
        }
        with open('static/json/text_data.json', 'ab+') as f:
            f.seek(0, 2)
            if f.tell() == 0:
                # empty file: start a fresh JSON array
                f.write(json.dumps([write_data]).encode())
            else:
                # splice the new entry in before the trailing ']'
                f.seek(-1, 2)
                f.truncate()
                f.write(' , '.encode())
                f.write(json.dumps(write_data).encode())
                f.write(']'.encode())
        # re-read and return the whole log, oldest message first
        with open('static/json/text_data.json', 'r') as f:
            json_read = json.load(f)
        json_read = sorted(json_read, key=lambda x: x['time'])
        return make_response(json.dumps(json_read, ensure_ascii=False))
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=8000, threaded=True) |
#!/usr/bin/python
import math
def prime_factors(n):
    """Print the prime factors of ``n``, or a message if ``n`` is prime.

    Python 2 code (print statements).  Trial division: repeatedly
    divides out the smallest factor.  ``limit`` is fixed from the
    original ``n`` (n / 2), so the loop can iterate past the point where
    ``n`` is fully reduced; the printed result is still correct, just
    not optimal.
    """
    try:
        assert n > 1
    except AssertionError:
        print 'Enter an integer > 1'
        return
    factors = []
    factor = 2
    limit = int(n / 2);
    while(factor <= limit):
        if(n % factor == 0):
            factors.append(factor)
            # divide the factor out and retry the same factor
            n = n / factor
        else:
            factor += 1
    if(len(factors) == 0):
        # no factor <= n/2 divides n, so n is prime
        print str(n) + ' is a prime number'
    else:
        print str(factors).strip('[]')
def main():
    """Interactive loop: read integers and print their prime factors.

    Enter 'q' to quit.  Python 2 (raw_input / print statement).
    """
    while(True):
        s = raw_input('Find the prime factors of (q for quit): ')
        if(s == 'q'):
            break
        try:
            n = int(s)
            prime_factors(n)
        except (ValueError, SyntaxError):
            print 'Enter a valid integer'
if(__name__ == '__main__'):
    main()
|
# Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
import six
import collections
import re
import os
from .support import Status, Defaults, ResourceChanges, Priority
from .result import serializeValue, ChangeAware, Results, ResultsMap
from .util import (
registerClass,
lookupClass,
loadModule,
validateSchema,
findSchemaErrors,
UnfurlError,
UnfurlTaskError,
UnfurlAddingResourceError,
filterEnv,
toEnum,
wrapSensitiveValue,
)
from . import merge
from .eval import Ref, mapValue, RefContext
from .runtime import RelationshipInstance, Operational
from .yamlloader import yaml
import logging
logger = logging.getLogger("unfurl.task")
class TaskRequest(object):
    """
    Yield this to run a child task. (see :py:meth:`unfurl.configurator.TaskView.createSubTask`)
    """
    def __init__(self, configSpec, resource, reason, persist=False, required=None):
        # a spec named "#error" is the sentinel for a request that could
        # not be built correctly
        self.error = configSpec.name == "#error"
        self.configSpec = configSpec
        self.target = resource
        self.reason = reason
        self.persist = persist
        self.required = required
class JobRequest(object):
    """A request to run a child job.

    A configurator yields an instance of this to have the runtime execute a
    new job against the given instances.
    """

    def __init__(self, resources, errors):
        self.instances = resources
        self.errors = errors

    def __repr__(self):
        return "JobRequest({})".format(self.instances)
# we want ConfigurationSpec to be standalone and easily serializable
class ConfigurationSpec(object):
    """Everything needed to run an operation: which configurator class to
    instantiate, its inputs, and its execution settings.

    Deliberately standalone (no references to live runtime objects) so it is
    easy to serialize.
    """

    @classmethod
    def getDefaults(cls):
        """Return the keyword arguments accepted by ``__init__`` mapped to
        their default values (used to recognize/merge implementation keys)."""
        return dict(
            className=None,
            majorVersion=0,
            minorVersion="",
            workflow=Defaults.workflow,
            timeout=None,
            operation_host=None,
            environment=None,
            inputs=None,
            inputSchema=None,
            preConditions=None,
            postConditions=None,
            primary=None,
            dependencies=None,
            outputs=None,
        )

    def __init__(
        self,
        name,
        operation,
        className=None,
        majorVersion=0,
        minorVersion="",
        workflow=Defaults.workflow,
        timeout=None,
        operation_host=None,
        environment=None,
        inputs=None,
        inputSchema=None,
        preConditions=None,
        postConditions=None,
        primary=None,
        dependencies=None,
        outputs=None,
    ):
        assert name and className, "missing required arguments"
        self.name = name
        self.operation = operation
        self.className = className
        self.majorVersion = majorVersion
        self.minorVersion = minorVersion
        self.workflow = workflow
        self.timeout = timeout
        # note: stored under different attribute names than the arguments
        self.operationHost = operation_host
        self.environment = environment
        self.inputs = inputs or {}
        self.inputSchema = inputSchema
        self.outputs = outputs or {}
        self.preConditions = preConditions
        self.postConditions = postConditions
        self.artifact = primary

    def findInvalidateInputs(self, inputs):
        """Validate ``inputs`` against ``inputSchema``; return a (possibly
        empty) list of schema errors."""
        if not self.inputSchema:
            return []
        return findSchemaErrors(serializeValue(inputs), self.inputSchema)

    # XXX same for postConditions
    def findInvalidPreconditions(self, target):
        """Validate the target's attributes against ``preConditions``."""
        if not self.preConditions:
            return []
        # XXX this should be like a Dependency object
        expanded = serializeValue(target.attributes)
        return findSchemaErrors(expanded, self.preConditions)

    def create(self):
        """Instantiate and return this spec's configurator.

        Raises:
          UnfurlError: if ``className`` can not be loaded.
        """
        klass = lookupClass(self.className)
        if not klass:
            raise UnfurlError("Could not load configurator %s" % self.className)
        else:
            return klass(self)

    def shouldRun(self):
        return Defaults.shouldRun

    def copy(self, **mods):
        """Return a copy of this spec with ``mods`` overriding its settings."""
        args = self.__dict__.copy()
        # __init__ stores two arguments under different attribute names; map
        # them back so ConfigurationSpec(**args) doesn't raise TypeError
        # (previously this method always failed on unexpected keywords).
        args["operation_host"] = args.pop("operationHost")
        args["primary"] = args.pop("artifact")
        args.update(mods)
        return ConfigurationSpec(**args)

    def __eq__(self, other):
        if not isinstance(other, ConfigurationSpec):
            return False
        # XXX3 add unit tests
        return (
            self.name == other.name
            and self.operation == other.operation
            and self.className == other.className
            and self.majorVersion == other.majorVersion
            and self.minorVersion == other.minorVersion
            and self.workflow == other.workflow
            and self.timeout == other.timeout
            and self.environment == other.environment
            and self.inputs == other.inputs
            and self.inputSchema == other.inputSchema
            and self.outputs == other.outputs
            and self.preConditions == other.preConditions
            and self.postConditions == other.postConditions
        )
class ConfiguratorResult(object):
    """
    ``modified`` indicates whether the underlying state of the configuration
    was changed, i.e. whether the system this configuration represents was
    physically altered.

    ``status`` reports the Status of the current configuration.
    """

    def __init__(
        self,
        success,
        modified,
        status=None,
        configChanged=None,
        result=None,
        outputs=None,
        exception=None,
    ):
        self.modified = modified
        # accept either a Status member or its name/value
        self.status = toEnum(Status, status)
        self.configChanged = configChanged
        self.result = result
        self.success = success
        self.outputs = outputs
        # bug fix: the ``exception`` argument was previously ignored
        # (this attribute was unconditionally set to None)
        self.exception = exception

    def __str__(self):
        # truncate potentially large results for display
        result = "" if self.result is None else str(self.result)[:240] + "..."
        return (
            "changes: "
            + (
                " ".join(
                    filter(
                        None,
                        [
                            self.success and "success",
                            self.modified and "modified",
                            self.status is not None and self.status.name,
                        ],
                    )
                )
                or "none"
            )
            + "\n "
            + result
        )
class AutoRegisterClass(type):
    # Metaclass that registers each new class with the class registry
    # (util.registerClass) so operation specs can refer to configurators by
    # a short alias: ``shortName`` when set, otherwise the class name with a
    # trailing "Configurator" stripped.
    def __new__(mcls, name, bases, dct):
        cls = type.__new__(mcls, name, bases, dct)
        if cls.shortName:
            name = cls.shortName
        elif name.endswith("Configurator"):
            name = name[: -len("Configurator")]
        if name:
            # register under both the fully-qualified name and the alias
            registerClass(cls.__module__ + "." + cls.__name__, cls, name)
        return cls
@six.add_metaclass(AutoRegisterClass)
class Configurator(object):
    """Base class for configurators, the objects that implement operations.

    Subclasses are automatically registered by name (see AutoRegisterClass).
    """

    shortName = None
    """shortName can be used to customize the "short name" of the configurator
    as an alternative to using the full name ("module.class") when setting the implementation on an operation.
    (Titlecase recommended)"""

    def __init__(self, configurationSpec):
        self.configSpec = configurationSpec

    def getGenerator(self, task):
        # Entry point used by the runtime to obtain the task's generator.
        return self.run(task)

    # yields a JobRequest, TaskRequest or a ConfiguratorResult
    def run(self, task):
        """
        This should perform the operation specified in the :class:`ConfigurationSpec`
        on the :obj:`task.target`.

        Args:
            task (:class:`TaskView`) The task currently running.

        Yields:
            Should yield either a :class:`JobRequest`, :class:`TaskRequest`
            or a :class:`ConfiguratorResult` when done
        """
        yield task.done(False)

    def canDryRun(self, task):
        """
        Returns whether this configurator can handle a dry run for the given task
        (and should check ``task.dryRun`` during ``run()``).

        Args:
            task (:obj:`TaskView`) The task about to be run.

        Returns:
            bool
        """
        return False

    def canRun(self, task):
        """
        Return whether or not the configurator can execute the given task.

        Does this configurator support the requested action and parameters
        and given the current state of the target instance?

        Args:
            task (:class:`TaskView`) The task that is about to be run.

        Returns:
            (bool or str): Should return True or a message describing why the task couldn't be run.
        """
        return True

    def shouldRun(self, task):
        """Does this configuration need to be run?"""
        return self.configSpec.shouldRun()

    # XXX3 should be called when checking dependencies
    # def checkConfigurationStatus(self, task):
    #     """Is this configuration still valid?"""
    #     return Status.ok
class TaskView(object):
    """The interface presented to configurators."""

    def __init__(self, manifest, configSpec, target, reason=None, dependencies=None):
        # public:
        self.configSpec = configSpec
        self.target = target
        self.reason = reason
        self.logger = logger
        self.cwd = os.path.abspath(self.target.baseDir)
        # private:
        self._errors = []  # UnfurlTaskError objects append themselves to this list
        self._inputs = None
        self._environ = None
        self._manifest = manifest
        self.messages = []
        self._addedResources = []
        self._dependenciesChanged = False
        self.dependencies = dependencies or []
        self._resourceChanges = ResourceChanges()
        # public:
        self.operationHost = self._findOperationHost(target, configSpec.operationHost)

    @property
    def inputs(self):
        """
        Exposes inputs and task settings as expression variables, so they can be accessed like:

        eval: $inputs::param

        or in jinja2 templates:

        {{ inputs.param }}
        """
        if self._inputs is None:
            inputs = self.configSpec.inputs.copy()
            relationship = isinstance(self.target, RelationshipInstance)
            if relationship:
                target = self.target.target
            else:
                target = self.target
            HOST = (target.parent or target).attributes
            ORCHESTRATOR = target.root.findInstanceOrExternal("localhost")
            vars = dict(
                inputs=inputs,
                task=self.getSettings(),
                connections=list(self._getConnections()),
                allConnections=self._getAllConnections(),
                SELF=self.target.attributes,
                HOST=HOST,
                ORCHESTRATOR=ORCHESTRATOR and ORCHESTRATOR.attributes or {},
                OPERATION_HOST=self.operationHost
                and self.operationHost.attributes
                or {},
            )
            if relationship:
                vars["SOURCE"] = self.target.source.attributes
                vars["TARGET"] = target.attributes
            # expose inputs lazily to allow self-reference
            ctx = RefContext(self.target, vars)
            if self.configSpec.artifact and self.configSpec.artifact.baseDir:
                ctx.baseDir = self.configSpec.artifact.baseDir
            self._inputs = ResultsMap(inputs, ctx)
        return self._inputs

    @property
    def vars(self):
        """
        A dictionary of the same variables that are available to expressions when evaluating inputs.
        """
        return self.inputs.context.vars

    def _getConnections(self):
        # Yield the connections the operation host has to each of the
        # target's ancestors (nearest ancestor first), falling back to
        # default relationship templates when none are found.
        seen = set()
        for parent in reversed(self.target.ancestors):
            # use reversed() so nearer overrides farther
            # XXX broken if multiple requirements point to same parent (e.g. dev and prod connections)
            # XXX test if operationHost is external (e.g locahost) getRequirements() matches local parent
            found = False
            if self.operationHost:
                for rel in self.operationHost.getRequirements(parent):
                    # examine both the relationship's properties and its capability's properties
                    found = True
                    if id(rel) not in seen:
                        seen.add(id(rel))
                        yield rel
            if not found:
                # not found, see if there's a default connection
                # XXX this should use the same relationship type as findConnection()
                for rel in parent.getDefaultRelationships():
                    if id(rel) not in seen:
                        seen.add(id(rel))
                        yield rel

    def _findRelationshipEnvVars(self):
        """
        We look for instances that the task's implementation might want to connect to
        (by following the target's hostedOn relationships)
        and check if the operation_host has any relationships with those instances too.
        If it does, collect any environment variables set by those connections.

        For example, consider an implementation whose target is a Kubernetes cluster hosted on GCP.
        The operation_host's connections to those instances might set KUBECONFIG and GOOGLE_APPLICATION_CREDENTIALS
        respectively and the implementation will probably need both those set when it executes.
        """
        env = {}
        t = lambda datatype: datatype.type == "unfurl.datatypes.EnvVar"
        # XXX broken if multiple requirements point to same parent (e.g. dev and prod connections)
        for rel in self._getConnections():
            env.update(rel.mergeProps(t))
        return env

    def getEnvironment(self, addOnly):
        """Build the environment for executing this task's implementation from
        the manifest's env rules, the spec's ``environment`` section, env vars
        contributed by connections, and (for relationship targets) the
        SOURCE/TARGET instance ids."""
        # note: inputs should be evaluated before environment
        # use merge.copy to preserve basedir
        rules = merge.copy(self.target.root.envRules)
        if self.configSpec.environment:
            rules.update(self.configSpec.environment)
        rules = serializeValue(
            mapValue(rules, self.inputs.context), resolveExternal=True
        )
        env = filterEnv(rules, addOnly=addOnly)
        relEnvVars = self._findRelationshipEnvVars()
        env.update(relEnvVars)
        targets = []
        if isinstance(self.target, RelationshipInstance):
            targets = [
                c.tosca_id
                for c in self.target.target.getCapabilities(
                    self.target.capability.template.name
                )
            ]
            env.update(
                dict(
                    TARGETS=",".join(targets),
                    TARGET=self.target.target.tosca_id,
                    SOURCES=",".join(
                        [
                            r.tosca_id
                            for r in self.target.source.getRequirements(
                                self.target.requirement.template.name
                            )
                        ]
                    ),
                    SOURCE=self.target.source.tosca_id,
                )
            )
        return env

    def getSettings(self):
        # Task settings exposed to expressions as the "task" variable.
        return dict(
            verbose=self.verbose,
            name=self.configSpec.name,
            dryrun=self.dryRun,
            workflow=self.configSpec.workflow,
            operation=self.configSpec.operation,
            timeout=self.configSpec.timeout,
            target=self.target.name,
            reason=self.reason,
            cwd=self.cwd,
        )

    def _findOperationHost(self, target, operation_host):
        # Resolve an operation_host keyword
        # (SELF, HOST, ORCHESTRATOR, SOURCE, TARGET or an instance name)
        # to an instance; defaults to the local orchestrator.
        if not operation_host or operation_host in ["localhost", "ORCHESTRATOR"]:
            return target.root.findInstanceOrExternal("localhost")
        if operation_host == "SELF":
            return target
        if operation_host == "HOST":
            # XXX should search all ancestors to find parent that can handle the given operation
            # e.g. ansible configurator should find ancestor compute node
            return target.parent
        if operation_host == "SOURCE":
            return target.source
        if operation_host == "TARGET":
            return target.target
        host = target.root.findInstanceOrExternal(operation_host)
        if host:
            return host
        raise UnfurlTaskError(self, "can not find operation_host: %s" % operation_host)

    def _getAllConnections(self):
        # Map requirement name -> relationships, operation host's first,
        # then any root requirements not already present.
        cons = {}
        if self.operationHost:
            for rel in self.operationHost.requirements:
                cons.setdefault(rel.name, []).append(rel)
        for rel in self.target.root.requirements:
            if rel.name not in cons:
                cons[rel.name] = [rel]
        return cons

    def findConnection(self, target, relation="tosca.relationships.ConnectsTo"):
        """Return the operation host's connection to ``target`` (or a default
        relationship template for it), or None."""
        connection = self.query(
            "$OPERATION_HOST::.requirements::*[.type=%s][.target=$target]" % relation,
            vars=dict(target=target),
        )
        # alternative query: [.type=unfurl.nodes.K8sCluster]::.capabilities::.relationships::[.type=unfurl.relationships.ConnectsTo.K8sCluster][.source=$OPERATION_HOST]
        if not connection:
            # no connection, see if there's a default relationship template defined for this target
            endpoints = target.getDefaultRelationships(relation)
            if endpoints:
                connection = endpoints[0]
        return connection

    def sensitive(self, value):
        """Mark the given value as sensitive. Sensitive values will be encrypted or redacted when output.

        Returns:
          sensitive: A subtype of `sensitive` appropriate for the value or the value itself if it can't be converted.
        """
        return wrapSensitiveValue(
            value, self.operationHost and self.operationHost.templar._loader._vault
        )

    def addMessage(self, message):
        self.messages.append(message)

    def findInstance(self, name):
        return self._manifest.getRootResource().findInstanceOrExternal(name)

    # XXX
    # def pending(self, modified=None, sleep=100, waitFor=None, outputs=None):
    #     """
    #     >>> yield task.pending(60)
    #
    #     set modified to True to advise that target has already been modified
    #
    #     outputs to share operation outputs so far
    #     """

    def done(
        self,
        success=None,
        modified=None,
        status=None,
        result=None,
        outputs=None,
        captureException=None,
    ):
        """`run()` should call this method and yield its return value before terminating.

        >>> yield task.done(True)

        Args:

          success (bool):  indicates if this operation completed without an error.
          modified (bool): (optional) indicates whether the physical instance was modified by this operation.
          status (Status): (optional) should be set if the operation changed the operational status of the target instance.
                   If not specified, the runtime will update the instance status as needed, based
                   the operation performed and observed changes to the instance (attributes changed).
          result (dict):  (optional) A dictionary that will be serialized as YAML into the changelog, can contain any useful data about these operation.
          outputs (dict): (optional) Operation outputs, as specified in the topology template.

        Returns:
              :class:`ConfiguratorResult`
        """
        if success is None:
            success = not self._errors
        if isinstance(modified, Status):
            # allow done(status) shorthand: a Status passed as ``modified``
            status = modified
            modified = True
        kw = dict(result=result, outputs=outputs)
        if captureException is not None:
            logLevel = logging.DEBUG if success else logging.ERROR
            kw["exception"] = UnfurlTaskError(self, captureException, logLevel)
        return ConfiguratorResult(success, modified, status, **kw)

    # updates can be marked as dependencies (changes to dependencies changed) or required (error if changed)
    # configuration has cumulative set of changes made it to resources
    # updates update those changes
    # other configurations maybe modify those changes, triggering a configuration change
    def query(
        self,
        query,
        dependency=False,
        name=None,
        required=False,
        wantList=False,
        resolveExternal=True,
        strict=True,
        vars=None,
    ):
        """Evaluate the given expression; optionally record it as a dependency.
        Returns None (after recording a task error) if evaluation fails."""
        # XXX pass resolveExternal to context?
        try:
            result = Ref(query, vars=vars).resolve(
                self.inputs.context, wantList, strict
            )
        except Exception:
            # note: UnfurlTaskError logs and registers itself with this task
            UnfurlTaskError(
                self, "error while evaluating query: %s" % query, logging.WARNING
            )
            return None
        if dependency:
            self.addDependency(
                query, result, name=name, required=required, wantList=wantList
            )
        return result

    def addDependency(
        self,
        expr,
        expected=None,
        schema=None,
        name=None,
        required=False,
        wantList=False,
    ):
        """Record a runtime dependency of this configuration (see :class:`Dependency`)."""
        getter = getattr(expr, "asRef", None)
        if getter:
            # expr is a configuration or resource or ExternalValue
            expr = Ref(getter()).source
        dependency = Dependency(expr, expected, schema, name, required, wantList)
        self.dependencies.append(dependency)
        # NOTE(review): __init__ initializes ``_dependenciesChanged`` (with an
        # underscore) but this flag is set without one — confirm which is read.
        self.dependenciesChanged = True
        return dependency

    def removeDependency(self, name):
        """Remove and return the dependency with the given name, if any."""
        # ``dependencies`` is a list, so search it by the dependency's name.
        # (The previous implementation called list.pop(name, None), which
        # always raised TypeError — list.pop takes an index.)
        old = None
        for i, dep in enumerate(self.dependencies):
            if dep.name == name:
                old = self.dependencies.pop(i)
                self.dependenciesChanged = True
                break
        return old

    # def createConfigurationSpec(self, name, configSpec):
    #     if isinstance(configSpec, six.string_types):
    #       configSpec = yaml.load(configSpec)
    #     return self._manifest.loadConfigSpec(name, configSpec)

    def createSubTask(
        self, operation, resource=None, inputs=None, persist=False, required=False
    ):
        """Create a subtask that will be executed if yielded by `run()`

        Args:
          operation (str): The operation call (like `interface.operation`)
          resource (:class:`NodeInstance`) The current target if missing.

        Returns:
           :class:`TaskRequest`
        """
        if resource is None:
            resource = self.target
        if inputs is None:
            inputs = self.configSpec.inputs
        if isinstance(operation, six.string_types):
            taskRequest = self.job.plan.createTaskRequest(
                operation, resource, "for subtask: " + self.configSpec.name, inputs
            )
            if taskRequest.error:
                return None
            else:
                taskRequest.persist = persist
                taskRequest.required = required
                return taskRequest
        # XXX:
        # # Configurations created by subtasks are transient insofar as the are not part of the spec,
        # # but they are recorded as part of the resource's configuration state.
        # # Marking as persistent or required will create a dependency on the new configuration.
        # if persist or required:
        #     expr = "::%s::.configurations::%s" % (configSpec.target, configSpec.name)
        #     self.addDependency(expr, required=required)

        # operation should be a ConfigurationSpec
        return TaskRequest(operation, resource, "subtask", persist, required)

    # # XXX how can we explicitly associate relations with target resources etc.?
    # # through capability attributes and dependencies/relationship attributes
    def updateResources(self, resources):
        """Notifies Unfurl of new or changes to instances made while the configurator was running.

        Operational status indicates if the instance currently exists or not.
        This will queue a new child job if needed.

        .. code-block:: YAML

          - name:     aNewResource
            template: aNodeTemplate
            parent:   HOST
            attributes:
               anAttribute: aValue
            readyState:
              local: ok
              state: state
          - name: SELF
            attributes:
              anAttribute: aNewValue

        Args:
          resources (list or str): Either a list or string that is parsed as YAML.

        Returns:
          :class:`JobRequest`: To run the job based on the supplied spec
            immediately, yield the returned JobRequest.
        """
        from .manifest import Manifest

        if isinstance(resources, six.string_types):
            try:
                resources = yaml.load(resources)
            except Exception:
                UnfurlTaskError(self, "unable to parse as YAML: %s" % resources)
                return None
        if isinstance(resources, collections.Mapping):
            resources = [resources]
        elif not isinstance(resources, collections.MutableSequence):
            UnfurlTaskError(
                self,
                "updateResources requires a list of updates, not a %s"
                % type(resources),
            )
            return None
        errors = []
        newResources = []
        newResourceSpecs = []
        for resourceSpec in resources:
            # we might have items that aren't resource specs
            if not isinstance(resourceSpec, collections.Mapping):
                continue
            originalResourceSpec = resourceSpec
            try:
                rname = resourceSpec.get("name", "SELF")
                if rname == ".self" or rname == "SELF":
                    existingResource = self.target
                else:
                    existingResource = self.findInstance(rname)
                if existingResource:
                    # update an existing instance's status and/or attributes
                    updated = False
                    # XXX2 if spec is defined (not just status), there should be a way to
                    # indicate this should replace an existing resource or throw an error
                    if "readyState" in resourceSpec:
                        # we need to set this explicitly for the attribute manager to track status
                        # XXX track all status attributes (esp. state and created) and remove this hack
                        operational = Manifest.loadStatus(resourceSpec)
                        if operational.localStatus is not None:
                            existingResource.localStatus = operational.localStatus
                        if operational.state is not None:
                            existingResource.state = operational.state
                        updated = True
                    attributes = resourceSpec.get("attributes")
                    if attributes:
                        for key, value in mapValue(
                            attributes, existingResource
                        ).items():
                            existingResource.attributes[key] = value
                            logger.debug(
                                "setting attribute %s with %s on %s",
                                key,
                                value,
                                existingResource.name,
                            )
                        updated = True
                    if updated:
                        logger.info("updating resources %s", existingResource.name)
                    continue
                # otherwise create a new instance
                pname = resourceSpec.get("parent")
                if pname in [".self", "SELF"]:
                    resourceSpec["parent"] = self.target.name
                elif pname == "HOST":
                    resourceSpec["parent"] = (
                        self.target.parent.name if self.target.parent else "root"
                    )
                if isinstance(resourceSpec.get("template"), dict):
                    # inline node template, add it to the spec
                    tname = resourceSpec["template"].pop("name", rname)
                    nodeSpec = self._manifest.tosca.addNodeTemplate(
                        tname, resourceSpec["template"]
                    )
                    resourceSpec["template"] = nodeSpec.name
                if resourceSpec.get("readyState") and "created" not in resourceSpec:
                    # setting "created" to the target's key indicates that
                    # the target is responsible for deletion
                    # if "created" is not defined, set it if readyState is set
                    resourceSpec["created"] = self.target.key
                if (
                    self.job
                    and "parent" not in resourceSpec
                    and "template" in resourceSpec
                ):
                    nodeSpec = self._manifest.tosca.getTemplate(
                        resourceSpec["template"]
                    )
                    parent = (
                        self.job.plan.findParentResource(nodeSpec) or self.target.root
                    )
                else:
                    parent = self.target.root
                # note: if resourceSpec[parent] is set it overrides the parent keyword
                resource = self._manifest.createNodeInstance(
                    rname, resourceSpec, parent=parent
                )
                # XXX wrong... these need to be operational instances
                # if resource.required or resourceSpec.get("dependent"):
                #    self.addDependency(resource, required=resource.required)
            except Exception:
                errors.append(UnfurlAddingResourceError(self, originalResourceSpec))
            else:
                newResourceSpecs.append(originalResourceSpec)
                newResources.append(resource)
        if newResourceSpecs:
            self._resourceChanges.addResources(newResourceSpecs)
            self._addedResources.extend(newResources)
            logger.info("add resources %s", newResources)
            jobRequest = JobRequest(newResources, errors)
            if self.job:
                self.job.jobRequestQueue.append(jobRequest)
            return jobRequest
        return None
class Dependency(Operational):
    """Represents a runtime dependency for a configuration.

    Dependencies are used to determine if a configuration needs re-run as follows:

    * Tosca `DependsOn`

    * They are dynamically created when evaluating and comparing the
      configuration spec's attributes with the previous values

    * Persistent dependencies can be created when the configurator
      invokes these apis: `createSubTask`, `updateResources`, `query`, `addDependency`
    """

    def __init__(
        self,
        expr,
        expected=None,
        schema=None,
        name=None,
        required=False,
        wantList=False,
    ):
        """
        if schema is not None, validate the result using schema
        if expected is not None, test that result equals expected
        otherwise test that result isn't empty and has not changed since the last attempt
        """
        # expected and schema are mutually exclusive validation modes
        assert not (expected and schema)
        self.expr = expr
        self.expected = expected
        self.schema = schema
        self._required = required
        self.name = name
        self.wantList = wantList

    @property
    def localStatus(self):
        # a dependency is always considered operational
        return Status.ok

    @property
    def priority(self):
        return Priority.required if self._required else Priority.optional

    def refresh(self, config):
        # Re-evaluate the expression and remember the current value as the
        # new expected value for future change detection.
        if self.expected is not None:
            changeId = config.changeId
            context = RefContext(
                config.target, dict(val=self.expected, changeId=changeId)
            )
            result = Ref(self.expr).resolve(context, wantList=self.wantList)
            self.expected = result

    @staticmethod
    def hasValueChanged(value, changeset):
        # Recursively check containers; ChangeAware values decide themselves.
        # NOTE(review): falls through (returning None, i.e. falsy) for
        # containers whose members are all unchanged — intentional?
        if isinstance(value, Results):
            return Dependency.hasValueChanged(value._attributes, changeset)
        elif isinstance(value, collections.Mapping):
            if any(Dependency.hasValueChanged(v, changeset) for v in value.values()):
                return True
        elif isinstance(value, (collections.MutableSequence, tuple)):
            if any(Dependency.hasValueChanged(v, changeset) for v in value):
                return True
        elif isinstance(value, ChangeAware):
            return value.hasChanged(changeset)
        else:
            return False

    def hasChanged(self, config):
        changeId = config.changeId
        context = RefContext(config.target, dict(val=self.expected, changeId=changeId))
        result = Ref(self.expr).resolveOne(context)  # resolve(context, self.wantList)
        if self.schema:
            # result isn't as expected, something changed
            if not validateSchema(result, self.schema):
                return False
        else:
            if self.expected is not None:
                expected = mapValue(self.expected, context)
                if result != expected:
                    logger.debug("hasChanged: %s != %s", result, expected)
                    return True
            elif not result:
                # if expression no longer true (e.g. a resource wasn't found), then treat dependency as changed
                return True
        if self.hasValueChanged(result, config):
            return True
        return False
def _setDefaultCommand(kw, implementation, inputs):
    """Treat ``implementation`` as a command line: fill ``kw`` with the
    configurator class name and inputs needed to run it — the shell
    configurator when running locally, the ansible configurator otherwise."""
    # is it a shell script or a command line?
    shell = inputs.get("shell")
    if shell is None:
        # shell mode unless the implementation is a single plain token
        # (no special shell characters)
        shell = not re.match(r"[\w.-]+\Z", implementation)
    operation_host = kw.get("operation_host")
    implementation = implementation.lstrip()
    if not operation_host or operation_host == "localhost":
        className = "unfurl.configurators.shell.ShellConfigurator"
        if shell:
            shellArgs = dict(command=implementation)
        else:
            # a bare command is passed as an argv-style list
            shellArgs = dict(command=[implementation])
    else:
        className = "unfurl.configurators.ansible.AnsibleConfigurator"
        module = "shell" if shell else "command"
        playbookTask = dict(cmd=implementation)
        cwd = inputs.get("cwd")
        if cwd:
            playbookTask["chdir"] = cwd
        if shell and isinstance(shell, six.string_types):
            # a string-valued "shell" names the shell executable to use
            playbookTask["executable"] = shell
        shellArgs = dict(playbook=[{module: playbookTask}])
    kw["className"] = className
    if inputs:
        # user-supplied inputs override the generated arguments
        shellArgs.update(inputs)
    kw["inputs"] = shellArgs
def getConfigSpecArgsFromImplementation(iDef, inputs, template):
    """Build the keyword arguments for a ConfigurationSpec from an operation's
    ``implementation`` definition.

    ``implementation`` may be a mapping (primary artifact, dependencies and
    spec settings), an artifact or script name, a "module#Class" or
    registered class reference, or a plain shell command line.
    """
    # XXX template should be operation_host's template!
    implementation = iDef.implementation
    kw = dict(inputs=inputs, outputs=iDef.outputs)
    configSpecArgs = ConfigurationSpec.getDefaults()
    artifact = None
    if isinstance(implementation, dict):
        for name, value in implementation.items():
            if name == "primary":
                artifact = template.findOrCreateArtifact(value, path=iDef._source)
            elif name == "dependencies":
                kw[name] = [
                    template.findOrCreateArtifact(artifactTpl, path=iDef._source)
                    for artifactTpl in value
                ]
            elif name in configSpecArgs:
                # pass through any recognized ConfigurationSpec setting
                kw[name] = value
    else:
        # "either because it refers to a named artifact specified in the artifacts section of a type or template,
        # or because it represents the name of a script in the CSAR file that contains the definition."
        artifact = template.findOrCreateArtifact(implementation, path=iDef._source)
    kw["primary"] = artifact
    assert artifact or "className" in kw
    if "className" not in kw:
        if not artifact: # malformed implementation
            return None
        implementation = artifact.file
        try:
            # see if implementation looks like a python class
            if "#" in implementation:
                path, fragment = artifact.getPathAndFragment()
                mod = loadModule(path)
                kw["className"] = mod.__name__ + "." + fragment
                return kw
            elif lookupClass(implementation):
                kw["className"] = implementation
                return kw
        except:
            # fall through and treat it as a command line below
            pass
        # assume it's a command line
        logger.debug(
            "interpreting 'implementation' as a shell command: %s", implementation
        )
        _setDefaultCommand(kw, implementation, inputs)
    return kw
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from dazl.testing import SandboxLauncher, connect_with_new_party
import pytest
from .dars import DottedFields
@pytest.mark.asyncio
@pytest.mark.skip(
    "These tests are temporarily disabled because the new encoder does not support this."
)
async def test_record_dotted_fields_submit(sandbox: SandboxLauncher) -> None:
    # Create a contract whose nested record fields are addressed with
    # dotted names ("address.city"), then stream create events back and
    # verify exactly the one submitted contract is seen.
    async with connect_with_new_party(url=sandbox.url, dar=DottedFields) as p:
        await p.connection.create(
            "DottedFields:American",
            {
                "person": p.party,
                "address.address": "1 Test Place",
                "address.city": "Somewhere",
                "address.state": "ZZ",
                "address.zip": "99999",
            },
        )
        items = []
        async with p.connection.query("DottedFields:American") as stream:
            async for event in stream.creates():
                items.append(event)
        assert len(items) == 1
@pytest.mark.asyncio
@pytest.mark.skip(
    "These tests are temporarily disabled because the new encoder does not support this."
)
async def test_variant_dotted_fields_submit(sandbox: SandboxLauncher) -> None:
    # Same as the record test, but the dotted names traverse a variant
    # ("address.US...." / "address.UK....") before the record fields.
    async with connect_with_new_party(url=sandbox.url, dar=DottedFields) as p:
        await p.connection.create(
            "DottedFields:Person",
            {
                "person": p.party,
                "address.US.address": "1 Test Place",
                "address.US.city": "Somewhere",
                "address.US.state": "ZZ",
                "address.US.zip": "99999",
                "address.UK.address": "",
                "address.UK.locality": "",
                "address.UK.city": "",
                "address.UK.state": "",
                "address.UK.postcode": "",
            },
        )
        items = []
        async with p.connection.query("DottedFields:Person") as stream:
            async for event in stream.creates():
                items.append(event)
        assert len(items) == 1
|
def treColorazione(grafo):
    """Return a 3-coloring of ``grafo`` if one exists, otherwise an empty list.

    ``grafo`` is a symmetric adjacency list: grafo[i] lists the neighbours of
    node i.  A coloring assigns one of "r", "b", "v" to every node so that
    adjacent nodes never share a color.
    """

    def genera(sol, nodo):
        # Backtracking over nodes in index order.
        if nodo == len(grafo):  # leaf: every node has been colored
            return True
        # Colors still admissible for this node: those not already taken by
        # an (earlier, already colored) neighbour.
        colori = {"r", "b", "v"}
        for adiacente in grafo[nodo]:
            colori.discard(sol[adiacente])
        for colore in colori:  # if empty, this branch cannot be extended
            sol[nodo] = colore
            if genera(sol, nodo + 1):
                return True
            sol[nodo] = "-"  # backtrack
        return False

    sol = ["-" for _ in range(len(grafo))]
    # Use genera's return value instead of inspecting sol[-1]: the old check
    # raised IndexError for an empty graph (sol == []).
    return sol if genera(sol, 0) else []
|
from django.apps import AppConfig
class FlightDelayPredictionConfig(AppConfig):
    # Django application configuration; ``name`` must match the app package.
    name = 'flight_delay_prediction'
|
from django.urls import path
from . import views
# URL routes: an index page plus list/create/detail/edit/delete views
# for both "decade" and "fad" objects.
urlpatterns = [
    path('', views.index, name='index'),
    path('decades/', views.decade_list, name='decade_list'),
    path('fads/', views.fad_list, name='fad_list'),
    path('fads/new', views.fad_create, name='fad_create'),
    path('decades/new', views.decade_create, name='decade_create'),
    path('fads/<int:pk>', views.fad_detail, name='fad_detail'),
    path('decades/<int:pk>', views.decade_detail, name='decade_detail'),
    path('fads/<int:pk>/edit', views.fad_edit, name='fad_edit'),
    path('decades/<int:pk>/edit', views.decade_edit, name='decade_edit'),
    path('decades/<int:pk>/delete', views.decade_delete, name='decade_delete'),
    path('fads/<int:pk>/delete', views.fad_delete, name='fad_delete'),
] |
# -*- coding: utf-8 -*-
# @Time : 2019-12-27
# @Author : mizxc
# @Email : xiangxianjiao@163.com
import os
from flask import current_app, request, flash, render_template, redirect, url_for
from flask_login import login_required, current_user
from . import bpAdmin
from project.common.dataPreprocess import strLength
from project.model.photoAlbum import *
from project.common.filePreprocess import allowedImage, creatFileName, allowedFileSize, removeFile
from project.common.dataPreprocess import getPagingParameters
@bpAdmin.route("/photoAlbum")
@login_required
def photoAlbum():
    # Admin landing page: all albums ordered by their sort number, plus the
    # 20 most recently added photos.
    albums = Album.objects.order_by('+number')
    photos = Photo.objects.order_by('-id')[0:20]
    return render_template('admin/photoAlbum.html',albums=albums,photos=photos)
@bpAdmin.route("/photoAlbumAdd", methods=['POST'])
@login_required
def photoAlbumAdd():
    # Create a new album from the submitted form and append it to the
    # end of the sort order.
    title = request.form['title']
    isShow = request.form['isShow']
    introduction = request.form['introduction']
    # title is required, at most 60 characters
    if not strLength(title,1,60):
        flash(u'请输入60个字符内的相册名称!')
        return redirect(url_for('admin.photoAlbum'))
    if isShow == 'y':
        isShow = True
    else:
        isShow = False
    # introduction is optional, at most 1000 characters
    if introduction and not strLength(introduction,1,1000):
        flash(u'请输入1000个字符内的栏目介绍!')
        return redirect(url_for('admin.photoAlbum'))
    p = Album()
    p.title = title
    p.isShow = isShow
    # new albums go to the end of the ordering
    p.number = Album.objects.count()+1
    if introduction:p.introduction=introduction
    p.save()
    flash(u'相册创建成功!')
    return redirect(url_for('admin.photoAlbum'))
@bpAdmin.route("/photoAlbumEdit/<id>", methods=['GET','POST'])
@login_required
def photoAlbumEdit(id):
    # GET renders the edit form for the album; POST validates and saves
    # the submitted changes (same validation rules as photoAlbumAdd).
    p = Album.objects(id=id).first()
    if request.method == 'GET':
        return render_template('admin/photoAlbumEdit.html',p=p)
    if request.method == 'POST':
        title = request.form['title']
        isShow = request.form['isShow']
        introduction = request.form['introduction']
        # title is required, at most 60 characters
        if not strLength(title, 1, 60):
            flash(u'请输入60个字符内的相册名称!')
            return redirect(url_for('admin.photoAlbum'))
        if isShow == 'y':
            isShow = True
        else:
            isShow = False
        # introduction is optional, at most 1000 characters
        if introduction and not strLength(introduction, 1, 1000):
            flash(u'请输入1000个字符内的栏目介绍!')
            return redirect(url_for('admin.photoAlbum'))
        p.title = title
        p.isShow = isShow
        p.introduction = introduction
        p.save()
        flash(u'相册修改成功!')
        return redirect(url_for('admin.photoAlbum'))
@bpAdmin.route("/photoAlbumSort/<number>/<direction>", methods=['GET'])
@login_required
def photoAlbumSort(number, direction):
    """Swap an album's sort position with its neighbor.

    ``direction`` is "up" (swap with number-1) or "down" (swap with
    number+1); any other value, or a missing neighbor (already at the
    top/bottom), is a no-op redirect instead of a server error.
    """
    currentNumber = int(number)
    current = Album.objects(number=currentNumber).first()
    # renamed from ``next`` to avoid shadowing the builtin; also fixes a
    # NameError when ``direction`` was neither "up" nor "down"
    neighbor = None
    if direction == 'up':
        neighbor = Album.objects(number=currentNumber-1).first()
    if direction == 'down':
        neighbor = Album.objects(number=currentNumber+1).first()
    if current is None or neighbor is None:
        # boundary (no such neighbor) or unknown direction: nothing to swap
        return redirect(url_for('admin.photoAlbum'))
    current.number = neighbor.number
    current.save()
    neighbor.number = currentNumber
    neighbor.save()
    return redirect(url_for('admin.photoAlbum'))
@bpAdmin.route("/photoAlbumDelete/<id>", methods=['GET'])
@login_required
def photoAlbumDelete(id):
    pa = Album.objects(id=id).first()
    # an album that still contains photos cannot be deleted
    ps = Photo.objects(album=pa)
    if len(ps)>0:
        flash(u'该相册下包含有图像,不能删除,请先删除图像后,再来删除相册!')
        return redirect(url_for('admin.photoAlbum'))
    pa.delete()
    # renumber the remaining albums so sort numbers stay contiguous
    ps = Album.objects.order_by('+number')
    for index, p in enumerate(ps):
        p.number = index+1
        p.save()
    flash(u'相册删除成功!')
    return redirect(url_for('admin.photoAlbum'))
@bpAdmin.route("/photoAlbumManage/<id>")
@login_required
def photoAlbumManage(id):
    """List one album's photos: pinned (isTop) first, then newest first."""
    pa = Album.objects(id=id).first()
    ps = Photo.objects(album=pa).order_by('-isTop','-id')
    return render_template('admin/photoAlbumManage.html',pa=pa,ps=ps)
@bpAdmin.route("/photoAlbumPhotoAdd", methods=['POST'])
@login_required
def photoAlbumPhotoAdd():
    """Upload one image into an album and bump the album's photo count.

    Accepts png/jpg/gif up to 2MB.  The stored Photo keeps the original
    filename (truncated to 50 chars) as its title.
    """
    albumId = request.form['albumId']
    if not albumId:
        flash(u'请先添加相册!')
        return redirect(url_for('admin.photoAlbum'))
    pa = Album.objects(id=albumId).first()
    introduction = request.form['introduction']
    if len(introduction)>1000:
        flash(u'请输入1000个字符内的图片描述!')
        return redirect(url_for('admin.photoAlbum'))
    p = Photo()
    p.introduction = introduction
    photo = request.files.get('photo')
    # Validate the upload only after the other fields have passed
    photoPath = None
    if photo and allowedImage(photo.filename):
        # read() to measure the upload size, then rewind before saving
        if allowedFileSize(len(photo.read()), 2):
            photo.seek(0)
            p.title = photo.filename[:50]
            fileName = creatFileName(current_user.id, photo.filename)
            photo.save(os.path.join(current_app.config['UPLOAD_PHOTOALBUM_PATH'], fileName))
            # path stored relative to the static root for serving
            photoPath = current_app.config['UPLOAD_PATH_PHOTOALBUM_FOR_DB'] + '/' + fileName
        else:
            flash(u"请上传小于2M的图片!")
            return redirect(url_for('admin.photoAlbum'))
    else:
        flash(u"请上传png/jpg/gif图片")
        return redirect(url_for('admin.photoAlbum'))
    p.path = photoPath
    p.album = pa
    p.save()
    # keep the album's denormalized photo counter in sync
    pa.photoCount += 1
    pa.save()
    flash(u'图像添加成功!')
    return redirect(url_for('admin.photoAlbum'))
@bpAdmin.route("/photoAlbumPhotoDelete/<id>", methods=['GET'])
@login_required
def photoAlbumPhotoDelete(id):
    """Delete a photo document and its file, decrement the album count."""
    p = Photo.objects(id=id).first()
    albumId = p.album.id
    if p.path:
        removeFile(os.path.join(current_app.config['STATIC_PATH'], p.path))
    p.delete()
    flash(u'图像删除成功!')
    # NOTE(review): the in-memory document is still used after delete()
    # to reach the referenced Album — works with mongoengine references,
    # but fetching the Album before deleting would be clearer; confirm.
    p.album.photoCount -= 1
    p.album.save()
    return redirect(url_for('admin.photoAlbumManage', id=albumId))
@bpAdmin.route("/photoAlbumPhotoIsTop/<albumId>/<photoId>/<isTop>", methods=['GET'])
@login_required
def photoAlbumPhotoIsTop(albumId,photoId,isTop):
    """Pin ('y') or unpin ('n') a photo inside its album listing.

    Any other isTop value leaves the flag unchanged (still saves).
    """
    p = Photo.objects(id=photoId).first()
    if isTop == 'y':
        p.isTop = True
        flash(u'置顶成功!')
    elif isTop == 'n':
        p.isTop = False
        flash(u'取消置顶成功!')
    p.save()
    return redirect(url_for('admin.photoAlbumManage', id=albumId))
|
# -*- coding: utf_8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import argparse
import pickle
import nsml
import numpy as np
from nsml import DATASET_PATH
import keras
from keras.models import Sequential, Model
from keras.layers import Input, merge,concatenate, ZeroPadding2D, Dense, Dropout, Flatten, Activation
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import ReduceLROnPlateau
from keras import backend as K
from data_loader import *
#import keras_densenet.models
import keras.backend as K
from custom_layers import Scale
def DenseNet(nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, classes=1000, weights_path=None):
    '''Instantiate the DenseNet 121 architecture,
    # Arguments
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters
        reduction: reduction factor of transition blocks.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        classes: optional number of classes to classify images
        weights_path: path to pre-trained weights
    # Returns
        A Keras model instance.
    '''
    eps = 1.1e-5  # BatchNorm epsilon matching the original Caffe port
    # compute compression factor
    compression = 1.0 - reduction
    # Handle Dimension Ordering for different backends.
    # NOTE(review): K.image_dim_ordering() is deprecated in newer Keras
    # (replaced by K.image_data_format()) — confirm the pinned version.
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')
    # From architecture for ImageNet (Table 1 in the paper)
    nb_filter = 64
    nb_layers = [6,12,24,16] # For DenseNet-121
    # Initial convolution
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(nb_filter,( 7, 7), strides = (2,2), name='conv1', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
    # Add dense blocks, each followed by a compressing transition block
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx+2
        x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
        # Add transition_block
        x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)
    # Final dense block has no transition after it
    final_stage = stage + 1
    x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
    x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
    x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)
    x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    x = Dense(classes, name='fc6')(x)
    x = Activation('softmax', name='prob')(x)
    model = Model(img_input, x, name='densenet')
    if weights_path is not None:
        model.load_weights(weights_path)
    return model
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and option dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
        # Returns
            output tensor of the BN-ReLU-Conv(1x1) / BN-ReLU-Conv(3x3) pair
    '''
    eps = 1.1e-5
    # layer names encode (stage, branch) so pre-trained weights line up
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)
    # 1x1 Convolution (Bottleneck layer): widen to 4x growth rate
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Conv2D(inter_channel,( 1, 1), name=conv_name_base+'_x1', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # 3x3 Convolution (padded so spatial size is preserved)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Conv2D(nb_filter,( 3, 3), name=conv_name_base+'_x2', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
        # Returns
            output tensor, spatially halved by the 2x2 average pooling
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    # 1x1 conv compresses the channel count by the compression factor
    x = Conv2D(int(nb_filter * compression), (1, 1), name=conv_name_base, use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
    return x
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
        # Returns
            (concatenated output tensor, updated nb_filter)
    '''
    eps = 1.1e-5
    concat_feat = x
    for i in range(nb_layers):
        branch = i+1
        # each conv_block sees the concatenation of all previous outputs
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))
        if grow_nb_filters:
            # channel count grows by growth_rate per appended layer
            nb_filter += growth_rate
    return concat_feat, nb_filter
def get_categorical_accuracy_keras(y_true, y_pred):
    """Keras metric: fraction of samples whose argmax class matches."""
    return K.mean(K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)))
import numpy as np
def get_categorical_accuracy(y_true, y_pred):
    """NumPy twin of the Keras metric: fraction of rows whose argmax
    class agrees between *y_true* and *y_pred* (both shape (n, classes))."""
    matches = np.argmax(y_true, axis=1) == np.argmax(y_pred, axis=1)
    return matches.mean()
def bind_model(model):
    """Register save/load/infer hooks for *model* with nsml."""
    def save(dir_name):
        # persist weights only; the architecture is rebuilt from code
        os.makedirs(dir_name, exist_ok=True)
        model.save_weights(os.path.join(dir_name, 'model'))
        print('model saved!')
    def load(file_path):
        model.load_weights(file_path)
        print('model loaded!')
    def infer(queries, db):
        """Rank every reference image for each query by cosine similarity
        of features taken from the model's penultimate layer."""
        # Number of queries: 195
        # Number of references (DB): 1,127
        # Total (query + reference): 1,322
        queries, query_img, references, reference_img = preprocess(queries, db)
        print('test data load queries {} query_img {} references {} reference_img {}'.
              format(len(queries), len(query_img), len(references), len(reference_img)))
        queries = np.asarray(queries)
        query_img = np.asarray(query_img)
        references = np.asarray(references)
        reference_img = np.asarray(reference_img)
        # scale pixels to [0, 1] as during training
        query_img = query_img.astype('float32')
        query_img /= 255
        reference_img = reference_img.astype('float32')
        reference_img /= 255
        # feature extractor: output of the layer just before the softmax
        get_feature_layer = K.function([model.layers[0].input] + [K.learning_phase()], [model.layers[-2].output])
        print('inference start')
        # inference (learning_phase=0 -> test mode)
        query_vecs = get_feature_layer([query_img, 0])[0]
        # caching db output, db inference
        db_output = './db_infer.pkl'
        if os.path.exists(db_output):
            with open(db_output, 'rb') as f:
                reference_vecs = pickle.load(f)
        else:
            reference_vecs = get_feature_layer([reference_img, 0])[0]
            with open(db_output, 'wb') as f:
                pickle.dump(reference_vecs, f)
        # l2 normalization
        # NOTE(review): l2_normalize uses one global (Frobenius) norm for
        # the whole matrix rather than per-row norms — confirm this is
        # intended before trusting the cosine similarities below.
        query_vecs = l2_normalize(query_vecs)
        reference_vecs = l2_normalize(reference_vecs)
        # Calculate cosine similarity
        sim_matrix = np.dot(query_vecs, reference_vecs.T)
        retrieval_results = {}
        for (i, query) in enumerate(queries):
            # keys are bare file stems (path and extension stripped)
            query = query.split('/')[-1].split('.')[0]
            sim_list = zip(references, sim_matrix[i].tolist())
            sorted_sim_list = sorted(sim_list, key=lambda x: x[1], reverse=True)
            ranked_list = [k.split('/')[-1].split('.')[0] for (k, v) in sorted_sim_list] # ranked list
            retrieval_results[query] = ranked_list
        print('done')
        return list(zip(range(len(retrieval_results)), retrieval_results.items()))
    # DONOTCHANGE: They are reserved for nsml
    nsml.bind(save=save, load=load, infer=infer)
def l2_normalize(v):
    """Scale *v* by the inverse of its L2 norm; return *v* unchanged
    when the norm is zero.

    NOTE(review): np.linalg.norm on a 2-D array is the Frobenius norm,
    so a matrix of row vectors is divided by one shared factor rather
    than normalized per row — confirm this is intended by the caller.
    """
    magnitude = np.linalg.norm(v)
    return v if magnitude == 0 else v / magnitude
# data preprocess
def preprocess(queries, db):
    """Decode and resize every query/db image to 224x224 RGB.

    Returns (queries, query_img, db, reference_img) where the *_img
    lists hold the decoded arrays in the same order as the input paths.
    """
    img_size = (224, 224)

    def _load(path):
        # cv2 decodes to BGR; convert to RGB before resizing
        bgr = cv2.imread(path, 1)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        return cv2.resize(rgb, img_size)

    query_img = [_load(p) for p in queries]
    reference_img = [_load(p) for p in db]
    return queries, query_img, db, reference_img
if __name__ == '__main__':
    args = argparse.ArgumentParser()
    # hyperparameters
    args.add_argument('--epochs', type=int, default=5)
    args.add_argument('--batch_size', type=int, default=128)
    # DONOTCHANGE: They are reserved for nsml
    args.add_argument('--mode', type=str, default='train', help='submit일때 해당값이 test로 설정됩니다.')
    args.add_argument('--iteration', type=str, default='0', help='fork 명령어를 입력할때의 체크포인트로 설정됩니다. 체크포인트 옵션을 안주면 마지막 wall time 의 model 을 가져옵니다.')
    args.add_argument('--pause', type=int, default=0, help='model 을 load 할때 1로 설정됩니다.')
    config = args.parse_args()
    ## edit start
    ## end
    # training parameters
    # NOTE(review): --epochs is parsed but never used; the hard-coded
    # nb_epoch=700 drives the loop below — confirm this is intended.
    nb_epoch = 700
    batch_size = config.batch_size
    num_classes = 1000
    input_shape = (224, 224, 3)  # input image shape
    """ Model """
    model = DenseNet()
    model.summary()
    bind_model(model)
    if config.pause:
        # nsml takes over here when the session is resumed/submitted
        nsml.paused(scope=locals())
    bTrainmode = False
    if config.mode == 'train':
        bTrainmode = True
        """ Initiate RMSprop optimizer """
        opt = keras.optimizers.rmsprop(lr=0.00045, decay=1e-6)
        model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=[get_categorical_accuracy_keras])
        # model.compile(loss='categorical_crossentropy',
        #               optimizer=opt,
        #               metrics=['accuracy'])
        """ Load data """
        print('dataset path', DATASET_PATH)
        output_path = ['./img_list.pkl', './label_list.pkl']
        train_dataset_path = DATASET_PATH + '/train/train_data'
        print('train_dataset_path', train_dataset_path)
        if nsml.IS_ON_NSML:
            # Caching file
            nsml.cache(train_data_loader, data_path=train_dataset_path, img_size=input_shape[:2],
                       output_path=output_path)
        else:
            # When experimenting locally, supply the dataset's local path here.
            train_data_loader(train_dataset_path, input_shape[:2], output_path=output_path)
        with open(output_path[0], 'rb') as img_f:
            img_list = pickle.load(img_f)
        with open(output_path[1], 'rb') as label_f:
            label_list = pickle.load(label_f)
        # add grayscale copies of every image, stacked back to 3 channels
        gray = []
        for j in range(0, len(img_list)):
            img_gray = cv2.cvtColor(np.array(img_list[j]), cv2.COLOR_BGR2GRAY)
            img_gray = np.stack((img_gray, img_gray, img_gray), axis = -1)
            gray.append(img_gray)
        gray_train = np.asarray(gray)
        x_train = np.asarray(img_list)
        # x_train = np.concatenate((x_train,gray_train),axis = 0)
        labels = np.asarray(label_list)
        # labels = np.concatenate((labels, labels), axis =0)
        y_train = keras.utils.to_categorical(labels, num_classes=num_classes)
        x_train = x_train.astype('float32')
        x_train /= 255
        print(len(labels), 'train samples')
        # NOTE(review): the three ImageDataGenerators below are only
        # fit(); their flow() is never used, and xFlip/xRotate/xAug all
        # alias x_train, so no actual augmentation happens — confirm.
        flipdatagen = ImageDataGenerator(horizontal_flip = True, vertical_flip = True)
        xFlip_train = x_train
        flipdatagen.fit(xFlip_train)
        rotdatagen = ImageDataGenerator(rotation_range=90)
        xRotate_train = x_train
        rotdatagen.fit(xRotate_train)
        augdatagen = ImageDataGenerator(
            rotation_range=40,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest')
        xAug_train = x_train
        yAug_train = y_train
        augdatagen.fit(xAug_train)
        # triple the training set: original + aliased copy + grayscale.
        # NOTE(review): gray_train is never divided by 255, so the
        # grayscale third is on a 0-255 scale while the rest is 0-1 —
        # looks like a bug; confirm before training.
        x_train = np.concatenate((x_train,xAug_train),axis = 0)
        x_train = np.concatenate((x_train, gray_train), axis=0)
        y_train = np.concatenate((y_train,yAug_train),axis = 0)
        y_train = np.concatenate((y_train,yAug_train),axis = 0)
        print('y_train : ', len(y_train))
        """ Callback """
        monitor = 'get_categorical_accuracy_keras'
        reduce_lr = ReduceLROnPlateau(monitor=monitor, patience=3)
        """ Training loop """
        # one fit() call per epoch so nsml can report/checkpoint each one
        for epoch in range(nb_epoch):
            res = model.fit(x_train, y_train,
                            batch_size=batch_size,
                            initial_epoch=epoch,
                            epochs=epoch + 1,
                            callbacks=[reduce_lr],
                            verbose=1,
                            shuffle=True)
            print(res.history)
            train_loss, train_acc = res.history['loss'][0], res.history['get_categorical_accuracy_keras'][0]
            nsml.report(summary=True, epoch=epoch, epoch_total=nb_epoch, loss=train_loss, acc=train_acc)
            nsml.save(epoch)
|
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras import backend as K
'''
Loading the Classifier Model from the disk
'''
with open('classifier_model.json', 'r') as json_file:
    json_savedModel = json_file.read()
# load the model architecture
model = tf.keras.models.model_from_json(json_savedModel)
model.load_weights('classifier_weights.h5')
# recompile so the restored classifier is ready for predict/evaluate
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=opt,loss="categorical_crossentropy", metrics=["accuracy"])
'''
Loading the Localization Model from the disk
'''
with open('localization_model.json', 'r') as json_file:
    json_savedModel= json_file.read()
# load the model architecture
localize_model = tf.keras.models.model_from_json(json_savedModel)
localize_model.load_weights('localization_weights.h5')
localize_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
'''
Function to classify image as Tumor or Not and Localize in case of Tumor found
'''
def identify_tumor(file_path):
    """Classify the image at *file_path* as Tumor / No Tumor.

    Returns a dict with keys 'class' (label string) and 'probablity'
    (softmax score of the predicted class; key spelling kept because
    callers depend on it).  When class 1 (Tumor) is predicted, also
    writes the localization mask via localize_tumor().
    """
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)
    img = cv2.resize(img,(128,128))
    img = np.array(img)
    # classifier expects a [0, 1]-scaled 1x128x128x3 batch (BGR as read by cv2)
    img = img/255
    img = img.reshape((1, 128, 128, 3))
    predictions = model.predict(img)
    predicted = np.argmax(predictions[0])
    probablity = predictions[0][predicted]
    labels = {0: 'No Tumor', 1: 'Tumor'}
    result = {
        'class': labels[predicted],
        'probablity': probablity
    }
    if predicted==1:
        localize_tumor(file_path)
    return result
'''
Function to Localize Tumor in case of Tumor found
'''
def localize_tumor(file_path):
    """Predict a tumor segmentation mask and write it to mask_output.png.

    The localization model expects a [0, 1]-scaled 1x256x256x3 batch;
    its output is rescaled to 0-255 and saved as an 8-bit image in the
    current working directory (the file is overwritten on every call).
    """
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)
    img = cv2.resize(img,(256,256))
    img = np.array(img)
    img = img/255
    img = img.reshape((1, 256, 256, 3))
    predictions = localize_model.predict(img)
    # drop the batch dimension, scale back to pixel range
    img = np.squeeze(predictions[0])
    img = img*255
    img = img.astype('uint8')
    cv2.imwrite("mask_output.png", img)
|
from gtts import gTTS
from tempfile import TemporaryFile
import base64
class Tts:
    """Thin wrapper around gTTS that renders speech to bytes or base64."""

    def __init__(self, s, lang):
        """Prepare a TTS engine for text *s* in language *lang*."""
        self.tts = gTTS(s, lang)

    def to_bytes(self):
        """Render the speech and return the MP3 payload as bytes.

        Fix: the temporary file is now closed deterministically via a
        context manager — the original leaked the handle until GC.
        """
        with TemporaryFile() as temp:
            self.tts.write_to_fp(temp)
            temp.seek(0)
            return temp.read()

    def to_base64(self):
        """Return the MP3 payload base64-encoded (bytes)."""
        return base64.b64encode(self.to_bytes())
|
# coding: utf-8
import os, zipfile
import numpy as np
from PIL import Image
def crop_center(img, crop_size=128):
    """Scale *img* so its short side equals *crop_size*, then crop the centre.

    Accepts a PIL image or an ndarray and returns an ndarray.  When the
    crop box overflows the scaled image, PIL fills the overflow with 0.
    Note: Image.thumbnail only shrinks, so images smaller than
    *crop_size* are not upscaled.
    """
    if type(img) is np.ndarray:
        img = Image.fromarray(np.uint8(img))
    scale = crop_size / min(img.size)
    img.thumbnail([int(side * scale) for side in img.size])
    width, height = img.size
    left = (width - crop_size) / 2
    top = (height - crop_size) / 2
    img = img.crop((left, top, left + crop_size, top + crop_size))
    return np.array(img)
def img2gif(inputs, save_path, duration=60):
    """
    This function can create gif file.
    It is very useful to visualize the generated images by GANs etc.

    inputs: either a list/ndarray of image arrays, or a directory path
        (must end with a path separator — entries are joined by plain
        string concatenation).
    save_path: full output path ending in '.gif'.
    duration: per-frame display time in milliseconds.
    """
    if save_path[-4:] != ".gif":
        print("The parameter 'save_path' should be full path(including file name), and the extension should be '.gif'")
        return
    pil_imgs = []
    if type(inputs) in [list, np.ndarray]:
        for img in inputs:
            pil_imgs.append(Image.fromarray(np.uint8(img)))
    else:
        # os.listdir order is arbitrary; frames follow that order
        for f in os.listdir(inputs):
            pil_imgs.append(Image.open(inputs + f))
    # loop=0 means the GIF repeats forever
    pil_imgs[0].save(save_path, save_all=True, append_images=pil_imgs[1:], optimize=False, duration=duration, loop=0)
    print("Completed.")
def create_zip_files(input_path, save_path):
    """
    Zip every entry directly inside *input_path* into *save_path*.

    *input_path* must end with a path separator (entries are joined by
    plain concatenation) and *save_path* must end with ".zip".
    """
    if save_path[-4:] != ".zip":
        print("The parameter 'save_path' should be full path(including file name), and the extension should be '.zip'")
        return
    with zipfile.ZipFile(save_path, 'w', compression=zipfile.ZIP_DEFLATED) as archive:
        for entry in os.listdir(input_path):
            archive.write(input_path + entry)
|
"""
Collection of utilities for basic statistical distribution
transformations.
"""
import numpy as np
from scipy.special import erfinv
from pyDOE import lhs
from scipy.stats.distributions import norm, uniform
def design_lhs_exp(variables, maps, offsets=None, samples=int(1e4),
                   project_linear=True):
    """ Design an LHS experiment

    Draws a maximin Latin Hypercube sample for each variable, inverts it
    through the variable's distribution (uniform / normal / loguniform),
    and also returns the sample mapped into standardized coordinates via
    the transfer functions in ``maps``.

    NOTE(review): Python 2 module (print statements).  Each entry of
    ``variables`` appears to be a sequence whose element [0] is the
    variable name ('ln...'/'log...' marks log-space variables) and whose
    element [3] is (dist, a, b) — confirm against the caller.
    Returns (design, z_design), both transposed to shape
    (n_variables, samples).
    """
    design = lhs(len(variables), samples=samples, criterion='m', iterations=100)
    z_design = np.zeros_like(design)
    print "Computing LHS design..."
    if project_linear:
        print " using linear re-projection for log variables"
    else:
        print " using original variable coordinate"
    for i, v in enumerate(variables):
        dist, a, b = v[3]
        if project_linear: # Re-sample in linear space
            if v[0].startswith("ln"):
                ## 9/4/2014
                ## This is an experimental correction to re-project the
                ## logarithmic variables into their normal coordinate
                ## system. It should only effect the sampling, and hopefully
                ## improve it by forcing it to even things out over the
                ## actually range we care about
                a = np.exp(a)
                b = np.exp(b)
                offsets[i] = np.exp(offsets[i])
            elif v[0].startswith("log"):
                ## 10/26/2014
                ## In accordance with above, but for log10 vars
                a = 10.**a
                b = 10.**b
                offsets[i] = 10.**offsets[i]
        if offsets:
            ## These corrections with "offsets" re-center the interval
            ## so that the left endpoint is 0. I found that if arbitrary
            ## lower/upper limits were used, sometimes the PPF routines
            ## would really mess up in inverting the CDF.
            a, b = a-offsets[i], b-offsets[i]
        # invert the uniform LHS sample through the chosen distribution
        if dist == 'uniform':
            design[:, i] = uniform(a, b).ppf(design[:, i])
        elif dist == 'normal':
            design[:, i] = norm(a, b).ppf(design[:, i])
        elif dist == 'loguniform':
            design[:, i] = loguni_ppf(design[:, i], a, b)
        else:
            raise ValueError("no dist defined for %s" % dist)
        if offsets:
            ## Project back in to the correct limits
            design[:, i] += offsets[i]
            a, b = a+offsets[i], b+offsets[i]
        if project_linear:
            if v[0].startswith("ln"):
                ## 9/4/2014
                ## Second half of correction
                a = np.log(a)
                b = np.log(b)
                design[:, i] = np.log(design[:, i])
            elif v[0].startswith("log"):
                ## 10/26/2014
                a = np.log10(a)
                b = np.log10(b)
                design[:, i] = np.log10(design[:, i])
        # standardized coordinate for this variable
        z_design[:, i] = maps[i](design[:, i], a, b)
    design = design.T # in x-coords
    z_design = z_design.T
    return design, z_design
def map_transfer_fcns(dists, pce_directive, verbose=False):
maps = []
for dist in dists:
if verbose: print dist,
if "wiener" in 'pce_directive':
if dist == 'uniform':
fcn = uni_to_norm
elif dist == 'normal':
fcn = normal_to_standard
else:
raise ValueError("wiener + %s not implemented" % dist)
else:
if dist == 'uniform':
fcn = uni_to_uni
elif dist == "loguniform":
fcn = loguni_to_uni
elif dist == "normal":
fcn = normal_to_standard
else:
raise ValueError("askey + %s not implemented" % dist)
if verbose: print fcn.func_name
maps.append(fcn)
return maps
################
## UNIFORM -> ??
################
def uni_to_norm(x, a, b):
    """ Transform a uniform random variable to a standard (normal)
    random variable.

    Parameters
    ----------
    x : float
        coordinate in uniform variable space
    a, b : float
        lower and upper bounds of the uniform distribution

    Returns
    -------
    float, random variable in SRV space
    """
    quantile = (x - a) / (b - a)
    return np.sqrt(2) * erfinv(2. * quantile - 1.0)
def uni_to_uni(x, ai, bi, af=-1., bf=1.):
    """ Transform a uniform random variable to one with another set
    of lower/upper bounds.

    Parameters
    ----------
    x : float
        coordinate in original uniform variable space
    ai, bi : float
        lower and upper bounds of the original uniform distribution
    af, bf : float
        lower and upper bounds of the destination uniform distribution

    Returns
    -------
    float, random variable in new uniform distribution
    """
    fraction = (x - ai) / (bi - ai)
    return af + fraction * (bf - af)
##################
## NORMAL -> ??
##################
def normal_to_standard(x, mu, sigma):
    """ Transform a normal random variable to a standard random variable.

    Parameters
    ----------
    x : float
        coordinate in original normal variable space
    mu, sigma : float
        mean and std dev of original normal variable

    Returns
    -------
    float, the z-score (x - mu) / sigma
    """
    deviation = x - mu
    return deviation / sigma
##################
## LOGUNIFORM -> ??
##################
def loguni_to_uni(x, alpha, beta, a=-1., b=1.):
    """ Transform a loguniform random variable to a uniform one.

    Parameters
    ----------
    x : float
        coordinate in original loguniform variable space
    alpha, beta : float
        lower and upper bounds of the original loguniform distribution
    a, b : float
        lower and upper bounds of the destination uniform distribution

    Returns
    -------
    float, random variable in new uniform distribution

    Bug fix: the intercept C had its sign flipped
    ((a*log(beta) - b*log(alpha)) instead of (b*log(alpha) - a*log(beta))),
    so x == alpha mapped to -a and x == beta to -b.  With the corrected
    C below, x == alpha maps to a and x == beta to b, consistent with
    loguni_ppf being its inverse on [0, 1].
    """
    C = (b*np.log(alpha) - a*np.log(beta))/(b - a)
    D = (np.log(beta/alpha))/(b - a)
    return (1./D)*(np.log(x) - C)
def loguni_pdf(x, alpha, beta):
    """ PDF function for Loguniform distribution

    Parameters
    ----------
    x : float
        coordinate in loguniform variable space
    alpha, beta : float
        lower/upper bounds of loguniform span

    Returns
    -------
    float, PDF of the specified loguniform distribution evaluated at point
    """
    normaliser = np.log(beta / alpha)
    return 1. / x / normaliser
def loguni_cdf(x, alpha, beta):
    """ CDF function for Loguniform distribution

    Parameters
    ----------
    x : float
        coordinate in loguniform variable space
    alpha, beta : float
        lower/upper bounds of loguniform span

    Returns
    -------
    float, CDF of the specified loguniform distribution, evaluated
    from left-to-right over the real number line
    """
    span = np.log(beta / alpha)
    return np.log(x / alpha) / span
def loguni_ppf(q, alpha, beta):
    """ PPF function for Loguniform distribution

    Parameters
    ----------
    q : float
        quantile in [0, 1]
    alpha, beta : float
        lower/upper bounds of loguniform span

    Returns
    -------
    float, PPF of the specified loguniform distribution,
    """
    log_x = q * np.log(beta / alpha) + np.log(alpha)
    return np.exp(log_x)
|
__all__ = [
'ResetIssueActivity',
]
from gim.core.tasks.issue import IssueJob
class ResetIssueActivity(IssueJob):
    """Background job that rebuilds the cached activity of one issue."""
    queue_name = 'reset-issue-activity'

    def run(self, queue):
        """Update the issue's activity stream.

        Returns False when the issue no longer exists so the job is
        treated as done/cancelled instead of retried.
        """
        super(ResetIssueActivity, self).run(queue)
        try:
            self.object.activity.update()
        except self.model.DoesNotExist:
            # the issue vanished between scheduling and execution
            # self.status.hset(STATUSES.CANCELED)
            return False
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from ckeditor.fields import RichTextField
class Post(models.Model):
    """A blog post with an optional header image and rich-text body."""
    title = models.CharField(max_length=100)
    image = models.ImageField(null=True,blank=True,upload_to='MedBay/media')
    content = RichTextField(blank=True,null=True)
    date_posted = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # canonical URL; used by CreateView/UpdateView success redirects
        return reverse("post-detail", kwargs={"pk": self.pk})
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """Open a SQLite database and return (connection, cursor).

    Returns (None, None) when the connection cannot be established —
    the original printed the error and then crashed with an
    AttributeError by calling .cursor() on None.
    """
    try:
        conn = sqlite3.connect(db_file)
    except Error as e:
        print(e)
        return None, None
    return conn, conn.cursor()
def create_table(conn, cursor):
    """Create the ``accounts`` table if it does not exist yet."""
    ddl = """CREATE TABLE IF NOT EXISTS accounts (
                 id integer PRIMARY KEY,
                 account_number integer,
                 account_cards text NOT NULL,
                 account_balance integer
             );"""
    cursor.execute(ddl)
def save_account(conn, cursor, acc_num, acc_cards, acc_bal):
    """Insert one account row and commit; the card list is stored as a
    pipe-joined string.  Returns the new row's id."""
    joined_cards = "|".join(acc_cards)
    statement = '''INSERT INTO accounts(account_number,account_cards,account_balance)
              VALUES(?,?,?)
           '''
    cursor.execute(statement, (acc_num, joined_cards, acc_bal))
    conn.commit()
    return cursor.lastrowid
def get_all_accounts(conn, cursor):
    """Return every row of the accounts table as a list of tuples."""
    cursor.execute("SELECT * FROM accounts")
    return cursor.fetchall()
def get_account(conn, cursor, account_number):
    """Fetch all rows whose account_number matches.

    Fix: uses a bound parameter instead of string concatenation — the
    original was vulnerable to SQL injection via *account_number*.
    """
    cursor.execute("SELECT * FROM accounts WHERE account_number = ?",
                   (account_number,))
    return cursor.fetchall()
def update_account(conn, cursor, acc_num, acc_cards, acc_bal):
    """Overwrite the cards/balance of the account numbered *acc_num*.

    Bug fix: the original UPDATE was missing the commas between the SET
    assignments, so every call raised sqlite3.OperationalError.
    """
    sql = """UPDATE accounts
             SET account_number = ?,
                 account_cards = ?,
                 account_balance = ?
             WHERE account_number = ?"""
    acc_cards = "|".join(acc_cards)
    cursor.execute(sql, (acc_num, acc_cards, acc_bal, acc_num))
    conn.commit()
def delete_account(conn, cursor, account_number):
    """Remove every row with the given account number and commit.

    Bug fix: ``DELETE * FROM`` is not valid SQL and raised
    sqlite3.OperationalError; also switched to a bound parameter to
    remove the SQL-injection vector from string concatenation.
    """
    cursor.execute("DELETE FROM accounts WHERE account_number = ?",
                   (account_number,))
    conn.commit()
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.views.generic import View
from models import *
from cart import Cart
import json
from django.template import RequestContext
from robokassa.forms import RobokassaForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import forms
class MainView(View):
    """Abstract base view (Python 2 / old Django): subclasses call one
    of the set_params_* methods to choose the template (self.html) and
    context (self.render_dict) that get() renders."""
    render_dict = {}
    html = ''
    def __init__(self,request):
        self.request = request
    def set_params_home(self):
        """Set template and context for the Home page."""
        self.html = 'index.html'
        self.render_dict = {
            'regform':forms.RegForm
        }
    def set_params_productDetail(self, product_id):
        """Set template and context for the product-detail page."""
        self.html = 'item.html'
        self.render_dict = {
            'category_list': Category.objects.all(),
            'product': Product.objects.get(id=product_id),
        }
    def set_params_catalog(self):
        """Set template and context for the catalog page, building one
        filter block (title + value rows) per Filter object."""
        self.html = 'catalog.html'
        "список фильтр-блоков"
        filter_list = Filter.objects.all()
        filter_blocks = []
        for item in filter_list:
            "одна итерация задает параметры одного фильтр-блока"
            block_data = {
                'title': item.title,
                'values': [],
            }
            "выбираем все параметры"
            filter_value_list = FilterValue.objects.filter(filter=item)
            "Одна итерация задает параметры строки в фильтр-блоке"
            for value in filter_value_list:
                block_data['values'].append(value)
            filter_blocks.append(block_data)
        self.render_dict = {
            'category_list': Category.objects.all(),
            # 'product_list': Product.objects.all(),
            'filters': filter_blocks,
            'sorts': Sort.objects.all(),
        }
    def set_params_cart(self):
        """Set template and context for the cart page (Robokassa form)."""
        self.html = 'cart.html'
        form = RobokassaForm(initial={
            'OutSum': 200,
        })
        self.render_dict = {
            'cart_active': True,
            'form': form,
        }
    def get(self,request):
        # Render whatever the last set_params_* call configured.
        return render_to_response(
            self.html,
            self.render_dict,
            context_instance=RequestContext(request)
        )
class HomeView(MainView):
    """Home page view.

    NOTE(review): overrides __init__ without the ``request`` parameter
    MainView.__init__ declares and never sets self.request — presumably
    instantiated directly in urls rather than via as_view(); confirm.
    """
    def __init__(self):
        self.set_params_home()
class ProductDetailView(MainView):
    """Product-detail page: configures params per-request in get()."""
    #def __init__(self,product_id):
    #    self.set_params_productDetail(product_id)
    def get(self, request, product_id):
        self.set_params_productDetail(product_id)
        return render_to_response(self.html, self.render_dict, context_instance=RequestContext(request))
class CatalogView(MainView):
    """Catalog page view; see the note on HomeView about __init__."""
    def __init__(self):
        self.set_params_catalog()
class CartView(MainView):
    """Cart page view; see the note on HomeView about __init__."""
    def __init__(self):
        self.set_params_cart()
def add_to_cart(request):
    """Add the product with POSTed ``id`` to the session cart at its
    current price; answers the ajax call with an empty 200."""
    product_id = request.POST['id']
    product = Product.objects.get(id=product_id)
    cart = Cart(request)
    cart.add(product, product.price)
    return HttpResponse()
def remove_from_cart(request):
    """Remove the product with POSTed ``id`` from the session cart;
    answers the ajax call with an empty 200."""
    product_id = request.POST['id']
    product = Product.objects.get(id=product_id)
    cart = Cart(request)
    cart.remove(product)
    return HttpResponse()
def update_cart_item(request):
    """Update a cart line's quantity from the POSTed ``id``/``value``;
    answers the ajax call with an empty 200."""
    product_id = request.POST['id']
    quantity = request.POST['value']
    cart = Cart(request)
    cart.update(product_id, quantity)
    return HttpResponse()
def load_catalog_data(request):
    """Answer the ajax request with the catalog product list adapted for
    display, honouring the requested page, category, filters and sort."""
    # start_time=datetime.now()
    kwargs = {}
    args=[]
    category = request.POST['category']
    filter_json = request.POST['filters']
    filters = json.loads(filter_json)
    args.append(request.POST['sort'])
    page = request.POST['page']
    if category:
        kwargs['category__name'] = category
    if filters:
        # Each filter arrives as "key=value".  Keys that already carry a
        # lookup ('__') go in verbatim; plain keys are collected into a
        # key__in list so multiple values for one key OR together.
        for f in filters:
            key, value = f.split('=')
            if '__' in key:
                kwargs[key] = value
            else:
                key += '__in'
                if key in kwargs:
                    kwargs[key].append(value)
                else:
                    kwargs[key] = [value]
    print kwargs
    products = Product.objects.filter(**kwargs).order_by(*args)
    print products.query
    # 5 products per catalog page
    paginator = Paginator(products, 5)
    try:
        products = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        products = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        products = paginator.page(paginator.num_pages)
    data = {
        'products' : [],
        'has_previous' : products.has_previous(),
        #'previous_page_number' : products['previous_page_number'],
        'has_next' : products.has_next(),
        'number' : products.number,
        #'next_page_number' : products['next_page_number'],
        #'num_pages' : products['paginator']['num_pages']
    }
    for product in products:
        data['products'].append({
            'id': product.id,
            'title': product.title,
            'img_url': product.img_url,
            'small_description': product.small_description,
            'price': float(product.price),
        })
    return HttpResponse(json.dumps(data))
def load_cart_data(request):
    """Answer an AJAX request with the cart contents as JSON."""
    cart = Cart(request)
    products = [
        {
            'quantity': item.quantity,
            'product_id': item.product_id,
            'item_price': float(item.item_price),
            'total_price': float(item.total_price),
            'title': item.product.title,
            'url': item.product.img_url,
        }
        for item in Item.objects.filter(cart=cart.cart)
    ]
    payload = {
        'products': products,
        'cart_summary': float(cart.summary),
    }
    return HttpResponse(json.dumps(payload))
def createOrder(request):
    """Create a MyOrder from the current cart, persist it together with
    its OrderProduct rows, and answer the AJAX request with payment data.

    POST params: name, phoneNumber, email, town, adress, comment,
    terminalType ('rk' for Robokassa, 'yk' for Yandex.Kassa).
    """
    name = request.POST['name']
    phone_number = request.POST['phoneNumber']
    email = request.POST['email']
    town = request.POST['town']
    adress = request.POST['adress']
    comment = request.POST['comment']
    terminalType = request.POST['terminalType']
    cart = Cart(request)
    # Create the order, fill it in and persist it.
    order = MyOrder(
        name=name,
        phoneNumber=phone_number,
        email=email,
        town=town,
        adress=adress,
        comment=comment,
        total=cart.summary,
    )
    order.save()
    # Persist one OrderProduct row per cart item, each linked to the
    # order by foreign key.
    items = Item.objects.filter(cart=cart.cart)
    for item in items:
        orderproduct = OrderProduct(
            count=item.quantity,
            product=item.product,
            order=order,
        )
        orderproduct.save()
    # BUG FIX: 'data' used to be assigned only in the 'rk'/'yk' branches,
    # so an unknown terminal type raised NameError. Default to echoing
    # the terminal type back.
    data = {'terminalType': terminalType}
    if terminalType == 'rk':
        # Robokassa: send the rendered payment form and its target URL.
        form = RobokassaForm(initial={
            'OutSum': order.total,
            'InvId': order.id,
            'Desc': order.name,
            'Email': order.email,
        })
        data = {
            'robokassaHtml': form.as_p(),
            'robokassaTarget': form.target,
            'terminalType': terminalType,
        }
    if terminalType == 'yk':
        # Yandex.Kassa: send raw payment parameters.
        data = {
            'shopId': '',
            'scid': '',
            'sum': float(order.total),
            'orderNumber': order.id,
            'terminalType': terminalType,
            'invoiceOrderid': '',
        }
    return HttpResponse(json.dumps(data))
|
import signal
import subprocess
import os
import time
class StateManager:
    """Manages the lifecycle (download, run, kill, delete) of the
    led-matrix-app as a child process."""

    APP_DOWNLOAD_NAME = "temp-led-matrix-app/"

    def __init__(self, app_parent_directory: str):
        assert os.path.isdir(app_parent_directory), f"Parent directory {app_parent_directory} doesn't exist"
        self._app_directory = os.path.join(app_parent_directory, StateManager.APP_DOWNLOAD_NAME)
        self._app_running = False
        # BUG FIX: run() and kill_app() used 'self.proc' while __init__
        # initialized 'self._proc', so calling kill_app() before run()
        # raised AttributeError. All uses are unified on '_proc'.
        self._proc = None

    def _run_shell(self, cmd: str) -> None:
        """Run *cmd* in a shell and wait for it to finish."""
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        p.communicate()

    def download_app(self):
        """Re-clone the app from GitHub into the app directory."""
        print("StateManager - Downloading app")
        self._run_shell(f"git clone git@github.com:jstmn/led-matrix-app.git {self._app_directory}")

    def delete_app(self):
        """Delete the app from disk."""
        print("StateManager - Deleting app")
        self._run_shell(f"rm -rf {self._app_directory}")

    def run(self):
        """Start the led-matrix-app in its own process group."""
        app_py_filepath = os.path.join(self._app_directory, "app.py")
        app_start_command = f"python3.6 {app_py_filepath}"
        # See https://stackoverflow.com/a/4791612 — os.setsid is run after
        # fork() and before exec(), giving the child its own process
        # group; kill_app can then signal the whole group, shell included.
        self._proc = subprocess.Popen(
            app_start_command,
            stdout=subprocess.PIPE,
            shell=True,
            preexec_fn=os.setsid,
        )
        # Takes ~6 seconds to start; 1.5 s extra padding.
        time.sleep(7.5)

    def kill_app(self):
        """Kill the led matrix app if it's running; no-op otherwise."""
        if self._proc is None:
            return
        # Send the signal to the whole process group, not just the shell.
        os.killpg(os.getpgid(self._proc.pid), signal.SIGTERM)
        time.sleep(5)
        # Allow kill_app() to be called again safely.
        self._proc = None
|
## BAGGING CLASSIFICATION
# Import models and utility functions
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# Set seed for reproducibility
SEED = 1
# Split data into 70% train and 30% test, stratified on the labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=SEED)
# Instantiate a classification-tree 'dt'
dt = DecisionTreeClassifier(max_depth=4, min_samples_leaf=0.16, random_state=SEED)
# Instantiate a BaggingClassifier 'bc'
bc = BaggingClassifier(base_estimator=dt, n_estimators=300, n_jobs=-1)
# Fit 'bc' to the training set
bc.fit(X_train, y_train)
# Predict test set labels
y_pred = bc.predict(X_test)
# Evaluate and print test-set accuracy
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy of Bagging Classifier: {:.3f}'.format(accuracy))

## OOB EVALUATION IN SKLEARN
# Import models and split utility function
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# Set seed for reproducibility
SEED = 1
# Split data into 70% train and 30% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=SEED)
# Instantiate a classification-tree 'dt'
dt = DecisionTreeClassifier(max_depth=4, min_samples_leaf=0.16, random_state=SEED)
# Instantiate a BaggingClassifier 'bc'; set oob_score=True
bc = BaggingClassifier(base_estimator=dt, n_estimators=300, oob_score=True, n_jobs=-1)
# Fit 'bc' to the training set
bc.fit(X_train, y_train)
# Predict the test set labels
y_pred = bc.predict(X_test)
# Evaluate test set accuracy
test_accuracy = accuracy_score(y_test, y_pred)
# Extract the OOB accuracy from 'bc'
oob_accuracy = bc.oob_score_
# Print test set accuracy
print('Test set accuracy: {:.3f}'.format(test_accuracy))
# BUG FIX: this print statement was missing its closing parenthesis,
# which made the whole script a SyntaxError.
print('OOB accuracy: {:.3f}'.format(oob_accuracy))
# The difference between test accuracy and OOB accuracy is minimal, so OOB
# evaluation can be an efficient technique to obtain a performance estimate
# of a bagged ensemble on unseen data without cross validation.

## RANDOM FOREST REGRESSOR
# Basic imports
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as MSE

# Set seed for reproducibility
SEED = 1
# Split dataset into 70% train and 30% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=SEED)
# Instantiate a random forests regressor 'rf' with 400 estimators
rf = RandomForestRegressor(n_estimators=400, min_samples_leaf=0.12, random_state=SEED)
# Fit 'rf' to the training set
rf.fit(X_train, y_train)
# Predict the test set labels 'y_pred'
y_pred = rf.predict(X_test)
# Evaluate the test set RMSE
rmse_test = MSE(y_test, y_pred)**(1/2)
# Print the test set RMSE
print('Test set RMSE of rf: {:.2f}'.format(rmse_test))

## FEATURE IMPORTANCE in sklearn
import pandas as pd
import matplotlib.pyplot as plt

# Create a pd.Series of feature importances
importances_rf = pd.Series(rf.feature_importances_, index=X.columns)
# Sort importances_rf
sorted_importances_rf = importances_rf.sort_values()
# Make a horizontal bar plot
sorted_importances_rf.plot(kind='barh', color='lightgreen')
plt.show()
|
import sys, string, math

# Read two integers and print their least common multiple (LCM).
a, bg = map(int, input().split())
# lcm(a, b) = |a*b| // gcd(a, b). math.gcd replaces the original
# O(a*b) brute-force scan and also fixes the a == 0 or bg == 0 case,
# where the scan's range was empty and 'ans' stayed unbound (NameError).
ans = abs(a * bg) // math.gcd(a, bg) if a and bg else 0
print(ans)
|
# Usage: $ python3 get_comment_ratio.py /home/kevin/Desktop/sac-data/stats output.csv
# python3 get_comment_ratio.py <merged_files> <output_path>
#
# Merges all the extracted contribution per tag data into one single file.
__author__ = 'kevin'
import sys
import csv
import os
# RQ 1: Generate a csv file for each project with: file, release, if its SAC, LOC
# Column order of the summary CSV written by main().
csv_header = ['project',
              'mean SAC', 'mean non SAC', "mean change",
              'median SAC', 'median non SAC', "median change",
              'pVal']
def median(l):
    """Return the median of the numbers in *l*.

    Returns NaN for an empty sequence (consistent with mean() in this
    file). The original indexed into an empty list, printed the
    exception and then referenced an unbound local, crashing anyway.
    """
    if not l:
        return float('nan')
    l = sorted(l)
    n = len(l)
    if n % 2 == 0:
        # Even count: average the two middle elements.
        return (l[n // 2] + l[n // 2 - 1]) / 2
    return l[n // 2]
def mean(l):
    """Return the arithmetic mean of *l*, or NaN for an empty sequence."""
    if not l:
        return float('nan')
    return sum(l) / float(len(l))
def main(argv):
    """Aggregate per-project statement-count statistics for SAC vs
    non-SAC files and write a summary CSV.

    argv[1] -- directory of per-project CSV stat files (one per project)
    argv[2] -- path of the summary CSV to write

    A file counts as SAC ("single-author code") when the top single
    developer's contribution-knowledge percentage is >= 90.
    """
    data_dir = argv[1]
    output_file = argv[2]
    result = []
    for data_file in os.listdir(data_dir):
        with open(os.path.join(data_dir, data_file), newline="") as csv_file:
            # Input columns used here:
            #   top_single_dev_contribution_knowledge_percent, CountLineCode
            # (the files also carry file_name, commit_num, CountLine, ...)
            data = [{'contrib_percent': float(row['top_single_dev_contribution_knowledge_percent']),
                     'count_line_statement': float(row['CountLineCode']) if row['CountLineCode'] else 0}
                    for row in csv.DictReader(csv_file)]
            # Partition files by the 90% single-developer threshold.
            files_sac = [{'count_line_statement': l['count_line_statement']}
                         for l in data if l['contrib_percent'] >= 90]
            files_non_sac = [{'count_line_statement': l['count_line_statement']}
                             for l in data if l['contrib_percent'] < 90]
            statement_counts_sac = [f['count_line_statement'] for f in files_sac]
            statement_counts_non_sac = [f['count_line_statement'] for f in files_non_sac]
            # NOTE(review): median() aborts on an empty list, so a project
            # with no SAC (or no non-SAC) files crashes here — confirm
            # inputs always contain both groups.
            mean_sac = mean(statement_counts_sac)
            mean_non_sac = mean(statement_counts_non_sac)
            mean_change = mean_non_sac - mean_sac
            median_sac = median(statement_counts_sac)
            median_non_sac = median(statement_counts_non_sac)
            median_change = median_non_sac - median_sac
            result.append({
                'project': data_file,
                'mean SAC': round(mean_sac, 2),
                'mean non SAC': round(mean_non_sac, 2),
                'mean change': round(mean_change, 2),
                'median SAC': round(median_sac, 2),
                'median non SAC': round(median_non_sac, 2),
                'median change': round(median_change, 2),
                # Statistical significance is computed in a later step.
                'pVal': "TO CALCULATE"
            })
    with open(output_file, 'w', newline='') as output:
        writer = csv.DictWriter(output, csv_header)
        writer.writeheader()
        writer.writerows(result)


if __name__ == "__main__":
    main(sys.argv)
|
#encoding:utf-8
from openpyxl import load_workbook
import os
# Absolute path to the DDT spreadsheet, resolved relative to this file's
# grandparent directory so the script works from any working directory.
base_dir = os.path.dirname(os.path.dirname(__file__))
data_path = os.path.join(base_dir, 'data/ddt_data.xlsx')
class Get_message:
    """Test data pulled from data/ddt_data.xlsx at import time."""

    # Session cookie placeholder, assigned elsewhere at runtime.
    Cookie = None
    # Parse the workbook once; the original called load_workbook() for
    # every attribute, re-parsing the whole .xlsx file each time.
    _api_sheet = load_workbook(data_path)['api']
    Api = _api_sheet.cell(1, 2).value
    # NOTE(review): 'TsestApi' looks like a typo for 'TestApi', but the
    # name is kept because external code may already reference it.
    TsestApi = _api_sheet.cell(2, 2).value
|
from karel.stanfordkarel import *
"""
File: ExtensionKarel.py
-----------------------
This file is for optional extension programs.
"""
"""
An part of the extension, I made a program that allows karel to draw a STAR shape in any square world
"""
from karel.stanfordkarel import *
# pre: karel is facing east, ready to build the diagonals of a squared world
# post: karel has built both diagonals, returned, and faces east again
def main():
    """Draw a star/X shape by painting both diagonals of a square world."""
    create_a_1st_diagonal_shape()
    turn_west_to_the_wall()
    create_a_2nd_diagonal_shape()
    karel_return_to_its_starting_point()
# pre: karel facing east at the top-most right corner
# post: karel facing east at the top-most left corner of the world
def turn_west_to_the_wall():
    """Walk west until blocked by the wall, then face east again."""
    turn_around()
    while front_is_clear():
        move()
    turn_around()
# pre: karel facing east at the left-most part of the world i.e corner (1,1)
# post: karel again facing east but this time at the top-most corner of the world
def create_a_1st_diagonal_shape():
    """Paint the first (bottom-left to top-right) diagonal in BLUE.

    NOTE(review): the nesting below was reconstructed from
    indentation-mangled source — verify against the original file.
    """
    while front_is_clear():
        if front_is_clear():
            paint_corner(BLUE)
            turn_left()
            # At the wall, turn instead of moving off the world.
            if front_is_blocked():
                turn_left()
            else:
                move()
                turn_karel_right()
                move()
    # Paint the final corner reached after the loop exits.
    paint_corner(BLUE)
# pre: karel facing east at the top-most left corner of the world
# post: karel is facing east blocked by wall
def create_a_2nd_diagonal_shape():
    """Paint the second (top-left to bottom-right) diagonal in DARK_GRAY.

    NOTE(review): the nesting below was reconstructed from
    indentation-mangled source — verify against the original file.
    """
    if front_is_clear():
        # First diagonal step from the corner.
        paint_corner(DARK_GRAY)
        move()
        turn_karel_right()
        move()
        paint_corner(DARK_GRAY)
        move()
    while front_is_clear():
        # One down-right diagonal step per iteration.
        turn_left()
        move()
        paint_corner(DARK_GRAY)
        turn_karel_right()
        move()
    # Final step and corner once the front is blocked.
    turn_left()
    move()
    paint_corner(DARK_GRAY)
# pre: karel is facing east at the south-most corner
# post: karel is facing east at corner (1,1)
def karel_return_to_its_starting_point():
    """Walk back west to the wall, then face east again."""
    turn_around()
    while front_is_clear():
        move()
    turn_around()
def turn_around():
    """Rotate Karel 180 degrees using two left turns."""
    for _ in range(2):
        turn_left()
def turn_karel_right():
    """Rotate Karel 90 degrees clockwise via three left turns."""
    turn_left()
    turn_left()
    turn_left()
# There is no need to edit code beyond this point
if __name__ == "__main__":
run_karel_program()
|
# Bài 05: Viết hàm
# def count_upper_lower(str)
# trả lại số lượng chữ cái viết hoa, số lượng chữ cái viết thường trong chuỗi str
s = input('Nhap chuoi: ')
def count_upper_lower(a):
    """Count and print how many uppercase and lowercase ASCII letters
    the string *a* contains.

    Returns (upper_count, lower_count) so callers can also use the
    values programmatically (the original returned None).

    BUG FIX: the original iterated over the global 's' instead of the
    parameter 'a', so the argument was ignored entirely.
    """
    count_upper = 0
    count_lower = 0
    for ch in a:
        if 'A' <= ch <= 'Z':
            count_upper += 1
        if 'a' <= ch <= 'z':
            count_lower += 1
    print(f'So chu cai viet hoa {count_upper}')
    print(f'So chu cai viet thuong {count_lower}')
    return count_upper, count_lower
count_upper_lower(s) |
# coding: utf-8
#!/usr/bin/python
# THIS SCRIPT
# 1. LISTS ALL OF YOUR HOST NAMES FOR A SPECIFIC ACCOUNT_KEY
# 2. ALLOWS YOU TO SELECT A SPECIFIC HOST IN WHICH TO CREATE A NEW LOG
# 3. IT THEN PROMPTS YOU FOR A NEW LOG NAME
# 4. IT CREATES A LOG NAME UNDER YOUR SELECTED HOST
# REQUIREMENT - you must have your Logentries Account_Key and have at least one host in your account.
import urllib
import json
import sys
import os
ACCOUNT_KEY = ''
EXISTING_HOST_KEY = ''
HOST_NAME = ''
NEW_LOG_NAME = ''
#LISTS
HOST_NAMES =[]
HOST_KEYS = []
# Fetches all hosts for the account, prints an indexed menu (with each
# host's logs), then hands control to select_host().
def get_host_name():
    """List every host (and its logs) for ACCOUNT_KEY, then prompt for one."""
    req = urllib.urlopen("http://api.logentries.com/" + ACCOUNT_KEY + '/hosts/')
    response = json.load(req)
    # Cache names and keys in the module-level lists used by select_host().
    for hosts in response['list']:
        HOST_NAMES.append(hosts['name'])
        HOST_KEYS.append(hosts['key'])
    for i in range(0, len(HOST_NAMES)):
        print "["+str(i) +"] " + HOST_NAMES[i] + ' - ' + HOST_KEYS[i]
        # Also show the logs already attached to this host.
        get_log_name_and_token(HOST_KEYS[i])
    select_host()
def get_log_name_and_token(host_key):
    """Print the agent- and token-type logs attached to *host_key*."""
    req = urllib.urlopen("http://api.logentries.com/" + ACCOUNT_KEY + '/hosts/' + host_key + '/')
    response = json.load(req)
    for log in response['list']:
        # Agent logs have a file path; token logs have an ingestion token.
        if log['type']== 'agent':
            print "\t"+ "AGENT path:" + log['filename'] + " key:" + log['key']
        elif log['type']=='token':
            print "\t"+"TOKEN name=" +log['name'] + " key:" + log['key'] + " Token:" + log['token']
def select_host():
    """Prompt for a host index, validate it, then prompt for a new log
    name and create that log under the chosen host."""
    number = raw_input("Select the number of the Host in which you would like to add a new log: ")
    int_number = int(number)
    # Re-prompt until the index is within [0, len(HOST_NAMES) - 1].
    while int_number > len(HOST_NAMES)-1 or int_number < 0:
        print "INVALID NUMBER Please pick a number between 0 and " + str(len(HOST_NAMES)-1) + "."
        number = raw_input("Select the number of the Host in which you would like to add a new log: ")
        int_number = int(number)
    new_log_name = raw_input("Please enter the name of your new log to be created under - " + HOST_NAMES[int_number] + '. ')
    print "Creating Log titled " + new_log_name + " in host " + HOST_NAMES[int_number] + ". "
    create_log_in_host(HOST_KEYS[int_number], new_log_name, int(number))
# Create a new log under the listed host.
# NOTE(review): duplicate log names are not checked for; this can be an issue.
def create_log_in_host(existing_host_key, log_name, number):
    """POST a 'new_log' request to the Logentries API for *existing_host_key*."""
    request = urllib.urlencode({
        'request': 'new_log',
        'user_key': ACCOUNT_KEY,
        'host_key': existing_host_key,
        'name': log_name,
        'type': '',
        'filename': '',
        'retention': '-1',
        'source': 'token'
    })
    req = urllib.urlopen("http://api.logentries.com", request)
    print "log " + log_name + " has been created under host - " + HOST_NAMES[number] + "."
if __name__ == '__main__':
    # argv[1] must be the Logentries account key.
    ACCOUNT_KEY = sys.argv[1]
    get_host_name()
|
import math
import yfinance as yf
import matplotlib.pyplot as plt
class Index:
    """Wraps a yfinance ticker and its daily price history for a period."""

    def __init__(self, index_ticker, date1, date2):
        # Keep the query parameters for later reference.
        self.index_ticker = index_ticker
        self.start_date = date1
        self.end_date = date2
        self.ticker_data = yf.Ticker(index_ticker)
        # Daily historical prices between date1 and date2.
        self.ticker_prices = self.ticker_data.history(period='1d', start=date1, end=date2)
        self.ticker1 = self.ticker()

    # Returns the price-history data.
    def ticker(self):
        return self.ticker_prices
# Validates a user-entered date string.
def date(date):
    """Validate a 'YEAR-MONTH-DAY' date string.

    Returns exactly "Your date is valid" for acceptable dates — callers
    compare against this string — otherwise "Your date is invalid, sorry."
    Years after 2020 are rejected by design.

    BUG FIX: the invalid-path messages were inconsistent ("invaild",
    "sorry,"), which made the results unreliable to compare.
    NOTE(review): Feb 29 is accepted in every year (no leap-year check),
    matching the original behaviour — confirm intent.
    """
    year, month, day = date.split('-')
    month = int(month)
    day = int(day)
    year = int(year)
    if year > 2020 or not 1 <= month <= 12:
        return "Your date is invalid, sorry."
    # Max day per month; February gets 29 to preserve the original
    # leap-unaware behaviour.
    days_in_month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if day > days_in_month[month - 1]:
        return "Your date is invalid, sorry."
    return "Your date is valid"
# Graphs the chosen stocks over the selected time interval.
def make_graph(data_frames, ticker_list):
    """Plot the 'Close' column of each data frame, labeled by its ticker."""
    for i in range(len(data_frames)):
        plt.plot(data_frames[i]['Close'], label=ticker_list[i])
    plt.legend(loc="upper left")
    plt.xlabel("Date")
    plt.ylabel("Close Price")
    # Title depends on whether one or several tickers are plotted.
    if (len(ticker_list) > 1):
        plt.title("Comparison of stocks")
    else:
        plt.title("Price Data for " + ticker_list[0].upper())
    plt.style.use('dark_background')
    plt.show()
def main():
    """Interactively build a portfolio, validate the date range, report
    each ticker's percent change over the period, pick the best
    performer, and plot the closing prices."""
    # Ask how many tickers to collect.
    list_length = int(input("How many stocks are in your portfolio: "))
    ticker_list = []
    # Loop (not 'for') so an invalid ticker re-prompts without advancing.
    i = 0
    while i < list_length:
        ticker = input("Enter ticker name: ")
        # Asking yfinance for .info raises for unknown tickers.
        try:
            yf.Ticker(ticker).info
        except ValueError:
            print(ticker, "does not exist, please enter a valid ticker")
            continue
        ticker_list.append(ticker)
        i = i + 1
    # Re-prompt until BOTH dates are valid.
    j = 0
    while j < 1:
        start_date = input("Enter start date in form YEAR-MONTH-DAY: ")
        end_date = input("Enter end date in form YEAR-MONTH-DAY: ")
        check_date1 = date(start_date)
        check_date2 = date(end_date)
        # BUG FIX: the original used 'and', which accepted the pair when
        # only ONE of the two dates was invalid.
        if check_date1 != "Your date is valid" or check_date2 != "Your date is valid":
            print("You've entered an invalid date, please enter a new date")
            continue
        else:
            print("Date is valid")
            j = j + 1
    # Percent change of each ticker over the chosen interval.
    frame_list = []
    percent_list = []
    for ticker in ticker_list:
        frame = Index(ticker, start_date, end_date).ticker_prices
        frame_list.append(frame)
        last_close = frame["Close"][-1]
        first_close = frame["Close"][0]
        percent_increase = (last_close - first_close) / first_close * 100
        print("Here's the first close for the start date:", round(first_close, 2))
        print("Here's the last close for the end date:", round(last_close, 2))
        print("This is the percentage change for", ticker.upper(), "in the period you listed:", str(round(percent_increase, 2)), "%")
        percent_list.append(percent_increase)
    # Best performing stock by percentage change.
    best_performer_index = percent_list.index(max(percent_list))
    best_performer = max(percent_list)
    print("The best performer of the stocks you selected was", ticker_list[best_performer_index], "with a ", str(round(best_performer, 2)), "% ")
    # Graph every ticker that was requested.
    make_graph(frame_list, ticker_list)
main()
|
# -*- coding: utf-8 -*-
'''
Crea un alumno en la base de datos.
PYTHONPATH="../../../python" python3 createStudent.py dni name lastname legajo
'''
from model.connection import connection
from model.users import users
from model.registry import Registry
import systems
import logging
def createStudent(con, dni, name, lastname, studentN):
    """Create a student in the database: user row, student row, and an
    initial password (username = DNI, password = student number).

    Does nothing (besides logging) if a user with the given DNI exists.
    Nothing is committed here; the caller owns the transaction.
    """
    logging.getLogger().setLevel(logging.INFO)  # side effect: global log level
    u = users.UserDAO.findByDni(con, dni)
    if u is not None:
        # logging.warn() is deprecated; use warning().
        logging.warning('Persona ya existente')
        logging.warning(u)
        return
    # Base user record.
    user = users.User()
    user.name = name
    user.lastname = lastname
    user.dni = dni
    uid = users.UserDAO.persist(con, user)
    # Student record linked to the user id.
    student = users.Student()
    student.id = uid
    student.studentNumber = studentN
    users.StudentDAO.persist(con, student)
    # Initial credentials: DNI as username, student number as password.
    up = users.UserPassword()
    up.userId = uid
    up.username = dni
    up.password = studentN
    users.UserPasswordDAO.persist(con, up)
if __name__ == '__main__':
    import inject
    #inject.configure()
    import sys
    # Positional CLI arguments: dni name lastname legajo(student number).
    dni = sys.argv[1]
    name = sys.argv[2]
    lastname = sys.argv[3]
    studentN = sys.argv[4]
    assert dni is not None
    assert name is not None
    assert lastname is not None
    assert studentN is not None
    # Open a DB connection from the 'dcsys' registry entry.
    r = inject.instance(Registry)
    conn = connection.Connection(r.getRegistry('dcsys'))
    con = conn.get()
    try:
        createStudent(con, dni, name, lastname, studentN)
        con.commit()
    finally:
        # Always return the connection to the pool.
        conn.put(con)
|
'''
The training time is too long with >=100 lines of poem.
Also, it gives low accuracy.
Maybe HMM is not suitable for classification when the number of symbols is too large.
'''
import sys
from multiprocessing import Process
import numpy as np
from google.colab import drive
from sklearn.utils import shuffle
from src.hmmd_tf2 import HMMD_TF
# @DeprecationWarning
def read_data(path, label):
    """Read a poem file and return (lines, labels).

    Each line of the file (lower-cased) becomes one sample in X, paired
    with *label* in y. The original leaked the file handle returned by
    open(); a context manager now closes it deterministically.
    """
    X = []
    y = []
    with open(path) as f:
        poem = f.read()
    for line in poem.lower().split("\n"):
        X.append(line)
        y.append(label)
    return X, y
def train_classifier(poem1, poem2, weight_hmm1, weight_hmm2):
    '''
    Multiprocess binary training: each author gets a separate HMM,
    trained in its own process in parallel.
    :param poem1: path of the first poem file, or many poems could be put in a single file.
    :param poem2: path of the second poem file
    :param weight_hmm1: the location where the value of hyperparameters of HMM 1 should be stored during training.
    :param weight_hmm2: the location where the value of hyperparameters of HMM 2 should be stored during training.
    :return: None (weights are written to disk by the worker processes)
    '''
    # Number of poem lines used to train each HMM; kept small because
    # training time grows quickly with more lines.
    limit = 100
    # Both models share the same number of hidden states.
    n_hidden_states = 20
    # Train model 1 (label 0) in its own process.
    X1, y1 = read_data(poem1, label=0)
    X1_train = X1[:limit]
    hmm1 = HMMD_TF()
    sequences1, vocabulary1 = hmm1.load_data(X1_train, split_level='WORD')
    process1 = Process(target=hmm1.fit,
                       args=(sequences1, vocabulary1, weight_hmm1, n_hidden_states, ''))
    process1.start()
    # Train model 2 (label 1) in parallel; the tab prefix offsets its
    # console progress output so the two logs don't interleave.
    X2, y2 = read_data(poem2, label=1)
    X2_train = X2[:limit]
    hmm2 = HMMD_TF()
    sequences2, vocabulary2 = hmm2.load_data(X2_train, split_level='WORD')
    process2 = Process(target=hmm2.fit,
                       args=(sequences2, vocabulary2, weight_hmm2, n_hidden_states,
                             '\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'))
    process2.start()
    # Wait for both trainings to finish.
    process1.join()
    process2.join()
def test(poem1, poem2, weight_hmm1, weight_hmm2):
    """Evaluate the two trained HMMs on held-out lines [100, 110) of each
    poem file and print the classification accuracy.

    A sample is predicted as the author whose HMM assigns it the higher
    log-likelihood.
    """
    # Held-out test data: lines 100-109 of each file.
    X1, y1 = read_data(poem1, label=0)
    upper_limit = 110
    lower_limit = 100
    X1_test = X1[lower_limit:upper_limit]
    y1_test = y1[lower_limit:upper_limit]
    X2, y2 = read_data(poem2, label=1)
    X2_test = X2[lower_limit:upper_limit]
    y2_test = y2[lower_limit:upper_limit]
    # Combined, shuffled test set.
    X_test = np.concatenate([X1_test, X2_test])
    y_test = np.concatenate([y1_test, y2_test])
    X_test, y_test = shuffle(X_test, y_test)
    # Log-likelihood of every sample under model 1 (weights read from disk).
    hmm1 = HMMD_TF()
    hmm1.read(A_path=weight_hmm1 + "logA.csv", B_path=weight_hmm1 + "logB.csv",
              pi_path=weight_hmm1 + "logpi.csv",
              vocabulary_path=weight_hmm1 + "vocabulary.csv")
    likelihood_arr1 = hmm1.compute_likelihood(X_test, split_level='WORD')
    print(f'log likelihood_arr1: {likelihood_arr1}')
    # Log-likelihood under model 2.
    hmm2 = HMMD_TF()
    hmm2.read(A_path=weight_hmm2 + "logA.csv", B_path=weight_hmm2 + "logB.csv",
              pi_path=weight_hmm2 + "logpi.csv",
              vocabulary_path=weight_hmm2 + "vocabulary.csv")
    likelihood_arr2 = hmm2.compute_likelihood(X_test, split_level='WORD')
    print(f'log likelihood_arr2: {likelihood_arr2}')
    # Predict the model with the higher likelihood; score the accuracy.
    yhat = np.argmax([likelihood_arr1, likelihood_arr2], axis=0)
    print(f'True ground: {y_test}')
    print(f'Prediction : {yhat}')
    score = np.mean(yhat == y_test)
    print(f'accuracy = {score}')
if __name__ == '__main__':
    np.set_printoptions(threshold=sys.maxsize)
    # Toggle between Google Colab paths and local paths.
    GOOGLE_COLAB = False
    if GOOGLE_COLAB:
        # Run on Google Colab (but training is still very slow there).
        drive.mount('/content/drive')
        poem1 = "/content/drive/My Drive/Colab Notebooks/poem/nguyen-binh.txt"
        poem2 = "/content/drive/My Drive/Colab Notebooks/poem/truyen_kieu.txt"
        weight_hmm1 = "/content/drive/My Drive/Colab Notebooks/poem/hmm1_"
        weight_hmm2 = "/content/drive/My Drive/Colab Notebooks/poem/hmm2_"
        train_classifier(poem1, poem2, weight_hmm1, weight_hmm2)
        test(poem1, poem2, weight_hmm1, weight_hmm2)
    else:
        # Run on the local machine; weight files are written as
        # ../hmm1_*.csv and ../hmm2_*.csv.
        # NOTE(review): 'from google.colab import drive' at module level
        # still fails on a local machine — confirm it is guarded upstream.
        poem1 = "../data/nguyen-binh.txt"
        poem2 = "../data/truyen_kieu.txt"
        weight_hmm1 = "../hmm1_"
        weight_hmm2 = "../hmm2_"
        train_classifier(poem1, poem2, weight_hmm1, weight_hmm2)
        test(poem1, poem2, weight_hmm1, weight_hmm2)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
import pytest
from pants.backend.java.compile.javac import rules as javac_rules
from pants.backend.java.dependency_inference import symbol_mapper as java_symbol_mapper
from pants.backend.java.dependency_inference.rules import rules as java_dep_inference_rules
from pants.backend.java.target_types import JavaSourcesGeneratorTarget, JavaSourceTarget
from pants.backend.java.target_types import rules as java_target_rules
from pants.backend.scala import target_types as scala_target_types
from pants.backend.scala.dependency_inference import rules as scala_dep_inference_rules
from pants.backend.scala.dependency_inference import scala_parser
from pants.backend.scala.dependency_inference import symbol_mapper as scala_symbol_mapper
from pants.backend.scala.target_types import ScalaSourcesGeneratorTarget, ScalaSourceTarget
from pants.core.util_rules import config_files, source_files, system_binaries
from pants.engine.addresses import Address, Addresses
from pants.engine.rules import QueryRule
from pants.engine.target import Dependencies, DependenciesRequest
from pants.jvm.jdk_rules import rules as java_util_rules
from pants.jvm.resolve import jvm_tool
from pants.jvm.strip_jar import strip_jar
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pytest fixture: a RuleRunner wired with the Java and Scala
    compile / dependency-inference rules these tests exercise."""
    rule_runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *jvm_tool.rules(),
            *java_dep_inference_rules(),
            *java_target_rules(),
            *java_util_rules(),
            *strip_jar.rules(),
            *javac_rules(),
            *java_symbol_mapper.rules(),
            *source_files.rules(),
            *scala_parser.rules(),
            *scala_symbol_mapper.rules(),
            *scala_dep_inference_rules.rules(),
            *scala_target_types.rules(),
            *system_binaries.rules(),
            *util_rules(),
            # Lets tests request the resolved addresses of a target's deps.
            QueryRule(Addresses, (DependenciesRequest,)),
        ],
        target_types=[
            JavaSourcesGeneratorTarget,
            JavaSourceTarget,
            ScalaSourcesGeneratorTarget,
            ScalaSourceTarget,
        ],
    )
    # Inherit the env vars the Python bootstrap needs to find binaries.
    rule_runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return rule_runner
def test_java_infers_scala_dependency(rule_runner: RuleRunner) -> None:
    """A Java source referencing a Scala object's companion class (Foo$)
    should get an inferred dependency on the Scala target defining it."""
    rule_runner.write_files(
        {
            "org/pantsbuild/lib/BUILD": "scala_sources()\n",
            "org/pantsbuild/lib/Foo.scala": textwrap.dedent(
                """
                package org.pantsbuild.lib

                object Foo {
                  def grok(): Unit = {
                    println("Hello world!")
                  }
                }
                """
            ),
            "org/pantsbuild/example/BUILD": "java_sources()\n",
            "org/pantsbuild/example/Bar.java": textwrap.dedent(
                """
                package org.pantsbuild.example;

                import org.pantsbuild.lib.Foo$;

                public class Bar {
                    public static void main(String[] args) {
                        Foo$.MODULE$.grok();
                    }
                }
                """
            ),
        }
    )
    example_tgt = rule_runner.get_target(
        Address("org/pantsbuild/example", target_name="example", relative_file_path="Bar.java")
    )
    deps = rule_runner.request(Addresses, [DependenciesRequest(example_tgt[Dependencies])])
    # The only inferred dependency should be the Scala file defining Foo.
    assert deps == Addresses(
        [Address("org/pantsbuild/lib", target_name="lib", relative_file_path="Foo.scala")]
    )
|
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training a CNN on MNIST with Keras and the DP SGD optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import keras_applications
from privacy.analysis.rdp_accountant import compute_rdp
from privacy.analysis.rdp_accountant import get_privacy_spent
from dp_optimizer import DPGradientDescentGaussianOptimizer
# Command-line hyperparameters for DP-SGD training.
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, train with vanilla SGD.')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 1.1, 'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
flags.DEFINE_integer('batch_size', 16, 'Batch size')
flags.DEFINE_integer('epochs', 200, 'Number of epochs')
flags.DEFINE_integer('microbatches', 16, 'Number of microbatches (must evenly divide batch_size)')
# NOTE(review): the original comment said "MNIST has 50000 training
# points" — MNIST has 60000 (CIFAR-10 has 50000); confirm which dataset
# the delta of 1e-5 is meant for, since load_mnist() loads CIFAR-10.
flags.DEFINE_float('delta', 1e-5, 'Delta')
FLAGS = flags.FLAGS
def compute_epsilon(steps):
    """Compute the DP epsilon spent after *steps* SGD steps via the RDP
    accountant, at delta = FLAGS.delta."""
    if FLAGS.noise_multiplier == 0.0:
        # No noise means no privacy guarantee.
        return float('inf')
    # Candidate Renyi orders to optimize the bound over.
    orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
    # NOTE(review): assumes 60000 training points (MNIST-sized), but
    # load_mnist() actually loads CIFAR-10 (50000) — confirm.
    sampling_probability = FLAGS.batch_size / 60000
    rdp = compute_rdp(q=sampling_probability, noise_multiplier=FLAGS.noise_multiplier, steps=steps, orders=orders)
    # get_privacy_spent returns (eps, delta, opt_order); keep epsilon.
    return get_privacy_spent(orders, rdp, target_delta=FLAGS.delta)[0]
class EpsilonPrintingCallback(tf.keras.callbacks.Callback):
    """Keras callback that evaluates and prints the DP epsilon spent
    after every epoch (only when training with DP-SGD)."""

    def __init__(self):
        # BUG FIX: initialize the base Callback state; the original
        # skipped super().__init__(), leaving base-class attributes unset.
        super(EpsilonPrintingCallback, self).__init__()
        # Epsilon value recorded at the end of each epoch.
        self.eps_history = []

    def on_epoch_end(self, epoch, logs=None):
        if FLAGS.dpsgd:
            # Steps so far = completed epochs * batches per epoch.
            eps = compute_epsilon((epoch + 1) * (60000 // FLAGS.batch_size))
            self.eps_history.append(eps)
            print(', eps = {}'.format(eps))
def load_mnist():
    """Load and preprocess the image dataset into train/test arrays.

    NOTE(review): despite the function name (and the original docstring
    saying MNIST), this loads tf.keras.datasets.cifar10 — 32x32x3 images,
    50000 train / 10000 test — while compute_epsilon() assumes 60000
    training points. Confirm which dataset is intended.
    """
    train, test = tf.keras.datasets.cifar10.load_data()
    train_data, train_labels = train
    test_data, test_labels = test
    # Scale pixel values to [0, 1].
    train_data = np.array(train_data, dtype=np.float32) / 255
    test_data = np.array(test_data, dtype=np.float32) / 255
    train_data = train_data.reshape(train_data.shape[0], 32, 32, 3)
    test_data = test_data.reshape(test_data.shape[0], 32, 32, 3)
    train_labels = np.array(train_labels, dtype=np.int32)
    test_labels = np.array(test_labels, dtype=np.int32)
    # One-hot encode the 10 class labels.
    train_labels = tf.keras.utils.to_categorical(train_labels, num_classes=10)
    test_labels = tf.keras.utils.to_categorical(test_labels, num_classes=10)
    return train_data, train_labels, test_data, test_labels
def resnet_block(x, filters, kernel_size=3, stride=1,
                 conv_shortcut=False, name=None):
    """A residual block (BN/ReLU pre-activation before each conv).
    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the strided (middle) conv layer.
        conv_shortcut: default False, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.
    # Returns
        Output tensor for the residual block.
    """
    # Channels axis depends on the backend's data format.
    bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
    preact = tf.keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                                name=name + '_preact_bn')(x)
    preact = tf.keras.layers.Activation('relu', name=name + '_preact_relu')(preact)
    # Shortcut branch: 1x1 conv to change dims, else (strided) identity.
    # NOTE(review): 'is True' means truthy non-True values (e.g. 1) take
    # the identity path — confirm this is intended.
    if conv_shortcut is True:
        shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride,
                                          name=name + '_0_conv')(preact)
    else:
        shortcut = tf.keras.layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
    # Main branch: 1x1 reduce -> 3x3 (strided) -> 1x1 expand to 4*filters.
    x = tf.keras.layers.Conv2D(filters, 1, strides=1, use_bias=False,
                               name=name + '_1_conv')(preact)
    x = tf.keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                           name=name + '_1_bn')(x)
    x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
    x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = tf.keras.layers.Conv2D(filters, kernel_size, strides=stride,
                               use_bias=False, name=name + '_2_conv')(x)
    x = tf.keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                           name=name + '_2_bn')(x)
    x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
    x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    # Residual addition of shortcut and main branch.
    x = tf.keras.layers.Add(name=name + '_out')([shortcut, x])
    return x
def resnet_stack(x, filters, blocks, stride1=2, name=None):
    """A set of stacked residual blocks.

    The first block uses a projection (conv) shortcut; the final block
    carries the stack's stride (stride1); the blocks between use identity
    shortcuts at stride 1.

    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer in a block.
        blocks: integer, blocks in the stacked blocks.
        stride1: default 2, stride applied by the last block of the stack.
        name: string, stack label.

    # Returns
        Output tensor for the stacked blocks.
    """
    out = resnet_block(x, filters, conv_shortcut=True, name=name + '_block1')
    block_index = 2
    while block_index < blocks:
        out = resnet_block(out, filters, name=name + '_block' + str(block_index))
        block_index += 1
    return resnet_block(out, filters, stride=stride1,
                        name=name + '_block' + str(blocks))
def ResNet(stack_fn, default_size=32, model_name='resnet', classes=10, **kwargs):
    """Instantiates a pre-activation ResNet architecture.

    # Arguments
        stack_fn: a function that returns output tensor for the
            stacked residual blocks.
        default_size: default size of image.
        model_name: string, model name.
        classes: number of classes to classify images into.

    # Returns
        A Keras model instance whose output is a softmax over `classes`.
    """
    # Determine proper input shape.
    # NOTE(review): relies on a private keras_applications helper
    # (_obtain_input_shape) -- may break across keras_applications versions.
    input_shape = keras_applications.imagenet_utils._obtain_input_shape(None,
                                                                        default_size=default_size,
                                                                        min_size=32,
                                                                        data_format=tf.keras.backend.image_data_format(),
                                                                        require_flatten=True,
                                                                        weights=None)
    input_tensor = tf.keras.layers.Input(shape=input_shape)
    bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
    # Stem: explicitly padded 7x7/2 conv followed by a padded 3x3/2 max pool.
    x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(input_tensor)
    x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1_conv')(x)
    x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
    x = stack_fn(x)
    # Final BN + ReLU (pre-activation ordering), then global pooling and a
    # softmax classifier head.
    x = tf.keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                           name='post_bn')(x)
    x = tf.keras.layers.Activation('relu', name='post_relu')(x)
    x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = tf.keras.layers.Dense(classes, activation='softmax', name='probs')(x)
    # Create model.
    model = tf.keras.models.Model(input_tensor, x, name=model_name)
    return model
def ResNet18V2(default_size=16, classes=10, **kwargs):
    """Build a small pre-activation ResNet with four 2-block stacks
    (16/32/64/128 bottleneck filters); the last stack keeps stride 1."""
    def stack_fn(inputs):
        out = resnet_stack(inputs, 16, 2, name='conv2')
        out = resnet_stack(out, 32, 2, name='conv3')
        out = resnet_stack(out, 64, 2, name='conv4')
        return resnet_stack(out, 128, 2, stride1=1, name='conv5')

    return ResNet(stack_fn, default_size, 'resnet18v2', classes, **kwargs)
def main(unused_argv):
    """Train the ResNet18v2 model with (DP-)SGD and record epsilon/accuracy.

    Reads all hyperparameters from FLAGS; writes the epsilon history and
    validation-accuracy history to a text file named after the run config.
    """
    logging.set_verbosity(logging.INFO)
    if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
        raise ValueError('Number of microbatches should divide evenly batch_size')

    # Load training and test data (CIFAR-shaped 32x32x3 despite the helper name).
    train_data, train_labels, test_data, test_labels = load_mnist()

    # Build the model. Its final Dense layer applies a softmax, so the losses
    # below must treat predictions as probabilities, NOT logits.
    model = ResNet18V2(default_size=32)
    print(model.summary())

    if FLAGS.dpsgd:
        optimizer = DPGradientDescentGaussianOptimizer(
            l2_norm_clip=FLAGS.l2_norm_clip,
            noise_multiplier=FLAGS.noise_multiplier,
            num_microbatches=FLAGS.microbatches,
            learning_rate=FLAGS.learning_rate)
        # Compute a vector of per-example losses (reduction=NONE) so the DP
        # optimizer can clip per-microbatch gradients.
        # BUGFIX: the model outputs softmax probabilities ('probs' layer), so
        # from_logits must be False; from_logits=True mis-scales the loss.
        loss = tf.keras.losses.CategoricalCrossentropy(
            from_logits=False, reduction=tf.compat.v1.losses.Reduction.NONE)
    else:
        optimizer = tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=FLAGS.learning_rate)
        loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)

    # Compile model with Keras.
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    # Train; the callback accumulates the privacy-epsilon spent per epoch.
    eps_callback = EpsilonPrintingCallback()
    fit_history = model.fit(
        train_data, train_labels,
        epochs=FLAGS.epochs,
        validation_data=(test_data, test_labels),
        batch_size=FLAGS.batch_size,
        callbacks=[eps_callback])

    # Persist epsilon and validation-accuracy curves for later analysis.
    eps_history = eps_callback.eps_history
    val_acc_history = fit_history.history['val_accuracy']
    with open('cifar_{}_delta_{}_lr_{}.txt'.format(
            'dpsgd' if FLAGS.dpsgd else 'sgd',
            FLAGS.delta, FLAGS.learning_rate), 'w') as f:
        f.write('eps: {}\n'.format(eps_history))
        f.write('validation acc: {}\n'.format(val_acc_history))


if __name__ == '__main__':
    app.run(main)
import importlib
import pkgutil
import aurora.drivers
def iter_namespace(ns_pkg):
    """Yield (finder, name, ispkg) for every module directly under *ns_pkg*."""
    # Passing the package name as the prefix makes iter_modules report
    # absolute module names, so import_module can load them without any
    # further name manipulation.
    prefix = ns_pkg.__name__ + "."
    return pkgutil.iter_modules(ns_pkg.__path__, prefix)
def import_namespace_plugins():
    """Import every sub-package found under the aurora.drivers namespace.

    NOTE: the driver class MUST be importable at the top level (i.e. imported
    in the driver __init__.py file).
    """
    for _finder, module_name, is_package in iter_namespace(aurora.drivers):
        if not is_package:
            continue
        importlib.import_module(module_name)
def get_namespace_plugins(ns_pkg=None):
    """Return {absolute_name: imported_module} for each sub-package of *ns_pkg*.

    Defaults to the aurora.drivers namespace when no package is given.
    """
    if ns_pkg is None:
        import aurora.drivers as ns_pkg
    plugins = {}
    for _finder, module_name, is_package in iter_namespace(ns_pkg):
        if is_package:
            plugins[module_name] = importlib.import_module(module_name)
    return plugins
def list_drivers(ns_pkg=None):
    """Print the names of the discovered driver plugins, one per line."""
    discovered = get_namespace_plugins(ns_pkg)
    if not discovered:
        print('No drivers are installed')
    else:
        print('Drivers found:\n' + '\n'.join(discovered))
#! /usr/bin/env python
#-*- coding: utf-8 -*-
# Minimal interactive TCP client: repeatedly read a command from stdin, send
# it to the server, and print the (up to 1024-byte) reply.
# NOTE(review): this is Python 2 code (raw_input, print statement); it will
# not run under Python 3 without porting.
import socket
host='0.0.0.0'
port=50015
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((host, port))
while 1:
    cmd = raw_input("Please input cmd:")
    s.sendall(cmd)
    data = s.recv(1024)
    print data
# NOTE(review): unreachable -- the loop above never breaks, so the socket is
# only released on process exit.
s.close()
|
# coding=utf-8
import sys
from ualfred import Workflow3, notify
log = None
def main(wf):
    """Workflow entry point: fetch Caiyun weather data for coordinates packed
    into argv and emit an Alfred feedback item.

    `wf.args[0]` is a pickled sequence where indexes 4 and 5 are treated as
    longitude/latitude strings and index 6 selects the endpoint (1 ->
    realtime) -- TODO confirm the layout against the caller that builds argv.
    """
    import pickle
    from workflow import web
    args = wf.args
    # NOTE(review): pickle.loads on str(...) is Python-2 style; under
    # Python 3 pickle.loads requires bytes.
    query = pickle.loads(str(args[0]))
    log.debug('test')
    # The API key is stored in the macOS Keychain via the workflow helper.
    url = 'https://api.caiyunapp.com/v2/' + wf.get_password('apiKey') + '/' + query[4] + ',' + query[5] + '/'
    if query[6] == 1:
        log.debug(url)
        url += 'realtime.json'
    r = web.get(url)
    data = r.json()
    log.debug(data)
    # Only the API status string is surfaced as the Alfred feedback item.
    wf.add_item(data['status'])
    wf.send_feedback()
if __name__ == '__main__':
    # Create a global `Workflow` object; `log` is rebound to its logger so
    # main() can use it.
    wf = Workflow3()
    # Call your entry function via `Workflow.run()` to enable its helper
    # functions, like exception catching, ARGV normalization, magic
    # arguments etc.
    log = wf.logger
    sys.exit(wf.run(main))
|
# How many numbers below 10,000 are Lychrel, i.e. do not collapse into palindromes by adding them with their reverses after 50 iterations?
# ====================================================================================
# This is nice for thinking about palindromes:
def compute():
    """Count the Lychrel numbers below 10,000 and return the count as a string."""
    total = sum(1 for candidate in range(10000) if is_lychrel(candidate))
    return str(total)
def is_lychrel(n):
    """Return True when *n* fails to form a palindrome within 50
    reverse-and-add iterations (Project Euler 55 definition: the check
    happens after each addition, so palindromic inputs are still iterated)."""
    value = n
    for _ in range(50):
        # One reverse-and-add step.
        value += int(str(value)[::-1])
        digits = str(value)
        if digits == digits[::-1]:
            return False
    return True
if __name__ == "__main__":
print(compute())
|
def trailingZeroesInFact(num):
    """Count the trailing zeroes of num! by summing floor(num / 5**k) for all
    k >= 1 (Legendre's formula -- factors of 5 are rarer than factors of 2)."""
    remaining = num
    zeroes = 0
    while remaining > 0:
        remaining //= 5
        zeroes += remaining
    return zeroes
# Demo: 100! ends in 24 zeroes.
num = 100
print(trailingZeroesInFact(num))
# String-formatting practice: %-interpolation, str.format, and f-strings.
# (The commented-out lines below are earlier variations kept for reference.)
# print("나는 %d 살입니다." % 24)
# print("나는 %s 이고 %d 살입니다" %("홍길동", 24))
# print("나는 {} 이고 {} 살입니다" .format("홍길동", 24))
# print("나는 {1} 이고 {0} 살입니다" .format("홍길동", 24))
#print("나는 {name} 이고 {age} 살입니다" .format(name = "홍길동", age = 24))
# str.format with fill/alignment specs: >10 right-aligns, <10 left-aligns,
# ^10 centers, -^10 centers padded with '-'.
text = """
오늘의 온도는 {0:>10}도 이고, 습도는 {1:<10}도 입니다.
내일의 온도는 {2:^10}도 이고, 습도는 {3:-^10}도 입니다.
""".format(30, 70, 28,69)
# print(text)
# The same sentences via %-interpolation (no alignment support here).
text = """
오늘의 온도는 %s도 이고, 습도는 %s도 입니다.
내일의 온도는 %s도 이고, 습도는 %s도 입니다.
"""%(30, 70, 28,69)
# print(text)
age = 24
name = "홍길동"
# print(f"나는 {name} 이고 {age} 살입니다")
# f-string with a format spec: age rendered as a float with 4 decimal places.
text2 = f"나는 {name} 이고 {age:0.4f} 살입니다"
print(text2)
# Read the two seed values and the term index from stdin.
# NOTE(review): Python 2 built-ins (raw_input / bare input) -- will not run
# under Python 3 without porting.
f1,f2=[int(x) for x in raw_input().split(" ")]
n=input()
def fseq(i):
    """Return the i-th term (1-based) of the sequence defined by the module
    globals f1, f2 and the recurrence f(i) = f(i-1) - f(i-2).

    BUGFIX: the original double recursion recomputed the whole prefix for
    both branches, making the call exponential in i; this iterative form is
    O(i) and produces identical values.
    """
    if i == 1:
        return f1
    if i == 2:
        return f2
    prev, cur = f1, f2
    for _ in range(i - 2):
        prev, cur = cur, cur - prev
    return cur
print (fseq(n)%1000000007) |
from picamraw import PiRawBayer, PiCameraVersion
from ..constants import RAW_BIT_DEPTH
def as_rgb(raw_image_path):
    """ Extracts the raw bayer data from a JPEG+RAW file and converts it to an
    `RGB Image` (see definition in README).

    Args:
        raw_image_path: The full path to the JPEG+RAW file

    Returns:
        An `RGB Image`
    """
    bayer_data = PiRawBayer(
        filepath=raw_image_path,
        camera_version=PiCameraVersion.V2,
        sensor_mode=0,
    )
    # Normalize into the (0, 1) range by dividing by the raw bit depth.
    return bayer_data.to_rgb() / RAW_BIT_DEPTH
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
import ujson
from sanic.request import Request
from sanic.log import logger
from web_backend.config import SALT
from web_backend.nvlserver.module.user.service import get_user_by_id
from web_backend.nvlserver.module.permission.service import (
get_permission_list_for_user, get_permission_module_list_by_user_id
)
__all__ = [
# SERVICES WORKING ON LANGUAGE TABLE
'generate_password', 'check_password',
'update_password', 'remove_user_from_redis',
'change_lock_state', 'check_unique_by_column_name',
'update_user_in_redis', 'generate_new_access_token'
]
async def generate_password(
        request,
        plain_password: str,
        salt: str = SALT) -> object:
    """Hash *plain_password* + *salt* with Postgres pgcrypto (bcrypt, cost 8).

    :param request: Sanic request; provides the asyncpg pool at request.app.pg.
    :param plain_password: clear-text password to hash.
    :param salt: app-wide salt appended to the password before hashing.
    :return: the crypted password string, or None on error.
    """
    # Hashing is delegated to pgcrypto: crypt(password || salt, gen_salt('bf', 8)).
    generate_pass_query = '''SELECT crypt((SELECT concat($1::VARCHAR, $2::VARCHAR) AS dta),
    gen_salt('bf', 8)) AS crypted_password;'''

    ret_val = None

    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchval(generate_pass_query, plain_password, salt)
            if row is not None:
                ret_val = row
    except Exception as guid_err:
        logger.error('user.generate_password erred with {}'.format(guid_err))

    return ret_val
async def check_password(
        request,
        user_ident,
        plain_pass: str,
        user_table: str = 'user',
        user_filter_column: str = 'email') -> bool:
    """Verify *plain_pass* (with the app SALT appended, mirroring
    generate_password) against the stored crypt() hash.

    :param request: Sanic request; provides the asyncpg pool at request.app.pg.
    :param plain_pass: clear-text password supplied by the user.
    :param user_table: table name (string-formatted into SQL -- trusted input only).
    :param user_ident: value matched against *user_filter_column*.
    :param user_filter_column: column used to locate the user row.
    :return: True when exactly one matching row verifies; False otherwise or on error.
    """
    # crypt(candidate, stored_hash) reproduces the stored hash only when the
    # password matches, so the count comparison is the verification.
    # noinspection SqlResolve
    query_pass_checker = '''
    SELECT 1::bigint = (SELECT count(*) FROM public.{} WHERE {} = $1::VARCHAR
    AND password = crypt($2::VARCHAR, password)) AS pass_correct;
    '''.format(user_table, user_filter_column)

    ret_val = False

    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchval(query_pass_checker, user_ident, plain_pass + SALT)
            if row is not None:
                ret_val = row
    except Exception as guid_err:
        logger.error('user.check_password erred with {}'.format(guid_err))

    # print('THIS IS THE DATA CHECK PASSWORD: {}'.format(ret_val))
    return ret_val
async def update_password(
        request,
        user_ident,
        crypted_password: str,
        user_table: str = 'user',
        user_filter_column: str = 'email') -> bool:
    """ Update password method. Stores an already-crypted hash (as produced
    by generate_password), not clear text.

    :param request: Sanic request; provides the asyncpg pool at request.app.pg.
    :param user_ident: value matched against *user_filter_column*.
    :param crypted_password: the crypt()-hashed password to store.
    :param user_table: table name (string-formatted into SQL -- trusted input only).
    :param user_filter_column: column used to locate the user row.
    :return: True when a row was updated; False otherwise or on error.
    """
    # noinspection SqlResolve
    query_update_password = '''
    UPDATE public.{}
    SET password = $2
    WHERE {} = $1 RETURNING TRUE;
    '''.format(user_table, user_filter_column)

    ret_val = False

    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchval(query_update_password, user_ident, crypted_password)
            if row is not None:
                ret_val = row
    except Exception as guid_err:
        logger.error('user.update_password erred with {}'.format(guid_err))

    # print('THIS IS THE DATA CHECK PASSWORD: {}'.format(ret_val))
    return ret_val
async def remove_user_from_redis(
        request,
        user_id: int = 0) -> bool:
    """Evict the cached `user:<id>` entry from redis for the given user.

    :param request: Sanic request; provides redis at request.app.redis.
    :param user_id: id of the user to evict.
    :return: True when the user exists and the key was deleted, else False.
    """
    db_user = await get_user_by_id(request, user_id)
    if not db_user:
        return False
    await request.app.redis.delete('user:{}'.format(db_user.get('user_id')))
    return True
async def change_lock_state(
        request,
        user_ident,
        lock_state: bool = False,
        user_table: str = 'user',
        user_filter_column: str = 'id') -> bool:
    """Set the `locked` flag on a user row; on unlock, evict the user from
    the redis cache so stale state is not served.

    :param request: Sanic request; provides asyncpg pool and redis.
    :param user_ident: value matched against *user_filter_column*.
    :param lock_state: new value for the `locked` column.
    :param user_table: table name (string-formatted into SQL -- trusted input only).
    :param user_filter_column: column used to locate the user row.
    :return: the updated row record (truthy) on success, False otherwise.
    """
    # noinspection SqlResolve
    query_change_lock_state = '''
    UPDATE public.{}
    SET locked = $2
    WHERE {} = $1 RETURNING *;
    '''.format(user_table, user_filter_column)

    ret_val = False

    try:
        async with request.app.pg.acquire() as connection:
            row = await connection.fetchrow(query_change_lock_state, user_ident, lock_state)
            if row is not None:
                if lock_state is False:
                    # Unlocking: drop any cached copy of the user.
                    await remove_user_from_redis(request, row.get('id'))
                # NOTE: despite the -> bool annotation, the row record itself
                # is returned (truthy); kept for backward compatibility.
                ret_val = row
    except Exception as guid_err:
        # BUGFIX: log under this function's own name (the original message was
        # copy-pasted from update_password).
        logger.error('user.change_lock_state erred with {}'.format(guid_err))

    return ret_val
async def check_unique_by_column_name(
        request,
        user_ident: object = None,
        user_table: str = 'user',
        user_filter_column: str = 'email') -> bool:
    """Return True when no non-deleted row holds *user_ident* in the column.

    :param request: Sanic request; provides the asyncpg pool at request.app.pg.
    :param user_ident: candidate value whose uniqueness is being checked.
    :param user_table: table name (string-formatted into SQL -- trusted input only).
    :param user_filter_column: column to check for duplicates.
    :return: True when the value is unused; False on a match or on error.
    """
    # noinspection SqlResolve
    unique_column_value_query = '''
    SELECT COUNT(*)
    FROM public.{} WHERE {} = $1::VARCHAR AND deleted is FALSE;
    '''.format(user_table, user_filter_column)

    try:
        async with request.app.pg.acquire() as connection:
            match_count = await connection.fetchval(unique_column_value_query, user_ident)
    except Exception as guid_err:
        logger.error('user.check_unique_by_column_name erred with {}'.format(guid_err))
        return False

    return match_count == 0
async def update_user_in_redis(
        request, user_id: int = 0) -> bool:
    """Refresh the cached `user:<id>` JSON blob with current scopes and ACL.

    :param request: Sanic request; provides pg pool and redis on request.app.
    :param user_id: id of the user to refresh.
    :return: True when the user exists and the cache was written, else False.
    """
    ret_val = False
    db_user = await get_user_by_id(request, user_id)

    if db_user:
        # Embed the user's permissions ('scopes') and accessible modules
        # ('acl') into the cached blob so auth checks avoid DB round trips.
        scopes = await get_permission_list_for_user(request, db_user.get('user_id'))
        module_list = await get_permission_module_list_by_user_id(request, user_id=db_user.get('user_id'))
        # logger.info('THESE ARE SCOPES: {}'.format(scopes))
        if scopes:
            db_user.update({'scopes': [x.get('permission') for x in scopes]})
        if module_list:
            db_user.update({'acl': [x.get('module_name') for x in module_list]})
        await request.app.redis.set('user:{}'.format(
            db_user.get('user_id')), ujson.dumps(db_user))
        ret_val = True

    return ret_val
async def generate_new_access_token(
        request: Request,
        user_id: int = 0):
    """Build a fresh access token embedding the user's scopes and ACL.

    :param request: Sanic request; the auth backend lives at request.app.auth.
    :param user_id: id of the user the token is issued for.
    :return: the access token produced by request.app.auth.generate_access_token.
    """
    user = await get_user_by_id(request, user_id=user_id)
    scopes = await get_permission_list_for_user(request, user_id=user_id)
    module_list = await get_permission_module_list_by_user_id(
        request, user_id=user_id)
    # Same enrichment as update_user_in_redis: permissions + module ACL.
    if scopes:
        user.update({'scopes': [x.get('permission') for x in scopes]})
    if module_list:
        user.update({'acl': [x.get('module_name') for x in module_list]})
    token = await request.app.auth.generate_access_token(user)
    return token
|
class WordCounter(dict):
    """A dict keyed by word that tracks how many times each word was added."""

    def add(self, word):
        """Increment the count for *word* and return the new count."""
        new_count = dict.get(self, word, 0) + 1
        self[word] = new_count
        return new_count

    def get(self, word):
        """Return the current count for *word* (0 when never added).

        Note: intentionally overrides dict.get with a single-argument form,
        matching the original interface.
        """
        return self[word] if word in self else 0
#def clear(self):
# # empty the dictionary
# self.clear()
#wordCounter = WordCounter()
#assert wordCounter.add('the') == 1
#assert wordCounter.add('The') == 1
#assert wordCounter.get('test') == 0
#assert wordCounter.get('the') == 1
#assert wordCounter.get('The') == 1
#assert wordCounter.add('The') == 2
#assert wordCounter.get('The') == 2 |
import os
import glob
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import hdr_writer as hdr
def draw_map(input_file, pft = False, fun = 'mean'):
    """Render a South-America map (polyconic projection) of one output file.

    Reads 12 monthly layers from *input_file*, masks nodata (-9999.0),
    aggregates over the time axis and saves `<prefix>.png`.

    :param input_file: binary output file ('<var>.bin', or '<var>.<pft>.bin'
        when *pft* is True).
    :param pft: True when the filename carries a PFT id as a second dot-field
        (the id is appended to the output prefix).
    :param fun: 'mean' averages the 12 layers; any other value sums them.
    """
    dt = hdr.catch_data(input_file, 12, 120, 160)
    mask = dt == -9999.0
    dta = np.ma.masked_array(dt, mask)
    # Collapse the 12 monthly layers into a single 2-D field.
    if fun == 'mean':
        dta = np.mean(dta, axis=0,)
    else:
        dta = np.sum(dta, axis=0,)

    # 0.5-degree grid matching the 160x120 data window.
    lats = np.arange(-57.5, 22.75, 0.5)
    lons = np.arange(-89.75, -29.75, 0.5)
    lons, lats = np.meshgrid(lons, lats)

    prelen = len(input_file.split('.')[0])
    if not pft:
        prefix = input_file.split('.')[0]
    else:
        prefix = input_file.split('.')[0] + input_file.split('.')[1]

    # Pick the unit label from the variable name (first dot-field).
    big_list = ['npp','hr','ar','ph','rm','rg','rms','rml',
                'rmf','rgs','rgl','rgf','bf','bl','bw','asd',
                'asdd','dasd','asdasd',]
    print(prefix[0:prelen])
    if prefix[0:prelen] in big_list:
        units = 'kgC/m2/y'
    elif prefix[0:prelen] in ['clit','csoil','cleaf','cawood','cfroot']:
        units = 'kgC/m2'
    elif prefix[0:prelen] in ['lai','sadasd']:
        units = 'm2m2'
    elif prefix[0:prelen] in ['wsoil','runom']:
        units = 'kg/m2'
    elif prefix[0:prelen] in ['rcm','asdf']:
        units = 's/m'
    elif prefix[0:prelen] in ['evaptr','et']:
        units = 'kg/m2/day'
    else:
        units = 'missing unit'

    # Polyconic basemap specified by lat/lon corners and a central point;
    # area_thresh=500 skips coastline features smaller than 500 km^2.
    m = Basemap(llcrnrlon=-105.,llcrnrlat=-53.,urcrnrlon=-30.,urcrnrlat=21.,
                resolution='c',area_thresh=500.,projection='poly',
                lat_0=0.,lon_0=-60.)
    m.drawcoastlines()
    im1 = m.pcolormesh(lons,lats,np.flipud(dta),shading='flat',cmap=plt.cm.jet,latlon=True)
    m.colorbar(im1,"right", size="10%", pad="1%")
    # Parallels, meridians and political borders for orientation.
    m.drawparallels(np.arange(-80.,81.,20.))
    m.drawmeridians(np.arange(-180.,181.,20.))
    m.drawmapboundary(fill_color='gray')
    m.drawcountries()
    plt.title(" %s %s" %(prefix, units))
    # BUGFIX: the figure was previously written through a file object opened
    # in *text* mode ('w'), which corrupts binary PNG output on Python 3;
    # pass the path to savefig and let matplotlib manage the file.
    plt.savefig(prefix + '.png', dpi=350)
    plt.close()
def draw_map2(input_file, layer):
    """Render one PFT layer of a 7-layer output variable and save it as PNG.

    :param input_file: binary file holding 7 PFT layers of 120x160 data.
    :param layer: 0-based layer index; title and filename use layer + 1.
    """
    dt = hdr.catch_data(input_file, 7, 120, 160)
    dt = dt[layer]
    mask = dt == -9999.0
    dta = np.ma.masked_array(dt, mask)

    # 0.5-degree grid matching the 160x120 data window.
    lats = np.arange(-57.50, 22.75, 0.5)
    lons = np.arange(-89.75, -29.75, 0.5)
    lons, lats = np.meshgrid(lons, lats)

    # Pick the unit label from the variable name (filename prefix).
    prefix = input_file.split('.')[0]
    big_list = ['npp','hr','ar','ph','rm','rg','rms','rml',
                'rmf','rgs','rgl','rgf','bf','bl','bw','asd',
                'asdd','dasd','asdasd',]
    if prefix in big_list:
        units = 'kgC/m2/y'
    elif prefix in ['clit','csoil','cleaf','cawood','cfroot']:
        units = 'kgC/m2'
    elif prefix in ['lai','sadasd']:
        units = 'm2m2'
    elif prefix in ['wsoil','runom']:
        units = 'kg/m2'
    elif prefix in ['rcm','asdf']:
        units = 's/m'
    elif prefix in ['evaptr','et']:
        units = 'kg/m2/day'
    else:
        units = 'missing unit'

    # Polyconic basemap specified by lat/lon corners and a central point;
    # area_thresh=500 skips coastline features smaller than 500 km^2.
    m = Basemap(llcrnrlon=-105.,llcrnrlat=-53.,urcrnrlon=-30.,urcrnrlat=21.,
                resolution='c',area_thresh=500.,projection='poly',
                lat_0=0.,lon_0=-60.)
    m.drawcoastlines()
    im1 = m.pcolormesh(lons,lats,np.flipud(dta),shading='flat',cmap=plt.cm.jet,latlon=True)
    m.colorbar(im1,"right", size="10%", pad="1%")
    m.drawparallels(np.arange(-80.,81.,20.))
    m.drawmeridians(np.arange(-180.,181.,20.))
    m.drawmapboundary(fill_color='gray')
    m.drawcountries()
    plt.title(" %s - PFT %d %s" %(prefix,layer + 1,units))
    # BUGFIX: write via savefig(path) instead of a text-mode ('w') file
    # handle, which corrupted the binary PNG stream on Python 3.
    plt.savefig(prefix + '_' + str(layer+1) + '.png', dpi=350)
    plt.close()
def main():
    """Render maps for every '.bin' output: time-aggregated maps for most
    files, one map per PFT layer for the 7-layer files, then the per-PFT
    outputs in the sibling directory."""
    os.chdir('../outputs')
    files = sorted(glob.glob1(os.getcwd(), '*.bin'))
    files.remove('ambientais.bin')
    #print(files)
    # Files at these fixed positions hold 7 PFT layers -- TODO confirm the
    # indexes still match once any new output files are added.
    seven_layers = [files[1],files[2],files[3],files[7]]
    #print(seven_layers)
    for f in files:
        if f not in seven_layers:
            draw_map(f)
    for f in seven_layers:
        for i in range(7):
            draw_map2(f, i)
    # Per-PFT outputs; biomass pools (bl/bw/bf) are summed over the year
    # instead of averaged.
    os.chdir('../outputs_pft')
    files = sorted(glob.glob1(os.getcwd(), '*.bin'))
    not_first = ['bl','bw','bf']
    print(files)
    for f in files:
        pft_id = f.split('.')[1]  # NOTE(review): unused -- kept as-is
        varname = f.split('.')[0]
        if varname not in not_first:
            draw_map(f,True)
        elif varname in not_first:
            draw_map(f,True, 'sum')
if __name__ == '__main__':
    main()
    # main() leaves the CWD in ../outputs_pft; return to the utils dir.
    os.chdir('../utils')
|
#!/usr/bin/env python
#coding:utf8
from . import analysis
from analysis.analyse import AnalyseUtils
from flask import render_template, request
from models import NodeUtils, LinkUtils
# Route handlers below render HTML pages for the given routes; the project id
# comes directly from the client URL (blueprint URL rule).
@analysis.route('/demo_force',methods=['GET','POST'])
def demo_force(projectId):
    """Render the force-directed graph demo page for one project."""
    return render_template('analysis_pages/demo_force.html', navId = "demoforce", projectId=projectId)
@analysis.route('/demo_image',methods=['GET','POST'])
def demo_image(projectId):
    """Render the image demo page for one project."""
    return render_template('analysis_pages/demo_image.html', navId = "demoimage", projectId=projectId)
@analysis.route('/degree_distribute',methods=['GET','POST'])
def degree(projectId):
    """Render the degree-distribution analysis page for one project."""
    analyse_utils = AnalyseUtils(projectId)
    matrix = analyse_utils.degree_distribution()  # 2-D matrix of the degree distribution
    cdd = analyse_utils.cumulative_degree_distribution()  # cumulative degree distribution
    dl = analyse_utils.degree_of_people()  # degree of each person
    nd = analyse_utils.shortest_path()  # passed as 'networkdiameter' -- presumably the network diameter; verify against AnalyseUtils
    nodes = NodeUtils.getAllNodes(projectId)  # for the chord diagram
    edges = LinkUtils.getAllLinks(projectId)  # for the chord diagram
    return render_template('analysis_pages/degree_distribute.html', navId = "degreedistribute", projectId=projectId,
                           matrix = matrix, cumulative_degree = cdd, degreedict = dl, networkdiameter = nd, nodes = nodes, links = edges)
# Dynamic route that serves the people-list page.
@analysis.route('/peoplelist', methods=['GET', 'POST'])
def getGraph(projectId):
    """Render the people-list page with every person's attributes."""
    nodes = NodeUtils.getAllNodes(projectId)  # all person nodes with attributes
    return render_template('analysis_pages/peoplelist.html', nodes = nodes, navId = "peoplelist", projectId=projectId)
# Recompute the community partition.
@analysis.route('/calculate_communities', methods=['GET'])
def calculateCommunities(projectId):
    """Re-run community detection for the project; responds with an empty body."""
    analysis_utils = AnalyseUtils(projectId)
    analysis_utils.calculate_communities()  # random-walk community detection
    return ''
#!/usr/bin/env python
import subprocess
import sys
import argparse
import glob
import os
def call_process(cmd):
    """Run *cmd* (an argv list), capture its output, and report failures.

    Returns a tuple (stdout with trailing whitespace stripped, stderr,
    returncode); on a non-zero exit the failure is also printed.
    """
    print("cmd: %s" % cmd)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        print("Failed: %s: %s" % (cmd, err))
    return (out.rstrip(), err, proc.returncode)
def create_permanode(*args):
    """Run `camput permanode` with any extra arguments appended.

    Returns the (stdout, stderr, returncode) triple from call_process;
    stdout holds the new permanode ref on success.
    """
    return call_process(['camput', 'permanode'] + list(args))
def put_attr(permanode, *args):
    """Run `camput attr --add <permanode> ...` to attach attribute values."""
    return call_process(['camput', 'attr', '--add', permanode] + list(args))
def put_file(f):
    # Upload a single file; stdout of `camput file` is the file's blob ref.
    return call_process(['camput', 'file', f])
def main():
    """Upload a glob of pictures to camlistore as a titled album.

    Creates a folder permanode titled with --title, then for each file
    matching --glob-path: uploads the file, wraps it in its own permanode,
    and links it into the folder via a camliPath attribute.
    """
    argparser = argparse.ArgumentParser(description='upload pics to camlistore')
    argparser.add_argument("-g", "--glob-path", required=True, action="store", help="path (and glob) -- e.g., '/home/pics/*jpeg'")
    argparser.add_argument("-t", "--title", required=True, action="store", help="title of album")
    args = argparser.parse_args()
    album_title = args.title
    glob_path = args.glob_path

    # FIXME: find if it actually exists before duplicating:
    folder_permanode, stderr, rc = create_permanode()
    if rc != 0:
        sys.exit(1)
    stdout, stderr, rc = put_attr(folder_permanode, "title", album_title)
    if rc != 0:
        sys.exit(1)
    ### END FIXME

    for photo in glob.glob(glob_path):
        # upload file:
        new_file, stderr, rc = put_file(photo)
        if rc != 0:
            continue
        # create permanode for file:
        # FIXME: find if desired permanode already exists before re-creating;
        # BUGFIX: this was a confusing chained assignment
        # (stdout, stderr, rc = pic_permanode, stderr, rc = create_permanode())
        # that bound stdout and pic_permanode to the same value.
        pic_permanode, stderr, rc = create_permanode()
        if rc != 0:
            continue
        # give pic permanode a name:
        stdout, stderr, rc = put_attr(pic_permanode, "title", os.path.basename(photo))
        if rc != 0:
            continue
        # associate permanode with pic file:
        stdout, stderr, rc = put_attr(pic_permanode, "camliContent", new_file)
        if rc != 0:
            continue
        # link the pic into the folder under its basename:
        stdout, stderr, rc = put_attr(folder_permanode, "camliPath:%s" % os.path.basename(photo) , pic_permanode)
        if rc != 0:
            continue
# Script entry point.
if __name__ == '__main__':
    main()
|
from __future__ import division
from __future__ import print_function
import sys
import os
import torch
import numpy as np
from torch.autograd import Variable
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.utils.model_zoo as model_zoo
from torchvision import datasets, transforms
from loadModelMNIST import *
import pickle
import PIL
#import matplotlib.pyplot as plt
from ProjectImageHandler import *
class pickleDataset(torch.utils.data.Dataset):
    """Dataset backed by a pickled array of the form
    [ [ [train_img][train_labels] ] [ [test_img][test_labels] ] ]."""

    def __init__(self, pickle_file, root_dir="./data", train=False, transform=None):
        """
        :param pickle_file: path to the pickled array described above.
        :param root_dir: stored for bookkeeping; not otherwise used here.
        :param train: True selects split index 0 (train), False index 1 (test).
        :param transform: optional transform applied per item in __getitem__.
        """
        self.root_dir = root_dir
        self.transform = transform
        # BUGFIX: open the pickle in binary mode and close the handle via a
        # context manager -- the original `pickle.load(open(pickle_file))`
        # leaked a text-mode file object, which breaks pickle.load on Python 3.
        with open(pickle_file, "rb") as handle:
            pickle_data = pickle.load(handle)
        self.validate_correct_data(pickle_data)
        self.pDataset = []
        i = int(not train)  # index 0 = Train, 1 = test
        for j in range(0, len(pickle_data[i][0])):
            img = PIL.Image.fromarray(pickle_data[i][0][j])
            label = pickle_data[i][1][j]
            self.pDataset.append([img, label])

    def validate_correct_data(self, pickle_data):
        """Sanity-check that a PIL -> tensor -> PIL round trip is lossless
        for the first training image; exit the process otherwise."""
        a = np.copy(pickle_data[0][0][0])
        image = PIL.Image.fromarray(pickle_data[0][0][0])
        ih = ProjectImageHandler()  # NOTE(review): constructed but unused -- kept as-is
        tensorImage = transforms.ToTensor()(image)
        pilImage = transforms.ToPILImage()(tensorImage)
        b = np.array(pilImage)
        if not (a == b).all():
            print("Error: image data may have a different encoding or something")
            exit()

    def __len__(self):
        return len(self.pDataset)

    def __getitem__(self, idx):
        img, label = self.pDataset[idx]
        if self.transform:
            img = self.transform(img)
        return [img, label]
if __name__ == "__main__":
    pickle_file = "data/mnist_blurred_2.p"
    # Normalization constants (presumably the standard MNIST mean/std --
    # TODO confirm they match this blurred dataset).
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.1307,), (0.3081,))
                                    ]);
    #The array in the pickle_file is assumed to be of the form:
    # [ [ [train_img][train_labels] ] [ [test_img][test_labels] ] ]
    pd = pickleDataset(pickle_file,train=False,transform=transform)
    #sample = pd[0]
    #print(np.array(pd.pDataset[0][0]))
    #print(pd.pDataset[0][1])
    #print(sample[0])
    #print(sample[1])
|
import datetime

# Append the current timestamp as one line to the log file.
fname = "test.py.log"
print(f"Append a line to {fname}")
with open(fname, "a") as file:
    file.write(str(datetime.datetime.now()))
    file.write("\n")
|
from django.test import override_settings
from colossus.apps.campaigns.tests.factories import EmailFactory, LinkFactory
from colossus.apps.lists.tests.factories import MailingListFactory
from colossus.apps.subscribers.constants import ActivityTypes
from colossus.apps.subscribers.models import Subscriber
from colossus.test.testcases import TestCase
from .factories import SubscriberFactory
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
class SubscriberOpenEmailTests(TestCase):
    """Open tracking: Subscriber.open() must update unique/total open counts
    on the email and campaign, plus open rates on subscriber and list."""

    def setUp(self):
        # Two subscribers on the same list, both with a SENT activity for the
        # campaign email so opens can be attributed.
        mailing_list = MailingListFactory()
        self.email = EmailFactory()
        self.subscriber_1 = SubscriberFactory(mailing_list=mailing_list)
        self.subscriber_1.create_activity(ActivityTypes.SENT, email=self.email)  # mock email sent activity
        self.subscriber_2 = SubscriberFactory(mailing_list=mailing_list)
        self.subscriber_2.create_activity(ActivityTypes.SENT, email=self.email)  # mock email sent activity

    def test_open_rate_updated(self):
        """An open propagates to the subscriber's and mailing list's open_rate."""
        self.assertEqual(0.0, self.subscriber_1.open_rate)
        self.subscriber_1.open(self.email)
        self.subscriber_1.refresh_from_db()
        self.subscriber_1.mailing_list.refresh_from_db()
        self.assertEqual(1.0, self.subscriber_1.open_rate)
        # two subscribers, one with open_rate = 1.0 other with open_rate = 0.0, expected mailing list open_rate = 0.5
        self.assertEqual(0.5, self.subscriber_1.mailing_list.open_rate)

    def test_open_email_once(self):
        """One open: unique and total counts both become 1."""
        self.subscriber_1.open(self.email)
        self.email.refresh_from_db()
        self.email.campaign.refresh_from_db()
        self.assertEqual(1, self.email.campaign.unique_opens_count)
        self.assertEqual(1, self.email.campaign.total_opens_count)
        self.assertEqual(1, self.email.unique_opens_count)
        self.assertEqual(1, self.email.total_opens_count)
        self.assertEqual(1, self.subscriber_1.activities.filter(activity_type=ActivityTypes.OPENED).count())

    def test_open_email_twice(self):
        """Repeat open by one subscriber: total increments, unique does not."""
        self.subscriber_1.open(self.email)
        self.subscriber_1.open(self.email)
        self.email.refresh_from_db()
        self.email.campaign.refresh_from_db()
        self.assertEqual(1, self.email.campaign.unique_opens_count)
        self.assertEqual(2, self.email.campaign.total_opens_count)
        self.assertEqual(1, self.email.unique_opens_count)
        self.assertEqual(2, self.email.total_opens_count)
        self.assertEqual(2, self.subscriber_1.activities.filter(activity_type=ActivityTypes.OPENED).count())

    def test_two_subscribers_open_email_once(self):
        """Opens by distinct subscribers increment unique and total alike."""
        self.subscriber_1.open(self.email)
        self.subscriber_2.open(self.email)
        self.email.refresh_from_db()
        self.email.campaign.refresh_from_db()
        self.assertEqual(2, self.email.campaign.unique_opens_count)
        self.assertEqual(2, self.email.campaign.total_opens_count)
        self.assertEqual(2, self.email.unique_opens_count)
        self.assertEqual(2, self.email.total_opens_count)
        self.assertEqual(1, self.subscriber_1.activities.filter(activity_type=ActivityTypes.OPENED).count())
        self.assertEqual(1, self.subscriber_2.activities.filter(activity_type=ActivityTypes.OPENED).count())
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
class SubscriberClickLinkTests(TestCase):
    """Click tracking: Subscriber.click() must update unique/total click
    counts on the link and campaign, plus click rates on subscriber and list."""

    def setUp(self):
        # Two subscribers on the same list, both with a SENT activity for the
        # link's email so clicks can be attributed.
        mailing_list = MailingListFactory()
        self.link = LinkFactory()
        self.subscriber_1 = SubscriberFactory(mailing_list=mailing_list)
        self.subscriber_1.create_activity(ActivityTypes.SENT, email=self.link.email)  # mock email sent activity
        self.subscriber_2 = SubscriberFactory(mailing_list=mailing_list)
        self.subscriber_2.create_activity(ActivityTypes.SENT, email=self.link.email)  # mock email sent activity

    def test_click_rate_update(self):
        """A click propagates to the subscriber's and mailing list's click_rate."""
        self.assertEqual(0.0, self.subscriber_1.click_rate)
        self.subscriber_1.click(self.link)
        self.subscriber_1.refresh_from_db()
        self.subscriber_1.mailing_list.refresh_from_db()
        self.assertEqual(1.0, self.subscriber_1.click_rate)
        # two subscribers, one with click_rate = 1.0 other with click_rate = 0.0 expected mailing list click_rate = 0.5
        self.assertEqual(0.5, self.subscriber_1.mailing_list.click_rate)

    def test_click_link_once(self):
        """One click: unique and total counts both become 1."""
        self.subscriber_1.click(self.link)
        self.link.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(1, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(1, self.link.total_clicks_count)
        self.assertEqual(1, self.subscriber_1.activities.filter(activity_type=ActivityTypes.CLICKED).count())

    def test_click_link_twice(self):
        """Repeat click by one subscriber: total increments, unique does not."""
        self.subscriber_1.click(self.link)
        self.subscriber_1.click(self.link)
        self.link.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(2, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(2, self.link.total_clicks_count)
        self.assertEqual(2, self.subscriber_1.activities.filter(activity_type=ActivityTypes.CLICKED).count())

    def test_two_subscribers_click_link_once(self):
        """Clicks by distinct subscribers increment unique and total alike."""
        self.subscriber_1.click(self.link)
        self.subscriber_2.click(self.link)
        self.link.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        self.assertEqual(2, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(2, self.link.email.campaign.total_clicks_count)
        self.assertEqual(2, self.link.unique_clicks_count)
        self.assertEqual(2, self.link.total_clicks_count)
        self.assertEqual(1, self.subscriber_1.activities.filter(activity_type=ActivityTypes.CLICKED).count())
        self.assertEqual(1, self.subscriber_2.activities.filter(activity_type=ActivityTypes.CLICKED).count())

    def test_click_two_links_same_email(self):
        """Clicks on different links of one email: campaign unique counts the
        subscriber once, per-link counts stay independent."""
        link_2 = LinkFactory(email=self.link.email)
        self.subscriber_1.click(self.link)
        self.subscriber_1.click(link_2)
        self.link.refresh_from_db()
        link_2.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(2, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(1, self.link.total_clicks_count)
        self.assertEqual(1, link_2.unique_clicks_count)
        self.assertEqual(1, link_2.total_clicks_count)
        self.assertEqual(2, self.subscriber_1.activities.filter(activity_type=ActivityTypes.CLICKED).count())
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
class SubscriberClickLinkForceOpenTests(TestCase):
    """Verify that `Subscriber.click` forces an implicit email open.

    A click proves the email was seen, so the FIRST click must also record
    an OPENED activity and bump the open counters; later clicks must not
    re-trigger the implicit open. Celery tasks run eagerly so the counter
    updates happen synchronously inside the test.
    """
    def setUp(self):
        mailing_list = MailingListFactory()
        self.link = LinkFactory()
        self.subscriber = SubscriberFactory(mailing_list=mailing_list)
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.link.email) # mock email sent activity
    def test_click_without_open(self):
        """
        Test clicking on a link without opening the email first
        The `click` method should enforce the email opening
        """
        self.subscriber.click(self.link)
        # refresh models
        self.link.refresh_from_db()
        self.link.email.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        # checks for click counts
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(1, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(1, self.link.total_clicks_count)
        self.assertEqual(1, self.subscriber.activities.filter(activity_type=ActivityTypes.CLICKED).count())
        # checks for open counts -- the click implies exactly one open
        self.assertEqual(1, self.link.email.campaign.unique_opens_count)
        self.assertEqual(1, self.link.email.campaign.total_opens_count)
        self.assertEqual(1, self.link.email.unique_opens_count)
        self.assertEqual(1, self.link.email.total_opens_count)
        self.assertEqual(1, self.subscriber.activities.filter(activity_type=ActivityTypes.OPENED).count())
    def test_click_twice_without_open(self):
        """
        Test clicking on a link twice without opening the email first
        Only the first `click` method should trigger the email opening
        """
        self.subscriber.click(self.link) # trigger `open` method
        self.subscriber.click(self.link) # this time it should not trigger the `open` method
        # refresh models
        self.link.refresh_from_db()
        self.link.email.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        # checks for click counts
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(2, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(2, self.link.total_clicks_count)
        self.assertEqual(2, self.subscriber.activities.filter(activity_type=ActivityTypes.CLICKED).count())
        # checks for open counts -- only the first click opened the email
        self.assertEqual(1, self.link.email.campaign.unique_opens_count)
        self.assertEqual(1, self.link.email.campaign.total_opens_count)
        self.assertEqual(1, self.link.email.unique_opens_count)
        self.assertEqual(1, self.link.email.total_opens_count)
        self.assertEqual(1, self.subscriber.activities.filter(activity_type=ActivityTypes.OPENED).count())
    def test_open_once_click_twice(self):
        """
        Interleave click / explicit open / click: two total opens are
        recorded (one implicit from the first click, one explicit), but
        the unique open count stays at 1.
        """
        self.subscriber.click(self.link)
        self.subscriber.open(self.link.email)
        self.subscriber.click(self.link)
        # refresh models
        self.link.refresh_from_db()
        self.link.email.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        # checks for click counts
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(2, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(2, self.link.total_clicks_count)
        self.assertEqual(2, self.subscriber.activities.filter(activity_type=ActivityTypes.CLICKED).count())
        # checks for open counts
        self.assertEqual(1, self.link.email.campaign.unique_opens_count)
        self.assertEqual(2, self.link.email.campaign.total_opens_count)
        self.assertEqual(1, self.link.email.unique_opens_count)
        self.assertEqual(2, self.link.email.total_opens_count)
        self.assertEqual(2, self.subscriber.activities.filter(activity_type=ActivityTypes.OPENED).count())
    def test_open_twice_click_twice(self):
        """
        Explicit open before each click: the clicks must NOT add extra
        implicit opens (totals stay at 2 opens / 2 clicks, uniques at 1).
        """
        self.subscriber.open(self.link.email)
        self.subscriber.click(self.link)
        self.subscriber.open(self.link.email)
        self.subscriber.click(self.link)
        # refresh models
        self.link.refresh_from_db()
        self.link.email.refresh_from_db()
        self.link.email.campaign.refresh_from_db()
        # checks for click counts
        self.assertEqual(1, self.link.email.campaign.unique_clicks_count)
        self.assertEqual(2, self.link.email.campaign.total_clicks_count)
        self.assertEqual(1, self.link.unique_clicks_count)
        self.assertEqual(2, self.link.total_clicks_count)
        self.assertEqual(2, self.subscriber.activities.filter(activity_type=ActivityTypes.CLICKED).count())
        # checks for open counts
        self.assertEqual(1, self.link.email.campaign.unique_opens_count)
        self.assertEqual(2, self.link.email.campaign.total_opens_count)
        self.assertEqual(1, self.link.email.unique_opens_count)
        self.assertEqual(2, self.link.email.total_opens_count)
        self.assertEqual(2, self.subscriber.activities.filter(activity_type=ActivityTypes.OPENED).count())
class SubscriberUpdateOpenRateTests(TestCase):
    """Tests for `Subscriber.update_open_rate`.

    The rate is distinct-OPENED-emails / distinct-SENT-emails, rounded to
    four decimal places, persisted on the subscriber, and returned.
    """
    def setUp(self):
        self.subscriber = SubscriberFactory()
        self.email = EmailFactory()
    def test_open_rate_persistence(self):
        """The computed rate must be saved to the database, not just returned."""
        self.assertEqual(0.0, Subscriber.objects.get(pk=self.subscriber.pk).open_rate)
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.OPENED, email=self.email)
        self.subscriber.update_open_rate()
        self.assertEqual(1.0, Subscriber.objects.get(pk=self.subscriber.pk).open_rate)
    def test_division_by_zero(self):
        """
        Test if the the code is handling division by zero.
        There should never be an OPENED activity without a SENT activity (thus not being possible to have a
        division by zero). But just in case.
        """
        self.assertEqual(0.0, self.subscriber.update_open_rate())
    def test_update_open_rate_distinct_count(self):
        """
        Test if the update count is only considering distinct open entries
        (two OPENED activities for the same email count once)
        """
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.OPENED, email=self.email)
        self.subscriber.create_activity(ActivityTypes.OPENED, email=self.email)
        self.assertEqual(1.0, self.subscriber.update_open_rate())
    def test_open_without_sent(self):
        """
        Test open count without sent activity
        This should not happen under normal circumstances
        """
        self.subscriber.create_activity(ActivityTypes.OPENED, email=self.email)
        self.assertEqual(0.0, self.subscriber.update_open_rate())
    def test_sent_without_open(self):
        """One email sent, never opened: rate is 0.0."""
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.assertEqual(0.0, self.subscriber.update_open_rate())
    def test_update_open_rate_50_percent(self):
        """Two emails sent, one opened: rate is 0.5."""
        self.subscriber.create_activity(ActivityTypes.SENT, email=EmailFactory())
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.OPENED, email=self.email)
        self.assertEqual(0.5, self.subscriber.update_open_rate())
    def test_round_percentage(self):
        """1/3 must be rounded to four decimal places (0.3333)."""
        self.subscriber.create_activity(ActivityTypes.SENT, email=EmailFactory())
        self.subscriber.create_activity(ActivityTypes.SENT, email=EmailFactory())
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.OPENED, email=self.email)
        self.assertEqual(0.3333, self.subscriber.update_open_rate())
class SubscriberUpdateClickRateTests(TestCase):
    """Tests for `Subscriber.update_click_rate`.

    Mirrors SubscriberUpdateOpenRateTests with CLICKED activities: rate is
    distinct-CLICKED-emails / distinct-SENT-emails, rounded to four decimal
    places, persisted, and returned.
    """
    def setUp(self):
        self.subscriber = SubscriberFactory()
        self.email = EmailFactory()
        self.link = LinkFactory(email=self.email)
    def test_click_rate_persistence(self):
        """The computed rate must be saved to the database, not just returned."""
        self.assertEqual(0.0, Subscriber.objects.get(pk=self.subscriber.pk).click_rate)
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.CLICKED, email=self.email, link=self.link)
        self.subscriber.update_click_rate()
        self.assertEqual(1.0, Subscriber.objects.get(pk=self.subscriber.pk).click_rate)
    def test_division_by_zero(self):
        """
        Test if the code is handling division by zero.
        There should never be a CLICKED activity without a SENT activity (thus not being possible to have a
        division by zero). But just in case.
        """
        self.assertEqual(0.0, self.subscriber.update_click_rate())
    def test_update_click_rate_distinct_count(self):
        """
        Test if the update count is only considering distinct click entries
        (two CLICKED activities for the same email/link count once)
        """
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.CLICKED, email=self.email, link=self.link)
        self.subscriber.create_activity(ActivityTypes.CLICKED, email=self.email, link=self.link)
        self.assertEqual(1.0, self.subscriber.update_click_rate())
    def test_open_without_sent(self):
        """
        Test click count without sent activity (method name kept for symmetry
        with the open-rate tests).
        This should not happen under normal circumstances
        """
        self.subscriber.create_activity(ActivityTypes.CLICKED, email=self.email, link=self.link)
        self.assertEqual(0.0, self.subscriber.update_click_rate())
    def test_sent_without_open(self):
        """One email sent, link never clicked: rate is 0.0."""
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.assertEqual(0.0, self.subscriber.update_click_rate())
    def test_update_click_rate_50_percent(self):
        """Two emails sent, one clicked: rate is 0.5."""
        self.subscriber.create_activity(ActivityTypes.SENT, email=EmailFactory())
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.CLICKED, email=self.email, link=self.link)
        self.assertEqual(0.5, self.subscriber.update_click_rate())
    def test_round_percentage(self):
        """1/3 must be rounded to four decimal places (0.3333)."""
        self.subscriber.create_activity(ActivityTypes.SENT, email=EmailFactory())
        self.subscriber.create_activity(ActivityTypes.SENT, email=EmailFactory())
        self.subscriber.create_activity(ActivityTypes.SENT, email=self.email)
        self.subscriber.create_activity(ActivityTypes.CLICKED, email=self.email, link=self.link)
        self.assertEqual(0.3333, self.subscriber.update_click_rate())
|
# -*- coding: utf-8 -*-
# Tutorial script: Python built-in data types, constructors and number types.
print("hello")
print("some basic of python")
'''DATA TYPE
Text Type: str
Numeric Types: int, float, complex
Sequence Types: list, tuple, range
Mapping Type: dict
Set Types: set, frozenset
Boolean Type: bool
Binary Types: bytes, bytearray, memoryview
-----'''
#-------------------------------------------------------------------
#getting the data type of a variable with type()
a=8
b='chakra'
print(type(a)) #this will give int
print(type(b)) #this will give str
ab= True
print(type(ab)) #this will print bool
#more examples:
x=20.5 #set to float, if decimal point
d=1j #set to complex data type
e=["apple", "banana", "cherry"] #List data type
f=("chakra", "ramesh", "ram")# tuple data type
g=range(9) #range data type
h={"acb","bcd","fgfg"}#set data type
i={"name":"ram", "age":55} #dict data type
xy= frozenset({"apple", "banana", "cherry"}) #frozenset
cd= b"Hello" #bytes data type
x = bytearray(5) #bytearray data type
x = memoryview(bytes(5)) #memoryview data type
#--------------------------------------------------------------
#how to set a data type explicitly, using the constructor functions
x = str("Hello World") #str
x = int(20) #int
x = float(20.5) #float
yes = complex(1j) #complex
x = list(("apple", "banana", "cherry")) #list
x = tuple(("apple", "banana", "cherry")) #tuple
x = range(6) #range
x_x = dict(name="John", age=36) #dict
x = set(("apple", "banana", "cherry")) #set
x_ = frozenset(("apple", "banana", "cherry")) #frozenset
x = bool(5) #bool
x = bytes(5) #bytes
x = bytearray(5) #bytearray
x = memoryview(bytes(5)) #memoryview
#------------------------------------------------------------
print(x_x)
print(yes)
print(x_)
#Number types in python
#THREE TYPES - INT, FLOAT, COMPLEX
com=35e4
print(type(com)) #will display float (scientific notation always makes a float)
co=4+7j
print(type(co)) #will print complex
#---------------------------------------------------------------
#TYPE CONVERSION
xax = 1 # int
yy = 2.8 # float
zz=1j # complex
#convert from int to float:
a = float(xax)
#convert from float to int (the decimal part is truncated):
b = int(yy)
#complex() applied to an existing complex number returns an equal complex:
c = complex(zz)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
#### cannot convert a complex number into another numeric type
#----------------------------------------------------------
#RANDOM NUMBER
#python does not have a built-in random() function like some other languages;
#the standard-library `random` module is imported to generate random numbers
import random
print(random.randrange(1,10)) #prints a random number from 1 to 9 (the stop value 10 is excluded)
#----------------------------------------------------------
#CASTING
#INTEGER casting
a=int(1) #will print int 1
print(a)
b=int(1.9) #will print int 1 (truncates toward zero, dropping .9)
print(b)
c=int("7") #the string "7" converted to the int 7
print(c)
#float casting
p=float(1.2) #prints 1.2
print(p)
q=float(1) #prints 1.0 since the int is converted to float
print(q)
r=float("3.9") #prints 3.9 since the string is parsed as a float
print(r)
#string casting
l=str(123)
print(l)
m=str(123.00)
print(m)
n=str("chakra")
print(n)
#-----------------------------------------------------------------------
#PYTHON STRING
#strings are declared with either single quotes or double quotes
a="chakra"
b='chakra'
#both "chakra" and 'chakra' are the same
#multi line string
a="""Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua"""
#OR
a='''Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua'''
#either way is the same, use 3 ' or 3 "
#-------------------------------------------------------------------------
#string and array in python
#python does not have a char data type; a single letter is a length-1 string
chakra="my name is CP"
print(chakra[5]) #will print the character at index 5 (indexing is zero-based)
#slicing string
#prints from index 1 up to (but not including) index 7
print(chakra[1:7])
#negative indexing
#Use negative indexes to start the slice from the end of the string:
#Get the characters from index -5 up to (not including) index -2, i.e. "orl":
b = "Hello, World!"
print(b[-5:-2])
#string length
#prints length of variable b
print(len(b))
#strip() removes whitespace from both the beginning AND the end of the string
a = " Hello, World! "
print(a.strip()) # returns "Hello, World!"
#lower case
ab="BHUTAN"
print(ab.lower())
#change lower to upper case
cd="nepal"
print(cd.upper())
#The replace() method replaces a substring with another string:
#replace H with J
a = "Hello, World!"
print(a.replace("H", "J"))
#The split() method splits the string into substrings if it finds instances of the separator:
#
a = "Hello, World!"
print(a.split(',')) # returns ['Hello', ' World!']
#CHECK STRING
#Check if the phrase "ain" is present in the following text:
#use IN or NOT IN
txt = "The rain in Spain stays mainly in the plain"
x = "ain" in txt
print(x)
#string concatenation
#adding two strings together using +
a="cp"
b="neopaney"
c=(a+" "+b)
print(c)
#The format() method takes the passed arguments, formats them, and places -
#them in the string where the placeholders {} are:
#Use the format() method to insert numbers into strings:
age = 36
txt = "My name is John, and I am {}"
print(txt.format(age))
#Escape Character
#To insert characters that are illegal in a string, use an escape character.
#An escape character is a backslash \ followed by the character you want to insert.
#An example of an illegal character is a double quote inside a string that is
#surrounded by double quotes:
#The escape character allows you to use double quotes when you normally would not be allowed:
txt = "We are the so-called \"Vikings\" from the north."
print(txt)
#Other escape characters used in Python:
"""
\' Single Quote
\\ Backslash
\n New Line
\r Carriage Return
\t Tab
\b Backspace
\f Form Feed
\ooo Octal value
\xhh Hex value
"""
#STRING METHODS
#capitalize() Converts the first character to upper case
#casefold() Converts string into lower case
#center() Returns a centered string
#count() Returns the number of times a specified value occurs in a string
#encode() Returns an encoded version of the string
#endswith() Returns true if the string ends with the specified value
#expandtabs() Sets the tab size of the string
#find() Searches the string for a specified value and returns the position of where it was found
#format() Formats specified values in a string
#format_map() Formats specified values in a string
#index() Searches the string for a specified value and returns the position of where it was found
#isalnum() Returns True if all characters in the string are alphanumeric
#isalpha() Returns True if all characters in the string are in the alphabet
#isdecimal() Returns True if all characters in the string are decimals
#isdigit() Returns True if all characters in the string are digits
#isidentifier() Returns True if the string is an identifier
#islower() Returns True if all characters in the string are lower case
#isnumeric() Returns True if all characters in the string are numeric
#isprintable() Returns True if all characters in the string are printable
#isspace() Returns True if all characters in the string are whitespaces
#istitle() Returns True if the string follows the rules of a title
#isupper() Returns True if all characters in the string are upper case
#join() Joins the elements of an iterable to the end of the string
#ljust() Returns a left justified version of the string
#lower() Converts a string into lower case
#lstrip() Returns a left trim version of the string
#maketrans() Returns a translation table to be used in translations
#partition() Returns a tuple where the string is parted into three parts
#replace() Returns a string where a specified value is replaced with a specified value
#rfind() Searches the string for a specified value and returns the last position of where it was found
#rindex() Searches the string for a specified value and returns the last position of where it was found
#rjust() Returns a right justified version of the string
#rpartition() Returns a tuple where the string is parted into three parts
#rsplit() Splits the string at the specified separator, and returns a list
#rstrip() Returns a right trim version of the string
#split() Splits the string at the specified separator, and returns a list
#splitlines() Splits the string at line breaks and returns a list
#startswith() Returns true if the string starts with the specified value
#strip() Returns a trimmed version of the string
#swapcase() Swaps cases, lower case becomes upper case and vice versa
#title() Converts the first character of each word to upper case
#translate() Returns a translated string
#upper() Converts a string into upper case
#zfill() Fills the string with a specified number of 0 values at the beginning
|
import requests
import csv
from bs4 import BeautifulSoup
from csv import writer
import re
def textFinder(href):
    """href-filter for BeautifulSoup.find_all: truthy when *href* is a
    non-empty string containing "/text", falsy otherwise.

    Returns `href` itself when it is falsy (None/empty), else the result of
    the regex search (a Match object or None) -- BeautifulSoup only needs
    truthiness, so this contract is fine.
    """
    # re.search with a string pattern uses the module's compiled-pattern
    # cache, so there is no need to call re.compile on every invocation.
    return href and re.search("/text", href)
# Fetch one congress.gov bill page (network I/O) and print the href of every
# anchor whose link matches textFinder, i.e. links leading to bill-text pages.
searchResults = requests.get('https://www.congress.gov/bill/116th-congress/senate-resolution/316?q=%7B%22search%22%3A%5B%22air+pollution%22%5D%7D&s=1&r=1')
soup = BeautifulSoup(searchResults.text, 'html.parser')
for billTextUrl in soup.find_all("a", href=textFinder):
    # print(billTextUrl)
    billTextUrl2 = billTextUrl.get('href')  # the matched anchor's href attribute
    # billTextUrl3 = billTextUrl2['href']
    print(billTextUrl2)
    # print(billTextLink)
import pandas as pd
import re
from datetime import date
import os, glob
# Show every column/row when printing DataFrames (debugging aid).
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
today = date.today()  # used to date-stamp the cleaned output file in main()
root_directory = 'pandemicmap/data/rki_files'  # NOTE(review): defined but unused below; paths are hard-coded
def load_df():
    """Load the most recently created RKI csv file into a DataFrame.

    Picks the newest file (by creation time) from pandemicmap/data/rki_files
    and reads it with ';' as the column separator.
    """
    csv_files = glob.glob('pandemicmap/data/rki_files/*.csv')
    newest = max(csv_files, key=os.path.getctime)
    filename = newest.split('/')[-1]
    return pd.read_csv('pandemicmap/data/rki_files/{}'.format(filename), sep=';')
def clean_df(df):
    """Normalise the raw RKI frame: split combined case/death cells into
    separate columns and rename all columns to English identifiers.

    Cells in 'Bestaetigte Faelle' may look like '1.234 (56)' -- confirmed
    cases plus deaths in parentheses. Rows without a space carry no death
    figure and get '0'. Returns the cleaned DataFrame.
    """
    df = df.drop('Unnamed: 0', axis=1)  # leftover index column from the export
    df = df.drop(16, axis=0)  # NOTE(review): presumably the nationwide total row -- confirm against the source file
    for key, row in df.iterrows():
        # a space in the cell means a '(deaths)' part follows the case count
        if re.search(" ", df.loc[key, 'Bestaetigte Faelle']):
            data = split_bestaetigte_faelle(row['Bestaetigte Faelle'])
            data_elektronisch = split_bestaetigte_faelle(row['Davon elektronisch uebermittelt'])
            df.loc[key,'Bestaetigte Faelle'] = data[0]
            df.loc[key,'Davon elektronisch uebermittelt'] = data_elektronisch[0]
            df.loc[key,'Bestaetigte Todesfaelle'] = data[1]
            df.loc[key,'Bestaetigte Todesfaelle Elektronisch'] = data_elektronisch[1]
        else:
            # no deaths reported for this state
            df.loc[key,'Bestaetigte Todesfaelle'] = '0'
            df.loc[key,'Bestaetigte Todesfaelle Elektronisch'] = '0'
    # English column names for downstream consumers
    df.columns = ['STATE', 'CONFIRMED_CORONA_AMT', 'REPORTED_DIGITAL_CORONA_AMT',
                  'FOCUS_AREAS', 'CONFIRMED_DEATHS',
                  'REPORTED_DIGITAL_DEATHS']
    return df
def split_bestaetigte_faelle(data):
    """Split a combined cell like '1.234 (56)' into ['1234', '56'].

    The first token is the case count (German thousands separator '.'
    removed), the second the death count with its parentheses stripped.
    """
    parts = data.split()
    parts[1] = parts[1].replace('(', '').replace(')', '')
    # BUG FIX: the old check was re.search('.', parts[0]) -- an unescaped dot
    # matches ANY character, so the condition was always true for non-empty
    # strings. Test for a literal thousands-separator dot instead.
    if '.' in parts[0]:
        parts[0] = parts[0].replace('.', '')
    return parts
def main():
    """Run the pipeline: load newest RKI csv, clean it, write the result.

    Output file is date-stamped and written semicolon-separated so it can be
    re-read by load_df (which uses sep=';').
    """
    df = load_df()
    df = clean_df(df)
    out_path = 'pandemicmap/data/rki_files/pandemic_data_clean_{date}.csv'.format(
        date=today.strftime("%Y%m%d"))
    # BUG FIX: sep=';' was previously passed to str.format() -- where it was
    # silently ignored -- instead of to to_csv, so the file was written with
    # commas although the rest of the pipeline expects semicolons.
    df.to_csv(out_path, sep=';')
if __name__ == '__main__':
    main()
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# TODO: Implements IPv4 option list extractor.
import collections
import datetime
# Internet Protocol version 4
# Analyser for IPv4 header
from .ip import IP
from ..utilities import Info, ProtoChain
# TOS (DS Field) Precedence
TOS_PRE = {
'111': 'Network Control',
'110': 'Internetwork Control',
'101': 'CRITIC/ECP',
'100': 'Flash Override',
'011': 'Flash',
'010': 'Immediate',
'001': 'Priority',
'000': 'Routine',
}
# TOS (DS Field) Delay
TOS_DEL = {
'0': 'Normal Delay',
'1': 'Low Delay',
}
# TOS (DS Field) Throughput
TOS_THR = {
'0': 'Normal Throughput',
'1': 'High Throughput',
}
# TOS (DS Field) Relibility
TOS_REL = {
'0': 'Normal Relibility',
'1': 'High Relibility',
}
# TOS ECN FIELD
TOS_ECN = {
'00': 'Not-ECT',
'01': 'ECT(1)',
'10': 'ECT(0)',
'11': 'CE',
}
# QS Functions
QS_FUNC = {
1: 'Quick-Start Request',
2: 'Report of Approved Rate',
}
"""IPv4 Option Utility Table
T | F
bool, short of True / False
opt_class
dict, option classes
IPv4_OPT
dict, IPv4 option dict.
Value is a tuple which contains:
|--> bool, if length greater than 1
| |--> T - True
| |--> F - False
|--> str, description string, also attribute name
|--> (optional) int, process that data bytes need (when length greater than 2)
|--> 0: do nothing
|--> 1: unpack according to size
|--> 2: unpack route data options then add to dict
|--> 3: unpack Quick-Start then add to dict
|--> 4: unpack Time Stamp then add to dict
|--> 5: unpack Traceroute then add to dict
|--> 6: unpack (Extended) Security then add tot dict
|--> 7: unpack Router Alert then add to dict
"""
T = True
F = False
opt_class = {
0: 'control',
1: 'reserved for future use',
2: 'debugging and measurement',
3: 'reserved for future use',
}
process_opt = {
0: lambda self, size, kind: self._read_mode_donone(size, kind), # do nothing
1: lambda self, size, kind: self._read_mode_unpack(size, kind), # unpack according to size
2: lambda self, size, kind: self._read_mode_route(size, kind), # route data
3: lambda self, size, kind: self._read_mode_qs(size, kind), # Quick-Start
4: lambda self, size, kind: self._read_mode_ts(size, kind), # Time Stamp
5: lambda self, size, kind: self._read_mode_tr(size, kind), # Traceroute
6: lambda self, size, kind: self._read_mode_sec(size, kind), # (Extended) Security
7: lambda self, size, kind: self._read_mode_rsralt(size, kind), # Router Alert
}
IPv4_OPT = { # copy class number kind length process name
0: (F, 'eool'), # 0 0 0 0 - - [RFC 791] End of Option List
1: (F, 'nop'), # 0 0 1 1 - - [RFC 791] No-Operation
7: (T, 'rr', 2), # 0 0 7 7 N 2 [RFC 791] Record Route
11: (T, 'mtup', 1), # 0 0 11 11 4 1 [RFC 1063][RFC 1191] MTU Probe
12: (T, 'mtur', 1), # 0 0 12 12 4 1 [RFC 1063][RFC 1191] MTU Reply
25: (T, 'qs', 3), # 0 0 25 25 8 3 [RFC 4782] Quick-Start
68: (T, 'ts', 4), # 0 2 4 68 N 4 [RFC 791] Time Stamp
82: (T, 'tr', 5), # 0 2 18 82 N 5 [RFC 1393][RFC 6814] Traceroute
130: (T, 'sec', 6), # 1 0 2 130 N 6 [RFC 1108] Security
131: (T, 'lsr', 2), # 1 0 3 131 N 2 [RFC 791] Loose Source Route
133: (T, 'esec', 6), # 1 0 5 133 N 6 [RFC 1108][RFC 6814] Extended Security
136: (T, 'sid', 1), # 1 0 8 136 4 1 [RFC 791][RFC 6814] Stream ID
137: (T, 'ssr', 2), # 1 0 9 137 N 2 [RFC 791] Strict Source Route
148: (T, 'rtralt', 7), # 1 0 20 148 4 7 [RFC 2113] Router Alert
}
class IPv4(IP):
"""This class implements Internet Protocol version 4.
Properties:
* name -- str, name of corresponding procotol
* info -- Info, info dict of current instance
* layer -- str, `Internet`
* length -- int, header length of corresponding protocol
* protocol -- str, name of next layer protocol
* protochain -- ProtoChain, protocol chain of current instance
* src -- str, source IP address
* dst -- str, destination IP address
Methods:
* read_ipv4 -- read Internet Protocol version 4 (IPv4)
Attributes:
* _file -- BytesIO, bytes to be extracted
* _info -- Info, info dict of current instance
* _protos -- ProtoChain, protocol chain of current instance
Utilities:
* _read_protos -- read next layer protocol type
* _read_fileng -- read file buffer
* _read_unpack -- read bytes and unpack to integers
* _read_binary -- read bytes and convert into binaries
* _decode_next_layer -- decode next layer protocol type
* _import_next_layer -- import next layer protocol extractor
* _read_ip_seekset -- when fragmented, read payload throughout first
* _read_ipv4_addr -- read IPv4 address
* _read_ipv4_options -- read IPv4 option list
"""
##########################################################################
# Properties.
##########################################################################
    @property
    def name(self):
        """str: human-readable name of the protocol."""
        return 'Internet Protocol version 4'
    @property
    def length(self):
        """int: header length in bytes, as parsed from the IHL field."""
        return self._info.hdr_len
    @property
    def protocol(self):
        """Name of the encapsulated (transport-layer) protocol."""
        return self._info.proto
##########################################################################
# Methods.
##########################################################################
    def read_ipv4(self, length):
        """Read Internet Protocol version 4 (IPv4).

        Structure of IPv4 header [RFC 791]:
             0                   1                   2                   3
             0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |Version|  IHL  |Type of Service|          Total Length         |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |         Identification        |Flags|      Fragment Offset    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |  Time to Live |    Protocol   |         Header Checksum       |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |                       Source Address                          |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |                    Destination Address                        |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |                    Options                    |    Padding    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

            Octets          Bits            Name                Description
              0              0          ip.version        Version (4)
              0              4          ip.hdr_len        Internal Header Length (IHL)
              1              8          ip.dsfield.dscp   Differentiated Services Code Point (DSCP)
              1              14         ip.dsfield.ecn    Explicit Congestion Notification (ECN)
              2              16         ip.len            Total Length
              4              32         ip.id             Identification
              6              48         ip.flags.rb       Reserved Bit (must be zero)
              6              49         ip.flags.df       Don't Fragment (DF)
              6              50         ip.flags.mf       More Fragments (MF)
              6              51         ip.frag_offset    Fragment Offset
              8              64         ip.ttl            Time To Live (TTL)
              9              72         ip.proto          Protocol (Transport Layer)
              10             80         ip.checksum       Header Checksum
              12             96         ip.src            Source IP Address
              16             128        ip.dst            Destination IP Address
              20             160        ip.options        IP Options (if IHL > 5)
        """
        # first octet as a two-char hex string: nibble 0 = version, nibble 1 = IHL
        _vihl = self._read_fileng(1).hex()
        _dscp = self._read_binary(1)
        _tlen = self._read_unpack(2)
        _iden = self._read_unpack(2)
        _frag = self._read_binary(2)
        _ttol = self._read_unpack(1)
        _prot = self._read_protos(1)
        _csum = self._read_fileng(2)
        _srca = self._read_ipv4_addr()
        _dsta = self._read_ipv4_addr()
        ipv4 = dict(
            version = _vihl[0],                      # hex digit as str, e.g. '4'
            hdr_len = int(_vihl[1], base=16) * 4,    # IHL is in 32-bit words
            dsfield = dict(
                dscp = (
                    TOS_PRE.get(_dscp[:3]),
                    TOS_DEL.get(_dscp[3]),
                    TOS_THR.get(_dscp[4]),
                    TOS_REL.get(_dscp[5]),
                ),
                ecn = TOS_ECN.get(_dscp[-2:]),
            ),
            len = _tlen,
            id = _iden,
            flags = dict(
                # NOTE(review): rb is stored as a constant bytes literal, not
                # parsed from _frag[0] -- confirm this is intentional.
                rb = b'\x00',
                df = True if int(_frag[1]) else False,
                mf = True if int(_frag[2]) else False,
            ),
            frag_offset = int(_frag[3:], base=2) * 8,   # offset is in 8-octet units
            ttl = _ttol,
            proto = _prot,
            checksum = _csum,
            src = _srca,
            dst = _dsta,
        )
        _optl = ipv4['hdr_len'] - 20
        if _optl:
            # IHL > 5 words: parse the trailing option list
            options = self._read_ipv4_options(_optl)
            ipv4['opt'] = options[0]        # tuple of option acronyms
            ipv4.update(options[1])         # merge option info to buffer
        hdr_len = ipv4['hdr_len']
        raw_len = ipv4['len'] - hdr_len     # payload bytes after the header
        if not ipv4['flags']['df']:
            # fragmentation possible: read payload ahead for reassembly
            ipv4 = self._read_ip_seekset(ipv4, hdr_len, raw_len)
        # make next layer protocol name
        proto = ipv4['proto']
        if proto is None:
            proto = ''
        name_ = proto.lower() or 'raw'      # NOTE(review): computed but never used below
        proto = proto or None
        self._protos = ProtoChain(proto)
        return self._decode_next_layer(ipv4, _prot, raw_len)
##########################################################################
# Data models.
##########################################################################
    def __init__(self, _file, length=None):
        """Parse an IPv4 header from `_file` (BytesIO of raw bytes).

        `length` is passed straight through to read_ipv4; the header length
        itself is derived from the IHL field during parsing.
        """
        self._file = _file
        self._info = Info(self.read_ipv4(length))
    def __len__(self):
        # header length in bytes of the parsed packet (IHL * 4)
        return self._info.hdr_len
    def __length_hint__(self):
        # minimum IPv4 header length (no options)
        return 20
##########################################################################
# Utilities.
##########################################################################
def _read_ipv4_addr(self):
"""Read IP address."""
_byte = self._read_fileng(4)
_addr = '.'.join([str(_) for _ in _byte])
return _addr
def _read_opt_type(self, kind):
"""Read option type field.
Keyword arguments:
kind -- int, option kind value
Structure of option type field [RFC 791]:
Octets Bits Name Discription
0 0 ip.opt.type.copy Copied Flag (0/1)
0 1 ip.opt.type.class Option Class (0-3)
0 3 ip.opt.type.number Option Number
"""
bin_ = bin(kind)[2:].zfill(8)
type_ = {
'copy' : bool(int(bin_[0], base=2)),
'class' : opt_class.get(int(bin_[1:3], base=2)),
'number' : int(bin_[3:], base=2),
}
return type_
    def _read_ipv4_options(self, size=None):
        """Read IPv4 option list.

        Keyword arguments:
            * size  -- int, total length in bytes of the option area

        Returns a 2-tuple: (tuple of option acronyms in order of first
        appearance, dict mapping acronym -> parsed option data). Repeated
        options are collected into a tuple of Info objects under one key.
        """
        counter = 0         # length of read option list
        optkind = tuple()   # option kind list
        options = dict()    # dict of option data
        while counter < size:
            # get option kind
            kind = self._read_unpack(1)
            # fetch corresponding option tuple
            opts = IPv4_OPT.get(kind)
            if opts is None:
                # unknown kind: consume the rest of the option area verbatim
                len_ = size - counter
                counter = size
                options['Unknown'] = self._read_fileng(len_)
                break
            # extract option
            dscp = opts[1]
            if opts[0]:
                # multi-byte option: next octet is the total option length
                byte = self._read_unpack(1)
                if byte:    # check option process mode
                    data = process_opt[opts[2]](self, byte, kind)
                else:       # NOTE(review): length octet of 0 -- treated as a bare 2-byte "permission" option; confirm intended
                    data = dict(
                        kind = kind,                            # option kind
                        type = self._read_opt_type(kind),       # option type info
                        length = 2,                             # option length
                        flag = True,                            # permission flag
                    )
            else:           # 1-byte options (EOOL / NOP)
                byte = 1
                data = dict(
                    kind = kind,                            # option kind
                    type = self._read_opt_type(kind),       # option type info
                    length = 1,                             # option length
                )
            # record option data; `byte` is the whole option's length incl. kind/length octets
            counter += byte
            if dscp in optkind:
                # repeated option: accumulate into a tuple of Info objects
                if isinstance(options[dscp], tuple):
                    options[dscp] += (Info(data),)
                else:
                    options[dscp] = (Info(options[dscp]), Info(data))
            else:
                optkind += (dscp,)
                options[dscp] = data
            # break when eol triggered (kind 0 = End of Option List)
            if not kind:    break
        # get padding up to the declared option-area size
        if counter < size:
            len_ = size - counter
            options['padding'] = self._read_fileng(len_)
        return optkind, options
def _read_mode_donone(self, size, kind):
"""Read options request no process.
Keyword arguemnts:
size - int, length of option
kind - int, option kind value
Structure of TCP options:
Octets Bits Name Discription
0 0 tcp.opt.kind Kind
1 8 tcp.opt.length Length
2 16 tcp.opt.data Kind-specific Data
"""
data = dict(
kind = kind,
length = size,
data = self._read_fileng(size),
)
return data
def _read_mode_unpack(self, size, kind):
"""Read options request unpack process.
Keyword arguemnts:
size - int, length of option
kind - int, option kind value
Structure of TCP options:
Octets Bits Name Discription
0 0 tcp.opt.kind Kind
1 8 tcp.opt.length Length
2 16 tcp.opt.data Kind-specific Data
"""
data = dict(
kind = kind,
length = size,
data = self._read_unpack(size),
)
return data
def _read_mode_route(self, size, kind):
"""Read options with route data.
Keyword arguemnts:
size - int, length of option
kind - int, 7/131/137 (RR/LSR/SSR)
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Discription
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
"""
_rlst = list()
_rrip = list()
_rpad = list()
_type = self._read_opt_type(kind)
_rptr = self._read_unpack(1)
_rlst.append(_rptr)
_rrec = _rptr + 4
while _rptr <= size:
_rrec
_rrip = self._read_ipv4_addr()
_rpad = self._read_fileng(_rptr - 4)
_resv = self._read_fileng(_rptr - 4)
for _ in range((size - 3 - _rptr) // 4):
_rrip.append(self._read_ipv4_addr())
_pads = self._read_fileng((size - 3 - _rptr) % 4)
data = dict(
kind = kind,
type = _type,
length = size,
pointer = _rptr,
reserved = _resv,
data = tuple(_rrip) if len(_rrip) > 1 else _rrip[0],
padding = _pads,
)
return data
def _read_mode_qs(self, size, kind):
    """Read Quick-Start option.

    Keyword arguments:
        size - int, length of option
        kind - int, 25 (QS)

    Structure of Quick-Start (QS) option [RFC 4782]:
        +--------+--------+--------+--------+
        | Option |Length=8| Func|Rate| TTL  |   Func 0000 = request,
        +--------+--------+--------+--------+   Func 1000 = report
        |        QS Nonce           |   R   |   (TTL octet unused in reports)
        +--------+--------+--------+--------+

        Octets  Bits  Name                 Description
          0       0   ip.qs.kind           Kind (25)
          1       8   ip.qs.length         Length (8)
          2      16   ip.qs.func           Function (0/8)
          2      20   ip.qs.rate           Rate Request / Report (in Kbps)
          3      24   ip.qs.ttl            QS TTL / None
          4      32   ip.qs.nounce         QS Nounce
          7      62   ip.qs.resv           Reserved (\\x00\\x00)
    """
    _type = self._read_opt_type(kind)
    _fcrr = self._read_binary(1)
    _func = int(_fcrr[:4], base=2)   # 0 = rate request, 8 = rate report
    _rate = int(_fcrr[4:], base=2)
    _ttlv = self._read_unpack(1)     # QS TTL octet (unused when reporting)
    _nonr = self._read_binary(4)
    _qsnn = int(_nonr[:30], base=2)  # top 30 bits are the nonce, last 2 reserved

    data = dict(
        kind=kind,
        type=_type,
        length=size,
        func=QS_FUNC.get(_func),
        # Rate encoding per RFC 4782: 40000 * 2**rate bps, reported in Kbps.
        rate=40000 * (2 ** _rate) / 1000,
        # BUG FIX: the original referenced the undefined name ``func``
        # (NameError) and stored ``_rate`` in the TTL slot; the TTL byte
        # is only meaningful for requests (func == 0).
        ttl=None if _func else _ttlv,
        nounce=_qsnn,
        resv='\x00\x00',
    )
    return data
def _read_mode_ts(self, size, kind):
"""Read Time Stamp option.
Keyword arguemnts:
size - int, length of option
kind - int, 68 (TS)
Structure of Timestamp (TS) option [RFC 791]:
+--------+--------+--------+--------+
|01000100| length | pointer|oflw|flg|
+--------+--------+--------+--------+
| internet address |
+--------+--------+--------+--------+
| timestamp |
+--------+--------+--------+--------+
| . |
.
.
Octets Bits Name Discription
0 0 ip.ts.kind Kind (25)
0 0 ip.ts.type.copy Copied Flag (0)
0 1 ip.ts.type.class Option Class (0)
0 3 ip.ts.type.number Option Number (25)
1 8 ip.ts.length Length (≤40)
2 16 ip.ts.pointer Pointer (≥5)
3 24 ip.ts.overflow Overflow Octets
3 28 ip.ts.flag Flag
4 32 ip.ts.ip Internet Address
8 64 ip.ts.timestamp Timestamp
"""
_tptr = self._read_unpack(1)
_oflg = self._read_binary(1)
_oflw = int(_oflg[:4], base=2)
_flag = int(_flag[4:], base=2)
_ipad = self._read_ipv4_addr()
_time = self._read_unpack(4, lilendian=True)
data = dict(
)
return data
def _read_mode_tr(self, size, kind):
return self._read_fileng(size)
def _read_mode_sec(self, size, kind):
return self._read_fileng(size)
def _read_mode_rsralt(self, size, kind):
return self._read_fileng(size)
|
from django.shortcuts import render, redirect
from django.views import View
from .forms import UserForm
from django.contrib.auth import logout
def index(request):
    """Render the home page, or send an already-authenticated user on to
    their account index."""
    if not request.user.is_authenticated:
        return render(request, 'home/home.html')
    return redirect('account:index')
class UserFormView(View):
    """Register a new user via :class:`UserForm`.

    GET renders a blank registration form; POST validates the submission
    and creates the user, hashing the password before saving.
    """
    form_class = UserForm
    template_name = 'home/register.html'

    def get(self, request):
        """Show an empty registration form."""
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        """Validate the submitted form and create the user."""
        form = self.form_class(request.POST)
        if form.is_valid():
            # Build the user object without hitting the database yet.
            user = form.save(commit=False)
            # Hash the password instead of storing the raw cleaned value.
            password = form.cleaned_data['password']
            user.set_password(password)
            user.save()
            return render(request, 'home/registersucces.html')
        # BUG FIX: the original fell off the end and returned None on
        # invalid input, which makes Django raise "The view didn't return
        # an HttpResponse".  Re-render the bound form so field errors show.
        return render(request, self.template_name, {'form': form})
def Logout(request):
    """Log out the current session and redirect to the books index."""
    logout(request)
    return redirect('books:index')
|
Root /
folder ~
.cd - current directory
.pwd - print working directory (shows absolute path, from root directory to where you are now)
.mkdir - make directory
.ls - list
. (dot) stands for current directory
..(dot dot) will list structure of the parent directory
to make a file: touch
These are common Git commands used in various situations:
start a working area (see also: git help tutorial)
clone Clone a repository into a new directory
init Create an empty Git repository or reinitialize an existing one
work on the current change (see also: git help everyday)
add Add file contents to the index
mv Move or rename a file, a directory, or a symlink
reset Reset current HEAD to the specified state
rm Remove files from the working tree and from the index
examine the history and state (see also: git help revisions)
bisect Use binary search to find the commit that introduced a bug
grep Print lines matching a pattern
log Show commit logs
show Show various types of objects
status Show the working tree status
grow, mark and tweak your common history
branch List, create, or delete branches
checkout Switch branches or restore working tree files
commit Record changes to the repository
diff Show changes between commits, commit and working tree, etc
merge Join two or more development histories together
rebase Forward-port local commits to the updated upstream head
tag Create, list, delete or verify a tag object signed with GPG
collaborate (see also: git help workflows)
fetch Download objects and refs from another repository
pull Fetch from and integrate with another repository or a local branch
push Update remote refs along with associated objects
'git help -a' and 'git help -g' list available subcommands and some
concept guides. See 'git help <command>' or 'git help <concept>'
to read about a specific subcommand or concept.
To clone a repository:
highlight the https from the page,
type git clone and the address to pull down the repository
Master
Everyone takes from the master file for their own branches
Make your own branch everyday, to push code up and not overwrite Master
Local copy is your machine, remote is the master
In Bash:
Ninas-Air:MonsterAcademy ninaalli$ cd week1-day1/
Ninas-Air:week1-day1 ninaalli$ git branch
* master
Ninas-Air:week1-day1 ninaalli$ git checkout -b ninaalli
Switched to a new branch 'ninaalli'
Ninas-Air:week1-day1 ninaalli$ git branch
master
* ninaalli
Ninas-Air:week1-day1 ninaalli$
git checkout -b NAME (creates the new branch and will auto switch to it)
git branch NAME will only create the branch, not switch to it
if you type atom in Bash, it opens a blank Atom page
if you type atom in the folder, Atom . (this will open the current directory into the text editor)
git commit to make sure you see what was changed, you have to tell it to change and commit it
not tracked (files that did not exist prior, if tracked and modified will be red.
to stage a commit, git add (place path here)
to create a new file in the file you are in: touch (new name for file)
When you commit, you commit all of them - if you dont want them there:
git reset (file name)
anything that follows git is a git command
-m is short for message (sees it as a string - a variation of characters)
git commit -m "comment what you did" (for every 20-30 changes committed, this is important)
git diff (shows you the changes in the code)
after git add, always do git status to make sure you aren't surprised by the outcomes.
git push goes to the remote git
git push origin ninaalli
shows what is in the file
git remote
git remote -v
Ninas-Air:week1-day1 ninaalli$ git remote -v
origin https://github.com/gila-monsters-2016/week1-day1.git (fetch)
origin https://github.com/gila-monsters-2016/week1-day1.git (push)
Ninas-Air:week1-day1 ninaalli$
***to FETCH: it will go to the remote we specify and will take the directory and bring it down to our github, it wont combine the two.
PULL- fetches changes from remote and combines them into the local repository
if you are on your branch, do not PULL
if you checkout to Master, it will merge all the branches,
to get any changes you will have to merge it.
ALWAYS BE AWARE OF THE BRANCH YOU ARE ON.
GitHub will try to find the differences prior to a commit so you can work on changes so there is no conflict (which you have to go in and resolve)
DAY 1 most important lessons:
Git
branch (have to be on the branch to do all of this)
checkout -b <branch name>
status
add <path to the file>
commit -m <message>
push <name of the remote><name of the branch>
git stash apply - will make it look like
|
from keras.datasets import mnist
from keras import models
from keras import layers
from keras.utils import to_categorical
# The Simplified Big Picture of How Most Supervised Networks Work
#
# The gist of it:
# [1] Draw a batch of training samples x and corresponding targets y.
# [2] Run the network on x (a step called the forward pass) to obtain predictions y_pred.
# [3] Compute the loss of the network on the batch, a measure of the mismatch
#     between y_pred and y.
# [4] Update all weights of the network in a way that slightly reduces the loss on this
#     batch.

# [1] Draw a batch of training samples x and corresponding targets y.

# Load the MNIST training/test split.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Flatten each 28x28 image: 60k samples -> 2-D tensor of shape (60000, 784).
train_images = train_images.reshape((60000, 28 * 28))
# Convert to float; dividing by 255 scales each gray-scale pixel into [0, 1].
train_images = train_images.astype('float32') / 255

# One-hot encode the digit labels so they match the 10-way softmax output.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# Flatten the test set likewise: 10k samples -> (10000, 784).
test_images = test_images.reshape((10000, 28 * 28))
# Convert to float; dividing by 255 scales each gray-scale pixel into [0, 1].
test_images = test_images.astype('float32') / 255

# Defining/Loading the Network

# [Defining] The type of network: a linear stack of layers.
network = models.Sequential()
# Takes the input, performs basic tensor ops, and applies a relu activation.
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
# Takes the previous output and shunts it into the 10 possible classes (the digits).
network.add(layers.Dense(10, activation='softmax'))

# [Loading] a previously saved network instead of defining one:
# network = models.load_model('trained_mnist.h5')
# network = models.load_model('untrained_mnist.h5')

# Compiling options, though really still part of defining, as you're telling the
# compiler what optimization scheme and loss function to use etc.
# rmsprop is the optimizer that minimizes loss, and categorical crossentropy is
# the loss function that is differentiated.
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])

# [2] Run the network on x (a step called the forward pass) to obtain predictions y_pred.
# [3] Compute the loss of the network on the batch, a measure of the mismatch
#     between y_pred and y.
# [4] Update all weights of the network in a way that slightly reduces the loss on this
#     batch.

# Running the network, optimizing, and updating weights.
# 60k samples in batches of 128 equates to 469 runs per epoch; over 5 epochs
# that is roughly 2,345 gradient descent optimization runs in total.
network.fit(train_images, train_labels, epochs=5, batch_size=128)

# [Saving] a network after training is done this way:
# https://www.tensorflow.org/guide/keras/save_and_serialize
# network.save('untrained_mnist.h5')

# Check how well the trained model performs against the held-out testing data.
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc: ', test_acc)
print('test_loss:', test_loss)

# This is for visualization of model graphs:
# https://www.tensorflow.org/tensorboard/graphs
|
import os
import zipfile
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from powersimdata.network.usa_tamu.constants.zones import abv2state
from prereise.gather.demanddata.nrel_efs.get_efs_data import (
_check_electrification_scenarios_for_download,
_check_path,
_check_technology_advancements_for_download,
_download_data,
_extract_data,
account_for_leap_year,
partition_demand_by_sector,
partition_flexibility_by_sector,
)
def test_check_electrification_scenarios_for_download():
    """Passing "All" expands to every electrification scenario."""
    expected = {"Reference", "Medium", "High"}
    actual = _check_electrification_scenarios_for_download(es={"All"})
    assert actual == expected
def test_check_technology_advancements_for_download():
    """Passing "All" expands to every technology advancement."""
    expected = {"Slow", "Moderate", "Rapid"}
    actual = _check_technology_advancements_for_download(ta={"All"})
    assert actual == expected
def test_check_path():
    """An empty file path defaults to the current working directory."""
    assert _check_path(fpath="") == os.getcwd()
@pytest.mark.integration
def test_download_data():
    """The NREL .zip archive is downloaded into the working directory."""
    zip_name = "project_resstock_efs_2013.zip"
    try:
        _download_data(
            zip_name=zip_name,
            url="https://data.nrel.gov/system/files/128/project_resstock_efs_2013.zip",
            fpath="",
        )
        # The archive should now exist on disk.
        assert os.path.isfile(zip_name)
    finally:
        # Clean up the downloaded archive.
        os.remove(zip_name)
def test_extract_data():
    """_extract_data either extracts the .csv from the .zip, or (when no
    extraction tool is available) leaves the .zip in place."""
    # Create a dummy demand data set
    cont_states = sorted(set(abv2state) - {"AK", "HI"})
    dummy_demand_data = {
        "Electrification": ["High"] * 4 * 48 * 8760,
        "TechnologyAdvancement": ["Rapid"] * 4 * 48 * 8760,
        "Year": [2030] * 4 * 48 * 8760,
        "LocalHourID": sorted(list(range(1, 8761)) * 4 * 48),
        "State": sorted(list(cont_states) * 4) * 8760,
        "Sector": ["Commercial", "Industrial", "Residential", "Transportation"]
        * 48
        * 8760,
        "LoadMW": [1, 2, 3, 4] * 48 * 8760,
    }
    dummy_demand_df = pd.DataFrame(data=dummy_demand_data)
    dummy_demand_df.to_csv("test_demand.csv", index=False)

    # Create a .zip file of the dummy demand data set
    with zipfile.ZipFile("test_demand.zip", "w") as z:
        z.write("test_demand.csv")
    os.remove("test_demand.csv")

    try:
        # Try extracting the dummy .csv file from the dummy .zip file
        _extract_data(
            z=None,
            zf_works=False,
            zip_name="test_demand.zip",
            csv_name="test_demand.csv",
            fpath=os.getcwd(),
            sz_path="C:/Program Files/7-Zip/7z.exe",
        )

        # Load the extracted demand data set and check its columns
        df = pd.read_csv("test_demand.csv")
        test_cols = list(df.columns)
        exp_cols = [
            "Electrification",
            "TechnologyAdvancement",
            "Year",
            "LocalHourID",
            "State",
            "Sector",
            "LoadMW",
        ]
        assert len(test_cols) == len(exp_cols)
    except FileNotFoundError:
        # If the automated extraction did not work, check that the .zip
        # file was at least created
        assert os.path.isfile("test_demand.zip")
    finally:
        # BUG FIX: the original unconditionally removed test_demand.csv in
        # ``finally``, which raised FileNotFoundError whenever extraction
        # failed (the .csv was never created), and it leaked the .zip on
        # the success path.  Remove whichever artifacts actually exist.
        for leftover in ("test_demand.zip", "test_demand.csv"):
            if os.path.isfile(leftover):
                os.remove(leftover)
def test_partition_demand_by_sector():
    """Residential demand is split out correctly from the combined EFS file."""
    states = sorted(set(abv2state) - {"AK", "HI"})
    n_sector, n_state, n_hour = 4, 48, 8760
    block = n_sector * n_state * n_hour

    # Build a dummy combined demand file covering every state/sector/hour.
    dummy = pd.DataFrame(
        {
            "Electrification": ["High"] * block,
            "TechnologyAdvancement": ["Rapid"] * block,
            "Year": [2030] * block,
            "LocalHourID": sorted(list(range(1, n_hour + 1)) * n_sector * n_state),
            "State": sorted(states * n_sector) * n_hour,
            "Sector": ["Commercial", "Industrial", "Residential", "Transportation"]
            * n_state
            * n_hour,
            "LoadMW": [1, 2, 3, 4] * n_state * n_hour,
        }
    )
    dummy.to_csv("EFSLoadProfile_High_Rapid.csv", index=False)

    try:
        result = partition_demand_by_sector(
            es="High", ta="Rapid", year=2030, save=False
        )
        # Residential rows all carry load 3 in the dummy data.
        expected = pd.DataFrame(
            3,
            index=pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
            columns=states,
        )
        expected.index.name = "Local Time"
        assert_frame_equal(expected, result["Residential"], check_names=False)
    finally:
        # Delete the test .csv file
        os.remove("EFSLoadProfile_High_Rapid.csv")
def test_partition_flexibility_by_sector():
    """Residential flexibility is split out correctly from the combined file."""
    states = sorted(set(abv2state) - {"AK", "HI"})
    n_sector, n_state, n_hour = 4, 48, 8760
    block = n_sector * n_state * n_hour

    # Build a dummy combined flexibility file covering every state/sector/hour.
    dummy = pd.DataFrame(
        {
            "Electrification": ["High"] * block,
            "TechnologyAdvancement": ["Rapid"] * block,
            "Flexibility": ["Base"] * block,
            "Year": [2030] * block,
            "LocalHourID": sorted(list(range(1, n_hour + 1)) * n_sector * n_state),
            "State": sorted(states * n_sector) * n_hour,
            "Sector": ["Commercial", "Industrial", "Residential", "Transportation"]
            * n_state
            * n_hour,
            "LoadMW": [1, 2, 3, 4] * n_state * n_hour,
        }
    )
    dummy.to_csv("EFSFlexLoadProfiles_High.csv", index=False)

    try:
        result = partition_flexibility_by_sector(
            es="High", ta="Rapid", flex="Base", year=2030, save=False
        )
        # Residential rows all carry load 3 in the dummy data.
        expected = pd.DataFrame(
            3,
            index=pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
            columns=states,
        )
        expected.index.name = "Local Time"
        assert_frame_equal(expected, result["Residential"], check_names=False)
    finally:
        # Delete the test .csv file
        os.remove("EFSFlexLoadProfiles_High.csv")
def test_account_for_leap_year():
    """account_for_leap_year extends 8760 hourly rows to 8784, appending a
    duplicated day's worth of values at the end."""
    states = sorted(set(abv2state) - {"AK", "HI"})

    # Dummy aggregate demand: all ones, except hours 24-47 which are twos.
    demand = pd.DataFrame(1, index=list(range(8760)), columns=states)
    demand.iloc[24:48] += 1

    result = account_for_leap_year(demand)

    # Expected: same profile plus 24 extra rows mirroring the doubled day.
    expected = pd.DataFrame(1, index=list(range(8784)), columns=states)
    expected.iloc[24:48] += 1
    expected.iloc[8760:8784] += 1

    assert_frame_equal(expected, result)
|
"""
Tests for mimic identity (:mod:`mimic.model.identity` and
:mod:`mimic.rest.auth_api`)
"""
from __future__ import absolute_import, division, unicode_literals
import json
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.task import Clock
from mimic.canned_responses.auth import (
get_token, HARD_CODED_TOKEN, HARD_CODED_USER_ID,
HARD_CODED_USER_NAME, HARD_CODED_ROLES,
get_endpoints
)
from mimic.canned_responses.mimic_presets import get_presets
from mimic.catalog import Entry, Endpoint
from mimic.core import MimicCore
from mimic.resource import MimicRoot
from mimic.test.behavior_tests import (
behavior_tests_helper_class,
register_behavior
)
from mimic.test.dummy import ExampleAPI
from mimic.test.helpers import json_request, request, request_with_content
def core_and_root(api_list):
    """
    Given a list of APIs to load, return core and root.
    """
    clock = Clock()
    core = MimicCore(clock, api_list)
    root = MimicRoot(core).app.resource()
    return core, root
class ExampleCatalogEndpoint(object):
    """
    Stand-in for a service-catalog endpoint, parameterized by a tenant and
    an endpoint index.
    """

    def __init__(self, tenant, num, endpoint_id):
        self._tenant = tenant
        self._num = num
        self.endpoint_id = endpoint_id

    @property
    def region(self):
        """Region name derived from the endpoint index."""
        return "EXAMPLE_{}".format(self._num)

    @property
    def tenant_id(self):
        """Tenant identifier combining the tenant with the endpoint index."""
        return "{}_{}".format(self._tenant, self._num)

    def url_with_prefix(self, prefix):
        """Public URL for this endpoint; the prefix is deliberately ignored."""
        return "http://ok_{}".format(self._num)
class ExampleCatalogEntry(object):
    """
    Example of a thing that a plugin produces at some phase of its lifecycle;
    maybe you have to pass it a tenant ID to get one of these. (Services which
    don't want to show up in the catalog won't produce these.)
    """

    def __init__(self, tenant_id, name, endpoint_count=2, idgen=lambda: 1):
        # some services transform their tenant ID
        self.name = name
        self.type = "compute"
        self.path_prefix = "/v2/"
        # One endpoint per index, ids handed out by idgen in order.
        self.endpoints = []
        for index in range(endpoint_count):
            self.endpoints.append(
                ExampleCatalogEndpoint(tenant_id, index + 1, idgen()))
def example_endpoints(counter):
    """
    Create some example catalog entries from a given tenant ID, like the plugin
    loader would.
    """
    def endpoints(tenant_id):
        for service_name in ("something", "something_else"):
            yield ExampleCatalogEntry(tenant_id, service_name, idgen=counter)
    return endpoints
class CatalogGenerationTests(SynchronousTestCase):
    """
    Tests for generating a service catalog in various formats from a common
    data source.
    """

    # Service catalogs are pretty large, so set the testing option to a value
    # where we can see as much as possible of the difference in the case of a
    # failure.
    maxDiff = None

    def test_tokens_response(self):
        """
        :func:`get_token` returns JSON-serializable data in the format
        presented by a ``POST /v2.0/tokens`` API request; i.e. the normal
        user-facing service catalog generation.
        """
        tenant_id = 'abcdefg'
        self.assertEqual(
            get_token(
                tenant_id=tenant_id, timestamp=lambda dt: "<<<timestamp>>>",
                entry_generator=example_endpoints(lambda: 1),
                prefix_for_endpoint=lambda e: 'prefix'
            ),
            {
                "access": {
                    "token": {
                        "id": HARD_CODED_TOKEN,
                        "expires": "<<<timestamp>>>",
                        "tenant": {
                            "id": tenant_id,
                            "name": tenant_id, # TODO: parameterize later
                        },
                        "RAX-AUTH:authenticatedBy": [
                            "PASSWORD",
                        ]
                    },
                    # Two entries from example_endpoints, each carrying two
                    # regional endpoints.
                    "serviceCatalog": [
                        {
                            "name": "something",
                            "type": "compute",
                            "endpoints": [
                                {
                                    "region": "EXAMPLE_1",
                                    "tenantId": "abcdefg_1",
                                    "publicURL": "http://ok_1"
                                },
                                {
                                    "region": "EXAMPLE_2",
                                    "tenantId": "abcdefg_2",
                                    "publicURL": "http://ok_2"
                                }
                            ]
                        },
                        {
                            "name": "something_else",
                            "type": "compute",
                            "endpoints": [
                                {
                                    "region": "EXAMPLE_1",
                                    "tenantId": "abcdefg_1",
                                    "publicURL": "http://ok_1"
                                },
                                {
                                    "region": "EXAMPLE_2",
                                    "tenantId": "abcdefg_2",
                                    "publicURL": "http://ok_2"
                                }
                            ]
                        }
                    ],
                    "user": {
                        "id": HARD_CODED_USER_ID,
                        "name": HARD_CODED_USER_NAME,
                        "roles": HARD_CODED_ROLES,
                    }
                }
            }
        )

    def test_endpoints_response(self):
        """
        :func:`get_endpoints` returns JSON-serializable data in the format
        presented by a ``GET /v2.0/tokens/<token>/endpoints``; i.e. the
        administrative list of tokens.
        """
        tenant_id = 'abcdefg'
        from itertools import count
        accum = count(1)

        # Each call hands out the next sequential endpoint id.
        def counter():
            return next(accum)

        # Possible TODO for cloudServersOpenStack:
        # "versionInfo": "http://localhost:8902/v2",
        # "versionList": "http://localhost:8902/",
        # "versionId": "2",

        self.assertEqual(
            get_endpoints(
                tenant_id=tenant_id,
                entry_generator=example_endpoints(counter),
                prefix_for_endpoint=lambda e: 'prefix'
            ),
            {
                "endpoints": [
                    {
                        "region": "EXAMPLE_1",
                        "tenantId": "abcdefg_1",
                        "publicURL": "http://ok_1",
                        "name": "something",
                        "type": "compute",
                        "id": 1,
                    },
                    {
                        "region": "EXAMPLE_2",
                        "tenantId": "abcdefg_2",
                        "publicURL": "http://ok_2",
                        "name": "something",
                        "type": "compute",
                        "id": 2,
                    },
                    {
                        "region": "EXAMPLE_1",
                        "tenantId": "abcdefg_1",
                        "publicURL": "http://ok_1",
                        "name": "something_else",
                        "type": "compute",
                        "id": 3,
                    },
                    {
                        "region": "EXAMPLE_2",
                        "tenantId": "abcdefg_2",
                        "publicURL": "http://ok_2",
                        "name": "something_else",
                        "type": "compute",
                        "id": 4
                    }
                ]
            },
        )

    def test_unversioned_entry(self):
        """
        An L{Endpoint} created without a 'prefix' returns a URI without a
        version.
        """
        self.assertEqual(
            get_endpoints(
                tenant_id="1234",
                entry_generator=lambda t_id: [Entry(
                    tenant_id=t_id, type="compute",
                    name="compute_name", endpoints=[
                        Endpoint(tenant_id=t_id,
                                 region="None",
                                 endpoint_id="eid")
                    ]
                )],
                prefix_for_endpoint=lambda ep: "http://prefix/"
            ),
            {
                "endpoints": [
                    {
                        "id": "eid",
                        "name": "compute_name",
                        "type": "compute",
                        "region": "None",
                        "tenantId": "1234",
                        "publicURL": "http://prefix/1234"
                    }
                ]
            }
        )
def authenticate_with_username_password(test_case, root,
                                        uri='/identity/v2.0/tokens',
                                        username=None, password=None,
                                        tenant_name=None, tenant_id=None,
                                        request_func=json_request):
    """
    Returns a tuple of the response code and json body after authentication
    with username and password.
    """
    body = {
        "auth": {
            "passwordCredentials": {
                "username": username or "demoauthor",
                "password": password or "theUsersPassword",
            }
        }
    }
    # Optional tenant qualifiers, added in the same order as before.
    for key, value in (("tenantId", tenant_id), ("tenantName", tenant_name)):
        if value is not None:
            body["auth"][key] = value
    payload = json.dumps(body).encode("utf-8")
    return test_case.successResultOf(
        request_func(test_case, root, b"POST", uri, payload))
def authenticate_with_api_key(test_case, root, uri='/identity/v2.0/tokens',
                              username=None, api_key=None,
                              tenant_name=None, tenant_id=None):
    """
    Returns a tuple of the response code and json body after authentication
    using the username and api_key.
    """
    body = {
        "auth": {
            "RAX-KSKEY:apiKeyCredentials": {
                "username": username or "demoauthor",
                "apiKey": api_key or "jhgjhghg-nhghghgh-12222",
            }
        }
    }
    # Optional tenant qualifiers, added in the same order as before.
    for key, value in (("tenantId", tenant_id), ("tenantName", tenant_name)):
        if value is not None:
            body["auth"][key] = value
    return test_case.successResultOf(
        json_request(test_case, root, b"POST", uri, body))
def authenticate_with_token(test_case, root, uri='/identity/v2.0/tokens',
                            token_id=None, tenant_id=None):
    """
    Returns a tuple of the response code and json body after authentication
    using token and tenant ids.
    """
    body = {
        "auth": {
            "tenantId": tenant_id or "12345",
            "token": {"id": token_id or "iuyiuyiuy-uyiuyiuy-1987878"},
        }
    }
    return test_case.successResultOf(
        json_request(test_case, root, b"POST", uri, body))
def impersonate_user(test_case, root,
                     uri="http://mybase/identity/v2.0/RAX-AUTH/impersonation-tokens",
                     username=None, impersonator_token=None):
    """
    Returns a tuple of the response code and json body after issuing an
    impersonation request on behalf of the given user.
    """
    if impersonator_token:
        headers = {b'X-Auth-Token': [impersonator_token.encode("utf-8")]}
    else:
        headers = None
    payload = {
        "RAX-AUTH:impersonation": {
            "expire-in-seconds": 30,
            "user": {"username": username or "test1"},
        }
    }
    return test_case.successResultOf(json_request(
        test_case, root, b"POST", uri, payload, headers=headers))
class GetAuthTokenAPITests(SynchronousTestCase):
    """
    Tests for ``/identity/v2.0/tokens``, provided by
    :obj:`mimic.rest.auth_api.AuthApi.get_token_and_service_catalog`
    """

    def test_response_has_auth_token(self):
        """
        The JSON response has a access.token.id key corresponding to its
        MimicCore session, and therefore access.token.tenant.id should match
        that session's tenant_id.
        """
        core, root = core_and_root([])
        (response, json_body) = authenticate_with_username_password(self, root)
        self.assertEqual(200, response.code)
        token = json_body['access']['token']['id']
        tenant_id = json_body['access']['token']['tenant']['id']
        # The session looked up by the returned token must agree with the
        # values reported in the response body.
        session = core.sessions.session_for_token(token)
        self.assertEqual(token, session.token)
        self.assertEqual(tenant_id, session.tenant_id)

    def test_response_has_user_admin_identity_role(self):
        """
        The JSON response for authenticate has the role `identity:user-admin`.
        """
        core, root = core_and_root([])
        (response, json_body) = authenticate_with_username_password(self, root)
        self.assertEqual(200, response.code)
        self.assertEqual(
            json_body['access']['user']['roles'], HARD_CODED_ROLES)

    def test_response_has_same_roles_despite_number_of_auths(self):
        """
        The JSON response for authenticate has only one `identity:user-admin`
        role, no matter how many times the user authenticates.
        """
        core, root = core_and_root([])
        # First authentication.
        (response, json_body) = authenticate_with_username_password(self, root)
        self.assertEqual(200, response.code)
        self.assertEqual(
            json_body['access']['user']['roles'], HARD_CODED_ROLES)
        # Second authentication: roles unchanged.
        (response1, json_body1) = authenticate_with_username_password(
            self, root)
        self.assertEqual(200, response1.code)
        self.assertEqual(
            json_body1['access']['user']['roles'], HARD_CODED_ROLES)
        # Third authentication: roles still unchanged.
        (response2, json_body2) = authenticate_with_username_password(
            self, root)
        self.assertEqual(200, response2.code)
        self.assertEqual(
            json_body2['access']['user']['roles'], HARD_CODED_ROLES)

    def test_authentication_request_with_no_body_causes_http_bad_request(self):
        """
        The response for empty body request is bad_request.
        """
        core, root = core_and_root([])
        (response, json_body) = self.successResultOf(json_request(
            self, root, b"POST", "/identity/v2.0/tokens", b""))
        self.assertEqual(400, response.code)

    def test_authentication_request_with_invalid_body_causes_http_bad_request(self):
        """
        The response for not JSON body request is bad_request.
        """
        core, root = core_and_root([])
        response = self.successResultOf(request(
            self, root, b"POST", "/identity/v2.0/tokens", b"{ bad request: }"))
        self.assertEqual(400, response.code)

    def test_auth_accepts_tenant_name(self):
        """
        If "tenantName" is passed, the tenant specified is used instead of a
        generated tenant ID.
        """
        core, root = core_and_root([])
        (response, json_body) = authenticate_with_username_password(
            self,
            root,
            tenant_name="turtlepower")
        self.assertEqual(200, response.code)
        self.assertEqual("turtlepower",
                         json_body['access']['token']['tenant']['id'])
        # The session must be registered under the supplied tenant.
        token = json_body['access']['token']['id']
        session = core.sessions.session_for_token(token)
        self.assertEqual(token, session.token)
        self.assertEqual("turtlepower", session.tenant_id)

    def test_auth_accepts_tenant_id(self):
        """
        If "tenantId" is passed, the tenant specified is used instead of a
        generated tenant ID.
        """
        core, root = core_and_root([])
        (response, json_body) = authenticate_with_username_password(
            self,
            root,
            tenant_id="turtlepower")
        self.assertEqual(200, response.code)
        self.assertEqual("turtlepower",
                         json_body['access']['token']['tenant']['id'])
        # The session must be registered under the supplied tenant.
        token = json_body['access']['token']['id']
        session = core.sessions.session_for_token(token)
        self.assertEqual(token, session.token)
        self.assertEqual("turtlepower", session.tenant_id)

    def test_response_service_catalog_has_base_uri(self):
        """
        The JSON response's service catalog whose endpoints all begin with
        the same base URI as the request.
        """
        core, root = core_and_root([ExampleAPI()])
        (response, json_body) = authenticate_with_username_password(
            self,
            root, uri='http://mybase/identity/v2.0/tokens')
        self.assertEqual(200, response.code)
        services = json_body['access']['serviceCatalog']
        self.assertEqual(1, len(services))
        urls = [
            endpoint['publicURL'] for endpoint in services[0]['endpoints']
        ]
        self.assertEqual(1, len(urls))
        self.assertTrue(urls[0].startswith('http://mybase/'),
                        '{0} does not start with "http://mybase"'
                        .format(urls[0]))
class GetEndpointsForTokenTests(SynchronousTestCase):
"""
Tests for ``/identity/v2.0/tokens/<token>/endpoints``, provided by
`:obj:`mimic.rest.auth_api.AuthApi.get_endpoints_for_token`
"""
def test_session_created_for_token(self):
"""
A session is created for the token provided
"""
core, root = core_and_root([])
token = '1234567890'
request(
self, root, b"GET",
"/identity/v2.0/tokens/{0}/endpoints".format(token)
)
session = core.sessions.session_for_token(token)
self.assertEqual(token, session.token)
def test_response_service_catalog_has_base_uri(self):
"""
The JSON response's service catalog whose endpoints all begin with
the same base URI as the request.
"""
core, root = core_and_root([ExampleAPI()])
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/1234567890/endpoints"
))
self.assertEqual(200, response.code)
urls = [endpoint['publicURL'] for endpoint in json_body['endpoints']]
self.assertEqual(1, len(urls))
self.assertTrue(
urls[0].startswith('http://mybase/'),
'{0} does not start with "http://mybase"'.format(urls[0]))
def test_api_service_endpoints_are_not_duplicated(self):
"""
The service catalog should not duplicate endpoints for an entry/endpoints
"""
regions_and_versions_list = [
("ORD", "v1"), ("DFW", "v1"), ("DFW", "v2"), ("IAD", "v3")]
core, root = core_and_root([ExampleAPI(
regions_and_versions=regions_and_versions_list)])
(response, json_body) = authenticate_with_username_password(self, root)
self.assertEqual(response.code, 200)
service_catalog = json_body["access"]["serviceCatalog"]
self.assertEqual(len(service_catalog), 1)
endpoints_list = service_catalog[0]["endpoints"]
self.assertEqual(len(endpoints_list), 4)
def test_get_token_and_catalog_for_password_credentials(self):
"""
The response returned should include the password credentials that were supplied
during authentication
"""
core, root = core_and_root([ExampleAPI()])
(response, json_body) = authenticate_with_username_password(self, root,
tenant_id='12345')
self.assertEqual(response.code, 200)
tenant_id = json_body["access"]["token"]["tenant"]["id"]
self.assertEqual(tenant_id, "12345")
tenant_name = json_body["access"]["token"]["tenant"]["name"]
self.assertEqual(tenant_name, tenant_id)
user_name = json_body["access"]["user"]["name"]
self.assertEqual(user_name, "demoauthor")
def test_get_token_and_catalog_for_api_credentials(self):
    """
    Authenticating with an API key echoes back the tenant that was
    supplied and the default username.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = authenticate_with_api_key(
        self, root, tenant_name='12345')
    self.assertEqual(response.code, 200)
    token_tenant = json_body["access"]["token"]["tenant"]
    self.assertEqual(token_tenant["id"], "12345")
    # Tenant name mirrors the tenant id in mimic sessions.
    self.assertEqual(token_tenant["name"], token_tenant["id"])
    self.assertEqual(json_body["access"]["user"]["name"], "demoauthor")
def test_get_token_and_catalog_for_token_credentials(self):
    """
    Authenticating with a token echoes back the tenant that was supplied
    and yields a non-empty user name.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = authenticate_with_token(
        self, root, tenant_id='12345')
    self.assertEqual(response.code, 200)
    token_tenant = json_body["access"]["token"]["tenant"]
    self.assertEqual(token_tenant["id"], "12345")
    self.assertEqual(token_tenant["name"], token_tenant["id"])
    # Token auth does not supply a username, so only assert presence.
    self.assertTrue(json_body["access"]["user"]["name"])
def test_token_and_catalog_for_password_credentials_wrong_tenant(self):
    """
    Tenant ID is validated when provided in username/password auth.
    If authed once as one tenant ID, and a second time with a different
    tenant ID, then the second auth will return with a 401 Unauthorized.
    """
    core, root = core_and_root([ExampleAPI()])
    # First auth binds the user to tenant 12345 and yields the user id
    # that appears in the later error message.
    (response, json_body) = authenticate_with_username_password(
        self, root, tenant_id="12345")
    self.assertEqual(response.code, 200)
    username = json_body["access"]["user"]["id"]
    # Re-authenticating the same user against a different tenant fails.
    (response, fail_body) = authenticate_with_username_password(
        self, root, tenant_id="23456")
    self.assertEqual(response.code, 401)
    self.assertEqual(fail_body, {
        "unauthorized": {
            "code": 401,
            "message": ("Tenant with Name/Id: '23456' is not valid for "
                        "User 'demoauthor' (id: '{0}')".format(username))
        }
    })
def test_list_tenants_for_token(self):
    """
    Identity can list the tenants associated with a given token.
    Since Mimic sessions associate a single tenant with each token,
    a list of 1 tenant will be returned.
    """
    core, root = core_and_root([ExampleAPI()])
    # Authenticate first so the token maps to a known tenant.
    (response, _) = authenticate_with_token(
        self, root, tenant_id='turtlepower', token_id='ABCDEF987654321')
    self.assertEqual(response.code, 200)
    # Present the same token as X-Auth-Token when listing tenants.
    (response, json_body) = self.successResultOf(
        json_request(self, root, b"GET",
                     "/identity/v2.0/tenants",
                     headers={b'X-Auth-Token': [b'ABCDEF987654321']}))
    self.assertEqual(response.code, 200)
    self.assertEqual(len(json_body['tenants']), 1)
    self.assertEqual(json_body['tenants'][0]['id'], 'turtlepower')
    self.assertEqual(json_body['tenants'][0]['name'], 'turtlepower')
    self.assertTrue(json_body['tenants'][0]['enabled'] is True)
def test_list_tenants_for_unknown_token_gets_401(self):
    """
    Listing tenants with a token that was never issued results in a
    401 Unauthorized.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = self.successResultOf(json_request(
        self, root, b"GET",
        "/identity/v2.0/tenants",
        headers={b'X-Auth-Token': [b'XYZDEF987654321']}))
    self.assertEqual(response.code, 401)
    self.assertEqual(json_body['unauthorized']['code'], 401)
def test_rax_kskey_apikeycredentials(self):
    """
    Test apiKeyCredentials: unknown users 404, and once a user exists a
    32-character API key can be fetched for them.
    """
    core, root = core_and_root([ExampleAPI()])
    # Looking up credentials for an unknown user id yields a 404.
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "/identity/v2.0/users/1/OS-KSADM/credentials/RAX-KSKEY:apiKeyCredentials"
    ))
    self.assertEqual(response.code, 404)
    self.assertEqual(
        json_body['itemNotFound']['message'], 'User 1 not found')
    # Create the user by authenticating with password credentials.
    creds = {
        "auth": {
            "passwordCredentials": {
                "username": "HedKandi",
                "password": "Ministry Of Sound UK"
            },
            "tenantId": "77777"
        }
    }
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"POST", "/identity/v2.0/tokens", creds))
    self.assertEqual(response.code, 200)
    user_id = json_body['access']['user']['id']
    username = json_body['access']['user']['name']
    # Now the credentials lookup succeeds and returns a 32-char API key.
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "/identity/v2.0/users/" + user_id +
        "/OS-KSADM/credentials/RAX-KSKEY:apiKeyCredentials"
    ))
    self.assertEqual(response.code, 200)
    self.assertEqual(json_body['RAX-KSKEY:apiKeyCredentials']['username'],
                     username)
    self.assertTrue(
        len(json_body['RAX-KSKEY:apiKeyCredentials']['apiKey']) == 32)
def test_token_and_catalog_for_api_credentials_wrong_tenant(self):
    """
    Tenant ID is validated when provided in api-key auth.
    If authed once as one tenant ID, and a second time with a different
    tenant ID, then the second auth will return with a 401 Unauthorized.
    """
    core, root = core_and_root([ExampleAPI()])
    # First auth binds the user to tenant 12345.
    (response, json_body) = authenticate_with_api_key(
        self, root, tenant_id="12345")
    self.assertEqual(response.code, 200)
    username = json_body["access"]["user"]["id"]
    # The same user against a different tenant must be rejected.
    (response, fail_body) = authenticate_with_api_key(
        self, root, tenant_id="23456")
    self.assertEqual(response.code, 401)
    self.assertEqual(fail_body, {
        "unauthorized": {
            "code": 401,
            "message": ("Tenant with Name/Id: '23456' is not valid for "
                        "User 'demoauthor' (id: '{0}')".format(username))
        }
    })
def test_token_and_catalog_for_token_credentials_wrong_tenant(self):
    """
    Tenant ID is validated when provided in token auth.
    If authed once as one tenant ID, and a second time with a different
    tenant ID, then the second auth will return with a 401 Unauthorized.
    """
    core, root = core_and_root([ExampleAPI()])
    # First auth associates the token with tenant 12345.
    (response, json_body) = authenticate_with_token(
        self, root, tenant_id="12345")
    self.assertEqual(response.code, 200)
    # Using the same token for a different tenant must be rejected.
    (response, fail_body) = authenticate_with_token(
        self, root, tenant_id="23456")
    self.assertEqual(response.code, 401)
    self.assertEqual(fail_body, {
        "unauthorized": {
            "code": 401,
            "message": ("Token doesn't belong to Tenant with Id/Name: "
                        "'23456'")
        }
    })
def test_get_token_and_catalog_for_invalid_json_request_body(self):
    """
    :func: `get_token_and_service_catalog` returns response code 400
    when the JSON request body is not a recognized credential document.
    """
    core, root = core_and_root([ExampleAPI()])
    bad_creds = {"auth": {"token": {"id": "iuyiuyiuy-uyiuyiuy-1987878"}}}
    response, json_body = self.successResultOf(json_request(
        self, root, b"POST", "/identity/v2.0/tokens", bad_creds))
    self.assertEqual(response.code, 400)
    self.assertEqual(json_body["message"], "Invalid JSON request body")
def test_response_for_get_username(self):
    """
    Test to verify :func: `get_username`.
    """
    core, root = core_and_root([ExampleAPI()])
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v1.1/mosso/123456"
    ))
    # NOTE(review): the mosso lookup answers 301 (not 200) while still
    # carrying a JSON body with the user id — this appears intentional.
    self.assertEqual(301, response.code)
    self.assertTrue(json_body['user']['id'])
def test_response_for_impersonation(self):
    """
    Impersonating a user yields a 200 and a non-empty impersonation
    token id.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = impersonate_user(self, root)
    self.assertEqual(200, response.code)
    self.assertTrue(json_body['access']['token']['id'])
def test_impersonation_without_expires_in_seconds_attribute(self):
    """
    Impersonation succeeds even when the request omits the
    `expire-in-seconds` attribute.
    """
    core, root = core_and_root([ExampleAPI()])
    payload = {"RAX-AUTH:impersonation": {"user": {"username": "user-test"}}}
    response, json_body = self.successResultOf(json_request(
        self, root, b"POST",
        "http://mybase/identity/v2.0/RAX-AUTH/impersonation-tokens",
        payload))
    self.assertEqual(200, response.code)
    self.assertTrue(json_body['access']['token']['id'])
def test_impersonation_request_with_no_body_causes_http_bad_request(self):
    """
    An impersonation request with an empty body yields 400 Bad Request.
    """
    core, root = core_and_root([])
    response, json_body = self.successResultOf(json_request(
        self, root, b"POST",
        "http://mybase/identity/v2.0/RAX-AUTH/impersonation-tokens", b""))
    self.assertEqual(400, response.code)
def test_impersonation_request_with_invalid_body_causes_http_bad_request(self):
    """
    An impersonation request whose body is not valid JSON yields
    400 Bad Request.
    """
    core, root = core_and_root([])
    response = self.successResultOf(request(
        self, root, b"POST",
        "http://mybase/identity/v2.0/RAX-AUTH/impersonation-tokens",
        b"{ bad request: }"))
    self.assertEqual(400, response.code)
def test_response_for_validate_token(self):
    """
    Validating a token with `belongsTo` returns the token, a user with
    at least one role, and no service catalog.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/123456a?belongsTo=111111"
    ))
    self.assertEqual(200, response.code)
    access = json_body['access']
    self.assertEqual(access['token']['id'], '123456a')
    self.assertTrue(access['user']['id'])
    self.assertTrue(len(access['user']['roles']) > 0)
    # Validation responses carry no service catalog.
    self.assertTrue(access.get('serviceCatalog') is None)
def test_response_for_validate_token_when_tenant_not_provided(self):
    """
    :func: `validate_token` still succeeds when no tenant is supplied
    via the `belongsTo` query argument.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/123456a"
    ))
    self.assertEqual(200, response.code)
    token = json_body['access']['token']
    self.assertEqual(token['id'], '123456a')
    self.assertTrue(token['tenant']['id'])
def test_response_for_validate_token_then_authenticate(self):
    """
    Test to verify :func: `validate_token` and then authenticate:
    validating a token creates a session, and a later token auth for the
    same tenant observes the same token, tenant and user.
    """
    core, root = core_and_root([ExampleAPI()])
    (response1, json_body1) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/123456a?belongsTo=111111"
    ))
    self.assertEqual(200, response1.code)
    (response, json_body) = authenticate_with_token(self, root,
                                                    tenant_id="111111",
                                                    token_id="123456a")
    self.assertEqual(response.code, 200)
    # Both calls must resolve to the same underlying session.
    self.assertEqual(json_body["access"]["token"]["id"],
                     json_body1["access"]["token"]["id"])
    self.assertEqual(json_body["access"]["token"]["tenant"]["id"],
                     json_body1["access"]["token"]["tenant"]["id"])
    self.assertEqual(json_body["access"]["user"]["name"],
                     json_body1["access"]["user"]["name"])
def test_response_for_validate_impersonated_token(self):
    """
    Validating an impersonation token reports the impersonator via the
    RAX-AUTH:impersonator attribute.
    """
    core, root = core_and_root([ExampleAPI()])
    # Authenticate the impersonator (admin user)
    (response0, json_body0) = authenticate_with_token(
        self, root,
        tenant_id="111111",
        token_id="123456a")
    self.assertEqual(200, response0.code)
    impersonator_token = json_body0["access"]["token"]["id"]
    # Authenticate using the username so we know the tenant_id
    (response1, json_body1) = authenticate_with_username_password(
        self, root,
        username="test1",
        tenant_id="12345")
    self.assertEqual(200, response1.code)
    # Impersonate user test1
    (response2, json_body2) = impersonate_user(
        self, root,
        username="test1",
        impersonator_token=impersonator_token)
    self.assertEqual(200, response2.code)
    impersonated_token = json_body2["access"]["token"]["id"]
    # validate the impersonated_token
    (response3, json_body3) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/{0}?belongsTo=12345".format(
            impersonated_token)
    ))
    self.assertEqual(200, response3.code)
    self.assertTrue(json_body3["access"]["RAX-AUTH:impersonator"])
def test_response_for_validate_impersonated_token_multiple_users(self):
    """
    Impersonation tokens issued by different impersonators for the same
    user validate independently, each reporting its own impersonator.
    """
    core, root = core_and_root([ExampleAPI()])
    # Authenticate the impersonator (admin user 1)
    (response0, json_body0) = authenticate_with_token(
        self, root,
        tenant_id="111111",
        token_id="123456a")
    self.assertEqual(200, response0.code)
    impersonator_token1 = json_body0["access"]["token"]["id"]
    # Authenticate the impersonator (admin user 2)
    (response1, json_body1) = authenticate_with_token(
        self, root,
        tenant_id="222222",
        token_id="123456b")
    self.assertEqual(200, response1.code)
    impersonator_token2 = json_body1["access"]["token"]["id"]
    # Authenticate the impersonatee using the username so we know the
    # tenant_id to make the validate token id call with 'belongsTo'
    (response2, json_body2) = authenticate_with_username_password(
        self, root,
        username="test1",
        tenant_id="12345")
    self.assertEqual(200, response2.code)
    # Impersonate user test1 using admin user1's token
    (response3, json_body3) = impersonate_user(
        self, root,
        username="test1",
        impersonator_token=impersonator_token1)
    self.assertEqual(200, response3.code)
    impersonated_token1 = json_body3["access"]["token"]["id"]
    # Impersonate user test1 using admin user2's token
    (response4, json_body4) = impersonate_user(
        self, root,
        username="test1",
        impersonator_token=impersonator_token2)
    self.assertEqual(200, response4.code)
    impersonated_token2 = json_body4["access"]["token"]["id"]
    # validate the impersonated_token1
    (response5, json_body5) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/{0}?belongsTo=12345".format(
            impersonated_token1)
    ))
    self.assertEqual(200, response5.code)
    self.assertTrue(json_body5["access"]["RAX-AUTH:impersonator"])
    self.assertEqual(json_body5["access"]["RAX-AUTH:impersonator"]["name"],
                     json_body0["access"]["user"]["name"])
    # validate the impersonated_token2
    (response6, json_body6) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/{0}?belongsTo=12345".format(
            impersonated_token2)
    ))
    self.assertEqual(200, response6.code)
    self.assertTrue(json_body6["access"]["RAX-AUTH:impersonator"])
    self.assertEqual(json_body6["access"]["RAX-AUTH:impersonator"]["name"],
                     json_body1["access"]["user"]["name"])
def test_response_for_validate_token_with_maas_admin_role(self):
    """
    Test to verify :func: `validate_token` when the token_id provided
    is of an maas admin user specified in `mimic_presets`.
    """
    core, root = core_and_root([ExampleAPI()])
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/this_is_an_impersonator_token"
    ))
    self.assertEqual(200, response.code)
    # Preset impersonator tokens carry the monitoring service-admin role.
    self.assertEqual(json_body["access"]["RAX-AUTH:impersonator"]["roles"][0]["name"],
                     "monitoring:service-admin")
def test_response_for_validate_token_with_racker_role(self):
    """
    Test to verify :func: `validate_token` when the token_id provided
    is of a racker specified in `mimic_presets`.
    """
    core, root = core_and_root([ExampleAPI()])
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/this_is_a_racker_token"
    ))
    self.assertEqual(200, response.code)
    # Preset racker tokens carry the "Racker" impersonator role.
    self.assertEqual(json_body["access"]["RAX-AUTH:impersonator"]["roles"][0]["name"],
                     "Racker")
def test_response_for_validate_token_when_invalid(self):
    """
    Test to verify :func: `validate_token` when the token_id provided
    is invalid, as specified in `mimic_presets`.
    """
    core, root = core_and_root([ExampleAPI()])
    # First preset token that is configured to fail authentication.
    token = get_presets["identity"]["token_fail_to_auth"][0]
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/{0}".format(token)
    ))
    self.assertEqual(401, response.code)
def test_response_for_validate_token_with_observer_role(self):
    """
    Test to verify :func: `validate_token` when the tenant_id provided
    is of an observer role, as specified in `mimic_presets`.
    """
    core, root = core_and_root([ExampleAPI()])
    # `belongsTo` tenant is drawn from the observer_role presets.
    token = get_presets["identity"]["observer_role"][0]
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/any_token?belongsTo={0}".format(token)
    ))
    self.assertEqual(200, response.code)
    self.assertEqual(json_body["access"]["user"]["roles"][0]["name"],
                     "observer")
    self.assertEqual(json_body["access"]["user"]["roles"][0]["description"],
                     "Global Observer Role.")
def test_response_for_validate_token_with_creator_role(self):
    """
    Test to verify :func: `validate_token` when the tenant_id provided
    is of an creator role, as specified in `mimic_presets`.
    """
    core, root = core_and_root([ExampleAPI()])
    # `belongsTo` tenant is drawn from the creator_role presets.
    token = get_presets["identity"]["creator_role"][0]
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/any_token?belongsTo={0}".format(token)
    ))
    self.assertEqual(200, response.code)
    self.assertEqual(json_body["access"]["user"]["roles"][0]["name"],
                     "creator")
    self.assertEqual(json_body["access"]["user"]["roles"][0]["description"],
                     "Global Creator Role.")
def test_response_for_validate_token_with_admin_and_observer_role(self):
    """
    Test to verify :func: `validate_token` when the tenant_id provided
    is of an admin role, as specified in `mimic_presets`.
    """
    core, root = core_and_root([ExampleAPI()])
    # `belongsTo` tenant is drawn from the admin_role presets; admin
    # tenants carry both the admin and the observer roles.
    token = get_presets["identity"]["admin_role"][0]
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/tokens/any_token?belongsTo={0}".format(token)
    ))
    self.assertEqual(200, response.code)
    self.assertEqual(json_body["access"]["user"]["roles"][0]["name"],
                     "admin")
    self.assertEqual(json_body["access"]["user"]["roles"][0]["description"],
                     "Global Admin Role.")
    self.assertEqual(json_body["access"]["user"]["roles"][1]["name"],
                     "observer")
    self.assertEqual(json_body["access"]["user"]["roles"][1]["description"],
                     "Global Observer Role.")
def test_response_for_list_users(self):
    """
    Listing users by name returns a user carrying a RAX-AUTH:domainId.
    """
    core, root = core_and_root([ExampleAPI()])
    response, json_body = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/users?name=random_user"
    ))
    self.assertEqual(200, response.code)
    self.assertTrue(json_body['user']['RAX-AUTH:domainId'])
def test_response_for_list_users_after_authentication(self):
    """
    Test to verify :func: `get_users_details` after a token auth: the
    listed user matches the authenticated session's name, id and domain.
    """
    core, root = core_and_root([ExampleAPI()])
    (response, json_body) = authenticate_with_token(
        self, root, tenant_id="12345")
    self.assertEqual(response.code, 200)
    username = json_body["access"]["user"]["name"]
    user_id = json_body["access"]["user"]["id"]
    (response, json_body) = self.successResultOf(json_request(
        self, root, b"GET",
        "http://mybase/identity/v2.0/users?name={0}".format(username)
    ))
    self.assertEqual(200, response.code)
    # The domain id mirrors the tenant id from the auth above.
    self.assertEqual(json_body['user']['RAX-AUTH:domainId'], "12345")
    self.assertEqual(json_body['user']['username'], username)
    self.assertEqual(json_body['user']['id'], user_id)
class AuthIntegrationTests(SynchronousTestCase):
    """
    Tests that combine multiple auth calls together and assure that they
    return consistent data.
    """

    def test_user_for_tenant_then_impersonation(self):
        """
        After authenticating once as a particular tenant, get the user that
        tenant, then attempt to impersonate that user. The tenant IDs should
        be the same. This is an autoscale regression test.
        """
        core, root = core_and_root([ExampleAPI()])
        tenant_id = "111111"
        # authenticate as that user - this is not strictly necessary, since
        # getting a user for a tenant should work regardless of whether a user
        # was previously in the system, but this will ensure that we can check
        # the username
        response, json_body = authenticate_with_username_password(
            self, root, username="my_user", tenant_id=tenant_id)
        self.assertEqual(200, response.code)
        self.assertEqual(tenant_id,
                         json_body['access']['token']['tenant']['id'])
        # get user for tenant
        response, json_body = self.successResultOf(json_request(
            self, root, b"GET", "/identity/v1.1/mosso/111111"))
        # NOTE(review): the mosso endpoint answers 301 while still
        # carrying a JSON body.
        self.assertEqual(301, response.code)
        user = json_body['user']['id']
        self.assertEqual("my_user", user)
        # impersonate this user
        response, json_body = impersonate_user(self, root, username=user)
        self.assertEqual(200, response.code)
        token = json_body["access"]['token']["id"]
        # get endpoints for this token, see what the tenant is
        response, json_body = self.successResultOf(json_request(
            self, root, b"GET",
            "/identity/v2.0/tokens/{0}/endpoints".format(token)))
        self.assertEqual(200, response.code)
        self.assertEqual(tenant_id,
                         json_body["endpoints"][0]["tenantId"])
        # authenticate with this token and see what the tenant is
        response, json_body = authenticate_with_token(
            self, root, token_id=token, tenant_id=tenant_id)
        self.assertEqual(tenant_id,
                         json_body['access']['token']['tenant']['id'])

    def test_api_key_then_other_token_same_tenant(self):
        """
        After authenticating as a particular tenant with an API key,
        authenticate as the same tenant with a token that is different
        from the one returned by the API key response. Both tokens
        should be accessing the same session.
        """
        core, root = core_and_root([ExampleAPI()])
        tenant_id = "123456"
        response, json_body = authenticate_with_api_key(
            self, root, tenant_id=tenant_id)
        self.assertEqual(200, response.code)
        username_from_api_key = json_body["access"]["user"]["name"]
        # "fake_111111" is deliberately not the token returned above.
        response, json_body = authenticate_with_token(
            self, root, token_id="fake_111111", tenant_id=tenant_id)
        self.assertEqual(200, response.code)
        username_from_token = json_body["access"]["user"]["name"]
        # Since usernames are generated if not specified, and token
        # authentication does not specify a username, it is sufficient
        # to check that the usernames are equal. If the sessions are
        # distinct, then the token would have generated a UUID for its
        # username.
        self.assertEqual(username_from_api_key, username_from_token)
# Control-plane endpoint used to register injected auth behaviors.
auth_behavior_endpoint = (
    "http://mybase/mimic/v1.1/IdentityControlAPI/behaviors/auth")
@behavior_tests_helper_class
class IdentityAuthBehaviorControlPlane(object):
    """
    Helper object used to generate tests for identity auth behavior
    CRUD operations.

    (Docstrings previously referenced "Nova create server" behaviors by
    copy/paste; corrected to describe identity auth.)
    """
    # Injected behaviors apply to auth attempts matching this username.
    criteria = [{"username": "failme"}]
    # Behavior name plus its parameters; a 500 identityFault and a
    # plain 403 unauthorized variant.
    names_and_params = (
        ("fail",
         {"message": "Auth failure", "code": 500, "type": "identityFault"}),
        ("fail",
         {"message": "Invalid creds", "code": 403})
    )

    def __init__(self, test_case):
        """
        Set up the criteria, api mock, etc.
        """
        self.test_case = test_case
        _, self.root = core_and_root([])
        self.behavior_api_endpoint = auth_behavior_endpoint

    def trigger_event(self):
        """
        Authenticate with the username "failme", which matches the
        registered failure criteria.
        """
        return authenticate_with_username_password(
            self.test_case, self.root, username="failme")

    def validate_injected_behavior(self, name_and_params, response, body):
        """
        Given the behavior that is expected, validate the response and body.
        """
        name, params = name_and_params
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.test_case.assertEqual(response.code, params['code'])
        if params['code'] == 500:
            expected = {"identityFault": {"message": "Auth failure",
                                          "code": 500}}
        else:
            expected = {"unauthorized": {"message": "Invalid creds",
                                         "code": 403}}
        self.test_case.assertEqual(body, expected)

    def validate_default_behavior(self, response, body):
        """
        Validate the response and body of a successful authentication.
        """
        self.test_case.assertEqual(response.code, 200)
        self.test_case.assertIn('access', body)
class IdentityBehaviorInjectionTests(SynchronousTestCase):
    """
    Tests for specific failures and/or criteria.
    """

    def test_username_criteria_works_on_all_auth_methods_with_username(self):
        """
        Failure injection based on the username criteria will work on
        username/password, username/api-key, and impersonation. But not
        token ID, even if it's with the same tenant.
        """
        core, root = core_and_root([])
        fail_params = {"message": "Invalid creds", "code": 403}
        # make sure a user exists in mimic with the given username tenant
        # associated
        response, body = authenticate_with_username_password(
            self, root, username="failme", tenant_id="123456")
        self.assertEqual(response.code, 200)
        # username auths fail
        register_behavior(self, root, auth_behavior_endpoint,
                          behavior_name="fail",
                          criteria=[{"username": "failme"}],
                          parameters=fail_params)
        for auth_func in (authenticate_with_username_password,
                          authenticate_with_api_key,
                          impersonate_user):
            response, body = auth_func(self, root, username="failme")
            self.assertEqual(response.code, 403)
            self.assertEqual(body, {"unauthorized": fail_params})
        # token auth with that tenant ID succeeds
        response, body = authenticate_with_token(
            self, root, tenant_id="123456")
        self.assertEqual(response.code, 200)

    def test_tenant_id_criteria_works_on_all_auth_methods_with_tenant(self):
        """
        Failure injection based on the tenant-id criteria will work on
        username/password, username/api-key, and token.
        But not impersonation.
        """
        core, root = core_and_root([])
        fail_params = {"message": "Invalid creds", "code": 403}
        # make sure a user exists in mimic with the given username tenant
        # associated
        response, body = authenticate_with_username_password(
            self, root, username="failme", tenant_id="123456")
        self.assertEqual(response.code, 200)
        # tenant auths fail
        register_behavior(self, root, auth_behavior_endpoint,
                          behavior_name="fail",
                          criteria=[{"tenant_id": "123456"}],
                          parameters=fail_params)
        for auth_func in (authenticate_with_username_password,
                          authenticate_with_api_key,
                          authenticate_with_token):
            response, body = auth_func(self, root, tenant_id="123456")
            self.assertEqual(response.code, 403)
            self.assertEqual(body, {"unauthorized": fail_params})
        # impersonation with that username succeeds
        response, body = impersonate_user(self, root, username="failme")
        self.assertEqual(response.code, 200)

    def test_string_errors_as_well_as_json_errors(self):
        """
        Failure injection will return a string error response as well as a
        json response.
        """
        core, root = core_and_root([])
        fail_params = {"message": "Failure of JSON", "code": 500,
                       "type": "string"}
        register_behavior(self, root, auth_behavior_endpoint,
                          behavior_name="fail",
                          criteria=[{"username": "failme"}],
                          parameters=fail_params)
        # request_with_content returns the raw bytes body, not parsed JSON.
        response, body = authenticate_with_username_password(
            self, root, username="failme", request_func=request_with_content)
        self.assertEqual(response.code, 500)
        self.assertEqual(body, b"Failure of JSON")
class IdentityNondedicatedFixtureTests(SynchronousTestCase):
    """
    Tests for token fixtures issued to non-dedicated users.
    """

    def test_non_dedicated_tokens(self):
        """
        Obtain Identity entries when presented tokens issued to non-dedicated users
        """
        url = "/identity/v2.0/tokens"
        core, root = core_and_root([])
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/OneTwo"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "135790")
        # The remaining fixture tokens only need to validate successfully.
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/ThreeFour"))
        self.assertEqual(200, response.code)
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/ThreeFourImpersonator"))
        self.assertEqual(200, response.code)
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/ThreeFourRacker"))
        self.assertEqual(200, response.code)
class IdentityDedicatedFixtureTests(SynchronousTestCase):
    """
    Tests for token fixtures issued to dedicated (hybrid) users.
    """

    def test_dedicated_tokens(self):
        """
        Obtain Identity entries when presented tokens issued to dedicated users
        """
        url = "/identity/v2.0/tokens"
        core, root = core_and_root([])
        # Each fixture token maps to the hybrid:123456 tenant with a
        # distinct RAX-AUTH:contactId.
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridOneTwo"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["RAX-AUTH:contactId"], "12")
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridOneTwoRacker"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["RAX-AUTH:contactId"], "12")
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridThreeFour"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["RAX-AUTH:contactId"], "34")
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridThreeFourImpersonator"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["RAX-AUTH:contactId"], "34")
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridFiveSix"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["RAX-AUTH:contactId"], "56")
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridSevenEight"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["RAX-AUTH:contactId"], "78")
        # The last fixture asserts the user id rather than a contactId.
        (response, content) = self.successResultOf(
            json_request(self, root, b"GET", url + "/HybridNineZero"))
        self.assertEqual(200, response.code)
        self.assertEqual(content["access"]["token"]["tenant"]["id"], "hybrid:123456")
        self.assertEqual(content["access"]["user"]["id"], "90")
|
# -*- coding: UTF-8 -*-
'''
@author: leochechen
@summary: Exceptions that can occur while the CTF framework is running
'''
class FrameworkException(RuntimeError):
    '''
    Framework-level exception.
    '''
    pass
class CTFRuntimeException(Exception):
    '''
    Exception raised while the CTF flow is running.
    '''
    pass
class VarAbort(CTFRuntimeException):
    '''
    Test case aborted during execution.
    '''
    pass
class VarFail(CTFRuntimeException):
    '''
    Test case verification failed.
    '''
    pass
class GroupAbort(CTFRuntimeException):
    '''
    Test case group aborted.
    '''
    pass
class VarNotRun(CTFRuntimeException):
    '''
    Test case was not run.
    '''
    pass
class VarUnsupported(CTFRuntimeException):
    '''
    The requested run mode is not supported.
    '''
    pass
class VarmapException(FrameworkException):
    '''
    XML file parsing error.
    '''
    pass
class VarmapParseException(FrameworkException):
    '''
    Raised when tag-driven parsing encounters an unrecognized tag.
    '''
    pass
class CTFTestServerError(FrameworkException):
    '''
    The CTF server raised an error.
    '''
    pass
class CTFInvaildArg(FrameworkException):
    '''
    CTF data-transfer / argument error.
    '''
    # NOTE(review): class name misspells "Invalid"; renaming would break
    # existing callers, so it is kept as-is.
    pass
|
# building a translator: read translation_file.txt, translate it to
# Japanese, and write the result to translated_file.txt.
from translate import Translator

translator = Translator(to_lang="ja")

text = ''
try:
    with open('translation_file.txt', 'r') as f:
        text = f.read()
except FileNotFoundError:
    # Nothing to translate if the source file is absent.
    print("file Not found")
else:
    # BUG FIX: previously `translation` was written unconditionally, so a
    # missing source file crashed with NameError instead of just printing
    # the not-found message. Translate and write only on a successful read.
    translation = translator.translate(text)
    print(translation)
    # NOTE: open(..., 'w') truncates/creates and never raises
    # FileExistsError, so the old `except FileExistsError` was dead code;
    # genuine I/O errors now propagate.
    with open('translated_file.txt', 'w') as f:
        f.write(translation)
import cv2
import numpy as np
class PolarHeightFilter:
    @staticmethod
    def filterHeight(inputImage, threshold=30):
        '''
        Filter out vertical color runs shorter than `threshold` pixels.

        Scans each column bottom-up, measuring the height of each run of
        identically-colored pixels; a run shorter than `threshold` whose
        preceding run was not black is overwritten with the preceding
        color. Mutates and returns `inputImage` (assumed H x W x 3 uint8
        -- TODO confirm with callers).
        '''
        height = inputImage.shape[0]
        width = inputImage.shape[1]
        blackColor = np.zeros((1, 3), np.uint8)
        # range/print(): ported from Python 2 (xrange / print statement).
        for i in range(width):
            prevColor = blackColor
            # .copy(): later in-place writes must not mutate these colors
            # through a live view of the image.
            currentColor = inputImage[height - 1, i, :].copy()
            currentHeight = 0
            for j in reversed(range(height)):
                # BUG FIX: the original tested `a.all() == b.all()`, which
                # compares two booleans ("all channels non-zero"), not the
                # colors themselves. Compare element-wise instead.
                if (inputImage[j, i, :] == currentColor).all():
                    currentHeight += 1
                else:
                    if currentHeight < threshold and not (prevColor == blackColor).all():
                        print(j, i, currentHeight)  # debug trace (kept from original)
                        # NOTE(review): the overwritten rows lie below the
                        # mismatch row j, while the just-measured run
                        # occupied rows j+1..j+currentHeight -- confirm the
                        # intended fill range.
                        for k in range(j - currentHeight, j):
                            inputImage[k, i] = prevColor
                    prevColor = currentColor
                    currentColor = inputImage[j, i, :].copy()
                    currentHeight = 1
        return inputImage
|
import time
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import glob
import re
from pupil_parse.preprocess_utils import config as cf
from pupil_parse.preprocess_utils import extract_session_metadata as md
def main():
    """
    Join model-derived learning signals with per-trial pupil measures and
    write one combined CSV per subject/session/reward-code combination.
    """
    (raw_data_path, _, processed_data_path, figure_path, simulated_data_path) = cf.path_config()
    (unique_subjects, unique_sessions, unique_reward_codes) = md.extract_subjects_sessions(
        raw_data_path, reward_task=1)
    start_time = time.time()
    for subject in unique_subjects:
        for reward_code, session in zip(unique_reward_codes, unique_sessions):
            processed_fn = ('tepr' + '_sub-' + str(subject) + '_sess-' +
                            str(session) + '_cond-' + str(reward_code) + '_trial.csv')
            learning_signals_fn = 'sub-{}_cond-{}_learning_signals.csv'.format(subject, reward_code)
            # The session number in the pupil filenames is resolved with a
            # glob because read_csv itself cannot handle a wildcard.
            pupil_amplitude_base = 'tepr_sub-{}_sess-*_cond-{}_trial_peaks.csv'.format(subject, reward_code)
            pupil_summary_base = 'tepr_sub-{}_sess-*_cond-{}_trial_means.csv'.format(subject, reward_code)
            pupil_amplitude_fn = glob.glob(os.path.join(processed_data_path, pupil_amplitude_base))
            pupil_summary_fn = glob.glob(os.path.join(processed_data_path, pupil_summary_base))
            if not pupil_amplitude_fn:
                print('No data for this session.')
                continue
            learning_signals_df = pd.read_csv(os.path.join(simulated_data_path, learning_signals_fn))
            pupil_amplitude_df = pd.read_csv(pupil_amplitude_fn[0])
            pupil_summary_df = pd.read_csv(pupil_summary_fn[0])
            try:
                trial_df = pd.concat([learning_signals_df, pupil_amplitude_df, pupil_summary_df], axis=1)
                # Drop columns duplicated across the three input frames.
                trial_df = trial_df.loc[:, ~trial_df.columns.duplicated()]
                trial_df.drop(columns=['trial_epoch'], inplace=True)
                trial_df.to_csv(os.path.join(processed_data_path, processed_fn), index=False)
            except Exception:
                # BUG FIX: narrowed from a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit. Kept best-effort
                # per-session behavior; redundant `pass` removed.
                print('error in concat.')
    end_time = time.time()
    time_elapsed = end_time - start_time
    print('time elapsed: ', time_elapsed)


if __name__ == '__main__':
    main()
|
# A hash maps input of arbitrary length, through a hashing algorithm,
# to a fixed-length output -- in short, it compresses any-length input
# down to some fixed size.
# A hash value must be stable, so only immutable values are hashable;
# a (mutable) list therefore cannot be hashed, but a tuple can.
hash((1,2,3))
print(hash((1,2,3)))
# hash([1,2,3]) would raise TypeError: unhashable type: 'list'
|
import argparse
import math
import pickle

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

import neural_network as mm
from envs import AtariEnv
from neural_network import tf, tint
from rainbow import DQN
from replay_buffer import ReplayBuffer
# Command-line configuration for the TD(lambda) evaluation script.
# BUG FIX: --Lambda and --num_iterations are read by main() (ARGS.Lambda,
# ARGS.num_iterations) but were never registered, crashing at startup.
parser = argparse.ArgumentParser()
parser.add_argument("--learning_rate", default=2.5e-4, help="Learning rate", type=float)
parser.add_argument("--run", default=0, help="run", type=int)
parser.add_argument("--mbsize", default=32, help="Minibatch size", type=int)
parser.add_argument("--other_mbsize", default=2048, help="Minibatch size", type=int)
parser.add_argument("--buffer_size", default=500000, help="Replay buffer size", type=int)
parser.add_argument("--clone_interval", default=10000, type=int)
parser.add_argument("--num_iterations", default=100000, help="Number of training iterations", type=int)
parser.add_argument("--Lambda", default=0.9, help="Lambda for TD(lambda) returns", type=float)
parser.add_argument("--num_rand_classes", default=10, type=int)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument("--opt", default='adam')
parser.add_argument("--loss_func", default='qlearn')
parser.add_argument("--env_name", default='ms_pacman')
parser.add_argument("--device", default='cuda', help="device")
class odict(dict):
    """A dict whose keys are also readable as attributes (d.key == d["key"])."""

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; a missing key
        # raises KeyError (not AttributeError), same as the original.
        return self[name]
def sl1(a, b):
    """Smooth L1 distance: quadratic for small gaps, linear for large ones."""
    diff = a - b
    linear = diff.abs()
    quadratic = diff * diff
    # Elementwise selector: 1 where the linear branch is smaller.
    pick_linear = (linear < quadratic).float()
    return pick_linear * linear + (1 - pick_linear) * quadratic
def make_opt(opt, theta, lr, weight_decay):
    """Build a torch optimizer over `theta` selected by name.

    Supported names: "sgd", "msgd" (SGD with 0.9 momentum), "rmsprop",
    "adam". Raises ValueError for anything else.
    """
    factories = {
        "sgd": lambda: torch.optim.SGD(theta, lr, weight_decay=weight_decay),
        "msgd": lambda: torch.optim.SGD(theta, lr, momentum=0.9, weight_decay=weight_decay),
        "rmsprop": lambda: torch.optim.RMSprop(theta, lr, weight_decay=weight_decay),
        "adam": lambda: torch.optim.Adam(theta, lr, weight_decay=weight_decay),
    }
    if opt not in factories:
        raise ValueError(opt)
    return factories[opt]()
def fill_buffer_with_expert(env, replay_buffer):
    """Fill `replay_buffer` with transitions from a pretrained Rainbow expert.

    Loads the per-game Rainbow checkpoint, runs an epsilon=0.01 greedy
    policy until the buffer is (nearly) full, and assigns every distinct
    RAM state a random class label in [0, ARGS.num_rand_classes).

    Returns:
        np.ndarray of per-buffer-slot random class labels.
    """
    model_path = f"rainbow_atari_models/{ARGS.env_name}.pth"
    with open(model_path, "rb") as f:
        m = torch.load(f)
    device = mm.get_device()
    # Hyperparameters must match the pretrained checkpoint's architecture.
    dqn = DQN(
        odict({
            "history_length": 4,
            "hidden_size": 256,
            "architecture": "data-efficient",
            "atoms": 51,
            "noisy_std": 0.1,
            "V_min": -10,
            "V_max": 10,
            "device": device,
        }), env.num_actions)
    dqn.load_state_dict(m)
    dqn.eval()
    dqn.to(device)
    rand_classes = np.zeros(replay_buffer.size)
    ram2class = {}  # RAM snapshot (bytes) -> random class label
    totr = 0  # running episode reward; tracked but only reset on done
    obs = env.reset()
    replay_buffer.new_episode(obs, env.enumber % 2)
    it = 0
    # Leave a few slots of headroom at the end of the buffer.
    while replay_buffer.idx < replay_buffer.size - 10:
        # Near-greedy expert action; frames are scaled from uint8 to [0, 1].
        action = dqn.act_e_greedy(
            torch.tensor(obs).float().to(device) / 255, epsilon=0.01)
        obs_ram = env.getRAM().tostring()
        if obs_ram not in ram2class:
            ram2class[obs_ram] = np.random.randint(0, ARGS.num_rand_classes)
        rand_classes[replay_buffer.idx] = ram2class[obs_ram]
        obsp, r, done, tr = env.step(action)
        replay_buffer.add(obs, action, r, done, env.enumber % 2)
        obs = obsp
        totr += tr
        if done:
            totr = 0
            obs = env.reset()
            replay_buffer.new_episode(obs, env.enumber % 2)
        it += 1
    # Remove last episode from replay buffer, as it didn't end
    # (walk backwards zeroing sum-tree priorities until the episode id
    # changes; NOTE(review): assumes replay_buffer.p holds episode ids --
    # confirm against ReplayBuffer).
    it = replay_buffer.idx
    curp = replay_buffer.p[it]
    while replay_buffer.p[it] == curp:
        replay_buffer._sumtree.set(it, 0)
        it -= 1
    print(f'went from {replay_buffer.idx} to {it} when deleting states')
    return rand_classes
def main():
    """TD(lambda) policy evaluation on expert Atari trajectories.

    Builds a small conv value network, fills the replay buffer with an
    expert (Rainbow) policy, precomputes returns and lambda-returns, then
    fits the value head with the tdL loss while periodically recording
    Measures and parameter snapshots; results are pickled per run.
    """
    device = torch.device(ARGS.device)
    mm.set_device(device)
    results = {
        "measure": [],
        "parameters": [],
    }
    seed = ARGS.run + 1_642_559  # A large prime number
    torch.manual_seed(seed)
    np.random.seed(seed)
    rng = np.random.RandomState(seed)
    env = AtariEnv(ARGS.env_name)
    mbsize = ARGS.mbsize
    Lambda = ARGS.Lambda
    nhid = 32
    num_measure = 1000  # measure every 1000 iterations
    gamma = 0.99
    clone_interval = ARGS.clone_interval
    num_iterations = ARGS.num_iterations
    num_Q_outputs = 1  # value head only (policy evaluation, not control)
    # Model
    _Qarch, theta_q, Qf, _Qsemi = mm.build(
        mm.conv2d(4, nhid, 8, stride=4),  # Input is 84x84
        mm.conv2d(nhid, nhid * 2, 4, stride=2),
        mm.conv2d(nhid * 2, nhid * 2, 3),
        mm.flatten(),
        mm.hidden(nhid * 2 * 12 * 12, nhid * 16),
        mm.linear(nhid * 16, num_Q_outputs),
    )
    clone_theta_q = lambda: [i.detach().clone() for i in theta_q]
    theta_target = clone_theta_q()
    opt = make_opt(ARGS.opt, theta_q, ARGS.learning_rate, ARGS.weight_decay)
    # Replay Buffer
    replay_buffer = ReplayBuffer(seed, ARGS.buffer_size)
    # Losses: one-step TD, lambda-return regression (LG), and Monte Carlo (g).
    td = lambda s, a, r, sp, t, idx, w, tw: sl1(
        r + (1 - t.float()) * gamma * Qf(sp, tw)[:, 0].detach(),
        Qf(s, w)[:, 0],
    )
    tdL = lambda s, a, r, sp, t, idx, w, tw: sl1(
        Qf(s, w)[:, 0], replay_buffer.LG[idx])
    mc = lambda s, a, r, sp, t, idx, w, tw: sl1(
        Qf(s, w)[:, 0], replay_buffer.g[idx])
    # Define metrics
    measure = Measures(
        theta_q, {
            "td": lambda x, w: td(*x, w, theta_target),
            "tdL": lambda x, w: tdL(*x, w, theta_target),
            "mc": lambda x, w: mc(*x, w, theta_target),
        }, replay_buffer, results["measure"], 32)
    # Get expert trajectories
    rand_classes = fill_buffer_with_expert(env, replay_buffer)
    # Compute initial values
    replay_buffer.compute_values(lambda s: Qf(s, theta_q), num_Q_outputs)
    replay_buffer.compute_returns(gamma)
    replay_buffer.compute_reward_distances()
    replay_buffer.compute_episode_boundaries()
    replay_buffer.compute_lambda_returns(lambda s: Qf(s, theta_q), Lambda, gamma)
    # Run policy evaluation
    for it in range(num_iterations):
        do_measure = not it % num_measure
        sample = replay_buffer.sample(mbsize)
        if do_measure:
            measure.pre(sample)
        replay_buffer.compute_value_difference(sample, Qf(sample[0], theta_q))
        loss = tdL(*sample, theta_q, theta_target)
        loss = loss.mean()
        loss.backward()
        opt.step()
        opt.zero_grad()
        replay_buffer.update_values(sample, Qf(sample[0], theta_q))
        if do_measure:
            measure.post()
        if it and clone_interval and it % clone_interval == 0:
            # Refresh the frozen target and recompute lambda-returns with it.
            theta_target = clone_theta_q()
            replay_buffer.compute_lambda_returns(lambda s: Qf(s, theta_q), Lambda, gamma)
        if it and it % clone_interval == 0 or it == num_iterations - 1:
            ps = {str(i): p.data.cpu().numpy() for i, p in enumerate(theta_q)}
            ps.update({"step": it})
            results["parameters"].append(ps)
        # BUG FIX: `run` was undefined here (NameError on first iteration);
        # the run index lives in ARGS.run.
        with open(f'results/td_lambda_{ARGS.run}.pkl', 'wb') as f:
            pickle.dump(results, f)
class Measures:
    """Records before/after loss statistics around a training step.

    `pre` caches each configured loss on three sample sets (the training
    minibatch, an independent random minibatch, and states near the
    sampled indices); `post` recomputes them and records per-loss gains
    (pre - post) plus replay-buffer diagnostics into `results`.
    """
    def __init__(self, params, losses, replay_buffer, results, mbsize):
        # params: model parameters the losses are evaluated with.
        # losses: mapping name -> fn(sample, params) returning per-item loss.
        # results: external list that accumulates one dict per measurement.
        self.p = params
        self.losses = losses
        self.rb = replay_buffer
        self.mbsize = mbsize
        self.rs = results
    def pre(self, sample):
        self._sampleidx = sample[-1]  # buffer indices of the minibatch
        # NOTE(review): slice_near presumably returns states within +/-30
        # steps of the sampled indices -- confirm against ReplayBuffer.
        near_s, self.near_pmask = self.rb.slice_near(self._sampleidx, 30)
        self._samples = {
            "sample": sample,
            "other": self.rb.sample(self.mbsize),
            "near": near_s,
        }
        self._cache = {}
        for loss_name, loss in self.losses.items():
            for item_name, item in self._samples.items():
                with torch.no_grad():
                    self._cache[f'{item_name}_{loss_name}_pre'] = loss(item, self.p)
    def post(self):
        r = {
            "vdiff_acc": self.rb.vdiff_acc + 0,  # `+ 0` copies the value
            "vdiff_cnt": self.rb.vdiff_cnt + 0,
            'rdist': self.rb.rdist[self._sampleidx].data.cpu().numpy(),
            'g': self.rb.g[self._sampleidx].data.cpu().numpy(),
            'near_pmask': self.near_pmask.data.cpu().numpy(),
        }
        # Reset the buffer's accumulators in place for the next window.
        self.rb.vdiff_acc *= 0
        self.rb.vdiff_cnt *= 0
        for loss_name, loss in self.losses.items():
            for item_name, item in self._samples.items():
                k = f'{item_name}_{loss_name}'
                with torch.no_grad():
                    self._cache[f'{k}_post'] = (loss(item, self.p))
                # Gain: how much this loss dropped over the training step.
                r[f'{k}_gain'] = (self._cache[f'{k}_pre'] -
                                  self._cache[f'{k}_post']).cpu().data.numpy()
                r[k] = self._cache[f'{k}_post'].cpu().data.numpy()
        self.rs.append(r)
if __name__ == "__main__":
    # ARGS is the module-level global read by main() and
    # fill_buffer_with_expert(); it is only bound when run as a script.
    ARGS = parser.parse_args()
    main()
|
def accum(s):
    """Expand each character: the i-th char (1-based) repeated i times,
    title-cased, joined by dashes, e.g. "abc" -> "A-Bb-Ccc"."""
    pieces = []
    for count, ch in enumerate(s, start=1):
        pieces.append((ch * count).title())
    return '-'.join(pieces)
|
# coding: utf-8
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import word_tokenize
from nltk.stem import RSLPStemmer
import string
import numpy
class LSA:
    """Latent Semantic Analysis over a corpus of Portuguese phrases.

    Pipeline: tokenize + RSLP-stem the phrases, build a TF-IDF matrix,
    then denoise it by reconstructing from the top-k singular triplets,
    where k is the smallest rank whose cumulative normalized singular
    values reach `p_eig`.
    """
    def __init__(self, ngram_max, min_freq, p_eig, phrases):
        # ngram_max: largest n-gram fed to the vectorizer (min is fixed at 1).
        # min_freq: minimum document frequency (TfidfVectorizer min_df).
        # p_eig: fraction of spectral mass to keep, in (0, 1]; 1 disables
        #        dimensionality reduction entirely.
        # phrases: iterable of raw input sentences.
        self.ngram_max = ngram_max
        self.min_freq = min_freq
        self.p_eig = p_eig
        self.ngram_min = 1
        # Portuguese stopword list (kept verbatim; note it contains a few
        # duplicates such as "por" and "mais", which are harmless).
        self.stopwords = ["e", "de", "da", "do", "dos", "das", "em", "o", "a", "os", "as", "que", "um", "uma", "para", "com", "no", "na", "nos", "nas",
                          "por", "por", "mais", "se", "como", "mais", "à", "às", "ao", "aos", "ou", "quando", "muito", "pela", "pelas", "pelos",
                          "pelo", "isso", "esse", "essa", "esses", "essas", "num", "numa", "nuns", "numas", "este", "esta", "estes", "estas", "isto",
                          "aquilo", "aquele", "aquela", "aqueles", "aquelas", "sem", "entre", "nem", "quem", "qual", "depois", "só", "mesmo", "mas"]
        self.phrases = phrases
        self.features_utterance = []
    @staticmethod
    def normalizer(x_abnormal):
        """Min-max normalize an array into [0, 1].

        Constant arrays are returned unchanged to avoid division by zero.
        """
        minimum = x_abnormal.min()
        maximum = x_abnormal.max()
        if minimum == maximum:
            return x_abnormal
        return (x_abnormal - minimum) / (maximum - minimum)
    def tokenize(self, t):
        """Lowercase, tokenize, drop stopwords/punctuation and stem `t`.

        A phrase that is itself a stopword yields no tokens at all.
        """
        if self.stopwords:
            if t in self.stopwords:
                return []
        sentence = word_tokenize(t.lower())
        stems = []
        for word in sentence:
            if self.stopwords:
                if word not in self.stopwords and word not in string.punctuation:
                    stems.append(RSLPStemmer().stem(word.lower()))
            else:
                if word not in string.punctuation:
                    stems.append(RSLPStemmer().stem(word.lower()))
        # (The original copied `aux` into a second list before returning;
        # that copy was redundant and has been removed.)
        return stems
    def manage_keywords(self, keywords):
        """Tokenize keywords and return the deduplicated vocabulary.

        Multi-token keywords are re-joined with single spaces; first-seen
        order is preserved.
        """
        tokens, vocabulary = [], []
        for keyword in keywords:
            t = self.tokenize(keyword)
            if len(t) > 1:
                tokens.append(' '.join(t))
            else:
                tokens.extend(t)
        for token in tokens:
            if token not in vocabulary:
                vocabulary.append(token)
        return vocabulary
    def tf_idf(self):
        """Fit TF-IDF on `self.phrases`; return the dense doc-term matrix.

        Side effect: stores the learned feature names in
        `self.features_utterance`.
        """
        vec = TfidfVectorizer(min_df=self.min_freq,
                              stop_words=self.stopwords,
                              tokenizer=self.tokenize,
                              ngram_range=(self.ngram_min, self.ngram_max))
        x = vec.fit_transform(self.phrases)
        # get_feature_names() was removed in scikit-learn 1.2; prefer the
        # new API but keep the old call for older installs.
        if hasattr(vec, 'get_feature_names_out'):
            self.features_utterance = list(vec.get_feature_names_out())
        else:
            self.features_utterance = vec.get_feature_names()
        return x.todense()
    def eliminate_dimensions(self, tfidf):
        """Reconstruct `tfidf` from its top-k singular triplets.

        k is the smallest rank whose cumulative normalized singular values
        reach `self.p_eig`. BUG FIX: the original loop had no `break`, so
        k always ended up at full rank (the cumulative sum reaches 1 on the
        last iteration) and no reduction ever happened; k could also be
        referenced unbound in pathological cases.
        """
        if self.p_eig == 1:
            return tfidf
        u, eigen, v = numpy.linalg.svd(tfidf, compute_uv=True)
        normalized_eigenvalues = eigen / numpy.sum(eigen)
        k = len(eigen)  # fall back to full rank if the threshold is never hit
        res = 0
        for i in range(len(eigen)):
            res += normalized_eigenvalues[i]
            if res >= self.p_eig:
                k = i + 1
                break
        # Rank-k reconstruction: U_k * diag(S_k) * V_k.
        return u[:, :k] @ numpy.diag(eigen[:k]) @ v[:k, :]
    def train_phrases(self):
        """Full pipeline: TF-IDF then reduction, rounded to 10 decimals."""
        tfidf_utterance = numpy.array(self.tf_idf())
        return numpy.round(self.eliminate_dimensions(tfidf_utterance), 10)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.