text stringlengths 8 6.05M |
|---|
import argparse
import httplib2
import os
import sys
import json
import io
import os.path
from os import listdir
from os.path import isfile,join
# simulate the 'Cloud' in local storage
ROOT_DIR = '/home/ubuntu/tmp/'
def upload_file(service, from_file_name, to_file_name):
    """Copy a local file into the simulated cloud folder for `service`.

    Any pre-existing object with the same name is removed first (best effort).
    """
    # Best-effort delete. delete_file takes (service, object_name); the old call
    # passed an extra '' argument, so it always raised and the delete never ran.
    try:
        delete_file(service, to_file_name)
    except OSError:
        pass
    out_folder_name = ROOT_DIR + service['folder'] + '/'
    # Context managers guarantee both handles close even if a write fails.
    with open(from_file_name, 'r') as src, open(out_folder_name + to_file_name, 'w') as dst:
        dst.write(src.read())
def upload_string(service, str_to_upload, to_file_name):
    """Write `str_to_upload` as a new object in the service's cloud folder."""
    target_dir = ROOT_DIR + service['folder'] + '/'
    with open(target_dir + to_file_name, 'w') as out_f:
        out_f.write(str_to_upload)
def delete_file(service, object_name):
    """Remove `object_name` from the service's simulated cloud folder."""
    target = ROOT_DIR + service['folder'] + '/' + object_name
    os.remove(target)
def download_file(service, object_name, to_file_name):
    """Copy `object_name` from the service's cloud folder to a local file."""
    in_folder_name = ROOT_DIR + service['folder'] + '/'
    # The original never closed either handle; `with` closes them deterministically.
    with open(in_folder_name + object_name, 'r') as f_in, open(to_file_name, 'w') as f_out:
        f_out.write(f_in.read())
    return None
def get_all_file_names(service):
    """Return [(name, size_in_bytes), ...] for every regular file in the folder."""
    folder = ROOT_DIR + service['folder'] + '/'
    names = []
    for entry in os.listdir(folder):
        path = join(folder, entry)
        if isfile(path):
            names.append((entry, os.stat(path).st_size))
    return names
def create_service_object(extra_info=''):
    """Build the minimal 'service' handle used by this module.

    `extra_info` is the folder name under ROOT_DIR. It now defaults to '' so
    the no-argument call in the __main__ block works (previously a TypeError).
    """
    return {'folder': extra_info}
if __name__ == "__main__":
    # Smoke test: list the files in the default (root) cloud folder.
    s = create_service_object()
    # print is a function in Python 3; the old statement form was a SyntaxError.
    print(get_all_file_names(s))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Optional, Tuple
from gym.utils import seeding
from numpy.random import RandomState
def np_random(seed: Optional[int]) -> Tuple[RandomState, int]:
    """Seed numpy's random generator through gym's seeding helper.

    Args:
        seed: Desired seed, or None to let gym draw one.

    Returns:
        Tuple[RandomState, int]: the generator and the seed actually used.
    """
    generator, chosen_seed = seeding.np_random(seed)
    assert isinstance(chosen_seed, int)
    return generator, chosen_seed
|
# Sample problem instance: K blocks, a constant M (never read by solution), array A.
K = 3
M = 5
A = [2, 1, 5, 1, 2, 2, 2]
def solution(K, M, A):
    """Return the minimal achievable maximum block-sum when A is split into
    at most K contiguous blocks (binary search over the answer; M is unused)."""
    hi = sum(A)   # upper bound: everything in a single block
    lo = max(A)   # lower bound: the largest single element
    if K == 1:
        return hi
    if K >= len(A):
        return lo
    best = 0
    while lo <= hi:
        mid = (lo + hi) // 2
        if get_k_for_rating(mid, A) > K:
            # Capacity `mid` needs too many blocks -> answer is larger.
            lo = mid + 1
        else:
            hi = mid - 1
            best = mid
    return best


def get_k_for_rating(rating, A):
    """Number of contiguous blocks needed so no block-sum exceeds `rating`."""
    blocks = 1
    acc = 0
    for value in A:
        if acc + value > rating:
            blocks += 1
            acc = value
        else:
            acc += value
    return blocks
# Regression checks for solution(); M is passed through but never read.
assert (solution(K, M, A) == 6)
assert (solution(4, M, [0, 0, 0, 0]) == 0)
assert (solution(4, M, [4, 4, 4, 4]) == 4)
assert (solution(4, M, [4, 4, 7, 4]) == 7)
assert (solution(4, M, [1]) == 1)
assert (solution(4, M, [1]) == 1)
assert (solution(1, M, [2, 3]) == 5)
assert (solution(2, M, [1, 2, 1, 2, 1, 2]) == 5)
assert (solution(1, M, [0, 0, 0, 0, 0, 1]) == 1)
assert (solution(2, M, [0, 0, 0, 0, 0, 1]) == 1)
assert (solution(2, M, [0, 0, 0, 0, 1, 1]) == 1)
assert (solution(3, M, [0, 0, 99, 0, 0]) == 99)
assert (solution(2, M, [0, 0, 99, 1, 0]) == 99)
assert (solution(3, M, [0, 1, 99, 1, 0]) == 99)
|
import numpy as np
import os
import neural_network as nn
import model as m
import distribution as d
import settings as s
def prt_distribution(distribution, model=None):
    """Print the distribution's normalization (its sum) and, if given, the model."""
    """ # If you want to see the 'Fritz' distribution which a specific parameter, then uncomment this part
    parameter = 1
    n = 0
    while d.get_para_range()[n] < parameter:
        if n + 1 >= len(d.get_para_range()):
            break
        n = n + 1
    print('Distribution: {}, parameter: {}, sum: {}'.format(d.get_name(), d.get_para_range()[n], np.sum(distribution,
                                                                                                        axis=0)))
    """
    # Fixed: the precision 9 sat outside round(), so the sum was rounded to an
    # int and the 9 was passed as an ignored extra argument to format().
    print('Distribution: {}, normalization (sum): {}'.format(d.get_name(), round(np.sum(distribution), 9)))
    if model is not None:
        m.print_model(model)
    d.print_distribution(distribution)
def find_min_res(distribution):
    """Run several training attempts with the same setup and keep the result
    closest to the target `distribution`."""
    print('\nStart finding the closest distribution according to the target distribution in {} subrounds'
          .format(s.NeuralNetwork().n_of_min_samples))
    best = nn.training()
    best_dist = nn.np_discrepancy(best, target=distribution, dist_mod='ec')
    distances = np.ones(s.NeuralNetwork().n_of_min_samples)
    distances[0] = best_dist
    print('Subround 1 of {} with a founded distance of {}\n'.format(s.NeuralNetwork().n_of_min_samples, best_dist))
    for attempt in range(s.NeuralNetwork().n_of_min_samples - 1):
        candidate = nn.training()
        cand_dist = nn.np_discrepancy(candidate, target=distribution, dist_mod='ec')
        distances[attempt + 1] = cand_dist
        if cand_dist < best_dist:
            best = candidate
            best_dist = nn.np_discrepancy(best, target=distribution, dist_mod='ec')
        print('Subround {} of {} with a new founded distance of {} (min distance is {})\n'.format(attempt + 2,
              s.NeuralNetwork().n_of_min_samples, cand_dist, best_dist))
    nn.set_find_min_distances(distances)
    return best
def train(distribution, model=None):
    """Train the network(s) to reproduce `distribution` and return the result.

    Returns None (after printing a warning) for an unknown distribution name.
    """
    if d.get_name() == 'fritz' or d.get_name() == 'LLL' or d.get_name() == 'LLL_random_noise':
        # Reproduce the Fritz / LLL distribution over a range of parameters.
        d.set_target(distribution)
        result = np.zeros_like(distribution)
        for i in range(len(d.get_para_range()) - 1, -1, -1):
            # Fixed: the precision 9 sat outside round(), rounding the sum to an
            # int and passing 9 as an ignored extra argument to format().
            print('\nRound {} of {}, with model {} and {} distribution of param v = {}. Normalization (sum): {}'
                  .format(len(d.get_para_range()) - i, len(d.get_para_range()), m.get_name(), d.get_name(),
                          round(s.Distribution().para_range[i], 6), round(np.sum(distribution[i], axis=0), 9)))
            nn.new_neural_network(model, distribution[i, :])
            if s.NeuralNetwork().find_min_result:
                result[i, :] = find_min_res(distribution[i, :])
            else:
                result[i, :] = nn.training()
            print('Distance calculated: ', nn.np_discrepancy(result[i, :], target=distribution[i, :], dist_mod='ec'))
        return result
    elif d.get_name() == 'quantum calculated distribution':
        # Search possibly used states in order to reproduce the quantum distribution.
        d.set_target(distribution)
        prt_distribution(distribution.flatten())
        return nn.find_used_states_for_distribution(distribution)
    else:
        print('Wrong distribution, pls choose a valid distribution or define it first in <<distribution>>')
def start(distribution_name, network=None):
    """End-to-end run: build model + target distribution, train, show both."""
    model, distribution = m.new_model(network, distribution_name)
    prt_distribution(distribution)
    result = train(distribution, model)
    prt_distribution(result)
    if model is not None:
        nn.plot_result(result, distribution)
if __name__ == '__main__':
    # Make sure every output folder exists before a run writes into it.
    for i in ['models_plot', 'result_distance', 'result_plot_sweep', 'saved_settings']:
        if not os.path.exists(i):
            os.makedirs(i)
    #start('fritz', 'triangle')
    #start('quantum calculated distribution')
    start('LLL', 'triangle')
    print("\n********Finish. If you used the neural network, then the resulting plots are now in the folder "
          "'D:/ETH/Bachelorarbeit/Code/result_...********")
|
from django.db import models
class Titanic(models.Model):
    """One Titanic passenger record (mirrors the classic Kaggle dataset columns)."""
    PassengerId = models.CharField(max_length=50)
    Survived = models.IntegerField(null=True, blank=True)  # nullable: unlabeled rows allowed
    Pclass = models.IntegerField(default=0)
    Age = models.IntegerField()
    Name = models.CharField(max_length=50)
    Sex = models.CharField(max_length=6)
    SibSp = models.IntegerField()
    Parch = models.IntegerField()
    Ticket = models.CharField(max_length=10)
    Fare = models.FloatField(default=0)
    Cabin = models.CharField(max_length=10, null=True, blank=True)
    Embarked = models.CharField(max_length=10)

    # The data is converted into a dictionary, since the object needs to be fed
    # into a dataframe; the dictionary converts into the dataframe.
    def to_dict(self):
        """Return the row as a plain dict (e.g. for building a pandas DataFrame)."""
        return {
            'PassengerId': self.PassengerId,
            'Survived': self.Survived,
            'Pclass': self.Pclass,
            'Name': self.Name,
            'Sex': self.Sex,
            'Age': self.Age,
            'SibSp': self.SibSp,
            'Parch': self.Parch,
            'Ticket': self.Ticket,
            'Fare': self.Fare,
            'Cabin': self.Cabin,
            'Embarked': self.Embarked
        }
|
######################################
# author ben lawson <balawson@bu.edu>
# Edited by: Craig Einstein <einstein@bu.edu>
######################################
# Some code adapted from
# CodeHandBook at http://codehandbook.org/python-web-application-development-using-flask-and-mysql/
# and MaxCountryMan at https://github.com/maxcountryman/flask-login/
# and Flask Offical Tutorial at http://flask.pocoo.org/docs/0.10/patterns/fileuploads/
# see links for further understanding
###################################################
import flask
from flask import Flask, Response, request, render_template, redirect, url_for
from flaskext.mysql import MySQL
import flask_login
from operator import itemgetter
#for image uploading
from werkzeug import secure_filename
import os, base64
mysql = MySQL()
app = Flask(__name__)
app.secret_key = 'super secret string'  # Change this!
# These will need to be changed according to your credentials.
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'password'  # change this to your own password
app.config['MYSQL_DATABASE_DB'] = 'photoshare'
app.config['MYSQL_DATABASE_HOST'] = '127.0.0.1'
mysql.init_app(app)
# begin code used for login
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# NOTE(review): a single module-level connection is shared by the whole app --
# acceptable for a class project, not safe under concurrent requests.
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT email from Users")
users = cursor.fetchall()
def getUserList():
    """Fetch every registered email address as rows of (email,)."""
    cur = conn.cursor()
    cur.execute("SELECT email from Users")
    return cur.fetchall()
class User(flask_login.UserMixin):
    """Minimal flask-login user; only the `id` attribute (the email) is set."""
    pass
@login_manager.user_loader
def user_loader(email):
    """flask-login session callback: return a User for a known email, else None."""
    known = getUserList()
    if not email or email not in str(known):
        return None
    user = User()
    user.id = email
    return user
@login_manager.request_loader
def request_loader(request):
    """flask-login request callback: authenticate from the posted form fields."""
    known = getUserList()
    email = request.form.get('email')
    if not email or email not in str(known):
        return None
    user = User()
    user.id = email
    cursor = mysql.connect().cursor()
    # Parameterized query: never interpolate user input into SQL.
    cursor.execute("SELECT password FROM Users WHERE email = %s", (email,))
    data = cursor.fetchall()
    if not data:
        return None
    pwd = str(data[0][0])
    # NOTE(review): passwords are stored and compared in plain text -- should be hashed.
    user.is_authenticated = request.form.get('password') == pwd
    return user
'''
A new page looks like this:
@app.route('new_page_name')
def new_page_function():
return new_page_html
'''
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or check credentials and log the user in (POST)."""
    if flask.request.method == 'GET':
        return '''
        <form action='login' method='POST'>
        <input type='text' name='email' id='email' placeholder='email'></input>
        <input type='password' name='password' id='password' placeholder='password'></input>
        <input type='submit' name='submit'></input>
        </form></br>
        <a href='/'>Home</a>
        '''
    # POST: the page is receiving credentials.
    email = flask.request.form['email']
    cursor = conn.cursor()
    # Parameterized lookup guards against SQL injection via the email field.
    if cursor.execute("SELECT password FROM Users WHERE email = %s", (email,)):
        data = cursor.fetchall()
        pwd = str(data[0][0])
        # NOTE(review): plain-text password comparison -- should use hashing.
        if flask.request.form['password'] == pwd:
            user = User()
            user.id = email
            flask_login.login_user(user)
            return flask.redirect(flask.url_for('protected'))
    # Credentials did not match.
    return "<a href='/login'>Try again</a>\
    </br><a href='/register'>or make an account</a>"
@app.route('/logout')
def logout():
    """Log the current user out and show a confirmation page."""
    flask_login.logout_user()
    return render_template('hello.html', message='Logged out')
@login_manager.unauthorized_handler
def unauthorized_handler():
    """Shown when a @login_required view is hit without a session."""
    return render_template('unauth.html')
#Edit by HUIYI CHEN
@app.route("/register", methods=['POST', 'GET'])
def register_user():
    """Create a new account from the registration form and log the user in."""
    if request.method == 'POST':
        # form.get never raises, so the old try/except around these was dead code.
        email = request.form.get('email')
        password = request.form.get('password')
        first_name = request.form.get('first_name')
        last_name = request.form.get('last_name')
        birthday = request.form.get('birthday')
        if not email or not password or not first_name or not last_name or not birthday:
            print("couldn't find all tokens")  # shell-only diagnostic
            return render_template('register.html', message="not enough information", supress=True)
        gender = request.form.get('gender')
        if not gender:
            gender = 0
        hometown = request.form.get('hometown')
        if hometown is None:
            hometown = 'NULL'
        cursor = conn.cursor()
        if isEmailUnique(email):
            # Parameterized INSERT: form values must never be interpolated into SQL.
            # NOTE(review): the password is stored in plain text -- it should be hashed.
            cursor.execute(
                "INSERT INTO Users (email, password, first_name, last_name, date_of_birth, gender, hometown) "
                "VALUES (%s, %s, %s, %s, %s, %s, %s)",
                (email, password, first_name, last_name, birthday, gender, hometown))
            conn.commit()
            # Log the new user in right away.
            user = User()
            user.id = email
            flask_login.login_user(user)
            return render_template('hello.html', name=email, message='Account Created!')
        else:
            print("email is not unique")
            return render_template('register.html', supress=False)
    else:
        return render_template('register.html', supress=True)
def getUsersPhotos(uid):
    """Return [(imgdata, picture_id, caption), ...] for user `uid`."""
    cursor = conn.cursor()
    # Bound parameter instead of string interpolation.
    cursor.execute("SELECT imgdata, picture_id, caption FROM Pictures WHERE user_id = %s", (uid,))
    return cursor.fetchall()
def getUserIdFromEmail(email):
    """Look up the numeric user_id for `email` (assumes the email exists)."""
    cursor = conn.cursor()
    # Bound parameter: email is user-supplied in several call sites.
    cursor.execute("SELECT user_id FROM Users WHERE email = %s", (email,))
    return cursor.fetchone()[0]
def isEmailUnique(email):
    """True if no account exists for `email` (the address is still free)."""
    cursor = conn.cursor()
    # execute() returns the affected/selected row count; non-zero means taken.
    if cursor.execute("SELECT email FROM Users WHERE email = %s", (email,)):
        return False
    return True
#end login code
#BY HUIYI CHEN
#begin friend related code
def getUsersFriends(uid):
    """Return (first_name, last_name, email) rows for every friend of `uid`."""
    cursor = conn.cursor()
    cursor.execute("SELECT first_name, last_name, email FROM Users WHERE user_id IN "
                   "(SELECT userA FROM Befriend_With WHERE userB = %s)", (uid,))
    return cursor.fetchall()
#Friends list page
# Friends list page
@app.route('/friends')
@flask_login.login_required
def friends_list():
    """Show the logged-in user's friends."""
    uid = getUserIdFromEmail(flask_login.current_user.id)
    return render_template('friends.html', name=flask_login.current_user.id, friendslist=getUsersFriends(uid))
@app.route('/addfriend', methods=['GET', 'POST'])
@flask_login.login_required
def add_friend():
    """Create a mutual friendship between the logged-in user and `friend_email`."""
    if request.method == 'POST':
        friend_email = request.form.get('friend_email')
        if friend_email is None:
            return render_template('addfriend.html')
        if isEmailUnique(friend_email):
            # isEmailUnique is True when no such account exists.
            return render_template('addfriend.html', message="User not exist, try a different one")
        friend_id = getUserIdFromEmail(friend_email)
        uid = getUserIdFromEmail(flask_login.current_user.id)
        if areFriends(friend_id, uid):
            return render_template('addfriend.html', message="You are friends already, try a different one")
        elif friend_id == uid:
            return render_template('addfriend.html', message="No need to add yourself, try a different one")
        else:
            cursor = conn.cursor()
            # Insert both directions so the relation is symmetric; values are bound.
            cursor.execute("INSERT INTO Befriend_With (UserA, UserB) VALUES (%s, %s)", (uid, friend_id))
            cursor.execute("INSERT INTO Befriend_With (UserA, UserB) VALUES (%s, %s)", (friend_id, uid))
            conn.commit()
            albums = getAlbumFromUid(uid)
            return render_template('hello.html', name=flask_login.current_user.id, message="You have a new friend", albums=albums)
    else:
        return render_template('addfriend.html', name=flask_login.current_user.id)
def areFriends(fid, uid):
    """True if `fid` and `uid` are already linked in Befriend_With."""
    cursor = conn.cursor()
    return bool(cursor.execute("SELECT * FROM Befriend_With WHERE userA = %s AND userB = %s", (fid, uid)))
@app.route('/searchfriends', methods=['GET', 'POST'])
@flask_login.login_required
def search_friends():
    """Search users by first or last name and offer the matches as friend candidates."""
    me = flask_login.current_user.id
    if request.method != "POST":
        return render_template('searchfriends.html', name=me)
    first_name = request.form.get('first_name')
    last_name = request.form.get('last_name')
    if first_name:
        results = getResults(first_name, 'first_name')
    elif last_name:
        results = getResults(last_name, 'last_name')
    else:
        return render_template('searchfriends.html', name=me, message="empty input, try a different one")
    if results:
        return render_template('addfriend.html', name=me, results=results)
    return render_template('searchfriends.html', name=me, message="User not exist, try a different one")
def getResults(name, attribute):
    """Search Users by name; `attribute` selects the column to match."""
    # Column names cannot be bound as parameters, so whitelist them explicitly;
    # the value itself is bound to prevent injection.
    if attribute not in ('first_name', 'last_name'):
        return ()
    cursor = conn.cursor()
    cursor.execute("SELECT first_name, last_name, email FROM Users WHERE {0} = %s".format(attribute), (name,))
    return cursor.fetchall()
#end friend related code
#BY HUIYI CHEN
#begin user activity code
@app.route('/contribution')
def contribution():
    """Show the ten users with the most photos + comments contributed."""
    cursor = conn.cursor()
    cursor.execute("SELECT user_id FROM Users")
    totals = []
    for (user_id,) in cursor.fetchall():
        photo_count = len(getUsersPhotos(user_id))
        comment_count = getUsersComments(user_id)
        totals.append((getUserEmailFromID(user_id), photo_count + comment_count))
    totals.sort(key=itemgetter(1), reverse=True)
    top_ten = totals[:10]
    print(top_ten)
    return render_template('contribution.html', contributors=top_ten)
def getUserEmailFromID(uid):
    """Return the email address registered for user `uid`."""
    cursor = conn.cursor()
    cursor.execute("SELECT email FROM Users WHERE user_id = %s", (uid,))
    return cursor.fetchone()[0]
def getUsersComments(uid):
    """Return how many comments user `uid` has written."""
    cursor = conn.cursor()
    cursor.execute("SELECT COUNT(*) FROM Comments WHERE user_id = %s", (uid,))
    return cursor.fetchone()[0]
#end user activity code
#BY HUIYI CHEN
#begin album code
@app.route('/createalbum', methods=['GET', 'POST'])
@flask_login.login_required
def create_album():
    """Create a new, uniquely named album for the logged-in user."""
    if request.method == 'POST':
        uid = getUserIdFromEmail(flask_login.current_user.id)
        album_name = request.form.get('album_name')
        if albumExists(album_name, uid):
            return render_template('createalbum.html', message="Album exsits, try a different name")
        cursor = conn.cursor()
        # Bound parameters: album_name is user input.
        cursor.execute("INSERT INTO Albums(user_id, date_of_creation, album_name) VALUES (%s, CURRENT_DATE, %s)",
                       (uid, album_name))
        conn.commit()
        albums = getAlbumFromUid(uid)
        return render_template("hello.html", name=flask_login.current_user.id, message="Album created successfully", albums=albums, tags=getTags())
    else:
        return render_template('createalbum.html')
def getAlbumFromUid(uid):
    """Return (album_id, album_name) rows owned by `uid`.

    Fixed: the old body immediately overwrote `uid` with the logged-in user's
    id, so the argument was ignored (and the call crashed for anonymous users).
    """
    cursor = conn.cursor()
    cursor.execute("SELECT album_id, album_name FROM Albums WHERE user_id = %s", (uid,))
    return cursor.fetchall()
def albumExists(album_name, uid):
    """True if `uid` already owns an album named `album_name`."""
    cursor = conn.cursor()
    return bool(cursor.execute("SELECT * FROM Albums WHERE album_name = %s AND user_id = %s", (album_name, uid)))
def getAlbumPictures(aid):
    """Return (picture_id, caption, imgdata) rows for album `aid`."""
    cursor = conn.cursor()
    cursor.execute("SELECT picture_id, caption, imgdata FROM Pictures WHERE album_id = %s", (aid,))
    return cursor.fetchall()
def getAlbumInformation(aid):
    """Return (album_name, date_of_creation, user_id) for album `aid`."""
    cursor = conn.cursor()
    # aid comes straight from the URL -- bind it, never interpolate.
    cursor.execute("SELECT album_name, date_of_creation, user_id FROM Albums WHERE album_id = %s", (aid,))
    return cursor.fetchone()
@app.route('/album/<aid>')
def album(aid):
    """Display one album and its pictures; `auth` marks whether the viewer owns it."""
    (album_name, date_of_creation, uid) = getAlbumInformation(aid)
    pictures = getAlbumPictures(aid)
    cursor = conn.cursor()
    cursor.execute("SELECT first_name, last_name, email FROM Users WHERE user_id = %s", (uid,))
    (first_name, last_name, email) = cursor.fetchone()
    # This route has no @login_required, so current_user may be anonymous;
    # get_id() returns None in that case instead of raising.
    auth = (email == flask_login.current_user.get_id())
    return render_template('album.html', name=flask_login.current_user.get_id(), album_name=album_name,
                           first_name=first_name, last_name=last_name, date_of_creation=date_of_creation,
                           pictures=pictures, album_id=aid, auth=auth)
@app.route('/deletealbum/<aid>')
@flask_login.login_required
def deleteAlbum(aid):
    """Delete album `aid` and return to the profile page."""
    # NOTE(review): no ownership check -- any logged-in user can delete any album.
    cursor = conn.cursor()
    cursor.execute("DELETE FROM Albums WHERE album_id = %s", (aid,))
    conn.commit()
    uid = getUserIdFromEmail(flask_login.current_user.id)
    albums = getAlbumFromUid(uid)
    return render_template('hello.html', name=flask_login.current_user.id, message="Album has been deleted", albums=albums, tags=getTags())
#end album code
def getTags():
    """Return every distinct tag word in the system."""
    cur = conn.cursor()
    cur.execute("SELECT DISTINCT word FROM Tags")
    return cur.fetchall()
#begin picture code
@app.route('/picture/<pid>')
def picture(pid):
    """Display one picture with its tags, comments, likes, and owner info."""
    (uid, caption, imgdata, album_id, likes) = getPictureInformation(pid)
    tags = getPictureTags(pid)
    cursor = conn.cursor()
    cursor.execute("SELECT first_name, last_name, email FROM Users WHERE user_id = %s", (uid,))
    (first_name, last_name, email) = cursor.fetchone()
    cursor = conn.cursor()
    cursor.execute("SELECT album_name FROM Albums WHERE album_id = %s", (album_id,))
    album_name = cursor.fetchone()[0]
    # Public route: current_user may be anonymous; get_id() is None then.
    auth = (email == flask_login.current_user.get_id())
    comments = getComments(pid)
    return render_template('picture.html', name=flask_login.current_user.get_id(), caption=caption, imgdata=imgdata,
                           first_name=first_name, last_name=last_name, tags=tags, album_name=album_name, auth=auth,
                           pid=pid, album_id=album_id, comments=comments, likes=likes)
@app.route('/likepicture/<pid>')
def like(pid):
    """Increment the like counter of picture `pid` and return to its page."""
    cursor = conn.cursor()
    # In-database increment; pid is bound, not interpolated.
    cursor.execute("UPDATE Pictures SET likes = likes + 1 WHERE picture_id = %s", (pid,))
    conn.commit()
    return redirect(url_for('picture', pid=pid))
def getPictureInformation(pid):
    """Return (user_id, caption, imgdata, album_id, likes) for picture `pid`."""
    cursor = conn.cursor()
    cursor.execute("SELECT user_id, caption, imgdata, album_id, likes FROM Pictures WHERE picture_id = %s", (pid,))
    return cursor.fetchone()
def getComments(pid):
    """Return [(first_name, last_name, text), ...] for comments on picture `pid`."""
    cursor = conn.cursor()
    # Fixed: the old query returned EVERY comment in the system; filter by picture.
    cursor.execute("SELECT user_id, comment_text FROM Comments WHERE picture_id = %s", (pid,))
    id_comments = cursor.fetchall()
    comments = []
    for user_id, text in id_comments:
        if user_id == 0:
            # user_id 0 marks an anonymous commenter.
            first_name, last_name = 'Anonymous', 'User'
        else:
            cursor = conn.cursor()
            # Fixed: Users has no picture_id column; look the author up by id only.
            cursor.execute("SELECT first_name, last_name FROM Users WHERE user_id = %s", (user_id,))
            (first_name, last_name) = cursor.fetchone()
        comments.append((first_name, last_name, text))
    return comments
@app.route('/deletepicture/<pid>')
@flask_login.login_required
def deletePicture(pid):
    """Delete picture `pid` and return to the profile page."""
    # NOTE(review): no ownership check -- any logged-in user can delete any picture.
    cursor = conn.cursor()
    cursor.execute("DELETE FROM Pictures WHERE picture_id = %s", (pid,))
    conn.commit()
    uid = getUserIdFromEmail(flask_login.current_user.id)
    albums = getAlbumFromUid(uid)
    return render_template('hello.html', name=flask_login.current_user.id, message="Picture has been deleted", albums=albums, tags=getTags())
def getPictureTags(pid):
    """Return the tag words attached to picture `pid`."""
    cursor = conn.cursor()
    cursor.execute("SELECT word FROM Tags WHERE picture_id = %s", (pid,))
    return cursor.fetchall()
@app.route('/addtag/<pid>', methods=['GET', 'POST'])
@flask_login.login_required
def addTag(pid):
    """Attach a single-word tag to picture `pid`."""
    if request.method == 'POST':
        tag = request.form.get('tag')
        if tagExists(tag, pid):
            return render_template('addtag.html', message="Tag exsits, try a different one", pid=pid)
        if ' ' in tag:
            return render_template('addtag.html', message="Tag must be a single word", pid=pid)
        cursor = conn.cursor()
        # Bound parameters: the tag is user input.
        cursor.execute("INSERT INTO Tags(word, picture_id) VALUES (%s, %s)", (tag, pid))
        conn.commit()
        return redirect(url_for('picture', pid=pid))
    else:
        return render_template('addtag.html', pid=pid)
def tagExists(tag, pid):
    """True if `tag` is already attached to picture `pid`."""
    cursor = conn.cursor()
    return bool(cursor.execute("SELECT * FROM Tags WHERE word = %s AND picture_id = %s", (tag, pid)))
#end picture code
#begin comment code
@app.route('/comment/<pid>', methods=['GET', 'POST'])
def comment(pid):
    """Add a comment to picture `pid` (POST) or render the comment form (GET)."""
    if request.method == 'POST':
        comment_text = request.form.get('comment')
        # Fixed typo: `current_usr` -> `current_user` (old name raised AttributeError).
        # get_id() returns None for anonymous visitors instead of raising.
        email = flask_login.current_user.get_id()
        if email:
            uid = getUserIdFromEmail(email)
        else:
            uid = 0  # anonymous commenter sentinel
        cursor = conn.cursor()
        # Fixed: missing ')' after the column list; values are bound, not interpolated.
        cursor.execute(
            "INSERT INTO Comments(user_id, comment_date, comment_text, picture_id) "
            "VALUES (%s, CURRENT_DATE, %s, %s)",
            (uid, comment_text, pid))
        conn.commit()
        return redirect(url_for('picture', pid=pid))
    else:
        return render_template('comment.html', pid=pid)
#end comment code
#begin view tags code
@app.route('/myphotosbytag/<tag>')
@flask_login.login_required
def myPhotosByTag(tag):
    """Show the logged-in user's photos carrying `tag`."""
    uid = getUserIdFromEmail(flask_login.current_user.id)
    # Fixed: use a fresh cursor (the old code reused the module-level one) and
    # bind both parameters.
    cursor = conn.cursor()
    cursor.execute(
        "SELECT Pictures.picture_id, Pictures.caption, Pictures.imgdata "
        "FROM Tags INNER JOIN Pictures ON Tags.picture_id = Pictures.picture_id "
        "WHERE word = %s AND Pictures.user_id = %s", (tag, uid))
    pictures = cursor.fetchall()
    return render_template('tags.html', pictures=pictures, tag=tag)
@app.route('/photosbytag/<tag>')
def photosByTag(tag):
    """Show every photo (any owner) carrying `tag`."""
    cursor = conn.cursor()
    cursor.execute(
        "SELECT Pictures.picture_id, Pictures.caption, Pictures.imgdata "
        "FROM Tags INNER JOIN Pictures ON Tags.picture_id = Pictures.picture_id "
        "WHERE word = %s", (tag,))
    pictures = cursor.fetchall()
    return render_template('tags.html', pictures=pictures, tag=tag)
def getUserNameFromUid(uid):
    """Return (first_name, last_name) rows for user `uid` (fetchall, as before)."""
    cursor = conn.cursor()
    cursor.execute("SELECT first_name, last_name FROM Users WHERE user_id = %s", (uid,))
    return cursor.fetchall()
def getAlbumName(aid):
    """Return the name of album `aid`.

    Fixed: the old query selected from Users, which has no album_id column.
    """
    cursor = conn.cursor()
    cursor.execute("SELECT album_name FROM Albums WHERE album_id = %s", (aid,))
    return cursor.fetchone()[0]
#end view tags code
@app.route('/profile')
@flask_login.login_required
def protected():
    """Profile page: the logged-in user's albums plus all tags."""
    uid = getUserIdFromEmail(flask_login.current_user.id)
    albums = getAlbumFromUid(uid)
    return render_template('hello.html', name=flask_login.current_user.id, message="Here's your profile", albums=albums, tags=getTags())
#begin photo uploading code
# photos uploaded using base64 encoding so they can be directly embeded in HTML
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])


def allowed_file(filename):
    """True if `filename` has an allowed image extension (case-insensitive).

    Generalized: the extension is lower-cased first so 'IMG.JPG' is accepted.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload/<aid>', methods=['GET', 'POST'])
@flask_login.login_required
def upload_file(aid):
    """Upload a photo into album `aid`; images are stored base64-encoded."""
    if request.method == 'POST':
        uid = getUserIdFromEmail(flask_login.current_user.id)
        imgfile = request.files['photo']
        caption = request.form.get('caption')
        photo_data = base64.standard_b64encode(imgfile.read())
        cursor = conn.cursor()
        # Bind every value -- the caption is user text and the encoded image can be huge.
        cursor.execute("INSERT INTO Pictures (imgdata, user_id, caption, album_id, likes) VALUES (%s, %s, %s, %s, 0)",
                       (photo_data, uid, caption, aid))
        conn.commit()
        return render_template('hello.html', name=flask_login.current_user.id, message='Photo uploaded!', albums=getAlbumFromUid(uid), tags=getTags())
    # GET: render the upload form.
    else:
        return render_template('upload.html', aid=aid)
#end photo uploading code
#default page
# default page
@app.route("/", methods=['GET'])
def hello():
    """Landing page: welcome message plus the global tag list."""
    return render_template('hello.html', message='Welecome to Photoshare', tags=getTags())
if __name__ == "__main__":
    # Invoked when run directly from the shell:
    #   $ python app.py
    app.run(port=5000, debug=True)
|
from aiogram import types
# Inline keyboard for the "easy" item list; each button's callback_data echoes
# its label ("Назад4" is the back-navigation callback).
easy = types.InlineKeyboardMarkup(
    inline_keyboard=[
        [types.InlineKeyboardButton(text="Town Portal Scroll", callback_data="Town Portal Scroll")],
        [types.InlineKeyboardButton(text="Ironwood Branch", callback_data="Ironwood Branch")],
        [types.InlineKeyboardButton(text="Quelling Blade", callback_data="Quelling Blade")],
        [types.InlineKeyboardButton(text="<-- Назад", callback_data="Назад4")],
    ])
import sys
import matplotlib
matplotlib.use("Qt5Agg")
from PySide2 import QtWidgets, QtGui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import cv2 as cv
# Personnal modules
from drag import DraggablePoint
class MyGraph(FigureCanvas):
    """A canvas that shows an image and two draggable baseline points."""

    # Previous hard-coded sample image; pass `img_path` instead of editing code.
    DEFAULT_IMG_PATH = "/home/timo/Data2/wingNet/wings/No_TPS/avi_wings/0_wings/fly1.jpg"

    def __init__(self, parent=None, width=5, height=4, dpi=100, img_path=None):
        """Build the figure, load `img_path` (default: DEFAULT_IMG_PATH) and
        place the two initial draggable points."""
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111)
        self.axes.grid(False)
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # Holds the 2 draggable points currently on the canvas.
        self.list_points = []
        if img_path is None:
            img_path = self.DEFAULT_IMG_PATH
        img = cv.imread(img_path)
        img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        self.axes.imshow(img)
        self.show()
        self.plotDraggablePoints([0.1, 0.1], [0.2, 0.2], size=[0.02, 0.02], img_shape=img.shape)

    def plotDraggablePoints(self, xy1, xy2, size=None, img_shape=(200, 200)):
        """Plot and define the 2 draggable points of the baseline.

        Fixed: the declared default size=None crashed on `size[0]`; it now
        falls back to (0.02, 0.02).
        """
        if size is None:
            size = (0.02, 0.02)
        self.list_points.append(DraggablePoint(self, x=xy1[0], y=xy1[1], size=size[0], img_shape=img_shape))
        self.list_points.append(DraggablePoint(self, x=xy2[0], y=xy2[1], size=size[1], img_shape=img_shape))
        self.updateFigure()

    def clearFigure(self):
        """Remove all points and reset the axes."""
        self.axes.clear()
        self.axes.grid(True)
        del self.list_points[:]
        self.updateFigure()

    def updateFigure(self):
        """Redraw the canvas; must be called after every plot change."""
        self.draw()
if __name__ == '__main__':
    # Stand-alone demo: open the canvas in a Qt window.
    app = QtWidgets.QApplication(sys.argv)
    ex = MyGraph()
    sys.exit(app.exec_())
#import numpy as np
#import math
#
#
#def square_distance_centroid(points, norm_factor):
# points[0::2] = np.array(points[0::2])*norm_factor[0]
# points[1::2] = np.array(points[1::2])*norm_factor[1]
# pts = [np.array([x, y]) for x, y in zip(points[0::2], points[1::2])]
# centroid = np.array([sum(points[0::2]) / len(pts), sum(points[1::2]) / len(pts)])
#
# distances = np.array([math.sqrt(sum(x)) for x in ((pts[:] - centroid) ** 2)[:]])
# distances = distances ** 2
# metric = math.sqrt(sum(distances))
# return metric
#
#
#points = [0,0,0,2,2,0,2,2]
#print(square_distance_centroid(points, np.array([2, 5])))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 20:32:36 2019
@author: HP
"""
# Compute n! with manual digit-array arithmetic (most-significant digit first).
res = []
res.append(1)
n = 100


def update_arr(x):
    """Multiply the global digit array `res` by x in place, carrying digits."""
    carry = 0
    for i in range(len(res) - 1, -1, -1):
        num = res[i] * x + carry
        res[i] = num % 10
        carry = num // 10  # integer division instead of int(num/10)
    # Prepend any remaining carry digits.
    while carry > 0:
        res.insert(0, carry % 10)
        carry //= 10


for x in range(1, n + 1):
    update_arr(x)

# Join the digits directly; the old digit->char lookup dict re-implemented str().
s = ''.join(str(digit) for digit in res)
print(s)
# Generated by Django 3.2.3 on 2021-06-12 03:34
from django.db import migrations, models
import django.db.models.deletion
import pizza_app.models
class Migration(migrations.Migration):
    """Auto-generated migration: rename a misspelled Order field, add
    Pizza.image, and create the IngredientSize table."""

    dependencies = [
        ('pizza_app', '0005_auto_20210610_0547'),
    ]

    operations = [
        # Fix the typo fee_delvery -> fee_delivery.
        migrations.RenameField(
            model_name='order',
            old_name='fee_delvery',
            new_name='fee_delivery',
        ),
        migrations.AddField(
            model_name='pizza',
            name='image',
            field=models.CharField(default='.', max_length=45, validators=[pizza_app.models.ValidarLongitudMinima]),
            preserve_default=False,
        ),
        migrations.CreateModel(
            name='IngredientSize',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
                ('type_size', models.IntegerField()),
                ('price', models.IntegerField()),
                ('discount', models.BooleanField(default=False)),
                ('special_price', models.IntegerField()),
                ('ingredient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sizes', to='pizza_app.ingredient')),
            ],
        ),
    ]
|
from django.test import TestCase
class FetcherTestCase(TestCase):
    def test_DataSourceModelExists(self):
        """
        Test if DataSource model exists
        """
        # Import inside the test so a missing model fails this test rather
        # than breaking test collection for the whole module.
        try:
            from fetcher.models import DataSource
        except ImportError:
            self.fail('Cannot import DataSource')
|
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
from Parser_ import HTMLParser
from Loader import Loader
import sys
import json
if __name__ == '__main__':
    # Parse pages 3092..5899, skipping the known-bad page 3800,
    # then run the loader over the collected data.
    for page_id in range(3092, 5900):
        if page_id == 3800:
            continue
        parser = HTMLParser(page_id)
        parser.searchCols()
        del parser
        print("s")
    loader = Loader()
    loader.repeat_loader()
|
def bread(func):
    """Decorator that prints a top and bottom "bread" line around *func*.

    Fix: the original wrapper took no arguments and discarded *func*'s
    return value, so a decorated function always returned None. The
    wrapper now forwards arguments and propagates the result.
    """
    def wrapper(*args, **kwargs):
        print("</----\>")
        result = func(*args, **kwargs)
        print("<\____/>")
        return result
    return wrapper
@bread
def sandwich(food="--ветчина--"):
    # sandwich itself returns its argument; what print() below shows
    # depends on how the bread wrapper handles this return value.
    return food
print(sandwich())
# Run-length encode a whitespace-separated sequence read from stdin:
# first print the compacted symbols, then the run counts.
Direcciones=input().split()
contador=1   # length of the current run
conteo=[]    # run lengths
compacto=[]  # one symbol per run
for i in range(len(Direcciones)):
    if i<(len(Direcciones)-1):
        if Direcciones[i]==Direcciones[i+1]:
            contador+=1
        else:
            # Run ends here: record the symbol and its count.
            compacto.append(Direcciones[i])
            conteo.append(contador)
            contador=1
    else:
        # Last element: close the final run.
        if Direcciones[i]==Direcciones[i-1]:
            contador+=1
            compacto.append(Direcciones[i])
            conteo.append(contador)
        else:
            # NOTE(review): `contador=+1` assigns +1 (i.e. 1); if an
            # increment was intended this should be `contador+=1` — verify.
            contador=+1
            compacto.append(Direcciones[i])
            conteo.append(contador)
for i in range(len(compacto)):
    print(compacto[i],end=" ")
print("")
# NOTE(review): the last count is decremented here, apparently compensating
# for the double-count in the last-element branch above — confirm intent.
if conteo[len(conteo)-1]!=1:
    conteo[len(conteo)-1]=conteo[len(conteo)-1]-1
for i in range(len(conteo)):
    print(conteo[i],end=" ")
|
#
# @lc app=leetcode.cn id=222 lang=python3
#
# [222] 完全二叉树的节点个数
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def countNodes(self, root: TreeNode) -> int:
        """Count the nodes of a complete binary tree.

        Walks the left and right spines: equal heights mean the tree is
        perfect (2**h - 1 nodes, no traversal needed); otherwise recurse
        into both children. Runs in O(log^2 n).
        """
        def spine_height(node, attr):
            # Number of edges walked by repeatedly following `attr` links.
            height = 0
            while node:
                node = getattr(node, attr)
                height += 1
            return height

        left_height = spine_height(root, "left")
        right_height = spine_height(root, "right")
        if left_height == right_height:
            # Perfect subtree: use the closed-form node count.
            return 2 ** left_height - 1
        # Otherwise count this node plus both subtrees recursively.
        return 1 + self.countNodes(root.left) + self.countNodes(root.right)
# @lc code=end
|
#Q5. Define an employee class and initialize it with name and salary.
# Now, make a classmethod that takes in a string parameter "name-2000" which creates an instance and returns the instance based on parameter.
class Employee:
    """An employee with a name and a salary."""

    def __init__(self, name, salary):
        self.name = name
        self.salary = salary

    @classmethod
    def getObjFromString(cls, inp):
        """Alternate constructor: build an Employee from "name-salary"
        (e.g. "name-2000"). The salary keeps whatever form the split
        yields (a string)."""
        raw_name, raw_salary = inp.split("-")
        return cls(raw_name, raw_salary)

    def getname(self):
        """Return the employee's name."""
        return self.name

    def getSalary(self):
        """Return the employee's salary as given at construction."""
        return self.salary
# Demo: build an Employee from the "name-salary" string form and print it.
emp= Employee.getObjFromString("Bishesh-500")
print(emp.getname())
print(emp.getSalary())
|
import os,sys
import string
from optparse import OptionParser
import glob
import json
import subprocess
__version__="1.0"
__status__ = "Dev"
def create_docker_file(prj):
    """Write ./Dockerfile for project *prj*: an nginx static-site image
    with symlinks into /data/shared/<prj>.

    Directives are separated by blank lines, matching the original layout.
    """
    directives = [
        "FROM nginx:1.21.0-alpine as production",
        "ENV NODE_ENV production",
        f"RUN mkdir -p /data/shared/{prj}",
        f"RUN ln -s /data/shared/{prj} /usr/share/nginx/html/ln2data",
        f"RUN ln -s /data/shared/{prj}/releases /usr/share/nginx/html/ln2releases",
        f"RUN ln -s /data/shared/{prj}/downloads /usr/share/nginx/html/ln2downloads",
        f"RUN ln -s /data/shared/{prj}/releases/ftp /usr/share/nginx/html/ftp",
        "COPY ./build /usr/share/nginx/html",
        "COPY nginx.conf /etc/nginx/conf.d/default.conf",
        "EXPOSE 80",
        'CMD ["nginx", "-g", "daemon off;"]',
    ]
    with open("Dockerfile", "w") as dockerfile:
        dockerfile.write("\n\n".join(directives) + "\n")
    return
###############################
def main():
    """Build the React app, bake it into a docker image, and (re)create
    the nginx container for the server given by -s/--server.

    Reads ./conf/config.json for project name, ports and paths; writes
    .env.production and a Dockerfile as side effects.
    """
    usage = "\n%prog [options]"
    parser = OptionParser(usage,version="%prog version___")
    parser.add_option("-s","--server",action="store",dest="server",help="dev/tst/beta/prd")
    (options,args) = parser.parse_args()
    # -s is mandatory: show help and exit if missing.
    for key in ([options.server]):
        if not (key):
            parser.print_help()
            sys.exit(0)
    server = options.server
    config_obj = json.loads(open("./conf/config.json", "r").read())
    image = config_obj["project"] + "_app_%s" % (server)
    container = "running_" + image
    app_port = config_obj["app_port"][server]
    data_path = config_obj["data_path"]
    # Environment consumed by the React build below.
    with open(".env.production", "w") as FW:
        FW.write("REACT_APP_SERVER=%s\n" % (server))
        FW.write("REACT_APP_ROOT_URL=%s\n" % (config_obj["app_root"][server]))
        FW.write("REACT_APP_API_URL=%s\n" % (config_obj["api_root"][server]))
        FW.write("REACT_APP_APP_VERSION=1.1\n")
    create_docker_file(config_obj["project"])
    cmd_list = []
    if os.path.isdir(data_path) == False:
        cmd_list.append("mkdir -p %s" % (data_path))
    cmd = "npm run build"
    cmd_list.append(cmd)
    cmd = "docker build -t %s ." % (image)
    cmd_list.append(cmd)
    # Remove any existing container with our name before re-creating it.
    for c in [container]:
        cmd = "docker ps --all |grep %s" % (c)
        container_id = subprocess.getoutput(cmd).split(" ")[0].strip()
        if container_id.strip() != "":
            cmd_list.append("docker rm -f %s " % (container_id))
    cmd = "docker create --name %s -p 127.0.0.1:%s:80 -v %s:%s %s" % (container,app_port,data_path, data_path, image)
    cmd_list.append(cmd)
    # Execute the accumulated shell commands in order, echoing output.
    for cmd in cmd_list:
        #print (cmd)
        x = subprocess.getoutput(cmd)
        print (x)
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
# Python 2 script: train an RBF-kernel SVM on the Enron author-ID features
# and report timing and accuracy (results recorded in comments below).
import sys
import numpy as np
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
from sklearn.svm import SVC
clf = SVC(kernel="rbf",C=10000)
# Uncomment to train on 1% of the data for a fast (less accurate) run.
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]
t0 = time()
clf.fit(features_train,labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
from sklearn.metrics import accuracy_score
acc = round(accuracy_score(labels_test,pred), 5)
print "accuracy:", acc
# Per-class prediction counts (label 0 = Sara, 1 = Chris).
print np.bincount(pred)
#########################################################
### training time: 332.671 s
### predicting time: 34.717 s
### accuracy: 0.98407
|
# For each test case: given n numbers, count positions whose parity differs
# from their index's parity. A swap fixes one odd-index and one even-index
# mismatch at a time, so the answer is the number of swaps, or -1 when the
# two mismatch sets differ in size.
test_case = int(input())
while test_case:
    n = int(input())
    a = list(map(int, input().split()))
    odd = []   # mismatched positions holding odd values
    even = []  # mismatched positions holding even values
    for i in range(n):
        if i % 2 != a[i] % 2:
            odd.append(i) if a[i] % 2 else even.append(i)
    print(-1 if (len(odd) != len(even)) else len(odd))
    test_case -= 1
# Alternate Solution
# Golfed variant: reads all of stdin, takes every second line starting at
# index 2 (the value lines), and computes the same answer arithmetically.
for s in [*open(0)][2::2]:
    a = [int(x) % 2 for x in s.split()]
    print((sum(i % 2 ^ x for i, x in enumerate(a)) // 2, -1)[sum(a) != len(a) // 2])
|
import sys
def main():
    """Start the c9hubapi REST service (Flask dev server)."""
    # Imported inside the function so the package is only required at run time.
    from c9hubapi.rest import api
    # Listen on all interfaces, port 3232, with debug enabled.
    api.app.run(debug=True, host='0.0.0.0', port=3232)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
cfg4_speedplot.py
======================
"""
import os, logging, numpy as np
log = logging.getLogger(__name__)
import matplotlib.pyplot as plt
from opticks.ana.metadata import Metadata
from opticks.ana.catdir import Catdir
def speedplot(cat, tag, a, landscape=False, ylim=None, log_=False):
    """Plot propagate times for CfGeant4 / Opticks Interop / Opticks Compute.

    :param cat: geometry category label (used in the title)
    :param tag: event tag (used in the title)
    :param a: metadata record array with numPhotons, flgs, index, propagate
    :param landscape: lay subplots out side by side instead of stacked
    :param ylim: y-limits applied to every subplot; per-mode defaults otherwise
    :param log_: use a logarithmic y-scale
    """
    if a is None:
        log.warning("no metadata skipping")
        return
    # All compared runs must have the same photon statistics.
    nnp = len(np.unique(a.numPhotons))
    if nnp != 1:
        log.fatal("numPhotons not unique, cannot compare : nnp %s " % nnp)
        log.fatal("Tags and negated counterparts should always have the same photon statistics")
        log.fatal(" TO AVOID THIS PROBLEM ADOPT A NEW TAG WHEN CHANGING PHOTON STATS ")
    assert nnp == 1, "Tags and negated counterparts should always have the same photon statistics"
    mega = float(a.numPhotons[0])/1e6
    title = "Propagate times (s) for %3.1fM Photons with %s geometry, tag %s, [max/avg/min]" % (mega, cat, tag)
    plt.close()
    plt.ion()
    fig = plt.figure()
    fig.suptitle(title)
    # Select rows by execution mode via the metadata flag bits.
    compute = a.flgs & Metadata.COMPUTE != 0
    interop = a.flgs & Metadata.INTEROP != 0
    cfg4 = a.flgs & Metadata.CFG4 != 0
    msks = [cfg4, interop, compute]
    ylims = [[0,60],[0,5],[0,1]]
    labels = ["CfGeant4", "Opticks Interop", "Opticks Compute"]
    n = len(msks)
    for i, msk in enumerate(msks):
        if landscape:
            ax = fig.add_subplot(1,n,i+1)
        else:
            ax = fig.add_subplot(n,1,i+1)
        pass
        d = a[msk]
        t = d.propagate
        mn = t.min()
        mx = t.max()
        av = np.average(t)
        label = "%s [%5.2f/%5.2f/%5.2f] " % (labels[i], mx,av,mn)
        loc = "lower right" if i == 0 else "upper right"
        # Points plus a step curve for the same data, legend carries the stats.
        ax.plot( d.index, d.propagate, "o")
        ax.plot( d.index, d.propagate, drawstyle="steps", label=label)
        if log_:
            ax.set_yscale("log")
        if ylim is not None:
            ax.set_ylim(ylim)
        else:
            ax.set_ylim(ylims[i])
        pass
        ax.legend(loc=loc)
        pass
    # NOTE: applied to the last subplot only (ax from the final iteration).
    ax.set_xlabel('All times from: MacBook Pro (2013), NVIDIA GeForce GT 750M 2048 MB (384 cores)')
    ax.xaxis.set_label_coords(-0.5, -0.07 )
    plt.show()
if __name__ == '__main__':
    # Standard Opticks entry point: resolve the category directory and
    # event timings for the requested tag, then plot them.
    from opticks.ana.main import opticks_main
    ok = opticks_main()
    cat = Catdir(ok.catdir)
    a = cat.times(ok.tag)
    if 1:
        speedplot(cat, ok.tag, a, landscape=True, ylim=[0.1, 60], log_=True)
|
#!/usr/bin/env python
# coding=utf-8
import os
import re
import sys
import time
import subprocess
import phone
from dingding import DingDing
CAMMAND_GIT_CONFIG_ACCESS_TOKEN = "git config --global pushconfig.accesstoken" # git config key holding the GitLab/DingDing access token
ding = None  # lazily-initialized DingDing client, see setup()
def main():
    # Placeholder entry point (Python 2 print statement).
    print "main"
def setup():
    """Initialize the global DingDing client from git config (idempotent)."""
    global ding
    if ding:
        return;
    access_token = get_access_token()
    # Without a configured access token the DingDing features stay disabled
    # (the printed message says so, in Chinese).
    if len(access_token)<=0:
        print "未设置钉钉群的access_token,无法启用钉钉功能"
        return
    ding = DingDing(access_token)
def sendToDingDing(reviewer, mrUrl, msg="", sender="",icon=""):
    """Notify *reviewer* of a merge request via the DingDing group bot.

    Sends a link card plus a text message that @-mentions the reviewer's
    phone number. Skips sending when the client can't be initialized, when
    reviewer/mrUrl is missing, or when the reviewer is the sender.
    """
    # Short delay so the message lands last and stays pinned to the top.
    time.sleep(0.5)
    setup()
    if not ding:
        print "钉钉初始化失败"
        return
    if not reviewer:
        print "未指定reviewer人,不发送机器人消息"
        return;
    if not mrUrl:
        print "merge request链接不存在,不发送机器人消息"
        return;
    if reviewer == sender:
        print "指定reviewer的人是自己,不发送钉钉消息"
        return
    print "发送消息给钉钉好友"
    # Look up the reviewer's phone number for the @-mention.
    mobile = phone.phone_with_name(reviewer)
    print reviewer+"的手机号是:"+mobile
    title = reviewer+":"+sender+"向你发了一个merge request"
    text = msg
    message_url = mrUrl;
    pic_url = icon
    result1 = ding.send_link(title, text, message_url, pic_url)
    print result1
    result2 = ding.send_text(text="点击链接查看",at_mobiles=[mobile])
    print result2
def get_access_token():
    """Read the DingDing access token from git config; "" when unset."""
    return cammand_out_put(CAMMAND_GIT_CONFIG_ACCESS_TOKEN,False,"")
def cammand_out_put(cammand, can_raise, raise_return_value):
    """Run *cammand* in a shell and return its stripped output.

    On a non-zero exit status, re-raise the CalledProcessError when
    *can_raise* is true, otherwise return *raise_return_value*.
    """
    try:
        output = subprocess.check_output(cammand, shell=True)
    except subprocess.CalledProcessError as e:
        if not can_raise:
            return raise_return_value
        raise e
    return output.strip()
if __name__ == "__main__":
    # Script entry point; see main() above.
    main()
|
import pyeapi
from pprint import pprint
import yaml
from my_funcs import read_yaml,print_out
# Load eAPI connection parameters (host, transport, credentials) from YAML.
yaml_device = read_yaml("device.yaml")
connection = pyeapi.client.connect(**yaml_device)
device = pyeapi.client.Node(connection)
# Run a show command through eAPI and pretty-print the structured result.
output = device.enable("show ip arp")
print_out(output)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-22 22:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration creating the Member model (update_release app)."""
    initial = True
    dependencies = [
        ('update_release', '0003_auto_20171202_0557'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gov_id', models.CharField(help_text='Government issued ID', max_length=24)),
                ('given_name', models.CharField(blank=True, help_text='given name', max_length=255)),
                ('given_name_alpha', models.CharField(blank=True, help_text='given name, last name first', max_length=255)),
                # NOTE(review): these DateTimeFields use blank=True without
                # null=True, so the DB column is NOT NULL while forms allow
                # empty input — confirm this is intended.
                ('birthday', models.DateTimeField(blank=True)),
                ('incarcerated_date', models.DateTimeField(blank=True)),
                ('parole_date', models.DateTimeField(blank=True)),
                ('discharge_date', models.DateTimeField(blank=True)),
                ('so', models.NullBooleanField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='members', to=settings.AUTH_USER_MODEL)),
                ('facility', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='members', to='update_release.Facility')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
#This script is for going in every directory and concatenating all text files in that directory
import os
import subprocess
print("-----------------------------------------------------------Hello user------------------------------------------------------\n")
# Concatenate every file in the current directory into "Training_train".
#
# Fixes over the original version:
#   * the output file itself is now excluded too, so a re-run no longer
#     concatenates the previous output into the new one
#   * the output is written once after the loop instead of the file being
#     re-opened and rewritten on every iteration
#   * files are opened read-only via context managers (no leaked handles)
parts = []
for entry in os.listdir():
    if entry in ("concat_all.py", "Training_train"):
        continue
    with open(entry, "r") as source:
        parts.append(source.read())
with open("Training_train", "w+") as target:
    target.write("".join(parts))
print("--------------------------------------------------------------------------pirateking--------------------------------------------------------------------")
|
import sys
sys.path.insert(0, '../../packages/WPG')
# sys.path.insert(0,'/diskmnt/a/lsamoylv/WPG')
# sys.path.insert(0,'/data/S2E/packages/WPG')
# sys.path.insert(0,'/Users/lsamoylv/code/WPG')
import os
import pylab as plt
import numpy as np
from wpg import Wavefront
def show_diagnostics(FELsource_out_number):
    """Plot diagnostics for an FELsource_out_*.h5 wavefront file.

    Figure 1: photons per pixel, I(x,y) integrated over time slices.
    Figure 2: instantaneous power from the FAST post-processing record
    overlaid with the power recomputed from the stored wavefront.
    (Python 2 script: note the print statement below.)
    """
    # read FELsource_out_.h5
    if not FELsource_out_number == 'FELsource_out_0000001.h5':
        #FELsource_out_file = "FELsource_out_{}.h5".format(FELsource_out_number.zfill(7))
        FELsource_out_file = "{}.h5".format(FELsource_out_number.zfill(7))
    else:
        FELsource_out_file = FELsource_out_number
    if not os.path.exists(FELsource_out_file):
        print 'Input file {} not found.'.format(FELsource_out_file)
        return
    wf = Wavefront()
    wf.load_hdf5(FELsource_out_file)
    # show two figures window 1: image of I(x,y) integral intensity, with real
    # x and y axis and title with file name
    J2eV = 6.24150934e18;  # joules -> eV conversion factor
    mesh = wf.params.Mesh
    tmin = mesh.sliceMin;
    tmax = mesh.sliceMax;
    # Grid spacings in time and space derived from the mesh bounds.
    dt = (tmax - tmin) / (mesh.nSlices - 1);
    dx = (mesh.xMax - mesh.xMin) / (mesh.nx - 1);
    dy = (mesh.yMax - mesh.yMin) / (mesh.ny - 1);
    wf_intensity = wf.get_intensity(polarization='horizontal');
    # Integrate over the time axis to get fluence per pixel.
    total_intensity = wf_intensity.sum(axis=-1);
    data = total_intensity * dt
    plt.figure()
    plt.imshow(data*dx*dy*1e6*J2eV/wf.params.photonEnergy,extent=[mesh.xMin*1e6,mesh.xMax*1e6,mesh.yMin*1e6,mesh.yMax * 1e6])
    title = 'Number of photons per %.2f x %.2f $\mu m ^2$ pixel' % (dx*1e6, dx*1e6)
    plt.title(title)
    plt.colorbar(); plt.xlabel('[$\mu m$]');
    # window 2: plot of 2 curves:
    #(1) history/parent/temporal_struct - FAST post-processing
    temporal_struct = wf.custom_fields['history']['parent']['misc']['temporal_struct']
    t0 = (temporal_struct[:, 0].max() + temporal_struct[:, 0].min()) / 2
    plt.figure()
    plt.plot(temporal_struct[:, 0] - t0, temporal_struct[:, 1] * 1e-9, 'b',label = 'output FAST-pp')
    # NOTE(review): plt.hold() was removed in matplotlib 3.x — this call
    # only works with the old matplotlib this script was written for.
    plt.hold(True)
    #(2) integral intensity I(t) calculated for wavefront written in h5
    t = np.linspace(tmin, tmax, wf.params.Mesh.nSlices)
    pulse_energy = wf.get_intensity().sum(axis=0).sum(axis=0) #check it
    plt.plot(t * 1e15, pulse_energy*dx*dy*1e6*1e-9,'ro', label = 'wavefront data')
    title = 'FEL pulse energy %.2f %s ' % (pulse_energy.sum(axis=0) * dx * dy * 1e6 * dt * 1e3, 'mJ')
    plt.title(title)
    plt.xlabel('time [fs]');
    plt.ylabel('Instantaneous power [GW]');
    plt.legend()
    plt.grid(True)
    plt.show()
def main():
    """Parse --input-file and show diagnostics for that wavefront file."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--input-file", dest="in_fname", default="FELsource_out_0000001.h5",help="Input wavefront file: FELsource_out_***.h5")
    (options, args) = parser.parse_args()
    if not options.in_fname : # if filename is not given
        parser.error('Input filename not specified, use --input-file options')
        return
    show_diagnostics(options.in_fname)
if __name__ == "__main__":
    main()
|
# hello worldを1文字変更した場合のエラーメッセージを調べる
# 1文字削除、xを追加、1を追加
# python errormsg.py > output
# grep Hello.java output | sed 's/^.*: //' | sort | uniq
import sys
import subprocess
hello_src="""public class Hello {
public static void main(String[] args) {
System.out.println("hello, world");
}
}
"""
def remove_one_char(str,index):
    """Return *str* with the character at *index* deleted."""
    return "".join((str[:index], str[index+1:]))
def insert_one_char(str,index,ch):
    """Return *str* with *ch* inserted before position *index*."""
    return "".join((str[:index], ch, str[index:]))
def get_error_messages(filename, src):
    """Write *src* to *filename*, compile with javac (Japanese locale,
    UTF-8), and return the compiler output; "" on successful compile.
    (Python 2: note the `except E, e` syntax below.)
    """
    f = open(filename,"w")
    f.write(src)
    f.close()
    error = ""
    try:
        # subprocess.check_output(["javac","Hello.java"],stderr=subprocess.STDOUT)
        subprocess.check_output(["javac","-J-Duser.language=ja","-J-Dfile.encoding=utf8","Hello.java"],stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError, e:
        # Non-zero exit: capture javac's combined stdout/stderr.
        error = e.output
    return error;
def main():
    """Mutate hello_src one character at a time (insert 'x', insert '1',
    delete one char) and print the javac error message for each variant
    that fails to compile. Progress indices go to stderr (Python 2)."""
    for i in range(len(hello_src)):
        print >> sys.stderr, i
        src = insert_one_char(hello_src,i,"x")
        error = get_error_messages("Hello.java", src)
        if error != "":
            print "========"
            print error
            print src
    for i in range(len(hello_src)):
        print >> sys.stderr, i
        src = insert_one_char(hello_src,i,"1")
        error = get_error_messages("Hello.java", src)
        if error != "":
            print "========"
            print error
            print src
    for i in range(len(hello_src)):
        print >> sys.stderr, i
        src = remove_one_char(hello_src,i)
        error = get_error_messages("Hello.java", src)
        if error != "":
            print "========"
            print error
            print src
main()
|
import time
def factorial(n):
    """Iteratively compute n! (returns 1 for n <= 1)."""
    result = 1
    # Multiply result by n, n-1, ..., 2.
    while n > 1:
        result *= n
        n -= 1
    return result
def factorial_r(n):
    """Recursively compute n!.

    Fix: the base case is now n <= 1 instead of n == 1, so
    factorial_r(0) returns 1 rather than recursing without bound.
    Behavior for n >= 1 is unchanged.
    """
    if n <= 1:
        return 1
    return n * factorial_r(n-1)
if __name__ =='__main__':
    n = 100  # input size used for both implementations
    comienzo = time.time()  # start timestamp for the iterative version
    factorial(n)
    final = time.time()
    print(final - comienzo)  # elapsed time, iterative
    comienzo = time.time()  # start timestamp for the recursive version
    factorial_r(n)
    final = time.time()
    print(final - comienzo)  # elapsed time, recursive
    # Compare the two printed timings to see which implementation is faster.
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
# creating empty list to store contents
products = []
prices = []
ratings = []
# Fetch an Amazon.in listing page (User-Agent set to avoid bot blocking).
content = requests.get("https://www.amazon.in/s/ref=mega_elec_s23_2_1_1_1?rh=i%3Acomputers%2Cn%3A1375424031&ie=UTF8&bbn=976392031",headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}) #retrieve contents from the webpage URL
soup = BeautifulSoup(content.content,"html.parser")
# Extract name, price and rating from each product card.
for a in soup.find_all('div',attrs={'class':'a-fixed-left-grid-inner'}):
    product = a.find('h2',attrs={'class':'a-size-medium s-inline s-access-title a-text-normal'})
    price = a.find('span',attrs={'class':'currencyINR'})
    rating = a.find('span',attrs={'class':'a-icon-alt'})
    # NOTE(review): find() returns None when an element is missing, so
    # .text below can raise AttributeError for incomplete cards — verify.
    products.append(product.text)
    prices.append(price.text)
    ratings.append(rating.text)
# Dump the scraped columns to CSV.
df = pd.DataFrame({"Product Name":products,"Price":prices,"Rate":ratings})
df.to_csv("amazon.csv")
|
from __future__ import division
from __future__ import print_function
from datahandling import insert_update_db, my_query
from numpy import isnan, argmax, argmin, mean
from pandas import ewma
from tinydb import TinyDB, Query
from equipment import Rheomix
from logging import debug
def rheomix_sva(db):
    """Single-value analysis of Rheomix torque curves.

    For every data file not already processed, extracts stability time,
    final degradation time, their difference and the resting torque, and
    stores them in *db* (TinyDB).

    NOTE(review): uses pandas.ewma, which was removed in pandas 0.23+
    (replaced by Series.ewm(...).mean()) — this module requires an old
    pandas to run.
    """
    Q = Query()
    equipment = Rheomix()
    for f in equipment.alldatafiles():
        sample_number = equipment.file_parse(f)
        # Skip samples whose results are already stored.
        done = db.contains((Q.equipment_name == equipment.name)
                           & (Q.sample_number == int(sample_number)))
        if done:
            debug('Skipped Sample %s', sample_number)
            continue
        time_data, torque_data = equipment.simple_data(f)
        # Remove NaN from data
        time_data = time_data[~isnan(time_data)]
        torque_data = torque_data[~isnan(torque_data)]
        # Find the first maximum of the curve
        # initial maximum for sample entering the rheomix
        no_of_data_points = len(torque_data)
        # Divide data in to the first third and the second two thirds to capture the two maximums
        cut_point = no_of_data_points//3
        torque_data_1 = torque_data[:cut_point]
        index_1 = argmax(torque_data_1)
        # Sample 4 needs special attention since it has a maximum before the initial maximum
        if sample_number == '04':
            extra = 1
            torque_data = torque_data[index_1 + extra:]
            time_data = time_data[index_1 + extra:]
            torque_data_1 = torque_data[:cut_point]
            index_1 = argmax(torque_data_1)
        # Cut data to exclude data points before first maximum
        time_data = time_data[index_1:]
        torque_data = torque_data[index_1:]
        # Filter Data using EWMA Filter
        # 0 < alpha <= 1
        # alpha = 1 is no filtering, decrease alpha increase filtering
        alpha = 0.05
        my_com = 1.0/alpha - 1.0
        torque_data = ewma(torque_data, com=my_com)
        # Find the second maximum of the curve
        # second maximum for final degradation point
        torque_data_2 = torque_data[cut_point:]
        index_2 = argmax(torque_data_2)
        index_2 = cut_point + index_2
        # Determine stability time
        index_min = argmin(torque_data[:index_2])
        torque_min = torque_data[index_min]
        # Threshold value set by user but is the same for every curve
        threshold = torque_min + 3.0
        # Walk forward to the first point at/below threshold: stability start.
        i = 0
        t = torque_data[i]
        while t > threshold:
            i += 1
            t = torque_data[i]
        index_stab_start = i
        # From the minimum, walk forward to the first point at/above
        # threshold: stability end.
        i = index_min
        t = torque_data[i]
        while t < threshold:
            i += 1
            t = torque_data[i]
        index_stab_end = i
        # Calculate stability time, resting torque, final degradation time
        stab_time = round(time_data[index_stab_end] - time_data[index_stab_start], 1)
        stab_torque = torque_data[index_stab_start:index_stab_end]
        rest_torque = round(mean(stab_torque), 1)
        final_deg_time = round(time_data[index_2] - time_data[index_stab_start], 1)
        diff_long_short = round(final_deg_time - stab_time, 1)
        # Insert single value data into data base
        data_types = ['stability_time_min',
                      'final_deg_time_min',
                      'diff_long_short_stab_min',
                      'resting_torque_Nm'
                      ]
        values = [stab_time, final_deg_time, diff_long_short, rest_torque]
        insert_update_db(db, False, equipment.name, sample_number, data_types,
                         values)
        debug('Processed Sample %s', sample_number)
|
# This should be a simple word counter which give us the most common word in a file
# If ran from the command line without arguments it should print out the usage:
# python most_common_word.py [source]
# When no argument is provided print out
# No source provided
# When the argument provided and the source is a file
# count all words in the given file and print the most common
# ("cat", "CAT", "cat," "cat." are different words )
import sys
class WordCounter():
    """Count words in a file and report the most common one.

    Words are whitespace-separated and case/punctuation sensitive
    ("cat", "CAT" and "cat," are distinct), per the module header.
    """

    def __init__(self):
        self.args = sys.argv
        self.command = sys.argv[1:]
        # NOTE(review): sys.argv always contains at least the script name,
        # so the usage branch below is unreachable as originally written;
        # kept for behavioral compatibility.
        if len(self.args) == 0:
            print("python most_common_word.py [source]")
        elif len(sys.argv) == 1 :
            print ("No source provided")
        elif len(self.command) == 2 and self.command[0] == 'most_common_word.py':
            self.most_common_word(self.command[0], self.command[1])

    def most_common_word(self, program, source):
        """Count the words in *source*; print and return the most common.

        Fix: the original printed the entire counter dict instead of the
        most common word, contradicting the module's own spec. Returns
        None when the file is missing or empty.
        """
        try:
            with open(source, "r") as source_file:
                words = source_file.read().split()
        except FileNotFoundError:
            print('No such file was found')
            return None
        counter = dict()
        for word in words:
            counter[word] = counter.get(word, 0) + 1
        if not counter:
            return None
        most_common = max(counter, key=counter.get)
        print(most_common)
        return most_common
# Demo: constructing WordCounter parses sys.argv, then we count a fixed file.
count_that = WordCounter()
count_that.most_common_word("most_common_word.py", "filetoread.txt")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 15:09:54 2018
@author: HP
"""
def reverse(s):
    """Return *s* reversed."""
    return ''.join(reversed(s))
# Quick demos of reverse().
print(reverse('siva'))
print(reverse('william'))
print(reverse('tat'))#this is a palindrome
def palindrome(s):
    """Print whether *s* is a palindrome (spaces ignored, case-folded).

    Fix: the original returned `print(...)` (always None) on the match
    branch and nothing on the other; both branches now print the same
    messages as before and the result is returned as a bool.
    """
    s = s.replace(' ','').lower()
    if s[::-1]==s:
        print(s,': this is a palindrome')
        return True
    print(s,': not a palindrome')
    return False
# Demo calls; results go to stdout.
palindrome('tat')
palindrome('good')
import pygame
from . import constants as CO
pygame.init()
pygame.display.set_mode(CO.SRC_SIZE,pygame.FULLSCREEN) # set the window size; pygame.FULLSCREEN makes it full-screen
pygame.display.set_caption("InterstellarStronghold-星际要塞") # set the window title
|
import os
import re
import sys
filename = sys.argv[1]
tmpPath = sys.argv[2]

# Rewrite `include` directives in a config file so that absolute include
# paths are prefixed with tmpPath. The three accepted syntaxes, checked in
# the same order as the original if/elif chain:
#   include ' /path ';   include " /path ";   include /path;
_INCLUDE_PATTERNS = (
    r"\s*include\s+'\s*(.*)\s*'\s*;",
    r'\s*include\s+"\s*(.*)\s*"\s*;',
    r'\s*include\s+(.*)\s*;',
)

if os.path.isfile(filename):
    with open(filename) as conf_file:
        lines_tmp = conf_file.readlines()
    for index, line in enumerate(lines_tmp):
        # First matching pattern wins (mirrors the original elif chain);
        # the triplicated branches are folded into one loop.
        for pattern in _INCLUDE_PATTERNS:
            includeMatch = re.match(pattern, line)
            if includeMatch is not None:
                includePath = includeMatch.group(1).strip()
                # Only absolute paths get the tmpPath prefix.
                if includePath.startswith('/'):
                    lines_tmp[index] = line.replace(includePath, tmpPath + includePath)
                break
    with open(filename, 'w+') as fp:
        fp.write(''.join(lines_tmp))
#!/usr/bin/python
import optparse
import urllib
import time
# Parse command-line options
parser = optparse.OptionParser()
parser.add_option('-a', '--airport', dest='airport', default='SLC', metavar='AAA', help='retrieve data for airport AAA (default %default)')
parser.add_option('-c', '--carrier', dest='carrier', default='DL', metavar='CC', help='retrieve data for airline CC (default %default)')
parser.add_option('-b', '--begin', dest='first_day', type='int', default=1, help='first day of month to retrieve (default %default)')
parser.add_option('-e', '--end', dest='last_day', type='int', default=1, help='last day of month to retrieve (default %default)')
parser.add_option('-m', '--month', dest='month', type='int', default=1, metavar='MM', help='retrieve data for month number MM (default %default)')
parser.add_option('-l', '--lastmonth', dest='last_month', type='int', default=12, metavar='MM', help='if specified, retrives data from previously specified month until MM (default %default)')
parser.add_option('-y', '--year', dest='year', type='int', default=2010, metavar='YYYY', help='retrieve data for year YYYY (default %default)')
parser.add_option('-o', '--output', dest='output', metavar='FILE', help='save output to FILE')
parser.add_option('-f', '--format', dest='format', type='choice', choices=['csv', 'arff'], default='csv', help='output format of the data (csv or arff; default %default)')
(options, args) = parser.parse_args()
# Construct POST data
# Fixed form fields requested from the BTS on-time statistics endpoint.
params = {
    'sdtime': 'Scheduled departure time',
    'adtime': 'Actual departure time',
    'setime': 'Scheduled elapsed time',
    'aetime': 'Actual elapsed time',
    'ddtime': 'Departure delay',
    'wotime': 'Wheels-off time',
    'totime': 'Taxi-out time',
    'delay': 'Cause of Delay',
    'airport1': options.airport,
}
#for day in range(options.first_day, options.last_day+1):
#    params['Day%i' % day] = day
airlines = [ 'AA', 'CO', 'DL', 'UA', 'WN' ]
airports = ['ORD', 'BOS', 'SEA', 'IAD', 'MSP', 'PWM', 'DSM', 'MEM', 'CMH', 'FYI', 'DEN']
# Download one CSV per (airport, airline, year, month, day); the
# command-line options above are overridden inside these loops.
# (Python 2 script: print statements and urllib.urlopen.)
for airport in airports:
    options.airport = airport
    params['airport1'] = airport
    for airline in airlines:
        params['airline'] = airline
        for year in range(2008, 2011):
            params['year1'] = str(year)
            for month in range(1, 13):
                params['month%i' % month] = month
                for day in range(1, 32):
                    params['Day%i' % day] = str(day)
                    options.output = 'data/%s/%s_%s_%i_%02i_%02i.csv' % (options.airport, options.airport, airline, year, month, day )
                    print options.output
                    # Request data
                    dataStr = "<!DOCTYPE HTML"
                    #make sure we don't get an error page from the server
                    while dataStr.startswith("<!DOCTYPE HTML"):
                        data = urllib.urlopen('http://www.bts.gov/xml/ontimesummarystatistics/src/dstat/OntimeSummaryDepaturesDataCSV.xml', urllib.urlencode(params))
                        dataStr = data.read()
                        #if we do get an error then we just wait a few seconds and try again
                        if dataStr.startswith("<!DOCTYPE HTML"):
                            print "Error. Trying again in 10 seconds...."
                            time.sleep(10)
                    with open(options.output, 'w') as output:
                        output.write(dataStr)
                    #print data.read()
                    # Remove per-day/per-month fields so the next iteration
                    # posts only its own day/month keys.
                    del params['Day%i' % day]
                del params['month%i' % month]
|
import sys
import lyricsgenius as genius
import pandas as pd
import os
import json
dataset = "Eminem_dataset.txt"   # tab-separated output: title, artist, lyrics
writer = "Eminem"                # artist to scrape
json_str = "Lyrics_Eminem.json"  # file written by artist.save_lyrics()
if __name__ == "__main__":
    # NOTE(review): hard-coded Genius API credential committed to source —
    # should be moved to an environment variable and the key rotated.
    geniusCreds = "sTzgVYcb_lBs-WPI5q35Gf9lvZ0My3bFyzZ35-KYUp2SAHnjxjZll7rqr09HNOHV"
    api = genius.Genius(geniusCreds)
    artist = api.search_artist(writer, max_songs = 200)
    os.getcwd()
    artist.save_lyrics()
    # Re-load the saved JSON and flatten each song to one TSV line,
    # replacing newlines in lyrics with backslashes.
    Artist=json.load(open(json_str))
    with open(dataset, 'w') as file_:
        for song in range(len(Artist['songs'])):
            title = Artist['songs'][song]['title']
            lyrics = Artist['songs'][song]['lyrics']
            line = "{}\t{}\t{}\n".format(title, writer, lyrics.replace("\n", "\\"))
            file_.write(line)
|
import boto3

# Create a CloudFormation stack (boto3 skeleton).
# Fix: the original called `cloudformation.create_stack` without ever
# defining `cloudformation`, which raised NameError — the client is now
# created first.
# NOTE(review): the values below are boto3-documentation placeholders
# ('string', 123, True|False — the latter simply evaluates to True) and
# must be replaced with real template/parameter values before use.
cloudformation = boto3.client('cloudformation')
stack = cloudformation.create_stack(
    StackName='string',
    TemplateBody='string',
    TemplateURL='string',
    Parameters=[
        {
            'ParameterKey': 'string',
            'ParameterValue': 'string',
            'UsePreviousValue': True|False
        },
    ],
    DisableRollback=True|False,
    TimeoutInMinutes=123,
    NotificationARNs=[
        'string',
    ],
    Capabilities=[
        'CAPABILITY_IAM',
    ],
    ResourceTypes=[
        'string',
    ],
    OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
    StackPolicyBody='string',
    StackPolicyURL='string',
    Tags=[
        {
            'Key': 'string',
            'Value': 'string'
        },
    ]
)
|
#!/usr/bin/python
from __future__ import print_function
import os
import sys
import requests
from sys import stderr
from urlparse import urlparse
from threading import Thread
from time import sleep
import socket
from select import poll, POLLIN, POLLPRI, POLLOUT, POLLHUP, POLLERR
from libproxy import ProxyFactory
from Queue import Queue, Empty
pf = ProxyFactory()
def unset_envvars():
    """Remove proxy envvars if set. We leave NO_PROXY alone."""
    for key in ("http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY"):
        # pop with a default is a no-op when the variable is absent.
        os.environ.pop(key, None)
def start_server():
    """Run a minimal local HTTP proxy on 127.0.0.1:1234 using poll().

    For each client GET request, a proxy (via libproxy) or direct upstream
    connection is opened and the two sockets are joined in `bridge`;
    `message_queues` buffers bytes awaiting writability on the peer.
    Only GET is supported; any other method exits the process.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    server.bind(("127.0.0.1", 1234))
    server.listen(1)
    bridge = {}          # sock -> its bridged peer sock (both directions)
    message_queues = {}  # sock -> Queue of bytes pending to be sent to it
    sock_fd_to_sock = {
        server.fileno(): server
    }
    closing = {}         # socks to close once their queue drains
    READ_ONLY = POLLIN | POLLPRI | POLLHUP | POLLERR
    WRITE_ONLY = POLLOUT | POLLHUP | POLLERR
    READ_WRITE = READ_ONLY | POLLOUT
    p = poll()
    p.register(server, READ_ONLY)
    print("Ready to accept connections")
    while True:
        print("Open connections: {}".format(len(sock_fd_to_sock)))
        for sock_fd, event in p.poll():
            sock = sock_fd_to_sock[sock_fd]
            if event & (POLLIN | POLLPRI):
                if sock is server:
                    # New client connection.
                    conn, addr = server.accept()
                    sock_fd_to_sock[conn.fileno()] = conn
                    p.register(conn, READ_ONLY)
                    message_queues[conn] = Queue()
                else:
                    data = sock.recv(1024)
                    if data:
                        if sock in bridge: # Bridge
                            # Already paired: forward bytes to the peer.
                            dst = bridge[sock]
                            message_queues[dst].put(data)
                            p.modify(dst, READ_WRITE)
                        else: # Proxy
                            # First bytes from a client: parse the request line.
                            assert "\n" in data
                            cmd, url, http = data[:data.find("\n")].split()
                            if cmd != "GET":
                                print(cmd)
                                sys.exit(1)
                            print("Visit URL ", url)
                            message_queues[sock].put(data)
                            # NOTE(review): this loop does not break after a
                            # successful connection, and a proxy scheme other
                            # than direct:// or http:// would reuse/leave
                            # `new_sock` from a prior iteration — verify.
                            for proxy in pf.getProxies(url):
                                if proxy == "direct://":
                                    o = urlparse(url)
                                    new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                                    new_sock.connect((o.hostname, o.port or 80))
                                elif proxy.startswith("http://"):
                                    print("+ HTTP Proxy {}".format(proxy), file=stderr)
                                    o = urlparse(proxy)
                                    new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                                    new_sock.connect((o.hostname, o.port))
                                p.register(new_sock, READ_WRITE)
                                sock_fd_to_sock[new_sock.fileno()] = new_sock
                                # Upstream inherits the pending request bytes.
                                message_queues[new_sock] = message_queues[sock]
                                message_queues[sock] = Queue()
                                bridge[sock] = new_sock
                                bridge[new_sock] = sock
                    else: # no data
                        # Peer closed: tear down this side, let the other
                        # side flush and then close (via `closing`).
                        other = bridge.get(sock, None)
                        p.unregister(sock)
                        del sock_fd_to_sock[sock.fileno()]
                        del message_queues[sock]
                        del bridge[sock]
                        sock.close()
                        if other is not None:
                            p.modify(other, WRITE_ONLY)
                            closing[other] = True
                        continue
            elif event & POLLOUT:
                try:
                    next_msg = message_queues[sock].get_nowait()
                    sent = sock.send(next_msg)
                    assert sent > 0
                except Empty:
                    # Queue drained: close if marked, else go back to reading.
                    if sock in closing:
                        p.unregister(sock)
                        del sock_fd_to_sock[sock.fileno()]
                        del message_queues[sock]
                        del bridge[sock]
                        del closing[sock]
                        sock.close()
                    else:
                        p.modify(sock, READ_ONLY)
            else:
                print("Unknown event: {}".format(event))
def main():
    """Entry point: scrub proxy-related env vars, then run the server loop."""
    unset_envvars()
    start_server()
if __name__ == "__main__":
    main()
|
from django.contrib import admin
from . import models
# Register the blog models so they are editable in the Django admin site.
admin.site.register(models.Posts)
admin.site.register(models.Comments)
|
# Copyright (c) Members of the EGEE Collaboration. 2006-2009.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andrea Ceccanti (INFN)
#
import string
# Maven resource filtering replaces these placeholders at build time; the
# accessor functions below fall back to defaults when left unexpanded.
__voms_version__ = "${pom.version}"
__voms_prefix__ = "${package.prefix}"
import re, commands, exceptions, os.path, glob, platform
def mysql_util_cmd(command, options):
    """Build the voms-mysql-util command line for *command*.

    When options.dbapwdfile is set, its contents are loaded into
    options.dbapwd (side effect kept from the original) and appended
    to the command as --dbapwd.
    """
    db_cmd = ("%s %s --dbauser %s --dbusername %s --dbpassword %s "
              "--dbname %s --dbhost %s --dbport %s --mysql-command %s"
              % (VOMSDefaults.voms_mysql_util,
                 command,
                 options.dbauser,
                 options.dbusername,
                 options.dbpassword,
                 options.dbname,
                 options.dbhost,
                 options.dbport,
                 options.mysql_command))
    if options.dbapwdfile:
        # Close the handle deterministically (the original leaked it).
        # NOTE(review): contents are used verbatim — a trailing newline in
        # the password file ends up on the command line; confirm whether it
        # should be stripped.
        with open(options.dbapwdfile) as pwd_file:
            options.dbapwd = pwd_file.read()
    if options.dbapwd:
        db_cmd += " --dbapwd=%s" % options.dbapwd
    return db_cmd
def voms_add_admin_cmd(vo, cert, ignore_email=False):
    """Command adding *cert* as an administrator of *vo*; with
    ignore_email, the certificate's email address is not recorded."""
    flags = "--cert %s" % cert
    if ignore_email:
        flags += " --ignore-cert-email"
    return "%s %s" % (__voms_db_util_base_cmd(vo, "add-admin"), flags)
def voms_ro_auth_clients_cmd(vo):
    """Command granting read-only access to authenticated clients."""
    return __voms_db_util_base_cmd(vo, "grant-read-only-access")
def voms_deploy_database_cmd(vo):
    """Command deploying the VO database schema."""
    return __voms_db_util_base_cmd(vo, "deploy")
def voms_undeploy_database_cmd(vo):
    """Command undeploying the VO database schema."""
    return __voms_db_util_base_cmd(vo, "undeploy")
def voms_upgrade_database_cmd(vo):
    """Command upgrading the VO database schema."""
    return __voms_db_util_base_cmd(vo, "upgrade")
def __voms_db_util_base_cmd(vo, command):
    """Common voms-db-util invocation prefix for *vo* and *command*."""
    return "%s %s --vo %s" % (VOMSDefaults.voms_db_util, command, vo)
def voms_version():
    """Return the VOMS version, or "unset" if the Maven placeholder was
    never expanded at build time."""
    # BUGFIX: the original compared against "${pom.version" (missing the
    # closing brace), so the unset case was never detected; the constant is
    # declared as "${pom.version}" and voms_prefix() checks its placeholder
    # with the closing brace included.
    if __voms_version__ == "${pom.version}":
        return "unset"
    return __voms_version__
def voms_prefix():
    """Return the VOMS install prefix, defaulting to /opt/voms when the
    build-time placeholder was never expanded."""
    if __voms_prefix__ != "${package.prefix}":
        return __voms_prefix__
    return "/opt/voms"
def template_prefix():
    """Directory holding the voms-admin configuration templates."""
    return os.path.join(voms_prefix(), "usr","share", "voms-admin","templates")
def admin_conf_dir(vo=None):
    """voms-admin config dir; per-VO subdirectory when *vo* is given."""
    if vo is None:
        return os.path.join(voms_prefix(), "etc", "voms-admin")
    else:
        return os.path.join(voms_prefix(), "etc", "voms-admin", vo)
def core_conf_dir(vo=None):
    """voms core config dir; per-VO subdirectory when *vo* is given."""
    if vo is None:
        return os.path.join(voms_prefix(), "etc","voms")
    else:
        return os.path.join(voms_prefix(), "etc","voms", vo)
def admin_db_properties_path(vo):
    """Path of the VO's database.properties file."""
    return os.path.join(admin_conf_dir(vo), "database.properties")
def admin_service_properties_path(vo):
    """Path of the VO's service.properties file."""
    return os.path.join(admin_conf_dir(vo), "service.properties")
def admin_service_endpoint_path(vo):
    """Path of the VO's service-endpoint file."""
    return os.path.join(admin_conf_dir(vo), "service-endpoint")
def admin_logging_conf_path(vo):
    """Path of the VO's logback.xml logging configuration."""
    return os.path.join(admin_conf_dir(vo), "logback.xml")
def vomses_path(vo):
    """Path of the VO's vomses file."""
    return os.path.join(admin_conf_dir(vo), "vomses")
def lsc_path(vo):
    """Path of the VO's lsc file."""
    return os.path.join(admin_conf_dir(vo), "lsc")
def aup_path(vo):
    """Path of the VO's acceptable-use-policy text file."""
    return os.path.join(admin_conf_dir(vo), "vo-aup.txt")
def voms_log_path():
    """Directory where VOMS log files are written."""
    return os.path.join(voms_prefix(),"var", "log", "voms")
def voms_conf_path(vo):
    """Path of the VO's voms.conf file."""
    return os.path.join(core_conf_dir(), vo, "voms.conf")
def voms_pass_path(vo):
    """Path of the VO's voms.pass file."""
    return os.path.join(core_conf_dir(), vo, "voms.pass")
def voms_lib_dir():
    """Return the VOMS native library directory (lib64 on x86_64)."""
    ## FIXME: understand how this behaves in Debian
    libdir = "lib64" if platform.machine() == "x86_64" else "lib"
    return os.path.join(voms_prefix(), "usr", libdir)
class VOMSDefaults:
    """Computed-at-import constants used by the installer scripts: template
    paths, war/jar/tool locations and JDBC driver/dialect class names."""
    db_props_template = os.path.join(template_prefix(), "database.properties")
    service_props_template = os.path.join(template_prefix(),"service.properties")
    voms_template = os.path.join(template_prefix(),"voms.conf")
    vo_aup_template = os.path.join(template_prefix(),"vo-aup.txt")
    logging_conf_template = os.path.join(template_prefix(), "logback.xml")
    voms_admin_war = os.path.join(voms_prefix(), "usr","share","webapps","voms-admin.war")
    voms_admin_libs = glob.glob(os.path.join(voms_prefix(),"var", "lib","voms-admin","lib")+"/*.jar")
    voms_admin_classes = os.path.join(voms_prefix(),"var", "lib","voms-admin","tools")
    voms_admin_jar = os.path.join(voms_prefix(), "usr", "share","java","voms-admin.jar")
    voms_db_util = os.path.join(voms_prefix(),"usr", "sbin","voms-db-util")
    voms_mysql_util = os.path.join(voms_prefix(), "usr", "sbin", "voms-mysql-util")
    schema_deployer_class = "org.glite.security.voms.admin.persistence.deployer.SchemaDeployer"
    oracle_driver_class = "oracle.jdbc.driver.OracleDriver"
    oracle_dialect = "org.hibernate.dialect.Oracle9Dialect"
    mysql_driver_class = "org.gjt.mm.mysql.Driver"
    mysql_dialect = "org.hibernate.dialect.MySQLInnoDBDialect"
def parse_sysconfig():
    """Load etc/sysconfig/voms-admin (under the install prefix) as a
    PropertyHelper dict of key=value settings."""
    sysconfig_filename = os.path.join(voms_prefix(),
                                      "etc", "sysconfig", "voms-admin")
    helper = PropertyHelper(sysconfig_filename)
    return helper
def get_oracle_env():
    """Build the env-var prefix (LD_LIBRARY_PATH / TNS_ADMIN) for Oracle
    tools, with values taken from the sysconfig properties."""
    sysconfig = parse_sysconfig()
    template_str = "LD_LIBRARY_PATH=$ORACLE_LIBRARY_PATH TNS_ADMIN=$TNS_ADMIN"
    template = string.Template(template_str)
    return template.substitute(sysconfig)
class VOMSError(exceptions.RuntimeError):
    """Base error for the VOMS configuration tooling (Python 2 'exceptions')."""
    pass
class PropertyHelper(dict):
    """Dict of key=value properties parsed from a sysconfig-style file.

    Blank lines and full-line '#' comments are ignored on load.
    save_properties() rewrites the values of known keys in place,
    preserving the rest of the file.
    """
    # Matches blank lines and full-line comments.
    empty_or_comment_lines = re.compile("^\\s*$|^#.*$")
    # Captures "key=value" pairs (key = group 1, value = group 2).
    property_matcher = re.compile("^\\s*([^=\\s]+)=?\\s*(\\S.*)$")

    def __init__(self, filename):
        self._filename = filename
        self._load_properties()

    def _load_properties(self):
        """Parse the file into this dict (one entry per key=value line)."""
        # 'with' guarantees the handle is closed even if parsing fails
        # (the original leaked the handle).
        with open(self._filename, "r") as f:
            for l in f:
                if re.match(PropertyHelper.empty_or_comment_lines, l) is None:
                    m = re.search(PropertyHelper.property_matcher, l)
                    if m:
                        PropertyHelper.__setitem__(self, m.groups()[0], m.groups()[1])

    def save_properties(self):
        """Write current values back, updating only lines whose key we hold."""
        def helper(l):
            # Replace everything after '=' with the current value of the key.
            # BUGFIX: only substitute keys actually present in this dict; the
            # original indexed self[...] for any matching line, raising
            # KeyError on comment lines that happen to match the pattern.
            m = re.search(PropertyHelper.property_matcher, l)
            if m and m.groups()[0] in self:
                return re.sub("=.*$", "=%s" % self[m.groups()[0]], l)
            return l
        # BUGFIX: "r+" is the read/update mode; the original "rw+" is not a
        # valid mode string (ValueError on Python 3, glibc-dependent on 2).
        with open(self._filename, "r+") as f:
            lines = [helper(l) for l in f.readlines()]
            f.seek(0)
            f.writelines(lines)
            f.truncate()
class X509Helper:
    """Extracts subject, issuer and email from a PEM certificate by shelling
    out to openssl (Python 2 code: uses the 'commands' module)."""
    def __init__(self,filename, openssl_cmd=None):
        self.filename= filename
        self.openssl_cmd = openssl_cmd
        self.parse()
    def parse(self):
        """Run `openssl x509` three times and normalise the extracted fields.

        Raises VOMSError when any openssl invocation exits non-zero.
        """
        if self.openssl_cmd:
            openssl = self.openssl_cmd
        else:
            openssl = 'openssl'
        base_cmd = openssl+' x509 -in \'%s\' -noout ' % self.filename
        status,subject = commands.getstatusoutput(base_cmd+'-subject')
        if status:
            raise VOMSError, "Error invoking openssl: "+ subject
        status,issuer = commands.getstatusoutput(base_cmd+'-issuer')
        if status:
            raise VOMSError, "Error invoking openssl: "+ issuer
        status,email = commands.getstatusoutput(base_cmd+'-email')
        if status:
            raise VOMSError, "Error invoking openssl: "+ email
        # Strip the "subject= " / "issuer= " prefixes openssl prints.
        self.subject = re.sub(r'^subject= ','',subject.strip())
        self.issuer = re.sub(r'^issuer= ','',issuer.strip())
        # Normalise the many spellings of the email RDN to /Email=
        self.subject = re.sub(r'/(E|e|((E|e|)(mail|mailAddress|mailaddress|MAIL|MAILADDRESS)))=','/Email=',self.subject)
        # Handle emailAddress also in the CA DN (Bug #36490)
        self.issuer = re.sub(r'/(E|e|((E|e|)(mail|mailAddress|mailaddress|MAIL|MAILADDRESS)))=','/Email=',self.issuer)
        # Handle also UID
        self.subject = re.sub(r'/(UserId|USERID|userId|userid|uid|Uid)=','/UID=',self.subject)
        self.email = email.strip()
        # Check that only first email address is taken from the certificate, the openssl -email command
        # returns one address per line
        emails = email.splitlines(False)
        if len(emails) > 0:
            self.email = emails[0]
    def __repr__(self):
        return 'Subject:%s\nIssuer:%s\nEmail:%s' % (self.subject, self.issuer, self.email)
import flask
from flask import Flask, url_for, render_template, redirect, request
from flask import session as login_session
import requests
import os
import sys
from flask import send_from_directory
from datetime import datetime, timedelta
from storeutils import pool_server, pay_order, make_order, add_product_to_order,query_items_in_order_detailed,query_orders
from storeutils import query_products, Pagination, query_product_detail
from storeutils import get_profile,refresh_token,code_for_token, auth_url
from flask_bootstrap import Bootstrap
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
def create_app():
    """Application factory: build a Flask app with Bootstrap attached.

    NOTE(review): the module also creates a global `app` directly below;
    this factory appears unused — confirm before relying on it.
    """
    app = Flask(__name__)
    Bootstrap(app)
    return app
import flaskr
#CAM = "http://grulicueva.homenet.org/~luciano/lab2store"
#CLIENT_ID = 'TRq20Yb5xutn9T8cjxU7MjlJRUrwqi0VwCevobaP' #luciano
#CLIENT_SECRET = 'u1pKCkRmd0QTWmww9v43a7zv8ymtNN6OdYdR5puO4ZviDCWZzI' #luciano
###CLIENT_ID = 'rCr4rtPXAhZcPJ8NqFLEVaX5UJJBQuJ90bqG1viK' #jose
###CLIENT_SECRET = 'yNoWbOya0zGG0yYCjSKChkA2sCsGuAOCdkemgLh9rhcstXMCf0' #jose
###SERVER_ADDR ="http://139.59.145.248" # DIGITALOCEAN 1 CPU
###API_KEY = ''
###CAM = ""
#CLIENT_ID = 'ISk23tJdGT7pZZpzrrPGq4n3jvf3M0TjpMbsxud3' #anna 2cpu
#CLIENT_SECRET = '8ZIoI9qfErtwzDxnI5aZsw2cy0hO7v8QSQxcCqR1HwudjTbeh5' #anna 2cpu
#SERVER_ADDR ="http://138.68.67.49" # DIGITALOCEAN 2 CPU
#CLIENT_ID = 'l7ePTstSGglimGHhpE2Ogtks3KkCa5jzk8Vj2qQ1' #peter
#CLIENT_SECRET = '5VUhu0AglAJozVGTtcXb8Um6sfInMvU6Y1c6kSYCsqWZEQ6Whl' #peter
#SERVER_ADDR ="http://localhost:5000"
# Global Flask app; configuration is layered from this module and then from
# the file pointed to by the ENVIRO environment variable.
app = Flask(__name__)
app.config.from_object(__name__)
# NOTE(review): hard-coded secret key checked into source — move to config.
app.secret_key = 'twtrtretrefsdgfgvbcvbbvbcviutiujgkhj'
############################################################################################################
#print os.environ['PAT']
#print os.environ['HOME']
#import sys
#print sys.home
#app.config['PAT'] = os.environ.get('PAT','default si no existe')
#print app.config['HISTSIZE']
#print os.environ.get('ENVIRO', 'defaeeult si no existe')#
app.config.from_envvar('ENVIRO', silent=True)
#REDIRECT_URI = app.config['CAMINO']+'callback' #luciano
# Shortcuts for values every route needs (raise KeyError if ENVIRO missing).
SERVER_ADDR=app.config['SERVER_ADDR']
CAM=app.config['CAMINO']
CLIENT_ID = app.config['CLIENT_ID']
CLIENT_SECRET = app.config['CLIENT_SECRET']
#print REDIRECT_URI
### AS I RECEIVED A WARNING OF LACK OF FAVICON, I CREATE ONE AND PUT IN THE PATH ###
def url_for_other_page(page):
    """Build the URL of the current endpoint with only 'page' replaced
    (used by the pagination templates)."""
    view_args = dict(request.view_args, page=page)
    return url_for(request.endpoint, **view_args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
@app.route('/favicon.ico')
def favicon():
    """Serve the favicon from the static folder (avoids 404 log noise)."""
    return send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico', mimetype='image/vnd.microsoft.icon')
##################################### HERE FUNCTIONS ###########################################
PER_PAGE = 10
@app.route('/productos', defaults={'page': 1})
@app.route('/productos/<int:page>')
def show_products(page):
    """Paginated product listing with per-item API detail URLs."""
    token=login_session.get('access_token')
    productos = query_products(page, PER_PAGE,token)
    arrayurls = []
    if not productos and page != 1:
        print "nothing"
        #abort(404)
    # NOTE(review): total count is hard-coded; confirm the API exposes one.
    count=200
    if page ==1:
        inicio=1
        fin=PER_PAGE
    else:
        # NOTE(review): page 2 starts at index 20, skipping items 10-19 —
        # verify this pagination arithmetic against the API.
        inicio=page*PER_PAGE
        fin=inicio+PER_PAGE
    for i in range(inicio,fin):
        url_product="http://flask-enviroment.z2spn3xrd3.us-west-2.elasticbeanstalk.com/api/product/item/"+str(i)
        arrayurls.append({'url': url_product})
    pagination = Pagination(page, PER_PAGE, count)
    return render_template('productsu.html', pagination=pagination, products=productos, urls=arrayurls, CAMINO=CAM )
##################################### HERE FUNCTIONS ###########################################
### HOMEPAGE ###
@app.route('/')
def index():
    """Homepage: show backend server status to an anonymous visitor."""
    results = pool_server()
    result = results['answer']
    # BUGFIX: default to the offline badge so 'img' is always bound; the
    # original left it undefined (NameError) for any status other than
    # "offline" or "OK".
    img = "status_offline.gif"
    if result == "OK":
        img = "server_online.gif"
    return flask.render_template('indexoff.html', CAMINO=CAM, url=auth_url(), WHO="Guest", IMG=img)
####################################################################################################################
@app.route('/order')
def makeanorder():
    """Create a new order for the logged-in user; guests are sent home."""
    token = login_session.get('access_token')
    if token is None:
        return redirect(url_for("index"))
    order = make_order(token)
    return flask.render_template('orderu.html', NUMORDER=order["order"],
                                 FECHA=order["date"], CAMINO=CAM)
@app.route('/orderdetail', defaults={'order_id': 1})
@app.route('/orderdetail/<int:order_id>')
def orderdetail(order_id):
    """Show the items of one order together with its total price."""
    token=login_session.get('access_token')
    print "token es"
    print token
    print "order id es"
    print order_id
    if token is not None:
        itemlist = query_items_in_order_detailed(order_id,token)
        suma = 0
        print "item"
        print itemlist[0]['product_id']
        # An empty order comes back as a single placeholder row; render the
        # template with an empty item list in that case.
        if itemlist[0]['product_id'] == 0 or itemlist[0]['product_id'] is None:
            itemlist=""
        else:
            # Total = sum of quantity * unit price over all rows.
            for item in itemlist:
                suma=item['quantity']*item['price']+suma
        return render_template('orderdetail.html', items=itemlist, order=order_id, price=suma, CAMINO=CAM)
    else:
        return "token no llego"
@app.route('/orderdetail/<int:order_id>/<string:val>/<int:product_id>')
def addproduct_to_order(order_id,product_id,val):
    """Add (val == 'add') or remove one unit of a product in an order,
    then bounce back to the order detail page."""
    token = login_session.get('access_token')
    quantity = 1 if val == "add" else 0
    add_product_to_order(order_id, product_id, token, quantity)
    return redirect(CAM + "/orderdetail/" + str(order_id))
PPER_PAGE_ORDER = 4
@app.route('/orderlist', defaults={'page': 1})
@app.route('/orderlist/<int:page>')
def show_order(page):
    """Paginated list of the user's orders."""
    token=login_session.get('access_token')
    # NOTE(review): queries with PER_PAGE (10) but paginates with
    # PPER_PAGE_ORDER (4) — confirm which page size is intended.
    orders = query_orders(page, PER_PAGE,token)
    arrayurls = []
    if not orders and page != 1:
        print "nothing"
        #abort(404)
    # NOTE(review): total count is hard-coded; confirm the API exposes one.
    count=200
    if page ==1:
        inicio=1
        fin=PPER_PAGE_ORDER
    else:
        # NOTE(review): page 2 starts at 8 and skips items 4-7 — verify.
        inicio=page*PPER_PAGE_ORDER
        fin=inicio+PPER_PAGE_ORDER
    for i in range(inicio,fin):
        url_order=SERVER_ADDR+str(i)
        arrayurls.append({'url': url_order})
    pagination = Pagination(page, PPER_PAGE_ORDER, count)
    return render_template('ordersu.html', pagination=pagination, orders=orders, urls=arrayurls, CAMINO=CAM )
@app.route('/payorder', methods=['POST'])
def payorder():
    """Mark the posted order as paid, then return to the order list."""
    token = login_session.get('access_token')
    if request.method == 'POST':
        pay_order(request.form.get('order_id'), token)
    return redirect(CAM + "/orderlist")
#######################################################################################################################
@app.route('/addnewproduct', methods=['POST'])
def addnewproduct_to_order():
    """Add one unit of a product (from the POST form) to an order."""
    token=login_session.get('access_token')
    if request.method == 'POST':
        product_id = request.form.get('product_id')
        order_id = request.form.get('order_id')
        product_id=int(product_id)
        print "product"
        print product_id
        print "order"
        print order_id
        # int() above either converted or raised, so this check always holds.
        if type(product_id) == int:
            resu=add_product_to_order(order_id,product_id,token,1)
        url=CAM+"/orderdetail/"+str(order_id)
        print "veamos url"
        print url
        return redirect(url)
@app.route('/productos/item', defaults={'product_id': 1})
@app.route('/productos/item/<int:product_id>')
def productdetail(product_id):
    """Render the detail page for a single product."""
    token = login_session.get('access_token')
    item = query_product_detail(product_id, token)
    return render_template('productdetu.html',
                           price=item['price'],
                           product_id=item['product_id'],
                           name=item['name'],
                           description=item['description'],
                           CAMINO=CAM)
#######################################################################################################################
#####################################################################################################################
@app.route('/me')
def askaboutme():
    """Show the logged-in user's profile and token expiry status."""
    token = login_session.get('access_token')
    if token is None:
        return redirect(url_for("index"))
    nombre = get_profile(token)
    expiry = login_session['expira']
    status = "Valid" if expiry > datetime.now() else "Expired"
    return flask.render_template('usersu.html', USERNAME=nombre, EXPIRE=expiry,
                                 STATUS=status, CAMINO=CAM)
@app.route('/ping')
def ping():
    """Re-check the backend server status (same view as the homepage)."""
    results = pool_server()
    result = results['answer']
    # BUGFIX: default to the offline badge so 'img' is always bound; the
    # original raised NameError for any status other than "offline"/"OK".
    img = "status_offline.gif"
    if result == "OK":
        img = "server_online.gif"
    return flask.render_template('indexoff.html', url=auth_url(), WHO="Guest", IMG=img, CAMINO=CAM)
@app.route('/logout')
def logout():
    """Clear the session (token and expiry) and return to the homepage."""
    login_session.clear()
    return redirect(url_for("index"))
@app.route('/callback')
def callback():
    """OAuth redirect endpoint: swap the auth code for tokens and log in."""
    args = flask.request.args
    if args.get('error', None):
        return "Authentication error: {0}".format(args['error'])
    code = args.get('code', None)
    # NOTE(review): this 'user' value is overwritten by get_profile() below;
    # the query parameter appears unused.
    user = args.get('user', None)
    if not code:
        return "Authentication error: no code provided"
    tokens = code_for_token(code)
    acc_token = tokens[0]
    login_session['access_token'] = acc_token
    # NOTE(review): the refresh token (tokens[1]) is read but never stored.
    ref_token = tokens[1]
    expira = tokens[3]
    ahora=datetime.now()
    # tokens[3] is a (year, month, day, hour, minute, second) sequence.
    expiral = datetime(int(expira[0]),int(expira[1]),int(expira[2]),int(expira[3]),int(expira[4]),int(expira[5]))
    user=get_profile(acc_token)
    login_session['expira'] = expiral
    if expiral > ahora:
        estado = "Valid"
    else:
        estado = "Deprecated"
    return render_template("indexu.html", NOW=ahora,WHO=user, STATUS=estado, EXPIRES=expiral, CAMINO=CAM)
if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run(host='127.0.0.1', port=5003)
# Generated from parity_game.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .parity_gameParser import parity_gameParser
else:
from parity_gameParser import parity_gameParser
# This class defines a complete listener for a parse tree produced by parity_gameParser.
class parity_gameListener(ParseTreeListener):
    """ANTLR-generated listener with empty enter/exit hooks for every rule
    of parity_game.g4; subclass and override only the hooks you need.
    (Generated code — regenerate with ANTLR rather than editing by hand.)"""
    # Enter a parse tree produced by parity_gameParser#game.
    def enterGame(self, ctx:parity_gameParser.GameContext):
        pass
    # Exit a parse tree produced by parity_gameParser#game.
    def exitGame(self, ctx:parity_gameParser.GameContext):
        pass
    # Enter a parse tree produced by parity_gameParser#preamble.
    def enterPreamble(self, ctx:parity_gameParser.PreambleContext):
        pass
    # Exit a parse tree produced by parity_gameParser#preamble.
    def exitPreamble(self, ctx:parity_gameParser.PreambleContext):
        pass
    # Enter a parse tree produced by parity_gameParser#start.
    def enterStart(self, ctx:parity_gameParser.StartContext):
        pass
    # Exit a parse tree produced by parity_gameParser#start.
    def exitStart(self, ctx:parity_gameParser.StartContext):
        pass
    # Enter a parse tree produced by parity_gameParser#node.
    def enterNode(self, ctx:parity_gameParser.NodeContext):
        pass
    # Exit a parse tree produced by parity_gameParser#node.
    def exitNode(self, ctx:parity_gameParser.NodeContext):
        pass
    # Enter a parse tree produced by parity_gameParser#successors.
    def enterSuccessors(self, ctx:parity_gameParser.SuccessorsContext):
        pass
    # Exit a parse tree produced by parity_gameParser#successors.
    def exitSuccessors(self, ctx:parity_gameParser.SuccessorsContext):
        pass
    # Enter a parse tree produced by parity_gameParser#owner.
    def enterOwner(self, ctx:parity_gameParser.OwnerContext):
        pass
    # Exit a parse tree produced by parity_gameParser#owner.
    def exitOwner(self, ctx:parity_gameParser.OwnerContext):
        pass
    # Enter a parse tree produced by parity_gameParser#identifier.
    def enterIdentifier(self, ctx:parity_gameParser.IdentifierContext):
        pass
    # Exit a parse tree produced by parity_gameParser#identifier.
    def exitIdentifier(self, ctx:parity_gameParser.IdentifierContext):
        pass
    # Enter a parse tree produced by parity_gameParser#parity.
    def enterParity(self, ctx:parity_gameParser.ParityContext):
        pass
    # Exit a parse tree produced by parity_gameParser#parity.
    def exitParity(self, ctx:parity_gameParser.ParityContext):
        pass
    # Enter a parse tree produced by parity_gameParser#successor.
    def enterSuccessor(self, ctx:parity_gameParser.SuccessorContext):
        pass
    # Exit a parse tree produced by parity_gameParser#successor.
    def exitSuccessor(self, ctx:parity_gameParser.SuccessorContext):
        pass
    # Enter a parse tree produced by parity_gameParser#start_node.
    def enterStart_node(self, ctx:parity_gameParser.Start_nodeContext):
        pass
    # Exit a parse tree produced by parity_gameParser#start_node.
    def exitStart_node(self, ctx:parity_gameParser.Start_nodeContext):
        pass
del parity_gameParser
import numpy as np
import pandas as pd
def cleanse_data(df):
    """ Cleans the MTA Dataset and create some elementary features

    Args:
        df: A DataFrame with correctly formatted MTA Data

    Returns:
        A DataFrame with cleansed MTA Data
    """
    # Strip leading and trailing spaces for Headers and all alphanumeric entries
    df.columns = df.columns.str.strip()
    df = strip_spaces(df)
    # check that column names are correct
    check_column_names(df.columns)
    # Create DateTime Column
    df["DATETIME"] = pd.to_datetime((df["DATE"] + ' ' + df["TIME"]), format="%m/%d/%Y %H:%M:%S")
    # Create a Unique Identifier for each Turnstile.
    df["TURNSTILE_ID"] = df["C/A"] + '_' + df["UNIT"] + '_' + df["SCP"] + '_' + df["STATION"]
    # Deaggregate Entries and Exits: the counters are cumulative per
    # turnstile, so difference consecutive readings within each device.
    df.sort_values(["TURNSTILE_ID", "DATETIME"], inplace=True)
    df["entry_diffs"] = df["ENTRIES"].diff()
    df["exit_diffs"] = df["EXITS"].diff()
    # Invalidate the diff at each turnstile boundary (it mixes two devices).
    mask = df.TURNSTILE_ID != df.TURNSTILE_ID.shift(1)
    df.loc[mask, 'entry_diffs'] = np.nan
    df.loc[mask, 'exit_diffs'] = np.nan
    # Drop first entry for cumalative entries
    df = df.dropna(how='any')
    # Drop the Cummalative entry and exit columns
    df = df.drop(columns=["ENTRIES", "EXITS"])
    # Rename the new columns to ENTRIES and EXITS
    df = df.rename(columns={"entry_diffs": "ENTRIES", "exit_diffs": "EXITS"})
    # Drop Remaining Anomalous Entries (counter resets / maintenance spikes)
    df = df.loc[df["ENTRIES"] < 3000]
    df = df.loc[df["ENTRIES"] > 0]
    df = df.loc[df["EXITS"] < 3000]
    # BUGFIX: the original repeated df["ENTRIES"] > 0 here, so negative
    # EXITS diffs (counter resets) were never filtered out.
    df = df.loc[df["EXITS"] > 0]
    # Create a Weekday Column
    days_of_the_week = {
        0: "Monday",
        1: "Tuesday",
        2: "Wednesday",
        3: "Thursday",
        4: "Friday",
        5: "Saturday",
        6: "Sunday"
    }
    df["WDAY"] = df["DATETIME"].apply(lambda x: x.weekday())
    df["WEEKDAY"] = df["WDAY"].map(days_of_the_week)
    # Create a Week Column
    df["WEEK"] = df["DATETIME"].apply(lambda x: x.week)
    # Create an Hour column
    df["HOUR"] = df["DATETIME"].apply(lambda x: x.hour)
    df.ENTRIES = df.ENTRIES.astype(int)
    df.EXITS = df.EXITS.astype(int)
    # Attach station coordinates and drop identifiers no longer needed.
    station_coordinates = pd.read_csv("./datasets/station_coordinates.csv")
    df = pd.merge(df, station_coordinates, on="STATION")
    df = df.drop(['C/A', 'UNIT', 'SCP', 'LINENAME', 'DIVISION'], axis=1)
    return df
def strip_spaces(df):
    """Strips leading and trailing spaces for all columns of type `object`.

    Args:
        df (DataFrame): A DataFrame with alphanumeric columns.

    Returns:
        The same DataFrame with object columns stripped.

    Raises:
        TypeError: if df is not a pandas DataFrame.
        ValueError: if df has no columns.
    """
    # BUGFIX: the original used `assert TypeError(...)` / `assert
    # ValueError(...)`, which assert a (truthy) exception instance and
    # therefore never fail; raise the exceptions the docstring promises.
    if not isinstance(df, pd.DataFrame):
        raise TypeError('df must be a pandas DataFrame')
    if len(df.columns) == 0:
        raise ValueError('df cannot be an empty dataframe')
    for column in df.columns:
        if df[column].dtype == object:
            try:
                df[column] = df[column].str.strip()
            except Exception:
                # Best-effort: keep the column untouched and report, matching
                # the original behavior of continuing past bad columns.
                print('Could not strip leading and trailing spaces from: ', column)
    return df
def check_column_names(columns):
    """ Checks whether the column names match the MTA Column Names

    Args:
        columns: A list-like of column names (list or pandas Index).

    Raises:
        ValueError: if the number or the names of the columns differ from
            the canonical MTA turnstile columns.
    """
    correct_column_names = ['C/A', 'UNIT', 'SCP', 'STATION',
                            'LINENAME', 'DIVISION', 'DATE',
                            'TIME', 'DESC', 'ENTRIES', 'EXITS']
    # BUGFIX: the original used `assert ValueError(...)`, which asserts a
    # truthy exception instance and never fails; raise so callers actually
    # see bad input.
    if len(columns) != len(correct_column_names):
        raise ValueError('The DataFrame does not contain the correct number of columns.')
    # list() works for both plain lists and pandas Index objects (the
    # original element-wise `!= ... .any()` broke on plain lists).
    if list(columns) != correct_column_names:
        raise ValueError('The columns are not correctly formatted MTA Data.')
#!/usr/bin/env python3
import scapy.all as scapy
import argparse as argp
def get_arguments():
    """Parse command-line arguments (-t/--target: IP or range to ARP-scan)."""
    parser = argp.ArgumentParser(description='Send an ARP broadcast and prints the result')
    parser.add_argument('--target', '-t', dest="target", type=str,
                        help='IP address of the host which you send the packet')
    args = parser.parse_args()
    return args
def scan(ip):
    """ARP-scan *ip* and return a list of {'ip', 'mac'} dicts for responders."""
    arp_request = scapy.ARP(pdst=ip)
    # Broadcast Ethernet frame so every host on the segment sees the query.
    broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
    arp_request_broadcast = broadcast/arp_request
    # srp() sends at layer 2; [0] is the list of answered (sent, recv) pairs.
    answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
    clients_list = []
    for element in answered_list:
        # element[1] is the received reply: psrc/hwsrc identify the host.
        client_dict = {"ip": element[1].psrc, "mac": element[1].hwsrc}
        clients_list.append(client_dict)
    return(clients_list)
def print_result(results_list):
    """Pretty-print the discovered hosts as an IP / MAC address table."""
    banner = "#######################################################"
    print(banner)
    print("IP\t\t\tMAC Address")
    print(banner)
    for client in results_list:
        print("{}\t\t{}".format(client["ip"], client["mac"]))
    print(banner)
# Script entry: parse args, ARP-scan the target and print the result table.
# NOTE(review): runs (and sends packets) at import time; consider a
# `if __name__ == "__main__":` guard.
args = get_arguments()
scan_result = scan(args.target)
print_result(scan_result)
|
#1059-pares-e-impares
# Read N, then N integers; print the even values in ascending order
# followed by the odd values in descending order.
N = int(input())
impar = []  # odd numbers
par = []    # even numbers
for _ in range(N):
    numero = int(input())
    # 'numero % 2' is non-zero exactly for odd values — clearer than the
    # original double-negated `False == (numero % 2 == 0)` comparison.
    if numero % 2:
        impar.append(numero)
    else:
        par.append(numero)
impar.sort(reverse=True)
par.sort()
for valor in par:
    print(valor)
for valor in impar:
    print(valor)
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 1266: minimum time to visit all points in the given order,
    moving one step horizontally, vertically or diagonally per second."""

    def minTimeToVisitAllPoints(self, points):
        """Sum the per-segment travel times between consecutive points."""
        total = 0
        for start, end in zip(points, points[1:]):
            total += self.distance(start, end)
        return total

    def distance(self, p1, p2):
        """Chebyshev distance: diagonal steps cover both axes at once."""
        dx = abs(p1[0] - p2[0])
        dy = abs(p1[1] - p2[1])
        return max(dx, dy)
if __name__ == "__main__":
    solution = Solution()
    # Sanity checks taken from the problem statement examples.
    assert 7 == solution.minTimeToVisitAllPoints([[1, 1], [3, 4], [-1, 0]])
    assert 5 == solution.minTimeToVisitAllPoints([[3, 2], [-2, 2]])
|
class Solution(object):
    """LeetCode 198 (House Robber): maximum sum of non-adjacent elements."""

    def rob(self, nums):
        """Dynamic programming with two rolling states:
        take = best total when the current house is robbed,
        skip = best total when it is skipped."""
        take, skip = 0, 0
        for value in nums:
            take, skip = skip + value, max(take, skip)
        return max(take, skip)
# Smoke test: expected maximum non-adjacent sum is 25.
print(Solution().rob([5,5,5,0,0,0,5,5,0,5,5,5,0]))#25
def main():
    """Prompt for weight (lbs) and height (inches), then report whether the
    BMI falls in the healthy 19-25 range (weight*720/height^2 formula)."""
    weight = int(input("Enter your weight in lbs:" ))
    height = int(input("Enter your height in inches:" ))
    bmi = (weight * 720) / height / height
    if 19 <= bmi <= 25:
        print("You are within the healthy range")
    elif bmi > 25:
        print("you are above the healthy range")
    else:
        print("you are below the healthy range")
|
from setuptools import setup
# Packaging metadata for the represent-boundaries Django app.
setup(
    name = "represent-boundaries",
    version = "0.2",
    url='http://github.com/rhymeswithcycle/represent-boundaries',
    description="A Web API to geographical districts loaded from shapefiles. Packaged as a Django app.",
    license = "MIT",
    packages = [
        'boundaries',
        'boundaries.management',
        'boundaries.management.commands'
    ],
    install_requires = [
        'django-jsonfield>=0.7.1',
        'django-appconf',
    ],
    classifiers = [
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'License :: OSI Approved :: MIT License',
        'Framework :: Django',
        'Topic :: Scientific/Engineering :: GIS',
    ]
)
|
""" File: prob1.py
Author: Abraham Aruguete
Purpose: i think this is a review of classes or something"""
class Simplest:
    """A minimal container holding three arbitrary values a, b and c."""

    def __init__(self, a, b, c):
        # Tuple unpacking assigns all three attributes in one statement.
        self.a, self.b, self.c = a, b, c
class Rotate:
    """Holds three values with accessors and a rotate() that cycles them:
    first <- second, second <- third, third <- (old) first."""

    def __init__(self, first, second, third):
        self._first = first
        self._second = second
        self._third = third

    def get_first(self):
        """Return the first value."""
        return self._first

    def get_second(self):
        """Return the second value."""
        return self._second

    def get_third(self):
        """Return the third value."""
        return self._third

    def rotate(self):
        """Cycle the three slots using tuple assignment (no temporary)."""
        self._first, self._second, self._third = (
            self._second, self._third, self._first)
class Band:
    """A musical group: one singer, an optional drummer, and any number of
    guitar players."""

    def __init__(self, singer):
        self._singer = singer
        self._drummer = None
        self._guitar_players = 0
        self._guitar_players_list = []

    def get_singer(self):
        """Return the current singer."""
        return self._singer

    def set_singer(self, new_singer):
        """Replace the singer."""
        self._singer = new_singer

    def get_drummer(self):
        """Return the drummer, or None if the band has none."""
        return self._drummer

    def set_drummer(self, new_drummer):
        """Replace (or install) the drummer."""
        self._drummer = new_drummer

    def add_guitar_player(self, new_guitar_player):
        """Hire one more guitar player."""
        self._guitar_players_list.append(new_guitar_player)
        self._guitar_players += 1

    def fire_all_guitar_players(self):
        """Dismiss every guitar player."""
        self._guitar_players_list = []
        self._guitar_players = 0

    def get_guitar_players(self):
        # NOTE: returns the live internal list (matches original behavior).
        return self._guitar_players_list

    def play_music(self):
        """Print one vocal line (singer-specific for Sinatra and Cobain), a
        drum line when there is a drummer, and one 'Strum!' per guitarist."""
        vocals = {
            "Frank Sinatra": "Do be do be do",
            "Kurt Cobain": "bargle nawdle zouss",
        }
        print(vocals.get(self._singer, "La la la"))
        if self._drummer is not None:
            print("Bang bang bang!")
        for _ in range(self._guitar_players):
            print("Strum!")
|
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def add_two_numbers(self, l1, l2):
        """Add two non-negative numbers stored as reversed digit lists and
        return the sum in the same representation."""
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        # Keep going while either list has digits or a carry remains.
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
def create_linked_list(l):
    """Build a linked list from an iterable; return its head (None if empty)."""
    dummy = ListNode(0)
    tail = dummy
    for value in l:
        tail.next = ListNode(value)
        tail = tail.next
    return dummy.next
def print_linked_list(ll):
    """Print the list values joined by ' -> '."""
    parts = []
    node = ll
    while node:
        parts.append(str(node.val))
        node = node.next
    print(" -> ".join(parts))
if __name__ == "__main__":
    s = Solution()
    # l1 = create_linked_list([1, 8])
    # l2 = create_linked_list([0])
    # 342 + 465 = 807, digits stored least-significant first.
    l1 = create_linked_list([2, 4, 3])
    l2 = create_linked_list([5, 6, 4])
    l = s.add_two_numbers(l1, l2)
    print_linked_list(l)
|
# -*- coding: utf-8 -*-
from pynginx.schedule.base import Schedule as BaseSchedule
import time
import traceback
class Schedule(BaseSchedule):
    """Round-robin schedule: `index` advances under the lock and returns the
    next backend slot, or -1 when no backends are registered."""
    @property
    def index(self):
        # Serialise against concurrent readers/writers of self.i/self.length
        # (both presumably maintained by BaseSchedule — confirm there).
        if self.lock.acquire():
            if 0 == self.length:
                self.lock.release()
                return -1
            self.i = (self.i + 1) % self.length
            self.lock.release()
            return self.i
if __name__ == '__main__':
    # Manual smoke test: register two backends and poll forever.
    s = Schedule()
    s.add(('127.0.0.1',7013))
    s.add(('127.0.0.1',7023))
    while True:
        try:
            s.get()
        except:
            # An uncaught exception crashed this thread, but the background
            # health-check thread keeps running.
            print traceback.format_exc()
            time.sleep(0.2)
|
'''
You will be given the number of angles of a shape with equal sides and angles,
and you need to return the number of its sides, and the measure of the interior angles.
Should the number be equal or less than 2, return:
"this will be a line segment or a dot"
Otherwise return the result in the following format:
"This shape has s sides and each angle measures d degrees"
(replace s with number of sides and d with the measure of the interior angles).
Angle measure should be floored to the nearest integer.
Number of sides will be tested from 0 to 180.
'''
def describe_the_shape(angles):
    """Describe a regular polygon with *angles* equal angles: its number of
    sides and the floored interior-angle measure.

    NOTE(review): the spec above asks for '... measures d degrees', but this
    implementation (and the sample output below) omits the word 'degrees' —
    confirm against the expected output before changing the string.
    """
    if angles <= 2:
        return 'this will be a line segment or a dot'
    interior = (angles - 2) * 180 // angles  # floored interior angle
    return 'This shape has {} sides and each angle measures {}'.format(angles, interior)
describe_the_shape(6)
# Returns:'This shape has 6 sides and each angle measures 120'
|
from .garmin_calculation import create_garmin_quick_look
from .fitbit_calculation import create_fitbit_quick_look
from .apple_calculation import create_apple_quick_look
def which_device(user):
    """Return which wearable backend this user is linked to.

    Checks garmin, then fitbit, then apple token attributes; returns None
    when the user has none of them. (The original discarded the result of
    the apple hasattr() check and returned "apple" unconditionally, which
    defeated the `if device and ...` guards in create_quick_look.)
    """
    if hasattr(user, "garmin_token"):
        return "garmin"
    if hasattr(user, "fitbit_refresh_token"):
        return "fitbit"
    if hasattr(user, "apple_refresh_token"):
        return "apple"
    return None
def create_quick_look(user,from_date,to_date):
    """Dispatch quick-look generation to the user's wearable backend;
    returns None when no known device is detected."""
    builders = {
        'garmin': create_garmin_quick_look,
        'fitbit': create_fitbit_quick_look,
        'apple': create_apple_quick_look,
    }
    builder = builders.get(which_device(user))
    if builder is not None:
        return builder(user, from_date, to_date)
# Read integers until a 0 arrives; then print their sum (the literal "0"
# when nothing was collected) and stop.
values = []  # renamed from 'list', which shadowed the builtin
while True:
    entrada = int(input())
    if entrada != 0:
        values.append(entrada)
    else:
        # 0 terminates input. (The original also had an unreachable final
        # `else: break`, removed here.)
        if not values:
            print("0")
        else:
            print(sum(values))
        break
|
""" script to scrap IPEDS website for .csv files """
import argparse
import zipfile
import shutil
import glob
import re
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
def scrape():
    """ get html page that lists its links to .zip files """
    driver = webdriver.Firefox()
    # https://nces.ed.gov/ipeds/datacenter/Default.aspx?gotoReportId=7&fromIpeds=true
    driver.get('https://nces.ed.gov/ipeds/datacenter/login.aspx?gotoReportId=7')
    driver.implicitly_wait(10)
    # Click through the two continue buttons on the login/landing pages.
    button = driver.find_element_by_id('ImageButton1')
    button.click()
    driver.implicitly_wait(10)
    button = driver.find_element_by_id('contentPlaceHolder_ibtnContinue')
    button.click()
    # Cache the rendered page so get_dlinks() can parse it offline.
    with open('./cache/ipeds_data.html', 'w') as out_file:
        out_file.write(driver.page_source)
def get_dlinks():
    """Parse the cached IPEDS HTML and write the .zip download links
    (one "data/<name>.zip" partial URL per line) to the cache."""
    # NOTE: readlines() + str() reproduces the original quirk of parsing
    # the python-list repr of the file rather than the raw text.
    with open('./cache/ipeds_data.html') as in_file:
        html_doc = str(in_file.readlines())
    soup = BeautifulSoup(html_doc, 'html.parser')
    anchor_open = '<a href="'
    zip_ext = '.zip'
    with open('./cache/download_links.txt', 'w') as out_file:
        # Anchor tags whose href mentions "data" hold the archive links.
        for tag in soup.find_all(href=re.compile("data")):
            markup = str(tag)
            # Slice out data/<filename>.zip between the href opener and ".zip".
            start = markup.find(anchor_open) + len(anchor_open)
            stop = markup.find(zip_ext) + len(zip_ext)
            link = markup[start:stop]
            if link == '':
                continue
            out_file.write("{}\n".format(link))
def unzip_delete(filename):
    """ unzips zip files and deletes zip file, take in filename without file extension """
    archive_path = './data/{}'.format(filename)
    extract_dir = './csv/{}'.format(filename)
    with zipfile.ZipFile(archive_path, "r") as zip_ref:
        zip_ref.extractall(extract_dir)
    # zipfile keeps the archive's folder structure; flatten its contents
    # into ./csv/ and drop the now-empty per-archive folder.
    for member in glob.glob('{}/*'.format(extract_dir)):
        shutil.move(member, './csv/')
    shutil.rmtree(extract_dir)
def single_download(year, check=False, prefix='HD', url='data/', file_ex='.zip'):
    """ downloads a single year's .zip data file """
    target = 'https://nces.ed.gov/ipeds/datacenter/{}{}{}{}'.format(url, prefix, year, file_ex)
    if check is True:
        # Existence probe only: HEAD avoids downloading the archive body.
        res = requests.head(target)
        print('{}{}{} {}'.format(prefix, year, file_ex, str(res)))
    else:
        res = requests.get(target)
        if res.status_code != 200:
            return -1  # file does not exist on the server
        with open('./data/{}{}.zip'.format(prefix, year), 'wb') as out_file:
            out_file.write(res.content)
        unzip_delete('{}{}'.format(prefix, year))
        return 0
def series_download(year_begin, year_end, prefix='HD', url='data/', file_ex='.zip'):
    """ downloads all .zip data files from the year_begin to year_end """
    # Accept the bounds in either order.
    if year_begin > year_end:
        year_begin, year_end = year_end, year_begin
    for year in range(year_begin, year_end + 1):
        print('Downloading {}{} File'.format(prefix, year))
        # Bug fix: forward the caller's prefix/url/file_ex — the original
        # passed the hard-coded defaults, so non-HD series were ignored.
        single_download(year, prefix=prefix, url=url, file_ex=file_ex)
        print('...Download {}{} File Complete'.format(prefix, year))
def downloader(prefix='HD', check=False, check_all=False):
    """ parses file (download_links.txt) generates by g_dlinks()
    and downloads (or checks) .zip files """
    base = 'https://nces.ed.gov/ipeds/datacenter/{}'
    with open('./cache/download_links.txt') as in_file:
        for raw in in_file:
            link = str(raw).strip()
            if check_all is True:
                # Probe every link regardless of prefix.
                print(link + ' ' + str(requests.head(base.format(link))))
                continue
            if link.find(prefix) == -1:
                continue  # not a file with the prefix we want
            if check is True:
                # Existence probe for this prefix only.
                print(link + ' ' + str(requests.head(base.format(link))))
                continue
            res = requests.get(base.format(link))
            if res.status_code != 200:
                continue  # missing file: skip silently, like the original
            # link looks like "data/<filename>.zip"; keep just the filename.
            filename = link[link.find('/') + 1:]
            with open('./data/{}'.format(filename), 'wb') as out_file:
                out_file.write(res.content)
            print('...Download {} Complete'.format(filename))
            unzip_delete('{}'.format(filename))
def main():
    """ main subroutine: parse the CLI flags and dispatch to the
    check / scrape / download helpers. """
    des = """This program scraps the IPEDS website for its .csv data files."""
    # initiate the parser
    parser = argparse.ArgumentParser(description=des)
    # define argument options
    parser.add_argument('-f',
                        '--fresh',
                        help='refreshes download cache, \
                        run if the files you are getting are old',
                        action='store_true')
    parser.add_argument('-p',
                        '--prefix',
                        help='define the prefix of the files wanted, \
                        default is "HD" (for getting HDxxxx.zip files for example)')
    parser.add_argument('-y',
                        '--year',
                        help='input one number indicating the year you want \
                        and downloads it with specified prefix')
    parser.add_argument('-s',
                        '--series',
                        nargs=2,
                        help='input two numbers indicating series of years you want \
                        (from year of first number to year of second number) \
                        and downloads them with specified prefix')
    parser.add_argument('-c',
                        '--check',
                        help='checks to see if the files \
                        (with the given prefix - default is HD - and year) exist')
    parser.add_argument('-a',
                        '--checkAll',
                        help='checks to see if any files exist \
                        (note that checkAll overrides all other options), \
                        <Response 200> indicates that it does \
                        (google search HTTP codes for other troubleshooting)',
                        action='store_true')
    parser.add_argument('-d',
                        '--downloadAll',
                        help='downloads all files with specified prefix',
                        action='store_true')
    # read arguments from the command line
    args = parser.parse_args()
    print('')
    if args.checkAll:
        print('Checking All Files...')
        downloader(check_all=True)
        return
    if args.fresh:
        print('Refreshing Download Cache...')
        scrape()
        print('...Parsing HTML for Download Links...')
        get_dlinks()
        print('...Download Cache Refreshed')
        return
    if args.prefix is None:
        args.prefix = 'HD'
    print('Prefix Used: {}'.format(args.prefix))
    if args.check:
        print('Checking {}{} File'.format(args.prefix, args.check))
        # Bug fix: honor the chosen prefix — the original hard-coded 'HD'
        # here even though it had just printed args.prefix.
        single_download(args.check, prefix=args.prefix, check=True)
        return
    if args.year:
        print('Year: {}'.format(args.year))
        print('Downloading {}{} File'.format(args.prefix, args.year))
        if single_download(args.year, prefix=args.prefix) == 0:
            print('...Download Complete')
        else:
            print('...File Does Not Exist')
        return
    if args.series:
        print('Years: {} - {}'.format(args.series[0], args.series[1]))
        # Bug fix: forward the prefix to the series downloader as well.
        series_download(int(args.series[0]), int(args.series[1]), prefix=args.prefix)
        return
    if args.downloadAll:
        print('Downloading All {} Files...'.format(args.prefix))
        downloader(prefix=args.prefix)
        print('...Download Complete')
if __name__ == '__main__':
    main()
|
from __future__ import division
import time
import numpy as np
import pandas as pd
import csv
import itertools
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
# Bug fix: import from sklearn.metrics (sklearn.metrics.classification is a
# private module removed in modern scikit-learn) and import
# mean_squared_error, which the original used at the bottom without ever
# importing it (NameError at runtime).
from sklearn.metrics import accuracy_score, precision_score, mean_squared_error
from sklearn.model_selection import KFold, GridSearchCV, cross_val_score, cross_val_predict
import matplotlib.pyplot as plt

# Candidate dataset sizes; index 7 selects 500,000 rows.
set_sizes = [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000,
             5000000, 10000000, 50000000, 100000000]
column_names = ["Feature 1", "Feature 2", "Feature 3", "Target"]
i = set_sizes[7]
dataframe = pd.read_csv("skin.csv",
                        sep=',', header=0, names=column_names, usecols=[0, 1, 2, 3],
                        nrows=i)
svc = SVC()
X_fold = dataframe.head(i)
Y_fold = X_fold.Target
X_fold = X_fold[["Feature 1", "Feature 2", "Feature 3"]]
# 10-fold cross-validated predictions for the SVM classifier.
predicted = cross_val_predict(svc, X_fold, Y_fold, cv=10)
print(accuracy_score(Y_fold, predicted))
#clf = svm.SVC(kernel='linear', C=1)
#scores = cross_val_score(regr, X, Y, cv=5)
# NOTE(review): MSE on class labels is unusual for a classifier — confirm
# this metric is intended.
print("Mean squared error: %.2f"
      % mean_squared_error(Y_fold, predicted))
#!/usr/bin/python
import os, sys
import json
# Walk a home directory and collect stat metadata (permissions, owner, size)
# for every file and directory encountered.
# NOTE(review): filestat/dirstat are never reset per directory, so each zdir
# snapshot accumulates entries from ALL directories walked so far — confirm
# whether a per-path reset was intended (the per-path JSON dump is
# commented out below).
zdir = {}
filestat=[]
dirstat=[]
for path, dirs, files in os.walk("/home/w1pko", followlinks=None):
    try:
        # Store files in the directory
        for file in files:
            #print os.path.join(path, file)
            st = os.stat( os.path.join( path, file ) )
            file_stat = {
                'name': file,
                'perm': oct( st.st_mode )[-4::],  # last 4 octal digits: permission bits
                'uid': st.st_uid,
                'gid': st.st_gid,
                'size': st.st_size
            }
            filestat.append( file_stat )
        # Store directory in
        for di in dirs:
            std = os.stat( os.path.join( path, di ) )
            di_stat = {
                'name': di,
                'perm': oct(std.st_mode)[-4::],
                'uid': std.st_uid,
                'gid': std.st_gid,
                'size': std.st_size
            }
            dirstat.append( di_stat )
        pa = path.replace('/', '-')  # path flattened for use in a filename
        zdir = { 'files':filestat, 'dirs':dirstat}
        #f = open( '/home/w1pko/json'+pa+'dat', 'w')
        #f.write( json.dumps(zdir, separators=(',',':')) )
        #f.close()
    except OSError:
        # Unreadable entries (permissions, races) are skipped silently.
        pass
|
from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
# Module-level Flask application plus its SQLAlchemy handle and
# Flask-Login manager; views are imported at the bottom.
app = Flask(__name__)
app.config['SECRET_KEY'] = "change this to be a more random key"
# SECURITY(review): database credentials are hard-coded in the URI — move
# them to environment variables or instance config before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://info3180-project1:fortis4eva@localhost/info3180-project1"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning
db = SQLAlchemy(app)
# Whitelist of allowed upload image extensions.
allowed_files = ["jpg", "jpeg", "png"]
# Flask-Login login manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
app.config.from_object(__name__)
# Imported last so view functions can import `app` without a circular import.
from app import views
|
from pandac.PandaModules import * #basic Panda modules
from direct.showbase.DirectObject import DirectObject #event handling
from Level import *
class MapGen(object):
    """Builds the level geometry and global lighting for the game world."""

    maxLevel = 5  # highest level index supported

    def __init__(self, player):
        self.player = player  # kept so nextLevel() can rebuild the map
        self.curLev = 4
        self.initMap(player)
        self.initLight()

    def initMap(self, player):
        """Create the level environment (grass/sky)."""
        #Creates grass/sky environment
        #self.env = loader.loadModel("Models/Level1")
        #self.env.setTwoSided(True)
        #self.env.reparentTo(render)
        # self.env = Level(self.curLev, player)
        self.env = Level()

    def initLight(self):
        """Attach an ambient light to the scene graph and enable it."""
        self.ambientLight = AmbientLight("ambientLight")
        # self.ambientLight.setColor((0.1, 0.1, 0.1, 1.0))
        self.ambientLight.setColor((0.0, 0.0, 0.0, 1.0))
        self.ambientLightNP = render.attachNewNode(self.ambientLight)
        #the node that calls setLight is what's illuminated by the given light
        render.setLight(self.ambientLightNP)

    def nextLevel(self):
        """Advance to the next level, rebuilding the map and lighting."""
        if self.curLev < MapGen.maxLevel:
            self.curLev += 1
            # Bug fix: initMap() requires a player argument; the original
            # called it with none, which would raise TypeError here.
            self.initMap(self.player)
            self.initLight()
            print("nextlevel")
        else:
            print("maxed out level!")
            # win screen?
|
from mod_base import*
class TakeOP(Command):
    """Take OPs from a nick."""

    def run(self, win, user, data, caller=None):
        """Remove IRC operator mode from each nick listed in ``data``.

        ``user`` is the invoking user; target nicks are parsed from
        ``data``. Unknown nicks are skipped; if none resolve to users in
        the window, an error message is sent instead.
        """
        args = Args(data)
        if data != None:
            targets = []
            for arg in args:
                # Bug fix: the original assigned the looked-up user back to
                # the ``user`` parameter, clobbering the command invoker.
                found = self.bot.FindUser(arg)
                if found == False:
                    continue
                if win.HasUser(found):
                    targets.append(found)
            if targets != []:
                win.TakeUserModes(targets, IRC_MODE_OP)
            else:
                win.Send("invalid nicks")
# Registration record consumed by the bot's module loader: the command
# class, its module type, the privilege level required, and the IRC zone
# (channel/query) where it may run.
module = {
    "class": TakeOP,
    "type": MOD_COMMAND,
    "level": 2,
    "zone":IRC_ZONE_BOTH,
}
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``group`` foreign key to OtherPeople (humanstxt app)."""
    dependencies = [
        ('humanstxt', '0003_otherpeople'),
    ]
    operations = [
        migrations.AddField(
            model_name='otherpeople',
            name='group',
            # Nullable and blank so existing rows migrate without a default.
            field=models.ForeignKey(blank=True, to='humanstxt.Group', null=True,on_delete=models.DO_NOTHING),
        ),
    ]
|
#!/usr/bin/env python3
from functools import reduce
def solve1(in_):
    """Iterate the part-1 rules to a fixed point and print the occupied count."""
    current = in_
    while True:
        stepped = apply(current)
        if stepped == current:
            break
        current = stepped
    occupied = sum(1 for cell in reduce(lambda a, b: a + b, current) if cell == '#')
    print("1: ", occupied)
def apply(in_):
    """
    One step of the part-1 rules:
    - an empty seat (L) with no occupied adjacent seats becomes occupied;
    - an occupied seat (#) with four or more occupied neighbours empties;
    - everything else is unchanged.
    """
    result = []
    for y, row in enumerate(in_):
        new_row = []
        for x, cell in enumerate(row):
            occupied = sum(1 for s in adjseats(x, y, in_) if s == '#')
            if cell == "L" and occupied == 0:
                new_row.append("#")
            elif cell == "#" and occupied >= 4:
                new_row.append("L")
            else:
                new_row.append(cell)
        result.append(new_row)
    return result
# This could be useful if I wanted to make a graphical output
def m2s(in_):
    """Render a seat map (list of rows of characters) as a printable string."""
    rows = [''.join(row) for row in in_]
    return "\n".join(rows)
def adjseats(x, y, map_):
    """Return the up-to-8 cells adjacent to (x, y), clipped at the edges.

    Order matches the original: columns left-to-right, rows top-to-bottom,
    skipping the centre cell.
    """
    width = len(map_[0])
    height = len(map_)
    cells = []
    for cx in range(max(0, x - 1), min(width - 1, x + 1) + 1):
        for cy in range(max(0, y - 1), min(height - 1, y + 1) + 1):
            if cx == x and cy == y:
                continue
            cells.append(map_[cy][cx])
    return cells
def solve2(in_):
    """Iterate the part-2 rules to a fixed point and print the occupied count."""
    current = in_
    while True:
        stepped = apply2(current)
        if stepped == current:
            break
        current = stepped
    occupied = sum(1 for cell in reduce(lambda a, b: a + b, current) if cell == "#")
    print("2: ", occupied)
def apply2(in_):
    """
    One step of the part-2 (line-of-sight) rules:
    - an empty seat (L) becomes occupied when no occupied seat is visible
      in any of the 8 directions (first non-floor cell per direction counts);
    - an occupied seat (#) empties when 5 or more visible seats are occupied;
    - floor (.) is unchanged.

    Bug fix: the original body was an unfinished stub that merely copied
    the map, so solve2() hit its fixed point immediately and reported the
    occupied count of the unmodified input.
    """
    directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1),
                  (0, 1), (1, -1), (1, 0), (1, 1)]
    height = len(in_)
    width = len(in_[0]) if height else 0
    newmap = []
    for y, l in enumerate(in_):
        newl = []
        for x, p in enumerate(l):
            if p == ".":
                newl.append(p)
                continue
            visible = 0
            for dx, dy in directions:
                nx, ny = x + dx, y + dy
                # Walk until the first seat (or the edge) in this direction.
                while 0 <= nx < width and 0 <= ny < height:
                    if in_[ny][nx] != ".":
                        if in_[ny][nx] == "#":
                            visible += 1
                        break
                    nx += dx
                    ny += dy
            if p == "L" and visible == 0:
                newl.append("#")
            elif p == "#" and visible >= 5:
                newl.append("L")
            else:
                newl.append(p)
        newmap.append(newl)
    return newmap
def find_first(map_, x, y):
    # NOTE(review): unfinished stub — it computes the map bounds but then
    # just returns the cell at (x, y); it is not called anywhere in this
    # file. Presumably meant to scan for the first visible seat in a
    # direction; the logic now lives inline in apply2-style code.
    xmax = len(map_[0])-1
    ymax = len(map_)-1
    return map_[y][x]
if __name__ == "__main__":
    # Read the seat map as a grid of single characters and solve both parts.
    with open("input.txt", "r") as rd:
        data = [list(x.strip()) for x in rd.readlines()]
    solve1(data)
    solve2(data)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 01 16:01:55 2015
@author: lenovo
"""
#score = 89
#if score >= 90:
# print 'A'
#else:
# if score >=80:
# print 'B'
# else:
# if score >=70:
# print 'C'
# else:
# if score >=60:
# print 'D'
# else:
# print 'D'
#if score >= 90:
# print 'A'
#elif score >= 80:
# print 'B'
#elif score >= 70:
# print 'C'
#elif score >= 60:
# print 'D'
#else:
# print 'E'
import math
ch = ''
while True:
a = float(raw_input('input a:'))
b = float(raw_input('input b:'))
c = float(raw_input('input c:'))
if a!= 0:
delat = b**2 - 4*a*c
if delat < 0:
print 'there is no solution'
elif delat == 0:
s = (-b)/(2*a)
print 's:', s
else:
root = math.sqrt(b**2 - 4*a*c)
s1 = (-b + root)/(2 * a)
s2 = (-b - root)/(2 * a)
print 'the solutions are:', s1, s2
ch = raw_input('qiut?:')
if ch == 'q':
break
|
#-*- coding: utf-8 -*-
'''
Created on Jan 8, 2011
@author: Peter
'''
from numpy import *
from BeautifulSoup import BeautifulSoup
# Read one saved results page and append feature rows / prices to retX, retY.
def scrapePage(retX, retY, inFile, yr, numPce, origPrc):
    # Open and read the saved HTML file
    fr = open(inFile);
    soup = BeautifulSoup(fr.read())
    i=1
    # Parse by page structure: listing tables carry r="1", r="2", ...
    currentRow = soup.findAll('table', r="%d" % i)
    while(len(currentRow)!=0):
        currentRow = soup.findAll('table', r="%d" % i)
        title = currentRow[0].findAll('a')[1].text
        lwrTitle = title.lower()
        # Flag listings marked as brand new ("new" / "nisb")
        if (lwrTitle.find('new') > -1) or (lwrTitle.find('nisb') > -1):
            newFlag = 1.0
        else:
            newFlag = 0.0
        # Only keep items that actually sold (a sold marker span exists)
        soldUnicde = currentRow[0].findAll('td')[3].findAll('span')
        if len(soldUnicde)==0:
            print "item #%d did not sell" % i
        else:
            # Extract the sale price from the listing
            soldPrice = currentRow[0].findAll('td')[4]
            priceStr = soldPrice.text
            priceStr = priceStr.replace('$','') #strips out $
            priceStr = priceStr.replace(',','') #strips out ,
            if len(soldPrice)>1:
                priceStr = priceStr.replace('Free shipping', '')
            sellingPrice = float(priceStr)
            # Drop incomplete sets priced below half the original price
            if sellingPrice > origPrc * 0.5:
                print "%d\t%d\t%d\t%f\t%f" % (yr,numPce,newFlag,origPrc, sellingPrice)
                retX.append([yr, numPce, newFlag, origPrc])
                retY.append(sellingPrice)
        i += 1
        currentRow = soup.findAll('table', r="%d" % i)
# Read the data for six LEGO sets in turn, accumulating the data matrix
# (year, piece count, original price per page).
def setDataCollect(retX, retY):
    scrapePage(retX, retY, '/home/shiyanlou/mylab10/setHtml/lego8288.html', 2006, 800, 49.99)
    scrapePage(retX, retY, '/home/shiyanlou/mylab10/setHtml/lego10030.html', 2002, 3096, 269.99)
    scrapePage(retX, retY, '/home/shiyanlou/mylab10/setHtml/lego10179.html', 2007, 5195, 499.99)
    scrapePage(retX, retY, '/home/shiyanlou/mylab10/setHtml/lego10181.html', 2007, 3428, 199.99)
    scrapePage(retX, retY, '/home/shiyanlou/mylab10/setHtml/lego10189.html', 2008, 5922, 299.99)
    scrapePage(retX, retY, '/home/shiyanlou/mylab10/setHtml/lego10196.html', 2009, 3263, 249.99)
# Compute ridge-regression coefficients for a given lambda.
def ridgeRegres(xMat,yMat,lam=0.2):
    # Matrix form of the ridge normal equation: (X'X + lam*I)^-1 X'y
    xTx = xMat.T*xMat
    denom = xTx + eye(shape(xMat)[1])*lam
    # Guard against a singular (non-invertible) matrix
    if linalg.det(denom) == 0.0:
        print "This matrix is singular, cannot do inverse"
        return
    ws = denom.I * (xMat.T*yMat)
    return ws
# Compute the matrix of ridge coefficients over 30 lambda values.
def ridgeTest(xArr,yArr):
    # Build the X and Y matrices
    xMat = mat(xArr); yMat=mat(yArr).T
    # Standardize the X and Y data
    # Mean of the target values
    yMean = mean(yMat,0)
    # Center Y by subtracting its mean
    yMat = yMat - yMean
    # Standardize the X matrix
    # column means
    xMeans = mean(xMat,0)
    # column variances
    xVar = var(xMat,0)
    # standardization: subtract the mean, divide by the variance
    xMat = (xMat - xMeans)/xVar
    # Compute coefficients for 30 exponentially spaced lambdas exp(i-10)
    numTestPts = 30
    wMat = zeros((numTestPts,shape(xMat)[1]))
    for i in range(numTestPts):
        ws = ridgeRegres(xMat,yMat,exp(i-10))
        wMat[i,:]=ws.T
    return wMat
# Cross-validation test of ridge regression.
def crossValidation(xArr,yArr,numVal=10):
    # Number of data points; xArr and yArr have the same length
    m = len(yArr)
    indexList = range(m)
    errorMat = zeros((numVal,30))
    # Main cross-validation loop
    for i in range(numVal):
        # Random split: 90% training set, 10% test set
        trainX=[]; trainY=[]
        testX = []; testY = []
        # Shuffle the indices
        random.shuffle(indexList)
        # Split into training and test sets
        for j in range(m):
            if j < m*0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        # Coefficient matrix from ridge regression on the training data
        wMat = ridgeTest(trainX,trainY)
        # Evaluate each of the 30 coefficient vectors
        for k in range(30):
            # Training and test matrices
            matTestX = mat(testX); matTrainX=mat(trainX)
            # Standardize the test data with the *training* statistics
            meanTrain = mean(matTrainX,0)
            varTrain = var(matTrainX,0)
            matTestX = (matTestX-meanTrain)/varTrain
            # Predict on the test set and store the result
            yEst = matTestX * mat(wMat[k,:]).T + mean(trainY)
            # Accumulate the squared error
            errorMat[i,k] = ((yEst.T.A-array(testY))**2).sum()
    # Mean error per lambda across the folds
    meanErrors = mean(errorMat,0)
    minMean = float(min(meanErrors))
    bestWeights = wMat[nonzero(meanErrors==minMean)]
    # Un-standardize so the model applies to the raw (unscaled) data
    xMat = mat(xArr); yMat=mat(yArr).T
    meanX = mean(xMat,0); varX = var(xMat,0)
    unReg = bestWeights/varX
    # Report the chosen model
    print "the best model from Ridge Regression is:\n",unReg
    print "with constant term: ",-1*sum(multiply(meanX,unReg)) + mean(yMat)
# Entry point: scrape all six sets, then cross-validate ridge regression.
lgX = []
lgY = []
setDataCollect(lgX, lgY)
crossValidation(lgX, lgY, 10)
|
# Global mapping: position in the week (1-7) -> weekday name.
dias = {}
def adicionarDia(posicao):
    """Prompt for a weekday name and store it at position 1-7 in ``dias``."""
    if(posicao >= 1 and posicao <= 7):
        dia = str(input("Digite o dia da semana para ser adicionado: "))
        dias[posicao] = dia
        print(dias)
    else:
        # Positions outside 1-7 are rejected ("the week has only 7 days").
        print("A semana tem apenas 7 dias!")
    return dias
def exibirDias(dias):
    """Print every stored weekday name.

    Bug fix: iterating the dict yields its keys (numeric positions), so
    the original printed position numbers instead of the day names.
    """
    for d in dias:
        print("Dia: ", dias[d], "\n")
def main(Args=None):
    """Menu loop: add weekdays, list them, or quit."""
    cont = "s"
    while (cont == 's'):
        op = int(input("O que deseja?\n1. Adicionar dia à semana.\n2. Exibir dias da semana.\n3. Sair.\n"))
        if (op == 1):
            posicao = int(input("Digite um número para a posição: "))
            adicionarDia(posicao)
        elif (op == 2):
            exibirDias(dias)
        elif (op == 3):
            # Option 3 ends the loop on the next iteration.
            cont = "n"
            print("OBRIGADO!")
        else:
            print("OPÇÃO INVÁLIDA!!")
# Bug fix: the original guard compared `__name__ == __name__`, which is
# always True, so main() also ran on import. Use the standard idiom.
if __name__ == "__main__":
    main()
from django.conf.urls import patterns, include, url
from django.contrib import admin
from app import views
# URL routing table (legacy django.conf.urls.patterns style, removed in
# Django 1.10 — NOTE(review): migrate to a plain list of url()/path() calls
# when upgrading Django).
urlpatterns = patterns('',
    # Examples:
    url(r'^$', views.home, name='home'),
    url(r'^post/', views.PostView.as_view(), name='post'),
    url(r'^comment/', views.CommentView.as_view(), name='postComment'),
    url(r'^signUp/', views.signUp, name='signUp'),
    url(r'^signIn/', views.signIn, name='signIn'),
    url(r'^addUser/', views.UserSignUp.as_view(), name='addUser'),
    url(r'^logout/', views.logout, name='signOut'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
|
import os
import pdb
import random
from text_data import loadPrepareData
from text_data import indexesFromSentence
from text_data import batch2TrainData
import torch.nn as nn
import torch
# Paths to the dialogue-corpus splits used for the batching experiment.
DATA_DIR = "/home/changmin/research/steganography/data/"
TEXT = "dialogues_text.txt"
ALL_PATH = os.path.join(DATA_DIR, "dialogues_text.txt")
TRAIN_PATH = os.path.join(DATA_DIR, "train/dialogues_train.txt")
VAL_PATH = os.path.join(DATA_DIR, "validation/dialogues_validation.txt")
TEST_PATH = os.path.join(DATA_DIR, "test/dialogues_test.txt")
# Build the vocabulary and sentence pairs from the training split.
voc, pairs = loadPrepareData(None, "dialog", TRAIN_PATH, 768)
#print(voc.word2index)
#print(pairs[0][0])
#print(indexesFromSentence(voc, pairs[0][0]))
batch_size = 32
# Sample a random training batch and unpack its padded tensors.
batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
embedding = nn.Embedding(voc.num_words, 256)
print("input_variable:", input_variable)
print("lengths:", lengths)
print("target_variable:", target_variable)
print("mask:", mask)
print("max_target_len:", max_target_len)
print(input_variable.shape)
# NOTE(review): the reshapes below assume input_variable holds exactly
# batch_size * 3 * 256 elements — confirm the padded sequence length.
test = input_variable.view(batch_size, 3, 256)
print(embedding(input_variable).shape)
print(embedding(test).shape)
print(embedding(input_variable).view(batch_size, 3, 256, 256).shape)
|
'''Create a program that asks the user to enter their name and their age. Print out a message
addressed to them that tells them the year that they will turn 100 years old.'''
#without using function
'''name=str(input("enter your name :"))
agee=int(input("enter your current age:"))
hundredth_yr=2021+(100-agee)
print(name,"become 100 yrs old in",hundredth_yr)
'''
#using function
# Gather the user's name and current age interactively.
name=str(input("enter your name :"))
agee=int(input("enter your current age:"))
def age(n, a, current_year=2021):
    """Return the calendar year in which a person aged ``a`` turns 100.

    ``n`` (the name) is unused but kept for call compatibility.
    ``current_year`` generalizes the previously hard-coded 2021 while
    defaulting to it, so existing calls behave identically.
    """
    hundredth_yr = current_year + (100 - a)
    return hundredth_yr
# Report the milestone year using the helper above.
print(name,"become 100 yrs old in",age(name,agee))
import json
import pymongo
import itertools
from progress.bar import Bar
import gzip
from jsonlinewriter import TransactionWriter
from anonymize import anonymize
import argparse
import os
def grouper(iterable, n, fillvalue=None):
    """Yield successive n-sized tuples from ``iterable``, padding the last
    group with ``fillvalue`` (the classic itertools 'grouper' recipe)."""
    chunks = [iter(iterable)] * n
    return itertools.zip_longest(*chunks, fillvalue=fillvalue)
# Export all blockchain transactions to JSON-line files, anonymized.
parser = argparse.ArgumentParser()
parser.add_argument('-nas')
args = parser.parse_args()
client = pymongo.MongoClient("mongodb://marvin.nptlab.di.unimi.it")
blockchain_db = client['blockchain_db']
blocks = blockchain_db['blocks']
# Only blocks that actually contain transactions; keep the cursor alive
# for the long scan.
projection = blocks.find({"transactions":{"$exists": True, "$ne":[]}},{"transactions":1, "transaction_ids":1}, no_cursor_timeout = True)
n_blocks = blocks.count()
gblock_processed = 0
n_gblock = 0
missed = list()
bar = Bar("Blocchi Filtrati", max=n_blocks/10000)
print('\n')
# Bug fix: argparse defaults -nas to None, and `None != ""` is True, so the
# original crashed on None.split('/') whenever the flag was omitted.
# Testing truthiness covers both None and the empty string.
if args.nas:
    path_list = args.nas.split('/')
    base_path = os.getcwd()
    for segment in path_list:
        base_path = os.path.join(base_path, segment)
else:
    base_path = os.getcwd()
with TransactionWriter('transactions', base_path) as t_writer:
    # Process the cursor in groups of 10000 blocks; grouper pads the last
    # group with None, which the filter below drops.
    for gblock in grouper(projection,10000):
        for block in filter(lambda b: b is not None, gblock):
            for tid, trin in zip(block['transaction_ids'],block['transactions']):
                transaction = dict()
                transaction['tid'] = tid
                # Strip signature/reference fields before anonymizing.
                del trin['signatures']
                del trin['ref_block_num']
                del trin['ref_block_prefix']
                transaction['transaction'] = trin
                for op in transaction['transaction']['operations']:
                    op['tid'] = tid
                    op['timestamp'] = transaction['transaction']['expiration']
                    try:
                        op = anonymize(op)
                        t_writer.write_line(transaction)
                    except Exception as e:
                        # Operations that fail to anonymize are collected
                        # and dumped to failed_ops.txt below.
                        missed.append(op)
        bar.next()
with open('failed_ops.txt','wb') as f:
    for miss in missed:
        f.write(json.dumps(miss).encode('utf-8'))
        f.write('\n'.encode('utf-8'))
bar.finish()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#様々なグローバル変数群
# Assorted kernel-style global variables (comments translated to English).
# NOTE(review): `[] * SIZE` always evaluates to an empty list — if
# fixed-size buffers were intended these should be e.g. [None] * SIZE;
# confirm the intent before changing.
canonb = []*CANBSIZ     # buffer for erase/kill processing
coremap = []*CMAPSIZ    # free space for core allocation
swapmap = []*SMAPSIZ    # free space for swap allocation
rootdir = None          # pointer to the inode of the root directory
cputype = None          # CPU type: 40, 45 or 70
execnt = None           # number of processes in exec
lbolt = None            # time of day in 60th not in time
time = []*2             # seconds elapsed since 1970
tout = []*2             # time until the next sleep
mpid = None             # generic unique PID (bug fix: original read "mpid = = None", a syntax error)
runin = None            # scheduling flag
runout = None           # scheduling flag
runrun = None           # scheduling flag
curpri = None           # more scheduling
maxmem = None           # maximum memory per process
lks = None              # pointer to the clock device
rootdev = None          # dev of root, see conf.c
swapdev = None          # dev of swap, see conf.c
swplo = None            # block number of swap space
nswap = None            # size of swap space
updlock = None          # lock for sync
rablock = None          # block to be read ahead
regloc = ""             # user's register-save instructions, see trap.c
'''
Callout structure of routines arranged by the clock interrupt (see clock.c).
Each entry takes an explicit argument containing an amount of time; used,
for example, to delay output on teletypes.
'''
class Callo():
    # One clock-callout entry: run c_func(c_arg) after c_time has elapsed.
    def __init__(self, c_time, c_arg, c_func):
        self.c_time = c_time # incremental time
        self.c_arg = c_arg # argument to the routine
        self.c_func = c_func # the routine itself
class Mount():
    '''
    Mount structure:
    used to locate the superblock and position
    of a mounted file.
    '''
    def __init__(self, m_dev, m_bufp, m_inodp):
        self.m_dev = m_dev # the mounted device
        self.m_bufp = m_bufp # pointer to the superblock
        self.m_inodp = m_inodp # pointer to the mounted inode
if __name__ == '__main__':
    # Importable as a module; announce when executed directly.
    print(__file__+" is loaded.")
|
#encoding: utf-8
# Pattern 1: n sets the height of the rhomboid; it must be >= 5.
import sys
if len(sys.argv) != 2 :
    print 'Args: número'
    sys.exit(2)
n = int(sys.argv[1])
if n < 5 :
    print 'El primer argumento debe ser mayor o igual a 5'
    sys.exit(1)
i = 0
j = 0
c = n
espacios = 0 # spaces after the first asterisk
# Upper half: the two asterisk columns converge row by row.
while i < n :
    j = 0
    while c > 0:
        print '',
        c-=1
        j+=1 # counter of blank columns inserted
    print '*',
    i+=1
    c = n - i
    # insert the inner spaces and the second asterisk
    espacios = n - j
    if espacios > 0 :
        while espacios -1 > 0 :
            print ' ',
            espacios -= 1
        print '*',
    print
c = 2
i = 0
# Lower half: the two asterisk columns move apart again.
while i < n - 1 :
    j = 0
    while c > 0 :
        print '',
        c-=1
        j+=1
    print '*',
    i+=1
    c += i + 2
    espacios = n - j
    if espacios > 0 :
        while espacios - 1 > 0 :
            print ' ',
            espacios -= 1
        print '*',
    print
|
"""initial database migration
Revision ID: 51796ab1b4e0
Revises:
Create Date: 2019-12-17 13:46:53.096680
"""
from alembic import op
import sqlalchemy as sa
import datetime
import uuid
import os
import sys
from flask_bcrypt import Bcrypt
from importlib import import_module
# revision identifiers, used by Alembic.
revision = '51796ab1b4e0'
down_revision = None
branch_labels = None
depends_on = None
flask_bcrypt = Bcrypt()
current_working_dir = os.getcwd()
# Import the User model dynamically so bulk_insert in upgrade() can target
# its table without a hard import at module load time.
user = import_module('main.model.user')
def upgrade():
    """Create the blacklist_tokens and user tables, then seed five accounts."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('blacklist_tokens',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('token', sa.String(length=500), nullable=False),
    sa.Column('blacklisted_on', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('token')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('email', sa.String(length=255), nullable=False),
    sa.Column('registered_on', sa.DateTime(), nullable=False),
    sa.Column('admin', sa.Boolean(), nullable=False),
    sa.Column('public_id', sa.String(length=100), nullable=True),
    sa.Column('username', sa.String(length=50), nullable=True),
    sa.Column('password_hash', sa.String(length=100), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('public_id'),
    sa.UniqueConstraint('username')
    )
    # Seed one admin plus four regular users; passwords are bcrypt-hashed.
    op.bulk_insert(
        user.User.__table__,
        [
            {'id': 0, 'email': 'admin@gmail.com', 'registered_on': datetime.datetime.now(), 'admin': True, 'public_id': uuid.uuid4().hex, 'username': 'admin', 'password_hash': flask_bcrypt.generate_password_hash('admin').decode('utf-8')},
            {'id': 1, 'email': 'user1@gmail.com', 'registered_on': datetime.datetime.now(), 'admin': False, 'public_id': uuid.uuid4().hex, 'username': 'user1', 'password_hash': flask_bcrypt.generate_password_hash('1234').decode('utf-8')},
            {'id': 2, 'email': 'user2@gmail.com', 'registered_on': datetime.datetime.now(), 'admin': False, 'public_id': uuid.uuid4().hex, 'username': 'user2', 'password_hash': flask_bcrypt.generate_password_hash('1234').decode('utf-8')},
            {'id': 3, 'email': 'user3@gmail.com', 'registered_on': datetime.datetime.now(), 'admin': False, 'public_id': uuid.uuid4().hex, 'username': 'user3', 'password_hash': flask_bcrypt.generate_password_hash('1234').decode('utf-8')},
            {'id': 4, 'email': 'user4@gmail.com', 'registered_on': datetime.datetime.now(), 'admin': False, 'public_id': uuid.uuid4().hex, 'username': 'user4', 'password_hash': flask_bcrypt.generate_password_hash('1234').decode('utf-8')},
        ]
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the seeded tables (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user')
    op.drop_table('blacklist_tokens')
    # ### end Alembic commands ###
|
import numpy as np
import matplotlib.pyplot as plt
# f = open(r"filter_h.dat", "rb") # バイナリファイル読み込み。
# tmp = f.read() # ファイルの中身をread()メソッドで一気に読み込み。
# for idx in range(len(tmp)): # ファイルのバイト数をlen()で取り出して、その回数for文で回す、
# print(tmp[idx]) # 1バイトづつデータを出力
# print(type(tmp[10]))
# with open('oto.raw', mode='rb') as fin:
# content = fin.read()
# print(content)
# Load (axis, value) column pairs for the raw signal and the filtered one.
data03_axis1, data03_value1 = np.loadtxt(
    "./test.txt", delimiter=',', unpack=True)
data04_axis1, data04_value1 = np.loadtxt(
    "./test_h.txt", delimiter=',', unpack=True)
# (matplotlib time-domain preview, kept for reference)
# fig = plt.figure(figsize=(4, 6))
# ax = fig.add_subplot(111)
# # ax.plot(data03_axis1, data03_value1, "o-", color="k", label="value1 of data01")
# ax.plot(data04_axis1, data04_value1, "o-", color="r", label="value1 of data04")
# ax.set_xlabel("axis1")
# ax.set_ylabel("value1")
# ax.legend(loc="upper left")
# plt.show()
# print(data03_value1)
# for i in range(31, 4000):
#     data04_value1.append([0])
# Zero-pad the filter response before the FFT.
# NOTE(review): 3969 zeros suggests padding a 31-sample response up to 4000
# points — confirm the intended FFT length against the input file.
z = np.zeros(3969)
a = np.append(data04_value1, z)
freq = np.fft.fft(a)
Amp = np.abs(freq)  # magnitude spectrum
plt.plot(Amp, label='|F(k)|')
plt.show()
|
from nfd_router_client import RouterClient
from subprocess import check_output
import subprocess
import time
import socket
import json
class RouterEM(object):
def __init__(self, logger, vnfm_host, vnfm_port, sv_mode, probe_id):
self.mode = 'no_SV'
self.probe_id = probe_id
self.logger = logger
self.vnfm_host = vnfm_host
self.vnfm_port = vnfm_port
self.logger.debug("creating vnf client with: %s, %s", vnfm_host, vnfm_port)
self.client = RouterClient(logger, vnfm_host, vnfm_port)
self.update_mode(sv_mode)
def notify_vnfm(self, listening_interface, containerID):
self.client.notify_vnfm(listening_interface, containerID)
def enforce_initial_configuration(self, config):
time.sleep(10)
self.logger.debug("enforcing initial configuration")
cf={}
if(config.get('ingress','null')!='null'):
cf['ingress_configurations']=config['ingress']
cf['container_down']=config['container_down']
cf['replicas_configurations']=''
del config['ingress']
del config['container_down']
cf['to_add']=config
cf['strategy']='multicast'
with open('/root/nfd_conf', 'a') as outfile:
json.dump(cf, outfile)
outfile.write('\n')
self.logger.debug(str(config))
for key, face_list in config.iteritems():
for face in face_list:
prefix=key
interface = " tcp://"+face
str_command = "nfdc register "+prefix+interface
subprocess.call(args=str_command, shell=True)
self.logger.debug("installing rule"+str_command)
time.sleep(1)
strategy_command = "nfdc strategy set {0} /localhost/nfd/strategy/multicast/%FD%03".format(key)
subprocess.call(args=strategy_command, shell=True)
self.logger.debug("changing strategy to multicast")
def update_configuration(self, new_config):
# config == {"prefix":[list_of_ip_addr_in_string]}
cf=new_config
cf['strategy']='round'
self.logger.debug("updating configuration")
with open('/root/nfd_conf', 'a') as outfile:
json.dump(cf, outfile)
outfile.write('\n')
self.logger.debug(str(new_config))
new_rules = new_config['to_add']
for key, face_list in new_rules.iteritems():
prefix =key
for face in face_list:
interface = " tcp://"+face
str_command = "nfdc register "+prefix+interface
ping_command = "ping -c 5 "+face.split(':')[0]
subprocess.call(args=ping_command, shell=True)
subprocess.call(args=str_command, shell=True)
self.logger.debug("installing rule "+str_command)
time.sleep(2)
strategy_command = "nfdc strategy set {0} /localhost/nfd/strategy/round-robin/%FD%01".format(prefix)
subprocess.call(args=strategy_command, shell=True)
self.logger.debug("changing strategy to :"+strategy_command)
def register_face(self, prefix, remote_face):
self.logger.debug('registring face')
self.logger.debug('prefix: {0} -- remote_face: {1}'.format(prefix, remote_face))
cmd = 'nfdc register {0} tcp://{1}'.format(prefix, remote_face)
subprocess.call(args=cmd, shell=True)
self.logger.debug('face registred')
def unregister_face(self, prefix, deprecated_face):
self.logger.debug('unregistring faces')
self.logger.debug('prefix: {0} -- face: {1}'.format(prefix, deprecated_face))
out = check_output('nfd-status')
faceid_list = [face for face in out.split('\n') if face.startswith(' faceid=')]
for face in faceid_list:
values = [value for value in face.split(' ') if value != '']
if values[0].split('=')[0] == 'faceid':
faceid = values[0].split('=')[1]
if values[1].split('=')[0] == 'remote':
remote_face = values[1].split('://')[1]
if remote_face == deprecated_face:
self.logger.debug('remote_face == '+remote_face)
self.logger.debug('deprecated_face == '+deprecated_face)
cmd = "nfdc unregister "+prefix+' '+faceid
subprocess.call(args=cmd, shell=True)
self.logger.debug("command executed : "+cmd)
break
def update_mode(self, mode):
    """Switch the probe into SV mode: start the SV binary once and push
    its runtime configuration over a local UDP control socket."""
    if mode == 'SV' and self.mode == "no_SV":
        cmd = 'cd /SV_ST/bin/ && ./SV &'
        subprocess.call(args=cmd, shell=True)
        self.mode = mode
        self.logger.debug("changing mode to SV")
        # {"action":"edit", "drop":True, "address":"127.0.0.1", "port":9999, "report_each":5}
        sv_configuration = {"action": "edit",
                            "name": str(self.probe_id),
                            "drop": True,
                            "address": "127.0.0.1",
                            "port": 9999,
                            "report_each": 5
                            }
        time.sleep(2)  # let the SV process start before configuring it
        json_config = json.dumps(sv_configuration)
        UDP_IP = "127.0.0.1"
        UDP_PORT = 10000  # SV control port
        COMMAND = json_config
        self.logger.debug("UDP target IP: " + UDP_IP)
        self.logger.debug("UDP target port: " + str(UDP_PORT))
        self.logger.debug("message: " + COMMAND)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # NOTE(review): sendto() with a str only works on Python 2.
        sock.sendto(COMMAND, (UDP_IP, UDP_PORT))
|
# Greeting template: GREET("Ann") -> "Hello, Ann!"
GREET = 'Hello, {}!'.format

def greeting_for_all_friends(friends):
    """Return a greeting for every friend, or None for an empty/missing list."""
    if not friends:
        return None
    return [GREET(friend) for friend in friends]
|
# Space Image Format dimensions: pixels wide / pixels tall per layer.
PW = 25
PT = 6

def split_layers(l, n):
    """Yield consecutive chunks of length n from sequence l
    (the final chunk may be shorter)."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
# Advent of Code 2019 day 8: Space Image Format.
f = open("input", "r")
image = f.read().strip('\n')
digits = PW * PT  # pixels per layer
layers = list(split_layers(image, digits))
# Part 1: find the layer with the fewest '0' digits ...
fewest = layers[0]
min0 = fewest.count('0')
for i in range(1, len(layers)):
    min_next = layers[i].count('0')
    if min0 > min_next:
        min0 = min_next
        fewest = layers[i]
# ... and print its (count of '1') * (count of '2').
print ( fewest.count('1') * fewest.count('2') )
# Part 2: render the image; per pixel, the first layer that is not
# transparent ('2') wins.  '0' = black, '1' = white.
x = y = 0
for i in range(0, digits):
    for l in layers:
        if l[i] == '0':
            print(' ', end='')
            break
        elif l[i] == '1':
            print('#', end='')
            break
    x = (x + 1) % PW
    if not x:  # end of a row reached
        y += 1
        print()
f.close()
import re
# Sum every run of digits found anywhere in the chosen file
# (py4e "regex_sum" exercise; falls back to the assignment file).
filename = input("enter the name of the file: ")
if(len(filename)==0):
    filename= "regex_sum_691845.txt"
handle=open(filename)
summ=0
for line in handle:
    #print(line)
    line=line.rstrip()
    y= re.findall('[0-9]+',line)  # all digit runs on this line
    if(len(y)==0) :
        continue
    #print(y)
    for x in y:
        w = int(x)
        summ=summ+w
print(summ)
import os
import socket
import logging
from channel.connector import Connector
class Client(Connector):
    """UNIX-socket client used to communicate with the daemon."""

    def __init__(self):
        Connector.__init__(self)

    def start(self):
        """
        Connect to the UNIX socket if it exists.

        :return: True when the connection succeeded, False otherwise
        :rType: bool
        """
        # guard clause: bail out early when the socket file is missing
        if not os.path.exists(self.path):
            logging.error('Could not find socket: %s' % self.path)
            return False
        try:
            self.socket.connect(self.path)
        except OSError as e:
            logging.error('Failed to connect to socket: %s' % e.strerror)
            return False
        return True

    def send_message(self, message):
        """
        Encode *message* and send it over the socket.

        :param message str: the message to encode and send
        """
        self.socket.send(self.encode_message(message))

    def close(self):
        """Close the socket connection."""
        self.socket.close()
|
import cv2
import os
import numpy as np
import scipy as scp
import scipy.misc
from enum import Enum
class ClassesDef(Enum):
    """Labelling scheme selector: 2-class (paved / non-paved) or
    3-class (paved / non-paved / rock)."""
    PAVED_NONPAVED = 1
    PAVED_NONPAVED_ROCK = 2
#classes = ClassesDef.PAVED_NONPAVED_ROCK
def good_res_image(img):
    """Heuristically decide whether *img* has a usable region.

    The image is binarised and its connected components are counted; only
    when exactly two components exist (background + one region) does the
    pixel scan run, accepting the image as soon as any component
    accumulates >= 10% of all pixels.

    :param img: BGR image (as returned by cv2.imread).
    :return: True when the image passes the heuristic, else False.
    """
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgbw = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)[1] # ensure binary
    ret, labels = cv2.connectedComponents(imgbw)
    # per-component pixel counters, indexed by component label
    countCCPixels = np.zeros(ret, dtype=int)
    rows,cols = labels.shape[:2]
    if len(countCCPixels) == 2:
        for i in range(rows):
            for j in range(cols):
                # count only pixels that are not pure black in the original
                if not (img[i][j][0] == 0 and img[i][j][1] == 0 and img[i][j][2] == 0):
                    countCCPixels[labels[i][j]] = countCCPixels[labels[i][j]] + 1
                    # early exit once any component reaches 10% of the image
                    if countCCPixels[labels[i][j]] >= rows*cols*0.1:
                        return True
    #if countCCPixels[0] >= rows*cols*0.1 and countCCPixels[1] >= rows*cols*0.1 :
    #return True
    return False
    # for i in range(len(countCCPixels)):
    #     if countCCPixels[i] >= rows*cols/4.0:
    #         return True
    # return False
def alreadyAnalysedImg(img, analysed_images):
    """Return True if *img* is pixel-identical to any image in *analysed_images*.

    Only the first three (RGB) channels are compared, matching the original
    element-wise loop.  Assumes all images share the same shape (they are
    all loaded with mode='RGB' by the callers) — an image with a different
    shape simply never compares equal.

    :param img: numpy array of shape (H, W, 3).
    :param analysed_images: iterable of numpy arrays of the same shape.
    :return: True when a pixel-for-pixel duplicate is found, else False.
    """
    for analysed_image in analysed_images:
        # np.array_equal replaces the original O(H*W) Python double loop
        # with one vectorised comparison; same result for same-shaped images.
        if np.array_equal(analysed_image[..., :3], img[..., :3]):
            return True
    return False
# def saveStreet(street):
# if street[len(street)-1] == ' ':
# street = street[0:len(street)-1]
# streetpath_input = os.path.join(maindir,street)
# os.makedirs(os.path.join(dest_dir,street), exist_ok=True)
# analysed_images = []
# for imgName in os.listdir(maindir+''+street):
# input_image = os.path.join(streetpath_input,imgName)
# image = scipy.misc.imread(input_image, mode='RGB')
# if alreadyAnalysedImg(image, analysed_images):
# continue
# analysed_images.append(image)
# scipy.misc.imsave(os.path.join(os.path.join(dest_dir,street),imgName), image)
def numRepetitionsStreet(street):
    """Count how many images under maindir/<street> duplicate an earlier one.

    NOTE(review): relies on a module-level ``maindir`` and on the
    long-deprecated ``scipy.misc.imread`` — confirm both still exist in
    the runtime environment.
    """
    if street[len(street)-1] == ' ':
        street = street[0:len(street)-1]  # drop a single trailing space
    streetpath_input = os.path.join(maindir,street)
    numRep = 0
    analysed_images = []  # every distinct image seen so far
    for imgName in os.listdir(maindir+''+street):
        input_image = os.path.join(streetpath_input,imgName)
        image = scipy.misc.imread(input_image, mode='RGB')
        if alreadyAnalysedImg(image, analysed_images):
            numRep = numRep + 1
            continue
        analysed_images.append(image)
    return numRep
def getNumberOfImagesFromClass(line, classes, u):
    """Parse one per-street summary *line* and classify the street.

    The line must contain the fields "number of images:", "number of paved
    images:", "number of rock images:", "number of non-paved images:",
    "average paved pixels:", "average rock pixels:" and "average non-paved
    pixels:", in that order.

    :param line: summary text for one street (may be blank).
    :param classes: a ClassesDef member selecting 2- or 3-class labelling.
    :param u: undecided threshold — when the paved+rock vs non-paved margin
        is below u * total the street is labelled '?'.
    :return: (label, numPaved, numRock, numNonPaved, avgPavedPx, avgRockPx,
        avgNonPavedPx); label is '?' for blank/undecided lines.
    """
    if len(line.strip()) == 0:
        return '?', 0, 0, 0, 0, 0, 0
    numImgs = int(line.split('number of images: ')[1].split(' number')[0])  # parsed for validation; not otherwise used
    numPavedImgs = int(line.split('number of paved images: ')[1].split(' number')[0])
    numRockImgs = int(line.split('number of rock images: ')[1].split(' number')[0])
    numNonPavedImgs = int(line.split('number of non-paved images: ')[1].split(' average')[0])
    avgPavedPx = float(line.split('average paved pixels: ')[1].split(' average')[0])
    avgRockPx = float(line.split('average rock pixels: ')[1].split(' average')[0])
    avgNonPavedPx = float(line.split('average non-paved pixels: ')[1])
    stats = (numPavedImgs, numRockImgs, numNonPavedImgs, avgPavedPx, avgRockPx, avgNonPavedPx)
    total = numPavedImgs + numRockImgs + numNonPavedImgs
    # Undecided when there are no images at all, or the paved(+rock) vs
    # non-paved margin is smaller than the requested fraction of the total.
    if total == 0 or abs(numPavedImgs + numRockImgs - numNonPavedImgs) < u * total:
        return ('?',) + stats
    if classes == ClassesDef.PAVED_NONPAVED:
        if numPavedImgs + numRockImgs > numNonPavedImgs:
            return ('PAVED',) + stats
        else:
            return ('NON PAVED',) + stats
    if classes == ClassesDef.PAVED_NONPAVED_ROCK:
        if numPavedImgs > numNonPavedImgs and numPavedImgs > numRockImgs:
            return ('PAVED',) + stats
        if numNonPavedImgs > numPavedImgs and numNonPavedImgs > numRockImgs:
            return ('NON PAVED',) + stats
        # BUG FIX: the original tested `numRockImgs > numNonPavedImgs`
        # twice; ROCK must outnumber BOTH other classes.
        if numRockImgs > numPavedImgs and numRockImgs > numNonPavedImgs:
            return ('ROCK',) + stats
        # Tie-breaking: treat paved+rock as one side, then pick the larger.
        if numPavedImgs + numRockImgs > numNonPavedImgs:
            if numRockImgs >= numPavedImgs:
                return ('ROCK',) + stats
            else:
                return ('PAVED',) + stats
        else:
            return ('NON PAVED',) + stats
|
"""Madlibs Stories."""
class Story:
    """A Madlibs story template.

    Construct with the list of prompt words and the template text:

    >>> s = Story(["noun", "verb"],
    ...           "I love to {verb} a good {noun}.")

    Fill it in by passing a mapping of {prompt: answer}:

    >>> s.generate({"verb": "eat", "noun": "mango"})
    'I love to eat a good mango.'
    """

    def __init__(self, words, text):
        """Store the template and build human-readable prompt labels."""
        # e.g. "plural_noun" -> "Plural Noun"
        self.prompts = {word: word.replace('_', ' ').title() for word in words}
        self.template = text

    def generate(self, answers):
        """Return the template with every {prompt} replaced by its answer."""
        filled = self.template
        for prompt, answer in answers.items():
            filled = filled.replace("{" + prompt + "}", answer)
        return filled
# Built-in story templates: each pairs the prompt words with a template
# that references them as {placeholders}.
stories = []
stories.append(Story(
    ["place", "noun", "verb", "adjective", "plural_noun"],
    """Once upon a time in a long-ago {place}, there lived a/n {adjective} {noun}. It loved to {verb} {plural_noun}."""
))
stories.append(Story(
    ["place", "noun", "verb", "adjective", "plural_noun", "gender_pronoun"],
    """
The time to {adjective}ly {verb} has arrived. If {gender_pronoun} decides not to {verb} then so be it but when the
{noun} decides to confront the {plural_noun} at the {place} there will be no time to {verb} so the time is NOW I say.
"""
))
stories.append(Story(
    ["place", "noun", "verb", "adjective", "plural_noun"],
    """
Release the {noun}s, the director cried. There aren't nearly enough {noun}s in this {place}. And those {noun}s over there
aren't even {verb}ing as {adjective}ly as they could or should be. What we really need is more {plural_noun}.
"""
))
stories.append(Story(
    ["place", "noun", "verb", "adjective", "plural_noun", "gender_pronoun"],
    """
One day a/n {adjective} {noun} decided to {verb} for some {plural_noun}, not because {gender_pronoun} really wanted to
but because {gender_pronoun} thought it would be a good idea. So {gender_pronoun} went about {verb}ing and eventually
ended up {adjective}ly {verb}ing every day for ever and ever.
"""
))
|
# -*- coding: UTF-8 -*-
from System import *
from collections import deque
from System.Math import *
from processing.segmentation.connected import *
def detect_contours(segments, mask):
    """Mark contour pixels of a segmentation (IronPython / .NET arrays).

    A pixel is a contour pixel when any of its 8 neighbours (with
    wrap-around at the image edges) belongs to a different segment.
    Image borders are always forced to True.  Pixels excluded by *mask*
    (when a mask is given) are skipped entirely.

    :param segments: 2-D .NET array of segment ids.
    :param mask: optional 2-D boolean array limiting the scan, or None.
    :return: 2-D .NET bool array with contour pixels set to True.
    """
    height = segments.GetLength(0)
    width = segments.GetLength(1)
    rmask = Array.CreateInstance(bool, height, width)
    for i in range(0, height):
        for j in range(0, width):
            if mask != None and not(mask[i,j]):
                continue
            seg = segments[i,j]
            contour = False
            # scan the 3x3 neighbourhood; modulo gives toroidal wrap-around
            for k in range(-1,2):
                if contour:
                    break
                for l in range(-1,2):
                    y = (i+k)%height
                    x = (j+l)%width
                    if segments[y,x] != seg:
                        contour = True
                        break
            rmask[i,j] = contour
    # the outer border is always treated as contour
    for j in range(0, width):
        rmask[0,j] = True
        rmask[height-1,j] = True
    for i in range(0, height):
        rmask[i,0] = True
        rmask[i,width-1] = True
    return rmask
def calculate_chain(parents, x, y):
    """Walk the parent links from (y, x) back to a root.

    *parents* is indexed as parents[y, x, 0] = parent row and
    parents[y, x, 1] = parent column; a coordinate of -1 marks the root.
    Returns the visited (y, x) pairs in walk order.
    """
    chain = []
    while x >= 0 and y >= 0:
        chain.append((y, x))
        # print ("({},{}) -> ...") debug hook removed from the hot loop
        y, x = parents[y, x, 0], parents[y, x, 1]
    return chain
def calculate_contour_lines(segments, cmask = None):
    """Trace one (approximately closed) contour chain per segment.

    Phase 1: BFS over each segment's contour pixels, recording per pixel
    its BFS parent and distance from the BFS start.
    Phase 2: pixels that were never a parent are chain end-points; keep
    the longest ones per segment.
    Phase 3: per segment, join the two end-point chains whose union is
    longest while overlapping in fewer than 10 pixels, producing one line.

    NOTE(review): written for IronPython 2 (.NET Array, tuple-parameter
    lambda below); the bare print("") looks like a debug leftover, and the
    `for el in was_parent: el = False` loop is a no-op (bool arrays are
    already created False).

    :param segments: 2-D .NET array of segment ids.
    :param cmask: optional contour mask from detect_contours(), or None.
    :return: dict mapping segment id -> list of (y, x) contour points.
    """
    height = segments.GetLength(0)
    width = segments.GetLength(1)
    parents = Array.CreateInstance(int,height, width, 2)
    lengths = Array.CreateInstance(int, height, width)
    queued = Array.CreateInstance(bool, height, width)
    segment_was = {}
    # parent sentinel: -1 marks "no parent" (chain root)
    for i in range(0, height):
        for j in range(0, width):
            for k in range(0, 2):
                parents[i,j,k] = -1
    # Phase 1: one BFS per segment over its masked contour pixels.
    for i in range(0, height):
        for j in range(0, width):
            if cmask != None and not(cmask[i,j]) and not(queued[i,j]):
                continue
            s = segments[i,j]
            if segment_was.get(s) != None:
                continue  # this segment already has a BFS tree
            q = deque()
            q.append([i,j])
            c = 1  # number of queued-but-unprocessed pixels
            # print ("Segment: {}".format(s))
            queued[i,j] = True
            segment_was[s] = True
            while c > 0:
                print("")  # NOTE(review): debug leftover — confirm removable
                (qy,qx) = q.popleft()
                c -= 1
                for k in range(-1,2):
                    for l in range(-1,2):
                        y = (qy+k)
                        x = (qx+l)
                        if x < 0 or y < 0 or x >= width or y >= height:
                            continue
                        if cmask != None and not(cmask[y,x]):
                            continue
                        s2 = segments[y,x]
                        if s != s2 or queued[y,x]:
                            continue
                        q.append([y,x])
                        queued[y,x] = True
                        lengths[y,x] = lengths[qy,qx]+1
                        parents[y,x,0] = qy
                        parents[y,x,1] = qx
                        c += 1
    was_parent = Array.CreateInstance(bool, height, width)
    # NOTE(review): no-op loop — rebinding `el` does not write the array;
    # harmless because .NET bool arrays are created all-False.
    for el in was_parent:
        el = False
    for i in range(0, height):
        for j in range(0, width):
            if cmask != None and not(cmask[i,j]):
                continue
            py = parents[i,j,0]
            px = parents[i,j,1]
            if py < 0 or px < 0:
                continue
            was_parent[py,px] = True
    # Phase 2: collect chain end-points (pixels that parent nothing).
    lchains = {}
    longest_threshold = 20  # keep at most this many longest chains/segment
    for i in range(0, height):
        for j in range(0, width):
            if cmask != None and not(cmask[i,j]):
                continue
            if was_parent[i,j]:
                continue
            s = segments[i,j]
            l = lengths[i,j]
            slchains = lchains.get(s)
            if slchains == None:
                slchains = [ (i,j) ]
            else:
                slchains.append((i,j))
            lchains[s] = slchains
    # Phase 3: per segment, join the best pair of chains into one line.
    rchains = {}
    for seg in lchains.keys():
        # Python 2 tuple-parameter lambda: sort end-points by chain length
        sslchains = sorted(lchains[seg],
                           key = lambda (i,j) : lengths[i,j])
        sslchains.reverse()
        filtered_chains = sslchains[0:longest_threshold]
        nchains = len(filtered_chains)
        chain_lists = [calculate_chain(parents, cx, cy) for (cy,cx) in filtered_chains]
        chain_sets = [set(c) for c in chain_lists]
        mi = -1
        mj = -1
        mlength = -1
        # choose the chain pair with the largest union and an overlap
        # of fewer than 10 pixels
        for i in range(0, nchains):
            for j in range(0, i):
                s1 = chain_sets[i]
                s2 = chain_sets[j]
                sor = s1.union(s2)
                nsor = len(sor)
                sand = s1.intersection(s2)
                nsand = len(sand)
                if nsor > mlength and nsand < 10:
                    mlength = nsor
                    mi = i
                    mj = j
        c1 = chain_lists[mi]
        c2 = chain_lists[mj]
        # reverse the second chain so the two halves meet end-to-end
        c2.reverse()
        rchains[seg] = c1 + c2
    return rchains
def distance(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    delta_x = x1 - x2
    delta_y = y1 - y2
    return Sqrt(delta_x * delta_x + delta_y * delta_y)
def detect_border_points(segments, cmask, slines, window = 50):
    """For each segment contour, keep the points whose distance to the
    segment centroid is a local extremum over a +/- *window* neighbourhood.

    BUG FIX: the right-hand accumulators for the local MAXIMUM test
    (rmaxleft / rmaxright) were computed with min() in the original, so
    the `rmaxleft <= r and r >= rmaxright` branch compared against the
    smallest radius in the neighbourhood; they now use max().

    :param segments: 2-D array of segment ids.
    :param cmask: contour mask passed through to
        calculate_segment_parameters().
    :param slines: dict segment id -> ordered list of (y, x) contour points.
    :param window: half-width of the circular neighbourhood examined.
    :return: dict segment id -> list of extremal (y, x) points.
    """
    height = segments.GetLength(0)
    width = segments.GetLength(1)
    (counts,xmins,xmaxs,ymins, ymaxs, meansx, meansy) \
        = calculate_segment_parameters(segments, cmask)
    spoints = {}
    for seg in slines.keys():
        sline = slines[seg]
        n = len(sline)
        # centroid of the segment, used as the radial reference point
        (my,mx) = (meansy[seg], meansx[seg])
        points = []
        for c in range(0, n):
            (py,px) = sline[c]
            r = distance(mx, my, px, py)
            rminleft = None
            rmaxleft = None
            rminright = None
            rmaxright = None
            # left neighbourhood (indices wrap: the contour is closed)
            for j in range(-window, 0):
                (oy,ox) = sline[(c+j)%n]
                ro = distance(mx, my, ox, oy)
                rminleft = ro if rminleft is None else min(rminleft, ro)
                rmaxleft = ro if rmaxleft is None else max(rmaxleft, ro)  # was min()
            # right neighbourhood
            for j in range(1, window+1):
                (oy,ox) = sline[(c+j)%n]
                ro = distance(mx, my, ox, oy)
                rminright = ro if rminright is None else min(rminright, ro)
                rmaxright = ro if rmaxright is None else max(rmaxright, ro)  # was min()
            if rminleft >= r and r <= rminright:
                # local minimum of the radius
                points.append(sline[c])
            elif rmaxleft <= r and r >= rmaxright:
                # local maximum of the radius
                points.append(sline[c])
        spoints[seg] = points
    return spoints
def p2p_distance(ax, ay, bx, by):
    """Euclidean distance between the points (ax, ay) and (bx, by)."""
    diff_x = ax - bx
    diff_y = ay - by
    return Sqrt(diff_x * diff_x + diff_y * diff_y)
def p2l_distance(ax, ay, bx, by, cx, cy):
    """Distance from point C to the line through A and B.

    Uses the law of cosines: s is the length of C's projection onto AB
    measured from A, and the perpendicular distance follows by Pythagoras.
    """
    # c is the point; a, b define the line
    r1 = p2p_distance(ax, ay, cx, cy)
    r2 = p2p_distance(bx, by, cx, cy)
    d = p2p_distance(ax, ay, bx, by)
    s = (r1*r1 - r2*r2 + d*d) / (2*d)
    dist = Sqrt(r1*r1-s*s)
    return dist
def contours_simplifications(slines, eps):
    """Simplify every closed contour in *slines* with Douglas-Peucker.

    Each closed polyline is split at its two most distant points into two
    open halves; each half is simplified with curve_simplification() and
    the halves are re-joined (dropping the duplicated join point).

    BUG FIX: the original sliced with the loop variables i/j AFTER the
    search loop finished (i.e. always the last pair scanned) instead of
    the recorded most-distant pair mi/mj, making the mDist search dead
    code and the split point arbitrary.

    :param slines: dict segment id -> ordered list of (y, x) points.
    :param eps: Douglas-Peucker tolerance.
    :return: dict segment id -> simplified list of (y, x) points.
    """
    slines2 = {}
    for (seg, sline) in slines.items():
        n = len(sline)
        # Brute-force O(n^2) search for the two most distant points
        # (original note: dumb, but avoids leaving stray points).
        # FIXME: performance.
        mDist = -1
        mi = -1
        mj = -1
        for i in range(0, n):
            for j in range(0, i):
                (p1y, p1x) = sline[i]
                (p2y, p2x) = sline[j]
                d = p2p_distance(p1x, p1y, p2x, p2y)
                if d > mDist:
                    mDist = d
                    mi = i
                    mj = j
        # split at the recorded extremal pair (mj < mi by construction)
        l1 = sline[mj:mi+1]
        l2 = sline[mi:n] + sline[0:mj+1]
        c1 = curve_simplification(l1, eps)
        c2 = curve_simplification(l2, eps)
        nc1 = len(c1)
        # drop c1's last point — it is c2's first
        c = c1[0:(nc1-1)] + c2
        slines2[seg] = c
    return slines2
def curve_simplification(sline, eps):
    """Recursive Douglas-Peucker simplification of the open polyline
    *sline*: keep the point farthest from the end-point chord, recurse on
    both halves when it deviates more than *eps*, otherwise collapse the
    whole span to its two end points."""
    n = len(sline)
    if n < 2:
        return sline
    first = (first_y, first_x) = sline[0]
    last = (last_y, last_x) = sline[n-1]
    farthest_idx = -1
    farthest = -1.0
    for idx in range(1, n - 1):
        (cy, cx) = sline[idx]
        deviation = p2l_distance(first_x, first_y, last_x, last_y, cx, cy)
        if deviation > farthest:
            farthest = deviation
            farthest_idx = idx
    if farthest <= eps:
        # every interior point is close enough to the chord
        return [first, last]
    left = curve_simplification(sline[0:farthest_idx+1], eps)
    right = curve_simplification(sline[farthest_idx:n], eps)
    # left's last point duplicates right's first — drop it
    return left[0:len(left)-1] + right
|
from abc import ABCMeta, abstractmethod
from UVMPMException import InvalidRequestSyntax
from Client import Client
class Request(metaclass=ABCMeta):
    """Abstract base class for all parsed client requests.

    BUG FIX: the original used ``__metaclass__ = ABCMeta``, the Python 2
    spelling, which is silently ignored on Python 3 (this file uses
    Python 3 annotations and super() elsewhere), so @abstractmethod was
    never enforced.  All concrete subclasses already implement
    is_of_type(), so their behavior is unchanged.
    """

    def __init__(self, client: Client, raw_request: str):
        self.client = client            # originating client connection
        self.raw_request = raw_request  # unparsed request line

    @staticmethod
    @abstractmethod
    def is_of_type(to_match: str):
        """Return True when *to_match* is a request of this type."""
        pass
class Handshake(Request):
    """Initial "HELLO" greeting from a client.

    The inherited Request.__init__ is sufficient, so none is defined here.
    """

    @staticmethod
    def is_of_type(to_match: str):
        return to_match == "HELLO"
class Authentication(Request):
    """Credential request of the form "AUTH:<username>:<password>".

    :raises InvalidRequestSyntax: when the line does not split into
        exactly three colon-separated parts.
    """

    def __init__(self, client: Client, raw_request: str):
        super().__init__(client, raw_request)
        parts = self.raw_request.split(":")
        if len(parts) != 3:
            raise InvalidRequestSyntax(self.raw_request)
        _, self.username, self.password = parts

    @staticmethod
    def is_of_type(to_match: str):
        return to_match.startswith("AUTH:")
class ListUsers(Request):
    """"LIST" request: enumerate the connected users.

    The inherited Request.__init__ is sufficient, so none is defined here.
    """

    @staticmethod
    def is_of_type(to_match: str):
        return to_match == "LIST"
class SendMessage(Request):
    """Message delivery request of the form "To:<username>:<message>".

    :raises InvalidRequestSyntax: when the line does not split into
        exactly three colon-separated parts (note: this also rejects
        messages that themselves contain a colon).
    """

    def __init__(self, client: Client, raw_request: str):
        super().__init__(client, raw_request)
        parts = self.raw_request.split(":")
        if len(parts) != 3:
            raise InvalidRequestSyntax(self.raw_request)
        _, self.receiving_username, self.message = parts

    @staticmethod
    def is_of_type(to_match: str):
        return to_match.startswith("To:")
class Logout(Request):
    """"BYE" request: the client is disconnecting.

    The inherited Request.__init__ is sufficient, so none is defined here.
    """

    @staticmethod
    def is_of_type(to_match: str):
        return to_match == "BYE"
class Unknown(Request):
    """Catch-all request type; matches anything, so it must be tried last.

    The inherited Request.__init__ is sufficient, so none is defined here.
    """

    @staticmethod
    def is_of_type(to_match: str):
        return True
|
from .BaseEditor import BaseEditor
from PyQt5 import QtWidgets, QtCore
class ChoicesEditor(BaseEditor):
    """Property editor presenting a fixed set of choices in a combo box."""

    def __init__(self, parent, item, model):
        BaseEditor.__init__(self, parent, item, model)
        self.combo = QtWidgets.QComboBox(self)
        # NOTE(review): the signal hookup is disabled, so __selectedItem is
        # currently dead code — edits are only committed via setModelData.
        #self.combo.currentIndexChanged.connect(self.__selectedItem)
        self.layout().addWidget(self.combo)

    def __selectedItem(self, index):
        # Push the newly selected value into the model (unused while the
        # connection above stays commented out).
        self.setModelData(self.model, self.item.index())

    def setEditorData(self, index):
        """Populate the combo box from the property's declared choices and
        select the item's current value."""
        # block signals so programmatic population doesn't trigger edits
        self.combo.blockSignals(True)
        self.combo.clear()
        data = self.getItemData()
        prop = self.item.prop
        for choice in prop.metaData.choices:
            self.combo.addItem(choice.display_name)
        self.combo.setCurrentText(data)
        self.combo.blockSignals(False)

    def setModelData(self, model, index):
        """Write the currently selected text back into the model."""
        model.setData(index, self.combo.currentText(), QtCore.Qt.EditRole)
|
from spack import *
import distutils.dir_util as du
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Csctrackfinderemulation(Package):
    """Spack package recipe for the CMS CSCTrackFinderEmulation external.

    homepage/url are placeholders; the source actually comes from the git
    branch declared in version() below.
    """
    homepage = "http://www.example.com"
    url = "http://www.example.com/example-1.2.3.tar.gz"
    version('1.2', git='https://github.com/cms-externals/CSCTrackFinderEmulation', branch='cms/CMSSW_8_1_X')

    def install(self, spec, prefix):
        """Build with make, stage into installDir, and copy the staged
        tree into the Spack prefix (then a scram toolfile can be written
        by the shared helper imported above)."""
        make()
        make('install')
        du.copy_tree('installDir',prefix)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import json
import os
from gchtheme import GchRstReader
_g = globals()
here = os.path.dirname(__file__)
# Load the shared site settings and promote each key to an UPPERCASE
# module-level Pelican setting.
with open(os.path.join(here, 'settings.json')) as f:
    settings = json.load(f)
for k, v in settings.iteritems():  # NOTE(review): iteritems -> Python 2 only
    _g[k.upper()] = v
# Local-preview overrides: relative URLs, no feeds.
SITEURL = ''
RELATIVE_URL = True
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Use the custom reStructuredText reader from the theme.
READERS = {'rst': GchRstReader}
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
import pytest
from pants.backend.shell.goals import test
from pants.backend.shell.goals.test import ShellTestRequest, TestShellCommandFieldSet
from pants.backend.shell.target_types import (
ShellCommandTarget,
ShellCommandTestTarget,
ShellSourcesGeneratorTarget,
)
from pants.build_graph.address import Address
from pants.core.goals import package
from pants.core.goals.test import TestResult, get_filtered_environment
from pants.core.util_rules import archive, source_files
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pants RuleRunner wired with the shell-test goal rules and the target
    types used by the tests below; PATH is inherited so shell tools
    (echo, bash) resolve on the host."""
    rule_runner = RuleRunner(
        rules=[
            *test.rules(),
            *source_files.rules(),
            *archive.rules(),
            *package.rules(),
            get_filtered_environment,
            QueryRule(TestResult, (ShellTestRequest.Batch,)),
        ],
        target_types=[
            ShellSourcesGeneratorTarget,
            ShellCommandTarget,
            ShellCommandTestTarget,
        ],
    )
    rule_runner.set_options([], env_inherit={"PATH"})
    return rule_runner
def test_shell_command_as_test(rule_runner: RuleRunner) -> None:
    """End-to-end check of experimental_test_shell_command: one generated
    file, one passing test target and one failing test target."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                shell_sources(name="src")
                shell_command(
                    name="msg-gen",
                    command="echo message > msg.txt",
                    tools=["echo"],
                    output_files=["msg.txt"],
                )
                experimental_test_shell_command(
                    name="pass",
                    execution_dependencies=[":msg-gen", ":src"],
                    tools=["echo"],
                    command="./test.sh msg.txt message",
                )
                experimental_test_shell_command(
                    name="fail",
                    execution_dependencies=[":msg-gen", ":src"],
                    tools=["echo"],
                    command="./test.sh msg.txt xyzzy",
                )
                """
            ),
            # test.sh exits 0 iff file $1 contains exactly the string $2
            "test.sh": dedent(
                """\
                contents="$(<$1)"
                if [ "$contents" = "$2" ]; then
                    echo "contains '$2'"
                    exit 0
                else
                    echo "does not contain '$2'"
                    exit 1
                fi
                """
            ),
        }
    )
    # the script must be executable inside the sandbox
    (Path(rule_runner.build_root) / "test.sh").chmod(0o555)

    def run_test(test_target: Target) -> TestResult:
        # drive one target through the ShellTestRequest batch rule
        input: ShellTestRequest.Batch = ShellTestRequest.Batch(
            "", (TestShellCommandFieldSet.create(test_target),), None
        )
        return rule_runner.request(TestResult, [input])

    pass_target = rule_runner.get_target(Address("", target_name="pass"))
    pass_result = run_test(pass_target)
    assert pass_result.exit_code == 0
    assert pass_result.stdout == "contains 'message'\n"
    fail_target = rule_runner.get_target(Address("", target_name="fail"))
    fail_result = run_test(fail_target)
    assert fail_result.exit_code == 1
    assert fail_result.stdout == "does not contain 'xyzzy'\n"
|
import datetime
#Stdlib Imports
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.db import models
from django.templatetags.static import static
#from templated_email import send_templated_mail
from .functions import unique_slugify
# Dominican Republic provinces offered in address forms.  NOTE: some names
# carry UTF-8 byte escapes because this Python 2 module stores them as
# encoded byte strings.
provinces = ['Distrito Nacional', 'Altagracia', 'Azua', 'Bahoruco', 'Barahona',
    'Dajabon', 'Duarte', 'El Seybo', 'Elias Pi\xc3\xb1a', 'Espaillat', 'Hato Mayor',
    'Independencia', 'La Romana', 'La Vega', 'Maria Trinidad Sanchez',
    'Monse\xc3\xb1or Nouel', 'Montecristi', 'Monte Plata', 'Pedernales', 'Peravia',
    'Puerto Plata', 'Hermanas Mirabal', 'Samana', 'San Cristobal', 'San Juan',
    'San Pedro de Macoris', 'Sanchez Ramirez', 'Santiago de los Caballeros',
    'Santiago Rodriguez', 'Valverde', 'San Jose de Ocoa', 'Santo Domingo']
# Django `choices` pairs: (stored value, display value) — identical here.
PROVINCES = tuple((province, province) for province in provinces)
class AppUserManager(BaseUserManager):
    """Custom manager for the custom user model."""

    def create_user(self, email, full_name, password=None):
        """
        Create and save a user with the given email, full name and
        password.

        :raises ValueError: when no email is supplied.
        """
        if not email:
            raise ValueError('Todo usuario debe tener un email')
        account = self.model(
            email=AppUserManager.normalize_email(email),
            full_name=full_name,
        )
        account.set_password(password)
        account.save(using=self._db)
        return account

    def create_superuser(self, full_name, password, email):
        """
        Create and save a superuser: a regular user with the admin flag
        switched on.
        """
        superuser = self.create_user(
            email=email,
            full_name=full_name,
            password=password,
        )
        superuser.is_admin = True
        superuser.save(using=self._db)
        return superuser
class Category(models.Model):
    """All the categories for the services offered by the organizations."""
    name = models.CharField(max_length=40)

    def __unicode__(self):
        # Python 2 string representation (this app predates Python 3)
        return self.name
class Organization(models.Model):
    """
    This class represents the institution profile, it must be approved by
    one of the members of the organization, every organization must be
    approved before being published.
    """
    name = models.CharField(max_length=40,
                            verbose_name="Nombre de la Institucion"
                            )
    # URL-safe identifier regenerated from the name on save(); not editable
    slug = models.SlugField(default='', editable=False, unique=True)
    url = models.URLField(max_length=255,
                          verbose_name="Pagina Web",
                          null=True,
                          blank=True
                          )
    description = models.TextField(verbose_name="Descripcion",
                                   null=True,
                                   blank=True
                                   )
    logo = models.ImageField(upload_to="profile_pics",
                             null=True,
                             blank=True
                             )
    phone = models.CharField(max_length=10,
                             null=True,
                             blank=True
                             )
    address = models.CharField(max_length=100,
                               null=True,
                               blank=True,
                               verbose_name="Direccion"
                               )
    province = models.CharField(max_length=100,
                                choices=PROVINCES,
                                null=True,
                                blank=True
                                )
    categories = models.ManyToManyField(Category)
    # moderation flags: only approved organizations are published
    approved = models.BooleanField(default=False)
    is_active = models.BooleanField(default=False)

    def get_picture_url(self):
        """
        If the organization has a picture, return its URL; otherwise fall
        back to the generic company placeholder image.
        """
        if self.logo:
            return self.logo.url
        else:
            return '/static/img/companies/default.jpg'

    def save(self, *args, **kwargs):
        # regenerate the unique slug from the name on every save
        unique_slugify(self, self.name)
        super(Organization, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.name
class UserProfile(AbstractBaseUser):
    """"User profile class representing the institutions"""
    email = models.EmailField(verbose_name="Correo Electronico",
                              max_length=255,
                              db_index=True,
                              unique=True,
                              )
    full_name = models.CharField(max_length=40,
                                 verbose_name="Nombre Completo"
                                 )
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    objects = AppUserManager()
    organization = models.ForeignKey(Organization, blank=True, null=True)
    # the email address is the login identifier
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['full_name']

    def get_full_name(self):
        """return the full name"""
        return self.full_name

    def get_short_name(self):
        # The user is identified by their email address
        return self.email

    def __unicode__(self):
        return self.email

    def save(self, *args, **kwargs):
        """On first save, bootstrap a default Organization when none exist
        yet and attach this user to it."""
        if not self.id:
            if not Organization.objects.all():
                org = Organization(name="admin's Organization")
                org.save()
                #Create Organization for the user
                #template_name='user_creation',
                self.organization = org
            # NOTE(review): structure reconstructed — if organizations
            # already exist, no organization is assigned here; confirm
            # whether new users are meant to join an existing one.
            #send_templated_mail(
            #        from_email='info@mypimes.com',
            #        recipient_list = [self.email],
            #        context={
            #                'username': self.email,
            #                'full_name': self.get_full_name,
            #                'signup_date': datetime.datetime.today(),
            #                },
            #        )
        super(UserProfile, self).save(*args, **kwargs)

    def has_perm(self, perm, obj=None):
        """Does the user have a specific permission?"""
        # Simplest possible answer: Yes, always
        return True

    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        # Simplest possible answer: Yes, always
        return True

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
class Event(models.Model):
    """
    The purpose of this class is to manage the dates of all the events of the
    national web of entrepreneurship.
    """
    name = models.CharField(max_length=80)
    # URL-safe identifier regenerated from the name on save(); not editable
    slug = models.SlugField(default='', editable=False, unique=True)
    description = models.TextField()
    created = models.DateField()  # set automatically on first save()
    from_date = models.DateField()
    to_date = models.DateField()
    organization = models.ForeignKey(Organization, null=True, blank=True)
    url = models.URLField(null=True, blank=True)
    cost = models.CharField(max_length=18, null=True, blank=True)
    categories = models.ManyToManyField(Category)

    def __unicode__(self):
        return self.name

    def save(self, *args, **kwargs):
        '''On save, fill created field'''
        if not self.id:
            self.created = datetime.date.today()
        unique_slugify(self, self.name)
        super(Event, self).save(*args, **kwargs)
class MailingList(models.Model):
    """
    This form contains all the people being subscribed
    to our mailing list. All fields are mandatory.
    """
    full_name = models.CharField(max_length=80)
    email = models.EmailField()
    province = models.CharField(max_length=24)

    def __unicode__(self):
        return self.full_name
|
from dataclasses import dataclass
from functools import reduce
from typing import Optional, Iterable, Union
Rule = list[str]
@dataclass
class Node:
    # Either a terminal character (str) or a list of alternative branches,
    # each branch being the ordered sequence of Nodes that must all match.
    value: Union[str, list[list['Node']]]

    def get_text_with_matching_nodes_removed(self, text: str) -> Optional[list[str]]:
        """Try to match this node against a prefix of *text*.

        Returns every possible remainder of *text* after consuming a
        match, or None when nothing matches.
        """
        if text == "":
            # we have exhausted the string
            # this is useful for self referential nodes
            return None
        if isinstance(self.value, str):
            # terminal: consume exactly one occurrence of the character
            if text.startswith(self.value):
                return [text.replace(self.value, '', 1)]
            return None
        options = []
        for nodes in self.value:
            # each individual branch, start with original text each time;
            # resolve_node threads the remainders through the branch
            options += reduce(resolve_node, nodes, [text])
        return options
def resolve_node(text: Iterable[str], node: 'Node') -> list[str]:
    """Reducer step: feed every candidate remainder in *text* through
    *node* and collect all resulting remainders for the next node in
    the branch."""
    remainders: list[str] = []
    for candidate in text:
        matched = node.get_text_with_matching_nodes_removed(candidate)
        # None means the node could not consume a prefix of this candidate
        if matched is not None:
            remainders.extend(matched)
    return remainders
def resolve_graph(rules: dict[str, list[Rule]]) -> dict[str, Node]:
    """Build the Node graph for *rules*.

    All nodes are created empty up front so self-referential rules can be
    wired to their own node while it is being filled in.
    """
    graph = {number: Node([]) for number in rules.keys()}
    for number, alternatives in rules.items():
        node = graph[number]
        for option in alternatives:
            terminal = (len(option) == 1
                        and option[0].startswith('"')
                        and option[0].endswith('"'))
            if terminal:
                node.value = option[0][1]  # strip the surrounding quotes
            elif isinstance(node.value, list):
                node.value.append([graph[ref] for ref in option])
    return graph
class MessageData: # pylint: disable=R0903
    """Holds the resolved rule graph plus the messages to validate."""

    def __init__(self, rules: dict[str, list[Rule]], messages: list[str]):
        self.__graph: dict[str, Node] = resolve_graph(rules)
        self.__messages = messages

    def get_number_of_matching_rules(self, rule_number: str):
        """Count messages fully consumed by rule *rule_number*: an
        empty-string remainder means the whole message matched."""
        answers = [self.__graph[rule_number].get_text_with_matching_nodes_removed(m)
                   for m in self.__messages]
        return len([a for a in answers if a is not None and '' in a])
def get_message_data(substitutions: list[str]) -> MessageData:
    """Read input/input19.txt (rules block, blank line, messages block)
    and build a MessageData.

    *substitutions* are extra "N: ..." rule lines that override the
    parsed rules (used for the part-two self-referential rules).
    """
    with open("input/input19.txt") as message_file:
        message = message_file.read()
    split_data = message.split("\n\n")
    rules = dict(to_rule(line) for line in split_data[0].split("\n"))
    messages = [line.strip() for line in split_data[1].split("\n")]
    for subst in substitutions:
        key, val = to_rule(subst)
        rules[key] = val
    return MessageData(rules, messages)
def to_rule(text: str)-> tuple[str, list[Rule]]:
    """Parse one input line "N: a b | c d" into (rule number, alternatives),
    where each alternative is the list of its space-separated tokens."""
    number, body = text.split(": ")
    alternatives = [alternative.split(" ") for alternative in body.split(" | ")]
    return (number, alternatives)
# Parsed puzzle input; the second instance applies the part-two rule
# substitutions (rules 8 and 11 become self-referential).
MESSAGE_DATA = get_message_data([])
MESSAGE_DATA_WITH_SUBSTITUTIONS = get_message_data(['8: 42 | 42 8', '11: 42 31 | 42 11 31'])

if __name__ == "__main__":
    # part one, then part two: messages fully matched by rule "0"
    print(MESSAGE_DATA.get_number_of_matching_rules("0"))
    print(MESSAGE_DATA_WITH_SUBSTITUTIONS.get_number_of_matching_rules("0"))
|
class CacheUnit:
    """Round-robin pool of cache hosts with per-host success accounting."""

    def __init__(self):
        self.nodeList = []   # hosts as (ip, port) entries, in rotation order
        self.succAccum = {}  # (ip, port) -> number of reported successes
        # BUG FIX: succCnt was incremented by report() but never
        # initialised, raising AttributeError on the first success.
        self.succCnt = 0     # total successes across all hosts

    def getHost(self):
        """return val [ip: string, port: int]"""
        # round robin: take the head and move it to the back
        host = self.nodeList.pop(0)
        self.nodeList.append(host)
        return host

    def report(self, ip, port):
        """ip: string, port: int"""
        # NOTE(review): entries are never added to succAccum here, so only
        # hosts pre-registered by other code are counted — confirm intent.
        if (ip, port) in self.succAccum:
            self.succAccum[(ip, port)] += 1
            self.succCnt += 1
|
# -*- coding:utf-8 -*-
# 最长单调递增子序列
# O(n^2)
def lis_len(seq_a, seq_b, dp, flag):
    """LIS via LCS: seq_b is seq_a sorted non-decreasingly, and the longest
    non-decreasing subsequence of seq_a equals LCS(seq_a, seq_b).

    dp[i][j] holds the LCS length of seq_a[:i] and seq_b[:j]; flag records
    which neighbour each cell came from so the subsequence can be rebuilt
    by print_lis.  O(n^2) time and space.
    """
    len_a = len(seq_a)
    len_b = len(seq_b)
    for i in range(1, len_a + 1):
        for j in range(1, len_b + 1):
            if seq_a[i - 1] == seq_b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                flag[i][j] = 1  # came from the upper-left diagonal
            elif dp[i - 1][j] > dp[i][j - 1]:
                dp[i][j] = dp[i - 1][j]
                flag[i][j] = 2  # came from above
            else:
                dp[i][j] = dp[i][j - 1]
                flag[i][j] = 3  # came from the left
    return dp[len_a][len_b]
# O(n^2)
def lis_len2(seq_a, dp2):
    """O(n^2) LIS: dp2[i] is the length of the longest non-decreasing
    subsequence ending at element i, i.e.
    dp2[i] = max{dp2[j] | j < i, a[j] <= a[i]} + 1; answer = max(dp2)."""
    dp2[0] = 1
    for i in range(1, len(seq_a)):
        best = 0
        for j in range(i):
            # Only predecessors not larger than seq_a[i] can be extended.
            if seq_a[j] <= seq_a[i] and dp2[j] > best:
                best = dp2[j]
        dp2[i] = best + 1
    return max(dp2)
# O(n log n)
def lis_len3(seq_a, dp3, min_in_max):
    """O(n log n) LIS (non-decreasing).

    dp3[i] is the LIS length of the prefix seq_a[:i+1]; min_in_max[k-1]
    holds the smallest possible tail value among all subsequences of
    length k.  When seq_a[i] is at least the current best tail it extends
    the LIS; otherwise a binary search finds which tail it can lower.
    min_in_max stays sorted, which is what makes the binary search valid.
    """
    len_a = len(seq_a)
    dp3[0] = 1
    min_in_max[0] = seq_a[0]
    for i in range(1, len_a):
        if seq_a[i] >= min_in_max[dp3[i - 1] - 1]:
            # Extends the longest subsequence found so far by one.
            dp3[i] = dp3[i - 1] + 1
            min_in_max[dp3[i] - 1] = seq_a[i]
        else:
            dp3[i] = dp3[i - 1]
            # Binary search for the first stored tail greater than seq_a[i]...
            low = 0
            high = dp3[i - 1] - 1
            mid = (low + high) // 2
            while low <= high:
                if seq_a[i] < min_in_max[mid]:
                    high = mid - 1
                else:
                    low = mid + 1
                mid = (low + high) // 2
            # ...and lower that tail to seq_a[i].
            min_in_max[low] = seq_a[i]
    return dp3[len(seq_a) - 1]
def print_lis2(seq, dp2):
    """Recover one LIS from the dp2 table filled by lis_len2.

    Scans from the right, taking each element whose dp2 value equals the
    next length still needed; the result is therefore back-to-front.
    """
    needed = max(dp2)
    picked = []
    for pos in range(len(seq) - 1, -1, -1):
        if not needed:
            break
        if dp2[pos] == needed:
            picked.append(seq[pos])
            needed -= 1
    return picked
def print_lis(seq_a, len_a, len_b, flag, lcs):
    """Rebuild the common subsequence from the flag table, back-to-front.

    Follows the direction markers left by lis_len: 1 = diagonal (take the
    element), 2 = up, 3 = left; stops at a border or an unfilled cell.
    Iterative rewrite of the original tail recursion.
    """
    while len_a and len_b:
        direction = flag[len_a][len_b]
        if direction == 0:
            break
        if direction == 1:
            lcs.append(seq_a[len_a - 1])  # table is one larger than the sequence
            len_a -= 1
            len_b -= 1
        elif direction == 2:
            len_a -= 1
        else:
            len_b -= 1
    return lcs
def main():
    """Read a sequence from stdin, then compute and print its longest
    (non-decreasing) increasing subsequence with all three methods."""
    a = input("输入序列a:")
    seq_a = list(map(int, a.split()))
    seq_b = sorted(seq_a)  # sorted copy: LIS(a) == LCS(a, sorted(a))
    len_a = len(seq_a)
    len_b = len(seq_b)
    # DP tables are (n+1) x (n+1); row/column 0 is the empty-prefix border.
    dp = [[0]*(len_a + 1) for i in range(len_a + 1)]
    dp2 = [0]*len_a
    dp3 = [0]*len_a
    min_in_max = [0] * len_a
    flag = [[0]*(len_a + 1) for i in range(len_a + 1)]
    print("序列长度为:", lis_len(seq_a, seq_b, dp, flag))
    lcs = []
    # print_lis builds the subsequence back-to-front, hence the [::-1].
    print("序列为:", print_lis(seq_a, len_a, len_b, flag, lcs)[::-1])
    print("法二序列长度为:", lis_len2(seq_a, dp2))
    print("法二序列为:", print_lis2(seq_a, dp2)[::-1])
    print("法三序列长度为:", lis_len3(seq_a, dp3, min_in_max))
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
import os
import time
import re
import html
from urllib.parse import urlencode
from urllib.request import Request,urlopen
from slackclient import SlackClient
from slacker import Slacker
from apiclient import discovery
from googleapiclient.http import *
from oauth2client import client, tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# constants
SCOPES = 'https://www.googleapis.com/auth/drive'  # full Drive access scope
TEAMDRIVE_ID = '{TEAMDRIVE_ID}'  # placeholder: fill in the real team-drive id
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'
BOT_ID = os.environ.get("BOT_ID")
# NOTE(review): if BOT_ID is unset in the environment, this concatenation
# raises TypeError at import time.
AT_BOT = "<@" + BOT_ID + ">"
SEARCH_COMMAND = "DOCFIND" # start command in slack
slack = Slacker(os.environ.get("SLACK_BOT_TOKEN"))  # Web API client (posting)
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))  # RTM client (reading)
GA_URL = "https://www.google-analytics.com/collect"
GA_TID = "{GA_TID}"  # Google Analytics tracking id placeholder
DOC_NAME_SUFFIX = "_DOC"  # only documents named *_DOC are searched
def post_to_channel(message, channel):
    """Post `message` to the Slack `channel`, speaking as the bot user."""
    chat = slack.chat
    chat.post_message(channel, message, as_user=True)
def get_credentials():
    """Load cached OAuth2 credentials from ~/.credentials, running the
    interactive flow (and caching the result) when missing or invalid."""
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:
            # Fallback path for old oauth2client versions without run_flow.
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def search_paragraph_in_document(keyword, channel):
    """Search every *_DOC Google Doc in the team drive for `keyword` and post
    each matching section (with a heading deep link) back to the channel."""
    # BUG FIX: httplib2 was used below but never imported at module level,
    # which raised NameError on the first search.
    import httplib2
    post_to_channel('Starts the search.... Please wait a moment :)', channel)
    # Fire a Google Analytics hit recording the searched keyword.
    details = urlencode({'v': '1', 'tid': GA_TID, 'cid': '{CID}', 't': '{T}',
                         'ec': '{EC}', 'ea': '{EA}', 'el': keyword})
    details = details.encode('UTF-8')
    url = Request(GA_URL, details)
    url.add_header("User-Agent","Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.29 Safari/525.13")
    urlopen(url).read()
    count = 0
    result = convert_keyword_unicode(html.unescape(keyword))
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    results = service.files().list(corpora="teamDrive", includeTeamDriveItems=True, supportsTeamDrives=True, teamDriveId=TEAMDRIVE_ID).execute()
    items = results.get('files', [])
    # Renamed from `list`/`map`, which shadowed the builtins; also initialized
    # up front so an empty drive no longer risks an unbound name below.
    doc_entries = []
    if not items:
        print('No files found.')
    else:
        print('Files:')
        for item in items:
            if item['name'].find(DOC_NAME_SUFFIX) != -1:
                item['name'] = item['name'].split(DOC_NAME_SUFFIX)[0].encode('utf-8')
                item['id'] = item['id'].encode('utf-8')
                doc_entries.append({'name': item['name'], 'id': item['id']})
    for entry in doc_entries:
        file_id = entry['id'].decode('utf-8')
        results = service.files().export(fileId=file_id,
                                         mimeType="text/html").execute(http=http)
        results = results.decode("utf-8")  # export returns bytes; decode before regex
        # Alternation captures either a heading id or a span of body text.
        p = re.compile((
            u'<h[0-9] id="(.*?)"|<span style="color:#\d+;font-weight:\d+;text-decoration:none;vertical-align:baseline;font-size:\d+pt;font-family:"Malgun Gothic";font-style:normal">(.*?)<\/span>'),
            re.UNICODE)
        findAll = p.findall(results)
        content = ''
        head_id = ''
        for i in findAll:
            if i[0]:
                # New heading: flush the previous section if it matched.
                if content and re.search(u'' + result, content):
                    count += 1
                    answer = "*•" + str(count) + " search results* // Author. _" + entry['name'].decode('utf-8') + "_\n" + "```" + html.unescape(
                        content) + "\n\n" + "[Link] " + "https://docs.google.com/document/d/" + file_id + "/edit#heading=" + head_id + "```"
                    post_to_channel(answer, channel)
                head_id = i[0]
                content = ''
            else:
                content += '\n' + i[1]
    # BUG FIX: the summary message was missing the space before 'search'.
    post_to_channel('A total of ' + str(count) + ' search results were found.', channel)
def convert_keyword_unicode(kword):
    """Turn Hangul syllables into HTML numeric entities ('&#NNNNN;') and
    HTML-escape every other character, matching the exported doc encoding."""
    hangul = re.compile(r'[가-힣]+')
    pieces = []
    for ch in kword:
        if hangul.search(ch):
            pieces.append('&#%d;' % ord(ch))
        else:
            pieces.append(html.escape(ch))
    return ''.join(pieces)
def parse_slack(msg):
    """Scan a batch of RTM events and answer any text message via slack_answer."""
    if not msg:
        return None
    for event in msg:
        # NOTE(review): `BOT_ID not in event` tests the event dict's *keys*,
        # not its text -- presumably meant to skip the bot's own messages;
        # confirm against the RTM event schema.
        if event and 'text' in event and BOT_ID not in event:
            channel = event['channel']
            reply = slack_answer(event['text'], channel)  # dispatch the command
            if reply:
                post_to_channel(reply, channel)
    return None
def slack_answer(txt, channel): # Have Condition
    """Handle one message: when it carries the DOCFIND command, validate the
    keyword and run the document search.

    Always returns None -- results are posted to the channel directly.
    (BUG FIX: the old trailing `return answer` was unreachable dead code
    referencing an undefined name; it has been removed.)
    """
    if txt.find(SEARCH_COMMAND) == -1:
        return None
    cmd = txt[7:]  # strip the 7-character 'DOCFIND' prefix
    # Reject keywords that are too short, contain lone Hangul jamo, or any
    # special character that would break the downstream search/regex.
    if (len(cmd) < 2 or re.search('[ㄱ-ㅎ]|[ㅏ-ㅣ]', cmd)
            or re.search('[`~!@#$%^&*_=+;:",./<>?]', cmd)
            or any(c in cmd for c in "-'[]{}()|\\")):
        post_to_channel('*Search keywords can only contain words that do not contain more than one special character.*', channel)
        return None
    search_paragraph_in_document(cmd, channel)
    return None
if __name__ == "__main__":
    # Poll the Slack RTM stream once a second and dispatch each event batch.
    if slack_client.rtm_connect():
        print("Connected!")
        while True:
            parse_slack(slack_client.rtm_read())
            time.sleep(1)
    else:
        print("Connection failed.")
|
import itertools
import json
import os
from collections import Counter, defaultdict
from glob import glob
from itertools import combinations
import Levenshtein
import editdistance
import numpy as np
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tqdm import tqdm
from batcher import Batcher
from utils import create_new_dir, ensure_dir
class EditDistanceParser:
    """Groups leaked passwords per email, orders each group along its shortest
    Hamiltonian path (most-similar neighbours), and buckets consecutive
    password pairs by their edit distance for later training data."""

    def __init__(self, output_dir):
        self.cache = defaultdict(set)                     # email -> set of passwords
        self.ed_to_password_list_map = defaultdict(list)  # edit distance -> [(a, b), ...]
        self.output_dir = output_dir

    def _finalize_cache(self):
        # For each user with more than one password, order the passwords so
        # consecutive ones are similar, then record each adjacent pair under
        # its edit distance.  NOTE(review): iteration order over the password
        # set is arbitrary, so results are not strictly deterministic.
        for key, password_list in self.cache.items():
            if len(password_list) > 1:
                shp = find_shortest_hamiltonian_path_in_complete_graph(password_list, debug=False)
                if len(shp) == 0:
                    continue # shortest_hamiltonian_path did not return well.
                edit_distances = []  # NOTE(review): accumulated but never read
                for a, b in zip(shp, shp[1:]):
                    ed = editdistance.eval(a, b)
                    edit_distances.append(ed)
                    self.ed_to_password_list_map[ed].append((a, b))

    def call(self, emails_passwords):
        # Accumulate passwords per email; the set dedupes repeats across files.
        for (email, password) in emails_passwords:
            self.cache[email].add(password.strip())

    def flush(self):
        # Drop all accumulated state (invoked at the end of persist()).
        self.cache = defaultdict(set)
        self.ed_to_password_list_map = defaultdict(list)

    def persist(self):
        """Append one CSV per edit distance under <output_dir>/edit-distances."""
        self._finalize_cache()
        for edit_distance in sorted(self.ed_to_password_list_map):
            output_dir = os.path.join(os.path.expanduser(self.output_dir), 'edit-distances')
            ensure_dir(output_dir)
            csv_file = os.path.join(output_dir, str(edit_distance) + '.csv')
            # Append mode: persist() may be called once per processed batch.
            with open(csv_file, encoding='utf8', mode='a') as w:
                password_pairs = self.ed_to_password_list_map[edit_distance]
                lines = [str(edit_distance) + Batcher.SEP + x[0] + Batcher.SEP + x[1] + '\n' for x in password_pairs]
                w.writelines(lines)
        self.flush()
def find_shortest_hamiltonian_path_in_complete_graph(passwords, debug=True):
    """Order `passwords` so the total edit distance along the ordering is
    minimal (shortest Hamiltonian path in the complete graph; NP-complete,
    solved by brute force here).

    Only orderings starting with a shortest password are considered, and
    inputs with more than 6 passwords return [] since the search is
    factorial in n.

    # passwords = ['hello1', 'hello22', 'h@llo22', 'h@llo223']
    # print(find_shortest_hamiltonian_path_in_complete_graph(passwords, False))
    """
    if len(passwords) > 6:  # 6! = 720 orderings is the practical limit
        return []
    pair_distance = {}
    for left, right in combinations(passwords, 2):
        dist = editdistance.eval(left, right)
        if debug:
            print(left, right, dist)
        pair_distance[(left, right)] = dist
        pair_distance[(right, left)] = dist
    # Keep only permutations whose first element is among the shortest.
    candidate_orders = [perm for perm in itertools.permutations(passwords)
                        if len(perm[0]) == min([len(p) for p in perm])]
    solutions_by_cost = {}
    for order in candidate_orders:
        cost = sum(pair_distance[(a, b)] for a, b in zip(order, order[1:]))
        if debug:
            print(cost, order)
        solutions_by_cost.setdefault(cost, []).append(order)
    if debug:
        print(json.dumps(solutions_by_cost, indent=2))
    best_cost = min(solutions_by_cost.keys())
    if debug:
        print(best_cost)
    # Of the cheapest orderings, prefer the one starting with the shortest
    # password (assumed to be the "easiest" original password).
    best_orders = solutions_by_cost[best_cost]
    if debug:
        print(best_orders)
    winner = best_orders[int(np.argmin([len(order[0]) for order in best_orders]))]
    if debug:
        print(winner)
    return list(winner)
def preprocess(breach_compilation_folder, output_dir, max_num_files):
    """Walk the breach-compilation tree, extract email/password pairs from
    every file (optionally capped at `max_num_files`), and persist the
    edit-distance dataset into `output_dir`."""
    bc_dir = os.path.expanduser(breach_compilation_folder)
    all_filenames = glob(bc_dir + '/**/*', recursive=True)
    # Keep regular files only.  (The old code filtered with os.path.isfile
    # twice -- once via filter() and again in the comprehension.)
    all_filenames = [f for f in all_filenames if os.path.isfile(f)]
    create_new_dir(output_dir)
    print(f'Found {len(all_filenames)} files in {bc_dir}.')
    if max_num_files is not None:
        all_filenames = all_filenames[:max_num_files]
    edp = EditDistanceParser(output_dir)
    with tqdm(all_filenames) as bar:
        for current_filename in bar:
            # errors='ignore': leak dumps are full of invalid byte sequences.
            with open(current_filename, 'r', encoding='utf8', errors='ignore') as r:
                lines = r.readlines()
            emails_passwords = extract_emails_and_passwords(lines)
            edp.call(emails_passwords)
    edp.persist()
    print('DONE. SUCCESS.')
    print(f'OUTPUT: Dataset was generated at: {output_dir}.')
def build_encodings(training_filename):
    # Thin wrapper: delegates vocabulary/encoding construction to Batcher.
    Batcher.build(training_filename)
def gen_large_chunk_single_thread(sed: Batcher, inputs_, targets_, chunk_size):
    """Sample a training chunk and build (x, op, char) tensors for password
    pairs at edit distance 1.

    For each sampled (input, target) pair the first Levenshtein edit op is
    one-hot encoded (insert/replace/delete) together with the character it
    touches; the chunk is then split 90/10 into train/validation.
    """
    # make it simple now.
    random_indices = np.random.choice(a=range(len(inputs_)), size=chunk_size, replace=True)
    sub_inputs = inputs_[random_indices]
    sub_targets = targets_[random_indices]
    n = len(sub_inputs)
    x = np.zeros((chunk_size, sed.ENCODING_MAX_PASSWORD_LENGTH, sed.chars_len()), dtype=float)
    y2_char = np.zeros(shape=(n, sed.chars_len()))  # one-hot edited character
    y1_op = np.zeros(shape=(n, 3))                  # one-hot edit operation
    for i in range(n):
        # ed = 1
        # Only the first edit op is used; the assert checks that its source
        # and destination positions coincide (a single-position edit).
        edit_dist = Levenshtein.editops(sub_inputs[i], sub_targets[i])[0]
        op = edit_dist[0]
        assert edit_dist[1] == edit_dist[2]
        if op == 'insert':
            op_encoding = [1, 0, 0]
            char_changed = sub_targets[i][edit_dist[1]]
        elif op == 'replace':
            op_encoding = [0, 1, 0]
            char_changed = sub_targets[i][edit_dist[1]]
        elif op == 'delete':
            op_encoding = [0, 0, 1]
            char_changed = sub_inputs[i][edit_dist[1]]
        else:
            raise Exception('Unsupported op.')
        y1_op[i] = op_encoding
        y2_char[i] = sed.encode(char_changed, 1)[0]
    for i, element in enumerate(sub_inputs):
        x[i] = sed.encode(element)
    # 90/10 train/validation split of the sampled chunk.
    split_at = int(len(x) * 0.9)
    (x_train, x_val) = x[:split_at], x[split_at:]
    (y_train_1, y_val_1) = y1_op[:split_at], y1_op[split_at:]
    (y_train_2, y_val_2) = y2_char[:split_at], y2_char[split_at:]
    val_sub_targets = sub_targets[split_at:]
    val_sub_inputs = sub_inputs[split_at:]
    return x_train, y_train_1, y_train_2, x_val, y_val_1, y_val_2, val_sub_inputs, val_sub_targets
def predict_top_most_likely_passwords_monte_carlo(sed: Batcher, model, row_x, n, mc_samples=10000):
    """Sample `mc_samples` decoded passwords and keep the `n` most frequent.

    Returns a dict_keys view ordered by descending sample frequency.
    """
    drawn = predict_top_most_likely_passwords(sed, model, row_x, mc_samples)
    frequency = Counter(drawn)
    return dict(frequency.most_common(n)).keys()
def predict_top_most_likely_passwords(sed: Batcher, model, row_x, n):
    """Draw `n` candidate passwords by sampling each character position from
    the model's per-position probability distribution, then decoding."""
    p = model.predict(row_x, batch_size=32, verbose=0)[0]
    most_likely_passwords = []
    for i in range(n):
        # of course should take the edit distance constraint.
        # One categorical draw per character slot, flattened to an index vector.
        pa = np.array([np.random.choice(a=range(sed.ENCODING_MAX_SIZE_VOCAB + 2), size=1, p=p[j, :])
                       for j in range(sed.ENCODING_MAX_PASSWORD_LENGTH)]).flatten()
        most_likely_passwords.append(sed.decode(pa, calc_argmax=False))
    return most_likely_passwords
def get_model(hidden_size, num_chars):
    """LSTM encoder with two softmax heads: the edit operation
    (insert/replace/delete) and the character involved in it."""
    inp = Input(shape=(Batcher.ENCODING_MAX_PASSWORD_LENGTH, num_chars))
    hidden = LSTM(hidden_size)(inp)
    hidden = Dropout(0.2)(hidden)
    hidden = Dense(Batcher.ENCODING_MAX_PASSWORD_LENGTH * num_chars, activation='relu')(hidden)
    hidden = Dropout(0.2)(hidden)
    # ADD, DEL, SUB
    op_head = Dense(3, activation='softmax', name='op')(hidden)
    char_head = Dense(num_chars, activation='softmax', name='char')(hidden)
    return Model(inputs=[inp], outputs=[op_head, char_head])
def train(hidden_size, batch_size):
    """Endless training loop: sample a chunk, take one gradient step, and
    every 100 steps print validation metrics plus example predictions."""
    batcher = Batcher()
    print('Data:')
    print(batcher.inputs.shape)
    print(batcher.targets.shape)
    model = get_model(hidden_size, batcher.chars_len())
    # Joint loss over the two heads: edit operation and edited character.
    model.compile(loss={'op': 'categorical_crossentropy', 'char': 'categorical_crossentropy'},
                  optimizer='adam', metrics=['accuracy'])
    model.summary()
    for grad_step in range(int(1e9)):
        ppp = gen_large_chunk_single_thread(batcher, batcher.inputs, batcher.targets, chunk_size=batch_size)
        x_train, y_train_1, y_train_2, x_val, y_val_1, y_val_2, val_sub_inputs, val_sub_targets = ppp
        model.train_on_batch(x=x_train, y=[y_train_1, y_train_2])
        print(dict(zip(model.metrics_names, model.test_on_batch(x=x_val, y=[y_val_1, y_val_2]))))
        # guess = c_table.decode(preds[0], calc_argmax=False)
        # top_passwords = predict_top_most_likely_passwords_monte_carlo(model, row_x, 100)
        # p = model.predict(row_x, batch_size=32, verbose=0)[0]
        # p.shape (12, 82)
        # [np.random.choice(a=range(82), size=1, p=p[i, :]) for i in range(12)]
        # s = [np.random.choice(a=range(82), size=1, p=p[i, :])[0] for i in range(12)]
        # c_table.decode(s, calc_argmax=False)
        # Could sample 1000 and take the most_common()
        if grad_step % 100 == 0:
            # Decode predicted op/char for the validation rows and print up
            # to 100 examples for manual inspection.
            row_x, password_target, password_input = x_val, val_sub_targets, val_sub_inputs
            ops, char = model.predict(row_x, verbose=0)
            predicted_chars = list(batcher.decode(char))
            ops = ops.argmax(axis=1)
            decoded_op = []
            for op in ops:
                if op == 0:
                    decoded_op.append('insert')
                elif op == 1:
                    decoded_op.append('replace')
                else:
                    decoded_op.append('delete')
            for i, (x, y, pc, po) in enumerate(zip(password_input, password_target, predicted_chars, decoded_op)):
                print('x :', x)
                print('y :', y)
                print('predict char :', pc)
                print('predict op :', po)
                print('---------------------')
                if i >= 100:
                    break
    # if correct == guess:
    # if correct.strip() in [vv.strip() for vv in top_passwords]:
    #     print(colors.ok + '☑' + colors.close)
    # else:
    #     print(colors.fail + '☒' + colors.close)
    # print('top :', ', '.join(top_passwords))
    # print('---')
def extract_emails_and_passwords(txt_lines):
    """Extract (email, password) tuples from leak-dump lines.

    A line qualifies when it contains '@' and either ':' or ';' as the
    email/password separator (':' wins when both are present).  Lines that
    do not qualify are skipped; the password keeps any inner separators
    because the split is bounded to one.
    """
    emails_passwords = []
    for txt_line in txt_lines:
        try:
            if '@' not in txt_line:  # no email address -> not a credential line
                continue
            if ':' in txt_line:  # '_---madc0w---_@live.com:iskandar89
                separator = ':'
            elif ';' in txt_line:  # '_---lelya---_@mail.ru;ol1391ga
                separator = ';'
            else:
                continue
            email, password = txt_line.strip().split(separator, 1)
            emails_passwords.append((email, password))
        except ValueError:
            # Narrowed from a blanket `except Exception: pass` -- only the
            # tuple unpack of a malformed line can realistically fail here.
            pass
    return emails_passwords
|
def dogs_age(age):
    """Convert a dog's age to human years: 10.5 human years per dog year for
    the first two years, then 4 per year after that."""
    if age > 2:
        return 2 * 10.5 + (age - 2) * 4
    return age * 10.5
# Quick demo: a 5-year-old dog is 33 human years.
print(dogs_age(5))
|
# import plotly.offline as py
import sys
import codecs
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import pickle as pk
import numpy as np
from pandas import read_csv
from sklearn.cluster import KMeans
file_to_save_vector = 'results/multivariate/cpu/5minutes/bnn_multivariate_uber_ver2/vector_representation/18-3-10-8-16_4-1-3-2-16-1-0.75.csv'
def read_trained_data(file_trained_data):
    """Load the saved vector representations as a 2-D numpy array
    (one row per vector, no header in the CSV)."""
    frame = read_csv(file_trained_data, header=None, index_col=False, engine='python')
    return frame.values
# Load the vectors once at import time; plot labels are just row indices.
vectors = read_trained_data(file_to_save_vector)
number_of_vecs = len(vectors)
all_vec = []
for i in range(number_of_vecs):
    # print (vectors[i])
    all_vec.append(i)
def main():
    """Project the vectors to 2-D with t-SNE, cluster with KMeans, and plot
    the coloured scatter with centroids and index labels."""
    # embeddings_file = sys.argv[1]
    # wv, vocabulary = load_embeddings(embeddings_file)
    tsne = TSNE(n_components=2, random_state=0)
    np.set_printoptions(suppress=True)
    Y = tsne.fit_transform(vectors[:number_of_vecs,:])
    kmeans = KMeans(n_clusters=12)
    kmeans.fit(Y)
    # Points coloured by cluster assignment; centroids drawn in black.
    plt.scatter(Y[:, 0], Y[:, 1],c=kmeans.labels_, cmap='rainbow')
    plt.scatter(kmeans.cluster_centers_[:,0] ,kmeans.cluster_centers_[:,1], color='black')
    for label, x, y in zip(all_vec, Y[:, 0], Y[:, 1]):
        plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
    plt.show()
if __name__ == '__main__':
main() |
'''
Created on Dec 24, 2010
@author: jason
'''
import string
import bson
import logging
import tornado.web
import datetime
import simplejson
import MongoEncoder.MongoEncoder
import pymongo
class BaseHandler(tornado.web.RequestHandler):
    """Common request plumbing: database access and cookie-based user lookup.

    The secure 'user' cookie stores the user's ObjectId; the helpers below
    resolve it against the users collection on every call (no caching).
    """

    @property
    def db(self):
        #=======================================================================
        # if not hasattr(self, '_db'):
        #     self._db = asyncmongo.Client(pool_id='mytestdb', host='127.0.0.1', port=27017, maxcached=10, maxconnections=50, dbname='TripShare')
        #=======================================================================
        #return self._db
        return self.application.syncdb

    @property
    def syncdb(self):
        # Synchronous pymongo handle shared by the whole application.
        return self.application.syncdb

    def get_current_user(self):
        """Resolve the signed-in user's document, or None when logged out."""
        #self.clear_all_cookies()
        user_id = self.get_secure_cookie("user")
        if not user_id: return None
        #return tornado.escape.json_decode(user_id)
        return self.syncdb.users.find_one({'user_id': bson.ObjectId(str(user_id))})

    def get_current_user_friends(self):
        """Return the signed-in user's friends list, or None when logged out."""
        user_id = self.get_secure_cookie("user")
        if not user_id: return None
        return self.syncdb.users.find_one({'user_id': bson.ObjectId(str(user_id))})["friends"]

    def get_current_username(self):
        """Return the signed-in user's username, or None when logged out."""
        user_id = self.get_secure_cookie("user")
        if not user_id: return None
        return self.syncdb.users.find_one({'user_id': bson.ObjectId(str(user_id))})["username"]

    def get_db_user_id(self):
        # NOTE(review): no cookie check here -- with no cookie this becomes
        # ObjectId("None") and raises; confirm callers only use it logged in.
        user_id = self.get_secure_cookie("user")
        return bson.ObjectId(str(user_id))

    def _on_response(self, response, error):
        # Generic async callback: any DB error becomes a 500.
        if error:
            raise tornado.web.HTTPError(500)
        #self.render('template', full_name=response['full_name'])
        #logging.info('response: +++++++++++++++++++++++=' + str(response))

    def _on_action(self, response, error):
        # Same as _on_response, kept separate for action-style callbacks.
        if error:
            raise tornado.web.HTTPError(500)
        #self.render('template', full_name=response['full_name'])
        #logging.info('_on_action: +++++++++++++++++++++++=' + str(response))

    def _get_trips(self, response, error):
        """Async callback: render the browse page with the fetched trips."""
        if error:
            raise tornado.web.HTTPError(500)
        friends = self.current_user['friends']
        self.render("browsetrip.html", trips=response, friends = friends)
class BrowseHandler(BaseHandler):
    """Lists the current user's ten most recent trips (login required)."""
    #@tornado.web.asynchronous
    def get(self):
        if not self.current_user:
            #trips = self.db.query("SELECT trip_id, slug, title FROM trips ORDER BY published DESC LIMIT 10")
            #self.db.trips.find({}, limit = 10, sort = [('published', -1)], callback=self._get_trips)
            self.redirect('/login')
        else:
            # NOTE(review): the query result is discarded and the render call
            # is commented out, so logged-in users currently get an empty
            # response; confirm whether this handler is still in use.
            #trips = self.db.query("SELECT trip_id, slug, title FROM trips where owner_id = %s ORDER BY published DESC LIMIT 10", self.current_user.user_id)
            self.syncdb.trips.find({'owner_id':self.get_db_user_id()}).limit(10).sort('published', -1)
            #self.db.trips.find({}, limit = 10, sort = [('published', -1)], callback=self._get_trips)
            #self.render("browsetrip.html", trips=trips, token = self.xsrf_token)
class EntryHandler(BaseHandler):
    """Shows a single trip page by slug, with an edit view for logged-in users.

    Python 2 code: uses `unicode` for the serialized destination data.
    """
    # Class-level scratch state shared between get() and _trip_entry().
    singletrip = None
    trips = None
    #@tornado.web.asynchronous
    def get(self, slug):
        if self.current_user:
            self.singletrip = self.syncdb.trips.find_one({'slug':slug})
            if not self.singletrip: raise tornado.web.HTTPError(404)
            #self.db.trips.find({'owner_id':self.get_db_user_id()}, limit = 10, sort = [('published', -1)], callback=self._trip_entry)
            response = self.syncdb.trips.find({'owner_id':self.get_db_user_id()}).limit(10).sort('published', -1)
            # Collect all members across groups, most recently added first.
            users = []
            for group in self.singletrip['groups']:
                for user in group['members']:
                    users.append(user)
            users.reverse()
            self.render("Trips/edittrip.html", current_user=self.current_user, expense = self.singletrip['expense'], users = users ,group_id=self.singletrip['groups'][0]['group_id'] , singletrip=self.singletrip, dest_place = unicode(simplejson.dumps(self.singletrip['groups'][0]['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)),token = self.xsrf_token, trips=response)
        else:
            # Read-only page for anonymous visitors.
            # NOTE(review): unlike the branch above, there is no 404 check
            # here -- a bad slug makes the 'groups' lookup below fail; confirm.
            self.singletrip = self.syncdb.trips.find_one({'slug':slug})
            users = []
            for group in self.singletrip['groups']:
                for user in group['members']:
                    users.append(user)
            users.reverse()
            self.render("Trips/edittrip_nologin.html", users = users ,group_id=self.singletrip['groups'][0]['group_id'] , singletrip=self.singletrip, dest_place = unicode(simplejson.dumps(self.singletrip['groups'][0]['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)))
    def _trip_entry(self, response, error):
        """Async-callback variant of the logged-in render path."""
        if error:
            raise tornado.web.HTTPError(500)
        users = []
        for group in self.singletrip['groups']:
            for user in group['members']:
                users.append(user)
        users.reverse()
        self.render("Trips/edittrip.html", current_user=self.current_user, expense = self.singletrip['expense'], users = users ,group_id=self.singletrip['groups'][0]['group_id'] , singletrip=self.singletrip, dest_place = unicode(simplejson.dumps(self.singletrip['groups'][0]['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)),token = self.xsrf_token, trips=response)
class TripPageHandler(BaseHandler):
    """Infinite-scroll endpoint: writes rendered trip snippets joined by '||||'."""
    def get(self, _section, _index):
        section = _section
        index = string.atoi(_index)  # Python 2 idiom for int(_index)
        # NOTE(review): pages skip by 3 but each query fetches 10, so
        # consecutive pages overlap; confirm the intended page size.
        skip_number = index*3
        if section == "newtrips":
            latest_trip_ids = self.syncdb.trips.find({"privacy": {"$ne": 2}}).skip(skip_number).limit(10).sort("published", pymongo.DESCENDING)
        elif section =="mytrips":
            # NOTE(review): identical to "newtrips" -- presumably meant to
            # filter by the current user; confirm.
            latest_trip_ids = self.syncdb.trips.find({"privacy": {"$ne": 2}}).skip(skip_number).limit(10).sort("published", pymongo.DESCENDING)
        elif section == "hottrips":
            t = datetime.datetime.now()
            latest_trip_ids = self.syncdb.trips.find({"end_date": {"$gt": t}}).skip(skip_number).limit(10).sort("members_count", pymongo.DESCENDING)
        elif section == "endtrips":
            t = datetime.datetime.now()
            latest_trip_ids = self.syncdb.trips.find({"end_date": {"$lt": t}}).skip(skip_number).limit(10).sort("published", pymongo.DESCENDING)
        # NOTE(review): an unknown section leaves latest_trip_ids unbound and
        # the next line raises NameError (HTTP 500); an else raising 404
        # would be safer.
        if latest_trip_ids.count() > 0:
            for latest_trip_id in latest_trip_ids:
                # Flag whether the current user already joined the first group.
                latest_trip_id['check_join'] = False
                members = latest_trip_id['groups'][0]['members']
                if self.current_user:
                    for member in members:
                        if member['user_id'] == self.current_user['user_id']:
                            latest_trip_id['check_join'] = True
                            # print("true")
                            break
                #temp_dumps = json.dumps(latest_trip_id, cls=MongoEncoder.MongoEncoder)
                #_latest_trip_id = json.loads(temp_dumps)
                #_latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id) + "||||"
                #self.write(_latest_trip_id['html'])
                self.write(self.render_string("Module/trip.html", trip = latest_trip_id) + "||||")
|
#!/usr/bin/env python
import argparse
from math import sqrt
import pandas as pd
import numpy as np
from numpy.linalg import svd
from util import read_vector_file, openfile
from matrix import norm2_matrix
def main():
    """Concatenate the lexical and association spaces and store their SVD
    factors in svd.npz."""
    parser = argparse.ArgumentParser(
        description='Computes an LSA model correlating associations and lexsem model.')
    parser.add_argument('--lexspace', '-l', metavar='FILE',
                        help='The input lexical space.')
    parser.add_argument('--assoc', '-a', metavar='FILE',
                        help='The input association space.')
    args = parser.parse_args()
    lex = norm2_matrix(read_vector_file(openfile(args.lexspace)))
    assoc = norm2_matrix(read_vector_file(openfile(args.assoc)))
    # BUG FIX: pd.concat expects a sequence of frames as its first argument;
    # passing `assoc` positionally made it the `axis` and raised TypeError.
    together = pd.concat([lex, assoc], keys=("lex", "assoc"))
    # DataFrame.as_matrix was removed in pandas 1.0; to_numpy is the
    # supported replacement.
    org_matrix = together.to_numpy()
    U, S, V = svd(org_matrix)
    np.savez("svd.npz", U, S, V)
# Run only when invoked as a script.
if __name__ == '__main__':
    main()
|
import sys
sys.path.insert(0,'../sib/')
import sib
import csv
import os
import numpy as np
import pandas as pd
import sklearn.metrics as mm
import sys
import argparse
# sir_inference imports
from sir_model import FastProximityModel, patient_zeros_states
from ranking import csr_to_list
import os.path
from os import path
# --- Simulation parameters ---------------------------------------------------
factor=1  # divide N and N_patient_zero by this for quick test runs
N=int(500000/factor)  # population size
#N=50000
## new try with 100 spreaders
N_patient_zero = int(200/factor);  # initially infected individuals
#N_patient_zero = 5;
lamb = 0.05;  # transmission rate
mu = 0.02;  # recovery rate
scale=1.0; # Easy Case
T=100;  # number of simulated time steps
parser = argparse.ArgumentParser(description="Run a simulation and don't ask.")
parser.add_argument('-s', type=int, default=1, dest="seed", help='seed')
args = parser.parse_args()
print(f"arguments {args}")
seed=args.seed
print("Generate network with N=%d T=%d scale=%.1f default lambda=%.2f seed=%d..."%(N,T,scale,lamb,seed), flush=True)
initial_states = patient_zeros_states(N, N_patient_zero)
model = FastProximityModel(N, scale, mu, lamb, initial_states)
location="networks"
# NOTE(review): the failure message below says "log file" but the check is
# for the output *directory* -- the wording is misleading.
if path.isdir(location) : print("Will save in "+location)
else :
    print("log file not found. was looking for: \n "+location+"\n Bye Bye")
    sys.exit()
model.run(T=T, print_every=2)
# model.get_counts().plot(
#     title=f"N_patient_zero={N_patient_zero} lamb={lamb:.2f} mu={mu:.2f}"
# );
print("Saving transmissions...", flush=True)
# One CSV row per (t, i, j, lamb) transmission edge of the simulated run.
logfile="interactions_proximity_N%dK_s%.1f_T%d_lamb%.2f_s%d.csv"%(N/1000,scale,T,lamb,seed)
with open(location+"/"+logfile, 'w', newline='') as csvfile:
    fieldnames = ['t', 'i', 'j', 'lamb']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for t, A in enumerate(model.transmissions):
        for i, j, lamb in csr_to_list(A):
            writer.writerow(dict(t=t, i=i, j=j, lamb=lamb))
print("Bye-Bye")
# -*- coding:utf-8 -*-
# Utils module: useful functions to build exploits
from ropgenerator.semantic.Engine import search, LMAX
from ropgenerator.Constraints import Constraint, RegsNotModified, Assertion, Chainable, StackPointerIncrement
from ropgenerator.semantic.ROPChains import ROPChain
from ropgenerator.Database import QueryType
from ropgenerator.exploit.Scanner import getFunctionAddress, findBytes
from ropgenerator.IO import verbose, string_bold, string_special, string_ropg
import itertools
import ropgenerator.Architecture as Arch
#### Pop values into registers
POP_MULTIPLE_LMAX = 6000
def popMultiple(args, constraint=None, assertion=None, clmax=None, optimizeLen=False):
    """
    args is a list of pairs (reg, value)
    OR a list of triples (reg, value, comment)
    reg is a reg UID
    value is an int
    Creates a chain that pops values into regs

    Tries every ordering of `args`, since setting one register constrains
    the gadgets usable for the next.  Returns the first chain found within
    `clmax` gadgets, or None.
    """
    if( clmax is None ):
        clmax = POP_MULTIPLE_LMAX
    elif( clmax <= 0 ):
        return None
    constr = Constraint() if constraint is None else constraint
    a = Assertion() if assertion is None else assertion
    perms = itertools.permutations(args)
    for perm in perms:
        clmax_tmp = clmax
        res = ROPChain()
        constr_tmp = constr
        # BUG FIX: `pop` was previously unbound when `args` was empty,
        # so the `if( pop )` check below raised NameError.
        pop = None
        for arg in perm:
            comment = arg[2] if len(arg) == 3 else None
            if( optimizeLen ):
                pop = search(QueryType.CSTtoREG, arg[0], arg[1], constr_tmp, a, n=1,
                             clmax=clmax_tmp, CSTtoREG_comment=comment, optimizeLen=True)
            else:
                pop = search(QueryType.CSTtoREG, arg[0], arg[1], constr_tmp, a, n=1,
                             clmax=clmax_tmp, CSTtoREG_comment=comment)
            if( not pop ):
                break
            clmax_tmp -= len(pop[0])
            # If Reached max length, abandon this ordering
            if( clmax_tmp < 0 ):
                pop = None
                break
            res.addChain(pop[0])
            # The register just set must not be clobbered by later pops.
            constr_tmp = constr_tmp.add(RegsNotModified([arg[0]]))
        if( pop ):
            return res
    return None
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 22:33:58 2019
@author: CKK1
"""
import numpy as np
import h5py
import cv2
import matplotlib.pyplot as plt
from skimage import morphology
from matplotlib import cm
def scale_array(dat, out_range=(-1, 1)):
    """Linearly rescale `dat` (array-like) so its min/max map onto `out_range`.

    A constant array maps everywhere to out_range[0]; this now includes the
    all-zero array, which previously raised ZeroDivisionError.
    """
    domain = [np.amin(dat), np.amax(dat)]

    def interp(x):
        # x in [0, 1] -> linear blend between the two ends of out_range.
        return out_range[0] * (1.0 - x) + out_range[1] * x

    def uninterp(x):
        span = domain[1] - domain[0]
        if span != 0:
            b = span
        elif domain[1] != 0:
            # Constant non-zero input: (x - domain[0]) * domain[1] is all
            # zeros, so everything lands on out_range[0] (preserved quirk
            # of the original code).
            b = 1.0 / domain[1]
        else:
            # BUG FIX: a constant all-zero input used to divide by zero.
            b = 1.0
        return (x - domain[0]) / b

    return interp(uninterp(dat))
def colour_scale_contour(contour, colour=[33,140,0]):
    """Replicate a single-channel contour into 3 channels, scale to 0-255,
    and repaint pure-white pixels with `colour`."""
    stacked = cv2.merge((contour, contour, contour))
    scaled = scale_array(stacked, out_range=(0, 255))
    white_pixels = (scaled == [255, 255, 255]).all(axis=2)
    scaled[np.where(white_pixels)] = colour
    return scaled
def get_output_image(ct_sc, pet_sc, mask, true_mask):
    """Blend the scaled PET and CT slices into one RGB image.

    Contours are extracted and coloured below, but the overlay steps are
    commented out, so only the fused PET+CT blend is actually returned.
    """
    # 1. Get contour positions of mask:
    cont, hierchy = cv2.findContours(mask.astype(np.uint8, copy=False), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    true_cont, true_hierchy = cv2.findContours(true_mask.astype(np.uint8, copy=False), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # 2. Get contours:
    contour = cv2.drawContours(mask, cont, -1, (0,166,33), 2)
    true_contour = cv2.drawContours(true_mask, true_cont, -1, (166,0,50), 2)
    # 3. add color to the contours:
    rgb_contour = colour_scale_contour(contour, colour=[0,160,33])
    rgb_truecontour = colour_scale_contour(true_contour, colour=[166,0,50])
    # 4. make the gray-image rgb:
    rgbct = cv2.cvtColor(ct_sc.astype(np.uint8, copy=True), cv2.COLOR_GRAY2BGR)
    rgbpet = cv2.cvtColor(pet_sc.astype(np.uint8, copy=True), cv2.COLOR_GRAY2BGR)
    # 5. Overlap mask and image:
    petct = cv2.addWeighted(rgbpet, 1, rgbct, 1, 0.0)
    # final_ct = cv2.addWeighted(rgb_contour.astype(np.uint8, copy=False), 1, rgbct, 1, 0.0)
    # final_ct = cv2.addWeighted(rgb_truecontour.astype(np.uint8, copy=False), 1, final_ct, 1, 0.0)
    #
    # final_pet = cv2.addWeighted(rgb_contour.astype(np.uint8, copy=False), 1, rgbpet, 1, 0.0)
    # final_pet = cv2.addWeighted(rgb_truecontour.astype(np.uint8, copy=False), 1, final_pet, 1, 0.0)
    return petct
def get_outline(mask, color, width=2):
    """Return an RGBA image containing only the `width`-pixel outline of
    `mask`, painted with `color` (an RGB triple in [0, 1])."""
    base = mask.astype(bool).astype(float)
    grown = base.copy()
    for _ in range(width):
        grown = morphology.dilation(grown)
    ring = (grown - base).astype(float)
    rgba = np.stack([ring] * 4, axis=-1)
    rgba[..., :-1] = color  # RGB channels carry the requested colour
    rgba[..., -1] = ring    # alpha: opaque only on the ring itself
    return rgba.squeeze()
def get_fused_output(ct, pet, mask, true_mask):
    """Matplotlib figure: CT in gray with a hot-colormap PET overlay, plus
    predicted (red) and true (green) mask outlines."""
    pet = pet.astype('float32')
    pet = pet**(1/5)  # compress the dynamic range so faint uptake stays visible
    pet_img = cm.hot(pet)
    pet_img[..., -1] = pet  # PET intensity doubles as the overlay's alpha
    mask_outline = get_outline(mask, [1, 0, 0])
    true_mask_outline = get_outline(true_mask, [0, 1, 0])
    fig, sub = plt.subplots(figsize=(5, 5))
    sub.axis('off')
    sub.imshow(ct, cmap='gray')
    sub.imshow(pet_img)
    sub.imshow(mask_outline)
    sub.imshow(true_mask_outline)
    return fig
def get_fused(petct, mask, true_mask, id, performance, s=' '):
    """Overlay the mask images on the fused PET/CT slice, annotated with the
    slice label `s` and the Dice score `performance`.

    `id` (patient id) is only referenced by the commented-out title line.
    """
    fig, sub = plt.subplots(figsize=(5, 5))
    sub.axis('off')
    sub.imshow(petct/2)  # dim the background so the outlines stand out
    sub.imshow(mask)
    sub.imshow(true_mask)
    #sub.set_title(f' {id}, slice {s}, Dice: {performance:.3f}', fontsize=18, fontname='Palatino Linotype')
    sub.text(5,230,f'slice {s}', fontsize=18, fontname='Palatino Linotype', color='white')
    sub.text(155,230,f'Dice: {performance:.3f}', fontsize=18, fontname='Palatino Linotype', color='white')
    return fig
def process_results(out_path, pat_ids, names, slice_ids, val=True):
    """Render one fused PET/CT figure per predicted slice in a results file.

    Args:
        out_path: path to an HDF5 file holding 'images', 'prediction',
            'masks' and 'dice' datasets under a 'val' or 'test' group.
        pat_ids: patient id per slice, in dataset order.
        names (list) : list of the filenames selected for slice-labelled output
        slice_ids (list) : list of slice ids, parallel to *names*
        val: read the 'val' group when True, otherwise 'test'.

    Side effects: writes 'pet.png'/'ct.png' scratch files in the CWD and
    saves one PDF per slice (slice-labelled ones in '.', the rest in
    '..\\resulting_images').
    """
    group = 'val' if val else 'test'
    with h5py.File(out_path, 'r') as f:
        # `Dataset.value` was removed in h5py 3.0; `[()]` reads the full array.
        imgs = f[group]['images'][()]
        masks = f[group]['prediction'][()]
        targets = f[group]['masks'][()]
        dice = f[group]['dice'][()]
    names_arr = np.array(names)  # hoisted: was rebuilt on every iteration
    for ind, mask in enumerate(masks[:, :, :, 0]):
        print(ind)
        ct = imgs[ind, :, :, 0]
        pet = imgs[ind, :, :, 1]
        true_mask = targets[ind]
        ct_sc = scale_array(ct, out_range=(0, 255))
        pet_sc = scale_array(pet, out_range=(0, 255))
        mask_outline = get_outline(mask, [0, 174/255, 255/255])
        true_mask_outline = get_outline(true_mask, [0, 148/255, 0])
        # Round-trip through PNG files to apply the colormaps before blending.
        plt.imsave('pet.png', pet_sc, cmap='hot')
        plt.imsave('ct.png', ct_sc, cmap='gray')
        pet = plt.imread('pet.png')
        ct = plt.imread('ct.png')
        petct = cv2.addWeighted(pet, 1, ct, 1, 0.0)
        fname = f'{group}_fused{ind}.png'
        if fname in names:
            # Slice index for the label comes from the parallel list.
            s = slice_ids[np.where(names_arr == fname)[0][0]]
            fig = get_fused(petct, mask_outline, true_mask_outline,
                            pat_ids[ind], dice[ind], s=s)
            fig.savefig(f'.\\{group}_fused_{pat_ids[ind]}_{s}.pdf')
        else:
            fig = get_fused(petct, mask_outline, true_mask_outline,
                            pat_ids[ind], dice[ind])
            fig.savefig(f'..\\resulting_images\\{group}_fused{ind}.pdf')
        # Close each figure so matplotlib doesn't accumulate open figures.
        plt.close(fig)
#%%
# Render one DPCT slice of patient M007 with GTV (blue) and CTV (red) outlines.
from scipy.io import loadmat as spio

# Raw strings: '\M', '\D', '\R', '\G' are invalid escape sequences in
# ordinary string literals (SyntaxWarning in modern Python).
CT = spio(r'.\Data_070119\M007\Base\DPCT.mat', squeeze_me=True)['LVA_images']
CTV = spio(r'.\Data_070119\M007\ROI\CTV.mat', squeeze_me=True)['LVA_images']
GTV = spio(r'.\Data_070119\M007\ROI\GTV.mat', squeeze_me=True)['LVA_images']

ind = 28
# Transpose each slice into the row/column orientation used for display.
ct = CT[:, :, ind].transpose()
ctv = CTV[:, :, ind].transpose()
gtv = GTV[:, :, ind].transpose()

ct_sc = scale_array(ct, out_range=(0, 255))  # NOTE(review): unused below
gtv_mask_outline = get_outline(gtv, [0, 174/255, 255/255])
ctv_mask_outline = get_outline(ctv, [203/255, 101/255, 104/255])

fig, sub = plt.subplots(figsize=(5, 5))
sub.axis('off')
sub.imshow(ct, cmap='gray')
sub.imshow(gtv_mask_outline)
sub.imshow(ctv_mask_outline)
# plt.show() takes no figure argument; the original plt.show(fig) passed
# the figure as the `block` flag.
plt.show()
fig.savefig('CTV_GTV_DPCT.png')
#%%
# Driver cell: pair previously rendered val/test PNGs with hand-picked
# slice indices and regenerate the labelled fused figures.
import glob
from natsort import natsorted

# Manually chosen slice index per output figure, parallel to the sorted
# val*/test* file lists below.
val_slice_ids = [3, 7, 11, 12, 4, 9, 14, 16, 20, 22,
                 23, 3, 21, 28, 31, 33, 36, 41, 42, 44,
                 47, 25, 27, 29]
test_slice_ids = [3, 7, 2, 4, 7, 12, 10, 14, 16, 21,
                  29, 22, 27, 29, 10, 19, 23, 28, 34,
                  37, 16, 24, 28, 30]

# zip truncates to the shorter glob result, matching the original pairing.
tests = []
vals = []
for test_name, val_name in zip(natsorted(glob.glob('test*.png')),
                               natsorted(glob.glob('val*.png'))):
    tests.append(test_name)
    vals.append(val_name)

with h5py.File('../data_070119_MRI_final.h5', 'r') as o:
    # `Dataset.value` was removed in h5py 3.0; `[()]` reads the full array.
    pat_ids_val = o['validation']['pat_ids'][()].astype(str)
    pat_ids_test = o['test']['pat_ids'][()].astype(str)
#%%
# Raw string: the original literal mixed '/' and '\' and contained invalid
# escape sequences ('\l', '\P', '\o'); the byte value is unchanged.
out_path = r'../code\logs\PETCT_petct_windowing_c32_w220_aug_basic_f1_adam_03\outputs_4998.h5'
process_results(out_path, pat_ids_val, vals, val_slice_ids, val=True)
process_results(out_path, pat_ids_test, tests, test_slice_ids, val=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.