text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# -----------------------------------------------------------------------
# rebook.py
# Author: Sophie Li, Connie Xu, Jayson Wu
# -----------------------------------------------------------------------
import os
from sys import stderr
from flask import Flask, request, make_response, redirect, url_for, jsonify
from flask import render_template, session
from database import Database
from getbookinfo import getBookInfo
import book
from getuserinfo import getUserInfo
from sendemail import sendBuyerPurchaseEmail, sendSellerPurchaseEmail, sendBuyerCancelEmail, sendSellerCancelEmail
from CASClient import CASClient
from database_files import booklistings, bookbag, purchases, listingphotos
import time
from dotenv import load_dotenv
import cloudinary
from cloudinary.uploader import upload, destroy
from cloudinary.utils import cloudinary_url
# -----------------------------------------------------------------------
app = Flask(__name__, template_folder='templates')
# Session-signing key. Generated by os.urandom(16).
# NOTE(review): a secret key committed to source control should be rotated
# and loaded from the environment instead — confirm before deploying.
app.secret_key = b'\xcdt\x8dn\xe1\xbdW\x9d[}yJ\xfc\xa3~/'
# app.config['SESSION_TYPE'] = 'filesystem'
# app.config['SESSION_FILE_DIR'] = '.'
# Load .env so the Cloudinary credentials below are available via os.getenv.
load_dotenv()
cloudinary.config(
    cloud_name=os.getenv('CLOUD_NAME'),
    api_key=os.getenv('API_KEY'),
    api_secret=os.getenv('API_SECRET')
)
# destroy('jkg4gp7njzd44jnajha7')
# -----------------------------------------------------------------------
# NON-ROUTE FUNCTIONS
# -----------------------------------------------------------------------
# checks if the user completed an action, such as post, edit, delete,
# in the current browser session, and signals what alert to send to the frontend
def alert(alert):
    """Pop and return the one-time alert flag named *alert* from the session.

    Returns the stored value when the flag is present, otherwise False.
    Popping guarantees each alert fires at most once per completed action.
    """
    # session.pop already returns the default when the key is absent,
    # so the original `if alert in session` membership check was redundant.
    return session.pop(alert, False)
# Alters database fields to indicate that a book has been bought,
# thereby completing purchase; is protected from URL hacking
def buy(listing_id, version):
    """Mark listing *listing_id* as pending-purchase by the CAS user.

    Updates the listing row, the buyer's bookbag, and the purchases table,
    then emails both parties. Renders buy.html on success and error.html
    on any database/email failure.
    """
    username = CASClient().authenticate().rstrip()
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        # update booklistings to show pending status instead of active
        seller_status = 'pending'
        listinginfo = [listing['isbn'], listing['seller'], listing['condition'], listing['price'],
                       seller_status, listing['description'], listing['coursenum'], listing['title'],
                       listing['authors'], listing['time_created'], listing_id]
        booklistings.update_row(database, listinginfo)
        # update buyer's bookbag
        if bookbag.contains(database, username, listing_id):
            bookbag.delete_row(database, [username, listing_id])
        # for everyone else, change status to taken
        bookbag.change_status(database, listing_id, 'taken')
        # for all other incomplete transactions with the same listing_id remove it
        # transactions.delete_row(database, [listing_id])
        # add to purchases
        purchases.insert_row(
            database, [listing_id, username, 'pending'])
        database.disconnect()
        buyer = getUserInfo(username)
        seller = getUserInfo(listing['seller'])
        # sends emails to buyers and seller
        sendBuyerPurchaseEmail(buyer, seller, listing)
        sendSellerPurchaseEmail(buyer, seller, listing)
    except Exception as e:
        # NOTE(review): the connection is not disconnected on this path —
        # confirm Database cleans up on garbage collection.
        print("buy(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    html = render_template('buy.html', username=username,
                           email=seller.getEmail(), listing=listing, version=version)
    response = make_response(html)
    return response
# -----------------------------------------------------------------------
# ROUTE FUNCTIONS
# -----------------------------------------------------------------------
# Renders the landing page, which is accessible to those without CAS logins
@app.route('/', methods=['GET'])
def landing():
    """Serve the public landing page (no CAS login required)."""
    return make_response(render_template('landing.html'))
# Renders the homepage of the website, which has a big search bar and post listing button
@app.route('/home', methods=['GET'])
# @app.route('/index', methods=['GET'])
def home():
    """Serve the authenticated homepage with the search bar and post button."""
    username = CASClient().authenticate().rstrip()
    return make_response(render_template('index.html', username=username))
# -----------------------------------------------------------------------
# Renders the Buyer Bookbag page
@app.route('/buyerbookbag', methods=['GET'])
def buyerbookbag():
    """Render the buyer's bookbag, grouped into active/pending/completed tabs.

    Pops any one-time alert flags set by a preceding delete/cancel/remove/add
    request and passes them to the template. Listings classified 'other'
    (e.g. removed by the seller) are purged from the bookbag and reported
    via listingRemovedFromBookbag_success.
    """
    username = CASClient().authenticate().rstrip()
    # send a one-time alert if redirected from a delete, cancel, remove, or add request
    page = ''
    removebookbag_success = alert('removebookbag')
    cancel_success = alert('cancelbookbag')
    remove_purchase_success = alert('removepurchase')
    add_success = alert('addbookbag')
    # open the tab matching the action the user just completed
    if cancel_success:
        page = 'pending'
    elif remove_purchase_success:
        page = 'completed'
    listingRemovedFromBookbag_success = False
    # obtain books in bookbag from database
    try:
        database = Database()
        database.connect()
        results = database.get_buyer_bookbag(username)
        # classify the books in the buyer bookbag by their status
        active = []
        pending = []
        completed = []
        removed = []
        for listing_id in results['other']:
            listing = booklistings.get(database, listing_id, username)
            print(listing)  # debug output
            if bookbag.contains(database, username, listing_id):
                removed.append(listing['title'])
                listingRemovedFromBookbag_success = True
                bookbag.delete_row(database, [username, listing_id])
        for listing_id in results['active']:
            listing = booklistings.get(database, listing_id, username)
            active.append(listing)
        for listing_id in results['pending']:
            listing = booklistings.get(database, listing_id, username)
            # expose the seller's email so the buyer can coordinate pickup
            email = getUserInfo(listing['seller']).getEmail()
            listing['email'] = email
            pending.append(listing)
        for listing_id in results['completed']:
            listing = booklistings.get(database, listing_id, username)
            email = getUserInfo(listing['seller']).getEmail()
            listing['email'] = email
            completed.append(listing)
        database.disconnect()
    except Exception as e:
        print("buyerbookbag(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    html = render_template('buyerbookbag.html',
                           username=username,
                           active=active,
                           pending=pending,
                           completed=completed,
                           removebookbag_success=removebookbag_success,
                           cancelbookbag_success=cancel_success,
                           removepurchase_success=remove_purchase_success,
                           listingRemovedFromBookbag_success=listingRemovedFromBookbag_success,
                           removed=removed,
                           add_success=add_success,
                           page=page)
    response = make_response(html)
    return response
# Renders the Buyer Bookbag page when the tabs on the page are clicked
@app.route('/buyerbookbagajax/<state>', methods=['POST'])
def buyerbookbagajax(state):
    """AJAX variant of buyerbookbag(): render the fragment for tab *state*."""
    username = CASClient().authenticate().rstrip()
    # the requested tab becomes the active page
    page = state
    listingRemovedFromBookbag_success = False
    # obtain books in bookbag from database
    try:
        database = Database()
        database.connect()
        results = database.get_buyer_bookbag(username)
        # classify the books in the buyer bookbag by their status
        active = []
        pending = []
        completed = []
        removed = []
        for listing_id in results['other']:
            listing = booklistings.get(database, listing_id, username)
            print(listing)  # debug output
            if bookbag.contains(database, username, listing_id):
                removed.append(listing['title'])
                listingRemovedFromBookbag_success = True
                bookbag.delete_row(database, [username, listing_id])
        for listing_id in results['active']:
            listing = booklistings.get(database, listing_id, username)
            active.append(listing)
        for listing_id in results['pending']:
            listing = booklistings.get(database, listing_id, username)
            # expose the seller's email so the buyer can coordinate pickup
            email = getUserInfo(listing['seller']).getEmail()
            listing['email'] = email
            pending.append(listing)
        for listing_id in results['completed']:
            listing = booklistings.get(database, listing_id, username)
            email = getUserInfo(listing['seller']).getEmail()
            listing['email'] = email
            completed.append(listing)
        database.disconnect()
    except Exception as e:
        print("buyerbookbagajax(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    html = render_template('buyerbookbagajax.html',
                           username=username,
                           active=active,
                           pending=pending,
                           completed=completed,
                           removed=removed,
                           listingRemovedFromBookbag_success=listingRemovedFromBookbag_success,
                           page=page)
    response = make_response(html)
    return response
# -----------------------------------------------------------------------
# Renders the Seller Station page
@app.route('/sellerstation', methods=['GET'])
def sellerstation():
    """Render the seller station, grouped into active/pending/completed tabs.

    Pops any one-time alert flags set by a preceding delete/cancel/remove/
    confirm request and opens the tab matching the completed action.
    """
    username = CASClient().authenticate().rstrip()
    # send a one-time alert if redirected from a delete, cancel, remove, or confirm request
    delete_success = alert('deletelisting')
    cancel_success = alert('cancellistingseller')
    remove_success = alert('removesellinghistory')
    confirm_success = alert('confirmtransaction')
    page = ''
    if cancel_success:
        page = 'pending'
    elif remove_success or confirm_success:
        page = 'completed'
    # obtain books in seller station from database
    try:
        database = Database()
        database.connect()
        results = database.get_seller_station(username)
        # classify the books in the seller station by their status
        active = []
        pending = []
        completed = []
        for listing_id in results['active']:
            listing = booklistings.get(database, listing_id, username)
            active.append(listing)
        for listing_id in results['pending']:
            listing = booklistings.get(database, listing_id, username)
            # attach the buyer's identity/email so the seller can coordinate
            buyer = purchases.get_buyer(database, listing_id)
            email = getUserInfo(buyer).getEmail()
            listing['buyer'] = buyer
            listing['email'] = email
            pending.append(listing)
        for listing_id in results['completed']:
            listing = booklistings.get(database, listing_id, username)
            buyer = purchases.get_buyer(database, listing_id)
            email = getUserInfo(buyer).getEmail()
            listing['buyer'] = buyer
            listing['email'] = email
            completed.append(listing)
        database.disconnect()
    except Exception as e:
        print("sellerstation(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    html = render_template('sellerstation.html',
                           username=username,
                           active=active,
                           pending=pending,
                           completed=completed,
                           delete_success=delete_success,
                           cancel_success=cancel_success,
                           remove_success=remove_success,
                           confirm_success=confirm_success,
                           page=page)
    response = make_response(html)
    return response
# Loads the Seller Station page when the tabs on the page are clicked
@app.route('/sellerstationajax/<state>', methods=['POST'])
def sellerstationajax(state):
    """AJAX variant of sellerstation(): render the fragment for tab *state*."""
    username = CASClient().authenticate().rstrip()
    # the requested tab becomes the active page
    page = state
    # obtain books in seller station from database
    try:
        database = Database()
        database.connect()
        results = database.get_seller_station(username)
        # classify the books in the seller station by their status
        active = []
        pending = []
        completed = []
        for listing_id in results['active']:
            listing = booklistings.get(database, listing_id, username)
            active.append(listing)
        for listing_id in results['pending']:
            listing = booklistings.get(database, listing_id, username)
            # attach the buyer's identity/email so the seller can coordinate
            buyer = purchases.get_buyer(database, listing_id)
            email = getUserInfo(buyer).getEmail()
            listing['buyer'] = buyer
            listing['email'] = email
            pending.append(listing)
        for listing_id in results['completed']:
            listing = booklistings.get(database, listing_id, username)
            buyer = purchases.get_buyer(database, listing_id)
            email = getUserInfo(buyer).getEmail()
            listing['buyer'] = buyer
            listing['email'] = email
            completed.append(listing)
        database.disconnect()
    except Exception as e:
        print("sellerstationajax(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    html = render_template('sellerstationajax.html',
                           active=active,
                           pending=pending,
                           completed=completed,
                           username=username,
                           page=page)
    response = make_response(html)
    return response
# -----------------------------------------------------------------------
# Helper function which accesses the database to find listings corresponding to the search query
def _searchdatabase(query, sort, filter_condition, filter_price):
    """Return listing dicts matching *query* under the given sort/filters.

    Searches twice: once with the query as typed (LIKE wildcards escaped so
    they match literally) and once with spaces stripped so course numbers
    like "COS 333" also match "COS333". The second result set is appended
    with duplicates skipped.
    """
    username = CASClient().authenticate().rstrip()
    # Escape SQL LIKE wildcards so user input is matched literally.
    # (Raw strings: the original '\%' relied on an unrecognized escape.)
    queryPS = query.replace('%', r'\%').replace('_', r'\_')
    queryToUse = queryPS.replace(" ", "")
    database = Database()
    database.connect()
    results = database.search(queryPS, username, sort=sort,
                              filter_condition=filter_condition, filter_price=filter_price)
    # Reuse the same connection for the course-number search; the original
    # opened a second connection and leaked the first one.
    results_coursenum = database.search(queryToUse, username, sort=sort,
                                        filter_condition=filter_condition, filter_price=filter_price)
    for listing_id in results_coursenum:
        if listing_id not in results:
            results.append(listing_id)
    result_listings = []
    for listing_id in results:
        result_listings.append(booklistings.get(
            database, listing_id, username))
    database.disconnect()
    return result_listings
# Renders the search page
@app.route('/search', methods=['GET'])
def search():
    """Render the full search page for the query/sort/filter GET parameters."""
    username = CASClient().authenticate().rstrip()
    # collect the search query, sort option, and filter parameters
    args = request.args
    query = args.get("search")
    sort = args.get("sortOptions")
    lower = args.get("lower-bound")
    upper = args.get("upper-bound")
    filter_price = [lower, upper]
    new = args.get("new")
    good = args.get("good")
    fair = args.get("fair")
    poor = args.get("poor")
    if sort is None:
        sort = "time_most_recent"
    # no condition checkbox set means "no condition filter"
    conditions = [new, good, fair, poor]
    filter_condition = conditions if any(conditions) else None
    if query is None:
        query = ''
    # fetch search results from database
    try:
        result_listings = _searchdatabase(
            query, sort, filter_condition, filter_price)
    except Exception as e:
        print("search(): " + str(e), file=stderr)
        return make_response(render_template('error.html', username=username))
    html = render_template('search.html',
                           username=username,
                           query=query,
                           results=result_listings,
                           lower=lower,
                           upper=upper,
                           sort=sort,
                           new=new,
                           good=good,
                           fair=fair,
                           poor=poor,
                           filter_condition=filter_condition)
    return make_response(html)
# Renders the search page when the filters are clicked
@app.route('/searchfilters', methods=['POST'])
def searchfilters():
    """Render just the search-results fragment for the AJAX filter request."""
    username = CASClient().authenticate().rstrip()
    query = request.args.get("search")
    sort = request.args.get("sortOptions")
    lower = request.args.get("lower-bound")
    upper = request.args.get("upper-bound")
    filter_price = [lower, upper]
    new = request.args.get("new")
    good = request.args.get("good")
    fair = request.args.get("fair")
    poor = request.args.get("poor")
    if sort == 'undefined':
        sort = None
    # translate the 'true'/'false' checkbox strings into condition labels
    flags = ((new, 'New'), (good, 'Good'), (fair, 'Fair'), (poor, 'Poor'))
    filter_condition = [label for flag, label in flags if flag == 'true']
    if not filter_condition:
        filter_condition = None
    if query is None:
        query = ''
    # fetch search results from database
    try:
        result_listings = _searchdatabase(
            query, sort, filter_condition, filter_price)
    except Exception as e:
        print("searchfilters(): " + str(e), file=stderr)
        return make_response(render_template('errortext.html'))
    return make_response(render_template('searchresults.html',
                                         results=result_listings))
# -----------------------------------------------------------------------
# Renders the isbn search page, which is the initial step in posting a listing
@app.route('/postlisting', methods=['GET'])
def postListing():
    """Serve the ISBN-entry page that begins the post-listing flow."""
    username = CASClient().authenticate().rstrip()
    return make_response(render_template('postlisting.html', username=username))
# Renders the final page of the post process, a form where users input the listing information
@app.route('/postlisting2', methods=['GET', 'POST'])
def postListing2():
    """Serve the listing-details form, pre-filled from the ISBN lookup."""
    username = CASClient().authenticate().rstrip()
    isbn = request.args.get("isbn")
    # NOTE: this local shadows the module-level `book` import inside this view
    book = getBookInfo(isbn)
    html = render_template('postlisting2.html', book=book,
                           username=username, isbn=isbn)
    return make_response(html)
# Posts the listing to the database, making it available and visible to buyers
@app.route('/post2', methods=['POST'])
def post2():
    """Create a new listing from the posting form and redirect to its page.

    Uploads any attached photos to Cloudinary, stores their thumbnail URLs,
    and sets a one-time 'post' alert for the listing page.
    """
    username = CASClient().authenticate().rstrip()
    # get information from the posting form
    isbn = request.form["isbn"]
    book = getBookInfo(isbn)
    price = request.form["price"]
    # normalize course numbers, e.g. "cos 333" -> "COS333"
    coursenum = request.form["coursenum"].replace(" ", "").upper()
    info = request.form["info"]
    condition = request.form["conditionRadios"]
    uploaded_files = request.files.getlist("file")
    title = book.getTitle()
    author = book.getAuthorString()
    seller_status = "active"
    input1 = [isbn, username, condition, price,
              seller_status, info, coursenum, title,
              author, time.time()]
    # add newly created listing to database
    try:
        database = Database()
        database.connect()
        listing_id = booklistings.insert_row(database, input1)
        for file_to_upload in uploaded_files:
            if file_to_upload:
                upload_result = upload(file_to_upload)
                thumbnail_url, options = cloudinary_url(
                    upload_result['public_id'],
                    format="jpg",
                    crop="fill")
                listingphotos.insert_row(
                    database, [upload_result['public_id'], listing_id, thumbnail_url])
        database.disconnect()
    except Exception as e:
        print("post2(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    session['post'] = True
    return redirect(url_for('show_listing', listing_id=listing_id))
# -----------------------------------------------------------------------
# Deletes listing with listing_id
@app.route('/delete_listing/<listing_id>', methods=['POST'])
def delete_listing(listing_id):
    """Soft-delete an active listing: destroy its photos and mark it 'removed'."""
    username = CASClient().authenticate().rstrip()
    try:
        database = Database()
        database.connect()
        # flag the delete for a one-time alert on the seller station page
        session['deletelisting'] = True
        listing = booklistings.get(database, listing_id, username)
        # only active listings may be deleted (guards against URL hacking)
        if listing['seller_status'] != 'active':
            database.disconnect()
            session['deletelisting'] = False
            html = render_template('error.html', username=username)
            response = make_response(html)
            return response
        if listing['photos'] is not None:
            for photo in listing['photos']:
                # photo[0] is the Cloudinary public_id
                destroy(photo[0])
                listingphotos.delete_row(database, [photo[0]])
        # update booklistings to show removed status instead of active
        seller_status = 'removed'
        listinginfo = [listing['isbn'], listing['seller'], listing['condition'], listing['price'],
                       seller_status, listing['description'], listing['coursenum'], listing['title'],
                       listing['authors'], listing['time_created'], listing_id]
        booklistings.update_row(database, listinginfo)
        # update bookbag
        bookbag.change_status(database, listing_id, "removed")
        database.disconnect()
    except Exception as e:
        print("delete_listing(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('sellerstation'))
# -----------------------------------------------------------------------
# Displays listing details for listing with listing_id
@app.route('/listing/<listing_id>', methods=['POST', 'GET'])
def show_listing(listing_id):
    """Render the detail page for *listing_id*, or nolisting.html if gone.

    Pops one-time 'post'/'updatelisting' alerts so the template can show
    a success banner exactly once after a post or an edit.
    """
    # check if there are any alert messages if this request follows a post or update
    post_success = alert('post')
    update_success = alert('updatelisting')
    username = CASClient().authenticate().rstrip()
    # retrieve listing details from database
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        buyer = purchases.get_buyer(database, listing_id)
        listing['email'] = ''
        listing['buyer'] = buyer
        if buyer is not None:
            email = getUserInfo(buyer).getEmail()
            listing['email'] = email
        database.disconnect()
        # BUG FIX: the original used `len(listing) is 0` — identity
        # comparison with an int literal is unreliable; use == instead.
        if len(listing) == 0 or listing['seller_status'] == 'removed':
            html = render_template('nolisting.html', username=username)
            response = make_response(html)
        else:
            book = getBookInfo(listing['isbn'])
            email = getUserInfo(listing['seller']).getEmail()
            html = render_template('listing.html', book=book,
                                   username=username, email=email, listing=listing,
                                   post_success=post_success,
                                   update_success=update_success)
            response = make_response(html)
        return response
    except Exception as e:
        print("show_listing(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
# -----------------------------------------------------------------------
# Renders the edit listing form for the listing with the specified listing_id
@app.route('/edit/<listing_id>', methods=['POST'])
def edit(listing_id):
    """Render the edit form for *listing_id*; nolisting.html if not active."""
    username = CASClient().authenticate().rstrip()
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        database.disconnect()
    except Exception as e:
        print("edit(): " + str(e), file=stderr)
        return make_response(render_template('error.html', username=username))
    # only active listings may be edited
    if listing['seller_status'] != 'active':
        return make_response(render_template('nolisting.html', username=username))
    book = getBookInfo(listing['isbn'])
    html = render_template('edit.html', listing=listing,
                           book=book, username=username)
    return make_response(html)
# Updates the database with the new, edited listing information for the listing with listing_id
@app.route('/update_listing/<listing_id>', methods=['POST'])
def update_listing(listing_id):
    """Apply the edit-form changes to *listing_id* and redirect to its page.

    Only active listings may be updated. If the user cleared the images or
    uploaded new ones, the old Cloudinary photos are destroyed first.
    """
    username = CASClient().authenticate().rstrip()
    # flag in the session that an update happened, for the one-time alert
    session['updatelisting'] = True
    # retrieve updated fields from the update listing form
    price = request.form["price"]
    coursenum = request.form["coursenum"].replace(" ", "").upper()
    info = request.form["info"]
    condition = request.form["conditionRadios"]
    clearimages = request.form["clearimages"]
    uploaded_files = request.files.getlist("file")
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        if listing['seller_status'] != 'active':
            database.disconnect()
            session['updatelisting'] = False
            print("can't update a non-active listing", file=stderr)
            html = render_template('error.html', username=username)
            response = make_response(html)
            return response
        # update the database row in place
        update = [listing['isbn'], username, condition, price,
                  listing['seller_status'], info, coursenum, listing['title'],
                  listing['authors'], listing['time_created']]
        update.append(listing_id)
        booklistings.update_row(database, update)
        # BUG FIX: the original compared strings with `is not ""`, which
        # tests identity rather than equality; use != for a reliable check.
        if clearimages != "":
            for photo in listing['photos']:
                destroy(photo[0])
                listingphotos.delete_row(database, [photo[0]])
        # replace the existing photos with any newly uploaded files
        if uploaded_files[0]:
            for photo in listing['photos']:
                destroy(photo[0])
                listingphotos.delete_row(database, [photo[0]])
            for file_to_upload in uploaded_files:
                if not file_to_upload:
                    break
                upload_result = upload(file_to_upload)
                thumbnail_url, options = cloudinary_url(
                    upload_result['public_id'],
                    format="jpg",
                    crop="fill")
                listingphotos.insert_row(
                    database, [upload_result['public_id'], listing_id, thumbnail_url])
        database.disconnect()
    except Exception as e:
        print("update_listing(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('show_listing', listing_id=listing_id))
# -----------------------------------------------------------------------
# Adds the listing with listing_id to the buyer's bookbag
@app.route('/add_to_bookbag/<listing_id>', methods=['POST'])
def add_to_bookbag(listing_id):
    """Insert *listing_id* into the authenticated user's bookbag as active."""
    username = CASClient().authenticate().rstrip()
    buyer = username
    # flag the add in the session so the bookbag page shows a one-time alert
    session['addbookbag'] = True
    try:
        database = Database()
        database.connect()
        bookbag.insert_row(
            database, [buyer, listing_id, 'active'])
        database.disconnect()
    except Exception as e:
        print("add_to_bookbag(): " + str(e), file=stderr)
        return make_response(render_template('error.html', username=username))
    return redirect(url_for('buyerbookbag'))
# Removes the listing with listing_id from the buyer's bookbag
@app.route('/remove_from_bookbag/<listing_id>', methods=['POST'])
def remove_from_bookbag(listing_id):
    """Delete *listing_id* from the authenticated user's bookbag."""
    # store in session that there is a remove request to generate alerts
    session['removebookbag'] = True
    username = CASClient().authenticate().rstrip()
    buyer = username
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        # only active listings can be removed from the bookbag
        if listing['seller_status'] != 'active':
            session['removebookbag'] = False
            database.disconnect()
            html = render_template('error.html', username=username)
            response = make_response(html)
            return response
        bookbag.delete_row(database, [buyer, listing_id])
        database.disconnect()
    except Exception as e:
        print("remove_from_bookbag(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('buyerbookbag'))
# -----------------------------------------------------------------------
# Handles payment method for listing with listing_id - checks what payment method the user chose, and routes accordingly
@app.route('/buypage/<listing_id>', methods=['POST'])
def buypage(listing_id):
    """Dispatch the purchase of *listing_id* by the chosen payment method."""
    version = request.form['version']
    if version == "0":
        return buy(listing_id, version)
    # BUG FIX: the original fell through and returned None (an HTTP 500)
    # for any unrecognized payment version; render the error page instead.
    username = CASClient().authenticate().rstrip()
    return make_response(render_template('error.html', username=username))
# -----------------------------------------------------------------------
# After buyer makes purchase, allows buyer to cancel transaction.
@app.route('/cancel/<listing_id>', methods=['POST'])
def cancel(listing_id):
    """Buyer-side cancel: revert a pending purchase of *listing_id* to active.

    Removes the purchase row, restores the listing/bookbag statuses, and
    emails both parties about the cancellation.
    """
    username = CASClient().authenticate().rstrip()
    # store in session that a transaction was cancelled for alerts
    session['cancelbookbag'] = True
    try:
        database = Database()
        database.connect()
        # reset listing status to active in booklistings
        listing = booklistings.get(database, listing_id, username)
        # only pending transactions can be cancelled (guards URL hacking)
        if listing['seller_status'] != 'pending':
            session['cancelbookbag'] = False
            html = render_template('error.html', username=username)
            response = make_response(html)
            database.disconnect()
            return response
        # remove row from purchases
        purchases.delete_row(database, [listing_id])
        # update booklistings
        seller_status = 'active'
        listinginfo = [listing['isbn'], listing['seller'], listing['condition'], listing['price'],
                       seller_status, listing['description'], listing['coursenum'], listing['title'],
                       listing['authors'], listing['time_created'], listing_id]
        booklistings.update_row(database, listinginfo)
        bookbag.change_status(database, listing_id, 'active')
        database.disconnect()
        buyer = getUserInfo(username)
        seller = getUserInfo(listing['seller'])
        sendBuyerCancelEmail(buyer, seller, listing)
        sendSellerCancelEmail(buyer, seller, listing)
    except Exception as e:
        print("cancel(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('buyerbookbag'))
# allow seller to cancel transaction
@app.route('/cancel_from_seller/<listing_id>', methods=['POST'])
def cancel_from_seller(listing_id):
    """Seller-side cancel: revert a pending purchase of *listing_id* to active.

    Looks up the buyer before deleting the purchase row so both parties
    can be emailed about the cancellation.
    """
    username = CASClient().authenticate().rstrip()
    # store in session that a transaction was cancelled for alerts
    session['cancellistingseller'] = True
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        # only pending transactions can be cancelled (guards URL hacking)
        if listing['seller_status'] != 'pending':
            database.disconnect()
            session['cancellistingseller'] = False
            html = render_template('error.html', username=username)
            response = make_response(html)
            return response
        # remove row from purchases (capture the buyer first for the emails)
        buyer = purchases.get_buyer(database, listing_id)
        purchases.delete_row(database, [listing_id])
        # reset listing status to active in booklistings
        seller_status = 'active'
        listinginfo = [listing['isbn'], listing['seller'], listing['condition'], listing['price'],
                       seller_status, listing['description'], listing['coursenum'], listing['title'],
                       listing['authors'], listing['time_created'], listing_id]
        booklistings.update_row(database, listinginfo)
        bookbag.change_status(database, listing_id, 'active')
        database.disconnect()
        buyer = getUserInfo(buyer)
        seller = getUserInfo(listing['seller'])
        sendBuyerCancelEmail(buyer, seller, listing)
        sendSellerCancelEmail(buyer, seller, listing)
    except Exception as e:
        print("cancel_from_seller(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('sellerstation'))
# -----------------------------------------------------------------------
# Allows a user (buyer) to confirm transaction
@app.route('/confirm_transaction/<listing_id>', methods=['POST'])
def confirm_transaction(listing_id):
    """Mark a pending purchase of *listing_id* as completed everywhere.

    NOTE(review): the comment above says buyer, but the route redirects to
    the seller station — confirm which actor actually triggers this.
    """
    username = CASClient().authenticate().rstrip()
    # store in session that a transaction was confirmed for alerts
    session['confirmtransaction'] = True
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        # only pending transactions can be confirmed (guards URL hacking)
        if listing['seller_status'] != 'pending':
            session['confirmtransaction'] = False
            database.disconnect()
            html = render_template('error.html', username=username)
            response = make_response(html)
            return response
        seller_status = 'completed'
        # update status to completed in purchases
        purchases.update_row(database, [seller_status, listing_id])
        # update listing status to completed in booklistings
        listinginfo = [listing['isbn'], listing['seller'], listing['condition'], listing['price'],
                       seller_status, listing['description'], listing['coursenum'], listing['title'],
                       listing['authors'], listing['time_created'], listing_id]
        booklistings.update_row(database, listinginfo)
        # update status to completed in everyone's bookbags
        bookbag.change_status(database, listing_id, 'completed')
        database.disconnect()
    except Exception as e:
        print("confirm_transaction(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('sellerstation'))
# -----------------------------------------------------------------------
# Removes a book from a user's purchase history
@app.route('/remove_from_purchase/<listing_id>', methods=['POST'])
def remove_from_purchase(listing_id):
    """Hide *listing_id* from the buyer's purchase history (status 'finished')."""
    # BUG FIX: the original never authenticated, so `username` was undefined
    # in the error path (NameError); authenticate like every other route.
    username = CASClient().authenticate().rstrip()
    # flag the removal in the session so the bookbag page shows a one-time alert
    session['removepurchase'] = True
    try:
        database = Database()
        database.connect()
        # BUG FIX: the update/disconnect originally ran outside the try
        # block, so a database error there escaped as an unhandled 500.
        purchases.update_row(database, ['finished', listing_id])
        database.disconnect()
    except Exception as e:
        print("remove_from_purchase(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('buyerbookbag'))
# Removes a book from a user's selling history
@app.route('/remove_from_selling_history/<listing_id>', methods=['POST'])
def remove_from_selling_history(listing_id):
    """Hide *listing_id* from the seller's history (seller_status 'finished')."""
    username = CASClient().authenticate().rstrip()
    # flag the removal in the session so the seller page shows a one-time alert
    session['removesellinghistory'] = True
    try:
        database = Database()
        database.connect()
        listing = booklistings.get(database, listing_id, username)
        # BUG FIX: the update/disconnect below originally ran outside the
        # try block, so a database error there escaped as an unhandled 500.
        seller_status = 'finished'
        listinginfo = [listing['isbn'], listing['seller'], listing['condition'], listing['price'],
                       seller_status, listing['description'], listing['coursenum'], listing['title'],
                       listing['authors'], listing['time_created'], listing_id]
        booklistings.update_row(database, listinginfo)
        database.disconnect()
    except Exception as e:
        print("remove_from_selling_history(): " + str(e), file=stderr)
        html = render_template('error.html', username=username)
        response = make_response(html)
        return response
    return redirect(url_for('sellerstation'))
# -----------------------------------------------------------------------
# Renders FAQ page
@app.route('/faq', methods=['GET'])
def faq():
    """Serve the FAQ page for the authenticated user."""
    username = CASClient().authenticate().rstrip()
    return make_response(render_template('faq.html', username=username))
# -----------------------------------------------------------------------
# Renders a 404 not found error page
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    username = CASClient().authenticate().rstrip()
    return make_response(render_template('notfound.html', username=username))
# Renders a generic error page when an database error occurs.
# Renders a generic error page when a database error occurs.
@app.route('/error', methods=['GET'])
def error():
    """Render the generic database-error page."""
    username = CASClient().authenticate().rstrip()
    return make_response(render_template('error.html', username=username))
# -----------------------------------------------------------------------
# Logs the user out of Princeton CAS and redirects to the rebook home page
@app.route('/logout', methods=['GET'])
def logout():
    """Log the user out of Princeton CAS.

    Fix: authenticate on the same CASClient instance that performs the
    logout, instead of constructing a second, throwaway client just to
    call authenticate().
    """
    casClient = CASClient()
    casClient.authenticate()
    casClient.logout()
|
import logging
def get_default_format():
    """Return the shared log-line format: timestamp, logger name, level, message."""
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    return fmt
def get_console_logger(name):
    """Return a named logger at INFO level that writes to the console.

    Fix: only attach the StreamHandler when the logger has none yet.
    The previous version added a fresh handler on every call, so calling
    it twice with the same name duplicated every log line.
    """
    logger = logging.getLogger(name)
    if not logger.handlers:
        stream_handler = logging.StreamHandler()
        # handler passes everything through; the logger's own level filters
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(logging.Formatter(get_default_format()))
        logger.addHandler(stream_handler)
    logger.setLevel(logging.INFO)
    return logger
def get_file_logger(name, log_file):
    """Configure root logging to write to *log_file* (truncating it) and
    return a named logger that also echoes records to the console.

    Fix: guard against attaching a duplicate console handler when called
    repeatedly with the same name (the old version stacked one per call,
    duplicating output). Note that basicConfig is a no-op when the root
    logger is already configured.
    """
    log_format = get_default_format()
    logging.basicConfig(
        level=logging.DEBUG,
        format=log_format,
        filename=log_file,
        filemode="w"
    )
    logger = logging.getLogger(name)
    if not logger.handlers:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(logging.Formatter(log_format))
        logger.addHandler(console)
    return logger
def set_global_file_logger(log_file, debug=False):
    """Route all logging to *log_file* (truncating it) and mirror records
    to the console on the root logger.

    The console handler shows INFO and above by default, or DEBUG when
    *debug* is true. Fix: choose the console level with one conditional
    instead of setting INFO and then conditionally overriding it.
    Note: basicConfig is a no-op when the root logger is already
    configured, and each call adds one more console handler to root —
    intended to be called once at startup.
    """
    log_format = get_default_format()
    logging.basicConfig(
        level=logging.DEBUG,
        format=log_format,
        filename=log_file,
        filemode="w"
    )
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG if debug else logging.INFO)
    console.setFormatter(logging.Formatter(log_format))
    logging.getLogger('').addHandler(console)
|
'''
MappingSettings.py: Objected Orientated Google Maps for Python
ReWritten by Chris Pham
Copyright OSURC, original code from GooMPy by Alec Singer and Simon D. Levy
This code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
'''
#####################################
# Imports
#####################################
# Python native imports
import math
import urllib2
from io import StringIO, BytesIO
import os
import time
import PIL.ImageDraw
import PIL.Image
import PIL.ImageFont
import signing
import RoverMapHelper as MapHelper
import cv2
import csv
import numpy as np
from datetime import datetime
#####################################
# Constants
#####################################
# Google Static Maps API keys, loaded below from the local 'key' file
_KEYS = []
# Number of pixels in half the earth's circumference at zoom = 21
_EARTHPIX = 268435456
# Number of decimal places for rounding coordinates
_DEGREE_PRECISION = 6
_PRECISION_FORMAT = '%.' + str(_DEGREE_PRECISION) + 'f'
# Largest tile we can grab without paying
_TILESIZE = 640
# Fastest rate at which we can download tiles without paying
_GRABRATE = 4
# Pixel Radius of Earth for calculations
_PIXRAD = _EARTHPIX / math.pi
_DISPLAYPIX = _EARTHPIX / 2000
# Load API keys, one per line; the context manager guarantees the file
# handle is closed even if a read fails (the old open/close pair leaked
# the handle on error).
with open('key', 'r') as key_file:
    for key_line in key_file:
        _KEYS.append(key_line.rstrip())
class GMapsStitcher(object):
    """Downloads Google Static Maps tiles, stitches them into one large
    "big image", and maintains a movable display viewport into it.

    NOTE(review): this class uses Python 2 syntax (``urllib2``, print
    statements, ``except X, e``) and will not run under Python 3.
    """
    def __init__(self, width, height,
                 latitude, longitude, zoom,
                 maptype, radius_meters=None, num_tiles=4, debug=False):
        # width/height: pixel size of the visible (display) image.
        # zoom/maptype: Google Static Maps request parameters.
        # radius_meters: if given, overrides num_tiles in fetch_tiles().
        self.helper = MapHelper.MapHelper()
        self.latitude = latitude
        self.longitude = longitude
        self.start_latitude = latitude
        self.start_longitude = longitude
        self.width = width
        self.height = height
        self.zoom = zoom
        self.maptype = maptype
        self.radius_meters = radius_meters
        self.num_tiles = num_tiles
        self.display_image = self.helper.new_image(width, height)
        self.debug = debug
        # Get the big image here
        self._fetch()
        self.center_display(latitude, longitude)
    def __str__(self):
        """
        This string returns when used in a print statement
        Useful for debugging and to print current state
        returns STRING
        """
        string_builder = ""
        string_builder += ("Center of the displayed map: %4f, %4f\n" %
                           (self.center_x, self.center_y))
        # NOTE(review): start_longitude is used for both values here —
        # the first one presumably should be start_latitude; confirm.
        string_builder += ("Center of the big map: %4fx%4f\n" %
                           (self.start_longitude, self.start_longitude))
        string_builder += ("Current latitude is: %4f, %4f\n" %
                           (self.longitude, self.latitude))
        string_builder += ("The top-left of the box: %dx%d\n" %
                           (self.left_x, self.upper_y))
        string_builder += ("Number of tiles genreated: %dx%d\n" %
                           (self.num_tiles, self.num_tiles))
        string_builder += "Map Type: %s\n" % (self.maptype)
        string_builder += "Zoom Level: %s\n" % (self.zoom)
        string_builder += ("Dimensions of Big Image: %dx%d\n" %
                           (self.big_image.size[0], self.big_image.size[1]))
        string_builder += ("Dimensions of Displayed Image: %dx%d\n" %
                           (self.width, self.height))
        string_builder += ("LatLong of Northwest Corner: %4f, %4f\n" %
                           (self.northwest))
        string_builder += ("LatLong of Southeast Corner: %4f, %4f\n" %
                           (self.southeast))
        return string_builder
    def _grab_tile(self, longitude, latitude, sleeptime=0):
        """
        This will return the tile at location longitude x latitude.
        Tiles are cached on disk under Resources/Maps and reused.
        Includes a sleep time to allow for free use if there is no API key
        returns PIL.IMAGE OBJECT (or None on HTTP error)
        """
        # Make the url string for polling
        # GET request header gets appended to the string
        urlbase = 'https://maps.googleapis.com/maps/api/staticmap?'
        urlbase += 'center=' + _PRECISION_FORMAT + ',' + _PRECISION_FORMAT + '&zoom=%d&maptype=%s'
        urlbase += '&size=%dx%d&format=png&key=%s'
        # Fill the formatting; also doubles as the cache-file name key
        specs = (self.helper.fast_round(latitude, _DEGREE_PRECISION),
                 self.helper.fast_round(longitude, _DEGREE_PRECISION),
                 self.zoom, self.maptype, _TILESIZE, _TILESIZE, _KEYS[0])
        filename = 'Resources/Maps/' + ((_PRECISION_FORMAT + '_' + _PRECISION_FORMAT + '_%d_%s_%d_%d_%s') % specs)
        filename += '.png'
        # Tile Image object
        tile_object = None
        if os.path.isfile(filename):
            # Cache hit: reuse the tile already on disk
            tile_object = PIL.Image.open(filename)
        # If file on filesystem
        else:
            # make the url and sign it with the second key
            url = urlbase % specs
            url = signing.sign_url(url, _KEYS[1])
            try:
                result = urllib2.urlopen(urllib2.Request(url)).read()
            except urllib2.HTTPError, e:
                # NOTE(review): returns None here — callers such as
                # fetch_tiles() paste the result without a None check.
                print "Error accessing url for reason:", e
                print url
                return
            tile_object = PIL.Image.open(BytesIO(result))
            if not os.path.exists('Resources/Maps'):
                os.mkdir('Resources/Maps')
            tile_object.save(filename)
        # Added to prevent timeouts on Google Servers
        time.sleep(sleeptime)
        return tile_object
    def _pixels_to_lon(self, iterator, lon_pixels):
        """
        This converts pixels to degrees to be used in
        fetching squares and generate correct squares
        returns FLOAT(degrees)
        """
        # Offset of tile column `iterator` from the center of the grid
        degrees = self.helper.pixels_to_degrees(
            (iterator - self.num_tiles / 2) * _TILESIZE, self.zoom)
        return math.degrees((lon_pixels + degrees - _EARTHPIX) / _PIXRAD)
    def _pixels_to_lat(self, iterator, lat_pixels):
        """
        This converts pixels to latitude using meridian projection
        to get the latitude to generate squares
        returns FLOAT(degrees)
        """
        # Inverse Mercator projection for tile row `iterator`
        return math.degrees(math.pi / 2 - 2 * math.atan(math.exp(((lat_pixels +
                            self.helper.pixels_to_degrees(
                                (iterator - self.num_tiles /
                                 2) * _TILESIZE, self.zoom)) -
                            _EARTHPIX) / _PIXRAD)))
    def fetch_tiles(self):
        """
        Function that handles fetching of files from init'd variables
        returns PIL.IMAGE OBJECT, (WEST, NORTH), (EAST, SOUTH)
        North/East/South/West are in FLOAT(degrees)
        """
        # cap floats to precision amount
        self.latitude = self.helper.fast_round(self.latitude,
                                               _DEGREE_PRECISION)
        self.longitude = self.helper.fast_round(self.longitude,
                                                _DEGREE_PRECISION)
        # number of tiles required to go from center
        # latitude to desired radius in meters
        if self.radius_meters is not None:
            self.num_tiles = (int(
                round(2 * self.helper.pixels_to_meters(
                    self.latitude, self.zoom) /
                    (_TILESIZE / 2. / self.radius_meters))))
        # Mercator pixel coordinates of the requested center point
        lon_pixels = _EARTHPIX + self.longitude * math.radians(_PIXRAD)
        sin_lat = math.sin(math.radians(self.latitude))
        lat_pixels = _EARTHPIX - _PIXRAD * math.log((1 + sin_lat) / (1 - sin_lat)) / 2
        self.big_size = self.num_tiles * _TILESIZE
        big_image = self.helper.new_image(self.big_size, self.big_size)
        # Download each tile of the grid and paste it into the big image
        for j in range(self.num_tiles):
            lon = self._pixels_to_lon(j, lon_pixels)
            for k in range(self.num_tiles):
                lat = self._pixels_to_lat(k, lat_pixels)
                tile = self._grab_tile(lon, lat)
                big_image.paste(tile, (j * _TILESIZE, k * _TILESIZE))
        # Geographic bounds of the stitched image
        west = self._pixels_to_lon(0, lon_pixels)
        east = self._pixels_to_lon(self.num_tiles - 1, lon_pixels)
        north = self._pixels_to_lat(0, lat_pixels)
        south = self._pixels_to_lat(self.num_tiles - 1, lat_pixels)
        return big_image, (north, west), (south, east)
    def move_pix(self, dx, dy):
        """
        Function gets change in x and y (dx, dy)
        then displaces the displayed map that amount
        NO RETURN
        NOTE(review): _constrain_x/_constrain_y RETURN the new coordinate
        but the results are discarded here, so left_x/upper_y never change
        and the viewport does not actually move — confirm intent.
        """
        self._constrain_x(dx)
        self._constrain_y(dy)
        self.update()
    def _constrain_x(self, diff):
        """
        Helper for move_pix: returns the new left_x, clamped so the
        viewport stays inside the big image.
        """
        new_value = self.left_x - diff
        if ((not new_value > 0) and
                (new_value < self.big_image.size[0] - self.width)):
            return self.left_x
        else:
            return new_value
    def _constrain_y(self, diff):
        """
        Helper for move_pix: returns the new upper_y, clamped so the
        viewport stays inside the big image.
        """
        new_value = self.upper_y - diff
        if ((not new_value > 0) and
                (new_value < self.big_image.size[1] - self.height)):
            return self.upper_y
        else:
            return new_value
    def update(self):
        """
        Function remakes display image using top left corners
        """
        # Negative offsets crop the big image to the current viewport
        self.display_image.paste(self.big_image, (-self.left_x, -self.upper_y))
        # self.display_image.resize((self.image_zoom, self.image_zoom))
    def _fetch(self):
        """
        Function generates big image and records its geographic corners
        """
        self.big_image, self.northwest, self.southeast = self.fetch_tiles()
    def move_latlon(self, lat, lon):
        """
        Function to move the object/rover
        NOTE(review): same issue as move_pix — the constrained values are
        computed but never assigned back.
        """
        x, y = self._get_cartesian(lat, lon)
        self._constrain_x(self.center_x - x)
        self._constrain_y(self.center_y - y)
        self.update()
    def _get_cartesian(self, lat, lon):
        """
        Helper for getting the x, y given lat and lon
        Linear interpolation between the big image's geographic corners.
        returns INT, INT (x, y)
        """
        viewport_lat_nw, viewport_lon_nw = self.northwest
        viewport_lat_se, viewport_lon_se = self.southeast
        viewport_lat_diff = viewport_lat_nw - viewport_lat_se
        viewport_lon_diff = viewport_lon_se - viewport_lon_nw
        bigimage_width = self.big_image.size[0]
        bigimage_height = self.big_image.size[1]
        pixel_per_lat = bigimage_height / viewport_lat_diff
        pixel_per_lon = bigimage_width / viewport_lon_diff
        new_lat_gps_range_percentage = (viewport_lat_nw - lat)
        new_lon_gps_range_percentage = (lon - viewport_lon_nw)
        x = new_lon_gps_range_percentage * pixel_per_lon
        y = new_lat_gps_range_percentage * pixel_per_lat
        return int(x), int(y)
    def add_gps_location(self, lat, lon, shape, size, fill):
        """
        Function adds a shape at lat x lon
        NOTE(review): `shape is "ellipsis"` compares string identity, not
        equality, and PIL.ImageDraw has no `ellipsis` method (it is
        `ellipse`) — the first branch would raise if ever taken; confirm.
        """
        x, y = self._get_cartesian(lat, lon)
        draw = PIL.ImageDraw.Draw(self.big_image)
        if shape is "ellipsis":
            draw.ellipsis((x - size, y - size, x + size, y + size), fill)
        else:
            draw.rectangle([x - size, y - size, x + size, y + size], fill)
        self.update()
    def center_display(self, lat, lon):
        """
        Function centers the display image on the given coordinate
        """
        x, y = self._get_cartesian(lat, lon)
        self.center_x = x
        self.center_y = y
        self.left_x = (self.center_x - (self.width / 2))
        self.upper_y = (self.center_y - (self.height / 2))
        self.update()
    # def update_rover_map_location(self, lat, lon):
    #     print "I did nothing"
    # def draw_circle(self, lat, lon, radius, fill):
    #     print "I did nothing"
    def connect_signals_and_slots(self):
        # Intentionally empty: kept for interface parity with GUI widgets
        pass
class OverlayImage(object):
    """Transparent overlay drawn on top of a stitched map: rover icon,
    navigation/landmark markers, and the rover's travelled path, which is
    also appended to CSV waypoint files.

    NOTE(review): unlike GMapsStitcher above, this class uses Python 3
    constructs (print(), FileExistsError) — the module mixes dialects.
    """
    def __init__(self, latitude, longitude, northwest, southeast,
                 big_width, big_height, width, height):
        # northwest/southeast: (lat, lon) corners of the underlying map
        # big_*: size of the full overlay; width/height: visible viewport
        self.northwest = northwest
        self.southeast = southeast
        self.old_latitude = latitude
        self.old_longitude = longitude
        self.big_width = big_width
        self.big_height = big_height
        self.width = width
        self.height = height
        self.big_image = None
        self.big_image_copy = None
        self.display_image = None
        self.display_image_copy = None
        self.indicator = None
        self.helper = MapHelper.MapHelper()
        self.paths_image = None
        self.paths_image_copy = None
        self.paths_width = big_width
        self.paths_height = big_height
        # Directory prefix for waypoint CSVs; note the literal '~' is only
        # expanded in _write_waypoints_to_csv, not in setup_waypoint_output
        self.waypoint_filename = "~/waypoint_paths/"
        self.show_path = True
        self.curr_path_num = 0
        self.paths_list = {}  # 1: (x, y), (x2, y2), ...; 2: (x, y), ...
        x, y = self._get_cartesian(latitude, longitude)
        self.center_x = x
        self.center_y = y
        self.left_x = (self.center_x - (self.width / 2))
        self.upper_y = (self.center_y - (self.height / 2))
        self.generate_image_files()
        self.setup_waypoint_output()
        self.write_once = True
        # Text Drawing Variables
        self.font = cv2.FONT_HERSHEY_TRIPLEX
        self.font_thickness = 1
        self.font_baseline = 0
        self.nav_coordinates_text_image = None
    def generate_image_files(self):
        """
        Creates big_image and display image sizes
        Returns NONE
        """
        # Pristine copies are kept so each redraw can start from blank
        self.big_image = self.helper.new_image(self.big_width, self.big_height,
                                               True)
        self.big_image_copy = self.big_image.copy()
        self.display_image = self.helper.new_image(self.width, self.height,
                                                   True)
        self.display_image_copy = self.display_image.copy()
        self.paths_image = self.helper.new_image(self.paths_width, self.paths_height, True)
        self.paths_image_copy = self.paths_image.copy()
        self.load_rover_icon()
        self.indicator.save("location.png")
    def setup_waypoint_output(self):
        # Create a timestamped directory for this session's waypoint CSVs.
        # NOTE(review): the '~' in waypoint_filename is not expanded here,
        # so the directory is created literally under './~/' — confirm.
        dirname = self.waypoint_filename + datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        try:
            os.makedirs(dirname)
        except FileExistsError:
            print("Directory " , dirname , " already exists")
    def _get_cartesian(self, lat, lon):
        """
        Helper for getting the x, y given lat and lon
        Linear interpolation between the overlay's geographic corners.
        returns INT, INT (x, y)
        """
        viewport_lat_nw, viewport_lon_nw = self.northwest
        viewport_lat_se, viewport_lon_se = self.southeast
        viewport_lat_diff = viewport_lat_nw - viewport_lat_se
        viewport_lon_diff = viewport_lon_se - viewport_lon_nw
        pixel_per_lat = self.big_height / viewport_lat_diff
        pixel_per_lon = self.big_width / viewport_lon_diff
        new_lat_gps_range_percentage = (viewport_lat_nw - lat)
        new_lon_gps_range_percentage = (lon - viewport_lon_nw)
        x = new_lon_gps_range_percentage * pixel_per_lon
        y = new_lat_gps_range_percentage * pixel_per_lat
        return int(x), int(y)
    def update_new_location(self, latitude, longitude,
                            compass, navigation_list, landmark_list):
        # Redraw the whole overlay from clean copies, then replot markers,
        # the rover icon, and (if enabled) extend the travelled path.
        # navigation_list / landmark_list elements look like
        # (label, lat, lon, color-with-.red()/.green()/.blue()) —
        # presumably Qt QColor objects; confirm against caller.
        self.big_image = self.big_image_copy.copy()
        self.display_image = self.display_image_copy.copy()
        size = 5
        draw = PIL.ImageDraw.Draw(self.big_image)
        draw_path = PIL.ImageDraw.Draw(self.paths_image)
        for element in navigation_list:
            x, y = self._get_cartesian(float(element[1]), float(element[2]))
            draw.text((x + 10, y - 5), str(element[0]))
            draw.ellipse((x - size, y - size, x + size, y + size), fill=(element[3].red(), element[3].green(), element[3].blue()))
        for element in landmark_list:
            x, y = self._get_cartesian(element[1], element[2])
            draw.text((x + 10, y - 5), str(element[0]))
            draw.ellipse((x - size, y - size, x + size, y + size), fill=(element[3].red(), element[3].green(), element[3].blue()))
        self._draw_rover(latitude, longitude, compass)
        # (0, 0) is treated as "no GPS fix": skip path tracking entirely
        if latitude == 0.0 and longitude == 0.0:
            return self.display_image
        # add a point to path if tracking path and new gps fix is reached
        path_size = 3
        if self.show_path and (self.old_latitude != latitude or self.old_longitude != longitude):
            x, y = self._get_cartesian(latitude, longitude)
            old_x, old_y = self._get_cartesian(self.old_latitude, self.old_longitude)
            # new path to add
            if self.curr_path_num not in self.paths_list:
                self.paths_list[self.curr_path_num] = []
                self.paths_image = self.paths_image_copy.copy()
                # NOTE(review): draw_path still targets the image that was
                # current before this reassignment — the first segment of a
                # new path is drawn on the discarded image; confirm.
            self.paths_list[self.curr_path_num].append([x, y])
            draw_path.line([(old_x, old_y), (x, y)], width=5, fill='yellow')
            draw_path.ellipse((x - path_size, y - path_size, x + path_size, y + path_size), fill='red')
            self._write_waypoints_to_csv(latitude, longitude)
            print("change in fix...", old_x, old_y, x, y, "compass = ", compass)
        self.update(latitude, longitude)
        self.old_latitude = latitude
        self.old_longitude = longitude
        return self.display_image
    def load_rover_icon(self):
        # 40x40 icon pasted by _draw_rover
        self.indicator = PIL.Image.open("Resources/Images/rover.png").resize((40, 40))
    def _draw_rover(self, lat, lon, angle=0):
        # Paste the (rotated) rover icon centered near (lat, lon).
        # NOTE(review): offset is 25 but the icon is 40px (half = 20) —
        # the icon is drawn slightly off-center; confirm intended.
        x, y = self._get_cartesian(lat, lon)
        x -= 25  # Half the height of the icon
        y -= 25
        rotated = self.indicator.copy()
        rotated = rotated.rotate(-angle, resample=PIL.Image.BICUBIC)
        # rotated.save("rotated.png")
        # Third argument uses the icon's own alpha as the paste mask
        self.big_image.paste(rotated, (x, y), rotated)
        print("_draw_rover() entered", lat, lon, x, y)
        if self.write_once:
            # self.display_image.save("Something.png")
            self.write_once = False
    def _write_waypoints_to_csv(self, lat, lon):
        # Append one rover-path waypoint row to the current path's CSV.
        # fields = ['Latitude', 'Longitude', 'Type', 'Timestamp', 'Heading']
        # Types: 0:Wreckage, 1:Supply Crate (intact), 2:Supply Crate (damaged), 3:Rover Path, 4:Picture Location, 5:Sample Site
        row = [lat, lon, '3', datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 0]
        filename = self.waypoint_filename + str(self.curr_path_num) + ".csv"
        with open(os.path.expanduser(filename), 'a+') as csvfile:
            csvwriter = csv.writer(csvfile)
            # csvwriter.writerow(fields)
            csvwriter.writerow(row)
    def update(self, latitude, longitude):
        # Composite the path layer over the overlay and crop to viewport
        # self.left_x -= 50
        # self.upper_y -= 50
        if self.show_path:
            combined_overlay = PIL.Image.alpha_composite(self.big_image, self.paths_image)
            # self.big_image.paste(self.paths_image, (-self.left_x, -self.upper_y))
            self.display_image.paste(combined_overlay, (-self.left_x, -self.upper_y))
        # self._draw_coordinate_text(latitude, longitude)
    def connect_signals_and_slots(self):
        # Intentionally empty: kept for interface parity with GUI widgets
        pass
|
def chknum(n):
    """Print whether *n* is positive, negative, or zero.

    Fix: removed the C-style trailing semicolons, which are
    non-idiomatic no-ops in Python. Output text is unchanged.
    """
    if n > 0:
        print("Positive Number")
    elif n < 0:
        print("negative Number")
    else:
        print("zero")


if __name__ == '__main__':
    chknum(int(input()))
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Gergely Nagy <algernon@madhouse-project.org>
# Copyright (c) 2013 James King <james@agentultra.com>
# Copyright (c) 2013 Julien Danjou <julien@danjou.info>
# Copyright (c) 2013 Konrad Hinsen <konrad.hinsen@fastmail.net>
# Copyright (c) 2013 Thom Neale <twneale@gmail.com>
# Copyright (c) 2013 Will Kahn-Greene <willg@bluesock.org>
# Copyright (c) 2013 Bob Tolbert <bob@tolbert.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# hymagic is an adaptation of the HyRepl to allow ipython iteration
# hymagic author - Todd Iverson
# Available as github.com/yardsale8/hymagic
#
# Credits for the starting point of the magic:
# https://github.com/yardsale8/hymagic/blob/master/hymagic/__init__.py
#
# and special mentions to:
# Ryan (https://github.com/kirbyfan64) and
# Tuukka (https://github.com/tuturto) in the hylang discuss forum:
# https://groups.google.com/forum/#!forum/hylang-discuss
# who made it possible for me to resolve all essential obstacles
# when struggling with macros
from IPython.core.magic import Magics, magics_class, line_cell_magic
import ast
# Import Hy's tokenizer and compiler. Without Hy installed the magics
# cannot work at all, so print install instructions and abort the module
# import, forwarding the original ImportError to exit().
try:
    from hy.lex import LexException, PrematureEndOfInput, tokenize
    from hy.compiler import hy_compile, HyTypeError
    from hy.importer import ast_compile
except ImportError as e:
    print("To use this magic extension, please install Hy (https://github.com/hylang/hy) with: pip install git+https://github.com/hylang/hy.git")
    from sys import exit
    exit(e)
# Usage hints, shown once when the extension is imported
print("Use for example: %plc (1 and? 1)")
print("Operators available: nope? ¬ and? ∧ xor? ⊕ or? ∨ nand? ↑ nxor? ↔ nor? ↓")
print("Operands available: True 1 ⊤ False 0 ⊥")
# Hy source template defining the propositional-logic operators, reader
# macros (#>, #<, #$, #£) and implication macros. The trailing %s is
# replaced with the user's clause in PLCMagics.plc(). The string content
# is runtime data and is preserved verbatim.
hy_program = """
; these two method behave a little bit different when using import / require
;(eval-when-compile (setv operators []))
(eval-and-compile
; without eval setv doesn't work as a global variable for macros
(setv operators []
operators-precedence []))
; add operators to global variable so that on a parser loop
; we can use them on if clauses to compare operator functions
; for singular usage: #>operator
; for multiple: #>[operator1 operator 2 ...]
(defreader > [items]
(do
; transforming singular value to a list for the next for loop
(if (not (coll? items)) (setv items [items]))
(for [item items]
; discard duplicates
(if-not (in item operators)
(.append operators item)))))
; set the order of precedence for operators
; for singular usage: #<operator
; for multiple: #<[operator1 operator 2 ...]
; note that calling this macro will empty the previous list of precedence!
; to keep the previous set one should do something like:
; #<(doto operators-precedence (.extend [operator-x operator-y ...]))
(defreader < [items]
(do
; (setv operators-precedence []) is not working here
; for some macro evaluation - complilation order reason
; so emptying the current operators-precedence list more verbose way
(if (pos? (len operators-precedence))
(while (pos? (len operators-precedence))
(.pop operators-precedence)))
; transforming singular value to a list for the next for loop
(if-not (coll? items) (setv items [items]))
(for [item items]
; discard duplicates
(if-not (in item operators-precedence)
(.append operators-precedence item)))))
; define math boolean operands
(setv ⊤ 1)
(setv ⊥ 0)
; define operator function and math alias (op-symbol)
; plus set them to operators global list
(defmacro defoperator [op-name op-symbol params &rest body]
`(do
(defn ~op-name ~params ~@body)
#>~op-name
(setv ~op-symbol ~op-name)
#>~op-symbol))
; add custom or native operators to the list
; somebody might like this syntax more than using
; reader macro directly. so calling (defoperators + - * /)
; is same as calling #>[+ - * /]
(defmacro defoperators [&rest args] `#>~args)
; define true comparison function
(defn true? [value]
(or (= value 1) (= value True)))
; same as nor at the moment... not? is a reserved word
(defoperator nope? ¬ [&rest truth-list]
(not (any truth-list)))
; and operation : zero or more arguments, zero will return false,
; otherwise all items needs to be true
(defoperator and? ∧ [&rest truth-list]
(all (map true? truth-list)))
; negation of and
(defoperator nand? ↑ [&rest truth-list]
(not (apply and? truth-list)))
; or operation : zero or more arguments, zero will return false,
; otherwise at least one of the values needs to be true
(defoperator or? ∨ [&rest truth-list]
(any (map true? truth-list)))
; negation of or
(defoperator nor? ↓ [&rest truth-list]
(not (apply or? truth-list)))
; xor operation (parity check) : zero or more arguments, zero will return false,
; otherwise odd number of true's is true
(defoperator xor? ⊕ [&rest truth-list]
(setv boolean False)
(for [truth-value truth-list]
(if (true? truth-value)
(setv boolean (not boolean))))
boolean)
;synonym for xor
(setv ↮ xor?)
#>↮
; negation of xor
(defoperator xnor? ↔ [&rest truth-list]
(not (apply xor? truth-list)))
; equivalence
; https://en.wikipedia.org/wiki/Logical_equivalence
; with two values same as xnor but with more values
; result differs: [1 1 1] = True = [0 0 0]
(defoperator eqv? ≡ [&rest truth-list]
(setv boolean (if (pos? (len truth-list)) True False))
(for [truth-value truth-list]
(if (not? truth-value (first truth-list))
(do (setv boolean False) (break))))
boolean)
; unquivalence
(defoperator neqv? ≢ [&rest truth-list]
(not (apply eqv? truth-list)))
; Four implications macro
; Behaviour:
; (1 op 0 op 0) -> (op 1 0 0 ) -> (op (op 1 0) 0)
; Tests:
; (for [y (range 2)]
; (print "(→ y) =>" (x y)))
; (for [y (range 2)]
; (for [z (range 2)]
; (print (% "(op %s" y) (% "%s) =>" z) (x y z))))
; Also note that [(op 1) (op 0)] = [True, False]
(defmacro defimplication [op-name op-symbol func]
`(defoperator ~op-name ~op-symbol [&rest truth-list]
(do
; passed arguments is a tuple
; so it needs to be cast to list for pop
(setv args (list truth-list))
(if (= (len args) 1) (true? (first args))
; else
(do
; default return value is False
(setv result False)
; take the first element of list and remove it
(setv prev (first args))
(.remove args prev)
; loop over all args
(while
(pos? (len args))
(do
; there are at least two items on a list at the moment
; so we can get the next and remove it too
(setv next (first args))
(.remove args next)
; recurisvely get the result. previous could be a list as
; well as next could be a list, thus prev needs to be evaluated
; at least once more.
(setv result ~func)
;(print 'prev prev 'next next 'result result)
; and set result for the previous one
(setv prev result)))
; return resulting boolean value
result)))))
; Converse implication (P ∨ ¬Q)
; https://en.wikipedia.org/wiki/Converse_implication
(defimplication cimp? ← (any [(← prev) (not (← next))]))
; Material nonimplication (P ∧ ¬Q)
; https://en.wikipedia.org/wiki/Material_nonimplication
(defimplication mnimp? ↛ (all [(↛ prev) (not (↛ next))]))
; Converse nonimplication (¬P ∨ Q)
; https://en.wikipedia.org/wiki/Converse_nonimplication
(defimplication cnimp? ↚ (any [(not (↚ prev)) (↚ next)]))
; Material implication (¬P ∧ Q)
; https://en.wikipedia.org/wiki/Material_conditional
(defimplication mimp? → (all [(not (→ prev)) (→ next)]))
; helper functions for defmixfix ($) macros.
(eval-and-compile
; this takes a list of items at least 3
; index must be bigger than 1 and smaller than the length of the list
; left and right side of the index will be picked to a new list where
; centermost item is moved to left and left to center
; [1 a 2 b 3 c 4] idx=3 -> [1 a [b 2 3] c 4]
(defn list-nest [lst idx]
(setv tmp
(doto
(list (take 1 (drop idx lst)))
(.append (get lst (dec idx)))
(.append (get lst (inc idx)))))
(doto
(list (take (dec idx) lst))
(.append tmp)
(.extend (list (drop (+ 2 idx) lst)))))
(defn one-not-operator? [code]
(and (= (len code) 1) (not (in (first code) operators))))
(defn second-operator? [code]
(and (pos? (len code)) (in (second code) operators)))
(defn first-operator? [code]
(and (> (len code) 1) (in (first code) operators)))
(defn third [lst]
(get lst 2)))
; macro to change precedence order of the operations.
; argument list will be passed to the #< readermacro which
; will reset arguments to a new operators-precedence list
; example: (defprecedence and? xor? or?)
; or straight to reader macro way: #<[and? xor? or?]
;
; note that calling this macro will empty the previous list of precedence!
; to keep the previous set one should do something like:
; (defprecedence (doto operators-precedence (.extend [operator-x operator-y ...])))
;
; call (defprecedence) to empty the list to the default state
; in that case left-wise order of precedence is used when evaluating
; the list of propositional logic or other symbols
(defmacro defprecedence [&rest args] `#<~args)
; macro that takes mixed prefix and infix notation clauses
; for evaluating their value. this is same as calling
; $ reader macro directly but might be more convenient way
; inside lips code to use than reader macro syntax
; there is no need to use parentheses with this macro
(defmacro defmixfix [&rest items] `#$~items)
; pass multiple (n) evaluation clauses. each of the must be
; wrapped by () parentheses
(defmacro defmixfix-n [&rest items]
(list-comp `#$~item [item items]))
; quote rather than evaluate!
(defreader £ [code]
(if
; scalar value
(not (coll? code)) code
; empty list
(zero? (len code)) False
; list with lenght of 1 and the single item not being the operator
; NOTE that small ' char, the only difference between #$ reader macro!
(one-not-operator? code) `'#£~@code
; list with three or more items, second is the operator
(second-operator? code)
(do
; the second operator on the list is the default index
(setv idx 1)
; loop over all operators
(for [op operators-precedence]
; set new index if operator is found from the code and break in that case
(if (in op code) (do (setv idx (.index code op)) (break))))
; make list nested based on the found index and evaluate again
`#£~(list-nest code idx))
; list with more than 1 items and the first item is the operator
(first-operator? code)
; take the first item i.e. operator and use
; rest of the items as arguments once evaluated by #$
`(~(first code) ~@(list-comp `#£~part [part (drop 1 code)]))
; possibly syntax error on clause
; might be caused by arbitrary usage of operators and operands
; something like: (1 1 and? 0 and?)
`(raise (Exception "Expression error!"))))
; main parser loop for propositional logic clauses
(defreader $ [code]
(if
; scalar value
(not (coll? code)) code
; empty list
(zero? (len code)) False
; list with lenght of 1 and the single item not being the operator
(one-not-operator? code) `#$~@code
; list with three or more items, second is the operator
(second-operator? code)
(do
; the second operator on the list is the default index
(setv idx 1)
; loop over all operators
(for [op operators-precedence]
; set new index if operator is found from the code and break in that case
(if (in op code) (do (setv idx (.index code op)) (break))))
; make list nested based on the found index and evaluate again
`#$~(list-nest code idx))
; list with more than 1 items and the first item is the operator
(first-operator? code)
; take the first item i.e. operator and use
; rest of the items as arguments once evaluated by #$
`(~(first code) ~@(list-comp `#$~part [part (drop 1 code)]))
; possibly syntax error on clause
; might be caused by arbitrary usage of operators and operands
; something like: (1 1 and? 0 and?)
`(raise (Exception "Expression error!"))))
; add input here
%s
"""
def get_tokens(source, filename):
    """Tokenize Hy source, printing lexer problems instead of raising.

    Returns the token tree on success, or None when the input is
    incomplete or fails to lex.
    """
    try:
        return tokenize(source)
    except PrematureEndOfInput as premature:
        print(premature)
    except LexException as lex_error:
        # attach context so the printed message points at the user's input
        if lex_error.source is None:
            lex_error.source = source
            lex_error.filename = filename
        print(lex_error)
def parse(tokens, source, filename, shell, interactive):
    """Compile Hy tokens to a Python AST and run the nodes in the shell.

    Hy type errors are annotated with the original source and printed;
    any other exception is shown via the shell's own traceback display.
    """
    try:
        nodes = hy_compile(tokens, "__console__", root=interactive)
        shell.run_ast_nodes(nodes.body, filename, compiler=ast_compile)
    except HyTypeError as hy_error:
        if hy_error.source is None:
            hy_error.source = source
            hy_error.filename = filename
        print(hy_error)
    except Exception:
        shell.showtraceback()
@magics_class
class PLCMagics(Magics):
    """
    Jupyter Notebook Magics (%plc and %%plc) for Propositional Logic Clauses (PLC)
    written in Hy language (Lispy Python).
    """

    def __init__(self, shell):
        super(PLCMagics, self).__init__(shell)

    @line_cell_magic
    def hylang(self, line = None, cell = None, filename = '<input>'):
        """Evaluate plain Hy code given as a %hylang line or %%hylang cell."""
        source = line or cell
        tokens = get_tokens(source, filename)
        if not tokens:
            return None
        return parse(tokens, source, filename, self.shell, ast.Interactive)

    @line_cell_magic
    def plc(self, line = None, cell = None, filename = '<input>'):
        """Evaluate a propositional-logic clause via the #$ reader macro.

        Line magic input is wrapped with #$ so prefix/infix clauses are
        parsed; cell input is inserted into the template as-is.
        """
        payload = "#$%s" % line if line else cell
        source = hy_program % payload
        tokens = get_tokens(source, filename)
        if not tokens:
            return None
        return parse(tokens, source, filename, self.shell, ast.Interactive)
def load_ipython_extension(ip):
    """Entry point called by Jupyter/IPython to register the PLC magics."""
    ip.register_magics(PLCMagics)
|
# -*- coding: utf-8 -*-
# Copyright 2022 Network to Code
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function

# Python-2 compatibility: make all classes in this module new-style.
__metaclass__ = type
class ModuleDocFragment(object):
    """Documentation fragment for netauto modules.

    Ansible loads these YAML strings and merges them into each module's
    documentation via ``extends_documentation_fragment``.
    """

    # Shared connection options (fix: "Hostame" -> "Hostname" in descriptions).
    DOCUMENTATION = r"""
options:
  platform:
    description:
      - Switch platform based on Pyntc library.
    required: false
    default: null
    choices: [
      "arista_eos_eapi",
      "cisco_aireos_ssh",
      "cisco_asa_ssh",
      "cisco_ios_ssh",
      "cisco_nxos_nxapi",
      "f5_tmos_icontrol",
      "juniper_junos_netconf",
    ]
    type: str
  host:
    description:
      - Hostname or IP address of switch.
    required: false
    default: null
    type: str
  username:
    description:
      - Username used to login to the target device.
    required: false
    default: null
    type: str
  password:
    description:
      - Password used to login to the target device.
    required: false
    default: null
    type: str
  provider:
    description:
      - Dictionary which acts as a collection of arguments used to define the characteristics
        of how to connect to the device.
        Note - host, username, password and platform must be defined in either provider
        or local param.
        Note - local param takes precedence, e.g. hostname is preferred to provider['host'].
    required: false
    type: dict
    suboptions:
      platform:
        description:
          - Switch platform based on Pyntc library.
        required: false
        default: null
        choices: [
          "arista_eos_eapi",
          "cisco_aireos_ssh",
          "cisco_asa_ssh",
          "cisco_ios_ssh",
          "cisco_nxos_nxapi",
          "f5_tmos_icontrol",
          "juniper_junos_netconf",
        ]
        type: str
      host:
        description:
          - Hostname or IP address of switch.
        required: false
        default: null
        type: str
      username:
        description:
          - Username used to login to the target device.
        required: false
        default: null
        type: str
      password:
        description:
          - Password used to login to the target device.
        required: false
        default: null
        type: str
      secret:
        description:
          - Enable secret for devices connecting over SSH.
        required: false
        default: null
        type: str
      transport:
        description:
          - Transport protocol for API-based devices.
        required: false
        default: null
        choices: [http, https]
        type: str
      port:
        description:
          - TCP/UDP port to connect to target device. If omitted standard port numbers will be used.
            80 for HTTP; 443 for HTTPS; 22 for SSH.
        required: false
        default: null
        type: str
      ntc_host:
        description:
          - The name of a host as specified in an NTC configuration file.
        required: false
        default: null
        type: str
      ntc_conf_file:
        description:
          - The path to a local NTC configuration file. If omitted, and ntc_host is specified,
            the system will look for a file given by the path in the environment variable PYNTC_CONF,
            and then in the users home directory for a file called .ntc.conf.
        required: false
        default: null
        type: str
  secret:
    description:
      - Enable secret for devices connecting over SSH.
    required: false
    default: null
    type: str
  transport:
    description:
      - Transport protocol for API-based devices.
    required: false
    default: null
    choices: [http, https]
    type: str
  port:
    description:
      - TCP/UDP port to connect to target device. If omitted standard port numbers will be used.
        80 for HTTP; 443 for HTTPS; 22 for SSH.
    required: false
    default: null
    type: str
  ntc_host:
    description:
      - The name of a host as specified in an NTC configuration file.
    required: false
    default: null
    type: str
  ntc_conf_file:
    description:
      - The path to a local NTC configuration file. If omitted, and ntc_host is specified,
        the system will look for a file given by the path in the environment variable PYNTC_CONF,
        and then in the users home directory for a file called .ntc.conf.
    required: false
    default: null
    type: str
"""

    # Options shared by the command-execution modules.
    COMMAND_OPTION = r"""
options:
  commands:
    description:
      - Command to execute on target device
    required: false
    default: null
    type: list
  commands_file:
    description:
      - Command to execute on target device
    required: false
    type: str
"""
|
import numpy as np
import cv2 as cv
import tensorflow as tf
def guided_filter_cv(I, p, r, eps):
    """Edge-preserving guided filter (He et al., ECCV 2010), OpenCV backend.

    I   -- 2-D guidance image
    p   -- filtering input, same shape as I
    r   -- box-window radius; the blur kernel used is (2r, 2r)
    eps -- regularizer penalizing large a in Eqn. (5)
    Returns the filtered image q.
    """
    h, w = I.shape  # unpacking also asserts that I is 2-D
    win = (2 * r, 2 * r)
    I_mean = cv.boxFilter(I, -1, win)
    p_mean = cv.boxFilter(p, -1, win)
    Ip_mean = cv.boxFilter(I * p, -1, win)
    # Covariance of (I, p) in each local patch.
    Ip_cov = Ip_mean - I_mean * p_mean
    II_mean = cv.boxFilter(I * I, -1, win)
    I_var = II_mean - I_mean * I_mean
    a = Ip_cov / (I_var + eps)   # Eqn. (5) in the paper
    b = p_mean - a * I_mean      # Eqn. (6) in the paper
    a_mean = cv.boxFilter(a, -1, win)
    b_mean = cv.boxFilter(b, -1, win)
    return a_mean * I + b_mean   # Eqn. (8) in the paper
|
#|default_exp p02_opencv_mediapipe_segment
# sudo pacman -S python-opencv rocm-opencl-runtime python-mss
import time
import numpy as np
import cv2 as cv
import mss
import mediapipe as mp
import mediapipe.tasks
import mediapipe.tasks.python
# Wall-clock start, used for relative timestamps in log output.
start_time=time.time()
debug=True
_code_git_version="fe5991e8e673fa16f0e56b68c53696b9568f1977"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/105_amd_opencv/source/"
_code_generation_time="19:38:22 of Sunday, 2023-04-02 (GMT+1)"
# TFLite segmentation model (DeepLab v3); path is machine-specific.
model_path="/home/martin/Downloads/deeplabv3.tflite"
# Short aliases for the MediaPipe tasks API.
BaseOptions=mp.tasks.BaseOptions
ImageSegmenter=mp.tasks.vision.ImageSegmenter
ImageSegmenterOptions=mp.tasks.vision.ImageSegmenterOptions
VisionRunningMode=mp.tasks.vision.RunningMode
# Latest / previous asynchronous segmentation result (set by the callback).
gResult=None
oldResult=None
def print_result(result: list[mp.Image], output_image: mp.Image, timestamp_ms: int):
print("{} result len(result)={}".format(((time.time())-(start_time)), len(result)))
global gResult
gResult=result
# Segmenter configuration: asynchronous (LIVE_STREAM) mode delivering results
# through print_result.
# output can be category .. single uint8 for every pixel
# or confidence_mask .. several float images with range [0,1]
options=ImageSegmenterOptions(base_options=BaseOptions(model_asset_path=model_path), running_mode=VisionRunningMode.LIVE_STREAM, output_type=ImageSegmenterOptions.OutputType.CATEGORY_MASK, result_callback=print_result)
def view(category, imgr):
    """Overlay red outlines of the segmentation mask onto imgr and display it."""
    edges = cv.Canny(image=category, threshold1=10, threshold2=1)
    # Paint the detected mask boundaries red (BGR).
    imgr[edges != 0] = [0, 0, 255]
    cv.imshow("screen", imgr)
print("{} nil cv.ocl.haveOpenCL()={}".format(((time.time())-(start_time)), cv.ocl.haveOpenCL()))
loop_time=time.time()
# Contrast-limited adaptive histogram equalization for the captured frames.
clahe=cv.createCLAHE(clipLimit=(15. ), tileGridSize=(32,18,))
with ImageSegmenter.create_from_options(options) as segmenter:
    with mss.mss() as sct:
        loop_start=time.time()
        while (True):
            # Grab the top-left quarter of a 1920x1080 screen, below y=160.
            img=np.array(sct.grab(dict(top=160, left=0, width=((1920)//(2)), height=((1080)//(2)))))
            mp_image=mp.Image(image_format=mp.ImageFormat.SRGB, data=img)
            timestamp_ms=int(((1000)*(((time.time())-(loop_start)))))
            # Kick off asynchronous segmentation; print_result fills gResult.
            segmenter.segment_async(mp_image, timestamp_ms)
            # Apply CLAHE to the L channel only, boosting contrast without hue shifts.
            lab=cv.cvtColor(img, cv.COLOR_RGB2LAB)
            lab_planes=cv.split(lab)
            lclahe=clahe.apply(lab_planes[0])
            lab=cv.merge([lclahe, lab_planes[1], lab_planes[2]])
            imgr=cv.cvtColor(lab, cv.COLOR_LAB2RGB)
            if ( (gResult is None) ):
                if ( (oldResult is None) ):
                    # No segmentation available yet: show the enhanced frame as-is.
                    cv.imshow("screen", imgr)
                else:
                    # Reuse the previous mask while a new one is being computed.
                    view(oldResult[0].numpy_view(), imgr)
            else:
                # Fresh result: draw it, then retire it to oldResult.
                view(gResult[0].numpy_view(), imgr)
                oldResult=gResult
                gResult=None
            # Throttle the loop to roughly 60 Hz.
            delta=((time.time())-(loop_time))
            target_period=((((1)/((60. ))))-((1.00e-4)))
            if ( ((delta)<(target_period)) ):
                time.sleep(((target_period)-(delta)))
            fps=((1)/(delta))
            fps_wait=((1)/(((time.time())-(loop_time))))
            loop_time=time.time()
            # Periodic FPS report, every 2 s of stream timestamps.
            if ( ((0)==(((timestamp_ms)%(2000)))) ):
                print("{} nil fps={} fps_wait={}".format(((time.time())-(start_time)), fps, fps_wait))
            # Quit on 'q'.
            if ( ((ord("q"))==(cv.waitKey(1))) ):
                cv.destroyAllWindows()
                break
#!/usr/bin/python
import smbus
import math
import time
import pyfirmata
# MPU-6050 power-management registers
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
# Latest tilt angles (degrees), recomputed each pass of the main loop
AngleDegX = 0
AngleDegY = 0
AngleDegZ = 0
# First serial hardware body-control connection.
# Try the primary device node, falling back to the alternate one; the bare
# ``except:`` clauses were narrowed to ``except Exception`` so Ctrl-C
# (KeyboardInterrupt) and SystemExit are no longer swallowed.
try:
    hardware = pyfirmata.ArduinoMega("/dev/ttyACM0")
    print("Catbot hardware serial connection successfully!")
except Exception:
    print("Hardware serial connection error trying to reconnect...")
    try:
        hardware = pyfirmata.ArduinoMega("/dev/ttyACM1")
        print("Rerouting first serial successfully!")
    except Exception:
        print("Hardware serial error please checking physical hardware this time!")
# Second hardware control serial connection
try:
    hardware2 = pyfirmata.ArduinoMega("/dev/ttyUSB0")
    print("Catbot hardware serial connection successfully!")
except Exception:
    print("Second Hardware serial connection error trying to reconnect...")
    try:
        hardware2 = pyfirmata.ArduinoMega("/dev/ttyUSB1")
        print("Rerouting second serial successfully!")
    except Exception:
        print("Second Hardware serial error please checking physical hardware this time!")
# Hardware pin connections with the pin map of the micro controllers.
# 'd:N:s' requests digital pin N in servo mode.
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# X axis gyro control
# Shoulder X - Left
Shoulder2Left = hardware.get_pin('d:6:s')
# Shoulder X - Right
Shoulder2Right = hardware.get_pin('d:13:s')
# Abduct X - Left
AbductLeft = hardware2.get_pin('d:5:s')
# Abduct X - Right
AbductRight = hardware2.get_pin('d:6:s')
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Y axis gyro control
# Head
Head = hardware.get_pin('d:8:s')
# Shoulder Y - Left
ShoulderLeft = hardware.get_pin('d:4:s')
# Shoulder Y - Right
ShoulderRight = hardware.get_pin('d:5:s')
# Hip Y - Left
HipLeft = hardware.get_pin('d:3:s')
# Hip Y - Right
HipRight = hardware.get_pin('d:2:s')
# Elbow Y - Left
ElbowLeft = hardware.get_pin('d:14:s')
# Elbow Y - Right
ElbowRight = hardware.get_pin('d:11:s')
# Abduct knee Y - Left
Abductknee3Left = hardware.get_pin('d:7:s')
Abductknee2Left = hardware2.get_pin('d:4:s')
# Abduct knee Y - Right
Abductknee3Right = hardware2.get_pin('d:7:s')
Abductknee2Right = hardware2.get_pin('d:8:s')
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Z axis gyro control (head yaw)
Headz = hardware.get_pin('d:15:s')
def read_byte(reg):
    """Read one byte from MPU-6050 register *reg* over I2C."""
    return bus.read_byte_data(address, reg)
def read_word(reg):
    """Read a big-endian 16-bit word starting at register *reg*."""
    high = bus.read_byte_data(address, reg)
    low = bus.read_byte_data(address, reg + 1)
    return (high << 8) + low
def read_word_2c(reg):
    """Read a 16-bit word at *reg* and interpret it as two's complement."""
    raw = read_word(reg)
    if raw >= 0x8000:
        # Negative value: raw - 2**16 is the same as -((65535 - raw) + 1).
        return raw - 65536
    return raw
def dist(a, b):
    """Euclidean length of the 2-D vector (a, b)."""
    squared = (a * a) + (b * b)
    return math.sqrt(squared)
def get_y_rotation(x, y, z):
    """Tilt about the Y axis, in degrees, from accelerometer components."""
    angle = math.atan2(x, dist(y, z))
    return -math.degrees(angle)
def get_x_rotation(x, y, z):
    """Tilt about the X axis, in degrees, from accelerometer components."""
    angle = math.atan2(y, dist(x, z))
    return math.degrees(angle)
bus = smbus.SMBus(1) # bus = smbus.SMBus(0) for revision-1 boards
address = 0x68       # MPU-6050 I2C address, found via i2cdetect
# Wake the module (clear the sleep bit) so it answers register reads.
bus.write_byte_data(address, power_mgmt_1, 0)
print("Gyroskop")
print("--------")
# One-shot gyroscope reads at startup (registers 0x43/0x45/0x47).
# NOTE(review): these values are never refreshed inside the main loop.
gyroskop_xout = read_word_2c(0x43)
gyroskop_yout = read_word_2c(0x45)
gyroskop_zout = read_word_2c(0x47)
# Log file receiving one angle triple per loop iteration.
Loggerfile = open("Gyrologger.txt",'w+')
def ActuatorsXcontrol(AngleDegX):
    """Drive the shoulder/abduct servos from the X-axis tilt angle.

    The magic constant is the measured neutral ("level") X reading; servo
    positions are offset from their neutral pose by the absolute tilt.
    """
    neutral = 4.196468226055834
    if AngleDegX == neutral:
        Shoulder2Left.write(175)
        Shoulder2Right.write(5)
        AbductLeft.write(0)
        AbductRight.write(180)
    elif AngleDegX > neutral:
        Shoulder2Left.write(175 + abs(AngleDegX))
        Shoulder2Right.write(abs(AngleDegX + 8))
        AbductLeft.write(abs(AngleDegX))
        AbductRight.write(180 - abs(AngleDegX))
    elif AngleDegX < neutral:
        Shoulder2Left.write(175 - abs(AngleDegX))
        Shoulder2Right.write(abs(AngleDegX + 8))
        AbductLeft.write(abs(AngleDegX))
        AbductRight.write(180 - abs(AngleDegX))
def ActuatorsYcontrol(AngleDegY):
    """Drive head, shoulder, elbow, knee and hip servos from the Y tilt angle.

    The magic constant 0.9511994645726558 is the measured neutral reading.
    NOTE(review): the hip writes use the module-level global AngleDegX rather
    than the AngleDegY parameter, and the final HipRight.write lacks the
    "20 +" offset the other branches use — both look like bugs; confirm
    against the hardware before changing.
    """
    Head.write(140 + AngleDegY)
    if AngleDegY == 0.9511994645726558:
        # Neutral pose.
        ShoulderLeft.write(120)
        ShoulderRight.write(50)
        ElbowLeft.write(50)
        ElbowRight.write(100)
        Abductknee3Left.write(170)
        Abductknee2Left.write(170)
        Abductknee3Right.write(10)
        Abductknee2Right.write(10)
        HipLeft.write(160)
        HipRight.write(20)
    if AngleDegY > 0.9511994645726558:
        ShoulderLeft.write(120 - AngleDegY)
        ShoulderRight.write(50 + AngleDegY)
        ElbowLeft.write(50 - AngleDegY)
        ElbowRight.write(120 + AngleDegY)
        Abductknee3Left.write(170 - AngleDegY)
        Abductknee2Left.write(170 - AngleDegY)
        Abductknee3Right.write(10 + AngleDegY)
        Abductknee2Right.write(10 + AngleDegY)
        HipLeft.write(160 - abs(AngleDegX))
        HipRight.write(20 + abs(AngleDegX))
    if AngleDegY < 0.9511994645726558:
        ShoulderLeft.write(120 - AngleDegY)
        ShoulderRight.write(50 + AngleDegY )
        ElbowLeft.write(50 + AngleDegY)
        ElbowRight.write(120 - AngleDegY)
        Abductknee3Left.write(170 + AngleDegY)
        Abductknee2Left.write(170 + AngleDegY)
        Abductknee3Right.write(10 - AngleDegY)
        Abductknee2Right.write(10 - AngleDegY)
        HipLeft.write(160 + abs(AngleDegX))
        HipRight.write(abs(AngleDegX))
def ActuatorsZcontrol(AngleDegZ):
    """Drive the head-yaw servo from the Z-axis angle.

    62.401482521450255 is the measured neutral reading.  The above-neutral
    and below-neutral branches apply the same mapping in the original code;
    they are kept separate here to preserve that behavior exactly.
    """
    neutral = 62.401482521450255
    if AngleDegZ == neutral:
        Headz.write(105)
    elif AngleDegZ > neutral:
        Headz.write(50 + AngleDegZ)
    elif AngleDegZ < neutral:
        Headz.write(50 + AngleDegZ)
while True:
    # print("gyroskop_xout: ", ("%5d" % gyroskop_xout), " skaliert: ", (gyroskop_xout / 131))
    # print("gyroskop_xout: ", ("%5d" % gyroskop_xout), " skaliert: ", (gyroskop_xout / 131))
    # print("gyroskop_yout: ", ("%5d" % gyroskop_yout), " skaliert: ", (gyroskop_yout / 131))
    # print("gyroskop_zout: ", ("%5d" % gyroskop_zout), " skaliert: ", (gyroskop_zout / 131))
    print("Catbot Gyroscope")
    print("---------------------")
    # Raw accelerometer readings (registers 0x3b/0x3d/0x3f), scaled by the
    # +-2g sensitivity of 16384 LSB/g.
    beschleunigung_xout = read_word_2c(0x3b)
    beschleunigung_yout = read_word_2c(0x3d)
    beschleunigung_zout = read_word_2c(0x3f)
    beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0
    beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0
    beschleunigung_zout_skaliert = beschleunigung_zout / 16384.0
    # NOTE(review): math.degrees() of a g-value is not a geometric angle —
    # the get_x/y_rotation helpers above compute real tilt but are unused here.
    # These "angles" appear to be an empirical calibration; confirm.
    AngleDegX = math.degrees(beschleunigung_xout_skaliert)
    AngleDegY = math.degrees(beschleunigung_yout_skaliert)
    AngleDegZ = math.degrees(beschleunigung_zout_skaliert)
    print("AngleDegX",(AngleDegX))
    print("AngleDegY",(AngleDegY))
    print("AngleDegZ",(AngleDegZ))
    # NOTE(review): X control is fed AngleDegY and Y control AngleDegX —
    # possibly intentional given the servo axes, but verify.
    ActuatorsXcontrol(AngleDegY)
    ActuatorsYcontrol(AngleDegX)
    ActuatorsZcontrol(AngleDegZ)
    # Append the angle triple to the log file.
    Loggerfile.writelines("\n"+"AngleDegX"+","+ str(AngleDegX)+","+"AngleDegY"+","+ str(AngleDegY)+","+"AngleDegZ"+","+ str(AngleDegZ))
    #print("beschleunigung_xout: ", ("%6d" % beschleunigung_xout), " skaliert: ", beschleunigung_xout_skal$
    #print("beschleunigung_yout: ", ("%6d" % beschleunigung_yout), " skaliert: ", beschleunigung_yout_skal$
    #print("beschleunigung_zout: ", ("%6d" % beschleunigung_zout), " skaliert: ", beschleunigung_zout_skal$
    #print("X Rotation: " , get_x_rotation(beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, bes$
    # ~5 Hz update rate.
    time.sleep(0.2)
|
def has_palindrome_permutation(s):
    """Return True if some permutation of *s* is a palindrome.

    A string can be permuted into a palindrome iff at most one character
    occurs an odd number of times.
    """
    odd_chars = set()
    for ch in s:
        # Toggle membership: ch is present iff seen an odd number of times.
        odd_chars ^= {ch}
    return len(odd_chars) <= 1
|
def pick_peaks(arr):
    """Find local peaks of *arr*, treating a plateau's first index as its peak.

    Returns {'pos': [...], 'peaks': [...]} with peak indices and values.
    The first and last elements are never peaks; a plateau at the end of
    the array is not a peak either.
    """
    result = {'pos': [], 'peaks': []}
    last_idx = last_val = None
    rising = False  # True once we have climbed since the last descent
    for idx, val in enumerate(arr):
        if val == last_val:
            # Plateau: keep the index where the plateau started.
            continue
        if last_val is not None and val < last_val:
            if rising:
                # Descent after a climb: the previous change point is a peak.
                result['pos'].append(last_idx)
                result['peaks'].append(last_val)
            rising = False
        else:
            # Ascent counts only from the second element onward.
            rising = last_val is not None
        last_idx, last_val = idx, val
    return result
|
import torch
from torch import nn
from torch.nn import functional as F
from .utils.utils import (
drop_connect,
get_same_padding_conv2d,
Swish,
MemoryEfficientSwish,
calculate_output_image_size
)
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck Block.

    Args:
        block_args (namedtuple): BlockArgs, defined in utils.py.
        global_params (namedtuple): GlobalParam, defined in utils.py.
        image_size (tuple or list): [image_height, image_width].

    References:
        [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
        [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
        [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
    """

    def __init__(self, block_args, global_params, image_size=None):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow
        self._bn_eps = global_params.batch_norm_epsilon
        # Squeeze-and-excitation is enabled only for a valid ratio in (0, 1].
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip # whether to use skip connection and drop connect

        # Expansion phase (Inverted Bottleneck): 1x1 conv widening channels
        # by expand_ratio; skipped entirely when the ratio is 1.
        inp = self._block_args.input_filters # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
        if self._block_args.expand_ratio != 1:
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        image_size = calculate_output_image_size(image_size, s)

        # Squeeze and Excitation layer, if desired: reduce to a fraction of
        # the *input* filters, then expand back to gate the channels.
        if self.has_se:
            Conv2d = get_same_padding_conv2d(image_size=(1, 1))
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Pointwise convolution phase: 1x1 projection back down to output_filters.
        final_oup = self._block_args.output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """MBConvBlock's forward function.

        Args:
            inputs (tensor): Input tensor.
            drop_connect_rate (float): Drop connect rate (float, between 0 and 1).

        Returns:
            Output of this block after processing.
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._expand_conv(inputs)
            x = self._bn0(x)
            x = self._swish(x)
        x = self._depthwise_conv(x)
        x = self._bn1(x)
        x = self._swish(x)

        # Squeeze and Excitation: global-pool, bottleneck, then sigmoid-gate.
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_reduce(x_squeezed)
            x_squeezed = self._swish(x_squeezed)
            x_squeezed = self._se_expand(x_squeezed)
            x = torch.sigmoid(x_squeezed) * x

        # Pointwise Convolution (no activation after projection, per MobileNetV2)
        x = self._project_conv(x)
        x = self._bn2(x)

        # Skip connection and drop connect: only valid when the block does
        # not change resolution (stride 1) or channel count.
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            # The combination of skip connection and drop connect brings about stochastic depth.
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs # skip connection
        return x

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).

        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
"""print ตั๋ว"""
print('[CONSOLE] Program starting...')
import spreadsheet as sh #spreadsheet.py
from pathlib import Path
from tkinter import *
# Explicit imports to satisfy Flake8
from tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage, ttk
from PIL import Image, ImageTk, ImageFont, ImageDraw
# Directory of this script, and the assets folder that ships beside it.
OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")

def relative_to_assets(path: str) -> Path:
    """Return *path* resolved against the bundled assets directory."""
    asset = Path(path)
    return ASSETS_PATH / asset
def booking():
'''func หลัก'''
#window หลัก
window = Tk()
window.geometry("1152x700")
window.configure(bg = "#FFFFFF")
window.title('Booking Airline-Ticket')
#canvas หลัก
canvas = Canvas(window, bg = "#FFFFFF", height = 700, width = 1152, bd = 0, highlightthickness = 0, relief = "ridge")
canvas.place(x = 0, y = 0)
#พื้นหลัง
image_image_1 = PhotoImage(file=relative_to_assets("image_1.png"))
image_1 = canvas.create_image(576.0, 350.0,image=image_image_1)
#พื้นหลัง ช่องกรอกข้อมูล
image_image_2 = PhotoImage(file=relative_to_assets("image_2.png"))
image_2 = canvas.create_image(357.0, 350.0,image=image_image_2)
#พื้นหลัง รูปเครื่องบินก่อนสร้างตั๋ว
image_image_3 = PhotoImage(file=relative_to_assets("image_3.png"))
image_3 = canvas.create_image(872.0,351.0,image=image_image_3)
#พื้นหลังตั๋วก่อนใส่ข้อมูล
image_image_4 = Image.open('image_4.png')
#list_combobox
list_airport = ['Suvarnabhumi', 'Don Mueang', 'Chiang Mai']
list_class = ['First Class', 'Business Class', 'Economy Class']
list_time = ['07:00 AM', '11:00 AM', '04:00 PM', '08:00 PM']
list_seat_chr = ['A', 'B', 'C', 'D', 'E', 'F']
list_seat_num = [i for i in range(1,11)]
#Name
entry_name = StringVar()
entry_image_2 = PhotoImage(file=relative_to_assets("entry_2.png"))
entry_bg_2 = canvas.create_image(285.0, 236.0, image=entry_image_2)
entry_2 = Entry(bd=0, bg="#FFFFFF", textvariable=entry_name, highlightthickness=0)
entry_2.place(x=130.0, y=224.0, width=320.0, height=25.0)
#Tel
entry_tel = StringVar()
entry_image_5 = PhotoImage(file=relative_to_assets("entry_5.png"))
entry_bg_5 = canvas.create_image(225.0, 313.0, image=entry_image_5)
entry_5 = Entry(bd=0, bg="#FFFFFF", textvariable=entry_tel, highlightthickness=0)
entry_5.place(x=130.0, y=302.0, width=198.0, height=25.0)
#Email
entry_email = StringVar()
entry_image_6 = PhotoImage(file=relative_to_assets("entry_6.png"))
entry_bg_6 = canvas.create_image(473.0, 313.0, image=entry_image_6)
entry_6 = Entry(bd=0, bg="#FFFFFF", textvariable=entry_email, highlightthickness=0)
entry_6.place(x=358.0, y=302.0, width=236.0, height=25.0)
#คำนำหน้าชื่อ
radio_name_start = StringVar()
mr = Radiobutton(window, text='Mr.', variable=radio_name_start, value="Mr.")
mr.place(x=175.0, y=188)
ms = Radiobutton(window, text='Ms.', variable=radio_name_start, value="Ms.")
ms.place(x=235.0, y=188)
miss = Radiobutton(window, text='Miss.', variable=radio_name_start, value="Miss")
miss.place(x=298.0, y=188)
#Age
entry_age = StringVar()
entry_image_3 = PhotoImage(file=relative_to_assets("entry_3.png"))
entry_bg_3 = canvas.create_image(532.0, 236.0, image=entry_image_3)
entry_3 = Entry(bd=0, bg="#FFFFFF", textvariable=entry_age, highlightthickness=0)
entry_3.place(x=482.0, y=225.0, width=100.0,height=25.0)
#Departure date
entry_date_departure = StringVar()
entry_image_1 = PhotoImage(file=relative_to_assets("entry_1.png"))
entry_bg_1 = canvas.create_image(497.0, 323+65, image=entry_image_1)
entry_1 = Entry(bd=0, bg="#FFFFFF", textvariable=entry_date_departure, highlightthickness=0)
entry_1.place( x=410.0, y=311+65, width=180.0, height=25.0)
#Return date
entry_date_return = StringVar()
entry_image_4 = PhotoImage(file=relative_to_assets("entry_4.png"))
entry_bg_4 = canvas.create_image(497.0, 410+65, image=entry_image_4)
entry_4 = Entry(bd=0, bg="#FFFFFF", textvariable=entry_date_return, highlightthickness=0)
entry_4.place(x=410.0, y=399+65, width=180.0, height=25.0)
#Time
combo_time_start = StringVar()
canvas.create_text(255.0, 279+65, anchor="nw", text="Time 🕒", fill="#000000",font=("fonts/Manrope Regular", 16 * -1))
time = ttk.Combobox(window, textvariable=combo_time_start, values=list_time)
time.current()
time.place(x=250.0, y=311+65, width=117)
combo_time_return = StringVar()
canvas.create_text(255.0, 366+65, anchor="nw", text="Time 🕘", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
time_return = ttk.Combobox(window, textvariable=combo_time_return, values=list_time)
time_return.current()
time_return.place(x=250.0, y=394+65, width=117)
#Entry text
canvas.create_text(122.0, 191.0, anchor="nw", text="Name️", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
canvas.create_text(122.0, 270.0, anchor="nw", text="Tel.", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
canvas.create_text(351.0, 270.0, anchor="nw", text="Email", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
canvas.create_text(476.0, 191.0, anchor="nw", text="Age", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
canvas.create_text(401.0, 279+65, anchor="nw", text="Departure date ", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
canvas.create_text(515.0, 281+65, anchor="nw", text="( dd-mm-yyyy )", fill="#919191", font=("fonts/Manrope Regular", 12 * -1))
canvas.create_text(401.0, 366+65, anchor="nw", text="Return date", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
canvas.create_text(490.0, 368+65, anchor="nw", text="( dd-mm-yyyy )", fill="#919191", font=("fonts/Manrope Regular", 12 * -1))
#Origin
combo_origin = StringVar()
canvas.create_text(122.0, 279+65, anchor="nw", text="Origin", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
origin = ttk.Combobox(window, textvariable=combo_origin, values=list_airport)
origin.current()
origin.place(x=117.0, y=311+65, width=117)
#Destination
combo_destination = StringVar()
canvas.create_text(122.0, 366+65, anchor="nw", text="Destination", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
destination = ttk.Combobox(window, textvariable=combo_destination)
destination.current()
destination.place(x=117.0, y=394+65, width=117)
#Class
combo_class = StringVar()
canvas.create_text(122.0, 449+65, anchor="nw", text="Class 💵", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
class_airport = ttk.Combobox(window, textvariable=combo_class, values=list_class)
class_airport.current()
class_airport.place(x=117.0, y=477+65, width=117)
#Seat
combo_seat_chr = StringVar()
canvas.create_text(255.0, 449+65, anchor="nw", text="Seat number", fill="#000000", font=("fonts/Manrope Regular", 16 * -1))
seat_chr = ttk.Combobox(window, textvariable=combo_seat_chr)
seat_chr.current()
seat_chr.place(x=250.0, y=477+65, width=55)
combo_seat_num = StringVar()
seat_num = ttk.Combobox(window, textvariable=combo_seat_num, values=list_seat_num)
seat_num.current()
seat_num.place(x=250+55+7, y=477+65, width=55)
#text fight booking
canvas.create_text(108.0, 126.0, anchor="nw", text="Flight Booking", fill="#FFFFFF", font=("fonts/Manrope Regular", 16 * -1))
#function ยอ่ย
def clearinput():
entry_1.delete(0, 'end')
entry_2.delete(0, 'end')
entry_3.delete(0, 'end')
entry_4.delete(0, 'end')
entry_5.delete(0, 'end')
entry_6.delete(0, 'end')
time.set('')
time_return.set('')
origin.set('')
destination.set('')
class_airport.set('')
seat_chr.set('')
seat_num.set('')
def check():
'''ตรวจสอบข้อมูล และราคา'''
print('[CONSOLE] Datas checking...')
# get data from inputs
name_start = radio_name_start.get()
name = entry_name.get()
tel = entry_tel.get()
email = entry_email.get()
age = entry_age.get()
date_departure = entry_date_departure.get()
date_return = entry_date_return.get()
origin = combo_origin.get()
destination = combo_destination.get()
seat_chr = combo_seat_chr.get()
seat_num = combo_seat_num.get()
time_start = combo_time_start.get()
time_return = combo_time_return.get()
class_seat = combo_class.get()
try:
date_departure2 = date_departure.split('-')
d_departure = "%02d.%02d.%s" %(int(date_departure2[0]), int(date_departure2[1]), date_departure2[2][2:])
except:
print('[CONSOLE] ERROR: Departure date cannot be made.')
d_departure = ''
try:
date_return2 = date_return.split('-')
d_return = "%02d.%02d.%s" %(int(date_return2[0]), int(date_return2[1]), date_return2[2][2:])
except:
print('[CONSOLE] ERROR: Return date cannot be made.')
d_return = ''
try:
seat = "%s%02d" %(seat_chr, int(seat_num))
except:
print('[CONSOLE] ERROR: Seat cannot be made.')
seat = ''
#หน้าต่าง error
def error_warning():
"""error window"""
error_window = Tk()
error_window.title('Error')
#แต่ละ condition ที่จะพิมพ์คำออกมา
if sh.error_blank(name_start):
Label(error_window, text='- Please enter your name prefix.').pack()
if sh.error_blank(name):
Label(error_window, text='- Please enter your name.').pack()
if sh.error_numeric(age):
if sh.error_numeric(age) == 'blank':
Label(error_window, text='- Please enter your age.').pack()
elif sh.error_numeric(age) == 'num':
Label(error_window, text='- Age must be a number.').pack()
if sh.error_numeric(tel):
if sh.error_numeric(tel) == 'blank':
Label(error_window, text='- Please enter your phone number.').pack()
elif sh.error_numeric(tel) == 'num':
Label(error_window, text='- Phone number must be a number.').pack()
if sh.error_email(email):
if sh.error_email(email) == 'blank':
Label(error_window, text='- Please enter your email.').pack()
elif sh.error_email(email) == 'format':
Label(error_window, text='- Email must be written in a right format.').pack()
if sh.error_blank(origin):
Label(error_window, text='- Please select your origin.').pack()
if sh.error_blank(time_start):
Label(error_window, text='- Please select your origin time.').pack()
if sh.error_date(date_departure): #ถ้าทำที่เลือกวันที่เสร็จก็ลบทิ้งเลย
if sh.error_date(date_departure) == 'blank':
Label(error_window, text='- Please select your origin date.').pack()
elif sh.error_date(date_departure) == 'format':
Label(error_window, text='- Origin date must be written in a right format.').pack()
elif sh.error_date(date_departure) == 'day':
Label(error_window, text='- Day in origin date is invalid.').pack()
elif sh.error_date(date_departure) == 'month':
Label(error_window, text='- Month in origin date is invalid.').pack()
if sh.error_blank(destination):
Label(error_window, text='- Please select your destination.').pack()
if sh.error_blank(time_return):
Label(error_window, text='- Please select your destination time.').pack()
if sh.error_date(date_return): #ถ้าทำที่เลือกวันที่เสร็จก็ลบทิ้งเลย
if sh.error_date(date_return) == 'blank':
Label(error_window, text='- Please select your destination date.').pack()
elif sh.error_date(date_return) == 'format':
Label(error_window, text='- Destination date must be written in a right format.').pack()
elif sh.error_date(date_return) == 'day':
Label(error_window, text='- Day in destination date is invalid.').pack()
elif sh.error_date(date_return) == 'month':
Label(error_window, text='- Month in destination date is invalid.').pack()
if sh.error_blank(class_seat):
Label(error_window, text='- Please select your class.').pack()
if sh.error_blank(seat_chr) or sh.error_blank(seat_num):
Label(error_window, text='- Your seat number is invalid.').pack()
if sh.dupl_name(name):
Label(error_window, text='- This name has been used.').pack()
if sh.dupl_tel(tel):
Label(error_window, text='- This phone number has been used.').pack()
if sh.dupl_email(email):
Label(error_window, text='- This email has been used.').pack()
if sh.dupl_seat(seat, origin, d_departure, time_start, 'start'):
Label(error_window, text='- The seat you picked for your departure flight is not available.').pack()
if sh.dupl_seat(seat, destination, d_return, time_return, 'end'):
Label(error_window, text='- The seat you picked for your return flight is not available.').pack()
Button(error_window, text='OK', command=error_window.destroy).pack()
def check_window():
def create_ticket():
'''สร้างตั๋ว'''
check_data.destroy()
# save data
data = { #put data into dict to use with sh
"name_start": name_start,
"name": name,
"age": age,
"tel": tel,
"email": email,
"start": origin,
"start_time": time_start,
"start_date": d_departure,
"dest": destination,
"dest_time": time_return,
"dest_date": d_return,
"class_airport": class_seat,
"seat": seat
}
sh.write(data)
print('[CONSOLE] Data wrote to database.')
# draw images
draw=ImageDraw.Draw(image_image_4)
draw.text((703.0-673, 283.0-106), text="JKF", fill="#000000", font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 64))
draw.text((920.0-673, 283.0-106), text="ROM", fill="#000000", font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 64 ))
draw.text((715.0-673, 381.0-106), text="DATE", fill="#000000", font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
draw.text((801.0-673, 380.0-106), text="%s %s" %(d_departure, d_return), fill="#0284B9", font=ImageFont.truetype("fonts/Manrope-Medium.ttf", 24))
draw.text((801.0-673, 380.0-106), text=" |", fill="#000000", font=ImageFont.truetype("fonts/Manrope-Medium.ttf", 24))
draw.text((801.0-673, 423.0-106), text="%s %s" %(time_start, time_return), fill="#0284B9", font=ImageFont.truetype("fonts/Manrope-Medium.ttf", 24)) # time
draw.text((801.0-673, 423.0-106), text=" |", fill="#000000", font=ImageFont.truetype("fonts/Manrope-Medium.ttf", 24))
draw.text((716.0-673, 426.0-106), text="TIME", fill="#000000", font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
# --- Render the boarding-pass text onto the ticket image ------------------
# Pixel coordinates come from the design mock-up; the constant offsets
# (x-673, y-106) translate canvas positions into image positions.
draw.text((715.0-673, 191.0-106), text="PASSENGER NAME", fill="#000000", font=ImageFont.truetype("fonts/Manrope-Regular.ttf", 10))
draw.text((715.0-673, 210.0-106), text=name_start + " " + name.upper(), fill="#0284B9", font=ImageFont.truetype("fonts/Manrope-Regular.ttf", 16)) # passenger name
draw.text((715.0-673, 237.0-106), text="FROM", fill="#000000", font=ImageFont.truetype("fonts/Manrope-Regular.ttf", 10))
draw.text((716.0-673, 256.0-106), text=origin,fill="#0284B9",font=ImageFont.truetype("fonts/Manrope-Regular.ttf", 16)) # origin
draw.text((898.0-673, 237.0-106), text="TO",fill="#000000",font=ImageFont.truetype("fonts/Manrope-Regular.ttf", 10))
draw.text((898.0-673, 256.0-106), text=destination,fill="#0284B9",font=ImageFont.truetype("fonts/Manrope-Regular.ttf", 16)) # destination
# Flight / gate numbers are hard-coded for the two legs; the same seat
# string is printed for both legs.
draw.text((727.0-673, 511.0-106), text="JR1103",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
draw.text((857.0-673, 511.0-106), text="R3",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
draw.text((955.0-673, 511.0-106), text=seat,fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24)) # seat 1
draw.text((727.0-673, 541.0-106), text="RJ1503",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
draw.text((857.0-673, 541.0-106), text="J3",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
draw.text((955.0-673, 541.0-106), text=seat,fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24)) # seat 2
draw.text((740.0-673, 483.0-106), text="FLIGHT",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 16))
draw.text((853.0-673, 483.0-106), text="GATE",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 16))
draw.text((958.0-673, 483.0-106), text="SEAT",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 16))
draw.text((806.0-673, 124.0-106), text="PSIT Airline",fill="#FFFFFF",font=ImageFont.truetype("fonts/Manrope-ExtraBold.ttf", 24))
#image_image_4.show()
# Display the finished ticket image on the Tk canvas.
photo = ImageTk.PhotoImage(image_image_4)
canvas.create_image(872.0,351.0,image=photo)
# NOTE(review): Canvas.create_image takes positional coordinates and has no
# x=/y= options, so the call below raises if reached -- it looks like an
# erroneous duplicate of the call above.  TODO confirm and remove.
canvas.create_image(x=670,y=100,image=photo)
print('[CONSOLE] Image processed.')
check_data = Tk()
check_data.title('Check Information')
#เฟรมตรวจสอบข้อมูลที่กรอกมา
labelframe = LabelFrame(check_data, text='Please check your information')
labelprice = LabelFrame(check_data, text='Please check your flight')
Label(labelframe, text=' Name').grid(row=1, column=0, pady=3, sticky=W)
Label(labelframe, text=name_start+' '+name.upper()).grid(row=1, column=1, columnspan=2, pady=3, sticky=W)
Label(labelframe, text=' Age').grid(row=2, column=0, pady=3, sticky=W)
Label(labelframe, text=age+' years').grid(row=2, column=1, pady=3, sticky=W)
Label(labelframe, text=' Tel').grid(row=3, column=0, pady=3, sticky=W)
Label(labelframe, text=tel).grid(row=3, column=1, pady=3, sticky=W)
Label(labelframe, text=' Email').grid(row=4, column=0, pady=3, sticky=W)
Label(labelframe, text=email).grid(row=4, column=1, pady=3, sticky=W)
Label(labelprice, text=' From').grid(row=0, column=0, pady=3, sticky=W)
Label(labelprice, text=origin+' | '+time_start+' | '+d_departure).grid(row=0, column=1, pady=3, sticky=W)
Label(labelprice, text=' To').grid(row=1, column=0, pady=3, sticky=W)
Label(labelprice, text=destination+' | '+time_return+' | '+d_return).grid(row=1, column=1, pady=3,sticky=W)
Label(labelprice, text=' Class').grid(row=2, column=0, pady=3, sticky=W)
Label(labelprice, text=class_seat).grid(row=2, column=1, pady=3, sticky=W)
Label(labelprice, text=' Seat').grid(row=3, column=0, pady=3, sticky=W)
Label(labelprice, text=seat).grid(row=3, column=1, pady=3, sticky=W)
labelframe.grid(row=0, column=0, padx=10, pady=5, ipadx=5, ipady=5)
labelprice.grid(row=0, column=1, padx=10, pady=5, ipadx=5, ipady=5)
Label(check_data, text='THB 1,250', font=("fonts/Manrope Blod", 46)).grid(sticky='news',row=1, column=0 , columnspan=2)
label_button = Label(check_data)
Button(label_button, text='Confirm', command=create_ticket).grid(row=0, column=1, pady=5)
Button(label_button, text='Cancel', command=check_data.destroy).grid(row=0, column=0, pady=5)
label_button.grid(row=2, column=0 , columnspan=2)
check_data.resizable(False, False)
check_data.mainloop()
# Validate the form fields and guard against a duplicate booking before
# showing the confirmation window.
# NOTE(review): check_error receives date_departure/date_return while
# check_duplicate receives d_departure/d_return -- confirm both names exist
# and refer to the intended values.
any_error = sh.check_error(name_start, name, age, tel, email, origin, time_start,
                           date_departure, destination, time_return, date_return,
                           class_seat, seat_chr, seat_num)
any_duplicate = sh.check_duplicate(name, tel, email, origin, d_departure, time_start,
                                   destination, d_return, time_return, seat)
if any_error or any_duplicate:
    # At least one field is invalid, or the identical booking already exists.
    error_warning()
else:
    check_window()
print('[CONSOLE] Datas checked.')
#change destination
def change_dest(var, indx, mode):
    """Refresh the destination choices whenever the origin selection changes.

    The currently selected origin airport is removed from the destination
    combobox so a passenger cannot fly from an airport to itself.  The
    (var, indx, mode) signature matches the Tk variable-trace callback.
    """
    destination.set("")
    choices = list_airport[:]
    choices.remove(combo_origin.get())
    destination.config(values=choices)

combo_origin.trace_add("write", change_dest)
#change seat_char by class
#list_class = ['First Class', 'Business Class', 'Economy Class']
def change_seat(var, indx, mode):
    """Limit the seat-letter choices to the rows of the selected class.

    First Class uses the first two letters, Business the next two and
    Economy the remainder; an unrecognised class leaves the choices as-is.
    The (var, indx, mode) signature matches the Tk variable-trace callback.
    """
    seat_chr.set("")
    class_rows = {
        "First Class": slice(0, 2),
        "Business Class": slice(2, 4),
        "Economy Class": slice(4, None),
    }
    chosen = combo_class.get()
    if chosen in class_rows:
        seat_chr.config(values=list_seat_chr[class_rows[chosen]])

combo_class.trace_add("write", change_seat)
# Buttons are created last because their command callbacks reference the
# functions defined above.  (translated from Thai)
button_image_1 = PhotoImage(file=relative_to_assets("button_1.png"))
# Save the rendered ticket image to disk.
button_save = Button(image=button_image_1, borderwidth=0, highlightthickness=0, command=lambda: image_image_4.save('img3.png'), relief="flat")
button_save.place(x=948.0, y=617.0, width=124.0, height=46.0)
button_image_2 = PhotoImage(file=relative_to_assets("button_2.png"))
# Reset every input field.
button_clear = Button(image=button_image_2, borderwidth=0, highlightthickness=0, command=clearinput, relief="flat")
button_clear.place(x=363.0, y=617.0, width=124.0, height=46.0)
button_image_3 = PhotoImage(file=relative_to_assets("button_3.png"))
# Validate the form and open the confirmation dialog.
button_check = Button(image=button_image_3, borderwidth=0, highlightthickness=0, command=check, relief="flat")
button_check.place(x=511.0, y=617.0, width=124.0, height=46.0)
window.resizable(False, False)
window.mainloop()
# Module-level entry: announce startup, then build and run the main window.
print('[CONSOLE] Program started.')
booking()
|
import argparse
from data_manager import DataManager
from da_manager import DaData
from plotter import Plotter
from plotter import ATTR
from intersect import Intersect
from modes import BUFFER_LIST
from modes import BUFFER_METHOD
# from my_utils import base_path_from_date
class Runner(object):
    """
    Plots dissemination areas (DAs) and, optionally, the transit stops whose
    buffers intersect them, writing the result to an HTML map file.

    Fix: the bare Python-2 ``print`` statements are replaced with the
    parenthesized form, which produces identical output under Python 2
    (printing a single parenthesized expression) and is valid Python 3.
    """

    def __init__(self, args):
        # self._markers = args.markers
        # self._all_markers = args.all_markers
        self._da_id = args.da_id                  # specific DA to plot, or None for all
        self._raster_flag = args.rasters          # overlay 100 m raster cells
        self._marker_flag = args.markers          # add stop/DA markers (slow)
        self._buffer_method = args.buffer_method  # how the stop buffers were built
        self._dataset = args.dataset
        self._dataman = DataManager(self._dataset, link_route_shapes=False, link_stops=False)
        self._daman = DaData()
        self._plot_stops = False

    def run(self):
        """Build the map for the selected DA(s) and write it to disk."""
        plotter = Plotter()
        # saskatoon_bb = self._daman.get_saskatoon_bounding_box()
        # plotter.add_polygon(saskatoon_bb)

        if self._da_id is None:
            das = self._daman.get_das()
            file_name = "temp/maps/das_all.html"
        else:
            das = [self._daman.get_da(self._da_id)]
            file_name = "temp/maps/da_%d.html" % self._da_id

        # NOTE(review): the stop-intersection section below always uses the
        # FIRST DA, even when plotting all DAs -- confirm this is intended.
        da = das[0]
        p = da.get_polygon()
        self._plot_stops = True

        if self._buffer_method not in BUFFER_LIST:
            for buffer_method in BUFFER_LIST:
                print("Valid buffer method: %s" % buffer_method)
            raise ValueError("Need valid buffer method")

        # Load the precomputed stop-buffer intersections for this DA.
        intersect = Intersect()
        all_stops = self._dataman.get_stops()
        intersect.load(self._buffer_method, self._dataset, all_stops)
        intersecting_stops = intersect.get_intersections_for_group2_id(da.get_id())

        for stop_tuple in intersecting_stops:
            p = stop_tuple[0]
            stop_id = stop_tuple[1]
            print(stop_tuple)
            plotter.add_polygon(p)
            if self._marker_flag:
                title = "%d" % stop_id
                plotter.add_marker(p.get_centroid(), title, title)
                stop = self._dataman.get_stop(stop_id)
                plotter.add_marker(stop.get_point(), title, title)
                # Outline the stop's 400 m circular buffer (transparent fill).
                stop.make_buffer(BUFFER_METHOD.CIRCLE_400)
                stop_p = stop.get_buffer()
                stop_p.set_attribute(ATTR.FILL_OPACITY, 0)
                plotter.add_polygon(stop_p)

        # Draw every requested DA (raw and clipped outlines in blue).
        for da in das:
            p = da.get_polygon()
            p.set_attribute(ATTR.FILL_COLOR, "#0000ff")
            plotter.add_polygon(p)
            p2 = da.get_clipped_polygon()
            p2.set_attribute(ATTR.FILL_COLOR, "#0000ff")
            plotter.add_polygon(p2)
            if self._marker_flag:
                centroid = p.get_centroid()
                title = "%d" % da.get_id()
                hover = "hover"
                plotter.add_marker(centroid, title, hover)
            if self._raster_flag:
                rasters = da.get_rasters(100)
                for raster in rasters:
                    p = raster.get_polygon()
                    p.set_attribute(ATTR.FILL_OPACITY, 0)
                    plotter.add_polygon(p)

        plotter.plot(file_name)
if __name__ == "__main__":
    # Command-line entry point: -d/--dataset is required, everything else
    # is optional.  The parsed namespace is handed straight to Runner.
    parser = argparse.ArgumentParser(description='Plot Dissemination Areas')
    parser.add_argument("-d", "--dataset", help="Dataset", type=str, required=True)
    parser.add_argument("-a", "--da_id", help="DA ID", type=int)
    parser.add_argument("-m", "--markers", help="Include stop markers (slow and messy)", required=False, action='store_true')
    parser.add_argument("-r", "--rasters", help="Include rasters", required=False, action='store_true')
    parser.add_argument("-b", "--buffer_method", help="Stop buffer method", required=False, type=str)
    args = parser.parse_args()
    runner = Runner(args)
    runner.run()
|
"""
Michael Heskett
10X single cell rna-seq pipeline
This script uses pandas data frames and numpy ndarrays somewhat loosely.
"""
# Standard library.
import os
import re
import csv

# Third-party scientific stack.
import numpy as np
import pandas as pd
import scipy.io
import scipy.stats
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import linalg
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
# Fix: "silhouettIe_score" was a typo that raised ImportError; the correct
# name (used by silhouette_kmeans below) is silhouette_score.
from sklearn.metrics import silhouette_samples, silhouette_score
def read_10x(directory, name):
    """Read a 10X Genomics matrix directory into a dict.

    Parameters
    ----------
    directory : str
        Directory containing barcodes.tsv, genes.tsv and matrix.mtx
        (with or without a trailing path separator).
    name : str
        Sample name, stored under the "name" key.

    Returns
    -------
    dict with keys "name", "barcodes" (cell barcodes, one per line of
    barcodes.tsv), "genes" (gene symbols, column 2 of genes.tsv) and
    "matrix" (dense genes x cells matrix from the MatrixMarket file).

    Raises
    ------
    ValueError if any of the three expected files is missing.

    Fixes: files opened via csv.reader(open(...)) were never closed; path
    concatenation required a trailing separator (os.path.join does not);
    quit() in library code is replaced by an exception callers can handle.
    """
    data = {}
    data["name"] = name
    for filename in os.listdir(directory):
        path = os.path.join(directory, filename)
        base = os.path.basename(filename)
        if base == "barcodes.tsv":
            with open(path) as handle:
                data["barcodes"] = [row[0] for row in csv.reader(handle, delimiter="\t")]
        elif base == "genes.tsv":
            # Column 0 is the Ensembl ID; keep the human-readable symbol.
            with open(path) as handle:
                data["genes"] = [row[1] for row in csv.reader(handle, delimiter="\t")]
        elif base == "matrix.mtx":
            # Sparse MatrixMarket file; densify for downstream numpy use.
            data["matrix"] = scipy.io.mmread(path).todense()
    if len(data) != 4:
        raise ValueError("missing 10X input file in %s" % directory)
    return data
def write_10x(data, out_dir):
    """Write the dict produced by read_10x() as a tab-separated genes-by-cells
    table named "<name>.txt" inside *out_dir*.

    The header row holds the cell barcodes; each subsequent row is a gene
    symbol followed by its counts.  Assumes data["matrix"] is a numpy matrix
    (as produced by read_10x), whose row slices are 2-D -- hence tolist()[0].

    Fixes: the explicit csvfile.close() was redundant inside the ``with``
    block; the output path now uses os.path.join so *out_dir* works with or
    without a trailing separator.
    """
    out_path = os.path.join(out_dir, data["name"] + ".txt")
    with open(out_path, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t', quotechar='"')
        writer.writerow([''] + data["barcodes"])
        for i in range(data["matrix"].shape[0]):
            writer.writerow([data["genes"][i]] + data["matrix"][i, :].tolist()[0])
def remove_dup_genes(df):
    """Collapse duplicate gene names, keeping the copy with the highest total
    count, and return the table transposed to cells x genes with gene names
    as column labels.

    *df* is a genes-by-cells table (the layout written by write_10x) whose
    FIRST column holds the gene names.

    Fixes: a stray line-continuation backslash after ``.transpose()`` made
    the original a SyntaxError; the per-gene totals now exclude the
    non-numeric gene-name column, which breaks row sums on modern pandas.
    """
    name_col = df.columns.values[0]
    # Total counts per gene, excluding the gene-name column itself.
    totals = pd.Series(df.drop(name_col, axis="columns").sum(axis="columns"),
                       name="sums")
    df = (df.join(totals)
            .sort_values(by=[name_col, "sums"], ascending=[True, False])
            .drop_duplicates(subset=name_col, keep="first")
            .drop(labels="sums", axis="columns")
            .set_index(name_col)
            .astype("int")
            .transpose())
    return df
def read_matrix(table):
    """Load a tab-separated cells-by-genes matrix into a DataFrame.

    The first row supplies the column labels and the first column the row
    index, mirroring the layout written by write_10x().
    """
    frame = pd.read_csv(table, sep="\t", header=0, index_col=0)
    return frame
def log_norm(df, log=True, size=10**5, median=False):
    """Library-size-normalize a cells-by-genes count matrix.

    Each cell's counts are divided by that cell's total count and then
    scaled either by the median library size (median=True) or by *size*.
    When *log* is true the result is log2(x + 1); the +1 avoids log(0).
    """
    lib_sizes = df.sum(axis="columns")  # total counts per cell
    scale = np.median(lib_sizes) if median else size
    normed = df.div(lib_sizes, axis="rows") * scale
    return np.log2(normed + 1) if log else normed
def make_cells_per_gene_plot(df, out_path):
    """Save a histogram of how many cells express each gene.

    Fix: Series.hist() returns an Axes, not a Figure, and Axes has no
    savefig() -- go through the parent Figure to save and close.
    """
    ax = df.astype("bool").sum(axis="rows").hist(bins=30, figsize=(12, 6))
    ax.set_xlabel("Cells per gene")
    ax.set_ylabel("frequency")
    fig = ax.get_figure()
    fig.savefig(out_path)
    plt.close(fig)
def make_genes_per_cell_plot(df, out_path):
    """Save a histogram of how many genes are detected in each cell.

    Fix: Series.hist() returns an Axes, not a Figure, and Axes has no
    savefig() -- go through the parent Figure to save and close.
    """
    ax = df.astype("bool").sum(axis="columns").hist(bins=30, figsize=(12, 6))
    ax.set_xlabel("Genes per cell")
    ax.set_ylabel("frequency")
    fig = ax.get_figure()
    fig.savefig(out_path)
    plt.close(fig)
def filter_matrix(
        df, min_cells_per_gene=5,
        min_genes_per_cell=1700,
        max_genes_per_cell=10000,
        highest_genes=False,
        max_library_size=100000,
        num_keep=5000,
        filter_mito=True):
    """Quality-filter a cells-by-genes count matrix.

    Steps, in order: optionally drop mitochondrial genes (names starting
    with "MT-", case-insensitive); drop cells whose total UMI count reaches
    max_library_size; keep cells whose detected-gene count lies in
    [min_genes_per_cell, max_genes_per_cell]; keep genes detected in at
    least min_cells_per_gene cells; optionally keep only the num_keep genes
    with the highest mean expression.
    """
    if filter_mito:
        keep_cols = [gene for gene in df.columns
                     if not re.search("^MT-", gene, re.IGNORECASE)]
        df = df[keep_cols]

    # Cell-level filters.
    df = df.loc[df.sum(axis="columns") < max_library_size, :]
    df = df.loc[df.astype('bool').sum(axis="columns") >= min_genes_per_cell, :]
    df = df.loc[df.astype('bool').sum(axis="columns") <= max_genes_per_cell, :]

    # Gene-level filter.
    df = df.loc[:, df.astype('bool').sum(axis="rows") >= min_cells_per_gene]

    if highest_genes:
        top = df.mean(axis="rows").sort_values(ascending=False)[:num_keep].index.values
        df = df.loc[:, top]
    return df
def center_and_scale(df):
    """Center each gene at zero and divide by its range, so every gene's
    values lie within [-1, 1]."""
    col_means = df.mean(axis="rows")
    col_ranges = df.max(axis="rows") - df.min(axis="rows")
    return (df - col_means) / col_ranges
### I don't think this is implemented correctly...
def pca_svd(df):
    """PCA via full SVD.

    Returns (pcs, explained_variance_): the projected data U @ S and the
    per-component variances s**2 / n_rows.

    Fix: DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy().
    NOTE(review): the data is not centered here, so these are true principal
    components only if *df* was already centered (see center_and_scale).
    """
    mat = df.to_numpy()
    U, s, V = linalg.svd(mat, full_matrices=True)
    S = linalg.diagsvd(s, mat.shape[0], mat.shape[1])
    pcs = U.dot(S)
    explained_variance_ = (s ** 2) / mat.shape[0]
    return (pcs, explained_variance_)
def pca_by_svd(df):
    """PCA via numpy's SVD; returns (pcs, explained_variance_) like pca_svd.

    Fixes: the original called the removed DataFrame.as_matrix(), referenced
    the nonexistent scipy.linalg.diag (diagsvd is the rectangular form), and
    fell off the end without returning anything.
    """
    mat = df.to_numpy()
    U, s, V = np.linalg.svd(mat, full_matrices=True)
    S = linalg.diagsvd(s, mat.shape[0], mat.shape[1])
    pcs = U.dot(S)
    explained_variance_ = (s ** 2) / mat.shape[0]
    return (pcs, explained_variance_)
def plot_pca(df, prin_comps, out_path, x=0, y=1):
    """Scatter the chosen pair of principal components, colored by each
    cell's log2 library size, and save the figure to *out_path*."""
    color_by = np.log2(df.sum(axis="columns"))
    figure = plt.figure(figsize=(12, 12))
    plt.scatter(prin_comps[:, x], prin_comps[:, y], c=color_by, cmap='Greys', s=30)
    figure.savefig(out_path)
    plt.close(figure)
def plot_tsne(df, out_path, prin_comps=None):
    """Run 2-D t-SNE (on the raw matrix, or on the first 10 principal
    components when *prin_comps* is given) and save a scatter of the
    embedding colored by log2 library size.

    Fixes: the mutable default argument [] becomes None (an empty sequence
    is still treated as "no components" for backward compatibility); the
    bare truth test on *prin_comps* is ambiguous for numpy arrays; and
    DataFrame.as_matrix() (removed in pandas 1.0) becomes to_numpy().
    """
    lib_size = np.log2(df.sum(axis="columns"))
    tsne = TSNE(n_components=2, random_state=0)
    if prin_comps is None or len(prin_comps) == 0:
        tsne_model = tsne.fit_transform(df.to_numpy())
    else:
        # Only the first 10 components are used, as in the original.
        tsne_model = tsne.fit_transform(prin_comps[:, range(10)])
    fig = plt.figure(figsize=(12, 12))
    plt.scatter(tsne_model[:, 0], tsne_model[:, 1], c=lib_size, cmap="Greys",
                s=50, lw=0.4, edgecolor="black")
    fig.savefig(out_path)
    plt.close(fig)
def remove_zero(df):
    """Drop all-zero genes (columns) first, then all-zero cells (rows)."""
    nonzero_genes = df.astype("bool").sum(axis="rows") > 0
    df = df.loc[:, nonzero_genes]
    nonzero_cells = df.astype("bool").sum(axis="columns") > 0
    return df.loc[nonzero_cells, :]
def silhouette_kmeans(model, sample_name="sample", max_clust=10):
    """For k = 2..max_clust, run KMeans on *model* (an n_samples x
    n_features array), record the average silhouette score, and save the
    standard two-panel silhouette diagnostic per k (per-sample silhouette
    widths on the left, the clustered points on the right), plus a final
    scatter of average score versus k.  Scores are also appended to
    "<sample_name>_kmeans_scores.txt".  Returns [[k, avg_score], ...].

    Layout follows the scikit-learn silhouette-analysis example; only the
    first two feature columns are drawn in the right-hand panel.

    NOTE(review): cm.spectral was removed in matplotlib >= 2.2 (the modern
    name is cm.nipy_spectral); the score-file handle *f* is opened each
    iteration and never closed; the per-k figure name reuses the inner loop
    variable i, which only equals k-1 because the loop just ended --
    confirm and clean these up.
    """
    sil_avg = []
    for n_clust in range(2, max_clust+1):
        # Create subplot with 1 row 2 columns
        fig, (ax1, ax2) = plt.subplots(1, 2)
        fig.set_size_inches(18, 7)
        ax1.set_xlim([-0.1, 1])
        ax1.set_ylim([0, len(model) + (n_clust + 1) * 10])
        # Create cluster object
        clusterer = KMeans(n_clusters=n_clust)
        cluster_labels = clusterer.fit_predict(model)
        # Silhouette average scores
        silhouette_avg = silhouette_score(model, cluster_labels)
        f = open(sample_name + "_kmeans_scores.txt","a")
        print("For n_clusters =", n_clust,
              "The average silhouette_score is :", silhouette_avg, file=f)
        sil_avg += [[n_clust,silhouette_avg]]
        # Silhouette score for each sample
        sample_silhouette_values = silhouette_samples(model, cluster_labels)
        y_lower = 10
        for i in range(n_clust):
            ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
            ith_cluster_silhouette_values.sort()
            size_cluster_i = ith_cluster_silhouette_values.shape[0]
            y_upper = y_lower + size_cluster_i
            color = cm.spectral(float(i) / n_clust)
            ax1.fill_betweenx(np.arange(y_lower, y_upper),
                              0,
                              ith_cluster_silhouette_values,
                              facecolor=color, edgecolor=color, alpha=0.7)
            # Label the silhouette plots with their cluster numbers at the middle
            ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
            # Compute the new y_lower for next plot
            y_lower = y_upper + 10  # 10 for the 0 samples
        ax1.set_title("The silhouette plot for the various clusters.")
        ax1.set_xlabel("The silhouette coefficient values")
        ax1.set_ylabel("Cluster label")
        # The vertical line for average silhoutte score of all the values
        ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
        ax1.set_yticks([])  # Clear the yaxis labels / ticks
        ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
        # 2nd Plot showing the actual clusters formed
        colors = cm.spectral(cluster_labels.astype(float) / n_clust)
        ax2.scatter(model[:, 0], model[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                    c=colors)
        # Labeling the clusters
        centers = clusterer.cluster_centers_
        # Draw white circles at cluster centers
        ax2.scatter(centers[:, 0], centers[:, 1],
                    marker='o', c="white", alpha=1, s=200)
        for i, c in enumerate(centers):
            ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
        ax2.set_title("The visualization of the clustered data.")
        ax2.set_xlabel("Feature space for the 1st feature")
        ax2.set_ylabel("Feature space for the 2nd feature")
        plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                      "with n_clusters = %d" % n_clust),
                     fontsize=14, fontweight='bold')
        plt.savefig(sample_name + str(i+1),dpi=300)
    #plot silhouette scores on scatterplot
    plt.figure(figsize=(12,6))
    plt.scatter(range(2,max_clust+1),[x[1] for x in sil_avg])
    plt.savefig(sample_name + "_sil_plot.png")
    return sil_avg
def cluster_kmeans(model, n):
    """Partition *model* into n KMeans clusters and return the label array."""
    return KMeans(n_clusters=n).fit_predict(model)
def export_for_edger(df, labels, out_path, name="name", n_samples=10):
    """Write pseudo-bulk samples per cluster in an edgeR-friendly layout.

    For every cluster, *n_samples* random 20% subsets of its cells are drawn
    and their counts summed, producing one "cluster_<i>_sample_<j>" column
    per draw.  The table (genes x pseudo-samples) is written to
    "<out_path><name>_edgeR_format.txt" as tab-separated text.

    Fix: the original called df.assign(labels="labels"), tagging every cell
    with the literal string "labels" instead of its cluster label, so the
    per-cluster selection never matched anything; assign the *labels*
    argument instead, mirroring mannwhitney_DE below.
    """
    result = pd.DataFrame()
    df = df.assign(labels=labels)
    n_clusters = len(df["labels"].unique())
    for i in range(0, n_clusters):
        cluster_cells = df[df["labels"] == i]
        for j in range(0, n_samples):
            result["cluster_" + str(i) + "_sample_" + str(j)] = cluster_cells.sample(frac=0.2).sum()
    # The summed "labels" row is bookkeeping, not a gene -- drop it.
    result = result.drop("labels")
    result.to_csv(out_path + name + "_edgeR_format.txt", sep="\t", index=True, header=True)
def cell_cycle_score(df, **kwargs):
    """Score each cell against arbitrary named gene sets and return, per
    cell, the name of the best-scoring set (e.g. a cell-cycle phase).

    Each keyword maps a set name to a list of gene names; genes absent from
    *df* are ignored.  Mean set expression is z-scored across cells and
    then across sets (per Macosko et al. 2015) before taking the argmax.

    Fix: selecting missing gene labels with .loc raises KeyError on modern
    pandas; reindex() restores the old tolerant behavior (missing genes
    become all-NaN columns that dropna removes).
    """
    result = pd.DataFrame()
    for gene_set in kwargs:
        result[gene_set] = df\
            .reindex(columns=kwargs[gene_set])\
            .dropna(axis="columns", how="any")\
            .mean(axis="columns")
    # Normalize across cells, then across sets for each cell.
    result = result.apply(lambda x: (x - np.mean(x)) / (np.std(x)), axis="rows")
    result = result.apply(lambda x: (x - np.mean(x)) / (np.std(x)), axis="columns")
    return result.idxmax(axis="columns")
def refine_gene_sets(df, gene_set):
    """Return the genes of *gene_set* whose expression correlates
    (Spearman rho > 0.15) with the mean expression of the rest of the set.

    Fix: the original indexed df.loc[:, gene_set] with the full set, which
    raises KeyError on modern pandas when the set contains genes absent
    from *df*; restrict to the genes actually present first (on old pandas
    the missing genes became NaN columns that mean() skipped, so results
    are unchanged).
    """
    present = [gene for gene in gene_set if gene in df.columns]
    kept = []
    for gene in present:
        # Mean expression of the other genes in the (present) set.
        rest_mean = df.loc[:, present].drop(gene, axis="columns").mean(axis="columns")
        spear = scipy.stats.spearmanr(rest_mean, df.loc[:, gene])
        if spear.correlation > 0.15:
            kept.append(gene)
    return kept
def mannwhitney_DE(df, labels):
    """Rank-sum differential expression: for every cluster and gene, test
    the cells inside the cluster against all other cells.

    Returns a DataFrame with columns (cluster, gene, pvalue) sorted by
    ascending p-value.
    """
    df = df.assign(labels=labels).set_index("labels", drop=True, append=False)
    n_clusters = len(set(df.index))
    records = []
    for cluster in range(n_clusters):
        inside = df[df.index == cluster]
        outside = df[df.index != cluster]
        for gene in df.columns:
            pvalue = scipy.stats.mannwhitneyu(inside.loc[:, gene],
                                              outside.loc[:, gene])[1]
            records.append([cluster, gene, pvalue])
    table = pd.DataFrame(records, columns=["cluster", "gene", "pvalue"])
    return table.sort_values("pvalue", ascending=True)
def list_all_files(root_dir, substring):
    """Recursively collect absolute paths under *root_dir* and return the
    .txt files whose path contains *substring*."""
    found = []
    for dirpath, _, filenames in os.walk(root_dir):
        found.extend(os.path.abspath(os.path.join(dirpath, fname))
                     for fname in filenames)
    return [path for path in found if re.search(".*"+substring+".*\.txt", path)]
def create_dict_of_dfs(file_list):
    """Load each tab-separated file into a DataFrame, keyed by its base name
    without the .txt extension.

    Fix: str.rstrip(".txt") strips any trailing '.', 't' or 'x' CHARACTERS,
    not the suffix (e.g. "datat.txt" -> "data"); use os.path.splitext to
    remove exactly the extension.
    """
    data = {}
    for path in file_list:
        key = os.path.splitext(os.path.basename(path))[0]
        data[key] = pd.read_table(path, sep="\t", header=0, index_col=0)
    return data
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-10 09:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename HostDetails.memorySize to the
    lowercase memorysize (a field rename only -- no data is changed)."""

    # Must run after the migration that added HostDetails.uuid.
    dependencies = [
        ('host', '0005_hostdetails_uuid'),
    ]

    operations = [
        migrations.RenameField(
            model_name='hostdetails',
            old_name='memorySize',
            new_name='memorysize',
        ),
    ]
|
# Prompt for the user's first and last name and echo the full name.
# Fixes: "Frst" typo in the prompt; the separator was an empty string,
# which printed the two names run together.
i = input("Enter first name")
j = input("Enter last name")
print(i+" "+j)
|
def mineLocation(field):
    """Return [row, column] of the single mine (the value 1) in a square
    2-D grid; returns None if no cell contains a mine."""
    for row_index, row in enumerate(field):
        for col_index, cell in enumerate(row):
            if cell == 1:
                return [row_index, col_index]
'''
You've just discovered a square (NxN) field and you notice a warning sign.
The sign states that there's a single bomb in the 2D grid-like field in front of you.
Write a function mineLocation/MineLocation that accepts a 2D array,
and returns the location of the mine. The mine is represented as the
integer 1 in the 2D array. Areas in the 2D array that are not the mine
will be represented as 0s.
The location returned should be an array (Tuple<int, int> in C#) where the
first element is the row index, and the second element is the column index
of the bomb location (both should be 0 based). All 2D arrays passed into your
function will be square (NxN), and there will only be one mine in the array.
Examples:
mineLocation( [ [1, 0, 0], [0, 0, 0], [0, 0, 0] ] ) => returns [0, 0]
mineLocation( [ [0, 0, 0], [0, 1, 0], [0, 0, 0] ] ) => returns [1, 1]
mineLocation( [ [0, 0, 0], [0, 0, 0], [0, 1, 0] ] ) => returns [2, 1]
'''
|
# -*- coding: utf-8 -*-
"""
界面选项卡模型
"""
__author__ = 'JohnnyB0Y'
class ItemModel:
    """Option lists for the UI tabs: the selectable Objective-C property
    attribute keywords, grouped by category."""

    def __init__(self):
        # Atomicity keywords.
        self.property_atomic_text = ['nonatomic', 'atomic']
        # Ownership / setter-semantics keywords.
        self.property_assign_text = ['assign', 'strong', 'weak', 'copy']
        # Read-access keywords.
        self.property_readonly_text = ['readonly', 'readwrite']
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 14:33:10 2020
@author: user
"""
from mcpi.minecraft import Minecraft
# Connect to the locally running Minecraft server.
mc = Minecraft.create()
import time

# Start from the player's current tile position.
x,y,z = mc.player.getTilePos()
mc.player.setTilePos(x,y,z)
time.sleep(5)
# Teleport the player 100 blocks straight up, twice, pausing five seconds
# between jumps.
y = y + 100
mc.player.setTilePos(x,y,z)
time.sleep(5)
y = y + 100
mc.player.setTilePos(x,y,z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 15:14:02 2018
@author: Kazantsev Andrey
03.02.2019
Changes"
Rewriting file for work with best params from file
"""
import os
import numpy as np
import matplotlib.pyplot as plt
# Ask which pulsar to process; every input/output file is named after it.
name_pulsar = input('Enter name pulsar: ')

# Transposed columns of best_par_<name>.txt: 0=F0, 1=RAJ, 2=DECJ; column 3
# is only used in the plot title -- TODO confirm the column meanings.
par_data = np.genfromtxt('best_par_' + name_pulsar + '.txt', dtype=str).T

# Template .par file whose parameter lines get patched per iteration.
with open(name_pulsar + '_start.par', 'r') as file:
    lines = file.readlines()

iteration = 0
for i in range(len(par_data[0])):
    # Patch RAJ/DECJ/F0 in place (assumes they sit on lines 2-4 of the
    # template -- TODO confirm the template layout).
    lines[1] = 'RAJ ' + par_data[1][i] + '\n'
    lines[2] = 'DECJ ' + par_data[2][i] + '\n'
    lines[3] = 'F0 ' + par_data[0][i] + ' 1' + '\n'
    with open(name_pulsar + '.par', 'w') as file:
        for line in lines:
            file.write(line)
    # Run tempo on the timing data, then dump the residuals to ASCII.
    os.system('tempo ' + name_pulsar + '.tim > outtempo.log')
    os.system(
        '~/work/tempo/util/print_resid/./print_resid -mre > ' +
        'resid_' + name_pulsar + '.ascii')
    data = np.genfromtxt('resid_' + name_pulsar + '.ascii').T
    # Plot residuals versus MJD for this parameter set.
    plt.close()
    plt.title(
        str(iteration)
        + '/' + par_data[0][i]
        + '/' + par_data[1][i]
        + '/' + par_data[2][i]
        + '/' + par_data[3][i])
    plt.xlabel('MJD')
    plt.ylabel('Residuals, us')
    plt.plot(data[0], data[1], '+')
    # Create the output directory if it does not exist yet.
    # (translated from Russian)
    if os.path.isdir('./plot_res_' + name_pulsar + '/'):
        pass
    else:
        os.system('mkdir ' + './plot_res_' + name_pulsar + '/')
    plt.savefig(
        './plot_res_' + name_pulsar + '/'
        + str(iteration)
        + '_'
        + name_pulsar
        + '.png',
        format='png')
    iteration += 1
|
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv, cholesky
import math as mt
def sigma_points(x_esti, Sigma, kappa):
    """Generate the 2n+1 sigma points and weights for the unscented transform.

    NOTE(review): the *kappa* argument is immediately overwritten with the
    3-n heuristic, so the caller's value is ignored -- confirm intended.
    """
    n = len(x_esti)
    Xi = np.zeros((n, 2*n + 1))
    W = np.zeros(2*n + 1)
    kappa = 3 - n  # heuristic spread parameter (shadows the argument)

    # Center point and its weight.
    Xi[:, 0] = x_esti
    W[0] = kappa / (n + kappa)

    # Symmetric points along the columns of the scaled covariance root.
    U = cholesky((n + kappa) * Sigma)
    for col in range(n):
        Xi[:, col + 1] = x_esti + U[:, col]
        Xi[:, n + col + 1] = x_esti - U[:, col]
        W[col + 1] = 1 / (2 * (n + kappa))
        W[n + col + 1] = W[col + 1]
    return Xi, W
def UT(Xi, W, noiseCov):
    """Unscented transform: weighted mean and covariance of the sigma
    points, with the additive noise covariance folded in."""
    mean = (Xi * W).sum(axis=1)
    deviations = Xi - mean.reshape(-1, 1)
    cov = (W * deviations) @ deviations.T
    return mean, cov + noiseCov
def fx(Xi):
    """Process model applied to each sigma-point column.

    From the transition matrix, the state appears to be
    [x, y, yaw, v, yaw_rate] -- TODO confirm.
    NOTE(review): the matrix is linearized around the GLOBAL current
    estimate x_esti (and uses the global dt), not around each sigma point
    -- confirm this is intended.
    """
    cosy=mt.cos(x_esti[2])
    siny=mt.sin(x_esti[2])
    A=np.array([[1,0,0,dt*cosy,0],
                [0,1,0,dt*siny,0],
                [0,0,1,0,dt],
                [0,0,0,1,0],
                [0,0,0,0,1]])
    return A @ Xi
def hx(Xi):
    """Measurement model: observe the first three state components
    (x position, y position, yaw) of each sigma-point column."""
    H = np.array([[1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 0],
                  [0, 0, 1, 0, 0]])
    return H @ Xi
def unscented_kalman_filter(z_meas, x_esti, P, WD=float("inf"), L=float("inf")):
    """Unscented Kalman Filter step with a rectangular validation gate.

    The position measurement is translated to the predicted position and
    rotated by the inverse of the current heading; the update only runs
    when the gated measurement falls inside the WD x L rectangle, otherwise
    the prediction is returned unchanged.  Relies on the module-level
    kappa, Q and R, and on fx/hx above.

    Fixes: WD/L get infinite defaults so the existing three-argument call
    site works (the gate then always passes); z_meas_trans was assigned
    into an EMPTY numpy array (IndexError); mt.sin was called on a
    one-element list (TypeError).
    """
    # (1) Sample sigma points and weights.
    Xi, W = sigma_points(x_esti, P, kappa)

    # (2) Predict mean and error covariance of the state.
    fXi = fx(Xi)
    x_pred, P_x = UT(fXi, W, Q)

    # Express the position measurement in the heading-aligned frame of the
    # current estimate.
    z_meas_trans = np.array([z_meas[0] - x_pred[0],
                             z_meas[1] - x_pred[1]])
    Rot_inverse = np.array([[mt.cos(x_esti[2]), mt.sin(x_esti[2])],
                            [-mt.sin(x_esti[2]), mt.cos(x_esti[2])]])
    z_meas_rot = Rot_inverse @ z_meas_trans

    if -WD/2 < z_meas_rot[0] < WD/2 and -L/2 < z_meas_rot[1] < L/2:
        # (3) Mean and error covariance of the expected observation.
        hXi = hx(fXi)
        z_pred, P_z = UT(hXi, W, R)
        # (4) Cross covariance and Kalman gain.
        Pxz = W * (fXi - x_pred.reshape(-1, 1)) @ (hXi - z_pred.reshape(-1, 1)).T
        K = Pxz @ inv(P_z)
        # (5) Updated state estimate and error covariance.
        x_esti = x_pred + K @ (z_meas - z_pred)
        P = P_x - K @ P_z @ K.T
        return x_esti, P
    else:
        # Measurement rejected by the gate: keep the prediction.
        return x_pred, P_x
# Input parameters.
time_end = 10
dt = 0.1

# Initialization for system model.
# Matrix: A, H, Q, R, P_0
# Vector: x_0
Q = np.array([[0.1, 0, 0, 0, 0],
              [0, 0.1, 0, 0, 0],
              [0, 0, 1, 0, 0],
              [0, 0, 0, 1, 0],
              [0, 0, 0, 0, 1]])
R = np.array([[0.01, 0, 0],
              [0, 0.01, 0],
              [0, 0, 1]])
# Initialization for estimation.
# NOTE(review): from fx's transition matrix, the 5 states appear to be
# [x, y, yaw, v, yaw_rate], not the three listed -- confirm.
x_0 = np.array([0, 0, 0, 0,0])  # [horizontal position, horizontal velocity, vertical position].
P_0 = 1 * np.eye(5)
# Initialization for sigma points.
kappa = 0

time = np.arange(0, time_end, dt)
n_samples = len(time)
# Per-step history buffers for the estimated and measured quantities.
xpos_esti_save = np.zeros(n_samples)
ypos_esti_save = np.zeros(n_samples)
vel_esti_save = np.zeros(n_samples)
yaw_esti_save = np.zeros(n_samples)
yawr_esti_save = np.zeros(n_samples)
xpos_meas_save = np.zeros(n_samples)
ypos_meas_save=np.zeros(n_samples)
yaw_meas_save=np.zeros(n_samples)
x_esti, P = None, None

# Synthetic ground truth along y = x^2.
# NOTE(review): np.random.normal(0, 0.1**2) is a SINGLE scalar added to the
# whole array (not per-sample noise), and vel_w is never used -- confirm.
x_t=np.array(range(100)) + np.random.normal(0,0.1**2)
y_t=x_t**2
vel_w = np.random.normal(0,0.1**2)

for i in range(0,n_samples):
    # Noisy heading measurement from the curve's slope.
    yaw_t=mt.atan2(2*x_t[i],1) + np.random.normal(0,0.09**2)
    z_meas=np.array([x_t[i],y_t[i],yaw_t])
    if i == 0:
        v_t=0
        x_esti, P = x_0, P_0
    else:
        v_t=mt.sqrt(1+(y_t[i]-y_t[i-1])**2)/dt
        # NOTE(review): called without the WD/L gate arguments declared by
        # unscented_kalman_filter -- a TypeError unless defaults exist.
        x_esti, P = unscented_kalman_filter(z_meas, x_esti, P)
    print('x_esti: ',x_esti)
    print('z_meas: ',z_meas)
    # Record this step's estimate and measurement.
    xpos_esti_save[i] = x_esti[0]
    ypos_esti_save[i] = x_esti[1]
    yaw_esti_save[i] = x_esti[2]
    vel_esti_save[i] = x_esti[3]
    yawr_esti_save[i] = x_esti[4]
    xpos_meas_save[i] = z_meas[0]
    ypos_meas_save[i]=z_meas[1]
    yaw_meas_save[i]=z_meas[2]
|
import numpy as np
import math
from ntpath import basename
from os.path import getsize
import os
import pprint
def CalcMSE(OrigY, OrigU, OrigV, ReconY, ReconU, ReconV):
    """Per-plane mean squared error between the first frames of the original
    and reconstructed YUV planes (arrays shaped height x width x frames, as
    returned by YUV_Imread).

    Returns (MSEY, MSEU, MSEV) as floats.

    Fixes: the original subtracted the whole 1-D slice Recon[i, j] (all
    frames) from the scalar Orig[i, j, 0], yielding array-valued "MSEs",
    and looped over every pixel in pure Python; compare frame 0 with
    frame 0 using vectorized numpy instead.  As before, U and V are
    assumed to share the U plane's dimensions (4:2:0 chroma).
    """
    diffY = OrigY[:, :, 0] - ReconY[:, :, 0]
    diffU = OrigU[:, :, 0] - ReconU[:, :, 0]
    diffV = OrigV[:, :, 0] - ReconV[:, :, 0]
    MSEY = float(np.mean(diffY ** 2))
    MSEU = float(np.mean(diffU ** 2))
    MSEV = float(np.mean(diffV ** 2))
    return MSEY, MSEU, MSEV
def YUV_Imread(filename):
    """Read a planar YUV 4:2:0 file into (Y, U, V) float arrays of shape
    (height, width, frames) and (height/2, width/2, frames).

    The file name must follow "Name_WxH_FPS_<8bit|...>_420.yuv" (e.g.
    "BasketballDrill_832x480_50fps_8bit_420.yuv") so the geometry and
    sample size can be parsed from it; non-8bit content is read as 16-bit
    samples.

    Fix: the original called np.fromfile once PER FRAME without a count,
    which consumes the whole file on the first iteration and leaves nothing
    for subsequent frames; read exactly one frame's worth of samples per
    iteration and let numpy reshape the planes (also far faster than the
    per-pixel Python loops).
    """
    YUVFile = basename(filename)
    FileMetrics = YUVFile.split('_')
    YUVResolution = FileMetrics[1].split('x')
    YUVBit_sign = FileMetrics[3]
    width = int(YUVResolution[0])
    height = int(YUVResolution[1])
    Total = width * height
    FileByteSize = int(getsize(filename))

    if YUVBit_sign == '8bit':
        Frames = int(FileByteSize / (width * height * 1.5))
        read_bit_str = np.uint8
    else:
        # Higher bit depths are stored as 16-bit samples (2 bytes each).
        Frames = int(FileByteSize / (width * height * 1.5) / 2)
        read_bit_str = np.uint16

    Y = np.zeros([height, width, Frames])
    U = np.zeros([int(height / 2), int(width / 2), Frames])
    V = np.zeros([int(height / 2), int(width / 2), Frames])

    chroma = Total // 4              # samples per chroma plane (4:2:0)
    frame_samples = Total + 2 * chroma
    with open(filename, 'rb') as fid:
        for i in range(Frames):
            frame = np.fromfile(fid, read_bit_str, count=frame_samples)
            Y[:, :, i] = frame[:Total].reshape(height, width)
            U[:, :, i] = frame[Total:Total + chroma].reshape(height // 2, width // 2)
            V[:, :, i] = frame[Total + chroma:].reshape(height // 2, width // 2)
    return Y, U, V
def CalcPSNR(OrigImgAdd, ReconImageAdd):
    """Compute per-plane PSNR (dB, against the 8-bit peak 255) between an
    original and a reconstructed YUV file.

    Returns [PSNRY, PSNRU, PSNRV], each rounded to two decimals.
    NOTE: identical planes (MSE of 0) raise ZeroDivisionError, exactly as
    in the original implementation.
    """
    orig_planes = YUV_Imread(OrigImgAdd)
    recon_planes = YUV_Imread(ReconImageAdd)
    mses = CalcMSE(*orig_planes, *recon_planes)
    peak_sq = ((2 ** 8) - 1) ** 2
    return [round(10 * math.log10(peak_sq / mse), 2) for mse in mses]
#Note, Everything below works only when the Reconstructed Image filename has the following:
# "Lambda"+LambdaValue+"OriginalImageName"
lambdalist = [0.1]  # lambda values currently evaluated (others commented out below)
# One reconstructed-sequence folder per clip, aligned by position with
# originalNameList below.
FolderNameList = [r'C:\Path\To\Folder\Containing\Reconstructed\BQTerrace',
                  r'C:\Path\To\Folder\Containing\Reconstructed\FourPeople',
                  r'C:\Path\To\Folder\Containing\Reconstructed\Johnny',
                  r'C:\Path\To\Folder\Containing\Reconstructed\KristenAndSara',
                  r'C:\Path\To\Folder\Containing\Reconstructed\PartyScene',
                  r'C:\Path\To\Folder\Containing\Reconstructed\RaceHorses1',
                  r'C:\Path\To\Folder\Containing\Reconstructed\RaceHorses2',
                  r'C:\Path\To\Folder\Containing\Reconstructed\SlideEditing',
                  r'C:\Path\To\Folder\Containing\Reconstructed\SlideShow',
                  r'C:\Path\To\Folder\Containing\Reconstructed\ArenaOfValor',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BasketballDrill',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BasketballDrillText',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BasketballDrive',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BasketballPass',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BlowingBubbles',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BQMall',
                  r'C:\Path\To\Folder\Containing\Reconstructed\BQSquare']
#pprint.pprint(FolderNameList)
# Original (uncompressed) file names; position i pairs with FolderNameList[i].
originalNameList=['BQTerrace_1920x1080_60fps_8bit_420_00.yuv',
                  'FourPeople_1280x720_60fps_8bit_420_00.yuv',
                  'Johnny_1280x720_60fps_8bit_420_00.yuv',
                  'KristenAndSara_1280x720_60fps_8bit_420_00.yuv',
                  'PartyScene_832x480_50fps_8bit_420_00.yuv',
                  'RaceHorses_416x240_30fps_8bit_420_00.yuv',
                  'RaceHorses_832x480_30fps_8bit_420_00.yuv',
                  'SlideEditing_1280x720_30fps_8bit_420_00.yuv',
                  'SlideShow_1280x720_20fps_8bit_420_00.yuv',
                  'ArenaOfValor_1920x1080_60fps_8bit_420_00.yuv',
                  'BasketballDrill_832x480_50fps_8bit_420_00.yuv',
                  'BasketballDrillText_832x480_50fps_8bit_420_00.yuv',
                  'BasketballDrive_1920x1080_50fps_8bit_420_00.yuv',
                  'BasketballPass_416x240_50fps_8bit_420_00.yuv',
                  'BlowingBubbles_416x240_50fps_8bit_420_00.yuv',
                  'BQMall_832x480_60fps_8bit_420_00.yuv',
                  'BQSquare_416x240_60fps_8bit_420_00.yuv']
Index =0
PSNRVALS = {}  # maps "<clip> Lam<lambda>" -> [PSNRY, PSNRU, PSNRV]
for orig in originalNameList :
    OrigImgAddr= os.path.join(r'C:\Path\To\Folder\Containing\original\Image\\',orig)
    FileMetrics = orig.split('_')
    nameofit = FileMetrics[0]
    #Create the address of the image
    #ReconImgAddr0_01 = FolderNameList[Index]+r'\Lambda0.01'+orig
    #ReconImgAddr0_025 = FolderNameList[Index]+r'\Lambda0.025'+orig
    #ReconImgAddr0_03 = FolderNameList[Index]+r'\Lambda0.03'+orig
    #ReconImgAddr0_035 = FolderNameList[Index]+r'\Lambda0.035'+orig
    #ReconImgAddr0_04 = FolderNameList[Index]+r'\Lambda0.04'+orig
    #ReconImgAddr0_05 = FolderNameList[Index]+r'\Lambda0.05'+orig
    ReconImgAddr0_1 = FolderNameList[Index]+r'\Lambda0.1'+orig
    #print(ReconImgAddr0_01)
    #print(orig)
    #PSNR Values to Dictionary
    #PSNRVALS[nameofit +' Lam0.01']= CalcPSNR(OrigImgAddr,ReconImgAddr0_01)
    #PSNRVALS[nameofit + ' Lam0.025']= CalcPSNR(OrigImgAddr,ReconImgAddr0_025)
    #PSNRVALS[nameofit +' Lam0.03']= CalcPSNR(OrigImgAddr,ReconImgAddr0_03)
    #PSNRVALS[nameofit +' Lam0.035']= CalcPSNR(OrigImgAddr,ReconImgAddr0_035)
    #PSNRVALS[nameofit +' Lam0.04']= CalcPSNR(OrigImgAddr,ReconImgAddr0_04)
    #PSNRVALS[nameofit +' Lam0.05']= CalcPSNR(OrigImgAddr,ReconImgAddr0_05)
    PSNRVALS[nameofit +' Lam0.1']= CalcPSNR(OrigImgAddr,ReconImgAddr0_1)
    #print(PSNRVALS)
    Index = Index +1
pprint.pprint(PSNRVALS)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, subprocess, functools
from PyQt5 import QtWidgets, QtGui, QtCore
from playerVLC import *
import mainWindow # import of mainWindow.py made with pyuic5
from musicBase import *
from musicDirectory import *
from database import *
from dialogMusicDirectoriesLoader import *
from streamObserver import *
from albumThread import *
from musicBaseThread import *
from playlistWidget import *
def open_file(filename):
    """Open *filename* with the platform's default application.

    Uses os.startfile on Windows, the "open" command on macOS and
    "xdg-open" on other platforms.
    """
    if sys.platform == "win32":
        os.startfile(filename)
        return
    launcher = "open" if sys.platform == "darwin" else "xdg-open"
    subprocess.call([launcher, filename])
class MainWindowLoader(QtWidgets.QMainWindow):
    """Main application window for pyzik.

    Wires the generated Qt UI (mainWindow.Ui_MainWindow) to the music
    collection (musicBase), the VLC-backed player, and the helper threads
    that load album files, explore music directories and observe stream
    titles.

    NOTE(review): the original file reached review with its indentation
    stripped; block nesting below was reconstructed from the code's logic
    and should be confirmed against upstream history.
    """

    def __init__(self, parent=None,app=None,musicbase=None,player=None,translator=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.app = app
        self.translator = translator
        self.musicBase = musicbase
        self.player = player
        # Persistent user settings (currently only the volume is stored).
        self.settings = QtCore.QSettings('pyzik', 'pyzik')
        self.firstShow = True
        self.playList = None
        self.currentArtist = artist("",0)
        self.currentAlbum = album("")
        self.coverPixmap = QtGui.QPixmap()
        self.defaultPixmap = QtGui.QPixmap()
        self.ui = mainWindow.Ui_MainWindow()
        self.ui.setupUi(self)
        self.setTitleLabel("")
        self.setWindowTitle("PyZik")
        self.initAlbumTableWidget()
        self.initTrackTableWidget()
        self.showArtists()
        self.loadSettings()
        #Connect UI triggers
        self.ui.listViewArtists.selectionModel().currentChanged.connect(self.onArtistChange)
        self.ui.actionMusic_directories.triggered.connect(self.onMenuMusicDirectories)
        self.ui.actionExplore_music_directories.triggered.connect(self.onMenuExplore)
        self.ui.actionRandom_album.triggered.connect(self.ramdomAlbum)
        self.ui.actionDelete_database.triggered.connect(self.onMenuDeleteDatabase)
        self.ui.actionFuzzyGroovy.triggered.connect(self.onPlayFuzzyGroovy)
        self.ui.actionPlaylist.triggered.connect(self.showPlaylist)
        self.ui.actionLanguageSpanish.triggered.connect(functools.partial(self.changeLanguage, 'es'))
        self.ui.actionLanguageFrench.triggered.connect(functools.partial(self.changeLanguage, 'fr'))
        self.ui.actionLanguageEnglish.triggered.connect(functools.partial(self.changeLanguage, 'en'))
        self.ui.playButton.clicked.connect(self.onPlayAlbum)
        self.ui.addAlbumButton.clicked.connect(self.onAddAlbum)
        self.ui.pauseButton.clicked.connect(self.onPauseAlbum)
        #self.ui.nextButton.clicked.connect(self.player.mediaListPlayer.next)
        self.ui.openDirButton.clicked.connect(self.onOpenDir)
        #self.ui.previousButton.clicked.connect(self.player.mediaListPlayer.previous)
        self.ui.searchEdit.textChanged.connect(self.onSearchChange)
        self.ui.searchEdit.returnPressed.connect(self.onSearchEnter)
        self.ui.tableWidgetAlbums.selectionModel().currentRowChanged.connect(self.onAlbumChange)
        self.ui.tableWidgetAlbums.customContextMenuRequested.connect(self.handleHeaderMenu)
        self.shortcutRandomAlbum = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+R"), self)
        self.shortcutRandomAlbum.activated.connect(self.ramdomAlbum)
        #Connect VLC triggers
        self.player.mpEnventManager.event_attach(vlc.EventType.MediaPlayerMediaChanged, self.onPlayerMediaChangedVLC)
        self.player.mpEnventManager.event_attach(vlc.EventType.MediaPlayerPaused, self.paused)
        self.player.mpEnventManager.event_attach(vlc.EventType.MediaPlayerPlaying, self.isPlaying)
        #self.player.mpEnventManager.event_attach(vlc.EventType.MediaPlayerPositionChanged, self.onPlayerPositionChanged)
        #self.player.mpEnventManager.event_attach(vlc.EventType.MediaPlayerAudioVolume , self.setVolumeSliderFromPlayer)
        # self.volume was restored from QSettings by loadSettings() above.
        self.ui.volumeSlider.setMaximum(100)
        self.ui.volumeSlider.setValue(self.volume)
        self.player.setVolume(self.volume)
        self.ui.volumeSlider.valueChanged.connect(self.setVolume)
        #Write message in status bar
        self.ui.statusBar.showMessage("PyZik")
        # Background thread that watches the playing stream's title.
        self.threadStreamObserver = streamObserver()
        self.threadStreamObserver.player = self.player
        self.threadStreamObserver.titleChanged.connect(self.setStatus)
        self.player.mpEnventManager.event_attach(vlc.EventType.MediaPlayerStopped, self.threadStreamObserver.resetPreviousTitle)
        self.threadStreamObserver.start()
        # Thread that loads an album's cover image and tracks from disk.
        self.loadAlbumFilesThread = loadAlbumFilesThread()
        self.loadAlbumFilesThread.setTerminationEnabled(True)
        self.loadAlbumFilesThread.imagesLoaded.connect(self.showAlbumCover)
        self.loadAlbumFilesThread.tracksLoaded.connect(self.showAlbumTracks)
        # Thread that scans the configured music directories for albums.
        self.exploreAlbumsDirectoriesThread = exploreAlbumsDirectoriesThread()
        #self.exploreAlbumsDirectoriesThread.progressChanged.connect(self.showAlbumTracks)
        self.exploreAlbumsDirectoriesThread.exploreCompleted.connect(self.showArtists)
        # Rescale the cover whenever its container widget is resized.
        self.ui.coverWidget.resizeEvent = self.resizeEvent

    def showEvent(self,event):
        """Qt hook: show a random album the first time the window appears."""
        #This function is called when the mainWindow is shown
        if self.firstShow == True:
            self.ramdomAlbum()
            self.firstShow = False

    def onPlayFuzzyGroovy(self):
        # Start the FuzzyGroovy stream, show the playlist and apply the
        # current slider volume.
        self.player.playFuzzyGroovy()
        self.showPlaylist(True)
        self.setVolume(self.getVolumeFromSlider())

    def ramdomAlbum(self):
        """Pick a random album from the collection and display it.

        ('ramdom' is a historical typo; menu/shortcut connections use this
        exact name, so it must not be renamed in isolation.)
        """
        alb = self.musicBase.albumCol.getRandomAlbum()
        self.currentAlbum = alb
        if alb is not None:
            print("RamdomAlb="+alb.title)
            art = self.musicBase.artistCol.getArtistByID(alb.artistID)
            self.currentArtist = art
            self.selectArtistListView(art)
            self.showArtist(art)
        #self.showAlbum(alb)

    def setVolume(self, volume):
        # Forward slider changes to the player.
        self.player.setVolume(volume)

    def getVolumeFromSlider(self):
        return self.ui.volumeSlider.value()

    def setVolumeSliderFromPlayer(self,event):
        # Sync the UI slider with the player's current volume.
        volume = self.player.getVolume()
        self.ui.volumeSlider.setValue(volume)

    def setStatus(self,msg):
        # Show *msg* in the status bar (e.g. current stream title).
        #self.ui.labelArtist.setText(msg)
        self.ui.statusBar.showMessage(msg)

    def paused(self,event):
        # VLC event callback (MediaPlayerPaused).
        print("Paused!")

    def isPlaying(self,event):
        # VLC event callback (MediaPlayerPlaying).
        print("isPlaying!")

    '''
    Init widgets
    '''
    def initAlbumTableWidget(self):
        # Albums table layout: no row headers; col 0 (title) stretches,
        # col 1 (year) sized to contents, col 2 (album ID) hidden.
        self.ui.tableWidgetAlbums.setRowCount(0)
        hHeader = self.ui.tableWidgetAlbums.horizontalHeader()
        vHeader = self.ui.tableWidgetAlbums.verticalHeader()
        vHeader.hide()
        hHeader.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
        hHeader.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
        hHeader.hideSection(2)

    def initTrackTableWidget(self):
        # Tracks table uses the same layout rules as the albums table.
        self.ui.tableWidgetTracks.setRowCount(0)
        hHeader = self.ui.tableWidgetTracks.horizontalHeader()
        vHeader = self.ui.tableWidgetTracks.verticalHeader()
        vHeader.hide()
        hHeader.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
        hHeader.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
        hHeader.hideSection(2)

    '''
    Menu Actions
    '''
    def onMenuMusicDirectories(self):
        # Open the music-directories dialog (modal).
        self.musicBase.db = database()
        dirDiag = DialogMusicDirectoriesLoader(self.musicBase)
        dirDiag.show()
        dirDiag.exec_()

    def onMenuExplore(self):
        # Launch the directory-exploration thread with a progress widget.
        self.exploreAlbumsDirectoriesThread.musicBase = self.musicBase
        self.wProgress = progressWidget()
        self.exploreAlbumsDirectoriesThread.progressChanged.connect(self.wProgress.setValue)
        self.exploreAlbumsDirectoriesThread.directoryChanged.connect(self.wProgress.setDirectoryText)
        self.exploreAlbumsDirectoriesThread.exploreCompleted.connect(self.wProgress.close)
        self.exploreAlbumsDirectoriesThread.exploreCompleted.connect(self.onExploreCompleted)
        # Closing the progress widget cancels the exploration.
        self.wProgress.progressClosed.connect(self.exploreAlbumsDirectoriesThread.stop)
        self.exploreAlbumsDirectoriesThread.start()

    def onExploreCompleted(self,event):
        # Re-open the database after exploration finished.
        self.musicBase.db = database()

    def onMenuDeleteDatabase(self):
        # Wipe the database and reset the UI lists.
        self.musicBase.db.dropAllTables()
        self.musicBase.emptyDatas()
        self.showArtists()
        self.initAlbumTableWidget()

    def handleHeaderMenu(self, pos):
        # Context menu on the albums table (actions not yet wired up).
        print('column(%d)' % self.ui.tableWidgetAlbums.horizontalHeader().logicalIndexAt(pos))
        menu = QtWidgets.QMenu()
        menu.addAction('Add')
        menu.addAction('Delete')
        menu.exec(QtGui.QCursor.pos())

    '''
    Artist listView functions
    '''
    def showArtists(self):
        # Add artists in the QListView
        model = QtGui.QStandardItemModel(self.ui.listViewArtists)
        for art in self.musicBase.artistCol.artists:
            itemArt = QtGui.QStandardItem(art.name)
            # Cross-link the item and the artist for later lookups.
            itemArt.artist = art
            art.itemListViewArtist = itemArt
            model.appendRow(itemArt)
        self.ui.listViewArtists.setModel(model)
        self.ui.listViewArtists.show()
        # Re-connect because setModel() created a new selection model.
        self.ui.listViewArtists.selectionModel().currentChanged.connect(self.onArtistChange)

    def setHiddenAllArtistItem(self,hide):
        #Hide all artists
        model = self.ui.listViewArtists.model()
        for i in range(model.rowCount()):
            self.ui.listViewArtists.setRowHidden(i,hide)

    def getFirstVisibleArtistItem(self):
        # Return the first artist item not hidden by the search filter,
        # or None if every row is hidden.
        model = self.ui.listViewArtists.model()
        for i in range(model.rowCount()):
            if(not self.ui.listViewArtists.isRowHidden(i)):
                return model.item(i)

    def onArtistChange(self,item):
        #When call from listView, item is a QModelIndex
        nrow = item.row()
        model = self.ui.listViewArtists.model()
        # Only refresh when a different artist was selected.
        if self.currentArtist.artistID != model.item(nrow).artist.artistID :
            self.showArtist(model.item(nrow).artist)

    def selectArtistListView(self,artist):
        # Select and scroll to *artist* in the list view.
        item = artist.itemListViewArtist
        selModel = self.ui.listViewArtists.selectionModel()
        selModel.reset()
        selModel.select(item.index(), QtCore.QItemSelectionModel.SelectCurrent)
        self.ui.listViewArtists.scrollTo(item.index(), QtWidgets.QAbstractItemView.PositionAtCenter)

    '''
    Search artist functions
    '''
    def onSearchEnter(self):
        #After typing, the user hit enter
        #to select the first artist found
        item = self.getFirstVisibleArtistItem()
        if item is not None:
            selModel = self.ui.listViewArtists.selectionModel()
            selModel.reset()
            selModel.select(item.index(), QtCore.QItemSelectionModel.Select)
            self.showArtist(item.artist)

    def onSearchChange(self,item):
        #When user write a search, shows only matching artists
        search = self.ui.searchEdit.text()
        if(len(search)==0):
            self.setHiddenAllArtistItem(False)
        else:
            self.setHiddenAllArtistItem(True)
            items = self.ui.listViewArtists.model().findItems(search,QtCore.Qt.MatchContains)
            for item in items:
                i = item.row()
                self.ui.listViewArtists.setRowHidden(i,False)

    '''
    Album tableWidget functions
    '''
    def getAlbumFromTable(self):
        #Return the selected album
        selAlbItems = self.ui.tableWidgetAlbums.selectedItems()
        for item in selAlbItems:
            r = item.row()
            albumIDSel = self.ui.tableWidgetAlbums.item(r,2).text()
        # NOTE(review): indentation reconstructed — the lookup below uses the
        # ID of the last selected item; confirm against upstream history.
        alb = self.musicBase.albumCol.getAlbum(albumIDSel)
        if(alb.albumID == 0):
            print("Album is Empty. Item:"+str(item))
        return alb

    def onAlbumChange(self,item):
        # Show the album whose row became current (col 2 holds its ID).
        if item.row() >= 0:
            print("OnAlbumChange:"+str(item.row()))
            albumIDSel = self.ui.tableWidgetAlbums.item(item.row(),2).text()
            alb = self.musicBase.albumCol.getAlbum(albumIDSel)
            if(alb.albumID != 0):
                self.showAlbum(alb)
            else:
                print("No album to show")

    def showArtist(self,artist):
        # Make *artist* current and list their albums.
        self.currentArtist = artist
        self.showAlbums(self.currentArtist)

    def showAlbums(self,artist):
        #Add albums in the QTableView
        print("Show albums Art="+artist.name)
        if self.currentAlbum is None:
            self.currentAlbum = artist.getRandomAlbum()
        # If the current album belongs to another artist, pick one of
        # this artist's albums instead.
        if self.currentAlbum.artistID is not artist.artistID:
            self.currentAlbum = artist.getRandomAlbum()
        self.ui.tableWidgetAlbums.setRowCount(0)
        indexToSel = 0
        i=0
        artist.sortAlbums()
        for alb in artist.albums:
            self.ui.tableWidgetAlbums.insertRow(i)
            titleItem = QtWidgets.QTableWidgetItem(alb.title)
            titleItem.setFlags(titleItem.flags() ^ QtCore.Qt.ItemIsEditable)
            self.ui.tableWidgetAlbums.setItem(i,0,titleItem)
            yearItem = QtWidgets.QTableWidgetItem(str(alb.year))
            yearItem.setFlags(yearItem.flags() ^ QtCore.Qt.ItemIsEditable)
            self.ui.tableWidgetAlbums.setItem(i,1,yearItem)
            # Hidden column 2 stores the album ID for later lookups.
            idItem = QtWidgets.QTableWidgetItem(str(alb.albumID))
            idItem.setFlags(idItem.flags() ^ QtCore.Qt.ItemIsEditable)
            self.ui.tableWidgetAlbums.setItem(i,2,idItem)
            if(i==0 and self.currentAlbum == None):
                print("Show first album")
            elif(alb.albumID==self.currentAlbum.albumID):
                print("showAlbums() --> Select album="+alb.title)
                indexToSel = i
                #self.ui.tableWidgetAlbums.selectRow(i)
            i+=1
        self.ui.tableWidgetAlbums.selectRow(indexToSel)
        self.ui.tableWidgetAlbums.scrollTo(self.ui.tableWidgetAlbums.currentIndex(), QtWidgets.QAbstractItemView.PositionAtCenter)
        #self.ui.tableWidgetAlbums.show()

    def showAlbum(self,album):
        """Display *album*: update the title label and start the background
        thread that loads its cover and track list from disk."""
        print("showAlbum: "+album.title)
        self.currentAlbum = album
        self.setTitleLabel(self.currentArtist.name,album.title,album.year)
        #Start a thread to load album datas from directory
        #When updated, triggers launch showAlbumCover and showAlbumTracks
        if self.loadAlbumFilesThread.isRunning() :
            print("Stop Thread loadAlbum")
            self.loadAlbumFilesThread.stop()
            self.loadAlbumFilesThread.wait()
        self.loadAlbumFilesThread.album = album
        self.loadAlbumFilesThread.player = self.player
        self.loadAlbumFilesThread.start()

    def showAlbumTracks(self,result):
        # Thread callback: fill the tracks table for the current album.
        #self.ui.tableWidgetTracks.setColumnCount(1)
        self.ui.tableWidgetTracks.setRowCount(0)
        i=0
        for track in self.currentAlbum.tracks:
            self.ui.tableWidgetTracks.insertRow(i)
            titleItem = QtWidgets.QTableWidgetItem(track.title)
            titleItem.setFlags(titleItem.flags() ^ QtCore.Qt.ItemIsEditable)
            self.ui.tableWidgetTracks.setItem(i,0,titleItem)
            i+=1

    def showAlbumCover(self,result):
        # Thread callback: show the album's cover, or the default pixmap
        # when no cover file was found.
        album = self.currentAlbum
        if album.cover != "":
            self.showCover(os.path.join(album.getAlbumDir(),album.cover))
        else:
            self.showCover("")

    '''
    Interactions with vlc module
    '''
    def playAlbum(self,alb):
        '''Add tracks in playlist and start playing'''
        #self.player.dropMediaList()
        self.player.playAlbum(alb)
        self.setVolume(self.getVolumeFromSlider())
        self.showPlaylist(True)

    def addAlbum(self,alb):
        '''Add tracks in playlist and start playing'''
        self.player.addAlbum(alb)
        self.setVolume(self.getVolumeFromSlider())
        self.showPlaylist(True)

    def showPlaylist(self,showOnlyIfNew=False):
        # Lazily create the playlist window; with showOnlyIfNew=True an
        # already-existing window is refreshed but not raised.
        isNew = False
        if self.playList is None:
            isNew = True
            self.playList = playlistWidget(self.player)
            self.playList.trackChanged.connect(self.player.setPlaylistTrack)
            self.threadStreamObserver.titleChanged.connect(self.onPlayerMediaChangedStreamObserver)
        self.playList.showMediaList()
        if isNew or showOnlyIfNew==False: self.playList.show()

    def onPlayerMediaChangedVLC(self,event):
        # VLC event callback: highlight the new current track.
        print("onPlayerMediaChangedVLC")
        if self.playList is not None:
            self.playList.setCurrentTrack()

    def onPlayerMediaChangedStreamObserver(self,title):
        # Stream-observer callback: highlight the track matching *title*.
        print("onPlayerMediaChangedStreamObserver="+title)
        if self.playList is not None:
            self.playList.setCurrentTrack(title)

    def onPlayAlbum(self,item):
        print("onPlayAlbum "+self.currentAlbum.getAlbumDir())
        self.playAlbum(self.currentAlbum)

    def onAddAlbum(self,item):
        print("onAddAlbum "+self.currentAlbum.getAlbumDir())
        self.addAlbum(self.currentAlbum)

    def onPauseAlbum(self):
        self.player.pauseMediaList()

    def onOpenDir(self):
        # Open the current album's directory in the system file manager.
        open_file(self.currentAlbum.getAlbumDir())

    '''
    Miscellanious UI functions
    '''
    def setTitleLabel(self,artName="",albTitle="",year=""):
        """Render 'artist / album (year)' into the rich-text title label.

        Empty arguments fall back to the current artist/album.
        """
        if self.currentArtist is not None and artName=="":
            artName = self.currentArtist.name
        if self.currentAlbum is not None and albTitle=="":
            albTitle = self.currentAlbum.title
            year = self.currentAlbum.year
        sAlbum = albTitle
        sYear =str(year)
        # Only append the year when it is meaningful (not 0 or empty).
        if(not sYear in ["0",""]): sAlbum += " ("+sYear+")"
        sTitle = '''<html><head/><body>
        <p><span style=\" font-size:14pt; font-weight:600;\">{Artist}</span></p>
        <p><span style=\" font-style:italic;\">{Album}</span></p>
        </body></html>'''
        sTitle = sTitle.format(Artist=artName,Album=sAlbum)
        self.ui.labelArtist.setText(sTitle)

    def showCover(self,path):
        # Load and display the cover at *path*, scaled to fit; an empty
        # path restores the default pixmap.
        if path != "":
            print("MyCover="+path)
            self.coverPixmap = QtGui.QPixmap(path)
            scaledCover = self.coverPixmap.scaled(self.ui.cover.size(),
            QtCore.Qt.KeepAspectRatio,
            QtCore.Qt.SmoothTransformation)
            self.ui.cover.setPixmap(scaledCover)
            self.ui.cover.show()
        else:
            self.ui.cover.setPixmap(self.defaultPixmap)

    def resizeEvent(self,event):
        # Installed as coverWidget.resizeEvent in __init__.
        self.resizeCover()

    def resizeCover(self):
        # Re-scale the cached cover pixmap to the current widget size.
        if (not self.coverPixmap.isNull()):
            scaledCover = self.coverPixmap.scaled(self.ui.cover.size(),
            QtCore.Qt.KeepAspectRatio,
            QtCore.Qt.SmoothTransformation)
            self.ui.cover.setPixmap(scaledCover)

    def closeEvent(self, event):
        # Persist settings when the window closes.
        self.saveSettings()

    def saveSettings(self):
        self.settings.setValue('volume', self.player.getVolume())

    def loadSettings(self):
        # Restore the saved volume, defaulting to 100 on first run.
        if self.settings.contains('volume'):
            self.volume = self.settings.value('volume', type=int)
        else:
            self.volume = 100

    def changeLanguage(self,locale):
        # translator for built-in qt strings
        self.translator.unInstallTranslators()
        self.translator.installTranslators(locale)
        self.ui.retranslateUi(self)
        if self.playList is not None: self.playList.retranslateUi()
        self.update()
        self.setWindowTitle("PyZik")
        self.setTitleLabel()
if __name__ == '__main__':
    # Allow running this module directly: delegate to the pyzik entry point.
    from pyzik import *
    main()
|
"""
Robotritons troubleshooting version for gps navigation.
Purpose: Use reliable GPS data to navigate the vehicle
Requirements: A vehicle with at least one speed controller and one servo, and one Ublox NEO-M8N Standard Precision GNSS Module. The python modules sys, time, GPSConfigBackEnd, and VehiclePWMModule
Use: Instantiate esc, servo, and ublox objects then use their included methods as well as those defined here in order to wait until usable GPS data is secured.
Instantiate objects for an esc using vehiclePWM("esc"), a servo using vehiclePWM("servo"), and a ublox using U_blox()
Resources:
https://docs.emlid.com/navio/Navio-dev/read-gps-data/
https://shahriar.svbtle.com/importing-star-in-python
"""
import sys
import time
from GPSConfigBackEnd import *
import VehiclePWMModule
#make ubl object
ubl = U_blox()
def comm(msg):
    """Transmit the UBX message *msg* over the u-blox SPI bus.

    The transfer is repeated ten times, matching the original behaviour of
    re-sending configuration frames for reliability.
    """
    for _ in range(10):
        ubl.bus.xfer2(msg)
# reset the Ublox messages
CFGmsg8_NAVposllh_no = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0xb9]  # Disable Ublox from publishing a NAVposllh
CFGmsg8_NAVstatus_no = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0xc0]  # Disable Ublox from publishing a NAVstatus
# enable the Ublox messages
CFGmsg8_NAVstatus_yes = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x03,0x00,0x00,0x00,0x00,0x01,0x00,0x14,0xc2]  # Enable Ublox to publish a NAVstatus
CFGmsg8_NAVposllh_yes = [0xb5,0x62,0x06,0x01,0x08,0x00,0x01,0x02,0x00,0x00,0x00,0x00,0x01,0x00,0x13,0xbb]  # Enable Ublox to publish a NAVposllh

vehicle_servo = VehiclePWMModule.vehiclePWM("servo")
vehicle_esc = VehiclePWMModule.vehiclePWM("esc")
# ubl.debug=True

# Simple state machine: x==1 performs the initial stop/center and waits for a
# usable NAVstatus fix; x==2 runs a servo sweep self-test; afterwards the loop
# polls NAVposllh position data forever.  Print statements were converted to
# the single-argument print() form, which behaves identically on Python 2 and
# is required on Python 3.
x = 1
while True:
    try:
        if x == 1:
            vehicle_esc.stop()
            vehicle_servo.rest()
            vehicle_servo.center()
            comm(CFGmsg8_NAVstatus_no)
            comm(CFGmsg8_NAVposllh_no)
            comm(CFGmsg8_NAVstatus_yes)
            # sTime = time.time()
            # Block until GPSfetch() returns usable (truthy) status data.
            a = 0
            while not a:
                a = ubl.GPSfetch()
                print(a)
            print('once')
            x = 2
        if x == 2:
            comm(CFGmsg8_NAVstatus_no)
            # Quick steering sweep to confirm the servo responds.
            vehicle_servo.steer(45)
            time.sleep(0.5)
            vehicle_servo.steer(105)
            time.sleep(0.5)
            vehicle_servo.center()
            # ubl.debug=True
            print('twice')
            x = 3
            z = 1
        # NOTE(review): indentation was reconstructed; presumably the position
        # polling below runs on every loop iteration once setup is done — confirm.
        comm(CFGmsg8_NAVposllh_yes)
        b = 0
        while not b:
            # vehicle_esc.accel(3)
            b = ubl.GPSfetch(1)
            # vehicle_esc.accel(-6)
            z = z + 1
            print('z %d' % z)
    except KeyboardInterrupt:
        # Stop the vehicle safely before exiting on Ctrl-C.
        vehicle_esc.stop()
        vehicle_servo.rest()
        sys.exit()
|
from series_types import SeriesType
from choices import Choice
from questions import Question
from user_question_answer import UserQuestionAnswer
from question_test_series import QuestionTestSeries
from subject import Subject
from topic import Topic
from series_types import SeriesType
|
import requests
import lxml.html

# XPath expressions for the chapter-link anchors, tried in this order; the
# original code repeated the same fetch/insert loop three times verbatim.
#links = root.xpath('//li[@title="Bookmark Chapter"]/a')
LINK_XPATHS = [
    '//*[@id="post-body-6306637228371808967"]/div[2]/a',
    '//*[@id="post-body-6306637228371808967"]/a',
    '//*[@id="post-body-6306637228371808967"]/div/a',
]

data = []
r = requests.get("https://turb0translation.blogspot.com/p/blog-page.html")
root = lxml.html.fromstring(r.content)

# insert(0, ...) reverses each group's order and puts later groups first,
# exactly as the original triplicated loops did.
for xpath in LINK_XPATHS:
    for link in root.xpath(xpath):
        print(f"{link.text} = {link.get('href')}")
        data.insert(0, link.get('href'))

# "w" instead of "w+": the file is only written, never read back.
with open("link.txt", "w") as f:
    for row in data:
        f.write(row + '\n')
|
import visual_recognition
import json
import random
# Path of the sample image used when this module is run as a script.
local_image = 'test/kevin.jpg'
# Load the emotion -> list-of-advice-strings table once at import time.
with open('util/advices.json') as json_file:
    advices = json.load(json_file)
def predict_image(image):
    """Classify *image* via the visual_recognition service.

    Returns the classification result serialized as an indented JSON string.
    """
    mood_classes = visual_recognition.predict_mood(image)
    return json.dumps(mood_classes, indent=2)
def predict_mood(image):
    """Classify the image file at path *image* with the custom Watson model.

    Opens the file in binary mode and runs the 'DefaultCustomModel_1997094634'
    classifier with a 0.6 confidence threshold.

    Returns the raw classification result.  (Bug fix: the original computed
    the result but never returned it, so callers always received None.)
    """
    with open(image, 'rb') as images_file:
        predicted_class = visual_recognition.classify(
            images_file,
            threshold='0.6',
            classifier_ids='DefaultCustomModel_1997094634').get_result()
    return predicted_class
def get_emotion_json(result_of_image):
    """Extract the top emotion class name from a classification JSON string.

    Expects the Watson Visual Recognition response layout:
    images[0].classifiers[0].classes[0].class
    """
    parsed = json.loads(result_of_image)
    top_classifier = parsed['images'][0]['classifiers'][0]
    return top_classifier['classes'][0]['class']
def get_advice_on_emotion(emotion):
    """Return one randomly chosen advice string for *emotion*.

    Looks the emotion up in the module-level ``advices`` table that was
    loaded from util/advices.json at import time.
    """
    options = advices[emotion]
    return options[random.randrange(len(options))]
if __name__ == "__main__":
result_ = predict_image(local_image)
emotion_ = get_emotion_json(result_)
a = get_advice_on_emotion(emotion_)
print(a)
|
__author__ = 'AdminZJ'
def maximum(x, y):
    """Return the larger of *x* and *y*, or the string 'equal' on a tie."""
    if x == y:
        return 'equal'
    return x if x > y else y

print(maximum(12, 35))
# Prompt until the user supplies an integer in [1, 10], then print its
# multiplication table from 1 to 10.
num = int(input('Write a number from 1 to 10: '))
while not 1 <= num <= 10:
    print('Your number is incorrect!')
    num = int(input('Write a number from 1 to 10: '))

for element in range(1, 11):
    print(f"{element} * {num} = {element * num}")
import heapq
def kClosest(iterable, k, x):
    """Return the k values from *iterable* closest to *x*, sorted ascending.

    Uses a bounded max-heap (entries negated, since heapq is a min-heap) so
    only k candidates are kept: O(n log k) time, O(k) space.  On distance
    ties the larger value is evicted first, matching the original ordering
    of the negated (distance, value) tuples.

    Returns None if *iterable* yields fewer than k values.

    (Fix: removed a leftover debug ``print(smallest)`` that leaked the raw
    heap contents to stdout on every call.)
    """
    heap = []  # entries are (-distance, -value) so the farthest sits on top
    for value in iterable:
        distance = abs(value - x)
        heapq.heappush(heap, (-distance, -value))
        if len(heap) > k:
            heapq.heappop(heap)  # evict the current farthest candidate
    if len(heap) < k:
        return None
    return sorted(-neg_value for _, neg_value in heap)

print(kClosest([1, 2, 3, 4, 5], 4, 3))
|
# For each of t test cases read r, b, d; answer YES when the larger color
# count can be split so every pack of the smaller count holds at most d+1
# items of the majority color.
t = int(input())
while t > 0:
    r, b, d = map(int, input().split())
    smaller = min(r, b)
    larger = max(r, b)
    print("YES" if (d + 1) * smaller >= larger else "NO")
    t = t - 1
from django_pipes.middleware.stats_middleware import PipesStatsMiddleware
|
import unittest
import datetime
from test.util import ClangTest
'''Test if macro are correctly generated.
'''
import logging
# logging.basicConfig(level=logging.DEBUG)
class Macro(ClangTest):
# @unittest.skip('')
def setUp(self):
# we need to generate macro. Which is very long for some reasons.
self.full_parsing_options = True
def test_simple_integer_literal(self):
self.convert('''#define MY_VAL 1''')
self.assertEqual(self.namespace.MY_VAL, 1)
self.convert('''#define __MY_VAL 1''')
self.assertEqual(getattr(self.namespace, "__MY_VAL"), 1)
def test_long(self):
self.convert('''#define BIG_NUM_L 1000000L''')
self.assertEqual(getattr(self.namespace, "BIG_NUM_L"), 1000000)
def test_signed(self):
self.convert('''
#define ZERO 0
#define POSITIVE 1
#define NEGATIVE -1
''')
self.assertIn("ZERO", self.namespace)
self.assertEqual(self.namespace.ZERO, 0)
self.assertIn("POSITIVE", self.namespace)
self.assertEqual(self.namespace.POSITIVE, 1)
self.assertIn("NEGATIVE", self.namespace)
self.assertEqual(self.namespace.NEGATIVE, -1)
def test_signed_long_long(self):
self.convert('''
#define ZERO 0x0000000000000000LL
#define POSITIVE 0x0000000080000000LL
#define NEGATIVE -0x0000000080000000LL
''')
self.assertIn("ZERO", self.namespace)
self.assertEqual(self.namespace.ZERO, 0)
self.assertIn("POSITIVE", self.namespace)
self.assertIn("NEGATIVE", self.namespace)
self.assertEqual(self.namespace.POSITIVE, 0x0000000080000000)
self.assertEqual(self.namespace.NEGATIVE, -0x0000000080000000)
def test_signed_long(self):
self.convert('''
#define ZERO 0x0000000000000000L
#define POSITIVE 0x0000000080000000L
#define NEGATIVE -0x0000000080000000L
''')
self.assertIn("ZERO", self.namespace)
self.assertEqual(self.namespace.ZERO, 0)
self.assertIn("POSITIVE", self.namespace)
self.assertIn("NEGATIVE", self.namespace)
self.assertEqual(self.namespace.POSITIVE, 0x0000000080000000)
self.assertEqual(self.namespace.NEGATIVE, -0x0000000080000000)
def test_unsigned_long_long(self):
self.convert('''
#define ZERO 0x0000000000000000ULL
#define POSITIVE 0x0000000080000000ULL
#define NEGATIVE -0x0000000080000000ULL
''')
self.assertIn("ZERO", self.namespace)
self.assertEqual(self.namespace.ZERO, 0)
self.assertIn("POSITIVE", self.namespace)
self.assertIn("NEGATIVE", self.namespace)
self.assertEqual(self.namespace.POSITIVE, 0x0000000080000000)
self.assertEqual(self.namespace.NEGATIVE, -0x0000000080000000)
def test_decimals_typicals(self):
self.convert('''
#define ONE_TWO_THREE 1.2e3
#define FOUR_SIX_SEVEN .4e67
#define EIGHT_NIGNT_TEN 89.10
#define ELEVEN +11.f
''')
self.assertIn("ONE_TWO_THREE", self.namespace)
self.assertIn("FOUR_SIX_SEVEN", self.namespace)
self.assertIn("EIGHT_NIGNT_TEN", self.namespace)
self.assertIn("ELEVEN", self.namespace)
self.assertEqual(self.namespace.ONE_TWO_THREE, 1.2e3)
self.assertEqual(self.namespace.FOUR_SIX_SEVEN, .4e67)
self.assertEqual(self.namespace.EIGHT_NIGNT_TEN, 89.10)
self.assertEqual(self.namespace.ELEVEN, 11.)
def test_not_decimals(self):
self.convert('''
#define ONE_BILLION "1000000000.0"
''')
self.assertIn("ONE_BILLION", self.namespace)
self.assertEqual(self.namespace.ONE_BILLION, "1000000000.0")
def test_decimals_dot_ones(self):
self.convert('''
#define one1 .1
#define one2 .1f
#define one3 .1l
#define one4 .1L
#define one5 .1F
#define one6 +.1
#define one7 +.1f
#define one8 +.1l
#define one9 +.1L
#define one10 +.1F
#define one11 -.1
#define one12 -.1f
#define one13 -.1l
#define one14 -.1L
#define one15 -.1F
#define one16 .1e0
#define one17 .1e0f
#define one18 .1e0l
#define one19 .1e0L
#define one20 .1e0F
#define one21 +.1e0
#define one22 +.1e0f
#define one23 +.1e0l
#define one24 +.1e0L
#define one25 +.1e0F
#define one26 -.1e0
#define one27 -.1e0f
#define one28 -.1e0l
#define one29 -.1e0L
#define one30 -.1e0F
#define one31 .1E0
#define one32 .1E0f
#define one33 .1E0l
#define one34 .1E0L
#define one35 .1E0F
#define one36 +.1E0
#define one37 +.1E0f
#define one38 +.1E0l
#define one39 +.1E0L
#define one40 +.1E0F
#define one41 -.1E0
#define one42 -.1E0f
#define one43 -.1E0l
#define one44 -.1E0L
#define one45 -.1E0F
''')
for name, value in self.namespace.items():
if not name.startswith("one"):
continue
self.assertIn(
value, (-0.1, 0.1), msg="%s: %s != +/-0.1" % (name, value))
def test_decimals_ones(self):
self.convert('''
#define one1 1.0
#define one2 1.f
#define one3 1.l
#define one4 1.L
#define one5 1.F
#define one6 1.0
#define one7 +1.f
#define one8 +1.l
#define one9 +1.L
#define one10 +1.F
#define one11 +1.0
#define one12 -1.f
#define one13 -1.l
#define one14 -1.L
#define one15 -1.F
#define one16 -1.0
#define one17 1e0
#define one18 1.e0
#define one19 1.0e0
#define one20 1e+0
#define one21 1.e+0
#define one22 1.0e+0
#define one23 1e-0
#define one24 1.e-0
#define one25 1.0e-0
#define one26 +1e0
#define one27 +1.e0
#define one28 +1.0e0
#define one29 +1e+0
#define one30 +1.e+0
#define one31 +1.0e+0
#define one32 +1e-0
#define one33 +1.e-0
#define one34 +1.0e-0
#define one35 -1e0
#define one36 -1.e0
#define one37 -1.0e0
#define one38 -1e+0
#define one39 -1.e+0
#define one40 -1.0e+0
#define one41 -1e-0
#define one42 -1.e-0
#define one43 -1.0e-0
#define one44 +1e0f
#define one45 +1.e0f
#define one46 +1.0e0f
#define one47 +1e+0f
#define one48 +1.e+0f
#define one49 +1.0e+0f
#define one50 +1e-0f
#define one51 +1.e-0f
#define one52 +1.0e-0f
#define one53 -1e0f
#define one54 -1.e0f
#define one55 -1.0e0f
#define one56 -1e+0f
#define one57 -1.e+0f
#define one58 -1.0e+0f
#define one59 -1e-0f
#define one60 -1.e-0f
#define one61 -1.0e-0f
#define one62 +1e0F
#define one63 +1.e0F
#define one64 +1.0e0F
#define one65 +1e+0F
#define one66 +1.e+0F
#define one67 +1.0e+0F
#define one68 +1e-0F
#define one69 +1.e-0F
#define one70 +1.0e-0F
#define one71 -1e0F
#define one72 -1.e0F
#define one73 -1.0e0F
#define one74 -1e+0F
#define one75 -1.e+0F
#define one76 -1.0e+0F
#define one78 -1e-0F
#define one79 -1.e-0F
#define one80 -1.0e-0F
#define one81 +1e0l
#define one82 +1.e0l
#define one83 +1.0e0l
#define one84 +1e+0l
#define one85 +1.e+0l
#define one86 +1.0e+0l
#define one87 +1e-0l
#define one88 +1.e-0l
#define one89 +1.0e-0l
#define one90 -1e0l
#define one91 -1.e0l
#define one92 -1.0e0l
#define one93 -1e+0l
#define one94 -1.e+0l
#define one95 -1.0e+0l
#define one96 -1e-0l
#define one97 -1.e-0l
#define one98 -1.0e-0l
#define one99 +1e0L
#define one100 +1.e0L
#define one101 +1.0e0L
#define one102 +1e+0L
#define one103 +1.e+0L
#define one104 +1.0e+0L
#define one105 +1e-0L
#define one106 +1.e-0L
#define one107 +1.0e-0L
#define one108 -1e0L
#define one109 -1.e0L
#define one110 -1.0e0L
#define one111 -1e+0L
#define one112 -1.e+0L
#define one113 -1.0e+0L
#define one114 -1e-0L
#define one115 -1.e-0L
#define one116 -1.0e-0L
#define one117 1E0
#define one118 1.E0
#define one119 1.0E0
#define one120 1E+0
#define one121 1.E+0
#define one122 1.0E+0
#define one123 1E-0
#define one124 1.E-0
#define one125 1.0E-0
#define one126 +1E0
#define one127 +1.E0
#define one128 +1.0E0
#define one129 +1E+0
#define one130 +1.E+0
#define one131 +1.0E+0
#define one132 +1E-0
#define one133 +1.E-0
#define one134 +1.0E-0
#define one135 -1E0
#define one136 -1.E0
#define one137 -1.0E0
#define one138 -1E+0
#define one139 -1.E+0
#define one140 -1.0E+0
#define one141 -1E-0
#define one142 -1.E-0
#define one143 -1.0E-0
#define one144 +1E0f
#define one145 +1.E0f
#define one146 +1.0E0f
#define one147 +1E+0f
#define one148 +1.E+0f
#define one149 +1.0E+0f
#define one150 +1E-0f
#define one151 +1.E-0f
#define one152 +1.0E-0f
#define one153 -1E0f
#define one154 -1.E0f
#define one155 -1.0E0f
#define one156 -1E+0f
#define one157 -1.E+0f
#define one158 -1.0E+0f
#define one159 -1E-0f
#define one160 -1.E-0f
#define one161 -1.0E-0f
#define one162 +1E0F
#define one163 +1.E0F
#define one164 +1.0E0F
#define one165 +1E+0F
#define one166 +1.E+0F
#define one167 +1.0E+0F
#define one168 +1E-0F
#define one169 +1.E-0F
#define one170 +1.0E-0F
#define one171 -1E0F
#define one172 -1.E0F
#define one173 -1.0E0F
#define one174 -1E+0F
#define one175 -1.E+0F
#define one176 -1.0E+0F
#define one177 -1E-0F
#define one178 -1.E-0F
#define one180 -1.0E-0F
#define one181 +1E0l
#define one182 +1.E0l
#define one183 +1.0E0l
#define one184 +1E+0l
#define one185 +1.E+0l
#define one186 +1.0E+0l
#define one187 +1E-0l
#define one188 +1.E-0l
#define one189 +1.0E-0l
#define one190 -1E0l
#define one191 -1.E0l
#define one192 -1.0E0l
#define one193 -1E+0l
#define one194 -1.E+0l
#define one195 -1.0E+0l
#define one196 -1E-0l
#define one197 -1.E-0l
#define one198 -1.0E-0l
#define one199 +1E0L
#define one200 +1.E0L
#define one201 +1.0E0L
#define one202 +1E+0L
#define one203 +1.E+0L
#define one204 +1.0E+0L
#define one205 +1E-0L
#define one206 +1.E-0L
#define one207 +1.0E-0L
#define one208 -1E0L
#define one209 -1.E0L
#define one210 -1.0E0L
#define one211 -1E+0L
#define one212 -1.E+0L
#define one213 -1.0E+0L
#define one214 -1E-0L
#define one215 -1.E-0L
#define one216 -1.0E-0L
''')
for name, value in self.namespace.items():
if not name.startswith("one"):
continue
self.assertIn(
value, (-1.0, 1.0), msg="%s: %s != +/- 1.0" % (name, value))
def test_char_arrays(self):
self.convert('''
#define PRE "before"
#define POST " after"
#define APREPOST PRE POST
char a[] = "what";
char b[] = "why" " though";
char c[] = PRE POST;
char d[] = APREPOST;''')
self.assertEqual(self.namespace.a, "what")
self.assertEqual(self.namespace.b, "why though")
self.assertEqual(self.namespace.c, 'before after')
self.assertEqual(self.namespace.d, 'before after')
# print(self.text_output)
def test_char_arrays_arm_linux(self):
"""c_char is c_ubyte on arm-linux-gnueabihf"""
self.convert('''
#define PRE "before"
#define POST " after"
#define APREPOST PRE POST
char a[] = "what";
char b[] = "why" " though";
char c[] = PRE POST;
char d[] = APREPOST;''', ['-target', 'arm-linux-gnueabihf'])
self.assertEqual(self.namespace.a, "what")
self.assertEqual(self.namespace.b, "why though")
self.assertEqual(self.namespace.c, 'before after')
self.assertEqual(self.namespace.d, 'before after')
# print(self.text_output)
@unittest.expectedFailure
def test_define_wchar_t(self):
"""'L' means wchar_t"""
# currently this fails because of wchar being an int on this arch.
self.convert("""
#define SPAM "spam"
#define STRING_NULL "NULL"
#define FOO L"foo"
#include <wchar.h>
wchar_t * my_foo = FOO;
""")
# print(self.text_output)
self.assertEqual(self.namespace.SPAM, "spam")
self.assertEqual(self.namespace.STRING_NULL, "NULL")
self.assertEqual(self.namespace.FOO, "foo")
self.assertEqual(self.namespace.my_foo, "foo")
def test_simple_replace_typedef(self):
"""When macro are used as typedef, it's transparent to us. """
# Python does not have typedef so who care what type name is a variable ?
self.convert('''
#define macro_type int
macro_type i = 10;
''')
# macro_type = int # macro
# i = 10 # Variable ctypes.c_int32
# very little
self.assertIn("i", self.namespace)
self.assertEqual(self.namespace.i, 10)
# print(self.text_output)
def test_simple_replace_function(self):
"""When macro are used as typedef, it's transparent to us. """
# Python does not have typedef so who care what type name is a variable ?
self.convert('''
#define macro_type int
macro_type fn(int a, int b) {return a+b} ;
''', )
# macro_type = int # macro
# i = 10 # Variable ctypes.c_int32
# very little
# print(self.text_output)
self.assertIn("fn", self.namespace)
# self.assertIn("fn", self.text_output)
# self.assertEqual(self.namespace.i, 10)
def test_function(self):
self.convert('''
#define fn_type void
#define fn_name(a,b) real_name(a,b)
fn_type fn_name(int a, int b);
''')
self.assertIn("real_name", self.namespace)
def test_simple_macro_function(self):
self.convert('''
#define HI(x) x
HI(int) y;
''')
# print(self.text_output)
self.assertIn("y", self.namespace)
self.assertEqual(self.namespace.y, 0)
self.assertIn("HI", self.text_output)
# only comments for functions
self.assertNotIn("HI", self.namespace)
    def test_example(self):
        """Object-like macros become namespace values; function-like macros do not."""
        self.convert('''
        #define DEBUG
        #define PROD 1
        #define MACRO_EXAMPLE(x,y) {x,y}
        // #define MY 1 2 3 4 5 6
        int tab1[] = MACRO_EXAMPLE(1,2);
        ''')
        # print(self.text_output)
        self.assertIn("tab1", self.namespace)
        self.assertEqual(self.namespace.tab1, [1, 2])
        # A bare #define (no value) surfaces as True.
        self.assertEqual(self.namespace.DEBUG, True)
        self.assertEqual(self.namespace.PROD, 1)
        # we don't gen macro functions
        self.assertNotIn('MACRO_EXAMPLE', self.namespace)
        # self.assertEqual(self.namespace.MY, 123456)
        # that is not a thing that compiles
def test_macro_to_variable(self):
"""Test which macros are going to be defined """
self.convert('''
#define SPAM "spam"
#define NO "no"
#define SPACE " "
#define FOO L"foo"
#define NOSPAM NO SPAM
#define NO_SPAM NO SPACE SPAM
#define NO_SPAM_FOO NO SPACE SPAM SPACE FOO
''')
# print(self.text_output)
self.assertIn('SPAM', self.namespace)
self.assertEqual('spam', self.namespace.SPAM)
self.assertIn('NO', self.namespace)
self.assertEqual('no', self.namespace.NO)
self.assertIn('SPACE', self.namespace)
self.assertEqual(' ', self.namespace.SPACE)
self.assertIn('NO_SPAM', self.namespace)
self.assertEqual('no spam', self.namespace.NO_SPAM)
self.assertIn('NO_SPAM_FOO', self.namespace)
self.assertEqual('no spam foo', self.namespace.NO_SPAM_FOO)
def test_all(self):
"""Test which macros are going to be defined """
self.convert('''
#define DATE __DATE__
#define DEBUG
#define PROD 1
#define MACRO_STRING "abcde"
#define MACRO_FUNC(x,y) {x,y}
// #define MACRO_LIST 1 2 3 4 5 6
int tab1[] = MACRO_FUNC(1,2);
char date[] = DATE;
''')
# print(self.text_output)
self.assertIn('DEBUG', self.namespace.__all__)
self.assertIn('PROD', self.namespace.__all__)
self.assertIn('MACRO_STRING', self.namespace.__all__)
self.assertNotIn('DATE', self.namespace.__all__)
self.assertNotIn('__DATE__', self.namespace.__all__)
self.assertNotIn('MACRO_FUNC', self.namespace.__all__)
# self.assertIn('MACRO_LIST', self.namespace.__all__)
"""
Bug #77
2021-03
Both compiler's Predefined Macros and standard's Preprocessor Macros handling works for string values.
But predef macros for INTEGER_LITERAL do NOT work.
https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html
https://blog.kowalczyk.info/article/j/guide-to-predefined-macros-in-c-compilers-gcc-clang-msvc-etc..html
"""
def test_macro_value_with_parenthesis(self):
self.convert('''
#define CPU_DEF_SET (-1)
#define another_one (2)
#define a_tuple (2,3)
#define HI(x) x
''')
print(self.text_output)
# we want to allow for macro substitution of (int)
self.assertIn("CPU_DEF_SET", self.namespace)
self.assertIn("another_one", self.namespace)
self.assertIn("a_tuple", self.namespace)
self.assertEqual(self.namespace.CPU_DEF_SET, -1)
self.assertEqual(self.namespace.another_one, (2))
self.assertEqual(self.namespace.a_tuple, (2, 3))
# but not functions.
self.assertNotIn("HI", self.namespace)
    @unittest.expectedFailure
    def test_defines_predefined(self):
        """Compiler-predefined macros: string values work; __STDC_VERSION__ (an
        integer literal) does not yet — see bug #77."""
        self.convert('''
        #define DATE __DATE__
        char c1[] = DATE;
        char f[] = __FILE__;
        char v2[] = __clang_version__;
        // this fails for now
        int v = __STDC_VERSION__;
        ''')
        # print(self.text_output)
        self.assertIn("c1", self.namespace)
        # replace leading 0 in day by a whitespace.
        this_date = datetime.datetime.now().strftime("%b %d %Y").replace(" 0", " ")
        self.assertEqual(self.namespace.c1, this_date)
        self.assertIn("# DATE = __DATE__", self.text_output)
        self.assertIn("f", self.namespace)
        self.assertIn("v", self.namespace)
        self.assertIn("v2", self.namespace)
        # v2 = '11.0.0' for example
        self.assertIn("v2 = '", self.text_output)
        # this is the current limit
        self.assertNotEqual(self.namespace.v, [])
def test_internal_defines_recursive(self):
self.convert('''
#define DATE __DATE__
#define DATE2 DATE
char c1[] = DATE2;
''')
# print(self.text_output)
self.assertIn("c1", self.namespace)
# replace leading 0 in day by a whitespace.
this_date = datetime.datetime.now().strftime("%b %d %Y").replace(" 0", " ")
self.assertIn("# DATE = __DATE__", self.text_output)
self.assertIn("# DATE2 = __DATE__", self.text_output)
@unittest.skip
def test_internal_defines_recursive_with_operation(self):
self.convert('''
#define VERSION __clang_major__
#define VPLUS (VERSION+1)
int version = VERSION;
int vplus = VPLUS;
''')
# print(self.text_output)
self.assertIn("version", self.namespace)
self.assertIn("vplus", self.namespace)
self.assertIn("# VERSION = __clang_major__", self.text_output)
self.assertIn("# VPLUS = ", self.text_output)
def test_internal_defines_identifier(self):
self.convert('''
#define DATE "now"
#define DATE2 DATE
char c1[] = DATE2;
''')
# print(self.text_output)
self.assertIn("c1", self.namespace)
self.assertEqual(self.namespace.c1, 'now')
self.assertIn("DATE", self.namespace)
self.assertEqual(self.namespace.DATE, 'now')
self.assertIn("DATE2", self.namespace)
self.assertEqual(self.namespace.DATE2, 'now')
    def test_pack_attribute(self):
        """__attribute__ macros are recorded as comments and do not break parsing."""
        self.convert('''
        #define PACK __attribute__((aligned(2)))
        #define PACKTO __attribute__((packed))
        int x PACK = 0;
        struct foo {
        char a;
        int x[2] PACKTO;
        };
        ''')
        # print(self.text_output)
        self.assertIn("# PACK = __attribute__", self.text_output)
        self.assertIn("# PACKTO = __attribute__", self.text_output)
        self.assertIn("struct_foo", self.namespace)
    def test_enum_macro(self):
        """Enum values defined from stdint.h limit macros keep their 32-bit range."""
        from ctypeslib import translate
        self.namespace = translate('''
        #include <stdint.h>
        enum myEnum {
        MIN=INT32_MIN,
        MAX=INT32_MAX
        };
        ''')
        # Expect the enum stored as a 4-byte (32-bit) integer, as asserted below.
        import ctypes
        self.assertEqual(ctypes.sizeof(self.namespace.myEnum), 4)
        self.assertEqual(self.namespace.MIN, -2147483648)
        self.assertEqual(self.namespace.MAX, 2147483647)
def test_enum_stringize(self):
""" Stringizing operator (#)
https://www.geeksforgeeks.org/and-operators-in-c/# """
self.convert('''
#define mkstr(s) #s
char * ret = mkstr(mytext value);
''')
print(self.text_output)
self.assertIn("ret", self.namespace)
self.assertEqual(self.namespace.ret, "mytext value")
@unittest.expectedFailure
def test_enum_token_pasting(self):
"""
Token-pasting operator (##)
https://www.geeksforgeeks.org/and-operators-in-c/# """
from ctypeslib import translate
self.namespace = translate('''
#define concat(a, b) a##b
// char * ret = concat("mytext", "value");
int add = concat(1, 2);
''')
print(self.text_output)
self.assertIn("add", self.namespace)
# expected failure, see bug #77
# "Bug #77 - integer literal from macros don't work"
self.assertEqual(self.namespace.add, 12)
self.assertIn("ret", self.namespace)
self.assertEqual(self.namespace.ret, "mytextvalue")
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add assisted/unassisted field-goal-attempt counters to StatLine."""

    dependencies = [
        ('basketball', '0027_statline_ast_points'),
    ]
    operations = [
        # Assisted field goal attempts.
        migrations.AddField(
            model_name='statline',
            name='ast_fga',
            field=models.PositiveIntegerField(default=0),
        ),
        # Unassisted field goal attempts.
        migrations.AddField(
            model_name='statline',
            name='unast_fga',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
def mergeSort(array):
    """Return a new list with the elements of `array` in ascending order.

    Classic top-down merge sort: O(n log n) time, O(n) extra space.
    """
    # The base case must cover the empty list too: the original `== 1`
    # check recursed forever on [] (middleIdx == 0 keeps yielding an
    # empty left half).
    if len(array) <= 1:
        return array
    middleIdx = len(array) // 2
    leftHalf = array[:middleIdx]
    rightHalf = array[middleIdx:]
    return mergeSortedArrays(mergeSort(leftHalf), mergeSort(rightHalf))


def mergeSortedArrays(leftHalf, rightHalf):
    """Merge two already-sorted lists into one sorted list."""
    sortedArrays = [None] * (len(leftHalf) + len(rightHalf))
    k = i = j = 0
    # Take the smaller head element until one side is exhausted.
    while i < len(leftHalf) and j < len(rightHalf):
        if leftHalf[i] <= rightHalf[j]:
            sortedArrays[k] = leftHalf[i]
            i += 1
        else:
            sortedArrays[k] = rightHalf[j]
            j += 1
        k += 1
    # Copy whatever remains of either half (at most one of these runs).
    while i < len(leftHalf):
        sortedArrays[k] = leftHalf[i]
        i += 1
        k += 1
    while j < len(rightHalf):
        sortedArrays[k] = rightHalf[j]
        j += 1
        k += 1
    return sortedArrays
# def mergeSort(array):
# if len(array) <= 1:
# return array
# auxilaryArray = array[:]
# mergeSortHelper(array, 0, len(array)-1, auxilaryArray)
# return array
# def mergeSortHelper(mainArray, startIdx, endIdx, auxilaryArray):
# if startIdx == endIdx:
# return
# middleIdx = (startIdx + endIdx) // 2
# mergeSortHelper(auxilaryArray, startIdx, middleIdx, mainArray)
# mergeSortHelper(auxilaryArray, middleIdx+1, endIdx, mainArray)
# doMerge(mainArray, startIdx, middleIdx, endIdx, auxilaryArray)
# def doMerge(mainArray, startIdx, middleIdx, endIdx, auxilaryArray):
# k = startIdx
# i = startIdx
# j = middleIdx + 1
# while i <= middleIdx and j <= endIdx:
# if auxilaryArray[i] <= auxilaryArray[j]:
# mainArray[k] = auxilaryArray[i]
# i += 1
# else:
# mainArray[k] = auxilaryArray[j]
# j += 1
# k += 1
# while i <= middleIdx:
# mainArray[k] = auxilaryArray[i]
# i += 1
# k += 1
# while j <= endIdx:
# mainArray[k] = auxilaryArray[j]
# j += 1
# k += 1
# Quick smoke test when run as a script; expected output: [1, 2, 3]
print(mergeSort([2, 3, 1]))
|
import sys
sys.path.append('../500_common')
import lib_reserve
# Arguments for the shared reservation helper.
a = "Chrome1res"   # presumably a Chrome profile tag — verify against lib_reserve
b = "Profile 1"    # Chrome profile directory name
path = "../504_kyoto01/data/result.html"  # NOTE(review): defined but never passed below — confirm intent
lib_reserve.main(a, b, None, None, waitTime=10, preTime=20)
|
from Minecraft_Blog import db,login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from datetime import datetime
@login_manager.user_loader
def Load_user(user_id):
    """Flask-Login user-loader callback: fetch a User by primary key.

    Flask-Login stores the id in the session as a unicode string, so it
    is cast back to int before the integer primary-key lookup.
    """
    return User.query.get(int(user_id))
class User (db.Model, UserMixin):
    """A registered blog user (Flask-Login compatible via UserMixin)."""
    __tablename__ ='users'
    id=db.Column(db.Integer,primary_key=True)
    # Avatar filename; a default image is used until one is uploaded.
    profile_image= db.Column(db.String(64), nullable=False, default='default_profile.png')
    email=db.Column(db.String(64),unique=True,index=True)
    username=db.Column(db.String(64),unique=True,index=True)
    # Only the hash is stored; see check_password().
    password_hash= db.Column(db.String(128))
    # One-to-many: posts authored by this user (BlogPost gets an `author` backref).
    posts= db.relationship ('BlogPost',backref='author',lazy=True)
    def __init__(self,email,username,password):
        # The plaintext password is hashed immediately and never stored.
        self.email=email
        self.username=username
        self.password_hash=generate_password_hash(password)
    def check_password (self,password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def __repr__(self):
        return f"Username {self.username}"
class BlogPost (db.Model):
    """A blog post written by a User."""
    users=db.relationship(User)
    id=db.Column(db.Integer, primary_key=True)
    user_id=db.Column(db.Integer, db.ForeignKey('users.id'),nullable=False)
    # Creation timestamp, defaulting to the current UTC time.
    date=db.Column(db.DateTime,nullable=False,default=datetime.utcnow)
    title=db.Column (db.String(128),nullable=False)
    text=db.Column(db.Text,nullable=False)
    def __init__(self, title, text, user_id):
        self.title=title
        self.text=text
        self.user_id=user_id
    def __repr__(self):
        return f"Post ID: {self.id}"
|
import os
import requests
import time
# 42 API credentials are taken from the environment (never hard-coded).
uid = os.environ['UID42']
secret = os.environ['SECRET42']
url = 'https://api.intra.42.fr/'        # OAuth endpoint base
urlv2 = 'https://api.intra.42.fr/v2/'   # REST API v2 base
def getToken():
    """Fetch an OAuth2 access token via the client-credentials flow."""
    credentials = {
        'grant_type': 'client_credentials',
        'client_id': uid,
        'client_secret': secret,
    }
    response = requests.post(url + 'oauth/token', data=credentials)
    return response.json()['access_token']
def prepareApiCalls():
    """Refresh the module-level token and Authorization header.

    NOTE(review): auth state is kept in module-level globals, shared by
    every subsequent getFromApi() call.
    """
    global token, headers
    token = getToken()
    headers = {'Authorization': 'Bearer {}'.format(token)}
def getFromApi(path, params=None):
    """GET `path` from API v2; return parsed JSON, or -1 on any HTTP error.

    Callers test the result against -1, so that error contract is kept.
    """
    # Avoid the mutable default argument ({} would be shared across calls).
    if params is None:
        params = {}
    r = requests.get(urlv2 + path, headers=headers, params=params)
    if r.status_code != requests.codes.ok:
        return (-1)
    else:
        return(r.json())
def getUser(login):
    """Return the API record for one user, or -1 on error."""
    return getFromApi('users/{}'.format(login))
# One-off exploration of a single user's record.
objet = getUser('nklarsfe')
userName = objet['login']
projectName = objet['projects_users'][0]['project']['name']
wasValidated = objet['projects_users'][0]['validated?']
import arrow
text = objet['projects_users'][0]['marked_at']
date = arrow.get(text)
dateAsFloat = float(date.format('X'))  # 'X' = unix timestamp (seconds)
dateInDays = (dateAsFloat / (3600*24)) - 18175  # days since an arbitrary epoch offset
studsNames = []
# ----------------------------------
# pickle
import pickle
def save_object(obj, filename):
    """Pickle `obj` to `filename`, overwriting any existing file."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
def get_object(filename):
    """Unpickle and return the object stored in `filename`.

    NOTE: pickle is unsafe on untrusted files; only load files this
    script wrote itself.
    """
    # `infile` instead of the original `input`, which shadowed the builtin.
    with open(filename, 'rb') as infile:
        return pickle.load(infile)
# toto={'a':1, 'b':2}
# save_object(studs, '../dataFrom42api/studsOld.pkl')
# studs = get_object('../dataFrom42api/studs.pkl')
# -------------------------------------------------
def getNamesInPool(year, month):
    """Append the login of every student of one piscine (year, month) to `names`.

    Pages through the /users endpoint 100 records at a time; a page
    shorter than the page size signals the last page.
    """
    global names
    prepareApiCalls()
    params = {
        'filter[pool_year]': year,
        'filter[pool_month]': month,
        'filter[primary_campus_id]': '1',  # presumably the Paris campus — verify
        'page[number]': '0',
        'page[size]': '100'
    }
    while True:
        pageNumber = int(params['page[number]'])
        print(pageNumber)
        params['page[number]'] = str(pageNumber + 1)
        res = -1
        # Retry until the call succeeds (getFromApi returns -1 on error).
        # NOTE(review): loops forever if the token expired — re-auth below
        # is commented out; confirm before long runs.
        while res == -1:
            print('...trying...')
            res = getFromApi('users', params)
        # if (res == -1):
        #     prepareApiCalls()
        for usr in res:
            names.append(usr['login'])
        if len(res) != 100:
            break
names = []
# Collect every summer piscine cohort from 2013 to 2018.
for year in range(2013, 2019):
    print(year)
    for month in ['july', 'august', 'september']:
        print(month)
        getNamesInPool(year, month)
# Logins starting with '3' are filtered out — presumably anonymized
# accounts; verify against the API data.
myIterator = filter(lambda stud: stud[0] != '3', names)
nonAnonymizedNames = list(myIterator)
names = nonAnonymizedNames
# names = get_object('names.pkl')
# save_object(names, '../dataFrom42api/names.pkl')
# matches = (x for x in names if x[0] == 'c' and x[1] == 'a')
# list(matches)
# -----------------------------------------------
# Fetch each student's project data into `studs` (login -> projects_users).
studs = {}
compteur = 0
for i in range(0,len(names)):
    gotIt = False
    while not gotIt:
        objet = getUser(names[i])
        gotIt = (objet != -1)
        if not gotIt:
            # Count consecutive failures; after 10, re-authenticate.
            compteur += 1
            print(i, 'fail', compteur)
            if compteur >= 10:
                time.sleep(float(1))
                prepareApiCalls()
                compteur = 0
            time.sleep(float(1))
    studs[names[i]] = objet['projects_users']
    print(i, 'ok', names[i])
# save_object(studs, '../dataFrom42api/studsOld.pkl')
# studs = get_object('../dataFrom42api/old_studs.pkl')
# ------------------------------------
# Reduce the raw data to the essentials:
# studs2[login][project] = {'wasValidated': bool, 'dateInDays': float}
import arrow
studs2={}
for name, projects in studs.items():
    studs2[name] = {}
    for project in projects:
        if len(project['cursus_ids']) >= 1:
            if project['cursus_ids'][0] == 1: # 21
                nameProject = project['project']['name']
                wasValidated = project['validated?']
                text = project['marked_at']
                # `is not None` instead of `!= None` (PEP 8); projects that
                # were never marked have no date and are skipped.
                if text is not None:
                    date = arrow.get(text)
                    dateAsFloat = float(date.format('X'))
                    dateInDays = (dateAsFloat / (3600*24)) - 16028 # 18175
                    studs2[name][nameProject] = {
                        'wasValidated':wasValidated,
                        'dateInDays':dateInDays
                    }
#--------------------------
# Piscine start dates per cohort:
# 2013: 18 novembre 2013
# 2014: 20 novembre 2014 ?
# 2015: 26 novembre 2015 ?
# 2016: 26 septembre 2016 ?
# 2017: 29 septembre 2017 ?
# 2018: 24 septembre 2018 ?
# `debut[i]` is presumably the index in `names` where cohort i starts,
# and `decalage[i]` the day-offset subtracted so all cohorts share a
# common time origin — verify against the collection order above.
debut=[388, 813, 1255, 1888, 2494, 3334]
decalage = [367, 738, 1043, 1411, 1771]
for i in range(len(debut) - 1):
    for j in range(debut[i], debut[i+1]):
        projects = studs2[names[j]]
        for nameProject, details in projects.items():
            details['dateInDays'] -= decalage[i]
# save_object(studs2, '../dataFrom42api/old_studs2_2.pkl')
# studs2 = get_object('../dataFrom42api/old_studs2_2.pkl')
# ----------------------------
# on cree le fichier excel
# projectsNames = [
# 'Libft',
# 'netwhat', 'get_next_line', 'ft_printf',
# 'Exam Rank 02', 'ft_server', 'miniRT', 'cub3d',
# 'Exam Rank 03', 'libasm', 'ft_services', 'minishell',
# 'Exam Rank 04', 'CPP Module 08', 'Philosophers',
# 'Exam Rank 05', 'ft_containers', 'ft_irc', 'webserv',
# 'Exam Rank 06', 'ft_transcendence'
# ]
# Piscine reloaded and Fillit starting 2016 (2013 2014 and 2015 without them)
# Project lists per curriculum track; the commented list above holds the
# newer-curriculum names.
projectNames_0 = ['Piscine Reloaded', 'Libft','GET_Next_Line', 'Fillit']
# (Wolf3d + Doom) or (RTv1 + RT)
projectNames_graphics=['FdF', "Fract'ol", 'Wolf3d', 'Doom Nukem', 'RTv1', 'RT'] # GUImp
projectNames_system=['ft_ls', 'minishell', '21sh', '42sh'] # ft_select
# Push_swap or Filler
projectNames_security=['ft_printf', 'Push_swap', 'Filler', 'Lem_in', 'Corewar'] # mod1
projectNames_web=['Piscine PHP','Camagru', 'Matcha', 'Hypertube']
internship = ['Contract Upload', 'Company mid evaluation', 'Company final evaluation', 'First Internship']
# Column order of the spreadsheet built below.
projectsNames = projectNames_0 + projectNames_graphics + projectNames_system + projectNames_security + projectNames_web + internship
from openpyxl import Workbook

# Build the spreadsheet: one row per student, one column per project,
# cell value = validation date (in days) when the project was validated.
wb = Workbook()
ws = wb.active
# Header row: project names starting at column B.
for col in range(len(projectsNames)):
    ws.cell(row=1, column=col+2).value = projectsNames[col]
# name='nklarsfe'
# projects = studs2[name]
indexName = 2
# for name, projects in studs2.items():
for row in range(0, len(names)):
    name = names[row]
    projects = studs2[name]
    ws.cell(row=indexName, column=1).value = name
    # Distinct loop variable `col`: the original reused `i` for both the
    # outer (student) and inner (project) loops, shadowing the outer index.
    for col in range(len(projectsNames)):
        projectName = projectsNames[col]
        if(projectName in projects):
            # print(col, projectName, projects[projectName])
            if(projects[projectName]['wasValidated']):
                dateInDays = projects[projectName]['dateInDays']
                ws.cell(row=indexName, column=col+2).value = dateInDays
    indexName += 1
print(indexName)
wb.save('old_result4.xlsx')
# for key, value in studs.items():
|
from datetime import datetime
from django.conf import settings
from django.db import models
from model_utils.models import TimeStampedModel
class Posts(TimeStampedModel):
    """
    Model for user Posts
    --- Methods ---
    1) get_comments() : Return all comments on post
    2) get_time() : Return modified time of post
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    text = models.TextField()

    def __str__(self):
        # Short preview of the post text.
        return "{0}".format(self.text[:10])

    def get_comments(self):
        """Return all comments on this post (reverse FK from Comments)."""
        return self.posts_comments.all()

    # The original misspelled name is kept as an alias so existing
    # callers of get_commets() keep working.
    get_commets = get_comments

    def get_time(self):
        """Return the last-modified timestamp, formatted."""
        return datetime.strftime(self.modified, "%H:%M:%S %b/%d/%Y")
class Comments(TimeStampedModel):
    """
    Model for Comments on Posts
    --- Methods ---
    1) get_time() : Return modified time of post
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Reverse accessor on Posts: post.posts_comments.all()
    post = models.ForeignKey(Posts, related_name="posts_comments")
    text = models.TextField()
    def __str__(self):
        return "{0}".format(self.text)
    def get_time(self):
        """Return the last-modified timestamp, formatted."""
        return datetime.strftime(self.modified, "%H:%M:%S %b/%d/%Y")
|
from django.conf.urls import url
from . import views
# Only the login route is active; older profile routes are kept commented
# for reference.
urlpatterns = [
    #url(r'^(?P<user_id>[0-9]+)/$', views.user_profile, name='user'),
    #url(r'^$', views.all_profile, name='all'),
    #url(r'^$', views.user_profile, name='user'),
    url(r'^$', views.login_page, name='login'),
]
#fix user_id regex
|
# Generated by Django 2.0.5 on 2018-06-14 12:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Menu.approved nullable/protected and add choices to food_intake."""

    dependencies = [
        ('calculation', '0023_auto_20180613_1651'),
    ]
    operations = [
        migrations.AlterField(
            model_name='menu',
            name='approved',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='menu_approved', to='calculation.People', verbose_name='утверждено'),
        ),
        migrations.AlterField(
            model_name='menu',
            name='food_intake',
            field=models.PositiveIntegerField(choices=[(1, 'Завтрак'), (2, 'Обед'), (3, 'Полдник'), (4, '1-й ужин'), (5, '2-й ужин')], db_index=True, default=1, verbose_name='приём пищи'),
        ),
    ]
|
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import numpy as np
import pandas as pd
def parseXML2object(filepath):
    """Parse the XML file at `filepath` and return its root element."""
    return ET.parse(filepath).getroot()
def combineChildrenAttributeValuesInADicionary(root, *args):
    """Collect, for each attribute name in `args`, the values found on the
    direct children of `root`.

    Returns {attribute_name: [child0_value, child1_value, ...]}.
    Raises KeyError if a child lacks one of the requested attributes.
    """
    # `collected` instead of the original name `dict`, which shadowed the builtin.
    collected = {arg: [] for arg in args}
    for child in root:
        for arg in args:
            collected[arg].append(child.attrib[arg])
    return collected
def meanWaitSteps(results_Filepath, step_size):
    """Return the mean waiting time (waitSteps * step_size) over all trips
    recorded in the results XML file.
    """
    root = parseXML2object(results_Filepath)
    values = combineChildrenAttributeValuesInADicionary(root, "waitSteps")
    # On Python 3, map() returns an iterator and np.mean(<map object>)
    # fails; materialize the converted values in a list instead.
    waitTime = [int(steps) * step_size for steps in values["waitSteps"]]
    return np.mean(waitTime)
def meanDepartDelay(results_Filepath):
    """Return the mean departure delay over all trips in the results XML file."""
    root = parseXML2object(results_Filepath)
    values = combineChildrenAttributeValuesInADicionary(root, "departDelay")
    # Materialize the map() result: np.mean(<map object>) fails on Python 3.
    departDelay = [float(v) for v in values["departDelay"]]
    return np.mean(departDelay)
if __name__ == "__main__":
step_size = 0.1
results_Filepath = "tripsoutput.xml"
root = parseXML2object(results_Filepath)
dict = combineChildrenAttributeValuesInADicionary(root, "id", "waitSteps", "duration", "departDelay")
waitSteps = map(int, dict["waitSteps"])
waitTime = map(lambda x : x*step_size, waitSteps)
print(np.mean(waitTime))
|
from django.urls import path
from . import views
# URL routes for this app: the index page and the geolocation endpoint.
urlpatterns = [
    path('',views.index, name = 'index'),
    path('geolocate',views.geolocate,name = 'geolocate')
] |
# -*- coding: utf-8 -*-
"""Export from the 3DNL database.
Copyright (c) 2019, 3D geoinformation group, Delft University of Technology
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import re
from datetime import date, time, datetime, timedelta
from typing import Mapping, Sequence, Tuple, List
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
from click import ClickException, echo
from cjio import cityjson
from cjio.models import CityObject, Geometry
from psycopg2 import sql, pool, Error as pgError
from cjio_dbexport import settings, db, utils
log = logging.getLogger(__name__)
def export(tile, filepath, cfg):
    """Export a tile from PostgreSQL, convert to CityJSON and write to file.

    Returns a (success, filepath) tuple so callers can report per-tile
    results without exceptions crossing worker boundaries.
    """
    try:
        dbexport = query(
            conn_cfg=cfg["database"],
            tile_index=cfg["tile_index"],
            cityobject_type=cfg["cityobject_type"],
            threads=1,
            tile_list=(tile,),
        )
    except BaseException as e:
        log.error(f"Failed to export tile {str(tile)}\n{e}")
        return False, filepath
    try:
        cm = to_citymodel(dbexport, cfg=cfg, compress=True, important_digits=3)
    finally:
        # The raw export can be large; release it as soon as the city
        # model has been built (or building failed).
        del dbexport
    if cm is not None:
        cm.j["metadata"]["fileIdentifier"] = filepath.name
        try:
            with open(filepath, "w") as fout:
                # Compact separators keep the output file small.
                json_str = json.dumps(cm.j, separators=(',', ':'))
                fout.write(json_str)
            return True, filepath
        except IOError as e:
            log.error(f"Invalid output file: {filepath}\n{e}")
            return False, filepath
        finally:
            del cm
    else:
        log.error(
            f"Failed to create CityJSON from {filepath.stem},"
            f" check the logs for details."
        )
        return False, filepath
def to_citymodel(dbexport, cfg, compress: bool = True, important_digits: int = 3):
    """Convert a database export to a CityJSON city model.

    Returns the city model, or None when conversion or any
    post-processing step fails (errors are logged, not raised).
    """
    try:
        cm = convert(dbexport, cfg=cfg)
    except BaseException as e:
        log.error(f"Failed to convert database export to CityJSON\n{e}")
        return None
    if not cm:
        # convert() produced nothing usable.
        return None
    if compress:
        try:
            cm.compress(important_digits=important_digits)
        except BaseException as e:
            log.error(f"Failed to compress cityjson\n{e}")
            return None
    else:
        # Without compression, still clean up the vertex list.
        try:
            cm.remove_duplicate_vertices()
        except BaseException as e:
            log.error(f"Failed to remove duplicate vertices\n{e}")
            return None
        try:
            cm.remove_orphan_vertices()
        except BaseException as e:
            log.error(f"Failed to remove orphan vertices\n{e}")
            return None
    return cm
def convert(dbexport, cfg):
    """Convert the exported citymodel to CityJSON.

    NOTE(review): mutates `cfg` by setting cfg["rounding"]; callers see
    the change.
    """
    # Set EPSG
    epsg = 7415
    # Set rounding for floating point attributes
    cfg["rounding"] = 4
    log.info(
        f"Floating point attributes are rounded up to {cfg['rounding']} decimal digits")
    cm = cityjson.CityJSON()
    cm.cityobjects = dict(dbexport_to_cityobjects(dbexport, cfg))
    log.debug("Referencing geometry")
    cityobjects, vertex_lookup = cm.reference_geometry()
    log.debug("Adding to json")
    cm.add_to_j(cityobjects, vertex_lookup)
    log.debug("Updating metadata")
    cm.update_metadata(overwrite=True, new_uuid=True)
    log.debug("Setting EPSG")
    cm.set_epsg(epsg)
    log.info(f"Exported CityModel:\n{cm}")
    return cm
def dbexport_to_cityobjects(dbexport, cfg):
    """Generate (id, CityObject) pairs from a database export.

    For each exported table, look up its geometry configuration in
    `cfg` and convert every record of the table to a CityObject.
    """
    for (cotype, cotable), tabledata in dbexport:
        cfg_geom = None
        # Find the geometry mapping configured for this table.
        for candidate in cfg["cityobject_type"][cotype]:
            if candidate["table"] == cotable:
                cfg_geom = candidate["field"]["geometry"]
                cfg_geom['lod'] = candidate["field"].get('lod')
                cfg_geom['semantics'] = candidate["field"].get('semantics')
                cfg_geom['semantics_mapping'] = cfg.get('semantics_mapping')
        # Loop through the whole tabledata and create the CityObjects.
        yield from table_to_cityobjects(
            tabledata=tabledata, cotype=cotype, cfg_geom=cfg_geom,
            rounding=cfg['rounding']
        )
def table_to_cityobjects(tabledata, cotype: str, cfg_geom: dict, rounding: int):
    """Convert database records to (id, CityObject) pairs.

    Attributes are copied onto the CityObject, with floats rounded to
    `rounding` digits and date/time values serialized as ISO strings.
    """
    for record in tabledata:
        coid = record["coid"]
        co = CityObject(id=coid)
        # Parse the geometry
        co.geometry = record_to_geometry(record, cfg_geom)
        # Parse attributes, except special fields that serve some purpose,
        # eg. primary key (pk) or cityobject ID (coid)
        special_fields = ['pk', 'coid', cfg_geom['lod'], cfg_geom['semantics']]
        for key, attr in record.items():
            if key not in special_fields and "geom_" not in key:
                if isinstance(attr, float):
                    co.attributes[key] = round(attr, rounding)
                elif isinstance(attr, (date, time, datetime)):
                    # One isinstance call instead of three chained checks.
                    co.attributes[key] = attr.isoformat()
                elif isinstance(attr, timedelta):
                    co.attributes[key] = str(attr)
                else:
                    co.attributes[key] = attr
        # Set the CityObject type
        co.type = cotype
        yield coid, co
def record_to_geometry(record: Mapping, cfg_geom: dict) -> Sequence[Geometry]:
    """Create a CityJSON Geometry from a boundary array that was retrieved from
    Postgres.

    One Geometry is built per LoD key in `cfg_geom`; the LoD value comes
    from the configured column when present, otherwise it is parsed from
    the key name itself.
    """
    geometries = []
    lod_column = cfg_geom.get('lod')
    semantics_column = cfg_geom.get('semantics')
    for lod_key in [k for k in cfg_geom if k != 'lod' and k != 'semantics' and k != 'semantics_mapping']:
        if lod_column:
            lod = record[lod_column]
        else:
            lod = utils.parse_lod_value(lod_key)
        geomtype = cfg_geom[lod_key]["type"]
        geom = Geometry(type=geomtype, lod=lod)
        if geomtype == "Solid":
            # A Solid is a list of shells; the record holds the outer shell.
            solid = [
                record.get(settings.geom_prefix + lod_key),
            ]
            geom.boundaries = solid
        elif geomtype == "MultiSurface":
            geom.boundaries = record.get(settings.geom_prefix + lod_key)
        if semantics_column:
            geom.surfaces = record_to_surfaces(
                geomtype=geomtype,
                boundary=geom.boundaries,
                semantics=record[semantics_column],
                semantics_mapping=cfg_geom['semantics_mapping']
            )
        geometries.append(geom)
    return geometries
def record_to_surfaces(geomtype: str, boundary: Sequence,
                       semantics: Sequence[int], semantics_mapping: dict) -> dict:
    """Create a CityJSON Semantic Surface object from an array of labels and a
    CityJSON geometry representation.

    `semantics[i]` labels the i-th surface of the boundary; only labels
    that actually occur are kept in the returned mapping.
    """
    surfaces = {}
    for key, type in semantics_mapping.items():
        surfaces[key] = {'surface_idx': [], 'type': type}
    if geomtype == "Solid":
        # Only the outer shell can carry semantics here.
        if len(boundary) > 1:
            log.warning("Cannot assign semantics to Solids with inner shell(s)")
        shell = boundary[0]
        if len(shell) != len(semantics):
            log.warning("Encountered unequal sized geometry shell and semantics arrays")
        else:
            for i, srf in enumerate(shell):
                surfaces[semantics[i]]['surface_idx'].append([0,i])
    elif geomtype == "MultiSurface":
        for i, srf in enumerate(boundary):
            surfaces[semantics[i]]['surface_idx'].append(i)
    # Drop semantic types that have no surfaces assigned.
    return {sem: idx for sem, idx in surfaces.items() if len(idx['surface_idx']) > 0}
def query(
    conn_cfg: Mapping,
    tile_index: Mapping,
    cityobject_type: Mapping,
    threads=None,
    tile_list=None,
    bbox=None,
    extent=None,
):
    """Export a table from PostgreSQL. Multithreading, with connection pooling.

    Yields ((cotype, tablename), resultset) pairs, one per configured
    table. With threads == 1 a single connection is reused; with
    threads > 1 each table query runs on its own pooled connection.
    """
    # see: https://realpython.com/intro-to-python-threading/
    # see: https://stackoverflow.com/a/39310039
    # Need one thread per table
    if threads is None:
        threads = sum(len(cotables) for cotables in cityobject_type.values())
    if threads == 1:
        log.debug(f"Running on a single thread.")
        conn = db.Db(**conn_cfg)
        try:
            for cotype, cotables in cityobject_type.items():
                for cotable in cotables:
                    tablename = cotable["table"]
                    log.debug(f"CityObject {cotype} from table {tablename}")
                    features = db.Schema(cotable)
                    tx = db.Schema(tile_index)
                    sql_query = build_query(
                        conn=conn,
                        features=features,
                        tile_index=tx,
                        tile_list=tile_list,
                        bbox=bbox,
                        extent=extent,
                    )
                    try:
                        # Note that resultset can be []
                        yield (cotype, tablename), conn.get_dict(sql_query)
                    except pgError as e:
                        log.error(f"{e.pgcode}\t{e.pgerror}")
                        raise ClickException(
                            f"Could not query {cotable}. Check the "
                            f"logs for details."
                        )
        finally:
            conn.close()
    elif threads > 1:
        log.debug(f"Running with ThreadPoolExecutor, nr. of threads={threads}")
        pool_size = sum(len(cotables) for cotables in cityobject_type.values())
        conn_pool = pool.ThreadedConnectionPool(
            minconn=1, maxconn=pool_size + 1, **conn_cfg
        )
        try:
            with ThreadPoolExecutor(max_workers=threads) as executor:
                future_to_table = {}
                for cotype, cotables in cityobject_type.items():
                    # Need a thread for each of these
                    for cotable in cotables:
                        tablename = cotable["table"]
                        # Need a connection from the pool per thread
                        conn = db.Db(conn=conn_pool.getconn(key=(cotype, tablename)))
                        # Need a connection and thread for each of these
                        log.debug(f"CityObject {cotype} from table {cotable['table']}")
                        features = db.Schema(cotable)
                        tx = db.Schema(tile_index)
                        sql_query = build_query(
                            conn=conn,
                            features=features,
                            tile_index=tx,
                            tile_list=tile_list,
                            bbox=bbox,
                            extent=extent,
                        )
                        # Schedule the DB query for execution and store the returned
                        # Future together with the cotype and table name
                        future = executor.submit(conn.get_dict, sql_query)
                        future_to_table[future] = (cotype, tablename)
                        # If I put away the connection here, then it locks the main
                        # thread and it becomes like using a single connection.
                        # conn_pool.putconn(conn=conn.conn, key=(cotype, tablename),
                        #                   close=True)
                for future in as_completed(future_to_table):
                    cotype, tablename = future_to_table[future]
                    try:
                        # Note that resultset can be []
                        yield (cotype, tablename), future.result()
                    except pgError as e:
                        log.error(f"{e.pgcode}\t{e.pgerror}")
                        raise ClickException(
                            f"Could not query {tablename}. Check the "
                            f"logs for details."
                        )
        finally:
            conn_pool.closeall()
    else:
        raise ValueError(f"Number of threads must be greater than 0.")
def build_query(
    conn: db.Db,
    features: db.Schema,
    tile_index: db.Schema,
    tile_list=None,
    bbox=None,
    extent=None,
):
    """Build an SQL query for extracting CityObjects from a single table.

    Exactly one of `bbox`, `tile_list`, `extent` selects the spatial
    filter; with none given, the whole table is exported.

    ..todo: make EPSG a parameter
    """
    # Set EPSG
    epsg = 7415
    # Exclude columns from the selection
    table_fields = conn.get_fields(features.schema + features.table)
    if 'exclude' in features.field.__dict__:
        exclude = [f.string for f in features.field.exclude if f is not None]
    else:
        exclude = []
    geom_cols = [
        getattr(features.field.geometry, lod).name.string
        for lod in features.field.geometry.keys()
    ]
    # Select every regular attribute column: not the pk, not a geometry,
    # not the cityobject id, and not explicitly excluded.
    attr_select = sql.SQL(", ").join(
        sql.Identifier(col)
        for col in table_fields
        if col != features.field.pk.string
        and col not in geom_cols
        and col != features.field.cityobject_id.string
        and col not in exclude
    )
    # polygons subquery
    if bbox:
        log.info(f"Exporting with BBOX {bbox}")
        polygons_sub, attr_where, extent_sub = query_bbox(features, bbox, epsg)
    elif tile_list:
        log.info(f"Exporting with a list of tiles {tile_list}")
        polygons_sub, attr_where, extent_sub = query_tiles_in_list(
            features=features, tile_index=tile_index, tile_list=tile_list
        )
    elif extent:
        log.info(f"Exporting with polygon extent")
        ewkt = utils.to_ewkt(polygon=extent, srid=epsg)
        polygons_sub, attr_where, extent_sub = query_extent(
            features=features, ewkt=ewkt
        )
    else:
        log.info(f"Exporting the whole database")
        polygons_sub, attr_where, extent_sub = query_all(features=features)
    # Main query
    query_params = {
        "pk": features.field.pk.sqlid,
        "coid": features.field.cityobject_id.sqlid,
        "tbl": features.schema + features.table,
        "attr": attr_select,
        # Placeholder spelling fixed ("instersects" -> "intersects"); the
        # key and the template below are changed together.
        "where_intersects": attr_where,
        "extent": extent_sub,
        "polygons": polygons_sub,
    }
    query = sql.SQL(
        """
        WITH
        {extent}
        attr_in_extent AS (
            SELECT {pk} pk,
                   {coid} coid,
                   {attr}
            FROM {tbl} a
            {where_intersects}
        ),
        {polygons}
        SELECT *
        FROM polygons b
        INNER JOIN attr_in_extent a ON
        b.pk = a.pk;
        """
    ).format(**query_params)
    log.debug(conn.print_query(query))
    return query
def query_all(features) -> Tuple[sql.Composed, ...]:
    """Build a subquery selecting every geometry in the features table.

    Returns the (polygons, where-clause, extent) subquery triple; the
    where-clause and the extent are empty because no spatial filter applies.
    """
    sql_polygons = sql.SQL(
        """
        polygons AS (
            SELECT
                {pk} pk,
                {geometries}
            FROM
                {tbl}
        )
        """
    ).format(
        pk=features.field.pk.sqlid,
        coid=features.field.cityobject_id.sqlid,
        tbl=features.schema + features.table,
        geometries=sql_cast_geometry(features),
    )
    # No spatial restriction: both the WHERE clause and the extent CTE stay empty.
    empty = sql.Composed("")
    return sql_polygons, empty, empty
def query_bbox(
    features: db.Schema, bbox: Sequence[float], epsg: int
) -> Tuple[sql.Composed, ...]:
    """Build a subquery of the geometry within a bounding box.

    Returns the (polygons, where-clause, extent) subquery triple; the
    extent part is empty because the BBOX itself acts as the extent.
    """
    # A single geometry column suffices to restrict the selection to the BBOX
    first_lod = next(iter(features.field.geometry.keys()))
    params = dict(
        pk=features.field.pk.sqlid,
        coid=features.field.cityobject_id.sqlid,
        geometries=sql_cast_geometry(features),
        geometry_0=getattr(features.field.geometry, first_lod).name.sqlid,
        epsg=sql.Literal(epsg),
        xmin=sql.Literal(bbox[0]),
        ymin=sql.Literal(bbox[1]),
        xmax=sql.Literal(bbox[2]),
        ymax=sql.Literal(bbox[3]),
        tbl=features.schema + features.table,
    )
    sql_polygons = sql.SQL(
        """
        polygons AS (
            SELECT {pk} pk,
                   {geometries}
            FROM
                {tbl}
            WHERE ST_3DIntersects(
                {geometry_0},
                ST_MakeEnvelope({xmin}, {ymin}, {xmax}, {ymax}, {epsg})
            )
        )
        """
    ).format(**params)
    sql_where_attr_intersects = sql.SQL(
        """
        WHERE ST_3DIntersects(
            a.{geometry_0},
            ST_MakeEnvelope({xmin}, {ymin}, {xmax}, {ymax}, {epsg})
        )
        """
    ).format(**params)
    return sql_polygons, sql_where_attr_intersects, sql.Composed("")
def query_extent(features: db.Schema, ewkt: str) -> Tuple[sql.Composed, ...]:
    """Build a subquery of the geometry intersecting a polygon (EWKT).

    Returns the (polygons, where-clause, extent) subquery triple; the
    extent part is empty because the polygon itself acts as the extent.
    """
    # A single geometry column suffices to restrict the selection
    first_lod = next(iter(features.field.geometry.keys()))
    params = dict(
        pk=features.field.pk.sqlid,
        coid=features.field.cityobject_id.sqlid,
        geometries=sql_cast_geometry(features),
        geometry_0=getattr(features.field.geometry, first_lod).name.sqlid,
        tbl=features.schema + features.table,
        poly=sql.Literal(ewkt),
    )
    sql_polygons = sql.SQL(
        """
        polygons AS (
            SELECT
                {pk} pk,
                {geometries}
            FROM
                {tbl}
            WHERE ST_3DIntersects(
                {geometry_0},
                {poly}
            )
        )
        """
    ).format(**params)
    sql_where_attr_intersects = sql.SQL(
        """
        WHERE ST_3DIntersects(
            a.{geometry_0},
            {poly}
        )
        """
    ).format(**params)
    return sql_polygons, sql_where_attr_intersects, sql.Composed("")
def query_tiles_in_list(
    features: db.Schema, tile_index: db.Schema, tile_list: Sequence[str]
) -> Tuple[sql.Composed, ...]:
    """Build a subquery of the geometry in the tile list.

    Returns the (polygons, where-clause, extent) subquery triple consumed
    by build_query. Note the trailing commas inside the fragments: they
    must compose exactly with build_query's WITH-template.
    """
    # sql.Literal renders a tuple as a parenthesized IN-list
    tl_tup = tuple(tile_list)
    # One geometry column is enough to restrict the selection to the BBOX
    lod = list(features.field.geometry.keys())[0]
    query_params = {
        "tbl": features.schema + features.table,
        "tbl_pk": features.field.pk.sqlid,
        "tbl_coid": features.field.cityobject_id.sqlid,
        "tbl_geom": getattr(features.field.geometry, lod).name.sqlid,
        "geometries": sql_cast_geometry(features),
        "tile_index": tile_index.schema + tile_index.table,
        "tx_geom": tile_index.field.geometry.sqlid,
        "tx_pk": tile_index.field.pk.sqlid,
        "tile_list": sql.Literal(tl_tup),
    }
    # CTE: union of the selected tiles' geometry, used as the spatial filter
    sql_extent = sql.SQL(
        """
        extent AS (
            SELECT ST_Union({tx_geom}) geom
            FROM {tile_index}
            WHERE {tx_pk} IN {tile_list}),
        """
    ).format(**query_params)
    # CTEs: features intersecting the extent, then their cast geometries
    sql_polygon = sql.SQL(
        """
        geom_in_extent AS (
            SELECT a.*
            FROM {tbl} a,
                 extent t
            WHERE ST_3DIntersects(t.geom,
                                  a.{tbl_geom})),
        polygons AS (
            SELECT
                {tbl_pk} pk,
                {geometries}
            FROM geom_in_extent b)
        """
    ).format(**query_params)
    # Extra FROM item + WHERE clause spliced into build_query's attr query
    sql_where_attr_intersects = sql.SQL(
        """
        ,extent t WHERE ST_3DIntersects(t.geom, a.{tbl_geom})
        """
    ).format(**query_params)
    return sql_polygon, sql_where_attr_intersects, sql_extent
def with_list(conn: db.Db, tile_index: db.Schema, tile_list: Tuple[str]) -> List[str]:
    """Select tiles based on a list of tile IDs.

    The special value 'all' as the first element selects every tile
    in the index.

    :raises AttributeError: when none of the requested tiles exist in the index
    """
    if tile_list[0].lower() == "all":
        log.info("Getting all tiles from the index.")
        found = all_in_index(conn=conn, tile_index=tile_index)
    else:
        log.info("Verifying if the provided tiles are in the index.")
        found = tiles_in_index(conn=conn, tile_index=tile_index, tile_list=tile_list)
    if not found:
        raise AttributeError("None of the provided tiles are present in the index.")
    return found
def tiles_in_index(
    conn: db.Db, tile_index: db.Schema, tile_list: Tuple[str]
) -> List[str]:
    """Return the tile IDs that are present in the tile index.

    Tile IDs that are not found in the index are logged with a warning
    and skipped.

    :param conn: open database connection
    :param tile_index: mapping of the tile index table
    :param tile_list: tile IDs to look up
    :return: the subset of `tile_list` that exists in the index

    Fix: the return annotation used to claim Tuple[List[str], List[str]],
    but the function returns a single list.
    """
    if not isinstance(tile_list, tuple):
        # sql.Literal renders a tuple as a parenthesized IN-list
        tile_list = tuple(tile_list)
        log.debug(f"tile_list was not a tuple, casted to tuple {tile_list}")
    query_params = {
        "tiles": sql.Literal(tile_list),
        "index_": tile_index.schema + tile_index.table,
        "tile": tile_index.field.pk.sqlid,
    }
    query = sql.SQL(
        """
        SELECT DISTINCT {tile}
        FROM {index_}
        WHERE {tile} IN {tiles}
        """
    ).format(**query_params)
    log.debug(conn.print_query(query))
    in_index = [t[0] for t in conn.get_query(query)]
    not_found = set(tile_list) - set(in_index)
    if len(not_found) > 0:
        log.warning(
            f"The provided tile IDs {not_found} are not in the index, "
            f"they are skipped."
        )
    return in_index
def all_in_index(conn: db.Db, tile_index: db.Schema) -> List[str]:
    """Get all tile IDs from the tile index."""
    query = sql.SQL(
        """
        SELECT DISTINCT {tile} FROM {index_}
        """
    ).format(
        index_=tile_index.schema + tile_index.table,
        tile=tile_index.field.pk.sqlid,
    )
    log.debug(conn.print_query(query))
    return [row[0] for row in conn.get_query(query)]
def parse_polygonz(wkt_polygonz):
    """Parse a 'POLYGON Z (...)' WKT string into CityJSON Surface rings.

    Yields one list of (x, y, z) float tuples per ring, dropping the first
    vertex because WKT repeats it at the end of the ring.
    Logs an error and yields nothing when the input is not a POLYGON Z.
    """
    # match: 'POLYGON Z (<everything in here>)'
    outer = re.findall(r"(?<=POLYGON Z \().*(?!$)", wkt_polygonz)
    if not outer:
        log.error("Not a POLYGON Z")
        return
    # match each '(<ring>)' group inside the polygon body
    for ring in re.findall(r"\(([^)]+)\)", outer[0]):
        vertices = [
            tuple(float(coord) for coord in vertex.split())
            for vertex in ring.split(",")
        ]
        yield vertices[1:]  # WKT repeats the first vertex
def sql_cast_geometry(features: db.Schema) -> sql.Composed:
    """Create a clause for SELECT statements for the geometry columns.

    For each geometry column in the table (one column per LoD) that is
    mapped in the configuration file, emit a cast of the MultiPolygon to a
    MultiSurface, aliased with the configured geometry prefix, e.g.:

        'cjdb_multipolygon_to_multisurface(wkb_geometry_lod1) geom_lod1,
         cjdb_multipolygon_to_multisurface(wkb_geometry_lod2) geom_lod2'
    """
    pieces = []
    for lod in features.field.geometry.keys():
        pieces.append(
            sql.SQL(
                "cjdb_multipolygon_to_multisurface({geom_field}) {geom_alias}"
            ).format(
                geom_field=getattr(features.field.geometry, lod).name.sqlid,
                geom_alias=sql.Identifier(settings.geom_prefix + lod),
            )
        )
    return sql.SQL(",").join(pieces)
|
from ._base import IterativeInitialGuess, _IterativeSolver
import src.matrix_operations as matops
class JacobiSolver(_IterativeSolver):
    """Iterative Jacobi solver for Ax = b.

    Each step computes x' = D^{-1} (b - C x), where D holds the diagonal
    terms of A and C the off-diagonal terms.
    """

    def __init__(self, **kwargs):
        """
        :key matA: Matrix A
        :key matb: Matrix b
        :key guess_source: how guess should be derived, should be an IterativeInitialGuess
        """
        self._allowed_guess_sources = [
            IterativeInitialGuess.DEFAULT,
            IterativeInitialGuess.MATRIX_OF_ZEROS,
            IterativeInitialGuess.RANDOM_MATRIX,
        ]
        super().__init__(**kwargs)
        # Jacobi's canonical starting point is the zero vector.
        if self._guess_source == IterativeInitialGuess.DEFAULT:
            self._guess_source = IterativeInitialGuess.MATRIX_OF_ZEROS

    @staticmethod
    def get_solver_name():
        return "Jacobi"

    def _build_interim_matricies(self):
        # C: off-diagonal part of A; D: inverse of A's diagonal part.
        self._matC = matops.create_based_on_non_diagonal_terms(self._matA)
        self._matD = matops.create_inverted(
            matops.create_based_on_diagonal_terms(self._matA)
        )

    def _calculate_iteration_result(self, mat_guess):
        # One Jacobi step: x' = D^{-1} (b - C x)
        if not matops.is_vector(mat_guess):
            raise ValueError("Guess matrix must be a vector")
        weighted = matops.multiply(self._matC, mat_guess)
        residual = matops.subtract(self._matb, weighted)
        return matops.multiply(self._matD, residual)

    def _create_guess(self):
        # Lazily build the interim matrices the first time a guess is needed.
        if not hasattr(self, "_matD"):
            self._build_interim_matricies()
        rows = matops.count_rows(self._matb)
        if self._guess_source == IterativeInitialGuess.RANDOM_MATRIX:
            guess = matops.create_random(rows, True)
        else:
            guess = matops.create_zeros(rows)
        # Column-vector shape expected by the iteration.
        return matops.reshape(guess, (-1, 1))
|
# Demonstrate round() and float formatting.
# Fix: converted Python-2 print statements to the function-call form,
# which behaves identically for a single argument on both Python 2 and 3.
print(round(1.23, 1))
print(round(1.27, 1))
print(round(-1.27, 1))
print(round(1.25361, 3))

a = 1627731
# Negative ndigits rounds to tens / hundreds / thousands.
print(round(a, -1))
print(round(a, -2))
print(round(a, -3))

x = 1.23456
print(format(x, '0.2f'))
print(format(x, '0.3f'))
print("value is {:0.3f}".format(x))

a = 2.1
b = 4.2
c = a + b
# Binary floating point: prints 6.300000000000001, not 6.3.
print(c)
print(round(c, 2))
#!/usr/bin/env python
# encoding: utf-8
"""Usage:
named_id.py train <protocol> <experiment_dir> [options] [--from=<epoch>] [(--augment=<ratio> [--uniform])]
named_id.py validate <protocol> <train_dir> [options] [--evergreen --interactive]
named_id.py test <protocol> <validate_dir> [options] [--interactive]
named_id.py oracle <protocol> [options]
named_id.py visualize <protocol> [<validate_dir>]
Common options:
--subset=<subset> Protocol subset, one of 'train', 'development' or 'test'.
Defaults to 'train', 'development' and 'test' in
'train', 'validate', and 'test' mode, respectively.
--batch=<batch> Batch size (# of windows) [default: 128]
--window=<window> Window size (# of speaker turns) [default: 8]
--step=<step> Step size (overlap between windows) [default: 1]
--max_len=<max_len> Maximum # of tokens input to BERT. Maximum 512 [default: 256]
--easy Only keep text windows with named speakers in it.
--sep_change Add a special "[SEP]" token between every speech turn.
Training options:
--from=<epoch> Start training back from a specific checkpoint (epoch #)
--augment=<ratio> If different from 0, will generate `|augment|` synthetic examples per real example
If less than 0, will discard real example.
See batchify for details.
Defaults to no augmentation.
--uniform When augmenting data, pick fake names with uniform distribution
regardless of their frequency in the name database.
Validation options:
--evergreen Start with the latest checkpoints
--interactive Open-up python debugger after each forward pass
To use meta-protocols, name it like: "X.SpeakerDiarization.<serie1>+<serie2>"
(with '+' separated serie names so that we're able to load the appropriate mapping)
e.g. "X.SpeakerDiarization.BuffyTheVampireSlayer+Friends"
File structure should look like:
<experiment_dir>
└───config.yml
│ <train_dir>
│ └────weights
│ │ └───*.tar
│ │ <validate_dir>
│ │ └───params.yml
│ │ │ <test_dir>
│ │ │ └───params.yml
│ │ │ │ eval
config.yml is optional to set additional parameters (e.g. change the default model architecture)
It should look like:
architecture:
nhead: 8
num_layers: 6
dropout: 0.1
training:
lr: 0.001
freeze: [bert]
"""
from docopt import docopt
from tqdm import tqdm
from pathlib import Path
import json
import yaml
import warnings
from typing import List
from tabulate import tabulate
from itertools import zip_longest
from collections import Counter
from pyannote.core import Segment
from pyannote.core.utils.distance import pdist
from pyannote.database import get_protocol
from pyannote.audio.features.wrapper import Wrapper, Wrappable
import Plumcot as PC
import re
import numpy as np
from scipy.spatial.distance import squareform
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from torch import save, load, manual_seed, no_grad, argmax, Tensor, zeros, from_numpy, \
zeros_like, LongTensor
from torch.utils.tensorboard import SummaryWriter
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.nn import BCELoss, DataParallel
from transformers import BertTokenizer
from prune.sidnet import SidNet
# set random seed
np.random.seed(0)
manual_seed(0)
EPOCH_FORMAT = '{:04d}.tar'
BERT = 'bert-base-cased'
# constant paths
DATA_PATH = Path(PC.__file__).parent / 'data'
CHARACTERS_PATH = DATA_PATH.glob('*/characters.txt')
def token_accuracy(targets: Tensor, predictions: Tensor, pad: int = 0):
    """Compute accuracy at the token level, ignoring padded targets.

    Parameters
    ----------
    targets, predictions: Tensor
        Integer token ids of the same shape.
    pad: int, optional
        Target value to ignore. Defaults to 0.

    Returns
    -------
    float: #correctly-predicted non-pad tokens / #non-pad tokens.
    """
    indices = targets != pad
    # Summing the boolean masks is simpler and cheaper than materializing
    # nonzero() index tuples (the original approach).
    correct = (predictions[indices] == targets[indices]).sum().item()
    return correct / indices.sum().item()
def batch_word_accuracy(targets: List[str], predictions: List[str],
                        pad='[PAD]', split=True):
    """Word-level accuracy over a batch of (target, prediction) pairs.

    Pad tokens in the target are skipped; a missing prediction (shorter
    sequence) counts as wrong via the zip_longest fill value.
    """
    correct, total = 0, 0
    for tgt, pred in zip(targets, predictions):
        if split:
            tgt, pred = tgt.split(), pred.split()
        for expected, predicted in zip_longest(tgt, pred, fillvalue=pad):
            if expected == pad:
                continue
            total += 1
            correct += int(expected == predicted)
    return correct / total
def str_example(inp_eg, tgt_eg, pred_eg, step=20):
    """Render input/target/prediction token lists as aligned text rows.

    Tokens are laid out in chunks of `step` columns; a tail shorter than
    `step` tokens is dropped, matching the range() bound used here.
    """
    lines = []
    for start in range(0, len(inp_eg) - step, step):
        chunk = slice(start, start + step)
        table = tabulate((
            ['inp:'] + inp_eg[chunk],
            ['tgt:'] + tgt_eg[chunk],
            ['hyp:'] + pred_eg[chunk],
        )).split('\n')
        # drop tabulate's top border line, keep the content rows
        lines.extend(table[1:])
    return '\n'.join(lines)
def plot_output(output_eg, inp_eg, tgt_eg, save=None):
    """Plot one example's output matrix with labelled token axes.

    Parameters
    ----------
    output_eg:
        model output for one example; indexed [:max_len, 1:max_len-1] below.
    inp_eg:
        word-piece tokens of the input (list of str).
    tgt_eg:
        word-level targets (list of str).
    save: Path, optional
        Directory to write "output.png" into; shows the figure when None.
    """
    # merge target and input into a single list
    # NOTE(review): `i` advances *after* appending whenever the token is not
    # a '##' continuation, so a continuation piece gets paired with the next
    # word's target — confirm this alignment is intended.
    merge = []
    i = 0
    for token in inp_eg:
        merge.append(f"{token} ({tgt_eg[i]})")
        if not token.startswith('##'):
            i += 1
    max_len = len(inp_eg)
    plt.figure(figsize=(max_len//6, max_len//6))
    # shift by 1 to discard [CLS] and [SEP] tokens
    plt.imshow(output_eg.detach().cpu().numpy()[:max_len, 1: max_len-1])
    plt.colorbar()
    plt.xticks(range(max_len), inp_eg[:max_len], fontsize='x-small', rotation='vertical')
    plt.yticks(range(max_len), merge[:max_len], fontsize='x-small', rotation='horizontal')
    if save is None:
        plt.show()
    else:
        plt.savefig(save/"output.png")
def mode(prediction, pad='[PAD]'):
    """Return the most frequent predicted item, or `pad` for an empty Counter."""
    if not prediction:
        return pad
    (item, _count), = prediction.most_common(1)
    return item
def eval(batches, model, tokenizer, log_dir,
         test=False, evergreen=False, interactive=False, step_size=1, window_size=10):
    """Load model from checkpoint and evaluate it on batches.

    When testing, only the best model should be tested.

    NOTE(review): this function shadows the `eval` builtin; renaming it
    would change the module interface, so the name is kept.

    Parameters
    ----------
    batches: List[Tuple[Tensor]]:
        (text_batch, target_batch, input_ids, target_ids, audio_similarity, src_key_padding_mask, tgt_key_padding_mask)
        see batch_encode_multi
    model: SidNet
        instance of SidNet, ready to be loaded
    tokenizer: BertTokenizer
        used to decode output (i.e. de-tensorize, de-tokenize)
    log_dir: Path
        either:
        - [!test]: Path to log validation accuracy and load model weights (from ../weights)
        - [test]: Path to log test accuracy, load model weights (from ../../weights)
          and load best epoch (from ../params.yml)
    test: bool, optional
        Whether to test only the best model.
        Defaults to False.
    evergreen: bool, optional
        Whether to start validation with the latest checkpoints.
        Defaults to False.
    interactive: bool, optional
        Opens-up python debugger after each forward pass.
        Defaults to False.
    step_size: int, optional
        Overlap between two subsequent text-windows (i.e. item in batch)
        Defaults to 1.
    window_size: int, optional
        Number of speaker turns in one window
        Defaults to 10.
    """
    params_file = log_dir.parent / 'params.yml'
    if test:
        # test mode: evaluate only the best epoch recorded in params.yml
        weights_path = log_dir.parents[1] / 'weights'
        with open(params_file) as file:
            epoch = yaml.load(file, Loader=yaml.SafeLoader)["epoch"]
        weights = [weights_path/EPOCH_FORMAT.format(epoch)]
        best = 0.
    else:
        # validation mode: iterate over every checkpoint on disk
        weights_path = log_dir.parents[0] / 'weights'
        weights = sorted(weights_path.iterdir(), reverse=evergreen)
        if params_file.exists():
            with open(params_file) as file:
                best = yaml.load(file, Loader=yaml.SafeLoader)["accuracy"]
        else:
            best = 0.
    criterion = BCELoss(reduction='none')
    tb = SummaryWriter(log_dir)
    for weight in tqdm(weights, desc='Evaluating'):
        checkpoint = load(weight, map_location=model.src_device_obj)
        epoch = checkpoint["epoch"]
        model.module.load_state_dict(checkpoint['model_state_dict'])
        # manage device FIXME this should be ok after map_location ??
        model.module.to(model.src_device_obj)
        model.eval()
        with no_grad():
            epoch_loss, epoch_word_acc = 0., 0.
            # NOTE(review): file_token_acc is initialized but never filled below
            uris, file_token_acc, file_word_acc = [], [], []
            previous_uri = None
            for uri, windows, inp, tgt, input_ids, target_ids, audio_similarity, src_key_padding_mask, tgt_key_padding_mask in batches:
                # forward pass: (batch_size, sequence_length, sequence_length)
                output = model(input_ids, audio_similarity, src_key_padding_mask)
                # manage devices
                target_ids = target_ids.to(output.device)
                # get model prediction per token: (batch_size, sequence_length)
                relative_out = argmax(output, dim=2)
                # retrieve token ids from input (batch_size, sequence_length) and manage device
                prediction_ids = zeros_like(input_ids, device=output.device)
                for j, (input_window_id, relative_window_out) in enumerate(zip(input_ids, relative_out)):
                    prediction_ids[j] = input_window_id[relative_window_out]
                # decode and compute word accuracy
                predictions = tokenizer.batch_decode(prediction_ids, clean_up_tokenization_spaces=False)
                epoch_word_acc += batch_word_accuracy(tgt, predictions, tokenizer.pad_token)
                # calculate loss
                loss = criterion(output, target_ids)
                loss = reduce_loss(loss, tgt_key_padding_mask)
                epoch_loss += loss.item()
                # handle file-level stuff (batches are file-homogeneous here)
                if uri != previous_uri:
                    # compute file-level accuracy
                    if previous_uri is not None:
                        uris.append(previous_uri)
                        # merge window-level predictions
                        file_predictions = [mode(p, tokenizer.pad_token) for p in file_predictions]
                        # compute word accuracy
                        file_word_acc.append(batch_word_accuracy([file_target],
                                                                 [file_predictions],
                                                                 pad=tokenizer.pad_token,
                                                                 split=False))
                        # TODO audio ER
                    # reset file-level variables
                    file_length = windows[-1][-1] - windows[0][0]
                    i, shift = 0, 0
                    file_target = [tokenizer.pad_token] * file_length
                    file_predictions = [Counter() for _ in range(file_length)]
                # save target and output for future file-level accuracy
                for target_i, pred_i in zip(tgt, predictions):
                    target_i, pred_i = target_i.split(), pred_i.split()
                    for start, end in windows[i: i+window_size]:
                        file_target[start:end] = target_i[start-shift: end-shift]
                        # accumulate window-level votes per word position
                        for counter, p in zip(file_predictions[start:end], pred_i[start-shift: end-shift]):
                            counter[p] += 1
                    i += step_size
                    # shift between batch and original file
                    shift = windows[i][0]  # start
                if interactive:
                    eg = np.random.randint(len(tgt))
                    inp_eg, tgt_eg, pred_eg = inp[eg], tgt[eg], predictions[eg]
                    # print random example
                    print(str_example(inp_eg.split(), tgt_eg.split(), pred_eg.split()))
                    # plot model output
                    plot_output(output[eg], tokenizer.tokenize(inp_eg),
                                tgt_eg.split(), log_dir)
                    # print current metrics
                    metrics = {
                        'Loss/eval': [epoch_loss],
                        'Accuracy/eval/batch/word': [epoch_word_acc]
                    }
                    print(tabulate(metrics, headers='keys'))
                    breakpoint()
                previous_uri = uri
            # compute file-level accuracy for the last file
            uris.append(previous_uri)
            # merge window-level predictions
            file_predictions = [mode(p, tokenizer.pad_token) for p in file_predictions]
            # compute word accuracy
            file_word_acc.append(batch_word_accuracy([file_target],
                                                     [file_predictions],
                                                     pad=tokenizer.pad_token,
                                                     split=False))
            # average file-accuracies
            uris.append('TOTAL')
            file_word_acc.append(np.mean(file_word_acc))
            # log tensorboard
            tb.add_scalar('Accuracy/eval/file/word', file_word_acc[-1], epoch)
            epoch_loss /= len(batches)
            tb.add_scalar('Loss/eval', epoch_loss, epoch)
            epoch_word_acc /= len(batches)
            tb.add_scalar('Accuracy/eval/batch/word', epoch_word_acc, epoch)
        # print and write metrics
        if test:
            metrics = {
                'Loss/eval': [epoch_loss],
                'Accuracy/eval/batch/word': [epoch_word_acc]
            }
            metrics = tabulate(metrics, headers='keys', tablefmt='latex')
            metrics += tabulate(zip(uris, file_word_acc),
                                headers=['uri', 'word-level'],
                                tablefmt='latex')
            print(metrics)
            with open(log_dir / 'eval', 'w') as file:
                file.write(metrics)
        # dump best metrics
        elif epoch_word_acc > best:
            best = epoch_word_acc
            with open(log_dir / 'params.yml', 'w') as file:
                yaml.dump({"accuracy": best, "epoch": epoch}, file)
def reduce_loss(loss, tgt_key_padding_mask):
    """Mean of `loss` over the positions where `tgt_key_padding_mask` is non-zero."""
    keep = tgt_key_padding_mask.bool()
    return loss[keep].mean()
def train(batches, model, tokenizer, train_dir=Path.cwd(),
          lr=1e-3, max_grad_norm=None,
          epochs=100, freeze=['bert'], save_every=1, start_epoch=None):
    """Train the model for `epochs` epochs

    NOTE(review): `freeze=['bert']` is a mutable default argument; it is
    only read below (never mutated), and kept for interface stability.

    Parameters
    ----------
    batches: List[Tuple[Tensor]]
        (text_batch, target_batch, input_ids, target_ids, audio_similarity, src_key_padding_mask, tgt_key_padding_mask)
        see batch_encode_multi
    model: SidNet
        instance of SidNet, ready to be trained
    tokenizer: BertTokenizer
        used to get tokenization constants (e.g. tokenizer.pad_token_id == 0)
    train_dir: Path, optional
        Path to log training loss and save model weights (under experiment_path/weights)
        Defaults to current working directory.
    lr: float, optional
        Learning rate used to optimize model parameters.
        Defaults to 1e-3
    max_grad_norm: float, optional
        Clips gradient L2 norm at max_grad_norm
        Defaults to no clipping.
    epochs: int, optional
        Train the model for `epochs` epochs.
        Defaults to 100
    freeze : List[str], optional
        Names of modules to freeze.
        Defaults to freezing bert (['bert']).
    save_every: int, optional
        Save model weights and optimizer state every `save_every` epoch.
        Defaults to save at every epoch (1)
    start_epoch: int, optional
        Starts training back at start_epoch.
        Defaults to raise an error if training in an existing directory
    """
    optimizer = Adam(model.module.parameters(), lr=lr)
    weights_path = train_dir / 'weights'
    # load previous checkpoint
    if start_epoch is not None:
        checkpoint = load(weights_path / EPOCH_FORMAT.format(start_epoch)
                          ,map_location=model.src_device_obj)
        assert start_epoch == checkpoint["epoch"]
        model.module.load_state_dict(checkpoint['model_state_dict'])
        # manage device FIXME this should be ok after map_location ??
        model.module.to(model.src_device_obj)
        # optimizer state must be restored *after* the model is on its device
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # increment epoch
        start_epoch += 1
    else:
        # put parallelized module to model.src_device_obj
        model.module.to(model.src_device_obj)
        # be careful not to erase previous weights
        weights_path.mkdir(exist_ok=False)
        # defaults to start from 0
        start_epoch = 0
    model.module.freeze(freeze)
    model.train()
    criterion = BCELoss(reduction='none')
    tb = SummaryWriter(train_dir)
    for epoch in tqdm(range(start_epoch, epochs+start_epoch), desc='Training'):
        # shuffle batches
        np.random.shuffle(batches)
        epoch_loss = 0.
        # first 4 items (uri, windows, text, target) are not needed for training
        for _, _, _, _, input_ids, target_ids, audio_similarity, src_key_padding_mask, tgt_key_padding_mask in batches:
            optimizer.zero_grad()
            # forward pass
            output = model(input_ids, audio_similarity, src_key_padding_mask)
            # manage devices
            target_ids = target_ids.to(output.device)
            # calculate loss
            loss = criterion(output, target_ids)
            # mask and reduce loss
            loss = reduce_loss(loss, tgt_key_padding_mask)
            loss.backward()
            if max_grad_norm is not None:
                clip_grad_norm_(model.module.parameters(), max_grad_norm)
            optimizer.step()
            epoch_loss += loss.item()
        tb.add_scalar('Loss/train', epoch_loss/len(batches), epoch)
        tb.add_scalar('lr', lr, epoch)
        if (epoch+1) % save_every == 0:
            save({
                'epoch': epoch,
                'model_state_dict': model.module.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': epoch_loss
            }, weights_path / EPOCH_FORMAT.format(epoch))
    return model, optimizer
def any_in_text(items, text):
    """Utility function.

    Return True if any of the items occurs as a substring of text,
    False otherwise (including for an empty `items`).
    """
    # idiomatic any() over a manual loop-and-return
    return any(item in text for item in items)
def batchify(tokenizer, protocol, mapping, subset='train', audio_emb=None,
             batch_size=128, window_size=10, step_size=1, mask=True, easy=False,
             sep_change=False, augment=0, uniform=False, shuffle=True, oracle=False):
    """
    Iterates over protocol subset, segment transcription in speaker turns,
    Divide transcription in windows then split windows in batches.
    And finally, encode batch (i.e. tokenize, tensorize...)

    Parameters
    ----------
    tokenizer: BertTokenizer
        used to tokenize, pad and tensorize text
    protocol: Protocol
        pyannote Protocol to get transcription from
    mapping: dict
        used to convert normalized speaker names into its most common name.
        Note that it's important that this name is as written in the input text.
    subset: str, optional
        subset of the protocol to get transcription from
        Defaults to training set.
    audio_emb: `Wrappable`, optional
        Describes how raw speaker embeddings should be obtained.
        See pyannote.audio.features.wrapper.Wrapper documentation for details.
        Defaults to None, indicating that the model should rely only on the text.
    batch_size: int, optional
        Remainder batch (of size <= batch_size) is kept.
        Defaults to 128.
    window_size: int, optional
        Number of speaker turns in one window
        Defaults to 10.
    step_size: int, optional
        Defaults to 1.
    mask: bool, optional
        Compute attention_mask according to max_length.
        Defaults to True.
    easy: bool, optional
        Only keep windows with named speakers in it
        (the name must match one of the labels as provided in mapping)
        Defaults to keep every window regardless of it's content.
    sep_change: bool, optional
        Add special token tokenizer.sep_token ("[SEP]") between every speech turn.
        Defaults to keep input as is.
    augment: int, optional
        Data augmentation ratio.
        If different from 0, will generate `|augment|` synthetic examples per real example
        by replacing speaker names in input text and target by a random name.
        Note that it doesn't have any effect if no speaker names (as provided in mapping)
        are present in the input text. If less than 0, will discard real example.
        Defaults to no augmentation.
    uniform: bool, optional
        When augmenting data, pick fake names with uniform distribution
        regardless of their frequency in the name database.
        Has no effect if augment==0
    shuffle: bool, optional
        Whether to shuffle windows before batching.
        Should be set to False when testing to get file-homogeneous batches,
        and to True while training to ensure stochasticity.
        Defaults to True
    oracle: bool, optional
        Compute oracle accuracy for protocol's subset
        Enforces shuffle = False
        Oracles knows who the speaker is if it's name (case-insensitive)
        is mentioned in the input. Most of the other arguments are not relevant
        in this case, and yields (uri, accuracy, n_tokens) instead of what's documented below.
        Defaults to False

    Yields
    -------
    batch: Tuple[str, List[Tuple[int]], List[str], Tensor]:
        (uri, windows, text_batch, target_batch, input_ids, target_ids, audio_similarity, src_key_padding_mask, tgt_key_padding_mask)
        - see batch_encode_multi.
        - uri: str,
          file-identifier of the batch and is set to None if shuffle, as batch
          are then file-heterogeneous
        - windows: List[Tuple[int]]
          indices of the start and end words index of the speaker turns in the batch
          Empty if shuffling or augmenting data
    """
    assert not tokenizer.do_basic_tokenize, "Basic tokenization is handle beforehand"
    if audio_emb is not None:
        audio_emb = Wrapper(audio_emb)
    # load list of names (4th CSV column, first word) used for augmentation
    if augment != 0:
        names = []
        for character_file in CHARACTERS_PATH:
            with open(character_file) as file:
                names += [line.split(',')[3].split()[0]
                          for line in file.read().split("\n") if line != '']
        names = np.array(names)
        if uniform:
            # deduplicate so every name is equally likely
            names = np.unique(names)
    text_windows, audio_windows, target_windows, audio_masks = [], [], [], []
    if oracle and shuffle:
        shuffle = False
        warnings.warn("Setting 'shuffle = False' because 'oracle' mode is on.")
    # iterate over protocol subset
    for current_file in tqdm(getattr(protocol, subset)(), desc='Loading transcriptions'):
        # when not shuffling, windows are reset per file (file-homogeneous batches)
        if not shuffle:
            oracle_correct, oracle_total = 0, 0
            n_tokens = []
            text_windows, audio_windows, target_windows, audio_masks = [], [], [], []
        transcription = current_file['transcription']
        uri = current_file['uri']
        current_audio_emb = None
        # get audio embeddings from current_file
        if audio_emb is not None:
            current_audio_emb = audio_emb(current_file)
        # format transcription into 3 lists: tokens, audio, targets
        # and segment it in speaker turns (i.e. speaker homogeneous)
        windows = []
        start, end = 0, 0
        tokens, audio, targets = [], [], []
        previous_speaker = None
        for word in transcription:
            if word._.speaker != previous_speaker:
                # mark speaker change with special token tokenizer.sep_token ("[SEP]")
                if sep_change:
                    tokens.append(tokenizer.sep_token)
                    targets.append(tokenizer.pad_token)
                    audio.append(None)
                    end += 1
                windows.append((start, end))
                start = end
            # get audio embedding for word if alignment timing is confident enough
            if audio_emb is not None and word._.confidence > 0.5:
                segment = Segment(word._.time_start, word._.time_end)
                segment = current_audio_emb.crop(segment, mode="loose")
                # skip segments so small we don't have any embedding for it
                if len(segment) < 1:
                    segment = None
                # average audio-embedding over the segment frames
                else:
                    segment = np.mean(segment, axis=0)
            else:
                segment = None
            # if we don't have a proper target we should mask the loss function
            target = mapping.get(word._.speaker, tokenizer.pad_token)
            # handle basic tokenization (e.g. punctuation) before Word-Piece
            # in order to align input text and speakers
            for token in tokenizer.basic_tokenizer.tokenize(word.text):
                tokens.append(token)
                targets.append(target)
                audio.append(segment)
                end += 1
            previous_speaker = word._.speaker
        windows.append((start, end))
        # drop the empty (0, 0) window produced by the first speaker change
        windows.pop(0)
        # slide through the transcription speaker turns w.r.t. window_size, step_size
        # filter out windows w.r.t. easy
        # and augment them w.r.t. augment
        for i in range(0, len(windows) - window_size + 1, step_size):
            start, _ = windows[i]
            _, end = windows[i + window_size - 1]
            text_window = " ".join(tokens[start:end])
            target_window = " ".join(targets[start:end])
            audio_window, audio_mask = align_audio_targets(tokenizer,
                                                           audio[start:end],
                                                           target_window,
                                                           audio_emb)
            # compute oracle-accuracy
            if oracle:
                n_tokens.append(end-start)
                for target in targets[start:end]:
                    if target in tokenizer.all_special_tokens:
                        continue
                    if re.search(target, text_window, flags=re.IGNORECASE):
                        oracle_correct += 1
                    oracle_total += 1
            # set of actual targets (i.e. excluding [PAD], [SEP], etc.)
            target_set = sorted(set(targets[start:end]) - set(tokenizer.all_special_tokens))
            # easy mode -> Only keep windows with named speakers in it
            if easy and not any_in_text(target_set, text_window):
                continue
            # augment < 0 (=) discard real example
            if augment >= 0:
                text_windows.append(text_window)
                audio_windows.append(audio_window)
                target_windows.append(target_window)
                audio_masks.append(audio_mask)
            # add `augment` windows of synthetic data
            for augmentation in range(abs(augment)):
                synthetic_text = text_window
                synthetic_targets = target_window
                # augment data by replacing
                # speaker names in input text and target by a random name
                for target in target_set:
                    # except if the name is not present in the input text
                    # this would only add noise
                    # TODO make this optional
                    # NOTE(review): deliberately disabled via `False and` — kept as-is
                    if False and not re.search(target, text_window, flags=re.IGNORECASE):
                        continue
                    random_name = np.random.choice(names)
                    synthetic_text = re.sub(fr'\b{target}\b', random_name,
                                            synthetic_text, flags=re.IGNORECASE)
                    synthetic_targets = re.sub(fr'\b{target}\b', random_name,
                                               synthetic_targets, flags=re.IGNORECASE)
                audio_window, audio_mask = align_audio_targets(tokenizer,
                                                               audio[start:end],
                                                               synthetic_targets,
                                                               audio_emb)
                audio_windows.append(audio_window)
                audio_masks.append(audio_mask)
                text_windows.append(synthetic_text)
                target_windows.append(synthetic_targets)
        # yield file-homogeneous batches along with file-uri
        if not shuffle and not oracle:
            indices = np.arange(len(text_windows))
            for batch in batchify_windows(tokenizer, text_windows, target_windows,
                                          audio_windows, indices, batch_size=batch_size,
                                          mask=mask, audio_masks=audio_masks):
                # skip fully-padded batches, this might happen with unknown speakers
                if (batch[-1] == tokenizer.pad_token_id).all():
                    continue
                yield (uri, windows) + batch
        # yield (uri, oracle_accuracy)
        elif oracle:
            # NOTE(review): raises ZeroDivisionError when a file produced no
            # windows (oracle_total == 0) — confirm inputs always have enough turns
            yield uri, oracle_correct/oracle_total, n_tokens
    if shuffle:
        # shuffle all windows (accumulated over every file)
        indices = np.arange(len(text_windows))
        np.random.shuffle(indices)
        for batch in tqdm(batchify_windows(tokenizer, text_windows, target_windows,
                                           audio_windows, indices, batch_size=batch_size,
                                           mask=mask, audio_masks=audio_masks),
                          desc='Encoding batches'):
            # skip fully-padded batches, this might happen with unknown speakers
            if (batch[-1] == tokenizer.pad_token_id).all():
                continue
            yield (None, []) + batch
def align_audio_targets(tokenizer, audio_window, target_window, audio_emb=None):
    """Align audio embeddings with the word-piece tokenization of the targets.

    Parameters
    ----------
    tokenizer: BertTokenizer
        used to word-piece-tokenize target_window
    audio_window: Iterable[np.ndarray]
        one audio embedding per (whole) word
    target_window: str
        target speaker names
    audio_emb: optional
        audio embedding model; only its `dimension` attribute is used here.
        Defaults to None -> returns (None, []) (text-only mode).

    Returns
    -------
    aligned_audio: np.ndarray or None
        (n_aligned, dimension) audio embeddings aligned with the tokens
    mask: np.ndarray or list
        indices of tokens with no reliable audio embedding
    """
    mask = []
    if audio_emb is None:
        return None, mask
    tokens = tokenizer.tokenize(target_window)
    aligned_audio = []
    # placeholder embedding, reused until the first whole word is seen
    previous_a = np.ones((1, audio_emb.dimension))
    for i, (a, tgt) in enumerate(zip_longest(audio_window, tokens)):
        # global max_length caps the alignment length
        if i >= max_length:
            break
        # sub-word ('##...') -> reuse audio representation of the previous word.
        # bug fix: zip_longest pads the shorter iterable with None, so tgt may
        # be None when there are fewer tokens than audio frames; treat that as
        # a regular position instead of crashing on None.startswith
        if tgt is not None and tgt.startswith('##'):
            aligned_audio.append(previous_a)
        else:
            if a is None:
                # no audio for this token -> mask it and use a dummy embedding
                mask.append(i)
                a = np.ones(audio_emb.dimension)
            a = a.reshape(1, -1)
            aligned_audio.append(a)
            previous_a = a
    mask = np.array(mask, dtype=int)
    aligned_audio = np.concatenate(aligned_audio)
    return aligned_audio, mask
def batchify_windows(tokenizer, text_windows, target_windows, audio_windows, indices,
                     batch_size=128, mask=True, audio_masks=None):
    """Split windows into batches of at most batch_size and encode them.

    Parameters
    ----------
    see batchify

    Yields
    -------
    see batch_encode_multi
    """
    # walk over chunks of indices; the final chunk may be smaller than batch_size
    for start in range(0, len(indices), batch_size):
        chunk = indices[start: start + batch_size]
        text_batch = [text_windows[j] for j in chunk]
        target_batch = [target_windows[j] for j in chunk]
        audio_batch, audio_mask_batch = [], []
        for j in chunk:
            window = audio_windows[j]
            if window is not None:
                audio_batch.append(window)
                audio_mask_batch.append(audio_masks[j])
        # tokenize / tensorize the whole chunk at once
        encoded = batch_encode_multi(tokenizer, text_batch, target_batch, audio_batch,
                                     mask=mask, audio_mask_batch=audio_mask_batch)
        # prepend the raw text and targets so downstream evaluation can use them
        # (FIXME: this might add extra memory usage, unnecessary to train the model)
        yield (text_batch, target_batch) + encoded
def batch_encode_plus(tokenizer, text_batch, mask=True, is_pretokenized=False,
                      add_special_tokens=False):
    """Encode one text batch (input or target) with tokenizer.batch_encode_plus.

    Parameters
    ----------
    tokenizer: BertTokenizer
    text_batch:
        - List[List[str]] if is_pretokenized
        - List[str] otherwise
    mask: bool, optional
        Compute attention_mask according to max_length.
        Defaults to True.
    is_pretokenized, add_special_tokens: bool, optional
        see tokenizer.batch_encode_plus
        Defaults to False

    Returns
    -------
    input_ids: Tensor
        (batch_size, max_length). Encoded input tokens using BertTokenizer
    attention_mask: Tensor
        (batch_size, max_length). Used to mask input_ids.
        None if not mask.
    """
    encoded = tokenizer.batch_encode_plus(text_batch,
                                          add_special_tokens=add_special_tokens,
                                          max_length=max_length,
                                          pad_to_max_length='right',
                                          return_tensors='pt',
                                          return_attention_mask=mask,
                                          is_pretokenized=is_pretokenized)
    if mask:
        return encoded['input_ids'], encoded['attention_mask']
    return encoded['input_ids'], None
def batch_encode_multi(tokenizer, text_batch, target_batch, audio_batch=None,
                       mask=True, audio_mask_batch=None):
    """Encode input, target text and audio consistently in torch Tensor

    Parameters
    ----------
    tokenizer: BertTokenizer
        used to tokenize, pad and tensorize text
    text_batch: List[str]
        (batch_size, ) Input text
    target_batch: List[str]
        (batch_size, ) Target speaker names
    audio_batch: List[np.ndarray], optional
        (batch_size, ) Audio embeddings of the input text, aligned with target_ids
        Defaults to None (model only relies on the text).
    mask: bool, optional
        Compute attention_mask according to max_length.
        Defaults to True.
    audio_mask_batch: List[np.ndarray], optional
        indices where audio embeddings are not reliable
        and thus should not weight model's output

    Returns
    -------
    input_ids: Tensor
        (batch_size, max_length). Encoded input tokens using BertTokenizer
    relative_targets: Tensor
        (batch_size, max_length, max_length). one-hot target index w.r.t. input_ids
        e.g. "My name is Paul ." -> one-hot([3, 3, 3, 3, 3])
    audio_similarity: Tensor, optional
        (batch_size, max_length, max_length). Similarity (e.g. cosine distance)
        between audio embeddings of words, aligned with target_ids.
        Defaults to None, indicating that the model should rely only on the text.
    src_key_padding_mask: Tensor, optional
        (batch_size, max_length). Used to mask input_ids.
    tgt_key_padding_mask: Tensor, optional
        (batch_size, max_length). Used to mask relative_targets.
    """
    # bug fix: the documented default audio_batch=None used to crash on
    # len(None); truthiness handles None and [] the same way
    if audio_batch:
        # compute audio similarity matrix (with numpy as torch doesn't have squareform, yet)
        audio_similarity = np.zeros((len(audio_batch), max_length, max_length), dtype=np.float32)
        for i, (fX, audio_mask) in enumerate(zip(audio_batch, audio_mask_batch)):
            d = squareform(pdist(fX, metric='cosine'))
            # distance to similarity
            d = 1 - d
            # mask similarity matrix: masked items are only similar to themselves
            d[audio_mask] = 0
            d[audio_mask, audio_mask] = 1
            audio_similarity[i, : d.shape[0], : d.shape[1]] = d
        # np.ndarray to Tensor
        audio_similarity = from_numpy(audio_similarity)
    else:
        audio_similarity = None
    # tokenize and encode input text: (batch_size, max_length)
    input_ids, src_key_padding_mask = batch_encode_plus(tokenizer, text_batch,
                                                        mask=mask, is_pretokenized=False,
                                                        add_special_tokens=True)
    # encode target text: (batch_size, max_length)
    target_ids, tgt_key_padding_mask = batch_encode_plus(tokenizer, target_batch,
                                                         mask=mask, is_pretokenized=False,
                                                         add_special_tokens=False)
    # fix tgt_key_padding_mask for previously padded targets
    # NOTE(review): this writes tokenizer.pad_token_id into the mask; it means
    # "masked" only while pad_token_id == 0 (true for BERT) -- confirm
    tgt_key_padding_mask[target_ids == tokenizer.pad_token_id] = tokenizer.pad_token_id
    # convert targets to relative targets: (batch_size, max_length, max_length)
    relative_targets = zeros(target_ids.shape + (max_length,))
    for i, (input_id, target_id) in enumerate(zip(input_ids, target_ids)):
        for j, t in enumerate(target_id):
            if t == tokenizer.pad_token_id:
                continue
            where = input_id == t
            # speaker name is not mentioned in input -> pad target
            if not where.any():
                tgt_key_padding_mask[i, j] = tokenizer.pad_token_id
                continue
            where = where.nonzero().reshape(-1)
            relative_targets[i, j, where] = 1.
    return input_ids, relative_targets, audio_similarity, src_key_padding_mask, tgt_key_padding_mask
def visualize(words, model, tokenizer, validate_dir=None):
    """Plot a 2-D t-SNE projection of the model's token embeddings for `words`.

    Parameters
    ----------
    words: Iterable[str]
        words to embed; each resulting word-piece token is annotated on the plot
    model: SidNet
        wrapped in DataParallel (accessed via model.module)
    tokenizer: BertTokenizer
    validate_dir: Path, optional
        directory holding 'params.yml'; the matching checkpoint is loaded from
        its sibling 'weights' directory. Defaults to None -> keep pre-trained
        BERT weights and save the figure in the current working directory.
    """
    # load model from validate_dir
    if validate_dir is not None:
        with open(validate_dir / 'params.yml') as file:
            epoch = yaml.load(file, Loader=yaml.SafeLoader)["epoch"]
        weight = validate_dir.parent / 'weights' / EPOCH_FORMAT.format(epoch)
        checkpoint = load(weight, map_location=model.src_device_obj)
        epoch = checkpoint["epoch"]
        model.module.load_state_dict(checkpoint['model_state_dict'])
    # else keep pre-trained BERT
    else:
        validate_dir = Path.cwd()
    model.module.to(model.src_device_obj)
    model.eval()
    # tokenize and encode words
    tokens = tokenizer.tokenize(' '.join(words))
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_ids = LongTensor(input_ids).unsqueeze(0).to(model.src_device_obj)
    # look up the (static) token embeddings and move them to CPU for t-SNE
    embeddings = model.module.bert.embeddings.word_embeddings(input_ids)
    embeddings = embeddings.squeeze(0).detach().cpu().numpy()
    # apply t-SNE
    tsne = TSNE(n_components=2, metric="cosine")
    embeddings_2d = tsne.fit_transform(embeddings)
    # plot the result, one annotation per token
    assert len(tokens) == embeddings_2d.shape[0], \
        f"Shape mismatch between token ({len(tokens)}) and embeddings ({embeddings_2d.shape})"
    plt.figure(figsize=(15, 15))
    plt.scatter(*embeddings_2d.T)
    for token, xy in zip(tokens, embeddings_2d):
        plt.annotate(token, xy)
    save_path = validate_dir / "embeddings_TSNE.png"
    plt.savefig(save_path)
    # bug fix: corrected typo 'Succesfully' in the user-facing message
    print(f"Successfully saved figure to {save_path}")
def load_config(parent_path):
    """Load `parent_path`/config.yml; returns an empty dict when it is absent."""
    config_file = parent_path / 'config.yml'
    if config_file.is_file():
        with open(config_file) as stream:
            return yaml.load(stream, Loader=yaml.SafeLoader)
    return dict()
if __name__ == '__main__':
    # parse arguments and get protocol
    args = docopt(__doc__)
    protocol_name = args['<protocol>']
    # hyper-parameters, with fallbacks when the CLI flag is absent
    batch_size = int(args['--batch']) if args['--batch'] else 128
    window_size = int(args['--window']) if args['--window'] else 8
    step_size = int(args['--step']) if args['--step'] else 1
    max_length = int(args['--max_len']) if args['--max_len'] else 256
    # attention masking is always on (no CLI flag for it)
    mask = True
    easy = args['--easy']
    sep_change = args['--sep_change']
    augment = int(args['--augment']) if args['--augment'] else 0
    uniform = args['--uniform']
    protocol = get_protocol(protocol_name)
    # handle meta-protocols: a 'X.<task>.<serie1>+<serie2>' name covers several series
    serie, _, x = protocol_name.split('.')
    if serie == 'X':
        series = x.split('+')
    else:
        series = [serie]
    # load speaker-name mapping(s), one JSON file per serie, merged together
    mapping = {}
    for serie in series:
        mapping_path = DATA_PATH / serie / 'annotated_transcripts' / 'names_dict.json'
        with open(mapping_path) as file:
            mapping.update(json.load(file))
    # instantiate tokenizer
    tokenizer = BertTokenizer.from_pretrained(BERT)
    # override basic-tokenization parameter as we need to align speakers with input tokens
    tokenizer.do_basic_tokenize = False
    if args['train']:
        subset = args['--subset'] if args['--subset'] else 'train'
        start_epoch = int(args['--from']) if args['--from'] else None
        train_dir = Path(args['<experiment_dir>'], f'{protocol_name}.{subset}')
        train_dir.mkdir(exist_ok=True)
        # config.yml lives one level above the per-subset directory
        config = load_config(train_dir.parents[0])
        architecture = config.get('architecture', {})
        audio = config.get('audio')
        model = DataParallel(SidNet(BERT, max_length, **architecture))
        # get batches from protocol subset (shuffled for training)
        batches = list(batchify(tokenizer, protocol, mapping, subset, audio_emb=audio,
                                batch_size=batch_size,
                                window_size=window_size,
                                step_size=step_size,
                                mask=mask,
                                easy=easy,
                                sep_change=sep_change,
                                augment=augment,
                                uniform=uniform,
                                shuffle=True))
        model, optimizer = train(batches, model, tokenizer, train_dir,
                                 start_epoch=start_epoch,
                                 **config.get('training', {}))
    elif args['validate']:
        subset = args['--subset'] if args['--subset'] else 'development'
        evergreen = args['--evergreen']
        interactive = args['--interactive']
        validate_dir = Path(args['<train_dir>'], f'{protocol_name}.{subset}')
        validate_dir.mkdir(exist_ok=True)
        # config.yml lives two levels above the validation directory
        config = load_config(validate_dir.parents[1])
        architecture = config.get('architecture', {})
        audio = config.get('audio')
        model = DataParallel(SidNet(BERT, max_length, **architecture))
        # get batches from protocol subset (file-homogeneous, unshuffled)
        batches = list(batchify(tokenizer, protocol, mapping, subset, audio_emb=audio,
                                batch_size=batch_size,
                                window_size=window_size,
                                step_size=step_size,
                                mask=mask,
                                easy=easy,
                                sep_change=sep_change,
                                augment=augment,
                                uniform=uniform,
                                shuffle=False))
        eval(batches, model, tokenizer, validate_dir,
             test=False, evergreen=evergreen, interactive=interactive,
             step_size=step_size, window_size=window_size)
    elif args['test']:
        subset = args['--subset'] if args['--subset'] else 'test'
        interactive = args['--interactive']
        test_dir = Path(args['<validate_dir>'], f'{protocol_name}.{subset}')
        test_dir.mkdir(exist_ok=True)
        # config.yml lives three levels above the test directory
        config = load_config(test_dir.parents[2])
        architecture = config.get('architecture', {})
        audio = config.get('audio')
        model = DataParallel(SidNet(BERT, max_length, **architecture))
        # get batches from protocol subset (file-homogeneous, unshuffled)
        batches = list(batchify(tokenizer, protocol, mapping, subset, audio_emb=audio,
                                batch_size=batch_size,
                                window_size=window_size,
                                step_size=step_size,
                                mask=mask,
                                easy=easy,
                                sep_change=sep_change,
                                augment=augment,
                                uniform=uniform,
                                shuffle=False))
        eval(batches, model, tokenizer, test_dir,
             test=True, interactive=interactive,
             step_size=step_size, window_size=window_size)
    elif args['visualize']:
        validate_dir = args['<validate_dir>']
        if validate_dir is not None:
            validate_dir = Path(validate_dir)
            config = load_config(validate_dir.parents[1])
        else:
            config = {}
        architecture = config.get('architecture', {})
        model = DataParallel(SidNet(BERT, max_length, **architecture))
        # get list of (unique) speaker names from the mapping
        words = set(mapping.values())
        visualize(words, model, tokenizer, validate_dir)
    elif args['oracle']:
        subset = args['--subset'] if args['--subset'] else 'test'
        full_name = f"{protocol_name}.{subset}"
        # get oracle accuracy for protocol subset
        # (batchify in oracle mode yields per-uri accuracy instead of batches)
        uris, accuracies, n_tokens = [], [], []
        for uri, accuracy, n_token in batchify(tokenizer, protocol, mapping, subset,
                                               batch_size=batch_size,
                                               window_size=window_size,
                                               step_size=step_size,
                                               mask=mask,
                                               easy=easy,
                                               sep_change=sep_change,
                                               augment=augment,
                                               shuffle=False,
                                               oracle=True):
            uris.append(uri)
            accuracies.append(accuracy)
            n_tokens.extend(n_token)
        # format mean +/- std of window token counts for the LaTeX caption
        n_tokens = f"{np.mean(n_tokens):.2f} $\\pm$ {np.std(n_tokens):.2f}"
        # append the protocol-level average as a final row
        uris.append(full_name)
        accuracies.append(np.mean(accuracies))
        caption = (f"Oracle accuracy (word/batch-level), protocol {full_name}, "
                   f"Windows of {window_size} with {step_size} step. "
                   f"Average \\# of words: {n_tokens}.")
        # print oracle accuracy as a LaTeX table
        print(tabulate(zip(uris, accuracies), headers=('uri', 'accuracy'), tablefmt='latex'))
        print("\\caption{%s}" % caption)
|
# Dropdown choice maps for forms: {stored value: display label}.
city_choices = {
    'lahore': 'Lahore',
    'sialkot': 'Sialkot',
    'karachi': 'Karachi',
}
price_choices = {
    '1000': '1000',
    '2000': '2000',
    '3000': '3000',
    '5000': '5000',
    # bug fix: label previously read '60000' for the 6000 option
    '6000': '6000',
    '10000': '10000',
}
state_choices = {
    'Alaska': 'Alaska',
    'Alabama': 'Alabama',
    'Punjab': 'Punjab',
    'Sindh': 'Sindh',
}
title_choices = {
    'Wedding destination': 'Wedding destination',
    'Photoshoot': 'Photoshoot',
    'Meeting': 'Meeting',
    'Bridal Shower': 'Bridal Shower',
    # bug fix: removed stray trailing space in the displayed label
    'WorkShop': 'WorkShop',
    'Birthday Party': 'Birthday Party',
}
# Generated by Django 2.0.3 on 2018-03-19 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Slider model (carousel slides with
    # optional small_text, a required big_text/paragraph and an image), adds an
    # optional google_map_link URL to basic_info, and switches
    # singlevideo.upload_time to auto_now_add (set once on creation).

    dependencies = [
        ('web', '0002_aboutus_basic_info_faq'),
    ]

    operations = [
        migrations.CreateModel(
            name='Slider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('small_text', models.CharField(blank=True, max_length=100, null=True)),
                ('big_text', models.CharField(max_length=100)),
                ('paragraph', models.CharField(max_length=250)),
                ('slide_image', models.ImageField(upload_to='Slider_Image')),
            ],
        ),
        migrations.AddField(
            model_name='basic_info',
            name='google_map_link',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='singlevideo',
            name='upload_time',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
from django.shortcuts import render
from .models import OtherUser, Category, Item, ItemImageAndVideos, Offers, Searches, Message, Notifications, ShipmentDetails, ContactUs
import json
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from django.db import transaction, connection
from django.utils import timezone
from django.contrib.auth.models import User, Group, Permission
from rest_framework.authtoken.models import Token
from django.contrib.auth import authenticate
from django.contrib.auth.tokens import default_token_generator
from datetime import datetime, timedelta, tzinfo
import requests
import codecs
import traceback
from django.http import HttpRequest
from django.utils import timezone
import base64, random, pytz
from django.contrib.auth.hashers import make_password
from django.core.mail import send_mail , EmailMessage
from zenoo import settings
from django.db.models import Q
from app1.serializers import UserSerializer, OtherUserSerializer, CategorySerializer, ItemSerializer, ItemImageAndVideosSerializer, OffersSerializer, SearchesSerializer, MessageSerializer, NotificationsSerializer, ShipmentDetailsSerializer, contactUsSerializer
# Create your views here.
# User-facing response messages shared by the API views below.
errorMessage = "Something went wrong, please try after sometime."
addSuccessMessage = "Successfully added"
updateSuccessMessage = "Successfully updated"
removeSuccessMessage = "Deleted successfully"
searchSuccessMessage = "search completed"
sendSuccessMessage = "sent message"
# Create your views here.
@api_view(['POST'])
def addZenoAdmin(request):
    """Register a superuser ('Zeno admin', role=1).

    Creates a Django auth User (username = phone_no, hashed password), puts it
    in the 'Superuser' group, creates the matching OtherUser profile, and
    returns a fresh auth token plus profile details.

    Requires an HTTP 'Timezone' header; returns status "0" with HTTP 500
    when it is missing or on any exception.
    """
    try:
        with transaction.atomic():
            timeZone = request.META.get('HTTP_TIMEZONE')
            if timeZone is not None:
                # serializer.save()
                email = request.data['email']
                password = request.data['password']
                phone_no = request.data['phone_no']
                firstname = request.data['firstname']
                lastname = request.data['lastname']
                gender = request.data['gender']
                # NOTE(review): `username` is read but never used (auth username
                # is set to phone_no below) -- confirm intended
                username = request.data['username']
                address = request.data['address']
                # NOTE(review): timeZone/nowTime are computed but not persisted here
                timeZone = pytz.timezone(request.META.get('HTTP_TIMEZONE'))
                nowTime = timezone.now().replace(microsecond=0)
                authuser = User.objects.create(username=phone_no,
                                               email='',
                                               first_name='',
                                               last_name='',
                                               password=make_password(password),
                                               is_superuser=0,
                                               is_staff=0,
                                               is_active=1,)
                # date_joined=timezone.now())
                print(authuser.id, "id", type(authuser.id))
                g = Group.objects.get(name='Superuser')
                g.user_set.add(authuser)
                print(g)
                # NOTE(review): SECURITY -- the raw password is stored in
                # plaintext on OtherUser (the login views query on it);
                # migrating to hashed-only storage requires changing them too
                user1 = OtherUser.objects.create(
                    email=email,
                    phone_no=phone_no,
                    firstname=firstname,
                    lastname=lastname,
                    gender=gender,
                    user_auth_id=authuser.id,
                    role=1,
                    password=password,
                    address=address
                )
                print(user1)
                token = Token.objects.create(user=authuser)
                userDetail = {
                    'token': token.key,
                    'id': user1.id,
                    'firstname': user1.firstname,
                    'lastname': user1.lastname,
                    'email': user1.email,
                    'notificationStatus': user1.onOffNotification,
                    'address': user1.address,
                    # 'phone_no' :phone_no,
                }
                return Response({"status": "1", 'message': 'User has been successfully registered.', 'data': userDetail}, status=status.HTTP_200_OK)
            else:
                return Response({'status': "0", 'message': 'Timezone is missing!'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": str(e), "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def LoginZenoAdmin(request):
    """Log in a superuser (role=1) by phone_no + password.

    Verifies the plaintext password against OtherUser, then authenticates
    against Django auth and checks 'Superuser' group membership; stores the
    caller's timezone and returns an auth token plus profile details.
    Requires an HTTP 'Timezone' header.
    """
    try:
        with transaction.atomic():
            phone_no = request.data['phone_no']
            password = request.data['password']
            timeZone = request.META.get('HTTP_TIMEZONE')
            if timeZone is not None:
                timeZone = pytz.timezone(request.META.get('HTTP_TIMEZONE'))
                # NOTE(review): nowTime is computed but never used
                nowTime = timezone.now().replace(tzinfo=None).replace(microsecond=0)
                if phone_no != "":
                    try:
                        # NOTE(review): SECURITY -- plaintext password comparison
                        # against the OtherUser table
                        existedUser = OtherUser.objects.get(phone_no=phone_no, password=password, role=1)
                        print(existedUser)
                    except Exception as e3:
                        existedUser = None
                    if existedUser is not None:
                        authUser = authenticate(username=phone_no, password=password)
                        print(authUser)
                        if authUser is not None:
                            checkGroup = authUser.groups.filter(name='Superuser').exists()
                            if checkGroup:
                                OtherUser.objects.filter(id=existedUser.id).update(timezone=timeZone)
                                # reuse an existing token, create one otherwise
                                token = ''
                                try:
                                    user_with_token = Token.objects.get(user=authUser)
                                except:
                                    user_with_token = None
                                if user_with_token is None:
                                    token1 = Token.objects.create(user=authUser)
                                    token = token1.key
                                else:
                                    token = user_with_token.key
                                userDetail = {
                                    'token': token,
                                    # 'id': existedUser.id,
                                    'firstname': existedUser.firstname,
                                    'lastname': existedUser.lastname,
                                    'email': existedUser.email,
                                    'notificationStatus': existedUser.onOffNotification,
                                    'address': existedUser.address,
                                }
                                return Response({"status": "1", 'message': 'Login successfully!', 'data': userDetail}, status=status.HTTP_200_OK)
                            else:
                                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                        else:
                            # NOTE(review): messages below say 'Email' although the
                            # credential checked is phone_no -- confirm wording
                            return Response({"message": "Email linked with another account", "status": "0"}, status=status.HTTP_200_OK)
                    else:
                        return Response({"message": "Email or password incorrect", "status": "0"}, status=status.HTTP_200_OK)
                else:
                    return Response({'status': "0", 'message': 'Email is missing.'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                return Response({'status': "0", 'message': 'Timezone is missing!'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def SignUpZenoUser(request):
    """Register a regular user (role=2).

    Rejects duplicate phone numbers; otherwise creates a Django auth User
    (username = phone_no, hashed password), adds it to the 'User' group,
    creates the OtherUser profile (with location and device info) and
    returns a fresh auth token plus profile details.
    Requires an HTTP 'Timezone' header.
    """
    try:
        with transaction.atomic():
            timeZone = request.META.get('HTTP_TIMEZONE')
            if timeZone is not None:
                # serializer.save()
                email = request.data['email']
                password = request.data['password']
                phone_no = request.data['phone_no']
                firstname = request.data['firstname']
                lastname = request.data['lastname']
                gender = request.data['gender']
                latitude = request.data['latitude']
                longitude = request.data['longitude']
                # NOTE(review): `username` is read but never used (auth username
                # is set to phone_no below) -- confirm intended
                username = request.data['username']
                deviceId = request.data['deviceId']
                deviceType = request.data['deviceType']
                address = request.data['address']
                # NOTE(review): timeZone/nowTime are computed but not persisted here
                timeZone = pytz.timezone(request.META.get('HTTP_TIMEZONE'))
                nowTime = timezone.now().replace(microsecond=0)
                if phone_no != "":
                    # duplicate check on phone_no (EAFP on DoesNotExist)
                    try:
                        existedUser = OtherUser.objects.get(phone_no=phone_no)
                    except Exception as e1:
                        existedUser = None
                    if existedUser is None:
                        authuser = User.objects.create(username=phone_no,
                                                       email='',
                                                       first_name='',
                                                       last_name='',
                                                       password=make_password(password),
                                                       is_superuser=0,
                                                       is_staff=0,
                                                       is_active=1,)
                        # date_joined=timezone.now())
                        print(authuser.id, "id", type(authuser.id))
                        g = Group.objects.get(name='User')
                        g.user_set.add(authuser)
                        print(g)
                        # NOTE(review): SECURITY -- raw password stored in
                        # plaintext on OtherUser (login views query on it)
                        user1 = OtherUser.objects.create(
                            email=email,
                            phone_no=phone_no,
                            firstname=firstname,
                            lastname=lastname,
                            gender=gender,
                            latitude=latitude,
                            longitude=longitude,
                            user_auth_id=authuser.id,
                            deviceId=deviceId,
                            deviceType=deviceType,
                            role=2,
                            password=password,
                            address=address
                        )
                        print(user1)
                        token = Token.objects.create(user=authuser)
                        # NOTE(review): redundant update -- deviceId/deviceType
                        # were already set in the create() above
                        if deviceId != "" and deviceType != "":
                            OtherUser.objects.filter(id=user1.id).update(deviceId=deviceId, deviceType=deviceType)
                        userDetail = {
                            'token': token.key,
                            'id': user1.id,
                            'firstname': user1.firstname,
                            'lastname': user1.lastname,
                            'email': user1.email,
                            'notificationStatus': user1.onOffNotification,
                            'address': user1.address,
                            # 'phone_no' :phone_no,
                        }
                        return Response({"status": "1", 'message': 'User has been successfully registered.', 'data': userDetail}, status=status.HTTP_200_OK)
                    else:
                        # phone_no already registered
                        return Response({"message": "Sorry Something Went Wrong", "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                else:
                    return Response({'status': "0", 'message': 'phone_no is missing!'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                return Response({'status': "0", 'message': 'Timezone is missing!'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": str(e), "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def LoginZenoUser(request):
    """Log in a regular user (role=2) by phone_no + password.

    Verifies the plaintext password against OtherUser, authenticates against
    Django auth, checks 'User' group membership, refreshes the stored device
    info and timezone, then returns an auth token plus profile details
    (including latitude/longitude). Requires an HTTP 'Timezone' header.
    """
    try:
        with transaction.atomic():
            phone_no = request.data['phone_no']
            password = request.data['password']
            deviceId = request.data['deviceId']
            deviceType = request.data['deviceType']
            timeZone = request.META.get('HTTP_TIMEZONE')
            if timeZone is not None:
                timeZone = pytz.timezone(request.META.get('HTTP_TIMEZONE'))
                # NOTE(review): nowTime is computed but never used
                nowTime = timezone.now().replace(tzinfo=None).replace(microsecond=0)
                if phone_no != "":
                    try:
                        # NOTE(review): SECURITY -- plaintext password comparison
                        # against the OtherUser table
                        existedUser = OtherUser.objects.get(phone_no=phone_no, password=password, role=2)
                        print(existedUser)
                    except Exception as e3:
                        existedUser = None
                    if existedUser is not None:
                        authUser = authenticate(username=phone_no, password=password)
                        print(authUser)
                        if authUser is not None:
                            checkGroup = authUser.groups.filter(name='User').exists()
                            if checkGroup:
                                # refresh device binding and timezone on login
                                if deviceId != "" and deviceType != "":
                                    OtherUser.objects.filter(id=existedUser.id).update(deviceId=deviceId, deviceType=deviceType)
                                OtherUser.objects.filter(id=existedUser.id).update(timezone=timeZone)
                                # reuse an existing token, create one otherwise
                                token = ''
                                try:
                                    user_with_token = Token.objects.get(user=authUser)
                                except:
                                    user_with_token = None
                                if user_with_token is None:
                                    token1 = Token.objects.create(user=authUser)
                                    token = token1.key
                                else:
                                    token = user_with_token.key
                                userDetail = {
                                    'token': token,
                                    # 'id': existedUser.id,
                                    'firstname': existedUser.firstname,
                                    'lastname': existedUser.lastname,
                                    'email': existedUser.email,
                                    'notificationStatus': existedUser.onOffNotification,
                                    'address': existedUser.address,
                                    'latitude': existedUser.latitude,
                                    'longitude': existedUser.longitude,
                                }
                                return Response({"status": "1", 'message': 'Login successfully!', 'data': userDetail}, status=status.HTTP_200_OK)
                            else:
                                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                        else:
                            # NOTE(review): messages below say 'Email' although the
                            # credential checked is phone_no -- confirm wording
                            return Response({"message": "Email linked with another account", "status": "0"}, status=status.HTTP_200_OK)
                    else:
                        return Response({"message": "Email or password incorrect", "status": "0"}, status=status.HTTP_200_OK)
                else:
                    return Response({'status': "0", 'message': 'Email is missing.'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                return Response({'status': "0", 'message': 'Timezone is missing!'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def SendContactUsEmail(request):
    """Send a 'contact us' HTML email to the site owner on behalf of a user.

    Authenticates via the Authorization token header ('User' group only),
    records a ContactUs row, and emails subject/message plus the sender's
    details to settings.EMAIL_HOST_USER.
    """
    try:
        with transaction.atomic():
            API_key = request.META.get('HTTP_AUTHORIZATION')
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    checkGroup = user.groups.filter(name='User').exists()
                except:
                    return Response({"message": "Session expired!! please login again", "status": "0"},
                                    status=status.HTTP_401_UNAUTHORIZED)
                if checkGroup:
                    authuser = OtherUser.objects.get(user_auth_id=user.id)
                    authUserName = authuser.firstname
                    email = request.data['email']
                    subject = request.data['subject']
                    message = request.data['message']
                    user_id = request.data["user"]
                    u1 = OtherUser.objects.get(id=user_id)
                    # NOTE(review): subject/message are only emailed, not stored
                    # on the ContactUs row -- confirm the model has no such fields
                    user1 = ContactUs.objects.create(email=email,
                                                     name=authUserName,
                                                     user_id=u1.id,
                                                     );
                    email_body = """\
                    <html>
                    <head></head>
                    <body>
                    <h2>%s</h2>
                    <p>%s</p>
                    <p> This email was sent from: </p>
                    <h5>%s</h5>
                    <h5>email:%s</h5>
                    <h5>address:%s</h5>
                    </body>
                    </html>
                    """ % (subject, message, authUserName, email, authuser.address)
                    recipient = []
                    recipient.append(settings.EMAIL_HOST_USER)
                    # NOTE(review): `email` is rebound here from the sender's
                    # address (str) to the EmailMessage object
                    email = EmailMessage('Contact Us Mail ! ', email_body, to=recipient)
                    email.content_subtype = "html"  # this is the crucial part
                    response = email.send()
                    if response:
                        return Response({"status": "1", 'message': 'Email sent successfully.'}, status=status.HTTP_200_OK)
                    else:
                        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                else:
                    return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
def LogOutZenoUser(request):
    """Invalidate the caller's auth token (identified by the Authorization header)."""
    try:
        with transaction.atomic():
            api_key = request.META.get('HTTP_AUTHORIZATION')
            # no Authorization header at all -> generic error
            if api_key is None:
                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
            # resolve the token to its owner; unknown token -> expired session
            try:
                owner = Token.objects.get(key=api_key).user
            except:
                owner = None
            if owner is None:
                return Response({"message": "session Expired ! Please Login Again.", "status": "0"},
                                status=status.HTTP_401_UNAUTHORIZED)
            # deleting the token logs the user out
            owner.auth_token.delete()
            return Response({"message": "Logged out successfully", "status": "1"}, status=status.HTTP_200_OK)
    except Exception as e:
        # transaction.rollback()
        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def addCategory(request):
    """Create a new Category (superusers with app1.add_category only).

    Authenticates via the Authorization token header, checks 'Superuser'
    group membership and the add_category permission, then creates the
    Category and echoes its name back.
    """
    try:
        with transaction.atomic():
            API_key = request.META.get('HTTP_AUTHORIZATION')
            # NOTE(review): SECURITY -- this prints the raw auth token to logs
            print(API_key)
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    checkGroup = user.groups.filter(name='Superuser').exists()
                except Exception:
                    return Response({"message": "Session Expired!! Please Login Again", "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                if checkGroup:
                    if user.has_perm('app1.add_category'):
                        name = request.data["name"]
                        category = Category.objects.create(
                            name=name,
                        )
                        if category is not None:
                            # bug fix: return the created row's name; the original
                            # returned Category.name, the model class attribute
                            # (a field descriptor), not the stored value
                            return Response({"message": addSuccessMessage, "status": "1", "object": {"name": category.name}}, status=status.HTTP_201_CREATED)
                        else:
                            return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                        # return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
                    else:
                        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                else:
                    return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": str(e), "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def addItems(request):
    """Create a new Item listing (users with app1.add_item only).

    Authenticates via the Authorization token header, checks 'User' group
    membership and the add_item permission, resolves the category and the
    owning OtherUser, then creates the Item and returns its id.
    """
    try:
        with transaction.atomic():
            API_key = request.META.get('HTTP_AUTHORIZATION')
            # NOTE(review): SECURITY -- this prints the raw auth token to logs
            print(API_key)
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    print(user)
                    checkGroup = user.groups.filter(name='User').exists()
                    print(checkGroup)
                except Exception:
                    return Response({"message": "Session Expired!! Please Login Again", "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                if checkGroup:
                    if user.has_perm('app1.add_item'):
                        title = request.data["title"]
                        category_id = request.data["category"]
                        description = request.data["description"]
                        price = request.data["price"]
                        user_id = request.data["user"]
                        added_date = request.data["added_date"]
                        print("category_id = ", category_id)
                        print("user_id = ", user_id)
                        # validate the foreign keys before creating the item
                        category = Category.objects.get(id=category_id)
                        owner = OtherUser.objects.get(id=user_id)
                        item = Item.objects.create(title=title,
                                                   category_id=category.id,
                                                   description=description,
                                                   price=price,
                                                   user_id=owner.id,
                                                   added_date=added_date,
                                                   )
                        if item is not None:
                            # bug fix: return the created row's id; the original
                            # returned Item.id, the model class attribute
                            # (a field descriptor), not the new primary key
                            return Response({"message": addSuccessMessage, "status": "1", "object": {"id": item.id}}, status=status.HTTP_201_CREATED)
                        else:
                            return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                    else:
                        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                else:
                    return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": str(e), "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def addItemsImageAnsVideos(request):
    """Attach an image/video record to an Item (users with app1.add_itemimageandvideos).

    Authenticates via the Authorization token header, checks 'User' group
    membership and the permission, resolves the parent Item, then creates
    the ItemImageAndVideos row and returns its id.
    """
    try:
        with transaction.atomic():
            API_key = request.META.get('HTTP_AUTHORIZATION')
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    checkGroup = user.groups.filter(name='User').exists()
                except Exception:
                    return Response({"message": "Session Expired!! Please Login Again", "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                if checkGroup:
                    if user.has_perm('app1.add_itemimageandvideos'):
                        item_id = request.data["item"]
                        # renamed local so it no longer shadows the builtin `type`
                        media_type = request.data["type"]
                        path = request.data["path"]
                        print("item_id = ", item_id)
                        # validate the parent item before creating the media row
                        item = Item.objects.get(id=item_id)
                        media = ItemImageAndVideos.objects.create(item_id=item.id,
                                                                  type=media_type,
                                                                  path=path,
                                                                  )
                        if media is not None:
                            # bug fix: return the created row's id; the original
                            # returned ItemImageAndVideos.id, the model class
                            # attribute (a field descriptor), not the new key
                            return Response({"message": addSuccessMessage, "status": "1", "object": {"id": media.id}}, status=status.HTTP_201_CREATED)
                        else:
                            return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                    else:
                        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
                else:
                    return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message": str(e), "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def zenoSearch(request):
    """Search items of a category within a price range and a distance radius.

    If the caller's profile has ``save_search == 1`` the search criteria are
    persisted as a ``Searches`` row; otherwise the raw result rows are
    returned directly.
    """
    try:
        with transaction.atomic():
            API_key = request.META.get("HTTP_AUTHORIZATION")
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    checkGroup = user.groups.filter(name='User').exists()
                except Exception:
                    return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
                if checkGroup:
                    permissions = Permission.objects.filter(user=user)
                    if user.has_perm('app1.add_searches'):
                        user_id = request.data["user"]
                        category_id = request.data["category"]
                        min_price = request.data["min_price"]
                        max_price = request.data["max_price"]
                        cursor = connection.cursor()
                        # SECURITY FIX: the request values were interpolated
                        # directly into the SQL string (SQL injection). Use
                        # placeholders so the driver escapes them.
                        # NOTE(review): the haversine uses hard-coded origin
                        # (37, -122) -- presumably a leftover test location.
                        cursor.execute(
                            "select item.title, item.category_id, item.price,"
                            " (3959 * acos(cos(radians(37)) * cos(radians(otheruser.latitude))"
                            " * cos(radians(otheruser.longitude) - radians(-122))"
                            " + sin(radians(37)) * sin(radians(otheruser.latitude)))) As distance"
                            " from item inner join otheruser on item.user_id = otheruser.id"
                            " where item.price between %s AND %s and category_id = %s"
                            " having distance < 8000",
                            [min_price, max_price, category_id],
                        )
                        itemsRaw = dictfetchall(cursor)
                        cursor.close()
                        print(itemsRaw)
                        u = Category.objects.get(id=category_id)
                        u1 = OtherUser.objects.get(id=user_id)
                        if u1.save_search == 1:
                            authuser = Searches.objects.create(
                                user_id=u1.id,
                                category_id=u.id,
                                min_price=min_price,
                                max_price=max_price,
                            )
                            # BUG FIX: was Searches.id (class attribute);
                            # return the id of the created row.
                            return Response({"message" : addSuccessMessage, "status" : "1", "object" : {"id" : authuser.id}}, status=status.HTTP_201_CREATED)
                        else:
                            return Response({"Cursor":itemsRaw}, status=status.HTTP_201_CREATED)
                    else:
                        return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
                else:
                    return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message" : str(e), "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def dictfetchall(cursor):
    """Return every remaining row from *cursor* as a dict keyed by column name."""
    names = tuple(desc[0] for desc in cursor.description)
    return [dict(zip(names, values)) for values in cursor.fetchall()]
@api_view(['GET'])
def getUserList(request):
    """Return the authenticated user's profile details.

    Responds with firstname, lastname, phone_no, gender, username, email
    and address pulled from the caller's ``OtherUser`` row.
    """
    try:
        with transaction.atomic():
            API_key = request.META.get('HTTP_AUTHORIZATION')
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    checkUser = user.groups.filter(name='User').exists()
                    print(checkUser)
                except Exception:
                    return Response({'message' : "Session expired! Please login again", "status":"0"}, status=status.HTTP_401_UNAUTHORIZED)
                # BUG FIX: exists() returns a bool, so `checkUser is not None`
                # was always true and users outside the 'User' group slipped
                # through. Test the boolean itself.
                if checkUser:
                    user1 = OtherUser.objects.get(user_auth_id=user.id)
                    userdetail = {
                        "firstname":user1.firstname,
                        "lastname":user1.lastname,
                        "phone_no" : user1.phone_no,
                        "gender" : user1.gender,
                        "username" : user1.username,
                        "email" : user1.email,
                        "address" : user1.address
                    }
                    return Response({"status": "1", 'message': 'Get successfully.', 'data':userdetail}, status=status.HTTP_200_OK)
                else:
                    return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception:
        # Log the traceback instead of silently swallowing the exception.
        print(traceback.format_exc())
        return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
def getItemList(request):
    """Return all items listed by the authenticated user, serialized."""
    try:
        with transaction.atomic():
            api_key = request.META.get('HTTP_AUTHORIZATION')
            if api_key is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            try:
                token_row = Token.objects.get(key=api_key)
                account = token_row.user
                print(account)
                in_user_group = account.groups.filter(name='User').exists()
            except:
                return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            if not in_user_group:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            # Resolve the app-level profile for the auth user, then fetch
            # every item that profile has listed.
            profile = OtherUser.objects.get(user_auth_id=account.id)
            listed_items = Item.objects.filter(user_id=profile.id)
            serialized = ItemSerializer(listed_items, many=True)
            return Response(serialized.data, status=status.HTTP_200_OK)
    except Exception:
        print(traceback.format_exc())
        return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def makeOffer(request):
    """Record a price offer on an item from the authenticated user."""
    try:
        with transaction.atomic():
            api_key = request.META.get('HTTP_AUTHORIZATION')
            if api_key is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            try:
                token_row = Token.objects.get(key=api_key)
                account = token_row.user
                in_user_group = account.groups.filter(name='User').exists()
                print(in_user_group)
            except:
                return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            if not in_user_group:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            permissions = Permission.objects.filter(user=account)
            if not account.has_perm('app1.add_offers'):
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            item_pk = request.data["item"]
            offer_price = request.data["price"]
            print("item_id = ", item_pk)
            target_item = Item.objects.get(id=item_pk)
            profile = OtherUser.objects.get(user_auth_id=account.id)
            print(target_item.id)
            print(profile.id)
            # offer_status=1 marks the offer as newly placed/open.
            created_offer = Offers.objects.create(
                item_id=target_item.id,
                offer_status=1,
                offered_by_id=profile.id,
                price=offer_price,
            )
            if created_offer is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            return Response({"message" : addSuccessMessage}, status=status.HTTP_201_CREATED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message" : str(e), "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def sendMessage(request):
    """Store a chat message from the authenticated user to *receiver*."""
    try:
        with transaction.atomic():
            api_key = request.META.get('HTTP_AUTHORIZATION')
            if api_key is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            try:
                token_row = Token.objects.get(key=api_key)
                account = token_row.user
                in_user_group = account.groups.filter(name='User').exists()
                print(in_user_group)
            except:
                return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            if not in_user_group:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            permissions = Permission.objects.filter(user=account)
            if not account.has_perm('app1.add_message'):
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            receiver_pk = request.data["receiver"]
            body = request.data["message"]
            sender_profile = OtherUser.objects.get(user_auth_id=account.id)
            print(sender_profile.id)
            created_row = Message.objects.create(
                sender_id=sender_profile.id,
                receiver_id=receiver_pk,
                message=body,
            )
            if created_row is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            return Response({"message" : sendSuccessMessage}, status=status.HTTP_201_CREATED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message" : str(e), "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
def getMessage(request):
    """Return the newest message of each conversation involving the caller.

    For every conversation partner, only the latest row (max id per partner
    pair) is returned, restricted to rows the caller has not soft-deleted
    (``sender_status`` / ``receiver_status`` flags).
    """
    try:
        with transaction.atomic():
            API_key = request.META.get('HTTP_AUTHORIZATION')
            if API_key is not None:
                try:
                    token1 = Token.objects.get(key=API_key)
                    user = token1.user
                    checkGroup = user.groups.filter(name='User').exists()
                    print(checkGroup)
                except Exception:
                    return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
                if checkGroup:
                    u1 = OtherUser.objects.get(user_auth_id=user.id)
                    cursor = connection.cursor()
                    # SECURITY FIX: ids were interpolated into the SQL string;
                    # use placeholders so the driver escapes the values.
                    cursor.execute(
                        "select * from message"
                        " where ((sender_id = %s and sender_status = 1)"
                        " and (receiver_id = %s or receiver_status = 1))"
                        " and id in (select max(id) from message group by"
                        " if(sender_id = %s, concat(sender_id, ' ', receiver_id),"
                        " concat(receiver_id, ' ', sender_id)))",
                        [u1.id, u1.id, u1.id],
                    )
                    itemsRaw = dictfetchall(cursor)
                    cursor.close()
                    print(itemsRaw)
                    # NOTE(review): 201 Created is unusual for a GET; kept
                    # because existing clients may test for it.
                    return Response(itemsRaw, status=status.HTTP_201_CREATED)
                else:
                    return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            else:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message" : str(e), "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(["GET"])
def receiveMessage(request):
try:
with transaction.atomic():
API_key = request.META.get('HTTP_AUTHORIZATION')
if API_key is not None:
try:
token1 = Token.objects.get(key=API_key)
user = token1.user
checkUser = user.groups.filter(name='User').exists()
print(checkUser)
except:
return Response({'message' : "Session expired! Please login again", "status":"0"}, status=status.HTTP_401_UNAUTHORIZED)
if checkUser is not None:
u = OtherUser.objects.get(user_auth_id=user.id)
authuser = Message.objects.filter(receiver_id=u.id).update(is_read=1)
print(authuser)
return Response({"message": "received successfully", "status":"1"}, status=status.HTTP_200_OK)
else:
return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
except Exception as e:
print(traceback.format_exc())
return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(["GET"])
def deteteMessage(request, pk=None):
try:
with transaction.atomic():
API_key = request.META.get('HTTP_AUTHORIZATION')
if API_key is not None:
try:
token1 = Token.objects.get(key=API_key)
user = token1.user
checkUser = user.groups.filter(name='User').exists()
print(checkUser)
except:
return Response({'message' : "Session expired! Please login again", "status":"0"}, status=status.HTTP_401_UNAUTHORIZED)
if checkUser is not None:
u = OtherUser.objects.get(user_auth_id=user.id)
if pk:
cursor = connection.cursor()
cursor.execute(" update message SET sender_status = 0 where (sender_id=" + str(u.id) + " and receiver_id=" + str(pk) + ")")
# itemsRaw = dictfetchall(cursor)
cursor.execute("update message SET receiver_status = 0 where ( receiver_id = " + str(u.id) + " and sender_id = " + str(pk) + ")")
cursor.close()
# print(itemsRaw)
return Response({"message": "delete successfully", "status":"1"}, status=status.HTTP_200_OK)
else:
return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_401_UNAUTHORIZED)
except Exception as e:
print(traceback.format_exc())
return Response({"message": errorMessage, "status": "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def getNotification(request):
    """Create a notification row from the authenticated user to *receiver*."""
    try:
        with transaction.atomic():
            api_key = request.META.get('HTTP_AUTHORIZATION')
            if api_key is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            try:
                token_row = Token.objects.get(key=api_key)
                account = token_row.user
                in_user_group = account.groups.filter(name='User').exists()
                print(in_user_group)
            except:
                return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            if not in_user_group:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            permissions = Permission.objects.filter(user=account)
            if not account.has_perm('app1.add_notifications'):
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            receiver_pk = request.data["receiver"]
            related_row_id = request.data['table_id']
            kind_tag = request.data['tag']
            body = request.data["message"]
            sender_profile = OtherUser.objects.get(user_auth_id=account.id)
            print(sender_profile.id)
            # table_id/tag identify the domain row this notification refers to.
            created_row = Notifications.objects.create(
                sender_id=sender_profile.id,
                receiver_id=receiver_pk,
                message=body,
                table_id=related_row_id,
                tag=kind_tag,
            )
            if created_row is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            return Response({"message" : sendSuccessMessage}, status=status.HTTP_201_CREATED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message" : str(e), "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def sendShipmentDetail(request):
    """Record the shipment (carrier + package id) for one of the user's items."""
    try:
        with transaction.atomic():
            api_key = request.META.get('HTTP_AUTHORIZATION')
            if api_key is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            try:
                token_row = Token.objects.get(key=api_key)
                account = token_row.user
                in_user_group = account.groups.filter(name='User').exists()
                print(in_user_group)
            except:
                return Response({"message" : "Session Expired!! Please Login Again", "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            if not in_user_group:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            permissions = Permission.objects.filter(user=account)
            if not account.has_perm('app1.add_shipmentdetails'):
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_401_UNAUTHORIZED)
            carrier_name = request.data["shippername"]
            tracking_id = request.data['package_id']
            item_pk = request.data['item']
            profile = OtherUser.objects.get(user_auth_id=account.id)
            # Lookup validates the item exists before recording the shipment.
            u = Item.objects.get(id=item_pk)
            created_row = ShipmentDetails.objects.create(
                user_id=profile.id,
                item_id=item_pk,
                shippername=carrier_name,
                package_id=tracking_id,
            )
            if created_row is None:
                return Response({"message" : errorMessage, "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            return Response({"message" : sendSuccessMessage}, status=status.HTTP_201_CREATED)
    except Exception as e:
        print(traceback.format_exc())
        return Response({"message" : str(e), "status" : "0"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
# Read n and a sequence, split the sequence into maximal monotone runs
# (direction fixed by the first strict comparison, then non-strict), and
# print the number of runs.
n = int(input())
values = list(map(int, input().split()))
finished_runs = []
current = []
direction = 0  # 0: undecided, 1: non-decreasing run, -1: non-increasing run
for value in values:
    if not current:
        current.append(value)
    elif direction == 0:
        # First comparison fixes the run's direction (equal keeps it open).
        if current[-1] < value:
            direction = 1
        elif current[-1] > value:
            direction = -1
        current.append(value)
    elif (direction == 1 and current[-1] <= value) or (direction == -1 and current[-1] >= value):
        current.append(value)
    else:
        # Direction broken: close this run and start a new one here.
        finished_runs.append(current)
        current = [value]
        direction = 0
# The trailing run is never appended, hence the +1.
print(len(finished_runs) + 1)
|
#__date__ = 6/14/18
#__time__ = 4:08 PM
#__author__ = isminilourentzou
import torch
import torch.nn as nn
import torch.nn.functional as F
class WordRepr(nn.Module):
    """Token representation layer: concatenates word embeddings with optional
    char-CNN features and optional casing embeddings.

    The final feature width is accumulated in ``self.input_size`` so the
    downstream encoder can size its input accordingly.
    """

    def __init__(self, opt, vocabs):
        """Build the embedding sub-modules.

        opt: options namespace; fields used here: dropout, word_emb_dim,
            char_emb_dim, char_hidden_dim, case_emb_dim.
        vocabs: 4-tuple (word_vocab, char_vocab, case_vocab, tag_vocab).
        """
        super(WordRepr, self).__init__()
        self.opt = opt
        self.word_vocab, self.char_vocab, self.case_vocab, self.tag_vocab = vocabs
        # Number of output tags (size of the tagset).
        self.outsize = len(self.tag_vocab.itos)
        # Running total of the concatenated feature width.
        self.input_size = 0
        self.word_drop = nn.Dropout(opt.dropout)
        if(self.word_vocab.vectors is not None):
            # Pretrained vectors available: copy them in and take their width.
            self.word_embedding = nn.Embedding(len(self.word_vocab), self.word_vocab.vectors.shape[1])
            self.word_embedding.weight.data.copy_(self.word_vocab.vectors)
            self.input_size += self.word_vocab.vectors.shape[1]
        else:
            # No pretrained vectors: randomly initialized embeddings.
            self.word_embedding = nn.Embedding(len(self.word_vocab), self.opt.word_emb_dim)
            self.input_size += self.opt.word_emb_dim
        if(self.opt.char_emb_dim):
            # Character CNN branch (kernel 3, same-length padding).
            self.char_drop = nn.Dropout(opt.dropout)
            self.char_embedding = nn.Embedding(len(self.char_vocab), self.opt.char_emb_dim)
            self.char_cnn = nn.Conv1d(self.opt.char_emb_dim, opt.char_hidden_dim, kernel_size=3, padding=1)
            self.input_size += self.opt.char_hidden_dim
        if(self.opt.case_emb_dim):
            # Casing-feature branch (e.g. lower/upper/title classes).
            self.case_drop = nn.Dropout(opt.dropout)
            self.case_embedding = nn.Embedding(len(self.case_vocab), self.opt.case_emb_dim)
            self.input_size += self.opt.case_emb_dim

    def forward(self, batch):
        """Return token features of width ``self.input_size``.

        batch: object exposing inputs_word, and (when enabled) inputs_char
            and inputs_case index tensors.
        Returns the concatenation [word ; char-CNN ; case] along dim 2;
        assumes inputs_word is (batch, seq_len) -- TODO confirm against the
        data loader.
        """
        word_represent = self.word_embedding(batch.inputs_word)
        word_represent = self.word_drop(word_represent)
        if(self.opt.char_emb_dim):
            char_embeds = self.char_embedding(batch.inputs_char)
            char_embeds = char_embeds.transpose(2,1).contiguous()
            # Flatten (batch, seq) so each token's char sequence is one Conv1d item:
            # (batch_size*seq_len) x char_emb_dim x char_len
            char_embeds = char_embeds.view(-1, char_embeds.size(-2), char_embeds.size(-1)).transpose(2,1)
            char_cnn_out = self.char_cnn(char_embeds)
            # Max-pool over the char dimension, then restore (batch, seq, hidden).
            char_cnn_out = F.max_pool1d(char_cnn_out, char_cnn_out.size(2)).view(word_represent.size(0),word_represent.size(1),-1)
            char_cnn_out = self.char_drop(char_cnn_out)
            word_represent = torch.cat([word_represent, char_cnn_out], 2)
        if(self.opt.case_emb_dim):
            case_embeds = self.case_embedding(batch.inputs_case)
            case_embeds = self.case_drop(case_embeds)
            word_represent = torch.cat([word_represent, case_embeds], 2)
        return word_represent
|
import os.path as osp
import pandas as pd
from .manager import BaseManager
from utils import seed_everything, make_datapath_list, show
from dataset import TestDataset, Anno_xml2list, DataTransform, od_collate_fn, get_dataloader
from models import ObjectDetectionModel
class Infer(BaseManager):
    """Inference manager: runs the object-detection model over the validation
    images (``__call__``) or over a single PIL image (``infer_oneimage``)."""

    def __call__(self):
        """Run batch inference over the validation split for every seed.

        Only runs when the ``infer_flag`` config entry is truthy; in debug
        mode the image/annotation lists are cut to two entries.
        """
        print("Inference")
        if self.get("infer_flag"):
            _, __, val_img_list, val_anno_list = make_datapath_list(self.data_path)
            if self.debug:
                val_img_list = val_img_list[:2]
                val_anno_list = val_anno_list[:2]
            test_dataset = TestDataset(
                val_img_list,
                phase="val",
                transform=DataTransform(**self.get("val_transform_params")),
            )
            testloader = get_dataloader(
                test_dataset,
                batch_size=self.get("batch_size"),
                num_workers=self.get("num_workers"),
                shuffle=False,
                drop_last=False
            )
            for seed in self.seeds:
                self.params["seed"] = seed
                self.params["phase"] = "inference"
                model = ObjectDetectionModel(self.params)
                model.read_weight()
                preds = model.predict(testloader)
                # Best-effort debug output; preds may not expose .shape.
                try:
                    print(type(preds))
                    print(preds.shape)
                except:
                    pass

    def infer_oneimage(self, image):
        """Run inference on a single PIL image and return it as a BGR array.

        image: PIL image, size (W, H).
        Side effect: calls ``show`` which renders/saves predictions under
        ``<ROOT>/app``.
        """
        import numpy as np
        import cv2
        from dataset import OneTestDataset
        # NOTE(review): np.array(...)[:, :, ::-1] already swaps RGB->BGR, and
        # cvtColor(RGB2BGR) swaps again -- the two lines look redundant;
        # confirm the intended channel order.
        image = np.array(image, dtype=np.uint8)[:, :, ::-1]  # to numpy
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # to cv2
        # NOTE(review): __call__ builds DataTransform(**val_transform_params)
        # but here the params dict is passed positionally after image_size --
        # verify this matches DataTransform's signature.
        test_dataset = OneTestDataset(
            image,
            "val",
            DataTransform(self.image_size, self.get("val_transform_params"))
        )
        testloader = get_dataloader(
            test_dataset,
            batch_size=self.get("batch_size"),
            num_workers=self.get("num_workers"),
            shuffle=False,
            drop_last=False
        )
        # Single-image inference uses only the first seed.
        seed = self.seeds[0]
        self.params["seed"] = seed
        self.params["phase"] = "inference"
        model = ObjectDetectionModel(self.params)
        model.read_weight()
        preds = model.predict(testloader)
        try:
            print(type(preds))
            print(preds.shape)
        except:
            pass
        show(image, preds, self.voc_classes, self.get("data_confidence_level"), save_path=osp.join(self.ROOT, "app"))
        return image
|
# Tutorial script: Python lists -- indexing, slicing, nesting, mutability,
# aliasing vs copying, equality vs identity, and concatenation.
factors = [1, 2, 5, 10]
names = ['Anand', 'April', 'Justin']
mixed = [3, True, 'Yellow']
print(factors[3])
print(names[0])
print(mixed[0:2])
print(len(names))
s = 'hello'
# For a string, both a single position and a slice return strings
print(type(s[0]))
print(type(s[0:1]))
# For a list, a single position returns a value, a slice returns a list
print(factors[1])
print(factors[0:1])
print(type(factors[1]))
print(type(factors[0:1]))
# nested lists
nested = [[2, [37]], 4, ['hello']]
print(nested)
print(nested[0])
print(nested[1])
print(nested[2][0][3])
print(nested[0][1:2])
# Unlike strings, lists can be updated in place i.e. In Python, lists are mutable unlike strings
nested[1] = 7
print(nested)
nested[0][1][0] = 19
print(nested)
# Does assignment copy the value or make both the names point to the same value?
# For immutable values, we can assume that assignment makes a fresh copy of a value. Updating one value does not affect the copy. Values of type int, float, bool, str are immutable.
x = 5
y = x
x = 7
print(y)
# For mutable values, assignment does not make a fresh copy. It rather makes both the names point to the same value. So with either name if we happen to update the mutable value the other name is also affected. 'list1' and 'list2' in the below example are the two names for the same list.
list1 = [1, 3, 5, 7]
list2 = list1
list1[2] = 4
print(list2)
# How can we make a copy of a list?
# A slice creates a new (sub)list from an old one. The outcome of a slice operation is a new list. We know that l[:k] is l[0:k], l[k:] is l[k:len(l)] Omitting both end points gives us a full slice l[:] = l[0:len(l)]
list2 = list1[:]
list1[0] = 2
print(list1)
print(list2)
# Digression on equality
list1 = [1, 3, 5, 7]
list2 = [1, 3, 5, 7]
list3 = list2
# All three lists are equal, but there is a difference in the way they are equal
# list1 and list2 are two different lists but they have the same value. So if we operate on one it need not preserve this equality anymore.
# list2 and list3 are equal because they precisely point to the same value. list2 and list3 are two names for the same list. Now, if we update list3 or list2 they continue to remain equal.
# x == y checks if x and y have the same value
# x is y checks if x and y refer to the same object
# We can use '==' and 'is' to check whether two names are equal only in value or physically pointing to the same object
print(list1 == list2)
print(list2 == list3)
print(list2 is list3)
print(list1 is list2)
list2[2] = 4
print(list2)
print(list1 == list2)
print(list2 == list3)
# Concatenation
# Like strings, lists can be glued together using +
list1 = [1, 3, 5, 7]
list2 = [4, 5, 6, 8]
list3 = list1 + list2
print(list3)
# Note that + always produces a new list
list1 = [1, 3, 5, 7]
list2 = list1
list1 = list1 + [9]  # list1 and list2 no longer point to the same object
print(list1 is list2)
print(list1)
|
import sqlite3
class DBHelper:
    """Thin wrapper around an SQLite database of idioms (table ``iboralar``)."""

    def __init__(self, db_name):
        """Open *db_name* with Row access by column name.

        check_same_thread=False because the connection is shared across
        handler threads.
        """
        self.conn = sqlite3.connect(db_name, check_same_thread=False)
        self.conn.row_factory = sqlite3.Row
        self.cursor = self.conn.cursor()

    def get_regions(self):
        """Return the first 60 idioms as (id, ibora, meaning) rows ordered by id."""
        return self.cursor.execute(
            'SELECT id, ibora,meaning FROM iboralar order by id asc limit 60'
        ).fetchall()

    def get_meaning(self, id):
        """Return the meaning row for *id*, or None if absent.

        SECURITY FIX: *id* was f-string-interpolated into the SQL, allowing
        SQL injection; a ``?`` placeholder lets sqlite3 bind it safely.
        """
        return self.cursor.execute(
            "SELECT meaning FROM iboralar WHERE id = ?", (id,)
        ).fetchone()
from model.Group import Group
import logging
def loadGrupos(measurs):
    """Build the measurement groups and distribute *measurs* into them.

    Creates a 5-minute group (capacity 30) and a 10-minute group (capacity
    50), then inserts each measurement into the group whose time interval
    matches its ``grupo_time``. Returns the dict of groups, or None if an
    exception was raised (logged).
    """
    try:
        grupos = {}
        grupos['5'] = Group(5, 30)
        grupos['10'] = Group(10, 50)
        for grupo in grupos.values():
            for measur in measurs.values():
                if measur.grupo_time == grupo.time_interval:
                    grupo.insert_measur(measur)
        return grupos
    except Exception as e:
        # BUG FIX: `print e` is Python-2-only syntax and a SyntaxError under
        # Python 3; the parenthesized call works on both.
        print(e)
        logging.error(str(e))
def check_group_time(grupos):
    """Refresh the time window on each tracked group.

    NOTE: the original author found that iterating ``grupos`` directly
    misbehaved ("For nao funciona mas deveria" -- the for loop should work
    but didn't), so the two known keys are updated explicitly.
    """
    for key in ('5', '10'):
        grupos[key].set_time()
|
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/', methods=["GET", "POST"])
@app.route('/persona/<cadena>/<int:edad>', methods=["GET", "POST"])
def saluda(cadena="NADIE", edad=0):
    """Render the greeting page for *cadena* (name) and *edad* (age).

    The bare '/' route falls back to the defaults NADIE / 0.
    """
    nombre = cadena
    return render_template("inicio.html", nombre=nombre, edad=edad)


@app.route("/articulos/<int:numero>")
def mostrar_ariculo(numero):
    """Render the article page for article id *numero*."""
    return render_template("articulos.html", id=numero)


if __name__ == "__main__":
    # BUG FIX: guard the dev server behind __main__ so importing this module
    # (e.g. under a WSGI server) does not block on app.run(). debug=True is
    # for development only -- disable it in production.
    app.run(debug=True)
from django.apps import AppConfig
class Blog22Config(AppConfig):
    """Django application configuration for the ``blog22`` app."""

    # Dotted module label Django uses to locate the app (see INSTALLED_APPS).
    name = 'blog22'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Convert the water-alliance CSV listing into HTML table rows (forum.html)."""
import csv

# BUG FIX: the original split each line on "," by hand (and left `csv`
# imported but unused), which breaks on quoted fields containing commas;
# csv.reader handles quoting correctly. `with` also guarantees the input
# file is closed (the original never closed it).
with open("水聯盟資料清單 - 水太少.csv", "r", newline="") as f:
    all_row = list(csv.reader(f))

parts = ["<tr>"]
for cnt, item in enumerate(all_row):
    if cnt > 0:
        # Data rows: name, category, and links to the URL/API columns.
        parts.append(
            " <td><b>" + item[0] + "</b></td>"
            "<td><b>" + item[2] + "</b></td>"
            # BUG FIX: quote the href attribute values so URLs containing
            # spaces or '&' produce valid HTML.
            '<td><a href="' + item[5] + '">URL</a></td>'
            '<td><a href="' + item[6] + '">API</a></td>'
            "</tr>"
        )
    else:
        # Header row: plain bold labels instead of links.
        parts.append(
            " <td><b>" + item[0] + "</b></td>"
            "<td><b>" + item[2] + "</b></td>"
            "<td><b>URL</b></td>"
            "<td><b>API</b></td>"
            "</tr>"
        )

with open("forum.html", "w") as html_file:
    html_file.write("".join(parts))
# -*- coding: utf-8 -*-
import pytest
from fractions import Fraction
from openprocurement.auction.esco.utils import prepare_initial_bid_stage
# Parametrized fixtures for prepare_initial_bid_stage: each pair is the raw
# bid dict fed in (**kwargs) and the exact stage dict expected back. The
# 'ru'/'uk' labels are UTF-8 byte escapes for "Участник №N" / "Учасник №N".
@pytest.mark.parametrize(
    'input,expected', [
        (
            # Case 1 ("without features"): no amount_features/coeficient;
            # yearlyPaymentsPercentage is expected to default to 0.
            {
                'bidder_name': '2',
                'bidder_id': '5675acc9232942e8940a034994ad883e',
                'time': '2017-09-19T08:22:24.038426+00:00',
                'contractDurationDays': 252,
                'contractDurationYears': 8,
                'amount': 9023.638356164383,
                'annualCostsReduction': [200.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0,
                                         800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0]
            },
            {
                'amount': 9023.638356164383,
                'contractDurationDays': 252,
                'contractDurationYears': 8,
                'yearlyPaymentsPercentage': 0,
                'annualCostsReduction': [200.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0,
                                         800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0, 800.0],
                'bidder_id': u'5675acc9232942e8940a034994ad883e',
                'label': {'en': 'Bidder #2',
                          'ru': '\xd0\xa3\xd1\x87\xd0\xb0\xd1\x81\xd1\x82\xd0\xbd\xd0\xb8\xd0\xba \xe2\x84\x962',
                          'uk': '\xd0\xa3\xd1\x87\xd0\xb0\xd1\x81\xd0\xbd\xd0\xb8\xd0\xba \xe2\x84\x962'},
                'time': '2017-09-19T08:22:24.038426+00:00'
            }
        ),
        (
            # Case 2 ("with features"): Fraction-valued amount_features and
            # coeficient are expected back as their 'num/den' string form;
            # missing contract duration defaults to 0/0.
            {
                'bidder_name': '1',
                'bidder_id': 'd3ba84c66c9e4f34bfb33cc3c686f137',
                'time': '2017-09-19T08:22:21.726234+00:00',
                'amount_features': Fraction(41531767727917712194043060279553, 4555619344570199334662963200),
                'coeficient': Fraction(16573246628723425, 15492382718154506),
                'amount': 9752.643835616438,
                'yearlyPaymentsPercentage': 0.82,
                'annualCostsReduction': [400.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0,
                                         900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0]
            },
            {
                'amount': 9752.643835616438,
                'contractDurationDays': 0,
                'contractDurationYears': 0,
                'yearlyPaymentsPercentage': 0.82,
                'annualCostsReduction': [400.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0,
                                         900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0, 900.0],
                'bidder_id': u'd3ba84c66c9e4f34bfb33cc3c686f137',
                'label': {'en': 'Bidder #1',
                          'ru': '\xd0\xa3\xd1\x87\xd0\xb0\xd1\x81\xd1\x82\xd0\xbd\xd0\xb8\xd0\xba \xe2\x84\x961',
                          'uk': '\xd0\xa3\xd1\x87\xd0\xb0\xd1\x81\xd0\xbd\xd0\xb8\xd0\xba \xe2\x84\x961'},
                'amount_features': '41531767727917712194043060279553/4555619344570199334662963200',
                'coeficient': '16573246628723425/15492382718154506',
                'time': '2017-09-19T08:22:21.726234+00:00'
            }
        ),
        (
            # Case 3 ("without annualCostsReduction"): the missing list is
            # expected to default to [].
            {
                'bidder_name': '2',
                'bidder_id': '5675acc9232942e8940a034994ad883e',
                'time': '2017-09-19T08:22:24.038426+00:00',
                'amount': 9023.638356164383,
                'contractDurationDays': 252,
                'contractDurationYears': 8,
                'yearlyPaymentsPercentage': 0.65,
            },
            {
                'amount': 9023.638356164383,
                'contractDurationDays': 252,
                'contractDurationYears': 8,
                'yearlyPaymentsPercentage': 0.65,
                'annualCostsReduction': [],
                'bidder_id': u'5675acc9232942e8940a034994ad883e',
                'label': {'en': 'Bidder #2',
                          'ru': '\xd0\xa3\xd1\x87\xd0\xb0\xd1\x81\xd1\x82\xd0\xbd\xd0\xb8\xd0\xba \xe2\x84\x962',
                          'uk': '\xd0\xa3\xd1\x87\xd0\xb0\xd1\x81\xd0\xbd\xd0\xb8\xd0\xba \xe2\x84\x962'},
                'time': '2017-09-19T08:22:24.038426+00:00'
            }
        ),
    ], ids=['without features', 'with features', 'without annualCostsReduction']
)
def test_prepare_initial_bid_stage(input, expected):
    """prepare_initial_bid_stage(**raw_bid) must equal the expected stage dict."""
    result = prepare_initial_bid_stage(**input)
    assert result == expected
|
import os
import ray
import json
import tqdm
import torch
import logging
from typing import List
logger = logging.getLogger(__name__)
class FullDocIterator:
    """Stub: iterator over full documents (implementation pending)."""

    def __init__(self):
        # No state yet; the empty constructor keeps the class constructible.
        pass
class FullDocBatchIterator:
    """Stub: batched iterator over full documents (implementation pending)."""

    def __init__(self):
        # No state yet; the empty constructor keeps the class constructible.
        pass
|
from django.db import models
class Comment(models.Model):
    """A user-submitted comment."""

    # Display name of the commenter (plain text, not a FK to auth.User).
    user = models.CharField(max_length=50)
    # Free-form comment body.
    comment = models.TextField()
|
# Read the current salary, apply a 15% raise, and report the new salary.
salario = float(input('Qual é o salário do funcionário? R$'))
aumento = salario * 15 / 100
novo_salario = salario + aumento
print('Um funcionário que ganhava R${:.2f}, com 15% de aumento, passa a receber R${:.2f}'.format(salario, novo_salario))
|
# created by Ryan Spies
# 3/24/2015
# Python 2.7
# Description: generate an input file with for the MAT preprocessor
# MAT input format: http://www.nws.noaa.gov/oh/hrl/nwsrfs/users_manual/part3/_pdf/38mat.pdf
import os
import math
import dateutil
os.chdir("../..")
maindir = os.getcwd()
################### user input #########################
########################################################
RFC = 'APRFC_FY2017'
run_type = 'MAT' # choices: 'CONS' or 'MAT'
fxgroup = 'ANAK'
consis_check = 'off' # choices: 'on' or 'off' (on will generate consistency check card, off will generate MAT card)
year1 = 1960; year2 = 2015
networks = ['nhds_daily','raws_hourly'] # choices: 'asos_hourly','nhds_daily','raws_hourly','scan_hourly'
workingdir = maindir + os.sep + 'Calibration_NWS'+ os.sep + RFC[:5] + os.sep + RFC + os.sep + 'MAP_MAT_development' + os.sep + 'station_data'
daily_obs_file = workingdir + os.sep + 'nhds_daily' + os.sep + 'nhds_site_obs_time_' + fxgroup+ '.csv' # file with the obs time changes for some nhds daily stations
limit_stations = [] #['PANI','PASV','5769','3009','3215','3573','0754','7570','7783','5366','AKIL','AMCK','APOO','ASTR','ATEL']
## define basins below
if fxgroup == 'ANAK':
mat_basins = ['KNKA2LWR','KNKA2UPR','KNKA2GL'] #
if fxgroup == 'NWAK':
mat_basins = ['ABLA2LWR','ABLA2UPR','KIAA2LWR','KIAA2UPR','WULA2LWR','WULA2UPR']
mat_basins_desc = {'ABLA2LWR':'ABLA2 LOWER','ABLA2UPR':'ABLA2 UPPER','KIAA2LWR':'KIAA2 LOWER','KIAA2UPR':'KIAA2 UPPER','WULA2LWR':'WULA2 LOWER'
,'WULA2UPR':'WULA2 UPPER','KNKA2LWR':'KNKA2 LOWER','KNKA2UPR':'KNKA2 UPPER','KNKA2GL':'KNKA2 GLACIER'}
mat_basins_area = {'ABLA2LWR':4710,'ABLA2UPR':1701,'KIAA2LWR':2699,'KIAA2UPR':372,'WULA2LWR':440
,'WULA2UPR':265,'KNKA2LWR':318,'KNKA2UPR':413,'KNKA2GL':488}
if consis_check == 'on':
mtp_basins = [fxgroup + '_consis_check']
consis_input = 0
out_ext = '_consis_check.mat'
ctmp = ','; ctim = ',,'; cons = 'CONS'; cont = 'STOP' # @B CARD -- note: need n+1 commas for defaults (n is number of default variables)
output_MAT = 'OUTN' # @D CARD input
else:
consis_input = len(mat_basins)
out_ext = '.mat'
ctmp = 'CTMP'; ctim = 'CTIM'; cons = ',,'; cont = 'CONT' # @B CARD -- note: need n+1 commas for defaults (n is number of default variables)
output_MAT = 'OUT' # @D CARD input
dummy_file = workingdir + os.sep + 'MAT_input' + os.sep + fxgroup + '_dummy_station_input_info.csv'
########################################################
################## end user input #######################
if len(networks) > 1:
out_file = open(workingdir + os.sep + 'MAT_input' + os.sep + 'MAT_input_'+fxgroup+'_' + str(year1) + '_' + str(year2) + out_ext,'wb')
else:
out_file = open(workingdir + os.sep + 'MAT_input' + os.sep + 'MAT_' + networks[0] + '.mat','wb')
print 'Creating file -> ' + str(out_file)
################ A card block ####################
month1 = 10; year1 = 1960; month2 = 9; year2 = 2015
out_file.write('{:2s} {:2d} {:4d} {:2d} {:4d}'.format('@A',month1,year1,month2,year2))
out_file.write('\n')
################ B card block ####################
MAT_compute = consis_input; tempck_num = 0; MAT_units = 'ENGL'; mnt = 'MNT'
# inputs for B card read from above -- dependant on consis_check vs MAT
out_file.write('{:2s} {:2d} {:2d} {:4s} {:3s} {:4s} {:4s} {:4s} {:4s}'.format('@B',MAT_compute,tempck_num,MAT_units,mnt,ctmp,ctim,cons,cont))
out_file.write('\n')
################ C card block ####################
# ignored for TEMPCK runs and consistency checks without MAT
#if tempck_num > 0 or MAT_compute == 0:
weight_option = 'PRE' #default is grid -> ignores power
power = 2.0; min_power = 0.01
out_file.write('{:2s} {:4s} {:3.1f} {:4.2f}'.format('@C',weight_option,power,min_power))
out_file.write('\n')
################ D card block ####################
# ignored for TEMPCK runs and consistency checks without MAT
#if tempck_num > 0 or MAT_compute > 0:
print_option = ','; null = ',,'; MAT_units = 'ENGL'; dir_name = 'FY17_CALB'; summary_table = 'SUMT'; summary_card = 'SUMP'
out_file.write('{:2s} {:4s} {:1s} {:4s} {:4s} {:10s} {:4s} {:4s}'.format('@D',print_option,null,output_MAT,MAT_units,dir_name,summary_table,summary_card))
out_file.write('\n')
################ E card block ####################
station_count = 0
for network in networks:
if network == 'nhds_daily': # nhds_daily.taplot vs asos.taplot naming difference
count_open = open(workingdir + os.sep + 'taplot_input' + os.sep + fxgroup + '_' + network + '.taplot','r')
else:
count_open = open(workingdir + os.sep + 'taplot_input' + os.sep + fxgroup + '_' + network + '.taplot','r')
for line in count_open:
station_count += int(line.split()[1])
break
count_open.close()
if len(limit_stations) > 0:
station_count = len(limit_stations)
station_count += tempck_num # +1 for a tempck run?
if consis_check != 'on':
station_count += len(mat_basins) # add dummy stations to count
if station_count > 50:
print 'Warning -> more than 50 stations (limit of MAT)'
out_file.write('{:2s} {:2d}'.format('@E',station_count))
out_file.write('\n')
################ F/G/H card block ####################
stations_input = []; stations_available = {} # create a dictionary of available stations (name and id)
write_gh_cards = False # don't write G and H stations if station is not in limit_stations list (unless list is not used)
if len(limit_stations) == 0:
add_station = True
else:
add_station = False
for network in networks:
line_prev = ''
if network == 'nhds_daily': # nhds_daily.taplot vs asos.taplot naming difference
taplot_open = open(workingdir + os.sep + 'taplot_input' + os.sep + fxgroup + '_' + network + '.taplot','r')
summary_open = open(workingdir + os.sep + 'station_summaries' + os.sep + 'nhds_summary_tamx_daily_' + fxgroup + '.csv','r')
else:
taplot_open = open(workingdir + os.sep + 'taplot_input' + os.sep + fxgroup + '_' + network + '.taplot','r')
summary_open = open(workingdir + os.sep + 'station_summaries' + os.sep + fxgroup + '_' + network[:4] + '_summary_tamx_hourly.csv','r')
for entry in summary_open:
sep = entry.split(',')
if sep[0].strip() != 'NAME':
stations_available[sep[0].upper()] = (str(sep[1])[-4:])
if add_station == True:
limit_stations.append(str(sep[1])[-4:])
summary_open.close()
for line in taplot_open: # F/G/H cards copied from taplot
if line[:2] == '@F':
site_name = (line.split("'")[1].replace(' ' + network[:4].upper(),'')).upper()
if network[:4].upper() == 'SCAN':
site_name = (line.split("'")[1].upper())
if stations_available[site_name] in limit_stations: # check if station is specified as a final chosen site
stations_input.append(stations_available[site_name])
print 'Station added to output card: ' + stations_available[site_name]
out_file.write(line)
write_gh_cards = True
else:
write_gh_cards = False
if line[:2] == '@G' or line[:2] == '@H':
if write_gh_cards == True:
out_file.write(line)
taplot_open.close()
####### add dummy stations ########
if consis_check != 'on':
open_dummy = open(dummy_file,'r'); check_basins = []; fe = 20.0
for line in open_dummy:
min_string = ''; max_string = '' # strings to append dummy station values from csv
sep = line.split(',')
if sep[0] != 'Basin':
basin = sep[0]; lat = float(sep[2]); lon = float(sep[3]); elev = float(sep[4]); dummy = 'DUMMY'; obs_time = 24 # obs_time?
for idx, val in enumerate(sep): # loop through all min/max values and append to appropriate string
if idx > 4 and idx % 2 == True:
min_string = min_string + val.strip() + ' '
if idx > 4 and idx % 2 == False:
max_string = max_string + val.strip() + ' '
check_basins.append(basin)
out_file.write('{:2s} {:20s} {:6.2f} {:6.2f} {:2d} {:4d} {:5s}'.format('@F',"'"+str(basin + ' Synthetic')+"'",abs(float(lat)),abs(float(lon)),obs_time,int(elev),dummy))
out_file.write('\n')
out_file.write('@G ' + str(fe) + ' ' + max_string + '\n')
out_file.write('@H ' + str(fe) + ' ' + min_string + '\n')
open_dummy.close()
################ I card block ####################
# area information and predetermined weights
if check_basins != mat_basins: # check order of syntetic stations matches order of basins
print 'Order of MAT basins does not match order in synthetic info csv...'
basin_index = len(stations_input)
for mat_basin in check_basins:
basin_index += 1
area_id = mat_basin; area_desc = "'"+mat_basins_desc[mat_basin]+"'"; area = mat_basins_area[mat_basin]; area_units = 'MI2'; basin_name = 'FY17_CALB'; file_name = area_id
out_file.write('{:2s} {:12s} {:20s} {:5d} {:3s} {:12s} {:12s}'.format('@I',area_id,area_desc,area,area_units,basin_name,file_name))
out_file.write('\n')
################ J card block ####################
# omit when using predetermined weights
if weight_option != 'PRE': # omit J and L if not using pre-determined weights
basn = 'BASN'; basin_id = 'MCGA2'; desc_info = 'test'; lat_lon_pairs = '(xxx.x,xxx.x)'
out_file.write('{:2s} {:4s} {:8s} {:20s} {:13s}'.format('@J',area_id,area_desc,area,area_units,basin_name,file_name))
out_file.write('\n')
################ L card block ####################
# only needed for predetermined weights
count = 1
out_file.write('@L ')
while count <= len(stations_input) + len(check_basins):
if basin_index == count:
weight = 1.0
else:
weight = 0.0
out_file.write('{:4.2f}'.format(weight))
out_file.write(' ')
if count % 10 == 0:
out_file.write('\n')
count += 1
out_file.write('\n')
################ M card block ####################
if ctim == 'CTIM': # omit unless observation time corrections specified in B card
if 'nhds_daily' in networks:
obs_time_file = open(daily_obs_file,'r')
print 'Adding M block -> obs time history...'
for station_num, station_input in enumerate(stations_input):
obs_time_file = open(daily_obs_file,'r')
prev_station = ''; prev_obs = ''
for line in obs_time_file:
sep = line.split(',')
if sep[0] != 'COOP ID' and sep[0] != '':
site_id = str(sep[0])[-4:]
if site_id == station_input:
#print 'Obs time change found: ' + station_input
begin_date = dateutil.parser.parse(sep[1])
if sep[8] != '': # ignore missing obs time instances
time_obs = int(float(sep[8])/100)
if site_id == prev_station: # check for repeat obs_time instances for same site
if time_obs != prev_obs:
out_file.write('@M ' + str(station_num + 1) + ' ' + str(begin_date.month) + ' ' + str(begin_date.year) + ' ' + str(time_obs) + '\n')
else:
out_file.write('@M ' + str(station_num + 1) + ' ' + str(begin_date.month) + ' ' + str(begin_date.year) + ' ' + str(time_obs) + '\n')
prev_station = site_id; prev_obs = time_obs
obs_time_file.close()
out_file.write('@M 999\n')
################ O card block ####################
if ctmp == 'CTMP': # omit unless temperature corrections specified in B card
out_file.write('@O\n')
out_file.write('@O 999\n')
################ Q card block ####################
out_file.write('@Q\n')
station_list = {}
for network in networks:
if network == 'nhds_daily':
for each_file in (os.listdir(workingdir + os.sep + network + os.sep + 'tamx' + os.sep + 'cardfiles'+ os.sep + fxgroup)):
station_list[each_file[3:7]] = '/' + fxgroup + '/' + (each_file[:-4])
else:
for each_file in (os.listdir(workingdir + os.sep + network + os.sep + 'cardfiles_temp'+ os.sep + fxgroup)):
if each_file[:-4] not in station_list:
station_list[each_file[3:7]] = '/' + fxgroup + '/' + (each_file[:-4])
for station in stations_input:
if station in station_list:
out_file.write('{:4s} {:20s}'.format('TAMX',station_list[station] + '.tmx'))
out_file.write('\n')
out_file.write('{:4s} {:20s}'.format('TAMN',station_list[station] + '.tmn'))
out_file.write('\n')
#print station_list[station]
################ R card block ####################
if cons == 'CONS': # omit unless consistency check option is specified in B card
stations_group = station_count; number_groups = int(math.ceil(station_count/float(stations_group)))
out_file.write('{:2s} {:2d} {:2d}'.format('@R',number_groups,stations_group))
out_file.write('\n')
################ S card block ####################
out_file.write('@S\n')
count_stations = 1
while count_stations <= len(stations_input):
if (count_stations % 20) != False:
out_file.write(str(count_stations) + ' ')
else:
out_file.write(str(count_stations) + '\n')
#if count_stations != station_count:
#out_file.write('@S ')
count_stations += 1
out_file.write('\n')
out_file.write('@S\n')
################ T card block ####################
if tempck_num > 0: # only needed for a TEMPCK run
out_file.write('@T\n')
out_file.close()
print 'MAT card completed!'
print 'Remember to add @O card manually!!!!!!!!!!!'
|
import math
import googlemaps
from scipy.optimize import bisect
import pandas as pd
import glob
def distance_on_unit_sphere(lat1, long1, lat2, long2):
    """Great-circle distance in metres between two latitude/longitude points.

    Converts each point to spherical coordinates (phi = 90 - latitude,
    theta = longitude), applies the spherical law of cosines to obtain the
    arc length on a unit sphere, then scales by the Earth's radius
    (6373 km) expressed in metres.  The order of the two points does not
    affect the result.
    """
    deg2rad = math.pi / 180.0
    # Colatitudes and azimuths in radians.
    phi_a = (90.0 - lat1) * deg2rad
    phi_b = (90.0 - lat2) * deg2rad
    theta_a = long1 * deg2rad
    theta_b = long2 * deg2rad
    # Spherical law of cosines: cos(arc) = sin*sin*cos(dtheta) + cos*cos.
    cos_arc = (math.sin(phi_a) * math.sin(phi_b) * math.cos(theta_a - theta_b) +
               math.cos(phi_a) * math.cos(phi_b))
    # Earth radius 6373 km, times 1000 to return metres.
    return math.acos(cos_arc) * 6373 * 1000
def find_closest_gps(lat, lng, lat_array, lng_array):
    """Return (index, distance_m) of the array point closest to (lat, lng).

    Scans the paired latitude/longitude arrays and keeps the first point
    with the smallest great-circle distance to the target.  The large
    sentinel start values (index 10000 / 100 km) are returned unchanged
    when the arrays are empty, mirroring the historical behaviour.  With a
    GPS polling rate of roughly 0.2-1 s the winning distance is normally
    ~15-20 m or less; a larger value means no reading was taken near the
    desired point.
    """
    best_index = 10000
    best_distance = 100000
    for idx, (pt_lat, pt_lng) in enumerate(zip(lat_array, lng_array)):
        separation = distance_on_unit_sphere(lat, lng, pt_lat, pt_lng)
        if separation < best_distance:
            best_distance = separation
            best_index = idx
    return best_index, best_distance
def get_bearing(lat1, lng1, lat2, lng2):
    """Compass bearing in degrees [0, 360) of the rhumb line from point 1 to point 2.

    Uses the Mercator-projected latitude difference, so the result is the
    constant-heading bearing of the road section; it is needed to resolve
    the effect of wind.  Direction matters: (lat1, lng1) must be the
    starting position.
    """
    lat_a = math.radians(lat1)
    lng_a = math.radians(lng1)
    lat_b = math.radians(lat2)
    lng_b = math.radians(lng2)
    delta_lng = lng_b - lng_a
    # Latitude difference stretched onto the Mercator projection.
    delta_phi = math.log(
        math.tan(lat_b / 2.0 + math.pi / 4.0) / math.tan(lat_a / 2.0 + math.pi / 4.0))
    # Wrap so we take the shorter way around when |dLong| exceeds 180 degrees.
    if abs(delta_lng) > math.pi:
        delta_lng = delta_lng - 2.0 * math.pi if delta_lng > 0.0 else delta_lng + 2.0 * math.pi
    return (math.degrees(math.atan2(delta_lng, delta_phi)) + 360.0) % 360.0
def elev_from_gmaps(start_lat, start_lng, end_lat, end_lng, desired_pt_dist, needed_params=['elevation', 'Latitude', 'Longitude', 'Grade']):
"""
This function extracts the elevations from google maps.
You specify the latitude and longitude points and the resolution at which
you want the grade.

Returns a pandas DataFrame with one row per sampled point; when
needed_params is left at its default the columns are renamed to
GM-Elev / Latitude / Longitude / GM-Grade.
NOTE(review): the list default argument is mutable; it is never modified
here so it is currently safe, but a tuple or None default would be more
robust against future edits.
"""
# this key is tied to Darren's account. Should get one for someone who is
# on the project permanently.
# SECURITY NOTE(review): API key committed to source control -- rotate it
# and load it from an environment variable instead.
gmaps = googlemaps.Client(key='AIzaSyDIw4vUsp5DAW18hRSPYpzxQTGZfSMCh4g')
# Root-finding helper: zero when splitting the leg into num_pts pieces
# yields exactly desired_pt_dist metres per piece.
def dist_finder(num_pts, distance, desired_pt_dist):
return distance / num_pts - desired_pt_dist
distance = distance_on_unit_sphere(start_lat, start_lng, end_lat, end_lng)
path_gmaps = [{'lat': float(start_lat), 'lng': float(start_lng)}, {'lat': float(end_lat), 'lng': float(end_lng)}]
# the part below splits the gps locations up if 500 points will not allow us to reach our goal for distance between points
# we then run several smaller calls to the google maps API
num_elev_obs = int(math.ceil(distance / (desired_pt_dist * 499)) + 1.01)
elevations_gmaps = gmaps.elevation_along_path(path_gmaps, num_elev_obs)
all_gps = [x[u'location'] for x in elevations_gmaps]
# this gives us the gps values that we need in order to call the get_elevation
zip_gps = zip(all_gps[:-1], all_gps[1:])
elevations_gmaps_total = []
for pair in zip_gps:
path_gmaps = [{'lat': float(pair[0][u'lat']), 'lng':float(pair[0][u'lng'])}, {'lat': float(pair[1][u'lat']), 'lng':float(pair[1][u'lng'])}]
distance = distance_on_unit_sphere(path_gmaps[0][u'lat'], path_gmaps[0][u'lng'], path_gmaps[1][u'lat'], path_gmaps[1][u'lng'])
# bisect solves for the point count that hits desired_pt_dist per segment
opt_num_pts = int(math.ceil(bisect(dist_finder, 0.1, 10000, args=(distance, desired_pt_dist))))
elevations_gmaps_total.extend(gmaps.elevation_along_path(path_gmaps, opt_num_pts))
df1 = pd.DataFrame.from_dict(elevations_gmaps_total)
df1['Latitude'] = df1['location'].map(lambda x: float(x[u'lat']))
df1['Longitude'] = df1['location'].map(lambda x: float(x[u'lng']))
# at first, some of the points were throwing an error
# this should be irrelevant, but I'll leave it just in case there is a gmaps error
# this may happen if Darren's Gmaps ID expires
# NOTE(review): the bare except below swallows every error and implicitly
# returns None; the 'print' statement also dates this module to Python 2.
def lambtry(lat1, lng1, lat2, lng2):
try:
return distance_on_unit_sphere(lat1, lng1, lat2, lng2)
except:
print 'Problem here, call Darren'
df1['lat2'] = df1['Latitude'].shift(-1)
df1['lng2'] = df1['Longitude'].shift(-1)
# distance between consecutive samples; rows closer than 1.9 m are dropped below
df1['dist'] = df1.apply(lambda x: lambtry(x['Latitude'], x['Longitude'], x['lat2'], x['lng2']), axis=1) # distance_on_unit_sphere(x['Latitude'],x['Longitude'],x['lat2'],x['lng2']),axis=1)
# NOTE(review): DataFrame.append was removed in pandas 2.0 -- this module
# requires the pandas 0.x/1.x API.
df1 = df1[df1['dist'] > 1.9].append(df1[-1:])
df1['dist'] = df1['dist'].fillna(method='ffill')
df1['Grade'] = df1['elevation'].diff() * 100 / df1['dist']
df1['dist'].iloc[0] = 0
df1['distance'] = df1['dist'].cumsum()
df1['Grade'] = df1['Grade'].shift(-1)
df1['Grade'] = df1['Grade'].fillna(method='ffill')
# metres -> feet
df1['Start_Distance'] = df1.loc[:, 'distance'] * 3.28084
df1 = df1[needed_params]
if needed_params == ['elevation', 'Latitude', 'Longitude', 'Grade']:
df1.columns = ['GM-Elev', 'Latitude', 'Longitude', 'GM-Grade']
return df1
else:
return df1
def add_distance_to_df(df, dist_range, avg_dist, column='speeddist'):
    """Insert evenly spaced distance marks into *df* to support subsection averaging.

    Two marker frames are built over ``range(dist_range[0], dist_range[1],
    avg_dist)``: one at each exact mark ``x`` and one at ``x - 0.001`` just
    before it (so later value-interpolation brackets each mark).  Both are
    outer-merged into *df* on *column*.

    :param df: DataFrame containing *column*.
    :param dist_range: (start, stop) integers bounding the marks.
    :param avg_dist: integer step between marks.
    :param column: name of the distance column to merge on.
    :return: (merged_df, dist_list) where dist_list interleaves the
             ``x - 0.001`` and ``x`` marks in ascending order.
    """
    dfadd = pd.DataFrame()
    dfadd1 = pd.DataFrame()
    # range() instead of the Python-2-only xrange: identical behaviour when
    # iterated, and keeps this helper portable to Python 3.
    dfadd[column] = [x - 0.001 for x in range(dist_range[0], dist_range[1], avg_dist)]
    dfadd1[column] = [x for x in range(dist_range[0], dist_range[1], avg_dist)]
    dfnew = pd.merge(df, dfadd, how='outer')
    dfnew = pd.merge(dfnew, dfadd1, how='outer')
    dist_list = [x for sublist in zip(dfadd[column], dfadd1[column]) for x in sublist]
    return dfnew, dist_list
def import_gps_data(gpr_file, gps1, gps2, avg_dist):
"""
This imports the gps data from the GPR van which has been post-processed already.
First it loads in all the appropriate files which may have our section in it and
flattens them together. It then takes in the start and end gps points and clips the
data to only return the necessary info.

:param gpr_file: headerless CSV with columns odo, odo2, lat, lng, elev, prec, prec2
:param gps1: (lat, lng) of the section start
:param gps2: (lat, lng) of the section end
:param avg_dist: spacing in metres of the interpolated sample points
:return: DataFrame with columns GPR-Distance / Latitude / Longitude / GPR-Elev / GPR-Grade
"""
df = pd.read_csv(gpr_file, header=None, names=['odo', 'odo2', 'lat', 'lng', 'elev', 'prec', 'prec2'], usecols=['lat', 'lng', 'elev', 'odo', 'odo2'])
# distance of every sample from the requested start and end coordinates
df['distance1'] = df.apply(lambda x: distance_on_unit_sphere(gps1[0], gps1[1], x['lat'], x['lng']), axis=1)
df['distance2'] = df.apply(lambda x: distance_on_unit_sphere(gps2[0], gps2[1], x['lat'], x['lng']), axis=1)
df = df.reset_index()
# NOTE(review): Series.argmin has meant label or position depending on the
# pandas version; the reset_index() above makes the two coincide here.
ind1 = df['distance1'].argmin()
ind2 = df['distance2'].argmin()
df = df.loc[ind1:ind2 + 1, :]
# snap the clip window to whole multiples of avg_dist inside the data range
start_point = (int(df['distance1'].iloc[0] / float(avg_dist)) + 1) * avg_dist
end_point = int(df['distance1'].iloc[-1] / float(avg_dist)) * avg_dist
df, dist_list = add_distance_to_df(df, [start_point, end_point], avg_dist, column='distance1')
df.index = df['distance1']
# value-based interpolation fills the inserted marker rows
df = df.sort_values(by='distance1').interpolate(method='values')
dfreturn = df.loc[df['distance1'].isin(dist_list[1::2])]
# NOTE(review): DataFrame.append was removed in pandas 2.0 -- pre-2.0 API required.
dfreturn = dfreturn.append(df.iloc[0])
dfreturn = dfreturn.append(df.iloc[-1])
dfreturn = dfreturn.sort_values('distance1')
# percent grade between consecutive retained points
dfreturn['Grade'] = dfreturn['elev'].diff() / dfreturn['distance1'].diff() * 100
needed_params = ['distance1', 'lat', 'lng', 'elev', 'Grade']
dfreturn = dfreturn[needed_params]
dfreturn.columns = ['GPR-Distance', 'Latitude', 'Longitude', 'GPR-Elev', 'GPR-Grade']
return dfreturn
def create_subsections_grade(hpgps_file, gps_coords1, gps_coords2, distance=100):
"""
This function adds the information from the hpgps file into the data
distance must be an integer

Columns are addressed by 1-based position after the rename below; from
usage, column 3 is latitude, column 2 is longitude, column 7 is elevation
and column 9 is a quality metric used for filtering.
NOTE(review): those column meanings are inferred from how they are used
here -- confirm against the HPGPS export format.
"""
df = pd.read_table(hpgps_file, delimiter=',', skiprows=None)
df.columns = [x + 1 for x in range(len(df.columns))]
pd.options.mode.chained_assignment = None
# keep only rows at or below the median of column 9 (describe()[5] is the 50% row)
df = df.loc[df[9] <= df[9].describe()[5]]
df['Dist_from_start'] = df.apply(lambda x: distance_on_unit_sphere(gps_coords1[0], gps_coords1[1], x[3], x[2]), axis=1)
df['Dist_from_end'] = df.apply(lambda x: distance_on_unit_sphere(gps_coords2[0], gps_coords2[1], x[3], x[2]), axis=1)
# snap the section to whole multiples of the averaging distance
start_point = (int(df['Dist_from_start'].loc[df['Dist_from_start'].argmin()] / float(distance)) + 1) * distance
end_point = int(df['Dist_from_start'].loc[df['Dist_from_end'].argmin()] / float(distance)) * distance
df, dist_list = add_distance_to_df(df, [start_point, end_point], distance, column='Dist_from_start')
df.index = df['Dist_from_start']
# value-based interpolation fills the inserted marker rows
df = df.sort_values(by='Dist_from_start').interpolate(method='values')
dfreturn = df.loc[df['Dist_from_start'].isin(dist_list[1::2])]
# NOTE(review): DataFrame.append was removed in pandas 2.0 -- pre-2.0 API required.
dfreturn = dfreturn.append(df.loc[df['Dist_from_start'].argmin()])
dfreturn = dfreturn.append(df.loc[df['Dist_from_end'].argmin()])
dfreturn = dfreturn.sort_values('Dist_from_start')
dfreturn['Grade'] = dfreturn[7].diff() / dfreturn['Dist_from_start'].diff() * 100 # 90% sure its dist_from_start, might be distance
needed_params = ['Dist_from_start', 3, 2, 7, 'Grade']
dfreturn = dfreturn[needed_params]
dfreturn.columns = ['HPG-Distance', 'Latitude', 'Longitude', 'HPG-Elev', 'HPG-Grade']
return dfreturn
# Import-only module: nothing runs when executed directly.
if __name__ == '__main__':
pass
|
from django.db import models
from django.utils import translation
from django_extra_referrals.models import AbstractPayable, AbstractReceivable
_ = translation.ugettext_lazy
class Donation(AbstractReceivable):
"""Donation record identified by the donor's full name.

Receivable behaviour is inherited from the project-defined
AbstractReceivable base model.
"""
fullname = models.CharField(max_length=150)
def __str__(self):
return self.fullname
class Withdraw(AbstractPayable):
"""Withdrawal record identified by the recipient's full name.

Payable behaviour is inherited from the project-defined
AbstractPayable base model.
"""
fullname = models.CharField(max_length=150)
def __str__(self):
return self.fullname
|
class Bank:
    """Minimal demo class holding a single bank name.

    ``name`` is a class-level default of None until assignName() stores an
    instance-level value.
    """

    name = None  # class attribute; shadowed per-instance by assignName()

    def assignName(self, bname):
        """Store *bname* as this instance's bank name."""
        self.name = bname

    def displayName(self):
        """Print the current bank name (None when never assigned)."""
        print(self.name)
# Demo: displayName() prints the class default None until assignName() stores
# an instance-level value; each instance keeps its own name.
b1 = Bank()
# Calling methods
b1.displayName() # prints None -- nothing assigned yet
b1.assignName("Kotak")
b1.displayName() # prints Kotak
# A second, independent instance
b2 = Bank()
# Calling methods
b2.assignName("SBI")
b2.displayName() # prints SBI
|
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import time
import random
import tweepy
import os
import json
import io
# Go to http://dev.twitter.com and create an app.
# The consumer key and secret as well as the access_token and secret will be generated for you after you register with Twitter Developers
# SECURITY NOTE(review): real credentials are hard-coded below and committed
# to source control -- revoke/rotate them and load them from environment
# variables or an untracked config file instead.
consumer_key = "4oqZjge7qM0n3WNftJiKHFtOF"
consumer_secret = "CZOzvRcdwFOzPZFoM5igXVGBbOBp7lQWBBtCRe76wuv738equP"
access_token = "1004411169568747520-7NBYDlDKlGXX9q5gjXasgRRo5p3HtT"
access_token_secret = "b3BSPhEfHGYCxuIaNPg1CFcJtKkCWnjIZESooDgT99GWL"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# wait_on_rate_limit makes tweepy sleep through rate-limit windows instead of failing.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_delay=10)
def get_tweets(listOfTweets, keyword, numOfTweets):
"""Search Twitter for *keyword* and append each tweet's fields to a JSON dump.

:param listOfTweets: accepted for interface compatibility but never used --
results are written straight to disk instead.
:param keyword: search query passed to api.search.
:param numOfTweets: maximum number of tweets to pull from the Cursor.
NOTE(review): the output path is hard-coded to one developer's home
directory, and each record is written as '<obj>,', so the file as a whole
is not valid JSON until wrapped in [] and the trailing comma removed.
"""
# Iterate through all tweets containing the given word, api search mode
i=0
with open('/Users/junha_lee/Desktop/c.json', 'a', encoding='utf-8') as outfile:
for tweet in tweepy.Cursor(api.search, q=keyword, since='2008-01-01').items(numOfTweets):
# Add tweets in this format # listOfTweets.append(dict_)
# print(tweet)
# print("stop")
raw_data = tweet._json
# NOTE(review): 'is_quote_status' appears twice in this literal;
# the second occurrence silently wins, the first is redundant.
dict1 = {
'created_at':raw_data['created_at'],
'id': raw_data['id'],
'id_str': raw_data['id_str'],
'text': raw_data['text'],
'truncated': raw_data['truncated'],
'is_quote_status': raw_data['is_quote_status'],
'retweet_count': raw_data['retweet_count'],
'favorite_count': raw_data['favorite_count'],
'favorited':raw_data['favorited'],
'retweeted':raw_data['retweeted'],
'lang':raw_data['lang'],
'source':raw_data['source'],
'in_reply_to_status_id':raw_data['in_reply_to_status_id'],
'in_reply_to_status_id_str':raw_data['in_reply_to_status_id_str'],
'in_reply_to_user_id':raw_data['in_reply_to_user_id'],
'in_reply_to_user_id_str':raw_data['in_reply_to_user_id_str'],
'in_reply_to_screen_name':raw_data['in_reply_to_screen_name'],
'is_quote_status':raw_data['is_quote_status'],
'hashtags':raw_data['entities']['hashtags'],
'symbols':raw_data['entities']['symbols'],
'user_mentions':raw_data['entities']['user_mentions'],
'urls':raw_data['entities']['urls'],
'iso_language_code':raw_data['metadata']['iso_language_code'],
'result_type':raw_data['metadata']['result_type'],
'geo':raw_data['geo'],
'coordinates':raw_data['coordinates'],
'place':raw_data['place'],
# absent: 'quote_count': raw_data['quote_count'], => requires premium API access
# absent: 'reply_count': raw_data['reply_count'], => requires premium API access
# absent: 'filter_level': raw_data['filter_level'], => only present when using the filter API
# absent: 'matching_rules': raw_data['matching_rules'], => only present when using the filter API
#'possibly_sensitive':raw_data['entities']['urls']['possibly_sensitive'], => appears inside retweet_status
# 'quoted_status_id': raw_data['quoted_status_id'], => appears inside retweet_status
# 'quoted_status_id_str':raw_data['quoted_status_id'] => appears inside retweet_status
}
#print(dict_)
print(str(i))
twitter=json.dumps(dict1, indent=4, ensure_ascii=False)
outfile.write(twitter+',')
i+=1
def main():
    """Collect tweets matching a fixed keyword and append them to the JSON dump.

    The empty tweet list is passed through for interface compatibility even
    though get_tweets() writes directly to disk rather than filling it.
    """
    listOfTweets = []
    keyword = 'lol'
    # Effectively "no limit": the Cursor stops when the API stops returning results.
    numOfTweets = 10000000000
    get_tweets(listOfTweets, keyword, numOfTweets)


if __name__ == '__main__':
    main()
from collections import defaultdict
def data_to_dict(data, get_mean=True):
    """Parse a temperature record string into per-town statistics.

    *data* holds one town per line, formatted as
    ``"Town: <label> <temp>, <label> <temp>, ..."``; only the second token
    of each comma-separated pair is read as a temperature.

    Every town gets a ``'mean'`` entry; when *get_mean* is falsy a
    population ``'variance'`` entry is computed as well.  Returns a
    ``defaultdict(dict)`` keyed by town name.
    """
    stats = defaultdict(dict)
    for record in data.split('\n'):
        town, raw_temps = record.split(':')
        readings = [float(pair.split()[1]) for pair in raw_temps.split(',')]
        avg = sum(readings) / len(readings)
        stats[town]['mean'] = avg
        if not get_mean:
            # Population variance: mean squared deviation from the mean.
            stats[town]['variance'] = sum((r - avg) ** 2 for r in readings) / len(readings)
    return stats
def mean(town, strng):
    """Mean temperature for *town* parsed from *strng*; -1 when the town is absent."""
    stats = data_to_dict(strng)
    if town in stats:
        return stats[town]['mean']
    return -1
def variance(town, strng):
    """Population temperature variance for *town* parsed from *strng*; -1 when absent."""
    stats = data_to_dict(strng, False)
    if town in stats:
        return stats[town]['variance']
    return -1
|
import numpy as np
import csv
import matplotlib.pyplot as plt
""""对男女声音进行辨别"""
class NaiveBayes():
"""Naive Bayes classifier distinguishing male vs. female voices.

Loads a CSV of voice features, discretises each continuous feature into
``divison`` levels (a module-level global defined after this class), and
classifies with per-class log-probabilities smoothed so that no feature
value has zero probability.
"""
def load_data_set(self,file_name):
"""
:param file_name: CSV file to load
:return
train_mat: discretised training data
train_classes: class labels of the training data (1 = male, 0 = female)
test_mat: discretised test data
test_classes: class labels of the test data
label_name: feature names from the CSV header
"""
data_mat = []
with open(file_name) as file_obj:
voice_reader = csv.DictReader(file_obj)
list_class = []
# header row
label_name = list(voice_reader.fieldnames)
num = len(label_name) - 1
for line in voice_reader.reader:
data_mat.append(line[:num])
gender = 1 if line[-1] == 'male' else 0
list_class.append(gender)
# per-feature mean over the non-zero entries
data_mat = np.array(data_mat).astype(float)
count_vector = np.count_nonzero(data_mat, axis=0)
sum_vector = np.sum(data_mat, axis=0)
mean_vector = sum_vector / count_vector
# fill missing (zero) entries with the column mean
for row in range(len(data_mat)):
for col in range(num):
if data_mat[row][col] == 0.0:
data_mat[row][col] = mean_vector[col]
# discretise the continuous feature values
min_vector = data_mat.min(axis=0)
max_vector = data_mat.max(axis=0)
diff_vector = max_vector - min_vector
# NOTE(review): 'divison' (sic) is a module-level global defined below the class
diff_vector /= (divison-1)
new_data_set = []
for i in range(len(data_mat)):
line = np.array((data_mat[i] - min_vector) / diff_vector).astype(int)
new_data_set.append(line)
# randomly split into a 2000-row training set and the remaining test set
test_set = list(range(len(new_data_set)))
train_set = []
for i in range(2000):
random_index = int(np.random.uniform(0, len(test_set)))
train_set.append(test_set[random_index])
del test_set[random_index]
# training set
train_mat = []
train_classes = []
for index in train_set:
train_mat.append(new_data_set[index])
train_classes.append(list_class[index])
# test set
test_mat = []
test_classes = []
for index in test_set:
test_mat.append(new_data_set[index])
test_classes.append(list_class[index])
return train_mat, train_classes, test_mat, test_classes, label_name
def native_bayes(self,train_matrix, list_classes):
"""
:param train_matrix: training sample matrix
:param list_classes: class label vector of the training samples
:return: p_1_class - probability that any sample is class 1;
p_feature / p_1_feature / p_0_feature - log-probabilities of every
feature value overall, given class 1, and given class 0
"""
# number of training samples and features
num_train_data = len(train_matrix)
num_feature = len(train_matrix[0])
# prior: fraction of samples labelled class 1
p_1_class = sum(list_classes) / float(num_train_data)
p_0_class = 1 - p_1_class
n = divison
list_classes_1 = []
train_data_1 = []
list_classes_0 = []
train_data_0 = []
for i in list(range(num_train_data)):
if list_classes[i] == 1:
list_classes_1.append(i)
train_data_1.append(train_matrix[i])
else:
list_classes_0.append(i)
train_data_0.append(train_matrix[i])
# per-feature value log-probabilities given class 1
train_data_1 = np.matrix(train_data_1)
p_1_feature = {}
for i in list(range(num_feature)):
feature_values = np.array(train_data_1[:, i]).flatten()
# smoothing: add one occurrence of every possible value so no probability is zero
feature_values = feature_values.tolist() + list(range(n))
p = {}
count = len(feature_values)
for value in set(feature_values):
p[value] = np.log(feature_values.count(value) / float(count))
p_1_feature[i] = p
# per-feature value log-probabilities given class 0
train_data_0 = np.matrix(train_data_0)
p_0_feature = {}
for i in list(range(num_feature)):
feature_values = np.array(train_data_0[:, i]).flatten()
# smoothing: add one occurrence of every possible value so no probability is zero
feature_values = feature_values.tolist() + list(range(n))
p = {}
count = len(feature_values)
for value in set(feature_values):
p[value] = np.log(feature_values.count(value) / float(count))
p_0_feature[i] = p
# per-feature value log-probabilities over all classes (evidence term)
p_feature = {}
train_matrix = np.matrix(train_matrix)
for i in list(range(num_feature)):
feature_values = np.array(train_matrix[:, i]).flatten()
feature_values = feature_values.tolist() + list(range(n))
p = {}
count = len(feature_values)
for value in set(feature_values):
p[value] = np.log(feature_values.count(value) / float(count))
p_feature[i] = p
return p_feature, p_1_feature, p_1_class, p_0_feature, p_0_class
def classify_bayes(self,test_vector, p_feature, p_1_feature, p_1_class, p_0_feature, p_0_class):
"""
:param test_vector: feature vector to classify
:param p_feature: log-probabilities of feature values over all classes
:param p_1_feature: log-probabilities of feature values given class 1
:param p_1_class: probability that any sample is class 1
:return: 1 for male, 0 for female
"""
# log-space class scores (products of probabilities become sums of logs);
# note that 'sum' shadows the builtin within this method
sum = 0.0
for i in list(range(len(test_vector))):
sum += p_1_feature[i][test_vector[i]]
sum -= p_feature[i][test_vector[i]]
p1 = sum + np.log(p_1_class)
# NOTE(review): dead code -- p0 is recomputed below before it is used
p0 = 1 - p1
sum = 0.0
for i in list(range(len(test_vector))):
sum += p_0_feature[i][test_vector[i]]
sum -= p_feature[i][test_vector[i]]
p0 = sum + np.log(p_0_class)
if p1 > p0:
return 1
else:
return 0
def test_bayes():
"""Run one train/evaluate cycle on voice.csv.

Trains the naive Bayes model on a random 2000-row split, classifies the
remaining rows, prints per-gender accuracy and returns
(male_correct_rate, female_correct_rate, male_fail_rate,
female_fail_rate, total_correct_rate).
NOTE(review): divides by male_count / female_count, so this raises
ZeroDivisionError if a gender is absent from the random test split.
"""
file_name = 'voice.csv'
nb = NaiveBayes()
train_mat, train_classes, test_mat, test_classes, label_name = nb.load_data_set(file_name)
p_feature, p_1_feature, p_1_class, p_0_feature, p_0_class = nb.native_bayes(train_mat, train_classes)
# tallies (floats so the divisions below stay float under Python 2): 1 = male, 0 = female
count = 0.0
male_count = 0.0
female_count = 0.0
correct_male_count = 0.0
correct_female_count = 0.0
false_male_count = 0.0
false_female_count = 0.0
for i in list(range(len(test_mat))):
test_vector = test_mat[i]
result = nb.classify_bayes(test_vector, p_feature, p_1_feature, p_1_class, p_0_feature, p_0_class)
if result == test_classes[i]:
if test_classes[i] == 1:
correct_male_count += 1
male_count += 1
else:
correct_female_count += 1
female_count += 1
else:
if test_classes[i] == 1:
false_male_count += 1
male_count += 1
else:
false_female_count += 1
female_count += 1
count += 1
male_correct_rate = correct_male_count / male_count
female_correct_rate = correct_female_count / female_count
male_fail_rate = false_male_count / male_count
female_fail_rate = false_female_count / female_count
total_correct_rate = (correct_male_count+correct_female_count)/count
print('male correct rate: ', correct_male_count / male_count)
print('female correct rate: ', correct_female_count / female_count)
print('male fail rate: ', false_male_count / male_count)
print('female fail rate: ', false_female_count / female_count)
print('total correct rate: ', (correct_male_count+correct_female_count)/count)
return male_correct_rate,female_correct_rate,male_fail_rate,female_fail_rate,total_correct_rate
divison = 30 # quantisation levels; NOTE(review): name is a typo for 'division' but the NaiveBayes class reads this exact global
n = 40 # number of independent train/evaluate repetitions
# Accumulate the five rates from each run; the zero seed row is dropped below.
rate = np.zeros([1,5])
for i in range(n):
# NOTE(review): this zeros array is immediately overwritten by test_bayes()
t = np.zeros(5)
t = test_bayes()
rate = np.append(rate,[t],axis=0)
rate = rate[1:,:]
# column-wise averages over the n runs
avr = np.sum(rate,axis = 0)
avr /= n
print('male correct average rate: ', avr[0])
print('female correct average rate: ', avr[1])
print('male fail average rate: ', avr[2])
print('female fail average rate: ', avr[3])
print('total correct average rate: ', avr[4])
title=['male_correct_rate','female_correct_rate','male_fail_rate','female_fail_rate','total_correct_rate']
x = list(range(0,n))
# One sequential figure per metric across the n runs.
for i in range(5):
#plt.subplot2grid((2,3),(i//3,i%3))
y = rate[:,i]
#plt.plot(x,label='x')
plt.title(title[i])
plt.plot(y,label='y')
plt.show()
|
import os
import definitions
import wsdm.ts.helpers.persons.persons as p_lib
import wsdm.ts.helpers.nationalities.nationalities as nat_lib
import wsdm.ts.helpers.train.common_train as common_train
def init_all_positive_nationalities():
"""Group every person by their positive nationality.

Reads the module-level ``persons`` list (set in the __main__ block) and
returns a dict mapping nationality -> list of persons; persons without a
positive nationality are not added to any group.
"""
global persons
result = common_train.init_nationalities_empty_dict()
total_count = 0
for person in persons:
total_count += 1
positive_nationality = common_train.get_positive_nationality(person)
if positive_nationality != None:
result[positive_nationality].append(person)
# progress trace printed while scanning
print(total_count, positive_nationality, person)
return result
def save_all_positive_train_data(positive_dict, train_file):
    """Serialise the positive-nationality groups to *train_file*, one line per entry."""
    train_lines = common_train.get_train_lines(positive_dict, True)
    with open(train_file, encoding='utf8', mode='w') as sink:
        sink.writelines(line + "\n" for line in train_lines)
if __name__ == '__main__':
# Build the person list once, group by positive nationality, then write the training file.
persons = common_train.init_persons()
all_positive_nationalities = init_all_positive_nationalities()
save_all_positive_train_data(all_positive_nationalities, os.path.join(definitions.TRAINING_DIR, "all_positive_nationality.train"))
|
import datetime

# Ask for the user's details.  input() already returns a string (the old
# str() wrapper was redundant), and int() raises ValueError on a
# non-numeric age rather than continuing with bad data.
name = input("Enter your name: ")
age = int(input("Enter your age: "))

# date.today().year is already an int -- no need to format and re-parse it.
current_year = datetime.date.today().year
future_year = current_year + 100 - age

# Grammar fixed: the old message read "Hello X will turn 100 years in Y".
print(f"\nHello {name}, you will turn 100 years old in {future_year}")
from django.db import models
from django.conf import settings
from rest_framework.exceptions import ValidationError
from django.utils.timezone import localdate
from ..common.models import CommonModel
from .managers import MenuManager, OptionManager
from ..common.utils import throwable
class Menu(CommonModel):
"""
Menu model class, in charge of representing the menu objects.

Each menu belongs to a user, becomes available on ``available_date`` and
must not be edited on its release date (see ``is_editable``).
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE, related_name='menus')
available_date = models.DateField()
name = models.CharField(max_length=30)
description = models.TextField()
objects = MenuManager() # Setting the custom menu manager
# NOTE(review): 'evalute' below looks like a typo for 'evaluate' -- confirm
# against the project throwable() decorator's signature before renaming it.
@throwable(ValidationError, 'You cannot change the menu on the same release date', evalute = False )
def is_editable(self, raise_exception = True) -> bool:
"""[Determines whether or not the current model instance is available to be edited]
Arguments:
raise_exception {[bool]} -- [Whether or not this method should raise an exception]
Raises:
ValidationError: [States that the current menu mustn't be updated at this moment]
"""
return self.available_date != localdate()
def is_available(self) -> bool:
"""
Determines whether or not the current menu instance is on its launch date
"""
# Reuses the is_editable behavior which is almost the same required for this method
return not self.is_editable(raise_exception=False)
def get_user(self):
"""
Returns the user who created this menu
"""
return self.user
class Meta:
# Newest menus first by default.
ordering = ['-available_date']
class Option(CommonModel):
    """
    Option model class, in charge of representing the options inside a menu.
    """
    name = models.CharField(max_length=30)
    description = models.TextField()
    # Non-negative price in whole currency units.
    price = models.PositiveIntegerField(default=0)
    # Parent menu; deleting the menu deletes its options.
    menu = models.ForeignKey(
        Menu, on_delete=models.CASCADE, related_name='options')
    objects = OptionManager()  # Setting the custom menu manager

    def get_user(self):
        """
        Returns the user who created this option inside a menu
        """
        return self.menu.user
|
from django.contrib import admin
# Register your models here.
from .models import toDoList
class toDoListadmin(admin.ModelAdmin):
    """Admin configuration for the toDoList model."""
    # Columns shown on the change-list page.
    list_display = ["userdetails", "taskName", "description", "status", "ts"]


admin.site.register(toDoList, toDoListadmin)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import argparse
from train import train_full
from model.model import UNet
from dataloader.dataloader import InriaDataset
import pandas as pd
var= pd.read_json('variables.json')
# Root
#root = '/home/ign.fr/ttea/Code_IGN/AerialImageDataset'
#train_dir = os.path.join(root,'train/images')
#gt_dir = os.path.join(root,'train/gt')
#test_dir = os.path.join(root,'test/images')
# Main
def main(args):
    """Build a UNet from the parsed CLI args and run the full training loop.

    Returns the trained model so callers can reuse it (the original assigned
    it to an unused local and discarded it; returning it is backward
    compatible because the previous return value, None, was never used).
    """
    model = UNet(args.n_channel, args.conv_width, args.n_class, cuda=args.cuda)
    return train_full(args, model)
if __name__ == "__main__":
print('Training')
parser = argparse.ArgumentParser()
# Hyperparameter
parser.add_argument('--n_epoch', default = 40)
parser.add_argument('--n_epoch_test',type = int ,default = int(5)) #periodicity of evaluation on test set
parser.add_argument('--batch_size',type = int, default = 16)
parser.add_argument('--conv_width',default = [16,32,64,128,256,128,64,32,16])
parser.add_argument('--cuda',default = 1)
parser.add_argument('--lr', default = 0.0001)
parser.add_argument('--n_class',type = int, default = 2)
parser.add_argument('--n_channel',type=int, default = 3)
parser.add_argument('--class_names' , default= ['None','Batiment'])
parser.add_argument('--save_model', default= True)
parser.add_argument('--save_model_name ', default = "unet_test.pth")
tile_size = (512,512)
parser.add_argument('--train_dataset', default = InriaDataset(var['variables']['root'], tile_size, 'train', None, False))
parser.add_argument('--val_dataset', default = InriaDataset(var['variables']['root'], tile_size, 'validation', None, False))
args = parser.parse_args()
main(args) |
def load_file(filename):
    """Return the entire contents of *filename* as a single string.

    Uses a context manager so the handle is closed even if read() raises
    (the original leaked the handle on error).
    """
    with open(filename, "r") as f:
        return f.read()
def load_lines(filename, sep=","):
    """Read *filename*, drop the header row, and split each remaining line on *sep*."""
    rows = load_file(filename).splitlines()[1:]
    return [row.split(sep) for row in rows]
def groupby(d, cols, callback=None):
    """Group *d* by columns: dict inputs are grouped per value, lists directly."""
    grouper = _group_lines_in_dict_by if hasattr(d, "keys") else _group_lines_by
    return grouper(d, cols, callback)
def _group_lines_in_dict_by(d, cols, callback=None):
    """Apply groupby() to every value of *d*, optionally post-processing each
    grouped result with *callback*."""
    result = {}
    for key, value in d.items():
        grouped = groupby(value, cols)
        result[key] = callback(grouped) if callback else grouped
    return result
def _group_lines_by(lines, cols, callback=None):
"""
input:
lines - [line1, ..., lineN]
cols - [int1, ..., intN]
returns a dict with
col -> [line1, ..., lineN] if len(cols) == 1
(col1,...,colN) -> [line1, ..., lineN] otherwise
"""
def _key(line):
if len(cols) == 1:
return line[cols[0]]
return tuple([line[x] for x in cols])
result = {}
for line in lines:
key = _key(line)
result.setdefault(key, [])
result[key].append(line)
return result
def is_valid_matrikel_nummer(n):
    """A matrikel number is exactly eight characters long and begins with 'a'."""
    return n.startswith("a") and len(n) == 8
def orderby(lines, col):
    """Return *lines* sorted by the value at index *col*.

    The original used decorate-sort-undecorate with (key, line) tuples, so
    equal keys fell through to comparing the lines themselves — a TypeError
    for rows containing non-comparable values (e.g. dicts). A key function
    avoids that, and sorted() is stable, preserving input order on ties.
    """
    return sorted(lines, key=lambda line: line[col])
def dropuntil(lines, filter_):
    """Drop leading elements until *filter_* first matches; the matching
    element itself is kept. Returns [] when nothing matches."""
    hit = next((i for i, line in enumerate(lines) if filter_(line)), None)
    return [] if hit is None else lines[hit:]
def collectuntil(lines, filter_):
    """Collect leading elements up to (excluding) the first match of
    *filter_*; returns all of *lines* when nothing matches."""
    hit = next((i for i, line in enumerate(lines) if filter_(line)), None)
    return lines if hit is None else lines[:hit]
|
from tetris.menu.AbstractComponents import *
from tetris.menu.AbstractEntities import *
from Systems import *
from Entities import *
from tetris.game import GameComponent
class Menu(MenuComponent):
    """Main-menu screen: renders buttons and maps keyboard input to menu actions."""

    def __init__(self, window, world):
        super(Menu, self).__init__(window, world)
        # Entity holding the menu's state (buttons, selected index).
        self.entity = MenuEntity(world)

    def prepare(self):
        # Register the systems that draw the menu and update its buttons.
        self.world.add_system(MainMenuRenderer(self.window))
        self.world.add_system(ButtonUpdateSystem())

    def post_process(self):
        pass

    def handle_event(self, event):
        if event.type == sdl2.SDL_KEYDOWN:
            # RETURN activates the highlighted button and hands control to
            # that button's target state.
            if event.key.keysym.sym == sdl2.SDLK_RETURN:
                menu_state = self.entity.menustate
                if menu_state.selected_index is not None:
                    self.handing_off = True
                    self.next_state_class = menu_state.buttons[menu_state.selected_index].buttonstate.next_state_class
            if event.key.keysym.sym == sdl2.SDLK_UP:
                self.entity.menustate.next_button()
            # NOTE(review): DOWN also calls next_button() — if the menu state
            # exposes a previous_button()-style method this is likely a bug;
            # confirm against MenuEntity.
            if event.key.keysym.sym == sdl2.SDLK_DOWN:
                self.entity.menustate.next_button()
|
from art import logo
from art import logo
import random
# Show the game banner on startup.
print(logo)
# Card values with replacement: ace counted as 11, all face cards as 10.
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
# NOTE(review): testdict is never read by the game logic — candidate for removal.
testdict = {
    'mike': 20,
    'dave': 21,
}
def deal_a_card(deck):
    """Append one random card from the global `cards` pool to *deck* (mutates
    it in place) and return the same list for convenience."""
    card = random.choice(cards)
    deck.append(card)
    return deck
def deal():
    """Return a fresh two-card starting hand drawn (with replacement) from the
    global `cards` pool.

    The original iterated over the literal tuple (0, 2) — two iterations only
    by coincidence; range(2) states the intent.
    """
    return [random.choice(cards) for _ in range(2)]
def total(player_deckR, computer_deckR, dict):
    """Sum both hands into *dict* under 'You'/'Computer', print bust messages,
    and return the updated dict.

    Fixes vs. the original:
    - `if totalc and totalp > 21` only compared totalp against 21 (totalc was
      just truthiness-tested); both totals are now compared.
    - the function returned the module-level `players_dict` instead of the
      dict it was given; it now returns its own argument.
    - dead local `game_continue = False` assignments (they never touched the
      module-level flag) were removed.
    """
    totalp = sum(player_deckR)
    totalc = sum(computer_deckR)
    dict['You'] = totalp
    dict['Computer'] = totalc
    if totalc > 21 and totalp > 21:
        print("You both bust! it's a draw")
    if totalp > 21:
        print('You bust!! Computer wins')
    if totalc > 21:
        print('Computer bust!! You win!!')
    return dict
def blackjack(dictionary):
    """Announce every hand in *dictionary* that totals exactly 21.

    The original shadowed its own name with a local string, set a dead local
    `game_continue`, and (with the banner print at loop level) could print
    "  got a blackjack!!" with an empty name when nobody had 21. Now the
    message is printed only for actual blackjacks.
    """
    for key in dictionary:
        if dictionary[key] == 21:
            print(f"{key} got a blackjack!!")
# def winner(players_dictR):
# winner = 0
# for key in players_dictR:
# score = players_dictR[key]
# if score > winner:
# winner = key
# if score == winner:
# return winner
# return winner
def winner(players_dictR):
    """Return the key with the highest score; ties keep the first one seen.
    Returns "" for an empty dict (or all-zero scores)."""
    best_key = ""
    best_score = 0
    for name, score in players_dictR.items():
        if score > best_score:
            best_score, best_key = score, name
    return best_key
# ---- Game driver (flat script; indentation reconstructed) ----
start = input("Hi, would you like to play Blackjack? Type 'y' or 'n:' ")
if start == 'y':
    game_continue = True
else:
    game_continue = False
    # NOTE(review): this answer is never used afterwards.
    question = input("Do you want to play again? 'y' or 'n:' ")
players_dict = {}
player_cards = deal()
computer_cards = deal()
print(f"Your starting deal: {player_cards}")
# Only the computer's first card is revealed.
print(f"Computers starting deal: {computer_cards[0]}'?'")
# score_dict = total(player_cards, computer_cards, players_dict)
blackjack(players_dict)
while game_continue:
    # NOTE(review): the prompt advertises 'S' but the comparison below is
    # lowercase 's' — confirm intended case handling.
    choice = input("If you would like to stick type 'S' or for another card type 'h' for hit: ")
    if choice == 's':
        # Dealer draws to 17, then the round is settled either way.
        while sum(computer_cards) < 17:
            deal_a_card(computer_cards)
        if sum(computer_cards) > 21:
            print(total(player_cards, computer_cards, players_dict))
            print('Computer went BUST! You win!!')
            game_continue = False
        else:
            print(total(player_cards, computer_cards, players_dict))
            print(f"The winner is the {winner(players_dict)}!!")
            game_continue = False
    # NOTE(review): reconstructed placement — these draws appear to run even
    # after sticking; an `elif choice == 'h':` guard may have been intended.
    deal_a_card(computer_cards)
    deal_a_card(player_cards)
    blackjack(players_dict)
    total(player_cards, computer_cards, players_dict)
    print(f"Your deal: {player_cards}")
    print(f"Computers deal: {computer_cards}'?'")
    print(f'The winner is {(winner(players_dict))}')
    # if sum(computer_cards) < 17 and sum(player_cards) < 22:
    #     deal_a_card(computer_cards)
    #     deal_a_card(player_cards)
    #     blackjack(players_dict)
    #     print(total(player_cards, computer_cards, players_dict))
    #     print(f"The winner is the {winner(players_dict)}!!")
    #
    # choice = input("If you would like to stick type 'S' or for another card type 'h' for hit: ")
print(total(player_cards, computer_cards, players_dict))
print(player_cards)
print(computer_cards)
# print(winner(players_dict))
|
from django.shortcuts import render, HttpResponse, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
from django.db import transaction
from .forms import UserForm, ProfileForm
from django.contrib.auth.models import User
from django.contrib.auth import login,authenticate,logout
from django.contrib import messages
from .models import Product, Profile
@login_required()
def home(request):
    """Render the landing page with a tiny demo context."""
    context = {'yo': "hello world"}
    return render(request, 'core/home.html', context)
class SignUp(generic.CreateView):
    """Register a new account and log the user straight in."""
    form_class = UserCreationForm
    success_url = reverse_lazy('home')
    template_name = 'registration/signup.html'

    def form_valid(self, form):
        # NOTE(review): save(commit=False) followed by save() skips m2m data;
        # harmless for the stock UserCreationForm, but confirm if the form is
        # ever extended. A plain form.save() would be equivalent here.
        obj = form.save(commit=False)
        obj.save()
        # Re-authenticate with the raw password so login() gets a backend-bound user.
        username, password = form.cleaned_data.get('username'), form.cleaned_data.get('password1')
        new_user = authenticate(username=username, password=password)
        login(self.request, new_user)
        return redirect('/')
@login_required
def profile(request):
    """Show the logged-in user's profile page.

    The leftover debug print(user) was removed; behavior is otherwise unchanged.
    """
    user = User.objects.get(id=request.user.id)
    return render(request, 'core/profile.html', {'user_p': user})
@login_required
@transaction.atomic
def update_profile(request):
    """Edit the user's User and Profile records together, atomically.

    POST validates and saves both forms; GET (or invalid POST) re-renders the
    bound/unbound forms. The leftover debug print(profile_form) was removed.
    """
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        profile_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, ('Your profile was successfully updated!'))
            return redirect('core:profile')
        else:
            messages.error(request, ('Please correct the error below.'))
    else:
        user_form = UserForm(instance=request.user)
        profile_form = ProfileForm(instance=request.user.profile)
    return render(request, 'core/update_profile.html', {
        'user_form': user_form,
        'profile_form': profile_form
    })
def insta(request):
    """Render the insta page with placeholder data."""
    return render(request, 'core/insta.html', {'data': 'somedata from insta'})
def products_list():
    """Return every product as a plain dict: id, title and category name."""
    return [
        {'id': product.id, 'title': product.title, 'category': product.category.name}
        for product in Product.objects.all()
    ]
def charts(request):
    """Render the static charts page."""
    template = 'core/charts.html'
    return render(request, template)
import os
import re

# Reads one line like "ab3cd2" from in.txt and writes each word repeated by
# the count that follows it ("ababab" + "cdcd") to out.txt.
path_to_read_file = os.path.join('in.txt')
path_to_write_file = os.path.join('out.txt')

strip_digits = re.compile(r'[0-9+]')      # replaced with spaces to isolate words
strip_non_digits = re.compile(r'[^0-9]')  # replaced with spaces to isolate counts

with open(path_to_read_file, 'r') as file_reader:
    line = file_reader.readline().strip()

words = re.sub(strip_digits, " ", line).split()
counts = [int(tok) for tok in re.sub(strip_non_digits, " ", line).split()]
# Indexing (not zip) preserves the original's IndexError on malformed input.
result = "".join(words[i] * counts[i] for i in range(len(words)))

with open(path_to_write_file, 'w') as file_writer:
    file_writer.write(result)
|
import cv2
import numpy as np
import os
import pickle
# Emotion label codes used below: AN=1, DI=2, FE=3, HA=4, NE=5, SU=6, SA=7
# Haar cascades for face parts — the XML files must sit next to this script.
left_eye_cascade = cv2.CascadeClassifier('haarcascade_lefteye_2splits.xml')
right_eye_cascade = cv2.CascadeClassifier('haarcascade_righteye_2splits.xml')
mouth_cascade = cv2.CascadeClassifier('haarcascade_mcs_mouth.xml')
nose_cascade = cv2.CascadeClassifier('haarcascade_mcs_nose.xml')
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# RBF-kernel C-SVC. NOTE(review): cv2.SVM / cv2.SVM_RBF are the OpenCV 2.x
# API — this script is Python 2 / OpenCV 2 only.
svm_params = dict(kernel_type = cv2.SVM_RBF,
                  svm_type = cv2.SVM_C_SVC,
                  C=9.34, gamma=15.68)
svm = cv2.SVM()
def dec2bin(n, num_bit):
    """Return *n* (a positive int) as an LSB-first bit list, zero-padded to
    *num_bit* bits, with bit 0 repeated once at the end (the LBP code below
    treats the pattern as circular).

    Fixes vs. the original: `n // 2` instead of `n / 2` keeps integer
    semantics on Python 3 (identical to the Python-2 behavior), and
    `while n > 1` terminates for n == 0 where the original looped forever.
    """
    bits = []
    while n > 1:
        bits.append(n % 2)
        n = n // 2
    bits.append(n % 2)
    s = list(np.uint8(np.concatenate((bits, np.zeros(num_bit - len(bits)), [bits[0]]))))
    # s.reverse()
    return s
def table_loc():
    """Build the uniform-LBP lookup data.

    Returns (loc, uni): `loc` is a boolean mask over codes 1..254 marking the
    non-uniform patterns (more or fewer than two 0/1 transitions around the
    8-bit ring), all of which share the bin value 5; `uni` is the sorted set
    of surviving bin values including the sentinels 0, 255 and 256.
    """
    table = []
    for value in range(1, 255):
        bits = np.array(dec2bin(value, 8), dtype=float)
        is_uniform = np.count_nonzero(np.diff(bits)) == 2
        table.append(value if is_uniform else 5)
    table = np.array(table)
    loc = table == 5          # mask computed BEFORE the sentinels are added
    table = np.concatenate(([0], table, [255, 256]))
    uni = np.unique(table)
    return loc, uni
def lbp_feat(img_last):
    # Compute the uniform-LBP histogram of a grayscale image and return it as
    # one feature list (Python 2 source: note the bare `print im` below).
    # NOTE(review): the block loops step by height/width, so only the single
    # top-left div x div window is histogrammed — confirm whether per-block
    # histograms were intended.
    # img_gray = cv2.resize(img_last,(160,160))
    img_gray = img_last
    cv2.waitKey(3000)
    lo,uni = table_loc()
    height, width = img_gray.shape
    # Zero-pad by one pixel so a full 3x3 neighbourhood exists at the borders.
    init_img = np.uint8(np.zeros(shape=(height+2,width+2)))
    init_img[1:height+1,1:width+1]=img_gray
    lbp_img = np.zeros((height,width),dtype=np.float64)
    for i in range(1,height+1):
        for j in range(1,width+1):
            wind = init_img[i-1:i+2,j-1:j+2]
            thresh = init_img[i,j]
            # Binarise the 3x3 window against the centre pixel.
            blk2 = np.zeros((3,3))
            for k in range(0,3):
                for l in range(0,3):
                    if wind[k,l]>=thresh:
                        blk2[k,l] = 1
                    else:
                        blk2[k,l] = 0
            # Read the 8 neighbours clockwise from the top-left and pack them
            # into a single byte (MSB first).
            lb_bin = (np.concatenate(([blk2[0,0]],[blk2[0,1]],[blk2[0,2]],[blk2[1,2]],[blk2[2,2]],[blk2[2,1]],[blk2[2,0]],[blk2[1,0]])))
            # print lb_bin
            s = 0
            for ii in range(8):
                s = s + lb_bin[7-ii]*(2**(ii))
            # print s.dtype
            lbp_img[i-1,j-1] = s
    # print lbp_img
    im = lbp_img
    # Collapse every non-uniform LBP code into the shared bin value 5.
    for nn in range(0,254):
        # print lo[nn]
        if lo[nn]:
            im[im==nn+1]=5
    print im
    div = 40
    feature=[]
    for s in range(0,height,height):
        for t in range(0,width,width):
            lbp_feat = im[s:s+div,t:t+div]
            feat = np.histogram(lbp_feat.ravel(),uni)
            feat1 = feat[0]
            # print feat1
            feature.append(list(feat1))
    ggg = feature[0]
    return feature[0]
# =========================================================================
# Training-data collection (Python 2 + OpenCV 2.x; indentation reconstructed).
# Walks train/, detects the face and seven part crops per image, extracts an
# LBP histogram per crop, and trains an RBF SVM on the concatenated vectors.
# =========================================================================
label = []
for dirname, dirnames, filenames in os.walk("train/"):
    print dirname, dirnames, filenames
    # NOTE(review): feat_last is re-initialised for every directory visited,
    # so only the last directory's features survive the walk, while `label`
    # keeps accumulating globally — confirm.
    feat_last = []
    for subdirname in filenames:
        # print subdirname[0]
        path_name = os.path.join(dirname, subdirname)
        # Emotion code = first two letters of the file name's second
        # dot-separated token, e.g. "xx.AN1.jpg" -> AN -> label 1.
        label1 = subdirname.split('.')
        if label1[1][0:2]=='AN':
            label.append(1)
        if label1[1][0:2]=='DI':
            label.append(2)
        if label1[1][0:2]=='FE':
            label.append(3)
        if label1[1][0:2]=='HA':
            label.append(4)
        if label1[1][0:2]=='NE':
            label.append(5)
        if label1[1][0:2]=='SU':
            label.append(6)
        if label1[1][0:2]=='SA':
            label.append(7)
        # print path_name
        rgb_img = cv2.imread(path_name)
        gray_image = cv2.cvtColor(rgb_img,cv2.COLOR_BGR2GRAY)
        gray_image1 = gray_image
        cv2.imshow('figure',gray_image)
        cv2.waitKey(1000)
        # Flags ensure only the first accepted mouth/nose (and eyes) are used.
        flag1 = 0
        flag2 = 0
        flag3 = 0
        flag4 = 0
        faces = face_cascade.detectMultiScale(gray_image, 1.3, 5)
        # print faces
        feat_img = []
        for (x,y,w,h) in faces:
            roi_color = gray_image[y:y+h, x:x+w]
            # Right eye of the subject: detection centre must fall in the
            # upper-LEFT quadrant of the face ROI.
            eyes1 = right_eye_cascade.detectMultiScale(roi_color)
            for (ex,ey,ew,eh) in eyes1:
                # print ex,ey,ew
                if ex+ew/2<w/2 and ey+eh/2<h/2:
                    # cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
                    # cv2.imshow('img',roi_color)
                    exl = int(ex)
                    eyl = int(ey)
                    ewl = int(ew)
                    ehl = int(eh)
                    print ewl,ehl
                    # Outer-upper quarter of that eye box.
                    crop1 = roi_color[eyl:eyl+ehl/2,exl+ewl/2:exl+ewl]
                    # cv2.rectangle(roi_color,(exl+ewl/2,eyl),(exl+ewl,eyl+ehl/2),(255,0,0),2)
                    # cv2.imshow('img',roi_color)
                    left_eye = roi_color[eyl:eyl+ehl, exl:exl+ewl]
                    # cv2.imshow('left',left_eye)
                    cv2.imshow('left1',crop1)
                    flag3 = 1
            # Left eye: detection centre in the upper-RIGHT quadrant.
            eyes2 = left_eye_cascade.detectMultiScale(roi_color)
            for (ex,ey,ew,eh) in eyes2:
                # print ex,ey,ew
                if ex+ew/2>=w/2 and ey+eh/2<h/2:
                    exr = ex
                    eyr = ey
                    ewr = ew
                    ehr = eh
                    crop2 = roi_color[eyr:eyr+ehr/2,exr:exr+ewr/2]
                    # cv2.rectangle(roi_color,(exr,eyr),(exr+ewr/2,eyr+ehr/2),(255,0,0),2)
                    # cv2.imshow('img',roi_color)
                    # cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
                    # cv2.imshow('img',roi_color)
                    right_eye = roi_color[eyr:eyr+ehr, exr:exr+ewr]
                    # cv2.imshow('right',right_eye)
                    cv2.imshow('right1',crop2)
                    flag4 = 1
            # Between-the-eyes region. NOTE(review): relies on exl/ewl and
            # eyr/ehr/exr from the two loops above — NameError if either eye
            # was not detected (flags 3/4 are set but never checked).
            crop3 = roi_color[eyr+ehr/2:eyr+ehr,exl+ewl:exr]
            # cv2.rectangle(roi_color,(exl+ewl,eyr+ehr/2),(exr,eyr+ehr),(255,0,0),2)
            # cv2.imshow('img',roi_color)
            cv2.imshow('middle',crop3)
            # Mouth: accept the first detection in the bottom quarter of the face.
            mouth_rects = mouth_cascade.detectMultiScale(roi_color, 1.3, 11)
            for (ex,ey,ew,eh) in mouth_rects:
                if ey+eh/2>h-h/4:
                    if flag1 == 0:
                        exm = ex
                        eym = ey
                        ewm = ew
                        ehm = eh
                        # cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,0),2)
                        # cv2.imshow('img',roi_color)
                        # mouth = roi_color[eym:eym+ehm, exm:exm+ewm]
                        # cv2.imshow('mouth',mouth)
                        flag1 = 1
                        # cv2.rectangle(roi_color,(exm+ewm,eym),(exm+ewm+ewm/2,eym+ehm),(255,0,0),2)
                        # cv2.imshow('img',roi_color)
                        crop4 = roi_color[eym:eym+ehm,exm+ewm:exm+ewm+ewm/2]
                        cv2.imshow('mouth1',crop4)
                        # cv2.rectangle(roi_color,(exm-ewm/2,eym),(exm,eym+ehm),(255,0,0),2)
                        # cv2.imshow('img',roi_color)
                        crop5 = roi_color[eym:eym+ehm,exm-ewm/2:exm]
                        cv2.imshow('mouth2',crop5)
            # Nose: accept the first detection in the lower half of the face.
            nose_rects = nose_cascade.detectMultiScale(roi_color, 1.3, 11)
            for (ex,ey,ew,eh) in nose_rects:
                if ey+eh/2>h/2:
                    if flag2 == 0:
                        # cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,0),2)
                        # cv2.imshow('img',roi_color)
                        exn = ex
                        eyn = ey
                        ewn = ew
                        ehn = eh
                        # nose = roi_color[eyn:eyn+ehn, exn:exn+ewn]
                        # cv2.imshow('nose',nose)
                        flag2 = 1
                        # cv2.rectangle(roi_color,(exn+ewn,eyn),(exn+ewn+ewn/2,eyn+ehn),(255,0,0),2)
                        # cv2.imshow('img',roi_color)
                        crop6 = roi_color[eyn:eyn+ehn,exn+ewn:exn+ewn+ewn/2]
                        cv2.imshow('nose1',crop6)
                        # cv2.rectangle(roi_color,(exn-ewn/2,eyn),(exn,eyn+ehn),(255,0,0),2)
                        # cv2.imshow('img',roi_color)
                        crop7 = roi_color[eyn:eyn+ehn,exn-ewn/2:exn]
                        # NOTE(review): shows crop5 under the 'nose2' window —
                        # probably meant crop7.
                        cv2.imshow('nose2',crop5)
            # Concatenate the per-crop LBP histograms into one feature vector.
            feat1 = list(lbp_feat(crop1))
            feat_img+=feat1
            feat2 = list(lbp_feat(crop2))
            feat_img+=feat2
            feat3 = list(lbp_feat(crop3))
            feat_img+=feat3
            feat4 = list(lbp_feat(crop4))
            feat_img+=feat4
            feat5 = list(lbp_feat(crop5))
            feat_img+=feat5
            feat6 = list(lbp_feat(crop6))
            feat_img+=feat6
            # NOTE(review): feat7 re-uses crop1 although crop7 was computed
            # above — looks like a copy/paste slip; confirm.
            feat7 = list(lbp_feat(crop1))
            feat_img+=feat7
        feat_last.append(feat_img)
# Persist raw features and labels (text-mode pickle, as in the original).
file_new1 = open('feature.txt','w')
pickle.dump(feat_last,file_new1)
file_new1.close()
file_new2 = open('label.txt','w')
pickle.dump(label,file_new2)
file_new2.close()
# Train the RBF C-SVC on the collected samples and save the model.
samples = np.array(feat_last,np.float32)
responses = np.array(label,np.float32)
print 'auto training initiated'
print 'please wait.....'
svm.train(samples,responses,params=svm_params)
svm.save("svm_class.xml")
|
class Item:
    """A named stock item tied to a storage location."""

    def __init__(self, name, location, stock):
        self.name = name
        self.location = location
        self.stock = stock

    def dictify(self):
        """Return the item as a plain dict, e.g. for JSON serialisation."""
        return {'name': self.name, 'location': self.location, 'stock': self.stock}

    def adapt(self, li):
        """Overwrite all fields from a [name, location, stock] sequence
        (extra elements are ignored, exactly as before)."""
        self.name, self.location, self.stock = li[0], li[1], li[2]
# Read a string and print nine derived values: 3rd char, 2nd-from-last char,
# first five chars, all but the last three, even-index chars, odd-index chars,
# the reversal, every other char reversed, and the length.
some_string = input("Input some string:")
n = len(some_string)
print(some_string[2], some_string[n - 2], some_string[:5], some_string[:n - 3],
      some_string[0:n:2], some_string[1:n:2], some_string[::-1],
      some_string[::-2], n)
|
from django.contrib.auth import logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy, reverse
from django.views.generic import CreateView, UpdateView
from django.contrib import messages
from .forms import RegisterUserForm, ReviewForm, AIFormSet, RebuttalForm, AIRebFormSet, LoginForm, ChangeUserInfoForm, \
ProfPasswordChangeForm, UserPasswordResetForm, UserSetPasswordForm, ComSugForm
from .models import Review, AdvUser, Rebuttal, AdditionalImage
from .utilities import get_timestamp_name
from django.contrib.auth.views import LoginView, PasswordChangeView, PasswordResetView, PasswordResetConfirmView, \
PasswordResetDoneView, PasswordResetCompleteView
from django.contrib.auth.decorators import login_required
from django.views.generic import View
from base64 import b64decode
from django.core.files.base import ContentFile
# Payment-provider (Yandex.Kassa) shop id. NOTE(review): consider moving this
# credential-like constant into settings or the environment.
kassa_id = '5e3bf2a11ae1bd2a008b4574'
def main(request):
    """Site landing page."""
    template = 'main/index.html'
    return render(request, template)
def redirect_to_info(request):
    """Static information page."""
    template = 'main/info_page.html'
    return render(request, template)
# ------------Review----------------
def reviews_list(request):
    """List every review."""
    return render(request, 'review/review_list.html',
                  context={"revs": Review.objects.all()})
def review_detail(request, pk):
    """Show a single review with its additional images (404 if absent)."""
    rev = get_object_or_404(Review, pk=pk)
    return render(request, 'review/review_detail.html',
                  context={"rev": rev, "img_set": rev.additionalimage_set.all()})
class ReviewCreate(LoginRequiredMixin, View):
    """Create a review plus up to 20 base64-encoded additional images, then
    send the user to the payment page."""

    def get(self, request):
        form = ReviewForm(initial={'author': request.user.pk})
        formset = AIFormSet()
        return render(request, "review/add_review.html", context={"form": form, "formset": formset})

    def post(self, request):
        form = ReviewForm(request.POST, request.FILES)
        if form.is_valid():
            review = form.save()
            i = 0
            # Images arrive as data-URLs in POST fields image-0 .. image-19.
            # NOTE(review): a MISSING field returns None (not ''), so
            # None.split(';base64,') would raise — confirm the frontend always
            # sends empty strings for unused slots.
            while i < 20:
                b64_text = request.POST.get('image-{}'.format(i))
                if b64_text == '':
                    break
                else:
                    img_format, imgstr = b64_text.split(';base64,')
                    image_data = b64decode(imgstr)
                    ext = img_format.split('/')[-1]
                    filename = get_timestamp_name(ext)
                    image = AdditionalImage.objects.create(review=review, image=ContentFile(image_data, filename))
                    image.save()
                    # NOTE(review): form.save() is re-run on every iteration
                    # just to read the id — review.id would avoid the extra
                    # DB writes.
                    request.session['review_id'] = form.save().id
                    i += 1
            return redirect('payment:get_payment_form', pk=review.id)  # to the payment page
        return render(request, "review/add_review.html", context={"form": form})
        # (earlier formset-based variant kept for reference)
        # formset = AIFormSet(request.POST, request.FILES, instance=review)
        # if formset.is_valid():
        #     formset.save()
        #     request.session['review_id'] = form.save().id
        #     return redirect('payment:get_payment_form', pk=review.id)  # to the payment page
        # return render(request, "review/add_review.html", context={"form": form, "formset": formset})
class ReviewUpdate(LoginRequiredMixin, View):
    """Edit an existing review and its additional-image formset."""

    def get(self, request, pk):
        review = get_object_or_404(Review, pk=pk)
        form = ReviewForm(instance=review)
        formset = AIFormSet(instance=review)
        return render(request, 'review/update_review.html', context={"form": form, "formset": formset, "rev_pk": pk})

    def post(self, request, pk):
        review = get_object_or_404(Review, pk=pk)
        form = ReviewForm(request.POST, request.FILES, instance=review)
        # BUG FIX: the formset was only created inside the is_valid() branch,
        # so rendering the error page for an invalid form raised NameError.
        formset = AIFormSet(request.POST, request.FILES, instance=review)
        if form.is_valid():
            review = form.save()
            # Re-bind against the saved instance before validating images.
            formset = AIFormSet(request.POST, request.FILES, instance=review)
            if formset.is_valid():
                formset.save()
                messages.add_message(request, messages.SUCCESS, 'Отзыв исправлен')
                return redirect('ReviewApp:get_reviews_list')
        return render(request, 'review/update_review.html', context={"form": form, "formset": formset, "rev_pk": pk})
class ReviewDelete(LoginRequiredMixin, View):
    """Confirm review deletion, then hand over to the payment flow, which
    performs the actual removal."""
    template = "review/delete_review.html"
    redirect_url = 'ReviewApp:get_reviews_list'

    def get(self, request, pk):
        review = get_object_or_404(Review, pk=pk)
        return render(request, self.template, context={"rev": review})

    def post(self, request, pk):
        review = get_object_or_404(Review, pk=pk)
        # Stash the id so the payment flow knows which review to delete.
        request.session['review_id'] = review.id
        return redirect(reverse('payment:process_del'))  # to the payment page
# ------------Rebuttal----------------
def rebuttals_list(request, pk):
    """List all rebuttals attached to one review (404 if the review is absent)."""
    rev = get_object_or_404(Review, pk=pk)
    return render(request, 'rebuttal/rebuttal_list.html',
                  context={"rebs": rev.rebuttal_set.all(), "rev_pk": pk})
def rebuttal_detail(request, rev_pk, reb_pk):
    """Show a single rebuttal of a review, including its extra images."""
    rev = get_object_or_404(Review, pk=rev_pk)
    reb = rev.rebuttal_set.get(pk=reb_pk)
    img_set = reb.additionalimagereb_set.all()
    context = {"rev_pk": rev_pk, "reb": reb, "img_set": img_set}
    return render(request, 'rebuttal/rebuttal_detail.html', context=context)
class RebuttalCreate(LoginRequiredMixin, View):
    """Create a rebuttal (with optional extra images) for a review, then
    redirect into the payment flow."""

    def get(self, request, pk):
        review = get_object_or_404(Review, pk=pk)
        form = RebuttalForm(initial={'author': request.user.pk, 'review': pk, 'price': review.price})
        formset = AIRebFormSet()
        return render(request, "rebuttal/add_rebuttal.html", context={"form": form, "formset": formset})

    # -//////------ add payment ----------//////-
    def post(self, request, pk):
        form = RebuttalForm(request.POST, request.FILES)
        # BUG FIX: the formset was only created inside the is_valid() branch,
        # so rendering the error page for an invalid form raised NameError.
        formset = AIRebFormSet(request.POST, request.FILES)
        if form.is_valid():
            rebuttal = form.save()
            formset = AIRebFormSet(request.POST, request.FILES, instance=rebuttal)
            if formset.is_valid():
                formset.save()
                # rebuttal.id replaces the original's second form.save() that
                # existed only to read the id (one fewer DB write, same value).
                request.session['rebuttal_id'] = rebuttal.id
                request.session['review_id'] = pk
                return redirect(reverse('payment:process_reb'))  # to the payment page
        return render(request, "rebuttal/add_rebuttal.html", context={"form": form, "formset": formset})
class RebuttalUpdate(LoginRequiredMixin, View):
    """Edit an existing rebuttal and its additional-image formset."""

    def get(self, request, rev_pk, reb_pk):
        reb = get_object_or_404(Rebuttal, pk=reb_pk)
        form = RebuttalForm(instance=reb)
        formset = AIRebFormSet(instance=reb)
        return render(request, 'rebuttal/update_rebuttal.html', context={"form": form, "formset": formset, "rev_pk": rev_pk, "reb_pk": reb_pk})

    def post(self, request, rev_pk, reb_pk):
        reb = get_object_or_404(Rebuttal, pk=reb_pk)
        form = RebuttalForm(request.POST, request.FILES, instance=reb)
        # BUG FIX: the formset was only created inside the is_valid() branch,
        # so rendering the error page for an invalid form raised NameError.
        formset = AIRebFormSet(request.POST, request.FILES, instance=reb)
        if form.is_valid():
            reb = form.save()
            formset = AIRebFormSet(request.POST, request.FILES, instance=reb)
            if formset.is_valid():
                formset.save()
                messages.add_message(request, messages.SUCCESS, 'Опровержение исправлено')
                return redirect('ReviewApp:get_rebuttal_detail', rev_pk=rev_pk, reb_pk=reb_pk)
        return render(request, 'rebuttal/update_rebuttal.html', context={"form": form, "formset": formset, "rev_pk": rev_pk, "reb_pk": reb_pk})
class RebuttalDelete(LoginRequiredMixin, View):
    """Soft-delete a rebuttal (sets is_active=False) after confirmation."""
    template = 'rebuttal/delete_rebuttal.html'
    redirect_url = 'ReviewApp:get_rebuttals_list'
    # Class-level default; post() sets an instance attribute before use.
    redirect_pk = None

    def get(self, request, rev_pk, reb_pk):
        rev = get_object_or_404(Review, pk=rev_pk)
        reb = rev.rebuttal_set.get(pk=reb_pk)
        return render(request, self.template, context={"reb": reb, "rev_pk": rev_pk})

    # payment first, then redirect here for the deletion
    def post(self, request, rev_pk, reb_pk):
        self.redirect_pk = rev_pk
        rev = get_object_or_404(Review, pk=rev_pk)
        reb = rev.rebuttal_set.get(pk=reb_pk)
        # Soft delete: flip the flag instead of removing the row.
        reb.is_active = False
        reb.save()
        messages.add_message(request, messages.SUCCESS, 'Опровержение удалено')
        return redirect(self.redirect_url, pk=self.redirect_pk)
# ------------Authorization-------------
class LoginUser(LoginView):
    """Site login page.

    NOTE(review): Django's LoginView derives its redirect from the `next`
    parameter / settings.LOGIN_REDIRECT_URL, not from a `success_url`
    attribute — confirm this attribute actually takes effect.
    """
    template_name = 'authorization/login.html'
    form_class = LoginForm
    success_url = reverse_lazy('ReviewApp:profile')
@login_required
def profile(request):
    """Render the authenticated user's profile page."""
    template = 'authorization/profile.html'
    return render(request, template)
def logout_request(request):
    """Log the user out and bounce back to the page they came from.

    Falls back to the site root when the Referer header is absent — the
    original passed None straight to HttpResponseRedirect.
    """
    logout(request)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER') or '/')
class RegisterUserView(CreateView):
    """Self-service user registration; lands on the profile page when done."""
    model = AdvUser
    template_name = 'authorization/registration.html'
    form_class = RegisterUserForm
    success_url = reverse_lazy('ReviewApp:profile')
# ------------Profile-------------
@login_required
def profile_reviews(request):
    """List reviews authored by the current user."""
    own_reviews = Review.objects.filter(author=request.user.pk)
    return render(request, 'authorization/prof_reviews.html', context={"revs": own_reviews})
@login_required
def profile_rebuttals(request):
    """List rebuttals authored by the current user."""
    own_rebuttals = Rebuttal.objects.filter(author=request.user.pk)
    return render(request, 'authorization/prof_rebuttals.html', context={"rebs": own_rebuttals})
@login_required
def profile_rebuttals_on_me(request):
    """List other users' rebuttals attached to the current user's reviews."""
    incoming = (Rebuttal.objects
                .exclude(author=request.user.pk)
                .filter(review__in=request.user.review_set.all()))
    return render(request, 'authorization/prof_rebuttals_on_me.html', context={"rebs": incoming})
@login_required
def review_profile_rebuttals_on_me(request, pk):
    """List other users' rebuttals on one specific review of the current user."""
    incoming = (Rebuttal.objects
                .exclude(author=request.user.pk)
                .filter(review__in=request.user.review_set.all(), review=pk))
    return render(request, 'authorization/rev_prof_rebuttals_on_me.html', context={"rebs": incoming})
@login_required
def prof_review_detail(request, pk):
    """Profile-page detail view for one of the user's own reviews."""
    rev = get_object_or_404(Review, pk=pk)
    return render(request, 'authorization/prof_review_detail.html',
                  context={"rev": rev, "img_set": rev.additionalimage_set.all()})
class ProfReviewDelete(ReviewDelete):
    """Profile-page variant of ReviewDelete: different template and a
    redirect target on the user's own review list."""
    template = 'authorization/prof_review_delete.html'
    redirect_url = 'ReviewApp:profile_reviews'
class ProfRebuttalDelete(RebuttalDelete):
    """Profile-page variant of RebuttalDelete: same soft delete, but redirects
    back to the user's rebuttal list (that URL takes no pk argument)."""
    template = 'authorization/prof_rebuttal_delete.html'
    redirect_url = 'ReviewApp:profile_rebuttals'

    def post(self, request, rev_pk, reb_pk):
        self.redirect_pk = rev_pk
        rev = get_object_or_404(Review, pk=rev_pk)
        reb = rev.rebuttal_set.get(pk=reb_pk)
        # Soft delete, identical to the parent.
        reb.is_active = False
        reb.save()
        messages.add_message(request, messages.SUCCESS, 'Опровержение удалено')
        # Unlike the parent, no pk is passed: the profile list URL takes none.
        return redirect(self.redirect_url)
@login_required
def prof_rebuttal_detail(request, rev_pk, reb_pk):
    """Profile-page detail view for a single rebuttal."""
    reb = get_object_or_404(Rebuttal, pk=reb_pk)
    return render(request, 'authorization/prof_rebuttal_detail.html',
                  context={"rev_pk": rev_pk, "reb": reb, "img_set": reb.additionalimagereb_set.all()})
class ChangeUserInfoView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own account record only."""
    model = AdvUser
    template_name = 'authorization/change_user_info.html'
    form_class = ChangeUserInfoForm
    success_url = reverse_lazy('ReviewApp:profile')
    success_message = 'Личные данные пользователя изменены'

    def dispatch(self, request, *args, **kwargs):
        # Remember the requesting user's pk so get_object can never return
        # anyone else's record.
        self.user_id = request.user.pk
        return super().dispatch(request, *args, **kwargs)

    def get_object(self, queryset=None):
        if not queryset:
            queryset = self.get_queryset()
        return get_object_or_404(queryset, pk=self.user_id)
class UserPasswordChangeView(SuccessMessageMixin, LoginRequiredMixin, PasswordChangeView):
    """Change the logged-in user's password."""
    template_name = 'authorization/change_user_password.html'
    success_url = reverse_lazy('ReviewApp:profile')
    success_message = 'Пароль пользователя изменен'
    form_class = ProfPasswordChangeForm
class UserPasswordResetView(PasswordResetView):
    """Start the password-reset flow: sends the reset e-mail."""
    template_name = 'authorization/password_reset.html'
    subject_template_name = 'authorization/email/reset_letter_subject.txt'
    email_template_name = 'authorization/email/reset_letter_body.txt'
    success_url = reverse_lazy('ReviewApp:profile')
    form_class = UserPasswordResetForm
class UserPasswordResetDoneView(PasswordResetDoneView):
    """Shown after the reset e-mail has been sent."""
    template_name = 'authorization/password_reset_done.html'
class UserPasswordResetConfirmView(PasswordResetConfirmView):
    """Accept the token from the reset e-mail and set a new password."""
    template_name = 'authorization/password_confirm.html'
    success_url = reverse_lazy('ReviewApp:password_reset_complete')
    form_class = UserSetPasswordForm
class UserPasswordResetCompleteView(PasswordResetCompleteView):
    """Final page of the password-reset flow."""
    template_name = 'authorization/password_complete.html'
# ------------ComplaintsAndSuggestions-------------
class ComSugCreate(LoginRequiredMixin, View):
    """Send a complaint/suggestion message to the developers."""

    def get(self, request):
        form = ComSugForm(initial={'user_id': request.user.pk})
        return render(request, "authorization/add_complaint.html", context={"form": form})

    def post(self, request):
        form = ComSugForm(request.POST)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, 'Сообщение разработчикам отправлено')
            return redirect('ReviewApp:profile')
        return render(request, "authorization/add_complaint.html", context={"form": form})
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.util.frozendict import FrozenDict
@dataclass(frozen=True)
class NativeParsedPythonDependencies:
    """Result of the native (Rust) Python dependency parser."""
    # NOTE(review): tuple field meanings inferred from the annotation only
    # (import name -> (line number?, weak-import flag?)) — confirm at call sites.
    imports: FrozenDict[str, tuple[int, bool]]
    string_candidates: FrozenDict[str, int]

    def __init__(self, imports: dict[str, tuple[int, bool]], string_candidates: dict[str, int]):
        # Freeze the incoming dicts so the frozen dataclass is deeply
        # immutable and hashable; __setattr__ must be bypassed on a frozen
        # dataclass, hence object.__setattr__.
        object.__setattr__(self, "imports", FrozenDict(imports))
        object.__setattr__(self, "string_candidates", FrozenDict(string_candidates))
@dataclass(frozen=True)
class NativeParsedJavascriptDependencies:
    """Result of the native JavaScript dependency parser."""
    file_imports: frozenset[str]
    package_imports: frozenset[str]

    def __init__(self, file_imports: set[str], package_imports: set[str]):
        # BUG FIX: the original stored the incoming (mutable) sets directly,
        # contradicting the frozenset annotations and leaving the frozen
        # dataclass unhashable. Converting mirrors the FrozenDict conversion
        # done for the Python-dependency sibling class.
        object.__setattr__(self, "file_imports", frozenset(file_imports))
        object.__setattr__(self, "package_imports", frozenset(package_imports))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
import ujson
from sanic import Blueprint
from sanic import response
from geojson import FeatureCollection
from sanic.request import Request
from sanic_jwt import inject_user
from .service import (
get_hw_module_position_last_point_list, delete_hw_module_all_positions
)
from web_backend.nvlserver.module.hw_module.service import get_hw_module_element_by_traceable_object_id
# Blueprint exposing hardware-module position endpoints under /api/hw_module_position.
api_hw_module_position_blueprint = Blueprint('api_hw_module_position', url_prefix='/api/hw_module_position')
@api_hw_module_position_blueprint.route('/point/<traceable_object_id:int>', methods=['GET'])
@inject_user()
# @scoped(['user', 'billing', 'admin'], require_all=True, require_all_actions=True)
async def api_hw_module_position_last_point_get(
        request: Request,
        user,
        traceable_object_id: int = 0):
    """Return the last known position of a traceable object's hardware module
    as a GeoJSON FeatureCollection of points.

    :param request: incoming Sanic request
    :param traceable_object_id: id of the traceable object to look up
    :param user: injected by sanic_jwt; must carry a 'user_id'
    :return: raw JSON response — 200 with (possibly empty) data, 400 when the
             id is missing/zero, 401 when unauthenticated, 500 otherwise
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    hw_module_id = None
    if request.method == 'GET':
        if user:
            if user.get('user_id', None):
                if traceable_object_id:
                    traceable_object = await get_hw_module_element_by_traceable_object_id(request, traceable_object_id)
                    if traceable_object:
                        # Empty-string module ids are treated as "no module".
                        hw_module_id = (
                            traceable_object.get('module_id') if traceable_object.get('module_id') != '' else None)
                    hw_module_last_point_list = await get_hw_module_position_last_point_list(
                        request, user_id=user.get('user_id'), hw_module_id=hw_module_id)
                    if hw_module_last_point_list:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = FeatureCollection(
                            features=hw_module_last_point_list,
                            property={'layer_type': 'point'})
                        status = 200
                    else:
                        # No positions yet: still a success, with an empty collection.
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = FeatureCollection(
                            features=[],
                            property={'layer_type': 'point'})
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
        else:
            status = 401
            ret_val['message'] = 'server.unauthorized'
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_position_blueprint.route('/delete_all', methods=['GET'])
@inject_user()
# @scoped(['user', 'billing', 'admin'], require_all=True, require_all_actions=True)
async def api_hw_module_position_delete_all(
        request: Request,
        user):
    """Delete every stored hw-module position.

    :param request: incoming Sanic request
    :param user: injected user dict (must carry a 'user_id' key)
    :return: raw JSON response describing the outcome
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    if request.method == 'GET':
        if not user or not user.get('user_id', None):
            # No authenticated user available.
            status = 401
            ret_val['message'] = 'server.unauthorized'
        elif await delete_hw_module_all_positions(request):
            ret_val['success'] = True
            ret_val['message'] = 'server.query_success'
            ret_val['data'] = None
            status = 200
        else:
            # The service reported nothing deleted.
            status = 400
            ret_val['message'] = 'server.bad_request'
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
|
import pika
import time
# Establish a blocking connection to a RabbitMQ broker on localhost.
connection = pika.BlockingConnection(
    pika.ConnectionParameters(
        'localhost'
    )
)
channel = connection.channel()
# durable=True lets the queue survive a broker restart.
channel.queue_declare(queue='task_queue_2', durable=True)


def callback(ch, method, properties, body):
    """Simulate work (one second per '.' in the body), then ack the message."""
    print(' [x] Received %r' % body)
    time.sleep(body.count(b'.'))
    print(' [x] Done')
    # Ack only after the work is done, so unacked messages get redelivered
    # if this worker dies mid-task.
    ch.basic_ack(delivery_tag=method.delivery_tag)


# Hand each worker at most one unacknowledged message at a time (fair dispatch).
channel.basic_qos(prefetch_count=1)
# NOTE(review): the callback-first positional signature is pika < 1.0;
# pika >= 1.0 expects basic_consume(queue=..., on_message_callback=...) --
# confirm the installed pika version before upgrading.
channel.basic_consume(
    callback,
    queue='task_queue_2'
)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
import os
import cPickle
from .._utils import ui
__all__ = ('pickle',)
def pickle(obj, dest=None, protocol=cPickle.HIGHEST_PROTOCOL):
    """Pickle a Python object.

    Parameters
    ----------
    obj : object
        The object to pickle.
    dest : None | str
        Path to destination where to save the file. If no destination is
        provided, a file dialog is shown. If a destination without extension is
        provided, '.pickled' is appended.
    protocol : int
        Pickle protocol (default is HIGHEST_PROTOCOL).

    Raises
    ------
    RuntimeError
        If no destination was given and the user cancels the save dialog.
    """
    if dest is None:
        # No path given: ask interactively via the ui helper.
        filetypes = [("Pickled Python Objects (*.pickled)", '*.pickled')]
        dest = ui.ask_saveas("Pickle Destination", "", filetypes)
        if dest is False:
            raise RuntimeError("User canceled")
        else:
            # NOTE: Python 2 print statement -- this module predates Python 3.
            print 'dest=%r' % dest
    else:
        # Expand '~' and append the default extension when none was given.
        dest = os.path.expanduser(dest)
        if not os.path.splitext(dest)[1]:
            dest += '.pickled'
    # Binary mode is required for pickling.
    with open(dest, 'wb') as fid:
        cPickle.dump(obj, fid, protocol)
|
# -*- coding:UTF-8 -*-
from urllib import request
from bs4 import BeautifulSoup
import collections
import re
import os
import time
import sys
import types
import pymysql
import requests
import json
import random
"""
类说明:下载《笔趣看》网小说: url:http://www.biqukan.com/
Parameters:
target - 《笔趣看》网指定的小说目录地址(string)
Returns:
无
Modify:
2017-05-06
"""
class download(object):
    # Scraper for novels on www.biqukan.com: collects chapter links from the
    # table-of-contents page and stores novel metadata plus chapter content
    # into a local MySQL database via rotating proxies and User-Agents.

    def __init__(self, target):
        # Remember the novel's table-of-contents URL.
        self.__target_url = target
    """
    函数说明:获取下载链接
    Parameters:
        无
    Returns:
        novel_name + '.txt' - 保存的小说名(string)
        numbers - 章节数(int)
        download_dict - 保存章节名称和下载链接的字典(dict)
    Modify:
        2017-05-06
    """
    def get_download_info(self):
        # Parse the table-of-contents page: returns the novel's metadata, the
        # chapter count and an OrderedDict of {chapter title: chapter URL}.
        # Pattern matches "第<num>章" headings ("弟" covers a common typo).
        charter = re.compile(u'[第弟](.+)章', re.IGNORECASE)
        request = self.get_request(url=self.__target_url)
        # The site serves GBK; undecodable bytes are dropped.
        target_html = request.content.decode('gbk', 'ignore')
        listmain_soup = BeautifulSoup(target_html, 'lxml')
        chapters = listmain_soup.find_all('div', class_='listmain')
        download_soup = BeautifulSoup(str(chapters), 'lxml')
        # The novel title sits in the <dt> header before the closing 》.
        novel_name = str(download_soup.dl.dt).split("》")[0][5:]
        flag_name = "《" + novel_name + "》" + "正文卷"
        # NOTE(review): dead store -- this value is overwritten by
        # `numbers = 1` below before it is ever read.
        numbers = (len(download_soup.dl.contents) - 1) / 2 - 8
        download_dict = collections.OrderedDict()
        begin_flag = False
        numbers = 1
        for child in download_soup.dl.children:
            if child != '\n':
                # Start collecting once the "main body" marker is reached,
                # skipping the "latest chapters" teaser entries above it.
                if child.string == u"%s" % flag_name:
                    begin_flag = True
                if begin_flag == True and child.a != None:
                    download_url = "http://www.biqukan.com" + child.a.get('href')
                    download_name = child.string
                    names = str(download_name).split('章 ')
                    name = charter.findall(names[0] + '章')
                    if name:
                        try:
                            # Renumber chapters sequentially, keeping the
                            # original title tail after the marker.
                            download_dict['第' + str(numbers) + '章 ' + names[1]] = download_url
                        except Exception as e:
                            # Title had no text after the chapter marker.
                            download_dict['第' + str(numbers) + '章 '] = download_url
                        numbers += 1
        # Pull novel metadata from the "small" info block; each span is
        # "<label>:<value>" so we keep the part after the full-width colon.
        novel_soup = BeautifulSoup(target_html, 'lxml')
        novel_info = novel_soup.find('div', attrs={'class': 'small'}).find_all('span')
        novel_author = novel_info[0].get_text().split(':')[1]
        novel_cate = novel_info[1].get_text().split(':')[1]
        novel_status = novel_info[2].get_text().split(':')[1]
        novel_word_count = novel_info[3].get_text().split(':')[1]
        novel_last_update_time = novel_info[4].get_text().split(':')[1]
        return novel_name, novel_author, novel_cate, novel_status, novel_word_count, novel_last_update_time, numbers, download_dict
    """
    函数说明:爬取文章内容
    Parameters:
        url - 下载连接(string)
    Returns:
        soup_text - 章节内容(string)
    Modify:
        2017-05-06
    """
    def download_content(self, url):
        # Fetch one chapter page and extract its text from the #content div.
        download_req = self.get_request(url=url)
        download_html = download_req.content.decode('gbk', 'ignore')
        soup_texts = BeautifulSoup(download_html, 'lxml')
        texts = soup_texts.find_all(id='content', class_='showtxt')
        try:
            # Strip non-breaking spaces from the chapter body.
            soup_text = BeautifulSoup(str(texts), 'lxml').div.text.replace('\xa0', '')
            return soup_text
        except Exception as e:
            # Malformed page: report an empty chapter instead of crashing.
            return 'empty'
    def write_chapter_content_to_sql(self, novel_id, title, text):
        # Insert a single chapter row.
        # NOTE(review): hard-coded localhost/root credentials; a fresh
        # connection is opened per call and never closed.
        db = pymysql.connect('127.0.0.1', 'root', 'root', 'py', charset='utf8')
        cursor = db.cursor()
        sql = "INSERT INTO biqukan_chapter(novel_id, title, content) VALUES (%s, %s, %s)"
        cursor.execute(sql, (novel_id, title, text))
        db.commit()
    def write_novel_info_to_sql(self, name, author, cate, status, word_count, last_update_time):
        # Insert the novel's metadata row; returns its autoincrement id so
        # chapters can reference it.
        db = pymysql.connect('127.0.0.1', 'root', 'root', 'py', charset='utf8')
        cursor = db.cursor()
        sql = "INSERT INTO biqukan_novel(name, author, cate, status, word_count, last_update_time) VALUES (%s, %s, %s, %s, %s, %s)"
        cursor.execute(sql, (name, author, cate, status, word_count, last_update_time))
        db.commit()
        return cursor.lastrowid
    def get_proxy(self):
        # Ask the local proxy-pool service for candidates and pick at random.
        # NOTE(review): the two random.choice calls may combine the ip of one
        # entry with the port of another -- presumably unintended; confirm.
        r = requests.get('http://127.0.0.1:8000/?count=50')
        ip_ports = json.loads(r.text)
        proxies = {
            'http': 'http://%s:%s' % (random.choice(ip_ports)[0], random.choice(ip_ports)[1]),
        }
        return proxies
    def get_headers(self):
        # Rotate User-Agent strings so requests look less like a bot.
        UA_LIST = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
        headers = {
            'User-Agent': random.choice(UA_LIST)
        }
        return headers
    def get_request(self, url):
        # GET through a random proxy with a random UA; retry recursively on
        # any failure or non-200 status.
        # NOTE(review): unbounded recursion -- a persistently failing URL
        # will eventually exhaust the recursion limit; confirm acceptable.
        proxies = self.get_proxy()
        headers = self.get_headers()
        try:
            request = requests.get(url=url, proxies=proxies, headers=headers, timeout=3)
        except Exception as e:
            return self.get_request(url)
        if (request.status_code == 200):
            return request
        else:
            return self.get_request(url)
if __name__ == "__main__":
    # Novel table-of-contents URL
    # target_url = str(input("请输入小说目录下载地址:\n"))
    # Walk novel ids upward starting at 196.
    i = 196
    while True:
        target_url = 'http://www.biqukan.com/1_' + str(i)
        # Instantiate the downloader for this novel
        d = download(target=target_url)
        # Fetch the novel's metadata, chapter count and chapter-link dict
        novel_name, novel_author, novel_cate, novel_status, novel_word_count, novel_last_update_time, numbers, chapter_url_dict = d.get_download_info()
        novel_id = d.write_novel_info_to_sql(novel_name, novel_author, novel_cate, novel_status, novel_word_count,
                                             novel_last_update_time)
        # Download each chapter via its link
        for chapter_name, url in chapter_url_dict.items():
            # Write the chapter into the database; log and continue on failure
            try:
                d.write_chapter_content_to_sql(novel_id, chapter_name, d.download_content(url))
                print("爬取《%s》完成" % chapter_name)
            except Exception as e:
                print("爬取《%s》失败" % chapter_name)
        # Report the finished novel
        print("《%s》下载完成!" % novel_name[:-4])
        # NOTE(review): this loop never terminates; it walks ids forever.
        i += 1
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def maxProductDifference(self, nums: List[int]) -> int:
        """Return (max1 * max2) - (min1 * min2) for a list of ints (len >= 4).

        Sorting exposes the two largest and two smallest values directly.
        """
        ordered = sorted(nums)
        return ordered[-1] * ordered[-2] - ordered[0] * ordered[1]
if __name__ == "__main__":
    # Smoke tests: (7*6) - (2*4) = 34 and (9*8) - (2*4) = 64.
    solution = Solution()
    assert 34 == solution.maxProductDifference([5, 6, 2, 7, 4])
    assert 64 == solution.maxProductDifference([4, 2, 5, 9, 7, 4, 8])
|
# -*- coding: utf-8 -*-
# @Time : 2018/7/15 11:04
# @Author : xnbo
# @Site :
# @File : websocket_base.py
# @Software: PyCharm
import threading
class WebSocketBase(object):
    """Base class holding websocket configuration and callbacks.

    Concrete exchange connectors are expected to override exit(),
    check_thread_impl() and send_command().
    """

    def __init__(self, service_base, url=None):
        """Copy connection settings and callbacks from *service_base*.

        An explicit *url* overrides service_base.ws_url.
        """
        self.exchange = service_base.exchange
        self.logger = service_base.logger
        # Fall back to the service's websocket URL when none was given.
        self.url = url if url else service_base.ws_url
        self.on_message = service_base.on_message
        self.on_close = service_base.on_close
        self.on_open = service_base.on_open
        self.on_error = service_base.on_error
        self.ping_interval = service_base.ping_interval
        self.exited = False

    def start_check_thread(self):
        """Spawn the daemon thread that monitors connection health."""
        self.check_thread = threading.Thread(target=self.check_thread_impl)
        self.check_thread.daemon = True
        self.check_thread.start()

    def exit(self):
        """Close the websocket; overridden by derived classes."""

    def check_thread_impl(self):
        """Check websocket connection status; overridden by derived classes."""

    def send_command(self, command):
        """Send a raw command; overridden by derived classes.

        :param command: payload to transmit
        """
|
from alignment import *

# 3x3 signed test matrix for the alignment routines (np comes from the
# star-import above -- presumably alignment re-exports numpy; confirm).
I = np.mat('-1 1 0; 0 -1 0; 0 1 -1')
m, n = np.shape(I)
r = 2
# NOTE(review): `iter` shadows the builtin of the same name.
iter = 100
eps = 1e-4
# Run mixed alignment on I (last argument presumably a verbosity flag --
# confirm against the alignment module's signature).
(V, U) = mixed_alignment(I, eps, iter, True)
# Second case: J = -identity with ones in row 0's off-diagonal entries.
J = np.eye(3)*-1
J[0,1:3] = 1
(V, U) = simple_alignment(2, J)
|
def calculate_ratio(w, h):
    """Return the reduced aspect ratio of an image as a 'W:H' string.

    Per the kata below, a zero width or height must not produce a ratio:
    the sentinel string 'You threw an error' is returned instead.

    Fix: zeros are now rejected *before* calling gcd -- previously
    calculate_ratio(0, 0) crashed with ZeroDivisionError (gcd(0, 0) == 0)
    instead of taking the documented error path.
    """
    if not w or not h:
        return 'You threw an error'
    c_gcd = gcd(w, h)
    return '{}:{}'.format(w // c_gcd, h // c_gcd)
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
'''
We all use 16:9, 16:10, 4:3 etc. ratios every day. Main task is to determine image
ratio by its width and height dimensions.
Function should take width and height of an image and return a ratio string (ex."16:9").
If any of width or height entry is 0 function should throw an exception (or return Nothing).
''' |
import json
class Record(object):
    """Bookkeeping helper: persists crawl breakpoints and results to disk."""

    def __init__(self):
        # File logging failed keyword/page pairs for later re-collection.
        self.breakpoint = r'E:\20190320\taobao\breakpoint.txt'
        # File receiving scraped items, one JSON object per line.
        self.result = r'E:\20190320\taobao\result.json'
        # File listing keywords that yielded no search results.
        self.no_result = r'E:\20190320\taobao\no_result.txt'

    def record_breakpoint(self, keyword, page):
        """Append a failed keyword/page pair so the crawl can resume later.

        :param keyword: search keyword being scraped
        :param page: page number that failed
        """
        line = keyword + '----' + str(page) + "\n"
        with open(self.breakpoint, 'a+', encoding='utf-8', errors='ignore') as f:
            f.write(line)

    def record_result(self, data):
        """Append *data* (a dict) to the result file as one JSON line.

        :param data: scraped item as a dict; stored as JSON
        """
        serialized = json.dumps(data, ensure_ascii=False)
        with open(self.result, 'a+', encoding="utf-8", errors='ignore') as f:
            f.write(serialized + "\n")

    def no_search_result(self, keyword):
        """Record a keyword that produced no search results.

        :param keyword: the fruitless search keyword
        """
        with open(self.no_result, 'a+', encoding="utf-8") as f:
            f.write(keyword + "\n")
|
# Directory holding the cleaned webpage corpus.
RAW_WEBPAGES = "WEBPAGES_CLEAN"
# Bookkeeping file inside the corpus (presumably maps file ids to URLs --
# confirm against the indexer that consumes it).
BOOKKEEPING = "WEBPAGES_CLEAN/bookkeeping.json"
# Cap on the number of results returned per query.
TOP_N_results = 500
# On-disk locations of the inverted index and its header.
INDEX_PATH = 'index.json'
HEADER_PATH = "header.json"
|
import torch
class WarmLR(torch.optim.lr_scheduler._LRScheduler):
    """Additive warm-up scheduler.

    After the first epoch, each param group's lr is offset by
    ``lr_lambda(epoch)`` on top of its *current* lr (the base class applies
    whatever get_lr() returns).  The lambda handling mirrors
    torch.optim.lr_scheduler.LambdaLR, but the value is added rather than
    used as a multiplier of the base lr.
    """

    def __init__(self, optimizer, lr_lambda, last_epoch=-1):
        # Accept either one callable (broadcast to every param group) or a
        # list/tuple with exactly one callable per group.
        self.optimizer = optimizer
        if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
        else:
            if len(lr_lambda) != len(optimizer.param_groups):
                raise ValueError("Expected {} lr_lambdas, but got {}".format(
                    len(optimizer.param_groups), len(lr_lambda)))
            self.lr_lambdas = list(lr_lambda)
        self.last_epoch = last_epoch
        # Base-class init records base_lrs and performs the initial step.
        super(WarmLR, self).__init__(optimizer, last_epoch)

    def state_dict(self):
        """Return scheduler state, excluding the optimizer.

        Plain lambda *functions* are not serialized (their slot stays None);
        callable objects contribute a copy of their ``__dict__``.
        """
        import types
        state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}
        state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)
        for idx, fn in enumerate(self.lr_lambdas):
            if not isinstance(fn, types.FunctionType):
                state_dict['lr_lambdas'][idx] = fn.__dict__.copy()
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore state produced by state_dict().

        Callable-object lambdas get their ``__dict__`` updated in place;
        the caller's dict is left unchanged afterwards.
        """
        lr_lambdas = state_dict.pop('lr_lambdas')
        self.__dict__.update(state_dict)
        # Put the popped entry back so the caller's dict is not mutated.
        state_dict['lr_lambdas'] = lr_lambdas
        for idx, fn in enumerate(lr_lambdas):
            if fn is not None:
                self.lr_lambdas[idx].__dict__.update(fn)

    def get_lr(self):
        # After the first epoch, offset each group's current lr by its
        # lambda's value; at initialization return the recorded base lrs.
        if self.last_epoch > 0:
            return [group['lr'] + lmbda(self.last_epoch)
                    for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)]
        else:
            return list(self.base_lrs)
def get_opt(model, cfg_train, logger=None, is_warm=False, its_total=0):
    """Build an optimizer and LR scheduler for the model's trainable params.

    :param model: torch.nn.Module whose requires_grad parameters are trained
    :param cfg_train: config object with TYPE ('adam'/'sgd'), LR, BETA1,
        BETA2 (adam), WEIGHT_DECAY, LR_TYPE ('cos' or multistep), LR_REDUCE,
        LR_FACTOR and LR_WARM (warm-up)
    :param logger: optional logger used to report an unknown optimizer type
    :param is_warm: when True, return SGD at LR_WARM plus a WarmLR scheduler
        that linearly ramps toward LR
    :param its_total: total iteration count (warm-up slope / cosine period)
    :return: (optimizer, lr_scheduler)
    :raises ValueError: if cfg_train.TYPE is not a known optimizer type
    """
    trainable_vars = [param for param in model.parameters() if param.requires_grad]
    if is_warm:
        opt = torch.optim.SGD(trainable_vars,
                              lr=cfg_train.LR_WARM,
                              momentum=cfg_train.BETA1,
                              weight_decay=cfg_train.WEIGHT_DECAY,)
        # Linear warm-up: every step adds LR / its_total on top of LR_WARM.
        factor = float((cfg_train.LR) / its_total)
        lmbda = lambda its: factor
        lr_scheduler = WarmLR(opt, lmbda, last_epoch=-1)
        return opt, lr_scheduler
    if cfg_train.TYPE == 'adam':
        opt = torch.optim.Adam(trainable_vars,
                               lr=cfg_train.LR,
                               betas=(cfg_train.BETA1, cfg_train.BETA2),
                               eps=1e-08,
                               weight_decay=cfg_train.WEIGHT_DECAY,
                               amsgrad=False)
    elif cfg_train.TYPE == 'sgd':
        opt = torch.optim.SGD(trainable_vars,
                              lr=cfg_train.LR,
                              momentum=cfg_train.BETA1,
                              weight_decay=cfg_train.WEIGHT_DECAY,)
    else:
        # Previously this branch only logged (crashing on logger=None) and
        # then fell through to a NameError on the undefined `opt`.
        # Fail fast with a clear exception instead.
        if logger is not None:
            logger.error("{} not exist in opt type".format(cfg_train.TYPE))
        raise ValueError("unknown optimizer type: {}".format(cfg_train.TYPE))
    if cfg_train.LR_TYPE == 'cos':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, its_total, eta_min=0, last_epoch=-1)
    else:
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(opt, cfg_train.LR_REDUCE,
                                                            gamma=cfg_train.LR_FACTOR,
                                                            last_epoch=-1)
    return opt, lr_scheduler
# -*- coding: utf-8 -*-
import heapq
from collections import Counter
class Solution:
    def topKFrequent(self, words, k):
        """Return the k most frequent words; ties break alphabetically."""
        freq = Counter(words)
        # Sort by descending count, then ascending word, then keep the top k.
        ranked = sorted(freq.items(), key=lambda item: (-item[1], item[0]))
        return [word for word, _ in ranked[:k]]
if __name__ == "__main__":
    # Smoke tests, including the alphabetical tie-break ("aaa"/"aa"/"a" all
    # appear once, so the lexicographically smallest "a" wins).
    solution = Solution()
    assert ["i", "love"] == solution.topKFrequent(
        ["i", "love", "leetcode", "i", "love", "coding"], 2
    )
    assert ["the", "is", "sunny", "day"] == solution.topKFrequent(
        ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], 4
    )
    assert ["a"] == solution.topKFrequent(["aaa", "aa", "a"], 1)
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import threading
from time import sleep
import cv2
import numpy as np
from cv2 import aruco
import array
import serial
from math import radians
from smbus import SMBus
# Open the I2C bus for talking to the Arduino.
try:
    addr = 0x8  # I2C bus address of the Arduino slave
    bus = SMBus(1)  # indicates /dev/ic2-1
except:
    # NOTE(review): bare except hides the real failure, and `bus` remains
    # undefined afterwards -- later uses would raise NameError.
    print("port not availble")
# Wait for connection to complete
time.sleep(3)
detecting = True
def write_then_read(bus, value):
    """Send *value* over I2C, then read the Arduino's reply.

    NOTE(review): read_from_arduino is not defined anywhere in this file --
    calling this raises NameError unless it is provided elsewhere; confirm.
    """
    write_to_i2c(bus, value)
    read_from_arduino(bus)
def write_to_i2c(bus, value):
    """Send *value* to the Arduino (module-level `addr`) one ASCII byte at
    a time, logging but swallowing any transmission error."""
    print(value)
    try:
        for byte in value.encode('ascii'):
            bus.write_byte(addr, byte)  # switch it on
    except Exception as e:
        print(e)
        print("WRITE Error")
def set_high_res():
    # Switch the capture to full 2592x1944 resolution.
    # NOTE(review): `cap` is not a module-level name (it is local to
    # share_points), so calling this raises NameError -- confirm intent.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH,2592)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,1944)
def set_low_res():
    # Switch the capture to low 640x380 resolution.
    # NOTE(review): `cap` is not a module-level name (it is local to
    # share_points), so calling this raises NameError -- confirm intent.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH,640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,380)
def share_points():
    """Vision loop: detect an ArUco marker, derive the angle and distance to
    it, and push the result to the Arduino over I2C on a background thread.

    Runs forever; never returns.
    """
    print("starting up")
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, 150)
    cap.set(cv2.CAP_PROP_EXPOSURE, 0.01)
    while True:
        print("capturing a frame")
        # Capture frame-by-frame
        start = time.time()
        ret, frame = cap.read()
        gray = frame
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
        gray = aruco.drawDetectedMarkers(gray, corners, ids)
        height, width, _ = gray.shape
        print(height, width)
        deltaX = 0
        deltaY = 0
        # Pixel coordinates of the image center.
        absX = int(width/2)
        absY = int(height/2)
        #print("Corner: ", corners)
        if len(corners): #returns no of arucos
            #print (len(corners)
            aruco_list = {}
            #print (len(ids))
            # Use only the first detected marker's four corners.
            markerOne = corners[0][0]
            cornerOne = markerOne[0]
            cornerTwo = markerOne[1]
            cornerThree = markerOne[2]
            cornerFour = markerOne[3]
            # ANGLE
            centerX1 = ((cornerTwo[0] + cornerFour[0]) / 2)
            centerX2 = ((cornerOne[0] + cornerThree[0]) / 2)
            # calcualte center X coordinate of marker
            centerX = (centerX1+centerX2) / 2
            # find out how off centered we are
            deltaX = abs(absX-centerX)
            # get angle -- 54 is presumably the camera's horizontal FOV in
            # degrees; confirm against the camera spec.
            xFOV = (deltaX/width) * 54
            angle = xFOV
            #print("ANGLE: ",angle)
            # if we are left of screen center, apply left polynomial fix
            # (empirically fitted lens-correction curve)
            if(centerX < 320):
                error = 0.0000487516774389672*(centerX**2)-0.0137673927681325*centerX-0.425377206030661
                angle = angle - error
                angle = 0 - angle
            # if we are right of screen center, apply right polynominal fix
            else:
                error = 0.000032254210770952*(centerX**2)-0.03289220867007*centerX+7.53540624358558
                angle = angle - error
            if (centerX > 319 and centerX < 321):
                # if we are centered, set angle to 0
                angle = 0
            # ANGLE DONE
            # DISTANCE
            # Marker pixel width/height averaged over opposite edges.
            deltaX1 = abs(cornerTwo[0] - cornerOne[0])
            deltaX2 = abs(cornerThree[0] - cornerFour[0])
            deltaY1 = abs(cornerFour[1] - cornerOne[1])
            deltaY2 = abs(cornerThree[1] - cornerTwo[1])
            arucoWidth = (deltaX1+deltaX2) / 2
            arucoHeight = (deltaY1+deltaY2) / 2
            #figure out width of screen
            # 0.10 is presumably the marker's physical size in meters; confirm.
            screenWidth = 0.10 / (arucoWidth / width)
            screenHeight = 0.10 / (arucoHeight / height)
            # figure out how many pixels correlates t
            # f/x/y presumably focal length and sensor dimensions in meters.
            f = 0.0036
            x = 0.0038
            y = 0.0029
            #Z1 = f*(screenWidth/x)
            Z2 = f*(screenHeight/y)
            finalDistance = Z2
            # DISTANCE DONE
            # POINTS
            boxOffset = 0.2 #0.1524
            # Are all boxes the same size?
            # x/y are reused from here on as indices into the [x, y] points.
            x = 0
            y = 1
            # figure out points to move to
            point1 = [finalDistance - boxOffset, 0]
            point2 = [0 , 2*boxOffset]
            point3 = [4*boxOffset, 0]
            point4 = [0, -4*boxOffset]
            point5 = [-4*boxOffset, point4[y]]
            point6 = [0, 2*boxOffset]
            # NOTE(review): copy-paste bug -- every line below rounds
            # point1[y] and stores it into a point's *x* slot; presumably
            # each point's own x and y were meant to be rounded in place.
            # Currently harmless only because value_to_send is overwritten
            # before the I2C write below.
            point1[x] = round(point1[y], 3)
            point1[x] = round(point1[y], 3)
            point2[x] = round(point1[y], 3)
            point2[x] = round(point1[y], 3)
            point3[x] = round(point1[y], 3)
            point3[x] = round(point1[y], 3)
            point4[x] = round(point1[y], 3)
            point4[x] = round(point1[y], 3)
            point5[x] = round(point1[y], 3)
            point5[x] = round(point1[y], 3)
            point6[x] = round(point1[y], 3)
            point6[x] = round(point1[y], 3)
            points = [point1, point2, point3, point4, point5, point6]
            # NOTE(review): dead store -- overwritten before sending.
            value_to_send = '(' + str(point1[x]) + ',' + str(point1[y]) + ')' + '(' + str(point2[x]) + ',' + str(point2[y]) + ')' + '(' + str(point3[x]) + ',' + str(point3[y]) + ')'+ '(' + str(point4[x]) + ',' + str(point4[y]) + ')'
            #write_to_i2c(value_to_send)
            sleep(0.01)
            # NOTE(review): dead store -- overwritten below as well.
            value_to_send = '(' + str(point5[x]) + ',' + str(point5[y]) + ')' + '(' + str(point6[x]) + ',' + str(point6[y]) + ')'
            # POINTS DONE
            # FINAL ADJUSTMENTS
            angle = round(radians(angle - 3.8059460516), 4) # need to verify that adjustment is correct
            # Wire format: 'A<angle>S' followed by 'D<distance>S'.
            value_to_send = 'A' + str(angle) + 'S'
            print("Distance: ", finalDistance)
            value_to_send += 'D' + str(round(finalDistance - boxOffset , 4)) + 'S'
            #print("ANGLE: ", angle)
            #print("POINTS: ", points)
            # Use multithreading to send information to the Arduino
            thread_list = []
            for thread in threading.enumerate():
                thread_list.append(thread.getName())
            # Only spawn a new sender thread once the previous one finished.
            if "send" not in thread_list:
                t1 = threading.Thread(target=write_to_i2c, name="send", args=(bus, value_to_send,))
                t1.start()
# Start the detection loop (blocks forever).
share_points()
# NOTE(review): unreachable -- share_points() never returns.
cv2.waitKey(0)
|
from django.conf import settings
# Configure Django manually when this module is used outside a full project.
if not settings.configured:
    settings.configure()
import json
import urllib
import simplejson
import googlemaps
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
import requests
from lxml import html
from BeautifulSoup import BeautifulSoup
#from busutil import *
# Base URL of Google's geocoding REST API; the query string is appended later.
# NOTE: Python 2 module (urllib.urlencode/urlopen, print statement below).
googleGeocodeUrl = 'http://maps.googleapis.com/maps/api/geocode/json?'
#def calcuatefare(n):
# if n<=4:
# return '100'
# else:
# return str(100+(13*(n-4)))
def index(request):
    """Render the cab search form."""
    return render(request,'cabform.html')
def cab(request):
    """Handle the cab form: geocode origin/destination, query Uber price
    estimates between them, and render the results.

    NOTE(review): the Uber server token is hard-coded below -- move it to
    settings/environment before shipping.
    """
    if request.method != 'POST':
        # Only form submissions are accepted; bounce GETs to the form page.
        return HttpResponseRedirect('/')
    src = request.POST.get('from')
    dst = request.POST.get('to')
    startlat,startlong=get_coordinates(str(src),from_sensor=False)
    dstlat,dstlong=get_coordinates(str(dst),from_sensor=False)
    # gdis=googlemaps.Client(key='AIzaSyBcvocj_-OQspLCSu-L6FwNw81ttKbWBxQ')
    # distance=gdis.distance_matrix(src,dst)
    # dis=distance['rows'][0]['elements'][0]['distance']['value']/100
    # price=calcuatefare(dis)
    # NOTE(review): the four values below are computed but never used.
    lat,long=get_coordinates("J.L.N Marg,Malviya Nagar, Jaipur",from_sensor=False)
    lat2,long2=get_coordinates("lnmiit, Jaipur",from_sensor=False)
    url = 'https://api.uber.com/v1/estimates/price'
    parameters = {
        'server_token': 'YcSR2FOOfJMtreWpBeDqeyjDmm8Hj1pSh1ZnQP9h',
        'start_latitude': startlat,
        'start_longitude': startlong,
        'end_latitude': dstlat,
        'end_longitude':dstlong,
    }
    response = requests.get(url, params=parameters)
    data = response.json()
    # Render the 'prices' array from Uber's estimates payload.
    return render(request,'cab.html',{'data':data['prices']})
def get_coordinates(query, from_sensor=False):
    """Geocode *query* via the Google Geocoding API.

    Returns (latitude, longitude), or (None, None) when the API reports no
    results.  Python 2 code: uses urllib.urlencode/urlopen and a print
    statement.
    """
    query = query.encode('utf-8')
    # NOTE(review): dead store -- this Uber URL is overwritten with the
    # Google geocode URL a few lines below and never used.
    url = 'https://api.uber.com/v1/estimates/price'
    params = {
        'address': query,
        'sensor': "true" if from_sensor else "false"
    }
    url = googleGeocodeUrl + urllib.urlencode(params)
    json_response = urllib.urlopen(url)
    response = simplejson.loads(json_response.read())
    if response['results']:
        location = response['results'][0]['geometry']['location']
        latitude, longitude = location['lat'], location['lng']
    else:
        latitude, longitude = None, None
        print query, "<no results>"
    return latitude, longitude
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.