text stringlengths 8 6.05M |
|---|
from sklearn.model_selection import train_test_split
import utils
from DL4.GAN_model import GAN
import tensorflow as tf

# Fixed seed so the train/test splits and model training are reproducible.
SEED = 42

# Data handling:
# German credit dataset:
german_credit_data = utils.load_data("german_credit.arff")
german_credit_preprocessed_data, german_credit_target = utils.preprocessing_data(german_credit_data, "german_credit")
# Diabetes dataset:
diabetes_data = utils.load_data("diabetes.arff")
diabetes_preprocessed_data, diabetes_target = utils.preprocessing_data(diabetes_data, "diabetes")
#
# ####################################################################################
# ###################################### PART 1 ######################################
# ####################################################################################
#
# # Training Diabetes GAN model:
# discriminator_network_params = {
# 'dimensions': [50, 30, 15, 10, 5],
# 'input_dim': diabetes_preprocessed_data.shape[1],
# 'activations': ['relu', 'relu', 'relu', 'relu']
# }
# generator_network_params = {
# 'dimensions': [50, 70, 50],
# 'input_dim': diabetes_preprocessed_data.shape[1],
# 'output_dim': diabetes_preprocessed_data.shape[1],
# 'activations': ['relu', 'relu', 'relu']
# }
# diabetes_gan = GAN(discriminator_network_params, generator_network_params, epochs=5000)
# diabetes_gan.data = diabetes_preprocessed_data
# diabetes_gan = diabetes_gan.fit(X=diabetes_gan.data, patiance=1000)
# diabetes_gan.plot_acc_graph(diabetes_gan.gen_acc, "Generator", "diabetes")
# diabetes_gan.plot_acc_graph(diabetes_gan.disc_acc, "Discriminator", "diabetes")
# diabetes_gan.plot_loss_graph(diabetes_gan.gen_loss, "Generator", "diabetes")
# diabetes_gan.plot_loss_graph(diabetes_gan.disc_loss, "Discriminator", "diabetes")
#
# tf.keras.models.save_model(diabetes_gan.GAN_model, 'checkpoint/diabetes/GAN/')
# tf.keras.models.save_model(diabetes_gan.generator_model, 'checkpoint/diabetes/Gen/')
# tf.keras.models.save_model(diabetes_gan.discriminator_model, 'checkpoint/diabetes/Disc/')
# Training German-Credit GAN model:
# Discriminator: five dense layers shrinking from 80 down to 5 units.
discriminator_network_params = {
    'dimensions': [80, 40, 20, 10, 5],
    'input_dim': german_credit_preprocessed_data.shape[1],
    # NOTE(review): 4 activations listed for 5 layers — presumably the final
    # layer's activation is fixed inside GAN; confirm in DL4.GAN_model.
    'activations': ['relu', 'relu', 'relu', 'relu']
}
# Generator: input and output widths both match the feature dimension.
generator_network_params = {
    'dimensions': [80, 120, 80],
    'input_dim': german_credit_preprocessed_data.shape[1],
    'output_dim': german_credit_preprocessed_data.shape[1],
    'activations': ['relu', 'relu', 'relu']
}
# NOTE(review): indices 3..70 presumably mark one-hot ("dummy") encoded
# categorical columns — confirm against utils.preprocessing_data.
german_gan = GAN(discriminator_network_params, generator_network_params, epochs=5000, dummy_fields_indices=[i for i in range(3, 71)])
german_gan.data = german_credit_preprocessed_data
# NOTE(review): 'patiance' (sic) is the parameter name declared by GAN.fit.
# Unlike the commented-out diabetes run above, fit()'s return value is
# discarded here — confirm that fit mutates the model in place.
german_gan.fit(X=german_gan.data, patiance=1000)
# Training curves for both sub-networks.
german_gan.plot_acc_graph(german_gan.gen_acc, "Generator", "german_credit")
german_gan.plot_acc_graph(german_gan.disc_acc, "Discriminator", "german_credit")
german_gan.plot_loss_graph(german_gan.gen_loss, "Generator", "german_credit")
german_gan.plot_loss_graph(german_gan.disc_loss, "Discriminator", "german_credit")
# Persist the trained networks for later reuse.
tf.keras.models.save_model(german_gan.GAN_model, 'checkpoint/german/GAN/')
tf.keras.models.save_model(german_gan.generator_model, 'checkpoint/german/Gen/')
tf.keras.models.save_model(german_gan.discriminator_model, 'checkpoint/german/Disc/')
####################################################################################
###################################### PART 2 ######################################
####################################################################################
# Part 2: train GANs against a black-box (BB) random-forest classifier.
discriminator_BB_network_params = {
    'dimensions': [50, 30, 15, 10, 5],
    # NOTE(review): input widened by 2 — presumably the features plus two
    # extra columns used in the BB setting; confirm in
    # utils.train_and_evaluate_BB_model.
    'input_dim': diabetes_preprocessed_data.shape[1] + 2,
    'activations': ['relu', 'relu', 'relu', 'relu']
}
generator_BB_network_params = {
    'dimensions': [50, 70, 50],
    # Generator works in feature-space plus one extra column — TODO confirm
    # what the extra column carries.
    'input_dim': diabetes_preprocessed_data.shape[1]+1,
    'output_dim': diabetes_preprocessed_data.shape[1]+1,
    'activations': ['relu', 'relu', 'relu']
}
# Hold out 30% of the diabetes data for evaluating the random forest.
diabetes_X_train, diabetes_X_test, diabetes_y_train, diabetes_y_test = train_test_split(diabetes_preprocessed_data, diabetes_target, test_size=0.3, random_state=SEED)
diabetes_rf = utils.get_random_forest_model(diabetes_X_train, diabetes_X_test, diabetes_y_train, SEED, 'diabetes')
diabetes_gan_BB = utils.train_and_evaluate_BB_model('diabetes', diabetes_rf, diabetes_preprocessed_data, diabetes_target,
                                                    SEED, discriminator_BB_network_params, generator_BB_network_params)
# Same pipeline for the German-credit dataset, with wider layers.
discriminator_BB_network_params = {
    'dimensions': [80, 40, 20, 10, 5],
    'input_dim': german_credit_preprocessed_data.shape[1]+2,
    'activations': ['relu', 'relu', 'relu', 'relu']
}
generator_BB_network_params = {
    'dimensions': [80, 120, 80],
    'input_dim': german_credit_preprocessed_data.shape[1] + 1,
    'output_dim': german_credit_preprocessed_data.shape[1] + 1,
    'activations': ['relu', 'relu', 'relu']
}
german_credit_X_train, german_credit_X_test, german_credit_y_train, german_credit_y_test = \
    train_test_split(german_credit_preprocessed_data, german_credit_target, test_size=0.3, random_state=SEED)
german_credit_rf = utils.get_random_forest_model(german_credit_X_train, german_credit_X_test, german_credit_y_train,
                                                 SEED, 'german-credit')
german_credit_gan_BB = utils.train_and_evaluate_BB_model('german-credit', german_credit_rf,
                                                         german_credit_preprocessed_data, german_credit_target,
                                                         SEED, discriminator_BB_network_params, generator_BB_network_params)
|
import pytest
def add(a, b):
    """Return the sum of the two operands."""
    total = a + b
    return total
def test_plan():
    """Sanity-check that add() sums two small integers."""
    assert add(2, 3) == 5
|
import os
import json
from lxml import etree as et
from collections import OrderedDict
def ensure_dirs(path):
    """Create the parent directory of *path* if it does not already exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of an ``exists`` check
    followed by ``makedirs``, which avoids the check-then-create race where
    another process creates the directory between the two calls.
    """
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def load_json(input_path):
    """Parse a UTF-8 JSON file, preserving key order via OrderedDict."""
    with open(input_path, "r", encoding="utf-8") as handle:
        raw = handle.read()
    return json.loads(raw, object_pairs_hook=OrderedDict)
def load_xml(input_path):
    """Read *input_path* as bytes and parse it into an lxml element."""
    with open(input_path, "rb") as handle:
        payload = handle.read()
    return et.fromstring(payload)
def save_xml(output_path, xml):
    """Serialize *xml* to *output_path*, creating parent directories first.

    Output is pretty-printed UTF-8 with an XML declaration and a trailing
    newline.
    """
    ensure_dirs(output_path)
    serialized = et.tostring(
        xml,
        encoding="utf-8",
        pretty_print=True,
        xml_declaration=True,
    )
    with open(output_path, "wb") as handle:
        handle.write(serialized)
        handle.write(u'\n'.encode('utf-8'))
def xpath_default(xml, query, default_namespace_prefix="i"):
    """Run an XPath *query*, mapping the default namespace to a usable prefix.

    lxml's xpath() cannot use a None prefix, so the document's unnamed
    default namespace is re-keyed under *default_namespace_prefix* before
    querying. Yields each matching element.
    """
    raw_nsmap = xml.nsmap if hasattr(xml, "nsmap") else xml.getroot().nsmap
    nsmap = {
        (prefix if prefix else default_namespace_prefix): uri
        for prefix, uri in raw_nsmap.items()
    }
    for element in xml.xpath(query, namespaces=nsmap):
        yield element
def parse_time(timestamp):
    """Parse timestamps like ``12:34.56``, ``2s``, ``500ms``, ``3min`` or
    ``1h`` into seconds (float).

    Colon-separated values are read as ``[[hours:]minutes:]seconds``. Unit
    suffixes are checked longest-first: the original code tested the bare
    ``"s"`` suffix before ``"ms"``, which made the ``"ms"`` branch
    unreachable and raised ValueError on inputs like ``"500ms"``
    (``float("500m")``).
    """
    if ":" in timestamp:
        parts = timestamp.split(":")
        result = float(parts[-1])
        if len(parts) >= 2:
            result += float(parts[-2]) * 60
        if len(parts) >= 3:
            result += float(parts[-3]) * 3600
        return result
    elif timestamp.endswith("ms"):
        return float(timestamp[:-2]) / 1000
    elif timestamp.endswith("min"):
        return float(timestamp[:-3]) * 60
    elif timestamp.endswith("s"):
        return float(timestamp[:-1])
    elif timestamp.endswith("h"):
        return float(timestamp[:-1]) * 3600
    return float(timestamp)
|
from __future__ import print_function, unicode_literals
import re
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from ..static_version import STATIC_VERSION
register = template.Library()
def get_asset_url(name, prefix, outside):
    """Build the final URL for a static asset.

    Relative, locally-managed names get a type prefix ("css"/"js") plus a
    cache-busting version query string; any relative name is then prefixed
    with ``settings.STATIC_URL``. Absolute URLs (containing "://") pass
    through untouched.
    """
    is_absolute = "://" in name
    if not is_absolute:
        if not outside:
            name = "{}/{}?{}".format(prefix, name, STATIC_VERSION)
        name = settings.STATIC_URL + name
    return name
@register.simple_tag
def css(name, media="", outside=False):
    """Render a stylesheet <link> tag for *name*, optionally media-scoped."""
    media_attr = 'media="{}"'.format(media) if media else ""
    href = get_asset_url(name, "css", outside)
    return '<link type="text/css" rel="stylesheet" href="{}" {}/>'.format(href, media_attr)
@register.simple_tag
def js(name, outside=False):
    """Render a <script> tag for the JavaScript asset *name*."""
    src = get_asset_url(name, "js", outside)
    return '<script type="text/javascript" src="{}"></script>'.format(src)
@register.simple_tag
def requirejs(name):
    """Render the RequireJS bootstrap: a global ``require`` config followed by
    the ``require.js`` loader tag with *name* as the data-main entry module.

    The doubled braces in the template are literal JS braces escaped for
    ``str.format``; ``urlArgs`` appends the static version for cache busting.
    """
    return """
    <script type="text/javascript">
        var require = {{ baseUrl: "{base}", urlArgs: "{version}", paths: {{ "{name}": "optimized-scripts" }} }};
    </script>
    <script type="text/javascript" src="{base}/require.js" data-main="{name}"></script>
    """.format(
        base=settings.STATIC_URL + "js",
        name=name,
        version=STATIC_VERSION,
    )
@register.simple_tag(takes_context=True)
def url2(context, view_name, *args, **kwargs):
    """
    Like the original ``url`` tag, but items in ``kwargs`` that evaluate to
    ``False`` are removed before resolving URL
    """
    kwargs = {k: v for k, v in kwargs.items() if v}
    try:
        return reverse(view_name, args=args, kwargs=kwargs,
                       current_app=context.current_app)
    except NoReverseMatch as err:
        # Mirror the stock url tag: retry with the project name prepended.
        if settings.SETTINGS_MODULE:
            project_name = settings.SETTINGS_MODULE.split(".")[0]
            qualified = "{}.{}".format(project_name, view_name)
            try:
                return reverse(qualified, args=args, kwargs=kwargs,
                               current_app=context.current_app)
            except NoReverseMatch:
                pass
        raise err
@register.filter
def normalize_whitespace(value):
    """Collapse every run of whitespace (including newlines) to one space.

    The previous ``flags=re.S`` was a no-op: DOTALL only changes the meaning
    of ``.``, which this pattern does not use, so the flag has been dropped.
    """
    return re.sub(r"\s+", " ", value)
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
class LRUCache:
    """Fixed-capacity cache that evicts the least-recently-used entry.

    Backed by an OrderedDict whose insertion order doubles as the recency
    order: the front holds the stalest key, the back the freshest.
    """

    def __init__(self, capacity):
        self.cache = OrderedDict()
        self.capacity = capacity

    def get(self, key):
        """Return the value for *key* (refreshing its recency), or -1."""
        if key not in self.cache:
            return -1
        # Re-insert so the key becomes the most recently used entry.
        refreshed = self.cache.pop(key)
        self.cache[key] = refreshed
        return refreshed

    def put(self, key, value):
        """Store *key* -> *value*, evicting the stalest entry when full."""
        if key in self.cache:
            self.cache.pop(key)
        elif len(self.cache) == self.capacity:
            # Drop the oldest (front) item to make room.
            self.cache.popitem(last=False)
        self.cache[key] = value
if __name__ == "__main__":
    # Smoke-test the LRU semantics on the classic example sequence.
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1
    cache.put(3, 3)          # evicts key 2 (least recently used)
    assert cache.get(2) == -1
    cache.put(4, 4)          # evicts key 1
    assert cache.get(1) == -1
    assert cache.get(3) == 3
    assert cache.get(4) == 4
|
# Register your models here.
from django.contrib import admin
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
from .models import HomePage, IconBlurb, MapPlace
# TabularDynamicInlineAdmin for the Slide and IconBlurb
# NB the dynamic just gives some js to "add another" easily.
class IconBlurbInline(TabularDynamicInlineAdmin):
    # Inline editor for IconBlurb rows attached to a HomePage.
    model = IconBlurb


class MapPlaceInline(TabularDynamicInlineAdmin):
    # Inline editor for MapPlace rows attached to a HomePage.
    model = MapPlace


# HomePage admin custom class.
class HomePageAdmin(PageAdmin):
    # Edit icon blurbs and map places directly on the HomePage form.
    inlines = [IconBlurbInline, MapPlaceInline]


# register HomePage with its custom admin model
admin.site.register(HomePage, HomePageAdmin)

# NOTE(review): this import sits mid-module; it works, but convention would
# group it with the imports at the top of the file.
from .models import Portfolio, PortfolioItem, PortfolioItemImage, PortfolioItemCategory

# register Portfolio with default PageAdmin
admin.site.register(Portfolio, PageAdmin)


class PortfolioItemImageInline(TabularDynamicInlineAdmin):
    # Inline editor for images belonging to a PortfolioItem.
    model = PortfolioItemImage


class PortfolioItemAdmin(PageAdmin):
    inlines = (PortfolioItemImageInline,)


admin.site.register(PortfolioItem, PortfolioItemAdmin)
# PortfolioItemCategory gets the stock ModelAdmin.
admin.site.register(PortfolioItemCategory)
|
def min_value(digits):
    """Return the smallest number formed from the unique digits of *digits*.

    Given a list of digits, return the smallest number that could be formed
    from these digits, using the digits only once (= ignore duplicates).
    Note: Only positive integers will be passed to the function (> 0),
    no negatives or zeros.

    Examples:
        [1, 3, 1] ==> 13
        [5, 7, 5, 9, 7] ==> 579
        [1, 9, 3, 1, 7, 4, 6, 6, 7] ==> 134679

    (The description previously sat after the function as a loose module
    string; it now lives in the docstring where tooling can find it.)
    """
    # set() removes duplicates; sorted() puts smaller digits first.
    return int(''.join(str(e) for e in sorted(set(digits))))
|
#!/usr/bin/python3
import requests
import webbrowser
import pyautogui
import time
import random
import tkinter as tk
import tkinter.messagebox
def request_words():
    """Download a plain-text dictionary over HTTP and return a list of words.

    Each response line is ASCII-decoded into one word.
    """
    url = 'http://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain'
    response = requests.get(url)
    return [line.decode('ascii') for line in response.content.splitlines()]
def open_browser():
    """ Launch google-chrome browser.

    Opens a blank target through the 'google-chrome' webbrowser controller;
    webbrowser.get raises webbrowser.Error if no such controller exists.
    """
    browser = webbrowser.get('google-chrome')
    browser.open(r'')
def highlight_ominbar():
    """ Click on omnibar in chrome browser.

    Sends Ctrl+L (left control) so the browser focuses and selects the
    address bar. (Name keeps the original 'ominbar' spelling to avoid
    breaking callers.)
    """
    pyautogui.keyDown('ctrlleft')
    pyautogui.press('l')
    pyautogui.keyUp('ctrlleft')
def word_sampler(number):
    """Yield endless batches of *number* distinct words from the dictionary.

    The word list is downloaded once up front; each iteration then yields a
    fresh random sample (without replacement within a batch).
    """
    vocabulary = request_words()
    while True:
        yield random.sample(vocabulary, number)
def search_google(terms):
    """ Type search terms into chrome omnibar.

    Joins *terms* with spaces, types them with a 0.1s per-key interval,
    then presses Enter to submit the search.
    """
    term = ' '.join(terms)
    pyautogui.typewrite(term, 0.1)
    pyautogui.typewrite(['enter'])
def confoogle(repeats=1, wait=2, terms=2):
    """ Stand alone Confoogle program.
    This may be called from the command line with a fixed number of
    repeats. For indeterminate repeats use the tkinter version.

    :param repeats: number of searches to perform
    :param wait: seconds to sleep between searches
    :param terms: how many random words per search
    """
    words = word_sampler(number=terms)
    open_browser()
    for _ in range(repeats):
        # NOTE(review): rebinds the 'terms' parameter to a list of words.
        terms = next(words)
        highlight_ominbar()
        search_google(terms)
        time.sleep(wait)
class StartWindow(tk.Frame):
    """Minimal tkinter launcher for Confoogle.

    NOTE(review): the methods reference the module-level ``root`` (created
    under ``__main__``) rather than the ``parent`` argument, so this class
    only works when instantiated from this script's entry point.
    """

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        # Any keypress tears down the whole UI.
        root.bind('<Key>', lambda e: root.destroy())
        # NOTE(review): '&' bitwise-ANDs the two return values (both 1); both
        # methods do run, but sequential calls would express the intent better.
        button = tk.Button(root, text="Confoogle!", command=lambda: self.open_browser() & self.confoogle())
        button.pack()
        # NOTE(review): label is created but never packed or stored.
        label = tk.Label(root, text="")

    def confoogle(self):
        # Perform one search, then reschedule itself to run again in 1s.
        terms = next(self.words)
        root.attributes('-topmost', 0)
        highlight_ominbar()
        search_google(terms)
        self.after(1000, self.confoogle)
        root.attributes('-topmost', 1)
        return 1

    def open_browser(self):
        # Launch the browser and prepare the two-word sampler used above.
        open_browser()
        self.words = word_sampler(2)
        return 1

    def quit(self):
        # NOTE(review): tk.sys is just the sys module re-exported by tkinter;
        # importing sys directly would be clearer.
        self.destroy()
        tk.sys.exit()
if __name__ == '__main__':
    # Build the Tk root, mount the launcher window, and enter the event loop.
    root = tk.Tk()
    StartWindow(root).pack()
    root.mainloop()
|
from flask import request, jsonify
from auth.model.user import User
from werkzeug.security import generate_password_hash, check_password_hash
from exception import MyException
from extensions.extensions import db, jwt
from auth.model.token_revoked import RevokedToken
from flask_jwt_extended import create_access_token, create_refresh_token, current_user, get_jwt, get_jwt_identity
from flask_jwt_extended import decode_token
from password_validator import PasswordValidation
from email_validator import validate_email, EmailNotValidError
from datetime import datetime, timezone
def add_user(firstname, lastname, username, email, password):
    """Validate the supplied fields and insert a new row in the users table.

    Returns the created user's data on success, or an
    ``({'message': ..., 'error': ...}, 400)`` tuple on validation failure.

    Fixes over the previous version: error messages said "404" while the
    status was 400; whitespace-only names were accepted (strip happened
    after the emptiness check); and an invalid email returned a bare string
    with an implicit 200 status.
    """
    # Strip first so whitespace-only values are rejected like empty ones.
    firstname = firstname.strip()
    if firstname == '':
        return {'message': 'firstname is not valid', 'error': 'bad request, 400'}, 400
    lastname = lastname.strip()
    if lastname == '':
        return {'message': 'lastname is not valid', 'error': 'bad request, 400'}, 400
    username = username.strip()
    if username == '':
        return {'message': 'username is not valid', 'error': 'bad request, 400'}, 400
    try:
        valid = validate_email(email, allow_smtputf8=False)
        email = valid.email
    except EmailNotValidError as err:
        # Explicit 400 instead of returning the bare error string.
        return {'message': str(err), 'error': 'bad request, 400'}, 400
    if not (PasswordValidation.is_check_none_space_length(password) and PasswordValidation.is_check_char(
            password) and PasswordValidation.is_check_special_char(password)):
        return {'error': '400 Bad Request', 'message': 'Enter a valid Password'}, 400
    pwd = password_hashing(password)
    user = User(username=username, password=pwd, email=email, firstname=firstname, lastname=lastname)
    db.session.add(user)
    db.session.commit()
    # NOTE(review): echoing the plaintext password back is a security risk;
    # kept for interface compatibility but consider dropping it.
    return {
        'username': username,
        'password': password,
        'firstname': firstname,
        'lastname': lastname,
        'email': email,
    }
def list_users():
    """Return every user as a list of plain dicts under the 'users' key."""
    results = []
    for user in User.find_all_user():
        results.append({
            'id': user.user_id,
            'username': user.username,
            'password': user.password,
            'email': user.email,
            'firstname': user.firstname,
            'lastname': user.lastname,
            'role': user.role,
            'email_status': user.email_status,
            'email_created_at': user.to_str_date(),
        })
    return {'users': results}
def list_user(id):
    """Return the user with the given id as a dict; raise 404 when absent."""
    found = User.find_user_by_id(id)
    if found is None:
        raise MyException('could not find this id', status_code=404)
    return found.write_to_dict()
def update_user():
    """Update one or more editable fields of the logged-in user's row."""
    user = User.query.filter(User.user_id == current_user.user_id).one()
    if user is None:
        raise MyException('need user login', status_code=401)
    if request.json == {}:
        raise MyException('update field cannot be empty', status_code=400)
    # Only fields present in the request body are written.
    for field in ('username', 'firstname', 'lastname'):
        value = request.json.get(field, None)
        if value is not None:
            setattr(user, field, value)
    db.session.commit()
    return {
        'username': user.username,
        'firstname': user.firstname,
        'lastname': user.lastname,
    }
def update_email(token):
    """Verify the emailed token, store the new address and mark it verified.

    Raises MyException(404) for an undecodable token or an unknown user.
    """
    try:
        token_data = decode_token(token)
        email = token_data['email']
        username = token_data['username']
    except Exception:
        raise MyException('please click the link to verify your email', status_code=404)
    user = User.query.filter(User.username == username).first()
    if user is None:
        raise MyException('invalid user', status_code=404)
    user.email = email
    # Bug fix: email_status was previously set AFTER commit, so the verified
    # flag was never persisted; both fields are now written before commit.
    user.email_status = True
    db.session.commit()
    return {"updated_email": email}
def update_role(username, role):
    """Assign *role* to the user with *username*; raise 404 when absent."""
    target = User.find_user_by_username(username)
    if target is None:
        raise MyException('could not find this username', status_code=404)
    target.role = role
    db.session.commit()
    return target.write_to_dict()
def delete_user(id):
    """Remove the user row with the given id; raise 404 when absent."""
    doomed = User.find_user_by_id(id)
    if doomed is None:
        raise MyException('could not find this id', status_code=404)
    db.session.delete(doomed)
    db.session.commit()
    return {'deleted': doomed.user_id}
def user_login(username, password):
    """Authenticate *username*/*password* and issue access+refresh tokens.

    Raises MyException for unknown user (404), bad password (400) or an
    unverified email address (401).
    """
    user = User.find_user_by_username(username)
    if not user:
        raise MyException('could not find this username', status_code=404)
    if not password_verify(user.password, password):
        raise MyException('invalid password', status_code=400)
    if user.email_status is False:
        raise MyException('please verify your email', status_code=401)
    access_token, refresh_token = generate_token(user.username, user.role)
    payload = dict(
        username=user.username,
        access_token=access_token,
        refresh_token=refresh_token,
        role=user.role,
    )
    return jsonify(**payload)
def refresh_access_token():
    """Issue a new access token unless the refresh token was revoked."""
    jti = get_jwt()['jti']
    revoked = db.session.query(RevokedToken.id).filter_by(refresh_jti=jti).first()
    if revoked:
        return {'msg': 'refresh token is expired'}, 403
    fresh_token = create_access_token(identity=get_jwt_identity(), fresh=True)
    return {'token': fresh_token}
def generate_token(identity, role):
    """create access and refresh token

    NOTE(review): the role is used as the claim *key* ({role: True}) rather
    than a value ({'role': role}); consumers must test for the claim name.
    Confirm this is intentional before changing.
    """
    access_token = create_access_token(identity=identity, fresh=True, additional_claims={role: True})
    refresh_token = create_refresh_token(identity=identity)
    return access_token, refresh_token
def user_identity():
    """Return the logged-in user's profile as JSON."""
    profile = {
        'firstname': current_user.firstname,
        'lastname': current_user.lastname,
        'username': current_user.username,
        'email': current_user.email,
        'role': current_user.role,
    }
    return jsonify(**profile)
def password_hashing(pwd):
    """Return a salted hash of *pwd* suitable for storage."""
    return generate_password_hash(pwd)
def password_verify(password, pwd):
    """Return True when plaintext *pwd* matches the stored hash *password*."""
    matches = check_password_hash(password, pwd)
    return matches
@jwt.user_lookup_loader
def user_lookup_callback(_jwt_header, jwt_data):
    """ callback function to return user identity

    flask-jwt-extended invokes this to resolve ``current_user``; the JWT
    'sub' claim holds the username set at token creation.
    """
    identity = jwt_data["sub"]
    return User.find_user_by_username(identity)
@jwt.expired_token_loader
def revoked_token_callback(_jwt_header, _jwt_payload):
    """callback function that gives error message when expired tokens encounter

    NOTE(review): registered for *expired* tokens despite the 'revoked' name;
    renaming would be clearer but is deferred to avoid churn.
    """
    return {'error': '401, Unauthorized', 'message': 'this token has been expired'}, 401
@jwt.token_in_blocklist_loader
def check_if_token_revoked(_jwt_header, jwt_payload):
    """Return True when the token's jti appears in the revocation table."""
    revoked_id = (db.session.query(RevokedToken.id)
                  .filter_by(access_jti=jwt_payload['jti'])
                  .scalar())
    return revoked_id is not None
def user_logout():
    """Revoke the caller's access and refresh tokens (logout)."""
    refresh_token = request.json.get('refresh_token', None)
    if refresh_token is None:
        # Bug fix: previously returned a *set* literal ({'require refresh
        # token'}) with no status code, which is not JSON-serializable;
        # return a proper error body plus a 400 status instead.
        return {'message': 'require refresh token'}, 400
    data = decode_token(refresh_token)
    refresh_jti = data['jti']
    access_jti = get_jwt()['jti']
    revoked_at = datetime.now(timezone.utc)
    revoked_token = RevokedToken(access_jti=access_jti, refresh_jti=refresh_jti, created_at=revoked_at)
    db.session.add(revoked_token)
    db.session.commit()
    return jsonify(
        message="successfully revoked tokens",
        access_jti=access_jti,
        refresh_jti=refresh_jti
    )
|
# -*- coding: utf-8 -*-
import uuid
import pytz
from model.systems.assistance.date import Date
class Issue:
    """Data-access helpers for issue requests and their state history.

    All methods take an open DB-API connection *con*; writes are left for
    the caller to commit. (Spanish comments translated into docstrings.)
    """

    def getChildsId(self, con, id):
        """Return the ids of the child issues of issue *id*, oldest first."""
        cur = con.cursor()
        cur.execute('''
            SELECT r.id
            FROM issues.request AS r
            WHERE r.related_request_id = %s
            ORDER BY r.created ASC;
        ''', (id,))
        ids = []
        for row in cur:
            # Bug fix: previously appended issue[0], but the loop variable
            # is named row — that raised NameError at runtime.
            ids.append(row[0])
        return ids

    def deleteStatesFromIssue(self, con, id):
        """Delete every state row attached to issue *id*."""
        cur = con.cursor()
        cur.execute('''
            DELETE FROM issues.state
            WHERE request_id = %s
        ''', (id,))

    def deleteIssue(self, con, id):
        """Delete issue *id*; it must no longer have any associated state."""
        cur = con.cursor()
        cur.execute('''
            DELETE FROM issues.request
            WHERE id = %s
        ''', (id,))

    def insertIssue(self, con, id, request, officeId, requestorId, created, priority, visibility, relatedRequestId):
        """Insert a new issue row; timestamps are stored in UTC."""
        cur = con.cursor()
        cur.execute('set timezone to %s', ('UTC',))
        cur.execute("""
            INSERT INTO issues.request (id, request, office_id, requestor_id, created, priority, visibility, related_request_id)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
        """, (id, request, officeId, requestorId, created, priority, visibility, relatedRequestId))

    def insertState(self, con, requestId, requestorId, created, state):
        """Insert a state-change row for issue *requestId* (UTC timestamps)."""
        cur = con.cursor()
        cur.execute('set timezone to %s', ('UTC',))
        cur.execute("""
            INSERT INTO issues.state (state, created, user_id, request_id)
            VALUES (%s, %s, %s, %s);
        """, (state, created, requestorId, requestId))

    def __getIssuesRelated(self, con, ids, relatedRequestIds):
        """Fetch issues related to the given id sets, excluding *ids* themselves.

        Each row carries its most recent state (the inner join keeps only the
        newest issues.state row per request). Returns a dict with the found
        'ids', their 'relatedRequestIds' and the 'issues' themselves.
        """
        cur = con.cursor()
        cur.execute('''
            SELECT r.id, r.created, r.request, r.requestor_id, r.office_id, r.related_request_id, r.assigned_id, r.priority, r.visibility, s.state
            FROM issues.request AS r
            INNER JOIN issues.state AS s ON (r.id = s.request_id)
            INNER JOIN (
                SELECT request_id, max(created) AS created
                FROM issues.state
                GROUP BY request_id
            ) AS s2 ON (s.request_id = s2.request_id AND s.created = s2.created)
            WHERE ((r.related_request_id = ANY(%s)) OR (r.related_request_id = ANY(%s))) AND NOT (r.id = ANY(%s))
            ORDER BY r.created ASC;
        ''', (ids, relatedRequestIds, ids))
        issues = []
        ids = []
        relatedRequestIds = []
        for issue in cur:
            ids.append(issue[0])
            if issue[5] is not None:
                relatedRequestIds.append(issue[5])
            issues.append(
                {
                    'id': issue[0],
                    'created': issue[1],
                    'request': issue[2],
                    'requestor_id': issue[3],
                    'office_id': issue[4],
                    'related_request_id': issue[5],
                    'assigned_id': issue[6],
                    'priority': issue[7],
                    'visibility': issue[8],
                    'state': issue[9],
                }
            )
        return {
            "ids": ids,
            "relatedRequestIds": relatedRequestIds,
            "issues": issues
        }

    def getIssuesByUser(self, con, userId):
        """Return issues requested by or assigned to *userId*, expanded with
        the transitive closure of related issues; each row carries its most
        recent state."""
        cur = con.cursor()
        cur.execute('''
            SELECT r.id, r.created, r.request, r.requestor_id, r.office_id, r.related_request_id, r.assigned_id, r.priority, r.visibility, s.state
            FROM issues.request AS r
            INNER JOIN issues.state AS s ON (r.id = s.request_id)
            INNER JOIN (
                SELECT request_id, max(created) AS created
                FROM issues.state
                GROUP BY request_id
            ) AS s2 ON (s.request_id = s2.request_id AND s.created = s2.created)
            WHERE r.requestor_id = %s OR r.assigned_id = %s
            ORDER BY r.created ASC;
        ''', (userId, userId))
        if cur.rowcount <= 0:
            return []
        issues = []
        ids = []
        relatedRequestIds = []
        for issue in cur:
            ids.append(issue[0])
            if issue[5] is not None:
                relatedRequestIds.append(issue[5])
            issues.append(
                {
                    'id': issue[0],
                    'created': issue[1],
                    'request': issue[2],
                    'requestor_id': issue[3],
                    'office_id': issue[4],
                    'related_request_id': issue[5],
                    'assigned_id': issue[6],
                    'priority': issue[7],
                    'visibility': issue[8],
                    'state': issue[9],
                }
            )
        # Repeatedly pull in related issues until no new ids appear.
        # (Debug print(data) removed; stray semicolons and the duplicate
        # initialisation of ids/relatedRequestIds cleaned up.)
        while True:
            data = self.__getIssuesRelated(con, ids, relatedRequestIds)
            if len(data["ids"]) == 0:
                break
            ids = list(set(ids + data["ids"]))
            relatedRequestIds = data["relatedRequestIds"]
            issues = issues + data["issues"]
        return issues

    def updateData(self, con, id, request, priority, visibility, state, userId):
        """Update an issue's own fields (not its relations); append a new
        state row when the state actually changed. Returns the emitted
        'IssueUpdatedData' events."""
        cur = con.cursor()
        cur.execute("""
            UPDATE issues.request SET request = %s, priority = %s, visibility = %s
            WHERE issues.request.id = %s;
        """, (request, priority, visibility, id))
        # Fetch the most recent state to decide whether a transition happened.
        cur.execute('''
            SELECT state
            FROM issues.state AS r
            WHERE r.request_id = %s
            ORDER BY created DESC
            LIMIT 1
        ''', (id,))
        oldState = None
        for row in cur:
            oldState = row[0]
        if oldState != state:
            cur.execute('set timezone to %s', ('UTC',))
            cur.execute("""
                INSERT INTO issues.state (created, state, user_id, request_id)
                VALUES (now(), %s, %s, %s);
            """, (state, userId, id))
        events = []
        e = {
            'type': 'IssueUpdatedData',
            'data': {
                'id': id,
                'request': request,
                'priority': priority,
                'visibility': visibility,
                'state': state,
            }
        }
        events.append(e)
        return events
|
from typing import List
from django.urls import reverse
from colossus.apps.accounts.models import User
from colossus.apps.templates.models import EmailTemplate
from colossus.apps.templates.tests.factories import EmailTemplateFactory
from colossus.test.factories import UserFactory
from colossus.test.testcases import TestCase
class EmailTemplateTestCase(TestCase):
    """Shared fixture: five email templates plus a logged-in user 'alex'."""

    def setUp(self):
        self.templates: List[EmailTemplate] = EmailTemplateFactory.create_batch(5)
        self.user: User = UserFactory(username='alex')
        # Password '123' presumably matches the UserFactory default — TODO confirm.
        self.client.login(username='alex', password='123')
class EmailTemplateListViewTests(EmailTemplateTestCase):
    """Exercise the template list view (templates:emailtemplates)."""

    def setUp(self):
        super().setUp()
        self.url = reverse('templates:emailtemplates')
        self.response = self.client.get(self.url)

    def test_status_code_200(self):
        # The page should render for an authenticated user.
        self.assertEqual(self.response.status_code, 200)

    def test_response_context(self):
        # The view must expose the template list, the active menu item, and
        # the total count of the templates created in setUp.
        context = self.response.context
        self.assertIn('templates', context)
        self.assertEqual('templates', context['menu'])
        self.assertEqual(5, context['total_count'])

    def test_html_content(self):
        # Every factory-created template name should appear in the HTML.
        contents = map(lambda t: t.name, self.templates)
        for content in contents:
            with self.subTest(content=content):
                self.assertContains(self.response, content)
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 23: merge k sorted linked lists."""

    def mergeKLists(self, lists):
        """
        :type lists: List[ListNode]
        :rtype: ListNode

        Push every node value onto a heap, then pop them in ascending order
        into a freshly built chain. Uses a local heap instead of the
        original ``self._heap`` attribute, so repeated calls on a shared
        Solution instance cannot leak state between runs.
        """
        import heapq
        heap = []
        for node in lists:
            while node:
                heapq.heappush(heap, node.val)
                node = node.next
        if not heap:
            return None
        head = tail = ListNode(heapq.heappop(heap))
        while heap:
            tail.next = ListNode(heapq.heappop(heap))
            tail = tail.next
        return head
|
#!/usr/bin/env python
import sys
import fileinput
import re

# Matches a string that begins with an ASCII digit.
number = re.compile("^[0-9]")

# Repairs tab-separated input where a text column was accidentally split:
# rows normally have 4 fields with a number in field 4 (index 3). When a row
# has more than 4 fields and the number only appears in field 5 (index 4),
# fields 3 and 4 (indices 2 and 3) are glued back together; rows that cannot
# be repaired are reported on stderr.
for line in fileinput.input():
    tokens = line.strip().split('\t')
    if len(tokens) > 4:
        # Drop stray line breaks inside the (split) text column.
        tokens[2] = tokens[2].strip().replace("\n", "").replace("\r", "")
        m = number.search(tokens[3].strip())
        if m is None:
            m = number.search(tokens[4].strip())
            if m is None:
                sys.stderr.write("Error: invalid line {0}".format(line))
            else:
                # Merge tokens[2] and tokens[3]: note there is deliberately
                # no tab between {2} and {3} in the format string.
                sys.stdout.write("{0}\t{1}\t{2}{3}\t{4}\t\n".format(tokens[0], tokens[1], tokens[2], tokens[3], tokens[4]))
        else:
            # Field 4 already numeric: the row is intact, pass it through.
            sys.stdout.write(line)
    elif len(tokens) == 4:
        sys.stdout.write(line)
    else:
        sys.stderr.write("Error: length is only {0} for line {1}".format(len(tokens), line))
|
import numpy as np
import random
import tfim
import itertools as it
import argparse
import networkx as nx
from line_profiler import LineProfiler
import json
from itertools import groupby
import sys, os
from itertools import combinations
def main():
    """Drive the ground-state search for a range of random-bond seeds.

    For each seed: build a random ±1 bond configuration on a periodic grid,
    find the frustrated plaquettes, pair them with minimum-total-length
    strings and derive the resulting ground-state spin configurations. The
    seed -> configurations mapping is dumped as JSON to ground_states.txt;
    an optional (currently partially disabled) cross-check compares against
    exact tfim diagonalization.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('yheight', type=int, help='Height of grid')
    parser.add_argument('xwidth', type=int, help='Width of grid')
    parser.add_argument('initial_seed', type=int, help='First Jij seed')
    parser.add_argument('seed_range', type=int, help='Number of seeds')
    args = parser.parse_args()
    PBC = True
    yheight = args.yheight
    xwidth = args.xwidth
    L = [yheight, xwidth]
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    initial = args.initial_seed
    num_seeds = args.seed_range
    # Pixel centre used when laying out spin coordinates for the strings.
    center = (500, 375)
    ground_states = {}
    num_missing = 0
    different = []
    f = open("ground_states.txt", "w+")
    for seed in range(initial, initial + num_seeds):
        # Build the random couplings and locate frustrated plaquettes.
        bonds = bond_list(seed, N, PBC, xwidth, yheight)
        Jij = make_Jij(N, bonds, lattice)
        coordList = spinCoords(center, xwidth, yheight)
        plaq = make_plaquettes(PBC, lattice, N, xwidth, yheight)
        f_plaq = frustrated(Jij, plaq)
        # Pair up frustrated plaquettes with minimum-length strings.
        node_pairs = plaq_pairing(f_plaq, coordList, PBC, xwidth, yheight)
        init_ground = initial_ground(node_pairs, xwidth, yheight)
        p_pairings = init_ground[0]
        ground_distance = init_ground[1]
        edges = viable_edges(node_pairs, p_pairings, ground_distance, f_plaq, xwidth, yheight)
        matchings = plaq_groups(edges, f_plaq, ground_distance)
        string_groups = add_all_strings(matchings, lattice, coordList)
        b_bonds = broken_bonds(string_groups, N, coordList, xwidth, yheight)
        true_ground = make_config(b_bonds, Jij, N, xwidth, lattice, string_groups)
        ground_config = true_ground[0]
        true_ground_strings = true_ground[1]
        number_ground_states = len(true_ground_strings)
        if number_ground_states == 0: #This means that we assumed a total string length that did not produce any actual ground states. The strategy to fix that is to add to the total string length until we find ground states.
            found_ground = False
            inc = True
        else:
            found_ground = True #This is the part where it works
            inc = False
            #print("Ground distance for something that's working : ", ground_distance)
        #Below is the piece that isn't working as well. It tends to return some correct ground states, but has not worked fully yet. You can check things with the tfim code, but it takes a while for that to return anything. I wouldn't use tfim for anything too far beyond a 4x4 system, or it takes too long.
        '''
        incremented = 0
        while found_ground == False:
            incremented += 1
            ground_distance += 1 #This adds to the total string distance that we are assuming will #produce a ground state
            if ground_distance > 100:
                break #Breaks out of the whole thing if the code has run too far
            edges = viable_edges(node_pairs, p_pairings, ground_distance, f_plaq, xwidth, yheight)
            if len(edges) != 0:
                matchings = plaq_groups(edges, f_plaq, ground_distance)
                string_groups = add_all_strings(matchings, lattice, coordList)
                b_bonds = broken_bonds(string_groups, N, coordList, xwidth, yheight)
                true_ground = make_config(b_bonds, Jij, N, xwidth, lattice, string_groups)
                ground_config = true_ground[0]
                true_ground_strings = true_ground[1]
                number_ground_states = len(true_ground_strings)
                if number_ground_states != 0: #If it finds ground states, we can break out of the while #loop
                    found_ground = True
        '''
        #If you are running with the incremented states, you would change this so that incremented states can also get in (inc stands for incremented and means that we increased the initial minimum assumed string length)
        if not inc:
            ground_states.update({seed:ground_config})
            ground_config = list(set(ground_config))
            ground_config.sort()
            #This whole piece will check whether you are returning ground states that are correct by checking them against tfim. Again, don't run this bit if you are doing much beyond a 4x4 system or tfim takes forever
            Jij2 = Jij_convert(Jij, N) #gives the Jij matrix in tfim's form
            Energies = -1*tfim.JZZ_SK_ME(basis, Jij2)
            number_ground = num_ground_states(Energies)[0]
            states = num_ground_states(Energies)[1]
            states.sort()
            mod_states = convert_states(states, basis)
            mod_states.sort()
            print("From tfim: ", mod_states)
            if ground_config != mod_states:
                different.append(seed)
                print("Different!") #Lets you know when this code isn't working
    print("Different seeds: ", different)
    f.write(json.dumps(ground_states))
    f.close()
def bond_list(seed, N, PBC, xwidth, yheight):
    """Build a seeded random bond list with equal (or near-equal) numbers
    of antiferromagnetic (-1) and ferromagnetic (+1) bonds.

    With periodic boundaries a square lattice of N spins has 2N bonds;
    otherwise the horizontal and vertical bond counts are summed.
    """
    np.random.seed(seed)
    if PBC == True:
        num_of_bonds = 2 * N
    else:
        num_of_bonds = (xwidth - 1) * yheight + xwidth * (yheight - 1)
    half = num_of_bonds // 2
    # An odd total gives the extra bond to the antiferromagnetic side.
    antiferro = [-1] * (half + num_of_bonds % 2)
    ferro = [1] * half
    return list(np.random.permutation(antiferro + ferro))
def make_Jij(N, b_list, lattice):
    """Assemble the symmetric N x N coupling matrix from the flat bond list.

    Bonds are consumed from b_list in the order nearest-neighbour pairs
    are first encountered; each value is mirrored across the diagonal.
    """
    Jij = np.zeros((N, N))
    next_bond = 0
    for site in range(N):
        for neighbour in lattice.NN(site):
            # Only assign a pair the first time we see it (still zero).
            if Jij[site][neighbour] == 0:
                value = b_list[next_bond]
                Jij[site][neighbour] = value
                Jij[neighbour][site] = value
                next_bond += 1
    return Jij
def make_plaquettes(PBC, lattice, N, xwidth, yheight):
    """List plaquettes as 4-spin lists; the first spin names the plaquette.

    Without PBC, positions on the top/right edge get empty placeholder
    lists so that a plaquette's name still equals its list index.
    """
    p_list = []
    if PBC:
        for spin in range(N):
            nbrs = lattice.NN(spin)
            # Walk spin -> nbrs[3] -> its nbrs[1] -> back via nbrs[1].
            across = lattice.NN(nbrs[3])
            p_list.append([spin, nbrs[3], across[1], nbrs[1]])
    else:
        # Only relevant without periodic boundary conditions.
        for y in range(yheight):
            for x in range(xwidth):
                if y == yheight - 1 or x == xwidth - 1:
                    # Edge placeholder keeps name == index alignment.
                    p_list.append([])
                else:
                    base = y * xwidth + x
                    p_list.append([base, base + 1,
                                   base + xwidth + 1, base + xwidth])
    return p_list
def frustrated(Jij, plaqs):
    """Return the name (first spin) of every frustrated plaquette.

    A plaquette is frustrated when an odd number (1 or 3) of its four
    edges carry an antiferromagnetic (-1) bond. Empty placeholder
    plaquettes are skipped.
    """
    f_plaq = []
    for plaq in plaqs:
        if not plaq:
            continue
        edges = [(plaq[0], plaq[1]), (plaq[1], plaq[2]),
                 (plaq[2], plaq[3]), (plaq[0], plaq[3])]
        negatives = sum(1 for a, b in edges if Jij[a][b] == -1)
        # Odd count of -1 bonds (i.e. 1 or 3) means frustration.
        if negatives % 2 == 1:
            f_plaq.append(plaq[0])
    return f_plaq
def plaq_pairing(f_plaq, coordList, PBC, xwidth, yheight):
    '''Return every pair of frustrated plaquettes as (p_small, p_large, w).

    The weight w is NOT the separation itself: it is the maximum possible
    separation (xwidth//2 + yheight//2) minus the actual separation, so
    that a later max-weight matching yields the minimum total distance.
    '''
    pair_list = []
    # Hoisted loop invariant; renamed from `max`, which shadowed the builtin.
    max_dist = xwidth // 2 + yheight // 2
    for index, p1 in enumerate(f_plaq):
        x1, y1 = coordList[p1][0], coordList[p1][1]
        for p2 in f_plaq[index + 1:]:
            x2, y2 = coordList[p2][0], coordList[p2][1]
            xdiff = abs(x1 - x2)
            ydiff = abs(y2 - y1)
            if PBC:
                # Wrap distances around the torus.
                if xdiff > xwidth // 2:
                    xdiff = xwidth - xdiff
                if ydiff > yheight // 2:
                    ydiff = yheight - ydiff
            else:
                if xdiff > (xwidth - 1) // 2:
                    xdiff = (xwidth - 1) - xdiff
                if ydiff > (yheight - 1) // 2:
                    # NOTE(review): asymmetric with the x branch, which
                    # subtracts from (xwidth - 1); looks suspicious for the
                    # open-boundary case — confirm intended behaviour.
                    ydiff = (yheight) - ydiff
            tot_dist = int(xdiff + ydiff)
            op_dist = max_dist - tot_dist
            # Store the pair with the smaller plaquette first.
            if p1 > p2:
                pair_list.append((p2, p1, op_dist))
            else:
                pair_list.append((p1, p2, op_dist))
    return pair_list
def initial_ground(pair_list, xwidth, yheight):
    """Find one minimum-total-distance pairing of the frustrated plaquettes.

    Because pair weights are stored as (max possible distance - distance),
    networkx's max_weight_matching effectively minimises the total string
    length. Returns (p_pairs, ground_dist): the matched pairs with their
    true distances, and the total string length of this candidate ground
    state (the length we expect all other ground states to share).
    """
    G = nx.Graph()
    G.add_weighted_edges_from(pair_list)
    matching = nx.max_weight_matching(G)
    max_sep = xwidth // 2 + yheight // 2
    ground_dist = 0
    p_pairs = []
    for pair in matching:
        weight = G.get_edge_data(pair[0], pair[1])['weight']
        pair_dist = max_sep - weight  # recover the true separation
        ground_dist += pair_dist
        # Normalise so the smaller plaquette comes first.
        if pair[0] > pair[1]:
            pair = (pair[1], pair[0])
        p_pairs.append([pair, pair_dist])
    return p_pairs, ground_dist
def viable_edges(pair_list, p_pairs, ground_dist, f_plaq, xwidth, yheight):
    '''Function takes the list of all possible pairings of nodes and returns a list of lists. Each list in it corresponds to one of the frustrated plaquettes and has all of the edges that could be used to make a ground state with that plaquette'''
    # Uses the ground_dist from initial_ground; if no ground state was found
    # at the minimum distance, the caller passes an incremented ground_dist.
    edge_lst = []
    plaq_dict = {}
    # One (initially empty) edge bucket per frustrated plaquette, plus a
    # name -> index lookup for those buckets.
    for index, plaq in enumerate(f_plaq):
        edge_lst.append([])
        plaq_dict[plaq] = index
    G = nx.Graph()
    G.add_weighted_edges_from(pair_list)  # graph with an edge per candidate pair
    # Check whether p_pairs (the matching from initial_ground) already has
    # the total length we are looking for — it will unless incremented.
    first = False
    p_dist = 0
    for pair in p_pairs:
        dist = pair[1]
        p_dist += dist
    if p_dist == ground_dist:
        first = True  # only remove p_pairs edges below if p_pairs is relevant
    if first:
        # Seed the buckets with the edges of the known ground state.
        for pair in p_pairs:
            plaq = pair[0][0]
            ind = plaq_dict.get(plaq)
            edge_lst[ind].append(pair)
    loopnum = 0
    for plaq2 in f_plaq:  # loop over each frustrated plaquette
        G2 = G.copy()  # restore the full graph for every plaquette
        loopnum += 1
        if first:
            for pair in p_pairs:
                if pair[0][1] == plaq2 or pair[0][0] == plaq2:
                    # Drop the edge already recorded in edge_lst above.
                    G2.remove_edge(*pair[0])
                    break
        ground_energy = True
        while ground_energy == True:
            # Repeatedly build the best matching from the remaining edges,
            # then remove the edge touching plaq2 so the next iteration can
            # try pairing plaq2 differently.
            matching = nx.max_weight_matching(G2)
            if len(matching) != len(f_plaq)/2:
                # Every edge for this plaquette has been exhausted.
                ground_energy = False
                break
            new_length = 0
            new_group = []
            for pair in matching:
                edge = G2.get_edge_data(pair[0], pair[1])
                if pair[0] == plaq2 or pair[1] == plaq2:
                    rem_edge = (pair[0], pair[1])  # edge to remove afterwards
                pair_dist = (xwidth//2 + yheight//2)-edge['weight']  # true distance
                new_length += pair_dist
                if pair[0] > pair[1]:  # smaller plaquette listed first
                    p0 = pair[1]
                    p1 = pair[0]
                    pair = (p0, p1)
                new_group.append([pair, pair_dist])
            if new_length == ground_dist:
                # This matching is a candidate ground state: keep its edges.
                G2.remove_edge(*rem_edge)
                for pair in new_group:
                    plaq3 = pair[0][0]
                    ind = plaq_dict.get(plaq3)
                    if pair not in edge_lst[ind]:
                        edge_lst[ind].append(pair)
            elif new_length < ground_dist:
                # Only reachable with an incremented minimum ground distance;
                # NOTE(review): flagged by the author as possibly incorrect.
                G2.remove_edge(*rem_edge)
            else:
                # All ground states for this plaquette found; move on.
                ground_energy = False
    ind = plaq_dict[plaq2]
    # If every bucket is empty, signal "no viable edges" with an empty list.
    zeroes = True
    for plaq in edge_lst:
        if len(plaq) != 0:
            zeroes = False
            break
    if zeroes:
        edge_lst = []
    return edge_lst
def plaq_groups(edges, f_plaq, ground_dist):
    '''This function returns all of the potential ground states at this point. Each state is made of pairs of plaquettes'''
    # Hand-rolled backtracking search over `edges` (the per-plaquette viable
    # edge buckets from viable_edges). A complete "group" pairs up every
    # frustrated plaquette; groups whose total string length equals
    # ground_dist are collected into all_groups. loop_count caps the search
    # at 1e6 backtracking steps as a safeguard.
    group = []              # partial pairing being built
    used_plaquettes = []    # plaquettes already consumed by `group`
    all_groups = []         # accepted complete pairings
    current_plaq = 0
    p_ind = 0
    index = 0
    loop_count = 0
    new = False             # signals "restart the outer for loop"
    running = True
    plaq_dict = {}
    for index, plaq in enumerate(f_plaq):  # plaquette name -> bucket index
        plaq_dict[plaq] = index
    '''The main piece of the function'''
    # Special case: with only two frustrated plaquettes every viable edge is
    # by itself a complete pairing.
    if len(f_plaq) == 2:
        for i in edges[current_plaq:]:
            for pair in i:
                group.append(pair)
                # NOTE(review): appends the same (growing) list object each
                # time — confirm this is intended for the 2-plaquette case.
                all_groups.append(group)
        return all_groups
    while running:
        for group_index, p_edges in enumerate(edges[current_plaq:]):
            # p_edges holds every viable edge for one plaquette.
            if new:
                new = False  # restart the for loop after current_plaq changed
                break
            if group_index + current_plaq == len(edges) - 1:
                # Reached the last plaquette's bucket without a full group:
                # backtrack through `group` to try other edge choices.
                try_new = False
                for_loop = False
                for e_ind, edge in enumerate(group[::-1]):
                    loop_count += 1
                    if loop_count > 1000000:
                        # Safety valve against runaway backtracking.
                        running = False
                        new = True
                        for_loop = True
                    if try_new == True:
                        break
                    else:
                        plaq_ind = plaq_dict.get(edge[0][0])
                        for e_index, edge2 in enumerate(edges[plaq_ind]):
                            if edge2 == edge:
                                if e_index == len(edges[plaq_ind])-1 and plaq_ind == 0:
                                    # Last entry of the first plaquette: done.
                                    running = False
                                    new = True
                                    try_new = True
                                    break
                                elif e_index == len(edges[plaq_ind])-1:
                                    # Exhausted this plaquette; step back one.
                                    break
                                else:
                                    # Resume from the next edge of this plaquette.
                                    current_plaq = plaq_ind
                                    p_ind = e_index + 1
                                    try_new = True
                                    new = True
                                    break
                if for_loop:
                    # Unwind the edges we stepped back over.
                    group = group[:-e_ind]
                    used_plaquettes = used_plaquettes[:-e_ind]
                if len(group) == 1 and current_plaq == 0:
                    # Back at the first plaquette: start a fresh group.
                    group = []
                    used_plaquettes = []
            # General case: extend the group with an edge for this plaquette.
            for pair in p_edges[p_ind:]:
                p_ind = 0  # reset for the next loop through
                if (pair[0][0] in used_plaquettes):
                    # First plaquette already paired: move to the next bucket.
                    break
                elif pair[0][1] in used_plaquettes:
                    # Partner taken; try the next edge for this plaquette.
                    continue
                else:
                    group.append(pair)  # neither endpoint used yet
                    used_plaquettes.append(pair[0][1])
                    if len(group) == len(f_plaq)//2:  # group is full
                        length = 0
                        for pair in group:
                            length += pair[1]
                        if length == ground_dist:
                            # Full group with the right total length: accept.
                            all_groups.append(group)
                        # Backtrack two edges and search for the next variant.
                        last_pair = group[-2]
                        ind = plaq_dict.get(last_pair[0][0])
                        group = group[:-2]
                        used_plaquettes = used_plaquettes[:-2]
                        found = False
                        while found == False:
                            loop_count += 1
                            if loop_count > 1000000:
                                running = False
                                new = True
                                found = True
                                break
                            for index, pairing in enumerate(edges[ind]):
                                if pairing == last_pair and index == len(edges[ind])-1:
                                    # Last pair for this plaquette: pop another
                                    # edge and keep unwinding.
                                    if len(group) == 0:
                                        # Unwound past the first plaquette: done.
                                        running = False
                                        found = True
                                        break
                                    last_pair = group[-1]
                                    ind = plaq_dict.get(last_pair[0][0])
                                    group = group[:-1]
                                    used_plaquettes = used_plaquettes[:-1]
                                elif pairing == last_pair:
                                    # More pairs remain: resume from the next one.
                                    current_plaq = ind
                                    p_ind = index + 1
                                    found = True
                                    new = True
                                    break
                        break
    return all_groups  # feeds the next function as 'groups'
def add_all_strings(groups, lattice, coordList):
    '''Expand each plaquette pairing into every shortest string path.

    For every group of plaquette pairs, find all shortest lattice paths
    between the two plaquettes of each pair, then emit every combination
    of one path per pair. Each combination is a candidate ground state:
    the bonds a path crosses are the bonds to be broken.
    '''
    lattice_edges = []
    for site in range(len(coordList)):
        for nbr in lattice.NN(site):
            if site < nbr:
                lattice_edges.append((site, nbr))
    G = nx.Graph()
    G.add_edges_from(lattice_edges)  # full lattice connectivity (with PBC)
    all_groups = []
    for processed, group in enumerate(groups):
        if processed > 20000:
            # Safeguard against combinatorial blow-up.
            print('Not all ground states found')
            break
        per_pair_paths = []
        for pairing in group:
            paths = nx.all_shortest_paths(G, pairing[0][0], pairing[0][1])
            per_pair_paths.append(list(paths))
        for combo in it.product(*per_pair_paths):
            all_groups.append(combo)
    return all_groups
'''When I say "path" I just mean the path that the string will take between two frustrated plaquettes. Each bond that the string goes through will be broken in the final state'''
def broken_bonds(string_groups, N, coordList, xwidth, yheight):
    '''Returns a list of NxN matrices. Each matrix corresponds to a potential ground state with 1's where there are broken bonds between two spins'''
    # Each string step between two adjacent plaquettes crosses exactly one
    # lattice bond; the branching below works out which spin pair (sp1, sp2)
    # that bond connects, including the periodic wrap-around cases.
    config_Jij_list = []
    for str_index, state in enumerate(string_groups):
        config_Jij = np.zeros((N,N))  # 1 marks a broken bond between two spins
        for string in state:  # a path between two frustrated plaquettes
            for index in range(0, len(string)-1):  # walk the whole string
                p1 = string[index]  # the two plaquettes we step between
                p2 = string[index + 1]
                if p1>p2:
                    # Ensure p1 is the smaller plaquette index.
                    hold = p1
                    p1 = p2
                    p2 = hold
                c1x = coordList[p1][0]
                c2x = coordList[p2][0]
                if c1x == c2x:  # same column: a vertical bond is crossed
                    if p2 + xwidth > N - 1 and p1 < xwidth:
                        # Wrap between top and bottom rows.
                        sp1 = p1
                        if (p1+1) % xwidth == 0:
                            sp2 = p1 - xwidth + 1  # rightmost column wraps left
                        else:
                            sp2 = p1 + 1
                    else:
                        sp1 = p2
                        if (p1+1) % xwidth == 0:  # on the far right
                            sp2 = p2 - xwidth + 1
                        else:
                            sp2 = p2 + 1
                else:
                    if p2 + xwidth > N - 1:  # broken between a top and a bottom
                        if (p2+1) % xwidth == 0:
                            if p1 % xwidth == 0:  # plaquettes on opposite sides
                                sp1 = p1
                            else:
                                sp1 = p2
                        else:
                            sp1 = p2
                        sp2 = sp1 - (xwidth * (yheight - 1))
                    elif (p2+1) % xwidth == 0:
                        if p1 % xwidth == 0:
                            sp1 = p1
                        else:
                            sp1 = p2
                        sp2 = sp1 + xwidth
                    else:
                        sp1 = p2
                        sp2 = p2 + xwidth
                bond = (sp1, sp2)  # NOTE(review): unused local
                config_Jij[sp1][sp2] = 1
                config_Jij[sp2][sp1] = 1
        config_Jij_list.append([config_Jij, str_index])
    # str_index ties each matrix back to its path group in string_groups.
    return config_Jij_list
def make_config(b_bonds, bonds, N, xwidth, lattice, string_groups):
    """Filter the candidate broken-bond matrices down to real ground states.

    For each candidate, spins are assigned greedily (spin 0 fixed down) by
    walking sites in order and satisfying the bond to the previous site,
    then every bond back to an already-assigned neighbour is checked for
    consistency. Valid configurations are encoded as integers (bit i set
    when spin i is up). Returns (ground_states, true_strings) where
    true_strings holds the path group behind each accepted state.
    """
    ground_states = []
    true_strings = []
    for Jij in b_bonds:
        broken = Jij[0]  # matrix of broken bonds for this candidate
        spin_list = []
        spin_list.append(0)  # set the first spin as down
        valid = True
        # Loop through all other spins.
        for sp1 in range(1, N):
            if valid == False:
                break
            # Reference neighbour: the site to the left, or the one a row
            # below when sp1 starts a new row.
            if sp1 % xwidth == 0:
                sp2 = sp1 - xwidth
            else:
                sp2 = sp1 - 1
            spin2 = spin_list[sp2]
            bond = bonds[sp1][sp2]
            status = broken[sp1][sp2]
            # Set the spin so the (possibly broken) bond is satisfied.
            if bond == 1:
                # Spins want to be the same.
                if status == 1:  # broken
                    spin1 = abs(spin2 - 1)
                else:
                    spin1 = spin2
            else:
                # Spins want to be opposite.
                if status == 1:  # broken
                    spin1 = spin2
                else:
                    spin1 = abs(spin2 - 1)
            spin_list.append(spin1)
            # Check bonds to all lower-numbered neighbours for consistency.
            NNs = lattice.NN(sp1)
            for i in NNs:
                if i < sp1:
                    spini = spin_list[i]
                    bond = bonds[sp1][i]
                    status = broken[sp1][i]
                    if bond == 1:  # spins want to be same
                        if status == 1:  # broken: spins should be opposite
                            if spin1 == spini:
                                valid = False
                                break
                        else:  # spins should be same
                            if spin1 != spini:
                                valid = False
                                break
                    else:  # spins want to be opposite
                        if status == 1:  # broken: spins should be same
                            if spini != spin1:
                                valid = False
                                break
                        else:  # spins should be opposite
                            if spini == spin1:
                                valid = False
                                break
        if valid:
            # Encode the configuration as an integer: bit i set iff spin i up.
            index = 0
            for i in range(0, N):
                if spin_list[i] == 1:
                    index += 2**i
            spin_list.reverse()  # NOTE(review): reversal result is unused here
            ground_states.append(index)
            true_strings.append(string_groups[Jij[1]])
    return ground_states, true_strings
def spinCoords(center, xwidth, yheight):
    """Return (x, y) coordinates for an xwidth-by-yheight grid of spins,
    laid out row by row and offset so the grid is centred on `center`."""
    x0 = center[0] - xwidth / 2
    y0 = center[1] - yheight / 2
    return [(x0 + col, y0 + row)
            for row in range(yheight)
            for col in range(xwidth)]
"""Making the spin states from tfim to check against mine"""
def Jij_convert(Jij, N):
    """Repack the NxN coupling matrix into the (N//2)xN layout that the
    tfim module expects.

    NOTE(review): the row/column shuffling below is opaque; it appears to
    fold each column's couplings into tfim's half-height representation —
    confirm against tfim.JZZ_SK_ME's expected input before relying on it.
    """
    new = np.zeros((N//2,N))
    for j in range(0,N):  # columns
        count = 0
        while count < N//2:
            if j < N//2:
                # Fill rows j..N//2-1 from the tail of row j of Jij.
                subtract = 1
                for i in range(j, N//2):
                    new[i][j] = Jij[j][N-subtract]
                    subtract += 1
                    count += 1
                if count == N//2:
                    break
            # Wrap the remaining entries from the head of row j.
            i = j
            start = 0
            while i > N//2:
                i -= 1
                start += 1
            for q in range(0, N//2):
                new[i-1][j] = Jij[j][q + start]
                count += 1
                i -= 1
                if i <= 0:
                    break
    return new
def num_ground_states(Energies):
    """Return (count, indices) of the minimum-energy states.

    `indices` are the positions of the lowest energy in `Energies`,
    sorted ascending; `count` is how many such states there are.
    """
    order = np.argsort(Energies)
    # Lengths of runs of equal energy in ascending order.
    run_lengths = [len(list(run)) for _, run in groupby(Energies[order])]
    grouped_indices = []
    stop = 0
    for length in run_lengths:
        stop += length
        entry = order[stop - length:stop]
        entry.sort()  # in-place, matching the original's side effect
        grouped_indices.append(entry)
    return run_lengths[0], grouped_indices[0]
def convert_states(states, basis):
    """Map tfim state numbers to the indices of their bit-reversed forms,
    keeping only states whose reversed configuration ends in spin 0."""
    converted = []
    for state in states:
        reversed_bits = basis.state(state)[::-1]
        if reversed_bits[-1] == 0:
            converted.append(basis.index(reversed_bits))
    return converted
if __name__ == "__main__":
    # Entry point when run as a script; main() is defined earlier in the file.
    main()
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Register every app's ModelAdmin classes with the admin site.
admin.autodiscover()
# The order is important: more specific patterns must come first.
# NOTE(review): `patterns()` is Django <= 1.9 API; this module targets an
# old Django release — confirm before upgrading.
urlpatterns = patterns('',
    url(r'^$', 'interview.views.home'),
    # SECURITY NOTE(review): admin credentials were committed in this
    # comment (alejandro/alejandro) — rotate them and remove the comment.
    url(r'^admin/', include(admin.site.urls)) # Username: alejandro, Password: alejandro
)
# Allow path to static files to be shown (serves static files in DEBUG).
urlpatterns += staticfiles_urlpatterns()
class Solution(object):
    def numPairsDivisibleBy60(self, time):
        """Count pairs (i, j), i < j, with (time[i] + time[j]) % 60 == 0.

        Builds a histogram of remainders mod 60: complementary remainders
        r and 60-r pair across buckets, while r == 0 and r == 30 pair
        within their own bucket.
        """
        count = 0
        remainders = [0] * 60
        for t in time:
            remainders[t % 60] += 1
        for r in range(0, 31):
            if remainders[r] == 0:
                continue
            if r == 0 or r == 30:
                # Pairs within the same bucket: C(n, 2). Integer division
                # (the original used `/`, which returns a float on Python 3).
                count += remainders[r] * (remainders[r] - 1) // 2
            else:
                count += remainders[r] * remainders[60 - r]
        return count
import re
import os
from .. import root_dir
# constant and path variables
# Class labels darknet can report: the ten digits plus a generic
# "character" class for letters.
labels = ["0", "1", "2", "3", "4", "5", "6",
          "7", "8", "9", "character"]
darknet_dir = root_dir.darknet_path()
crop_char_img_dir = os.path.join(root_dir.data_path(), "crop_char_img")
os.makedirs(crop_char_img_dir, exist_ok = True)
char_count = 0        # running counter used to name cropped character images
number_plate = ""     # digits accumulated for the plate currently parsed
# Parse darknet's detection log: for each image, collect digit labels into
# number_plate and crop "character" detections out with ImageMagick.
with open(os.path.join(darknet_dir, "result.txt"), "r") as file:
    # Skip the four darknet header lines before the detections start.
    line = file.readline()
    line = file.readline()
    line = file.readline()
    line = file.readline()
    while (not line.find("Predicted") == -1):
        print(number_plate)
        if "Enter Image Path" in line:
            # New image section: remember its path and reset the plate.
            image_path = (line.split(":")[1])[1:]
            print(image_path)
            number_plate = ""
        while(True):
            line = file.readline()
            # Detection lines look like "label: conf% (left x ... )".
            detected_class_label = (line.split("(")[0]).split(":")[0]
            # for detected class labels
            if detected_class_label in labels:
                # for digit class
                if(detected_class_label != "character"):
                    number_plate += detected_class_label
                # for character class
                else:
                    char_count += 1
                    # Enlarge the reported box (skip this portion if you
                    # don't need the extra margin).
                    cor = re.findall(r'\d+', (line.split("(")[1]))
                    left = int (cor[0]) - 10
                    if (left < 0): left = 0
                    top = int (cor[1]) - 25
                    if (top < 0): top = 0
                    width = int (cor[2]) + 30
                    height = int (cor[3]) + 50
                    # Bounding box in imagemagick geometry format (WxH+X+Y).
                    crop = str(width) + "x" + str(height) + "+" + str(left) + "+" + str(top)
                    # Output directory per detected class.
                    output_dir = os.path.join(crop_char_img_dir, detected_class_label)
                    os.makedirs(output_dir, exist_ok = True)
                    input_path = os.path.join(darknet_dir, image_path)
                    output_path = os.path.join(output_dir, str(char_count) + ".jpg")
                    # SECURITY NOTE(review): shell command built from file
                    # contents; paths with spaces/metacharacters will break
                    # or be interpreted by the shell — prefer
                    # subprocess.run([...], shell=False).
                    os.system("convert -crop " + crop + " " + input_path + " " + output_path)
            else:
                # Non-label line ends this image's detections.
                break
import unittest
class Testing(unittest.TestCase):
    """Minimal unittest examples: each test compares two equal values."""

    def test_string(self):
        # Two identical string literals compare equal.
        first = 'some'
        second = 'some'
        self.assertEqual(first, second)

    def test_boolean(self):
        # Two identical booleans compare equal.
        first = True
        second = True
        self.assertEqual(first, second)
# pytest only needs from here...
def test_string_pytest():
    # NOTE(review): 'some' != 'someg', so this test always fails —
    # presumably a deliberate demonstration of a pytest failure; confirm.
    a = 'some'
    b = 'someg'
    assert a == b
def test_boolean():
    """Pytest-style check that two identical booleans are the same object."""
    lhs = True
    rhs = True
    # `is` holds because True is a singleton.
    assert lhs is rhs
# to here
if __name__ == '__main__':
    # Run the unittest cases when executed directly (pytest discovers the
    # module-level test functions on its own).
    unittest.main()
|
class Call(object):
    """A single call-center call record (Python 2 code: print statements)."""
    def __init__(self, idnum, name, phone, time, reason):
        # Call Setup
        self.idnum = idnum    # numeric identifier for the call
        self.name = name      # caller's name
        self.phone = phone    # caller's phone number
        self.time = time      # time the call came in
        self.reason = reason  # reason for the call
    def displayAll(self):
        # Displays all of my caller's information
        print 'ID: {}'.format(self.idnum)
        print 'NAME: {}'.format(self.name)
        print 'PHONE: {}'.format(self.phone)
        print 'TIME: {}'.format(self.time)
        print 'REASON: {}'.format(self.reason)
    # return self
class CallCenter(object):
def __init__(self, calls):
self.calls = calls
self.queue_size = len(calls)
def add(self, idnum, name, phone, time, reason):
new_call = Call(idnum, name, phone, time, reason)
self.calls.append(new_call)
return self
def info(self):
for call in self.calls:
print call.idnum
print call.name
print call.phone
print call.phone
print call.time
print call.reason
print 'The length of the que is', self.queue_size
return self
def remove(self):
for idx in range(0, len(self.calls) - 1):
self.calls[idx] = self.calls[idx + 1]
self.calls.pop()
return self
def find_and_remove(self, phone_number):
for idx in range(0, len(self.calls)):
if self.calls[idx].caller_phone_number == phone_number:
for idx2 in range(idx, len(self.calls) - 1):
self.calls[idx2] = self.calls[idx2 + 1]
self.calls.pop()
break
return self
def sort(self):
pass
# Ad-hoc manual test script (Python 2): build a queue, add a call, and
# drain it while printing the queue state.
# a = Call(1, 'Noel', '(999)-999-9999', '12:00PM', 'physical exam')
d = Call(3, 'Bob', '(000)-000-0000', '12:00PM', 'physical exam')
b = CallCenter([d])
# c = Call(2, 'Bob', '(000)-000-0000', '12:00PM', 'physical exam')
# print b.info()
b.calls
b.add(3, 'Bob', '(000)-000-0000', '12:00PM', 'physical exam')
print b.info()
print b.calls
b.remove()
print b.info()
b.remove()
print b.info()
# NOTE(review): with two calls queued, this third remove() pops an empty
# list and raises IndexError under the original remove() implementation.
b.remove()
|
import functools
from django.core.exceptions import ValidationError
from django.views.defaults import permission_denied, page_not_found
from django.views.generic import TemplateView, FormView
from django.shortcuts import redirect, get_object_or_404
# from django.urls import reverse
from django.db.models import Count
from mastermind.forms import (
GameCreateForm, GameUnconfirmedOptionsForm,
GameSubmissionForm, GameAdminForm,
)
from mastermind.models import Game, Slot, Option, Submission, SubmissionSlot
class Home(TemplateView):
    """Front page: the visitor's games and submissions, plus all open games
    and a form for creating a new game."""
    template_name = 'mastermind/home.html'

    def get_context_data(self, **kwargs):
        data = super(Home, self).get_context_data(**kwargs)
        profile = self.request.profile
        if profile:
            # Annotate each owned game with how many submissions it has.
            own_games = profile.game_set.all().annotate(
                submission_count=Count('submission'))
            my_submissions = profile.submission_set.all()
        else:
            # Anonymous visitors own nothing.
            own_games = []
            my_submissions = []
        data['own_games'] = own_games
        data['my_submissions'] = my_submissions
        data['games'] = Game.objects.filter(mode=Game.OPEN)
        data['game_create_form'] = GameCreateForm()
        return data
class GameCreate(FormView):
    """Create a game together with its slots and canonical options."""
    form_class = GameCreateForm
    template_name = 'mastermind/game_create.html'
    def get_initial(self):
        # Allow pre-filling the title via ?title=... in the URL.
        return {'title': self.request.GET.get('title', '')}
    def form_valid(self, form):
        game = Game(owner=self.request.get_or_create_profile(),
                    title=form.cleaned_data['title'])
        slots = []
        for i, s in enumerate(form.cleaned_data['slots']):
            # Slot positions are 1-based.
            slot = Slot(game=game, stem=s, position=i + 1)
            try:
                slot.clean()
            except ValidationError as exn:
                # Surface model validation problems on the form field.
                form.add_error('slots', exn)
                return self.form_invalid(form)
            slots.append(slot)
        options = []
        for o in form.cleaned_data['options']:
            option = Option(game=game, kind=Option.CANONICAL, text=o)
            try:
                option.clean()
            except ValidationError as exn:
                form.add_error('options', exn)
                return self.form_invalid(form)
            options.append(option)
        game.save()
        for x in slots + options:
            # Re-assigning refreshes the FK id now that game has a pk.
            x.game = x.game  # Update game_id
        Slot.objects.bulk_create(slots)
        Option.objects.bulk_create(options)
        return redirect('game_admin', pk=game.pk)
def single_game_decorator(middleware):
    """Class-decorator factory for game-scoped views.

    Wraps a class-based view so that dispatch() loads the Game from the
    `pk` URL kwarg, runs `middleware(request, game)` first (returning its
    response — e.g. a 403/404 — if it produces one), stores the game on
    the view instance, and get_context_data() exposes it as
    context['game'].
    """
    def decorator(cls):
        dispatch = cls.dispatch
        get_context_data = cls.get_context_data
        @functools.wraps(dispatch)
        def dispatch_wrapped(self, request, *args, **kwargs):
            # Re-bind the original function to this instance via the
            # descriptor protocol, so super-style chaining still works.
            super_func = dispatch.__get__(self, type(self))
            game = get_object_or_404(Game.objects, pk=kwargs.pop('pk'))
            response = middleware(request, game)
            if response is not None:
                # Middleware rejected the request (permission/visibility).
                return response
            self.game = game
            return super_func(request, *args, **kwargs)
        @functools.wraps(get_context_data)
        def get_context_data_wrapped(self, **kwargs):
            super_func = get_context_data.__get__(self, type(self))
            data = super_func(**kwargs)
            data['game'] = self.game
            return data
        cls.dispatch = dispatch_wrapped
        cls.get_context_data = get_context_data_wrapped
        return cls
    return decorator
@single_game_decorator
def single_game(request, game):
    """Middleware: non-open games are hidden from the public as 404s."""
    if game.mode == Game.OPEN:
        return None
    return page_not_found(request, exception=None)
@single_game_decorator
def single_game_admin(request, game):
    """Middleware: only the game's owner or a superuser may administer it."""
    is_allowed = game.owner == request.profile or request.user.is_superuser
    if not is_allowed:
        return permission_denied(request, exception=None)
@single_game_admin
class GameAdmin(FormView):
    """Owner-facing admin page: edit a game's options, slots and mode."""
    template_name = 'mastermind/game_admin.html'
    form_class = GameAdminForm
    def get_form_kwargs(self, **kwargs):
        # The form builds its dynamic option/slot fields from the game.
        data = super(GameAdmin, self).get_form_kwargs(**kwargs)
        data['game'] = self.game
        return data
    def form_valid(self, form):
        options = list(self.game.option_set.all())
        new_options = []
        for option_text in form.cleaned_data['new_options']:
            option = Option(game=self.game, text=option_text,
                            kind=Option.CANONICAL)
            option.clean()
            new_options.append(option)
        # Look up any option (existing or just created) by its text.
        option_map = {o.text: o for o in options + new_options}
        for option in options:
            data = form.cleaned_option(option)
            target = data['alias_target']
            if target == '':
                # Empty target demotes the option to unconfirmed.
                option.kind = Option.UNCONFIRMED
            elif target == option.text:
                # Self-target marks the option canonical.
                option.kind = Option.CANONICAL
            else:
                # Otherwise the option becomes an alias of the target.
                option.kind = Option.ALIAS
                option.alias_target = option_map[target]
            option.clean()
        slots = list(self.game.slot_set.all())
        for slot in slots:
            data = form.cleaned_slot(slot)
            slot.position = data['position']
            slot.stem = data['stem']
            if data['key']:
                slot.key = option_map[data['key']]
            else:
                slot.key = None
        # New slots are appended after the existing ones.
        next_position = len(slots) + 1
        for stem in form.cleaned_data['new_slots']:
            slots.append(Slot(
                game=self.game,
                position=next_position,
                stem=stem,
                key=None))
        for o in new_options + options:
            # Re-assigning refreshes the FK id once targets have pks.
            o.alias_target = o.alias_target  # Update alias_target_id
            o.save()
        for s in slots:
            s.key = s.key  # Update key_id
            s.save()
        self.game.mode = form.cleaned_data['mode']
        self.game.save()
        return redirect('game_admin', pk=self.game.pk)
@single_game_admin
class GameUnconfirmedOptions(FormView):
    """Admin page for resolving unconfirmed options: each can be promoted
    to canonical or turned into an alias of a (possibly new) canonical
    option."""
    template_name = 'mastermind/game_unconfirmed_options.html'
    form_class = GameUnconfirmedOptionsForm
    def get_options(self):
        return Option.objects.filter(game=self.game, kind=Option.UNCONFIRMED)
    def get_form_kwargs(self, **kwargs):
        # The form renders one text field per unconfirmed option.
        data = super(GameUnconfirmedOptions, self).get_form_kwargs(**kwargs)
        data['options'] = self.get_options()
        return data
    def form_valid(self, form):
        # Map each unconfirmed option to the canonical text the admin chose
        # (blank entries are left unconfirmed).
        canonical_texts = {}
        for o in form.options:
            k = 'o-%s' % o.pk
            c = form.cleaned_data[k]
            if not c:
                continue
            canonical_texts[o] = c
        qs = Option.objects.filter(
            game=self.game, text__in=canonical_texts.values())
        existing = {o.text: o for o in qs}
        new_options = []
        errors = False
        save_options = []
        for o, c in canonical_texts.items():
            try:
                canonical = existing[c]
            except KeyError:
                # No option with that text exists yet.
                if c == o.text:
                    # The option's own text was chosen: promote in place.
                    o.kind = Option.CANONICAL
                    save_options.append(o)
                    continue
                else:
                    # Create a brand-new canonical option to alias to.
                    canonical = Option(
                        game=self.game, kind=Option.CANONICAL, text=c)
                    try:
                        canonical.clean()
                    except ValidationError as exn:
                        form.add_error(None, exn)
                        return self.form_invalid(form)
                    existing[c] = canonical
                    new_options.append(canonical)
            else:
                if o == canonical:
                    # The chosen text resolves to the option itself.
                    o.kind = Option.CANONICAL
                    save_options.append(o)
                    continue
            k = 'o-%s' % o.pk
            if canonical.is_alias:
                # Danish: "%s is an alias for %s".
                form.add_error(
                    k, '%s er et alias for %s' % (c, canonical.alias_target))
                errors = True
            elif canonical.is_unconfirmed:
                # Danish: "%s is unconfirmed".
                form.add_error(k, '%s er ubekræftet' % c)
                errors = True
            else:
                o.kind = Option.ALIAS
                o.alias_target = canonical
                save_options.append(o)
        if errors:
            return self.form_invalid(form)
        # Validate everything before saving anything.
        try:
            for o in new_options + save_options:
                o.clean()
        except ValidationError as exn:
            form.add_error(None, exn)
            return self.form_invalid(form)
        for o in new_options:
            o.save()
        for o in save_options:
            # Re-assigning refreshes alias_target_id now targets have pks.
            o.alias_target = o.alias_target
            o.save()
        return redirect('game_admin', pk=self.game.pk)
@single_game
class GameSubmission(FormView):
    """Player-facing view for making a guess (one option per slot) in a game.

    Renders previous submissions with per-slot Mastermind-style feedback and
    accepts a new submission. `self.game` is injected by @single_game;
    `self.request.profile` is the (possibly absent) player profile.
    """
    form_class = GameSubmissionForm
    template_name = 'mastermind/game_submission.html'
    def get_context_data(self, **kwargs):
        data = super(GameSubmission, self).get_context_data(**kwargs)
        slots = list(self.game.slot_set.all())
        # Options that are the correct key of some slot.
        correct_options = set(s.key for s in slots if s.key is not None)
        if self.request.profile:
            submissions = Submission.objects.filter(
                profile=self.request.profile, game=self.game)
            submission_slot_qs = SubmissionSlot.objects.filter(
                submission__in=submissions)
            submission_slots = {(ss.submission, ss.slot): ss.option
                                for ss in submission_slot_qs}
        else:
            # Anonymous visitor: nothing to display (the loop below is a
            # no-op, so submission_slots is never read).
            submissions = []
        rows = []
        for submission in submissions:
            row = []
            for slot in slots:
                try:
                    chosen = submission_slots[submission, slot]
                except KeyError:
                    # No option chosen for this slot in this submission.
                    row.append(dict())
                    continue
                # Resolve aliases to their canonical option before grading.
                if chosen.kind == Option.ALIAS:
                    option = chosen.alias_target
                else:
                    option = chosen
                # Mastermind-style feedback symbols per slot.
                if slot.key is None or option.kind == Option.UNCONFIRMED:
                    information = '?' # 'unknown'
                elif slot.key == option:
                    information = '\N{BLACK CIRCLE}' # 'correct'
                elif option in correct_options:
                    information = '\N{WHITE CIRCLE}' # 'other'
                else:
                    information = '\N{HEAVY BALLOT X}' # 'wrong'
                row.append(dict(option=chosen, information=information))
            rows.append(dict(submission=submission, slots=row))
        data['slots'] = slots
        data['submissions'] = rows
        return data
    def get_form_kwargs(self, **kwargs):
        slots = self.game.slot_set.all()
        # Pre-fill the form with the player's latest submission, if any.
        slots_initial = {}
        if self.request.profile:
            try:
                submission = Submission.objects.filter(
                    profile=self.request.profile, game=self.game).latest()
            except Submission.DoesNotExist:
                pass
            else:
                for s in submission.submissionslot_set.all():
                    slots_initial[s.slot] = s.option.text
        data = super(GameSubmission, self).get_form_kwargs(**kwargs)
        data['game'] = self.game
        data['slots'] = slots
        data['slots_initial'] = slots_initial
        return data
    def form_valid(self, form):
        profile = self.request.get_or_create_profile()
        submission = Submission(profile=profile, game=self.game)
        slots = []
        for slot in form.slots:
            k = 's-%s' % slot.pk
            option = form.cleaned_data[k]
            if option is None:
                continue
            # The form may have produced a brand-new (unsaved) Option.
            if not option.pk:
                option.save()
            slots.append(SubmissionSlot(submission=submission,
                                        slot=slot,
                                        option=option))
        submission.save()
        for s in slots:
            s.submission = s.submission # Update submission_id
            s.save()
        return redirect('game_submission_create', pk=self.game.pk)
|
def pointless(*args):
    """Ignore every argument and always return the string "Rick Astley"."""
    answer = "Rick Astley"
    return answer
|
class View:
    """Trivial renderer stub: every format method echoes the action back."""

    def __init__(self):
        # No state to initialize.
        pass

    def _echo(self, action):
        # Shared identity pass-through used by all render formats.
        return action

    def json(self, action):
        return self._echo(action)

    def html(self, action):
        return self._echo(action)

    def xml(self, action):
        return self._echo(action)
|
# Dependencies
import pymongo
import datetime
# The default port used by MongoDB is 27017
# https://docs.mongodb.com/manual/reference/default-mongodb-port/
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# Declare the database
db = client.fruits_db
# Declare the collection
collection = db.fruits_db
# Part I
# A dictionary that represents the document to be inserted
post = {
    'vendor': 'fruit star',
    'fruit': 'raspberry',
    'quantity': 21,
    'ripeness': 2,
    'date': datetime.datetime.utcnow()
}
# Insert the document into the database
# The database and collection, if they don't already exist, will be created at this point.
collection.insert_one(post)
# Part II: build a document from user input.
vendor = input("Vendor name: ")
fruit_type = input("Type of fruit: ")
# Cast the numeric answers to int so stored documents match the Part I
# document's types (the original stored raw input strings, making numeric
# queries/aggregations inconsistent across documents).
quantity = int(input("Number of boxes received: "))
ripeness = int(input("Ripeness of fruit (1 is unripe; 2 is ripe, 3 is over-ripe: "))
post = {
    'vendor': vendor,
    'fruit': fruit_type,
    'quantity': quantity,
    'ripeness': ripeness,
    'date': datetime.datetime.utcnow()
}
collection.insert_one(post)
# Print every document currently in the collection.
results = db.fruits_db.find()
for result in results:
    print(result)
|
from selenium import webdriver
import time
import random
# Lowercase ASCII alphabet used to build random email local parts.
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
           "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# Endless loop: each pass signs a freshly generated address up on the page.
while True:
    # Four random letters, e.g. "qzxa".
    random1 = "".join(random.choice(letters) for _ in range(4))
    username = random1 + '@hotmail.com'
    driver = webdriver.Chrome('./drivers/chromedriver')
    driver.get("https://robinhood.com/gb/en/about/?referral_code=cG5t")
    driver.find_element_by_class_name("css-yjt382-EntryForm").send_keys(username)
    time.sleep(1)
    driver.find_element_by_class_name("css-jv7v4j-EntryForm").click()
    time.sleep(2)
    driver.close()
# Print a star pattern: rows grow from 1 to 4 stars, then shrink back to 1.
for row in range(0, 7):
    # Rows 0..3 have row+1 stars; rows 4..6 have 7-row stars (3, 2, 1).
    stars = row + 1 if row <= 3 else 7 - row
    print("* " * stars, end="")
    print("\r")
|
import sys
import gitlab
from functions import show_vars, show_vars_for_all, add_variable, change_variable, delete_variable
from support import parse_file_variable, parse_single_variable
from namespace_definer import namespace
# CLI dispatch: each flag triggers an action against the GitLab API for the
# namespace resolved by namespace_definer. Flags are independent and may be
# combined; each reads its argument from the position after the flag.
# -a <k=v>   add a single variable (-m additionally marks it masked)
if "-a" in sys.argv:
    masked = False
    if "-m" in sys.argv:
        masked = True
    add_variable(data=parse_single_variable(
        sys.argv[sys.argv.index('-a')+1]), masked=masked, namespace=namespace)
# -f <file>  add variables parsed from a file
if "-f" in sys.argv:
    add_variable(data=parse_file_variable(
        sys.argv[sys.argv.index('-f')+1]), namespace=namespace)
# -l   list variables for the namespace
if "-l" in sys.argv:
    show_vars(namespace)
# -la  list variables for all projects in the namespace
if "-la" in sys.argv:
    show_vars_for_all(namespace)
# -rm <name[,name...]>  delete one or more variables by name
if "-rm" in sys.argv:
    data = str(sys.argv[sys.argv.index('-rm')+1]).split(",")
    # Iterate through argument, get list no matter if we get one ore multiple variables
    for arg in data:
        try:
            delete_variable(data=arg, namespace=namespace)
        except gitlab.exceptions.GitlabDeleteError as e:
            # Missing variables are reported but do not abort the batch.
            print(
                f" ===== no such variable {arg} =====")
            continue
# -rmf <file>  delete variables listed in a file
if "-rmf" in sys.argv:
    data = sys.argv[sys.argv.index('-rmf') + 1]
    delete_variable(data=parse_file_variable(data),
                    namespace=namespace, fromfile=True)
# -cv <k=v[,k=v...]>  change the value of one or more existing variables
if "-cv" in sys.argv:
    data = str(sys.argv[sys.argv.index('-cv')+1]).split(",")
    # Iterate through argument, get list no matter if we get one ore multiple variables (same as -rm)
    for arg in data:
        try:
            change_variable(data=parse_single_variable(arg),
                            namespace=namespace)
        except gitlab.exceptions.GitlabDeleteError as e:
            print(
                f" ===== no such variable {arg} =====")
            continue
|
import requests
# Simple reachability check: fetch the page and report HTTP status.
response = requests.get("http://www.cyclonemfg.com/")
print(response.status_code)
print(response.ok)
|
# alphabet = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',\
# 13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z'}
# userInput = input('Enter a string: ').lower()
# vowels = ['a', 'e', 'i', 'o', 'u']
# counter = 0
# for letter in userInput:
# for vowel in vowels:
# if vowel == letter:
# counter += 1
# print('Number of vowels: ' + str(counter))
'''
Counts how many times a short target substring (e.g. "bob") occurs
inside a larger input string.
'''
# userInput = input('Enter a string: ').lower()
# counter = 0
# for index in range(len(userInput)):
# if index+3 > len(userInput):
# break
# if userInput[index] == 'b':
# if userInput[index+1] == 'o':
# if userInput[index+2] == 'b':
# counter += 1
# print('Number of times "bob" occurs is: ' + str(counter))
##userStr = input('Enter your string: ')
##subStr = ''
##longStr = ''
##for i in range(len(userStr)):
## if i +1 >= len(userStr):
## break
## elif userStr[i] <= userStr[i+1]:
## subStr += userStr[i]
## else:
## if userStr[i-1] <= userStr[i]:
## subStr += userStr[i]
## if len(longStr) < len(subStr):
## longStr = subStr
## subStr = ''
##print('Longest substring in alphabetical order is: '+longStr)
##
# Sum of the integers 1..9; sum() consumes the range directly, so there is
# no need to materialize an intermediate list first.
print(sum(range(1, 10)))
import web
class WayPoint:
    """web.py handler that renders the "waypoint" template on GET."""
    def GET(self):
        # NOTE(review): `render` must be a module-level web.template renderer
        # defined elsewhere in this application — not visible in this chunk.
        return render.waypoint()
|
#Escribir un programa que le diga al usuario que ingrese una cadena.
# El programa tiene que evaluar la cadena y decir cuantas letras mayúsculas tiene.
def conytarMayus(cadena: str):
    """Return how many uppercase letters (Spanish alphabet) `cadena` contains.

    Fixes two defects in the original: the counter started at 1 instead of 0,
    and it counted each distinct uppercase letter at most once (membership
    per alphabet letter) instead of counting every uppercase character that
    actually appears in the string.
    """
    mayusculas = "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ"
    return sum(1 for caracter in cadena if caracter in mayusculas)
#mensaje = input('ingrese mensaje: ')
#print(conytarMayus(mensaje))
#Ejercicio 6
# Escribir un pequeño programa donde:
# - Se ingresa el año en curso.
# - Se ingresa el nombre y el año de nacimiento de tres personas.
# - Se calcula cuántos años cumplirán durante el año en curso.
# - Se imprime en pantalla.
#respuesta:
# añoCurso = input('Ingrese el año de curso actual: ')
# fehActual = int(añoCurso)
# vuelta = 0
# datos = []
# estudiantes= []
# while vuelta < 3:
# nombre = input('ingrese nombre de estudiante: ')
# añoNacimiento = input('Ingrese el año de nacimiento: ')
# fecha = int(añoNacimiento)
# datos.append(nombre)
# datos.append(fecha)
# estudiantes.append(datos)
# datos = []
# vuelta += 1
def calcuEdad(añoActual, Participantes):
    """Print each participant's age this year.

    Participantes is an iterable of [name, birth_year] pairs.
    """
    for persona in Participantes:
        cumplidos = añoActual - persona[1]
        print('La edad de ' + persona[0] + ' es ' + str(cumplidos))
#calcuEdad(fehActual, estudiantes)
def contar_vocales(palabra):
    """Print, one line per vowel, how often each vowel occurs in `palabra`.

    The original also defined an unused counter (contaVocal); removed.
    """
    vocales = 'aeiou'
    for vocal in vocales:
        print(f'{vocal} : {palabra.count(vocal)}')
#palabra = input('Ingrese la palabra: ')
#contar_vocales(palabra)
#Definir una lista con un conjunto de nombres,
# imprimir la cantidad de comienzan con la letra a.
# También se puede hacer elegir al usuario la letra a buscar.
# (Un poco mas emocionante)
# Sample names used by the usuarioPorLetra demo below.
usuarios = ['cercio', 'maria', 'simon', 'andres', 'delcy', 'karelba', 'mariela']
def usuarioPorLetra(usuarios, letra):
    """Count how many names in `usuarios` start with `letra`."""
    return sum(1 for usuario in usuarios if usuario[0] == letra)
# Interactive demo: ask for a letter and report how many names start with it.
letra = input('introducir letra: ')
print(usuarioPorLetra(usuarios, letra))
|
import random
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import graphviz
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from pandarallel import pandarallel
from pandas import DataFrame
from sklearn import tree
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import kneighbors_graph
from sklearn.utils import resample
from wrangling.utils import convert_to_python_datetime, convert_datetime_cols_to_python_datetime, \
convert_timedelata_to_days, filter_based_on_ranges
pandarallel.initialize()
# Load the orders table and drop rows unusable for modelling.
train_data = pd.read_csv('/Users/saransh/Desktop/practicum/data/user_order_train.csv')
train_data = train_data[~train_data.user_id.isna()]  # orders not corresponding to any user
train_data = train_data[~train_data.ship_date.isna()]  # orders not shipped
train_data = train_data[~train_data.stylist_id.isna()]  # no stylists assigned
transactions_type_mapping = dict(
    orders_id=np.int64,
    user_id=np.int64,
    stylist_id=np.int64,
    items_count=np.int64,
    kept_count=np.int64,
    order_rank=np.int64,
    num_child=np.int64
)
# Cast each column to its intended dtype. Loop variable renamed from `type`
# to `col_type` so the builtin `type` is no longer shadowed.
for col, col_type in transactions_type_mapping.items():
    train_data[col] = train_data[col].astype(col_type)
# convert datetime cols to appropriate types
datetime_conversion_func = partial(convert_to_python_datetime, format_str='%Y-%m-%d')
convert_datetime_cols_to_python_datetime(train_data, conversion_func=datetime_conversion_func)
train_data = train_data.set_index(['user_id', 'orders_id'])
# Derived features: shipping lag (in days) and fraction of items kept.
train_data['request_ship_diff'] = train_data.ship_date - train_data.request_date
train_data['keep_rate'] = train_data['kept_count']/train_data['items_count']
train_data['request_ship_diff'] = train_data['request_ship_diff'].apply(convert_timedelata_to_days)
Y = train_data['order_lag']
X: DataFrame = train_data[
    ['shipped_price', 'kept_price', 'items_count', 'box_price', 'kept_count', 'order_rank',
     'num_child', 'request_ship_diff']
]
# Standardize features before PCA.
X_ = X - X.mean(axis=0)  # center
X_ /= X.std(axis=0)  # scale
pca = PCA(svd_solver='full')
'''
pca_scores = []
for n in range(0,9):
pca = PCA(svd_solver='full', n_components=n)
pca_scores.append(np.mean(cross_val_score(pca, X_, cv=5)))
'''
X_r = pca.fit(X_).transform(X_)
p_comps = pd.DataFrame(X_r)
plt.figure()
axs: Axes = plt.gca()
axs.scatter(p_comps[0], p_comps[1], marker='.', alpha=0.04)
axs.set_title("Biplot")
axs.set_xlabel("PC1")
axs.set_ylabel("PC2")
plt.show()
plt.figure()
axs: Axes = plt.gca()
axs.plot(range(X_.shape[1]), pca.explained_variance_ratio_, 'bo-')
axs.axhline(y=1/(X_.shape[1]), linestyle='dashed', color='red', label='Average variance per component')
axs.legend()
axs.set_xlabel("Principal components")
axs.set_ylabel("Relative variance explained")
plt.show()
# Keep the first 5 principal components for clustering.
p_comps = p_comps.iloc[:, :5]
# Grid search: mean silhouette per cluster count, averaged over several
# connectivity-graph neighbor sizes (fits run concurrently in threads).
silhouettes = []
for n_cluster in range(2,8):
    print(f"Starting with cluster size {n_cluster}")
    sil_scores = []
    futures = []
    executor = ThreadPoolExecutor(max_workers=6)
    cl = partial(AgglomerativeClustering, n_clusters=n_cluster, linkage='ward')
    for conn in [10, 15, 20, 30, 40, 50]:
        connectivity = kneighbors_graph(p_comps, n_neighbors=conn, include_self=False)
        cl_ = cl(connectivity=connectivity)
        futures.append(executor.submit(cl_.fit, p_comps))
    # shutdown() waits for all submitted fits to finish.
    executor.shutdown()
    for ft in futures:
        sil_scores.append(silhouette_score(p_comps, ft.result().labels_))
    silhouettes.append(np.mean(sil_scores))
# Second pass: fix the connectivity at 50 neighbors, vary only cluster count.
executor = ThreadPoolExecutor(max_workers=4)
futures = []
silhouettes = []
connectivity = kneighbors_graph(p_comps, n_neighbors=50, include_self=False)
cl = partial(AgglomerativeClustering, connectivity=connectivity, linkage='ward')
for n_cluster in range(2,6):
    print(f"Starting with cluster size {n_cluster}")
    cl_ = cl(n_clusters=n_cluster)
    futures.append(executor.submit(cl_.fit, p_comps))
executor.shutdown()
executor = ThreadPoolExecutor(max_workers=4)
result_fts = []
for ft, n_cluster in zip(futures, range(2,6)):
    print(f"Computing Silhouette for {n_cluster}")
    result_fts.append(executor.submit(silhouette_score, p_comps, ft.result().labels_))
executor.shutdown()
for ft in result_fts:
    silhouettes.append(ft.result())
# Hard-coded silhouette results from earlier runs (kept for the plot below).
sil_15 = [0.18627172270800177, 0.14481988402718535, 0.14712724372975608, 0.14444048153087216]
sil_20 = [0.15975867873403884, 0.13524309234040838, 0.12276975129993783, 0.1257405254250921]
sil_25_30_and_50 = [0.13951766770474536, 0.13751989917711452, 0.14666958904917365, 0.14974942877045486]
avg_silhouettes_over_neighbors = [0.15450515839588114, 0.13880298045604902, 0.14054978298997323,
                                  0.14184250997247824, 0.12330216541761356, 0.11209507135789999]
plt.figure()
axs: Axes = plt.gca()
axs.plot(range(2,6), sil_25_30_and_50, 'bo-')
axs.set_xlabel("Cluster sizes")
axs.set_ylabel("Silhouette index")
axs.set_title("Silhouette metric")
plt.show()
# Final clustering: 5 clusters, 50-neighbor connectivity.
connectivity = kneighbors_graph(p_comps, n_neighbors=50, include_self=False)
cl = partial(AgglomerativeClustering, connectivity=connectivity, linkage='ward')
p_cl_ = cl(n_clusters=5).fit(p_comps)
random.seed(60616)
# Bootstrap stability check: recluster resamples, record sorted cluster means.
cl_means = pd.DataFrame() # Storing Bootstrapped clustering results
for num_samples in range(0, 101):
    print(f"Starting iter {num_samples}")
    sampled_data = resample(p_comps, stratify=p_cl_.labels_) # stratified bootstrap based on initial clustering labels
    order_lag = train_data.reset_index().order_lag.iloc[sampled_data.index]
    cl_ = cl(n_clusters=5).fit(sampled_data)
    cl_means[num_samples] = sorted(order_lag.groupby(cl_.labels_).mean().tolist())
# Scatter of clusters in PC1/PC2 space (low alpha shows density).
# NOTE(review): cl_ here is the *last bootstrap* fit from the loop above,
# not the full-data fit p_cl_ — confirm which labelling is intended.
plt.figure()
axs: Axes = plt.gca()
axs.set_xlabel("PC1")
axs.set_ylabel("PC2")
p_comps_grp = p_comps.groupby(cl_.labels_)
for key, grp in p_comps_grp:
    axs.scatter(grp[0], grp[1], marker='.', label=key, alpha=0.04)
axs.legend()
plt.show()
# Same scatter without alpha blending.
plt.figure()
axs: Axes = plt.gca()
axs.set_xlabel("PC1")
axs.set_ylabel("PC2")
axs.set_title("Density biplot")
p_comps_grp = p_comps.groupby(cl_.labels_)
for key, grp in p_comps_grp:
    axs.scatter(grp[0], grp[1], marker ='.', label=key)
axs.legend()
plt.show()
# create a decision tree to profile these clusters
train_data['cl_labels'] = p_cl_.labels_
train_data['cl_labels'].value_counts()
train_data.groupby('cl_labels').order_lag.mean()
filter_based_on_ranges(train_data, 'order_lag', upper_bound=400).boxplot(
    "order_lag", by='cl_labels', vert=False, showmeans=True)
train_data.boxplot(
    "order_lag", by='cl_labels', vert=False, showmeans=True)
# Stratified split on the cluster labels for the profiling classifier.
X_train, X_test, y_train, y_test = train_test_split(
    X, cl_.labels_, train_size=0.8, test_size=0.2, random_state=60616, stratify=cl_.labels_
)
# Hyperparameter sweep over tree depth, scored by 5-fold CV accuracy.
trees = []
tree_acc_scores_validation = []
tree_acc_std_errors = []
tree_classifier = partial(
    tree.DecisionTreeClassifier, criterion="entropy", random_state=60616, class_weight="balanced"
)
for tree_depth in range(2,25):
    trees.append(
        tree_classifier(max_depth=tree_depth)
    )
for t in trees:
    scores = cross_val_score(t, X_train, y_train, cv=5)
    tree_acc_scores_validation.append(scores.mean())
    tree_acc_std_errors.append(scores.std())
plt.figure()
axs: Axes = plt.gca()
axs.plot(range(2, 25), tree_acc_scores_validation, 'bo--')
axs.set_xlabel("Tree depth")
axs.set_ylabel("Validation set accuracy")
axs.set_xticks(list(range(2, 25)))
axs.set_title("Tree pruning/ Hyperparameter selection")
plt.show()
# Final profiling tree at depth 4; score on the held-out test split.
t = tree_classifier(max_depth=4).fit(X_train, y_train)
t.score(X_test, y_test)
t.feature_importances_
# Per-cluster boxplots of each feature, for interpreting the clusters.
axs: Axes = train_data.boxplot('order_rank', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Order rank")
axs: Axes = train_data[(train_data.request_ship_diff < 50) & (train_data.request_ship_diff > 0)].boxplot('request_ship_diff', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Lag b/w request date and ship date")
axs: Axes = train_data.boxplot('kept_count', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Kept count")
axs: Axes = train_data.boxplot('keep_rate', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Keep rate")
axs: Axes = train_data.boxplot('box_price', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Box price")
axs: Axes = train_data.boxplot('num_child', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_ylabel("Number of children")
axs.set_title("Boxplot grouped by cluster labels")
axs: Axes = train_data.boxplot('kept_price', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Kept price")
plt.show()
axs: Axes = train_data.boxplot('order_lag', by='cl_labels')
axs.set_xlabel("Cluster labels")
axs.set_title("Boxplot grouped by cluster labels")
axs.set_ylabel("Order lag")
plt.show()
# Export the fitted tree as a Graphviz diagram, then persist the labelled data.
dot_data = tree.export_graphviz(
    t, feature_names=X.columns, class_names=['0', '1', '2', '3', '4'], filled=True, rounded=True,
)
graph = graphviz.Source(dot_data)
graph.render("Cluster profiling", "visualizations", cleanup=True)
train_data.reset_index(inplace=True)
train_data.to_csv('/Users/saransh/Desktop/practicum/data/order_clustered.csv', index=False)
|
from django.urls import re_path
from .views import (
OrganisationDetailView,
OrganisationsFilterView,
SupportedOrganisationsView,
)
# URL routes for browsing organisations, from most to least specific.
urlpatterns = [
    re_path(
        r"^$", SupportedOrganisationsView.as_view(), name="organisations_view"
    ),
    # canonical URL for a single organisation record
    re_path(
        r"^(?P<organisation_type>[-\w]+)/(?P<official_identifier>[-\w]+)/(?P<date>\d{4}-\d{2}-\d{2})/$",
        OrganisationDetailView.as_view(),
        name="organisation_view",
    ),
    # a list of 'generations' of an organisation
    re_path(
        r"^(?P<organisation_type>[-\w]+)/(?P<official_identifier>[-\w]+)/$",
        OrganisationsFilterView.as_view(),
        name="organisations_filter_by_identifier",
    ),
    # list of organisations of a given type
    re_path(
        r"^(?P<organisation_type>[-\w]+)/$",
        OrganisationsFilterView.as_view(),
        name="organisations_filter_by_type",
    ),
]
|
def countFreq(arr, num):
    """Return how many times `num` occurs in the sequence `arr`.

    The original recursed on array slices, copying O(n log n) elements just
    to visit each element once. A single linear pass (list.count) is simpler
    and faster, and behaves identically on any sequence, sorted or not.
    """
    return arr.count(num)
# Demo: 6 occurs twice in the sample array, so this prints 2.
arr = [1,2,3,4,4,4,5,6,6,7]
print(countFreq(arr,6))
import os
import sys
import pandas as pd
import numpy as np
import scipy.stats as stats
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['pdf.fonttype'] = 42
import statsmodels.stats.multitest as multitest
##########################################################################################
def read_input(fileName):
    """Parse a two-column TSV into a dict: first column -> second column."""
    fam_dict = {}
    with open(fileName, "r") as handle:
        for raw_line in handle:
            fields = raw_line.strip().split("\t")
            fam_dict[fields[0]] = fields[1]
    return fam_dict
def get_repeat_fam_dict(fileName, boundList):
    """Parse a DESeq2 results table into {repeat_family: stats}.

    Each row's stats are [log2FC, padj, pvalue, baseMean, bound_flag];
    families with several loci accumulate a 2-D array via vstack.
    The family name is the instance name minus its trailing "_<n>" parts.

    Fix: np.float was removed in NumPy 1.24 — use the builtin float,
    which np.float aliased anyway.
    """
    rep_family = {}
    with open(fileName, "r") as f:
        next(f)  # skip the header row
        for line in f:
            values = line.strip().split("\t")
            values2 = values[0].split("_")
            # NOTE(review): names without any "_" would hit the final branch
            # and raise IndexError — assumes >= 2 underscore-separated parts.
            if len(values2) == 2:
                rfam = values2[0]
            elif len(values2) == 3:
                rfam = values2[0]+"_"+values2[1]
            else:
                rfam = values2[0]+"_"+values2[1]+"_"+values2[2]
            # Will store:
            # l2FC, p-adj, pval, baseMean
            # values[2], values[5], values[4], values[1]
            if values[5] == "NA":
                # NOTE(review): in the NA branch the other columns stay
                # strings, so the np.array below will not be numeric.
                values[5] = np.nan
            else:
                values[1] = float(values[1])
                values[2] = float(values[2])
                values[4] = float(values[4])
                values[5] = float(values[5])
            # Checking if instance is bound
            if values[0] in boundList:
                b_flag = 1
            else:
                b_flag = 0
            rvals = np.array([values[2], values[5], values[4], values[1], b_flag])
            if rfam in rep_family:
                rep_family[rfam] = np.vstack((rep_family[rfam], rvals))
            else:
                rep_family[rfam] = rvals
    return rep_family
def get_bound(fileName):
    """Collect repeat instance names (the first quoted field per line) from a
    GTF-like file, skipping the header. Returns {name: 0}."""
    rep_dict = {}
    with open(fileName, "r") as handle:
        next(handle)
        for raw_line in handle:
            name = raw_line.strip().split("\"")[1]
            rep_dict[name] = 0
    return rep_dict
##########################################################################################
"""
First argument: a file listing the repeat families to be plotted (first column)
and the corresponding label (second column).
Second argument: the output from DESeq2, with p-values and log2FCs.
Third argument: repeats bound by the factor, in GTF format. Repeat-instance
naming must be consistent between the DEA output and this file.
Fourth argument: the suffix for the output file.
"""
# Command-line inputs (see module docstring above for their meaning).
famN_file = sys.argv[1]
file_name = sys.argv[2]
bound_file = sys.argv[3]
suffix = sys.argv[4]
significant_padj = 0.05
# reading families to be plotted
famN_dict = read_input(famN_file)
# Getting bound repeats
bound_dict = get_bound(bound_file)
# Reading data
dea_rep = get_repeat_fam_dict(file_name, bound_dict)
# Initializing lists of values to be used for plotting
flat_labels = []
flat_log2FC = []
flat_type = []  # now type will be bound vs not
# For each repeat to be plotted
for r in famN_dict:
    if r in dea_rep:
        temp_array = dea_rep[r]
        if len(dea_rep[r].shape) > 1:
            # for each locus
            for l in temp_array:
                flat_log2FC.append(l[0])
                flat_labels.append(famN_dict[r])
                # Fix: np.float was removed in NumPy 1.24; builtin float
                # is the equivalent conversion.
                if float(l[4]) == 1:
                    flat_type.append("bound")
                else:
                    flat_type.append("not_bound")
        else:
            print("family ", r, " with only one locus")
    else:
        print(r, "missing from locus analysis")
# Making dataframe
rep_df = pd.DataFrame(np.column_stack([flat_labels, flat_log2FC, flat_type]), columns=["labels", "log2FC", "type"])
rep_df['log2FC'] = rep_df['log2FC'].astype('float')
# Fix: `basestring` exists only in Python 2 (NameError on Python 3); str is
# the correct type for text columns here.
rep_df['labels'] = rep_df['labels'].astype(str)
rep_df['type'] = rep_df['type'].astype(str)
sorted_df = rep_df.sort_values(by='type', ascending=False)
# Plotting
l_order = ["L1PA17", "L1PA16", "L1PA15-16", "L1PA15", "L1PA14", "L1PA13", "L1PA12", "L1PA11", "L1PA10", "L1PA8A","L1PA8","L1PA7","L1PA6","L1PA5","L1PA4", "L1PA3","L1PA2", "L1HS"]
# l_order = ["HERV1 (LTRd)", "HERV9 (LTR12)", "HERVH (LTR7)", "HERVK (LTR22A)", "MER48 (LTR)", "MER92A (LTR)", "ERVL-MaLR (MLT1)", "MamGyp (LTR2b)", "ERVL (MER54A)", "ERVL (LTR82A)", "ERV1 (MER101)", "ERV1 (LTR15)", "LTR69", "LOR1 (LTR)", "HERV3"]
# For numbers
counts_df = rep_df.groupby('labels')['type'].value_counts().unstack().fillna(0)
# reorder by l_order
counts_df = counts_df.reindex(l_order)
tot_locus = counts_df.sum(axis=1)
# "bound/total" annotation per family, drawn under each box below.
percent_lab = [str(int(counts_df.bound[i]))+"/"+str(int(tot_locus[i])) for i in range(counts_df.shape[0])]
# Plotting parameters
y_pos = -6
f, ax = plt.subplots(figsize=(5,3))
cru = sns.boxplot(x="labels", y="log2FC", data=sorted_df, zorder=1, color="lightgrey", fliersize=0, order=l_order)
cru2 = sns.stripplot(x="labels", y="log2FC", hue="type", data=sorted_df, size=3, alpha=0.4, dodge=True, palette={"bound": "orange", "not_bound": "darkgrey"}, zorder=1, order=l_order, jitter=0.2)
for line in range(0,len(percent_lab)):
    cru.text(line, y_pos, percent_lab[line], horizontalalignment='center', size='small', color='black', rotation=45)
plt.xticks(rotation=90)
plt.xlabel("", fontsize=11)
plt.ylim(-10,10)
f.subplots_adjust(bottom=0.2)
cru2.legend(loc='center left', bbox_to_anchor=(1, 0.5))
f.savefig(suffix+"_bound-loci.stripplot_dodged.pdf")
plt.close()
|
from collections import namedtuple
import math
import random
Point = namedtuple("Point","x,y")
def distance(p1, p2):
    """Euclidean distance between two points exposing .x and .y."""
    dx_sq = math.pow(p2.x - p1.x, 2)
    dy_sq = math.pow(p2.y - p1.y, 2)
    return math.sqrt(dx_sq + dy_sq)
def merge_sort(alist):
    """Return a new list with the elements of `alist` in ascending order
    (classic stable top-down merge sort)."""
    if len(alist) <= 1:
        return alist
    half = len(alist) // 2
    return merge(merge_sort(alist[:half]), merge_sort(alist[half:]))
def merge(left, right):
    """Merge two already-sorted lists into one sorted list (stable: ties
    favor the right list, matching the `<` comparison)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def find_closest_pair(ordered_by_x,ordered_by_y):
    """Divide-and-conquer closest pair of points.

    Arguments are the same point set ordered by x and by y. Returns the
    closest (point, point) pair. Base case (<= 3 points) is brute force;
    otherwise recurse on halves and check pairs straddling the split line.
    """
    if len(ordered_by_x) <= 3:
        # Brute force: all ordered pairs with i != j; the dict keyed by
        # distance keeps an arbitrary pair per distance, min() picks closest.
        distances = {}
        for i in range(len(ordered_by_x)):
            for j in range(len(ordered_by_x)):
                #distances[distance(ordered_by_x[i],ordered_by_y[j])] = (ordered_by_x[i],ordered_by_y[j])
                if i != j:
                    distances[distance(ordered_by_x[i],ordered_by_x[j])] = (ordered_by_x[i],ordered_by_x[j])
                    #distances[distance(ordered_by_y[i],ordered_by_y[j])] = (ordered_by_y[i],ordered_by_y[j])
        return distances[min(distances.keys())]
    else:
        mid = len(ordered_by_x)//2
        points = []
        # Best pair from each half.
        # NOTE(review): ordered_by_y is sliced positionally, so each half's
        # y-ordering is only approximate — verify against the standard
        # algorithm, which partitions the y-list by x-membership.
        points.append(list(find_closest_pair(ordered_by_x[:mid],ordered_by_y[:mid])))
        points.append(list(find_closest_pair(ordered_by_x[mid:],ordered_by_y[mid:])))
        distances = {}
        for point_set in points:
            distances[distance(point_set[0],point_set[1])] = point_set
        delta = min(distances.keys())
        # Pairs spanning the dividing line may be closer than delta.
        split_case = check_split_case(ordered_by_x,ordered_by_y,delta)
        if split_case:
            if min(delta,distance(split_case[0],split_case[1])) == delta:
                return distances[min(distances.keys())]
            else:
                return split_case
        else:
            return distances[min(distances.keys())]
def check_split_case(ordered_by_x,ordered_by_y,delta):
    """Look for a pair closer than `delta` straddling the median x line.

    Returns the best such (p, q) pair, or None if none beats delta.
    """
    # Median point defines the vertical strip of half-width delta.
    x_bar = ordered_by_x[len(ordered_by_x)//2]
    # NOTE(review): membership test below is a list scan (O(n) each) —
    # correct but quadratic; a set of x values would be faster.
    x_coordinates_to_check = [elem.x for elem in ordered_by_x if elem.x < x_bar.x + delta and elem.x > x_bar.x - delta]
    # Strip points in y order.
    coordinates = [elem for elem in ordered_by_y if elem.x in x_coordinates_to_check]
    smallest_distance = delta
    best_pair = None
    for i in range(len(coordinates)-1):
        # Classic bound: only the next <= 6 strip neighbors in y order can
        # possibly be within delta.
        for j in range(1,min(7,len(coordinates)-i)):
            p,q = coordinates[i],coordinates[i+j]
            if distance(p,q) < smallest_distance:
                best_pair = (p,q)
                smallest_distance = distance(p,q)
    return best_pair
def impose_ordering(coordinates, old_ordering, new_ordering):
    """Reorder `coordinates` so that its elements follow `new_ordering`.

    old_ordering[i] is the sort key of coordinates[i]; new_ordering is the
    same keys sorted. Replaces the original O(n^2) list.index() scan with a
    precomputed first-occurrence position map — behaviorally identical,
    since list.index() also returns the first match.
    """
    first_pos = {}
    for pos, value in enumerate(new_ordering):
        if value not in first_pos:
            first_pos[value] = pos
    dicter = {}
    for ind, elem in enumerate(old_ordering):
        dicter[first_pos[elem]] = ind
    new_coordinates = []
    for ind in range(len(coordinates)):
        new_coordinates.append(coordinates[dicter[ind]])
    return new_coordinates
#coordinates = [Point(i,i) for i in range(1000)]
# Generate 10000 random points with pairwise-distinct x AND y coordinates
# (uniqueness is required because impose_ordering maps by coordinate value).
coordinates = []
x_s = []
y_s = []
for _ in range(10000):
    tmp_x,tmp_y = random.randint(0,100000),random.randint(0,100000)
    # Re-draw until both coordinates are unused so far.
    # NOTE(review): membership in a growing list makes this loop O(n^2)
    # overall; sets would be faster.
    while tmp_x in x_s or tmp_y in y_s:
        tmp_x,tmp_y = random.randint(0,100000),random.randint(0,100000)
    tmp_p = Point(tmp_x,tmp_y)
    x_s.append(tmp_x)
    y_s.append(tmp_y)
    coordinates.append(tmp_p)
# Sort the point set by x and by y (via merge_sort on the raw coordinates,
# then re-applying the resulting permutation to the Point list).
old_x_ordering = [elem.x for elem in coordinates]
new_x_ordering = merge_sort([elem.x for elem in coordinates])
old_y_ordering = [elem.y for elem in coordinates]
new_y_ordering = merge_sort([elem.y for elem in coordinates])
ordered_by_x = impose_ordering(coordinates,old_x_ordering,new_x_ordering)
ordered_by_y = impose_ordering(coordinates,old_y_ordering,new_y_ordering)
# Report the closest pair and its distance.
pair = find_closest_pair(ordered_by_x,ordered_by_y)
print("Point 1:",pair[0].x,pair[0].y)
print("Point 2:",pair[1].x,pair[1].y)
print("Distance:",distance(pair[0],pair[1]))
|
from typing import List, Union
from dataclasses import dataclass
from torch.tensor import Tensor
@dataclass
class EncodedSentence:
    """Tokenizer output for one source/target sentence pair.

    Holds input-id and attention-mask tensors for both sides; presumably
    shaped (seq_len,) or (batch, seq_len) — TODO confirm against the caller.
    """
    x_inputs: Tensor     # source token ids
    x_attention: Tensor  # source attention mask
    y_inputs: Tensor     # target token ids
    y_attention: Tensor  # target attention mask
#!/usr/bin/python
"""
Test the performance of several inter-process transfer methods on a large dictionary.
"""
import os
import sys
import time
import socket
import random
import string
from pathlib import Path
import multiprocessing as mp
import subprocess as sp
from uuid import uuid4
sys.path.append("/home/mot/py/")
from mpyx import EZ, F, Stamp, Sink, As, By, Datagram
from mpyx.Vid import FFmpeg
import numpy as np
import fcntl
import shlex
from skvideo.io import FFmpegReader, FFmpegWriter
from functools import reduce
dictionary = None
class FrameData(Datagram):
    """Datagram carrying per-frame identifiers through the mpyx pipeline."""
    def initialize(self, experiment_uuid, segment_uuid, number):
        # Called by the Datagram machinery (presumably from its constructor —
        # verify against mpyx.Datagram) to set per-frame identity fields.
        self.experiment_uuid = experiment_uuid
        self.segment_uuid = segment_uuid
        self.number = number
def main():
    """Run the queue/pipe transfer benchmark over a fixed number of frames."""
    pipe(100)
def make_frame(number):
    # Build a FrameData with fresh experiment/segment UUIDs for this frame.
    return FrameData(uuid4(), uuid4(), number)
class Timeit(object):
    """Minimal static stopwatch: start() records a timestamp, end() prints
    the elapsed seconds since the last start()."""
    import time
    startTime = time.time()

    @staticmethod
    def start():
        # Reset the shared start timestamp.
        Timeit.startTime = time.time()

    @staticmethod
    def end():
        elapsed = time.time() - Timeit.startTime
        print(" `-> Took {} seconds.".format(elapsed))
class RandomFrameEmitterPipe(F):
    """Source stage: emits n_frames FrameData items into the pipeline."""
    def setup(self, n_frames):
        # setup() runs once; each frame is pushed downstream via self.put.
        for i in range(n_frames):
            frame = make_frame(i)
            self.put(frame)
class SetAProp1(F):
    """Stage 1: attach a large random array to each frame (simulated raw data)."""
    def do(self, frame):
        # (1729, 2336) float64 array, ~32 MB per frame — this is the payload
        # whose inter-process transfer cost the benchmark measures.
        frame.raw = np.random.random((1729, 2336))
        print(frame.raw)
        self.put(frame)
class SetAProp2(F):
    """Stage 2: derive a second large array from the raw payload."""
    def do(self, frame):
        raw = frame.raw
        proc = raw / 2
        frame.processed = proc
        self.put(frame)
class Cleaner(F):
    """Final processing stage: releases the frame's payload."""
    def do(self, frame):
        # NOTE(review): the frame is cleaned but not re-put downstream, so
        # the Sink stage after this receives nothing — presumably intentional
        # for the benchmark; verify against mpyx's Sink semantics.
        frame.clean()
def pipe(n_frames):
    """
    Use normal python provided queues.
    """
    # Assemble the benchmark pipeline: emitter -> two transform stages ->
    # cleaner -> a single Sink worker.
    ez = EZ(
        RandomFrameEmitterPipe(n_frames),
        SetAProp1(),
        SetAProp2(),
        Cleaner(),
        As(1, Sink),
    )
    print("Timing multiprocess pipes with Datagram")
    # Time the full run: start the pipeline and block until it drains.
    Timeit.start()
    ez.start().join()
    Timeit.end()
# Run the benchmark only when executed as a script.
if __name__ == "__main__":
    main()
|
# do not need to specify the variable type
# declare a variable and it's type by given it a value
# example with integers and floats
def test_1():
    """Demonstrate integer and mixed int/float arithmetic: prints 33, 33.0."""
    first = 12
    second = 21
    second_float = 21.0
    int_sum = first + second
    mixed_sum = first + second_float
    print(int_sum)
    print(mixed_sum)
# strings
# use single or double quotes
def test_2():
    """Demonstrate string literals and apostrophe escaping."""
    # Both quote styles produce the same string.
    for greeting in ("hello", "hello"):
        print(greeting)
    # Two ways to embed an apostrophe.
    print("it's mine")
    print("it's mine")
# reading input from the keyboard
def integer_test():
    """Read two integers from the keyboard; classify the first against 100
    and report the parity of the second."""
    number = int(input("Enter an integer: "))
    if number <= 100:
        print("Number is less than or equal 100")
    else:
        print("Number is greater than 100")
    n = int(input("Enter a number: "))
    print("Even Steven" if n % 2 == 0 else "That's odd")
# try out a while loop with some floats
def investment_test():
    """Read an investment amount, rate, and period; print yearly compounded
    values for short periods, or just the future value otherwise."""
    amount = float(input("Enter investment amount: "))
    rate = float(input("Enter interest rate: "))
    period = int(input("Enter number of years of investment: "))
    if period <= 10:
        print("Future value: %.2f" % (amount * (1 + rate) ** period))
        print("Your yearly returns are: ")
        # Compound year by year, carrying the balance forward.
        for year in range(1, period + 1):
            amount = amount * (1 + rate)
            print("Year %d Total Value: %.2f" % (year, amount))
    else:
        print("Too many periods to print individually, you maniac!")
        print("Future value: %.2f" % (amount * (1 + rate) ** period))
# tuple unpacking
def tuple_test():
    """Demonstrate tuple unpacking: prints name, country, language in order."""
    data = ("Yana Yang", "Canada", "Python")
    for field in data:
        print(field)
# formatting strings
# older way to add expressions into a string
def string_test1():
    """Demonstrate the older str.format style of string interpolation."""
    person = "Yana (is so cool)"
    topic = "Python"
    print("{0} is learning {1}.".format(person, topic))
# PEP 498 a newer and simple way to embed expressions in a string
# using "f"
def string_test2():
    """Demonstrate PEP 498 f-string interpolation with keyboard input."""
    name = "Yana"
    adjective = input("Enter adjective: ")
    print(f"{name} is so {adjective}!")
# f-strings and dates!
def date_test():
    """Demonstrate f-string date formatting: print today's date and weekday."""
    import datetime
    print("What day of the week is it?!")
    # d = datetime.date(2019, 8, 1)
    d = datetime.datetime.now()
    print(f"{d:%F} is a {d:%A}!") #standard formats
# test_1()
# test_2()
# integer_test()
# investment_test()
# tuple_test()
# string_test1()
# string_test2()
# date_test()
|
#test generic_language.py
from dotenv import load_dotenv, find_dotenv
from pathlib import Path
import json
import os
import pymysql
import traceback
import time
import sys
import re
import subprocess
# Make the parent directory importable so `languagefactory` resolves when the
# tests are run from this folder.
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path + "/..")
from languagefactory import LanguageFactory
def test_language_factory():
    """LanguageFactory builds known language ids and rejects unknown ones.

    Bug fix: the original wrapped ``assert False`` in a bare ``try/except``,
    so the AssertionError itself was swallowed and the test could never
    fail.  Use try/except/else so only the factory call is guarded.
    """
    f = LanguageFactory()
    assert f.create(1) is not None
    try:
        result = f.create(100)
    except Exception:
        pass  # rejecting an unknown id with an exception is acceptable
    else:
        assert result is None, "create(100) should raise or return None"
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 13:33:29 2020
@author: Joe
UKBiobank data loading utilities
"""
import wx
from ukbiobank.gui.load_frame import LoadFrame
# Open GUI
def open():
    """Create the wx App, show the UKBiobank LoadFrame, and run the GUI main loop."""
    # NOTE(review): this shadows the builtin ``open`` when star-imported;
    # kept as-is because callers may already import this name.
    app = wx.App()
    LoadFrame()
    app.MainLoop()
    return
|
# Find the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part.
# Comments Section:
# - This exercise can be easily done by hand.
# Gonna make 1 by hand so you can understand what's going on:
# 7*1 + 3 = 10
# 7*4 + 2 = 3*10
# 7*2 + 6 = 2*10
# 7*8 + 4 = 6*10
# 7*5 + 5 = 4*10
# 7*7 + 1 = 5*10
# And then it repeats, so, 1/7 = 0.(142857)
def cyclelen(n):
    """Return the length of the recurring cycle in the decimal expansion of 1/n.

    Performs decimal long division, recording the step at which each
    remainder first appears; when a remainder repeats, the cycle length is
    the number of steps since its first occurrence (this excludes any
    non-repeating prefix, e.g. 1/6 = 0.1(6) has cycle length 1, not 2).
    Terminating fractions (remainder reaches 0, e.g. 1/2) return 0.
    """
    seen = {}  # remainder -> step index of its first occurrence
    dividend = 10
    step = 0
    while dividend:
        if dividend in seen:
            return step - seen[dividend]
        seen[dividend] = step
        dividend = (dividend % n) * 10
        step += 1
    return 0  # remainder hit 0: the decimal terminates
def problem26():
    """Project Euler 26: the d < 1000 whose 1/d has the longest recurring cycle."""
    # max() keeps the first maximal element, matching the original's
    # strictly-greater update rule.
    return max(range(1, 1000), key=cyclelen)
|
"""
输入一个链表的头节点,从尾到头反过来返回每个节点的值(用数组返回)。
示例 1:
输入:head = [1,3,2]
输出:[2,3,1]
"""
def print_links(links):
    """Print the values of a singly linked list in reverse (tail-to-head) order."""
    values = []
    node = links
    while node:
        values.append(node.val)
        node = node.next
    for value in reversed(values):
        print(value)
# def print_link_recursion(links):
# if links:
# print_link_recursion(links.next)
# print links.val
|
def additionwithoutplus(a, b):
    """Add two non-negative integers using only XOR and carry propagation."""
    # XOR adds without carries; AND<<1 is the carry — iterate until no carry.
    while b:
        a, b = a ^ b, (a & b) << 1
    return a
# Demo: sum 10 and 15 without using the '+' operator.
k=additionwithoutplus(10,15)
print ("The sum is ",k)
|
from math import e, factorial
def poison_distribution(k, l):
    """Poisson pmf P(X = k) for rate ``l``, rounded to 3 decimal places."""
    pmf = ((l ** k) * (e ** (-l))) / factorial(k)
    return round(pmf, 3)
print(poison_distribution(3,2))
|
import math
import csv
import sys
def sigmoid(x):
    """Logistic function: 1 / (1 + e^-x)."""
    exp_neg = math.exp(-x)
    return 1 / (1 + exp_neg)
def main():
    """Read space-delimited per-class scores and print a PhraseId,Sentiment CSV.

    Each input row is a phrase id followed by five raw scores; the
    predicted sentiment is the argmax of the sigmoid-transformed scores.
    Fixes vs. the original: Python 2 ``print`` statements replaced with the
    function form (the module was a syntax error on Python 3), and the
    input file is closed via a context manager instead of being leaked.
    """
    with open(sys.argv[1]) as score_file:
        test_pred = csv.reader(score_file, delimiter=" ")
        print("PhraseId,Sentiment")
        for (phrase_id, pred0, pred1, pred2, pred3, pred4) in test_pred:
            prob = [sigmoid(float(pred0)), sigmoid(float(pred1)), sigmoid(float(pred2)),
                    sigmoid(float(pred3)), sigmoid(float(pred4))]
            print(phrase_id + "," + str(prob.index(max(prob))))
main() |
import math
from django.db import models
from django.utils import timezone
from datetime import timedelta, datetime
from numpy import mean
class Cinema(models.Model):
    """A cinema; rooms point back here via Room.cinema (related_name='myCinema')."""
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name

    def roomQuantity(self):
        # Number of rooms belonging to this cinema.
        return Room.objects.filter(cinema=self.pk).count()

    def rooms(self):
        return Room.objects.filter(cinema=self.pk)

    def rooms_serial(self):
        # Same queryset as rooms(), serialized as dicts.
        return Room.objects.filter(cinema=self.pk).values()

    def movies(self):
        """Distinct movies ever scheduled in any of this cinema's rooms."""
        # Renamed the local from ``list`` to stop shadowing the builtin.
        movie_set = set()
        for room in Room.objects.filter(cinema_id=self.pk):
            for movie in room.roomMovies():
                movie_set.add(movie)
        return movie_set

    def freeTime(self):
        """Total free time today across all rooms, formatted as H:M:S."""
        total_seconds = 0
        for room in self.myCinema.all():
            total_seconds += room.totalFreeTime()
        hours = total_seconds // 3600
        minutes = (total_seconds % 3600) // 60
        seconds = (total_seconds % 60)
        return str(hours) + ":" + str(minutes) + ":" + str(seconds)

    class Meta:
        managed = True
        db_table = 'cinema'
class Actor(models.Model):
    """An actor; movies reference actors via Movie.actor / Movie.main_actor."""
    name = models.CharField(max_length=200)
    birth_date = models.DateField('Birth_date')

    def __str__(self):
        return self.name

    def getShortMovies(self):
        # Names of this actor's movies shorter than one hour.
        return map(str, map(Movie.getName, filter(lambda d: d.duration_mins < 60, self.movie_set.all())))

    def getLongMovies(self):
        # Names of this actor's movies longer than one hour.
        return map(str, map(Movie.getName, filter(lambda d: d.duration_mins > 60, self.movie_set.all())))

    def getMeanShortMovieDuration(self):
        """Mean duration of this actor's sub-hour movies, or a message if none.

        Bug fixes: the original folded ``reduce(mean, ...)`` — ``reduce`` is
        never imported (NameError on Python 3) and pairwise folding of
        ``mean`` does not compute an arithmetic mean anyway; also a
        ``filter`` object is always truthy, so the empty check never fired.
        """
        durations = [m.duration_mins for m in self.movie_set.all() if m.duration_mins < 60]
        if durations:
            return mean(durations)
        else:
            return "No short movies"

    def getMaxMovieDuration(self):
        # Bug fix: ``reduce(max, ...)`` relied on the unimported ``reduce``.
        durations = [m.duration_mins for m in self.movie_set.all()]
        if durations:
            return max(durations)
        else:
            return "No movies"

    def getMinMovieDuration(self):
        durations = [m.duration_mins for m in self.movie_set.all()]
        if durations:
            return min(durations)
        else:
            return "No movies"

    class Meta:
        managed = True
        db_table = 'actor'
class Movie(models.Model):
    """A movie; scheduled into rooms through the MovieByRoom through-model."""
    name = models.CharField(max_length=200)
    main_actor = models.ForeignKey(Actor, related_name='main_actor')
    actor = models.ManyToManyField(Actor)
    duration_mins = models.IntegerField()

    def __str__(self):
        return self.name

    def getName(self):
        return self.name

    def getDuration(self):
        return self.duration_mins

    def actors(self):
        # Bug fix: the original read ``self.actors`` — the method itself, a
        # bound method with no ``.all`` — and raised; the M2M field is ``actor``.
        return self.actor.all().values()

    def timesPlayed(self):
        # Number of scheduled showings of this movie across all rooms.
        return MovieByRoom.objects.filter(movie=self.pk).count()

    def nextReproduction(self):
        """Queryset with the next upcoming showing of this movie in each room.

        Bug fix: ``order_by`` takes the field name ('start_datetime'), not
        the verbose name 'Start_datetime' (which raises FieldError).
        """
        items = set()
        for room in Room.objects.all():
            upcoming = MovieByRoom.objects.filter(start_datetime__gt=timezone.now(), room=room.id,
                                                  movie=self.pk).order_by('start_datetime')
            if len(upcoming) >= 1:
                items.add(upcoming[0].pk)
        return MovieByRoom.objects.filter(pk__in=items)

    def nextReproduction_serial(self):
        # Same query as nextReproduction(), serialized as dicts.
        return self.nextReproduction().values()

    def reproductionTime(self):
        """Set of "<room> time: <total>" strings: total scheduled time per room."""
        entries = set()
        for room in Room.objects.all():
            total = timedelta()  # zero duration (was timezone.now() - timezone.now())
            for showing in MovieByRoom.objects.filter(movie=self.pk, room=room.pk):
                total = total + (showing.end_datetime - showing.start_datetime)
            entries.add(str(room) + " time: " + str(total))
        return entries

    class Meta:
        managed = True
        db_table = 'movie'
class Room(models.Model):
    """A screening room in a cinema; showings are MovieByRoom rows."""
    room_number = models.IntegerField()
    room_manager = models.CharField(max_length=200)
    cinema = models.ForeignKey(Cinema, related_name='myCinema')
    played = models.ManyToManyField(Movie, through='MovieByRoom')

    def __str__(self):
        return self.cinema.name + " " + str(self.room_number)

    def state(self):
        """'Reproducing' while a showing is in progress, else 'Inactive'."""
        in_progress = MovieByRoom.objects.filter(start_datetime__lte=timezone.now(),
                                                 end_datetime__gte=timezone.now(), room=self.pk).count()
        return "Reproducing" if in_progress else "Inactive"

    def history(self):
        # Showings that have already ended.
        return MovieByRoom.objects.filter(end_datetime__lte=timezone.now(), room=self.pk).distinct()

    def movieQuantity(self):
        # Distinct movies ever scheduled here (grouping forced on the raw query).
        q = MovieByRoom.objects.filter(room=self.pk)
        q.query.group_by = ['movie_id']
        return q.count()

    def freeTimeInterval(self):
        """Set of (start, end) time pairs when the room is free today.

        Bug fixes vs. the original: ``order_by`` used the verbose name
        'Start_datetime' (FieldError) instead of the field name, and the
        loop read the nonexistent attribute ``fecha_y_hora_inicio`` instead
        of ``start_datetime``.
        """
        intervals = set()
        today = (timezone.now().date(), timezone.now().date() + timedelta(days=1))
        starting_today = MovieByRoom.objects.filter(room=self.pk,
                                                    start_datetime__range=today).order_by('start_datetime')
        ending_today = MovieByRoom.objects.filter(room=self.pk,
                                                  end_datetime__range=today).order_by('start_datetime')
        midnight = datetime.combine(timezone.now().date(), datetime.min.time())
        if len(ending_today) >= 1:
            first = ending_today[0]
            # A showing that started before midnight pushes the first free
            # interval to its end time.
            if datetime.combine(first.start_datetime.date(), first.start_datetime.time()) <= midnight:
                init = first.end_datetime
            else:
                init = midnight
        else:
            init = midnight
        for showing in starting_today:
            intervals.add((init.time(), showing.start_datetime.time()))
            init = showing.end_datetime
        # Free tail from the last showing's end until 23:59:59.
        if datetime.combine(init.date(), init.time()) <= datetime.combine(timezone.now().date(), datetime.max.time()):
            intervals.add((init.time(), datetime.min.time().replace(hour=23, minute=59, second=59)))
        return intervals

    def freeTimeToString(self):
        """Human-readable "from: X to Y" strings for today's free intervals."""
        return ["from: " + str(interval[0]) + " to " + str(interval[1])
                for interval in self.freeTimeInterval()]

    def freePercentage(self):
        """Percentage of the day that is free vs. busy, as a string."""
        free_seconds = self.totalFreeTime()
        pct = math.ceil(free_seconds / 864)  # 864 s = 1% of a day
        return "Free: " + str(pct) + "% and busy: " + str(100 - pct) + "%"

    def totalFreeTime(self):
        """Total free seconds today, summed over freeTimeInterval()."""
        total = 0
        for start, end in self.freeTimeInterval():
            total += ((end.hour * 60 * 60 + end.minute * 60 + end.second)
                      - (start.hour * 60 * 60 + start.minute * 60 + start.second))
        return total

    def roomMovies(self):
        """Distinct Movie objects ever scheduled in this room."""
        movies = set()
        showings = MovieByRoom.objects.filter(room=self.pk)
        showings.query.group_by = ['movie_id']
        for showing in showings:
            match = Movie.objects.filter(id=showing.movie_id)
            if len(match) >= 1:
                movies.add(match[0])
        return movies

    def playing(self):
        return self.played.values()

    class Meta:
        managed = True
        db_table = 'room'
class MovieByRoom(models.Model):
    """Through-model: one showing of a Movie in a Room over a datetime interval."""
    # NOTE(review): these ForeignKeys lack on_delete, so this targets Django < 2.0.
    movie = models.ForeignKey(Movie)
    room = models.ForeignKey(Room)
    start_datetime = models.DateTimeField('Start_datetime')
    end_datetime = models.DateTimeField('')

    def __str__(self):
        return self.movie.name + " / " + str(self.room) + " / " + str(self.start_datetime)

    def room_serial(self):
        # NOTE(review): ``self.room.objects`` accesses a Manager through an
        # instance, which Django forbids — this likely raises at runtime;
        # presumably something like Room.objects.filter(pk=self.room_id).values()
        # was intended.  Confirm before changing.
        return self.room.objects.values()
def cinemaRoomQuantity():
    """One "<cinema> rooms: <n>" entry per cinema, as a set of strings."""
    # Set comprehension; also removes the original's shadowing of ``list``.
    return {str(x) + " rooms: " + str(x.roomQuantity()) for x in Cinema.objects.all()}
def roomState():
    """One "<room>, state: <state>" entry per room, as a set of strings."""
    # Set comprehension; also removes the original's shadowing of ``list``.
    return {str(x) + ", state: " + x.state() for x in Room.objects.all()}
def roomHistory():
    """Entries "<room> <movie name>" for every finished showing, per room."""
    # Set comprehension; also removes the original's shadowing of ``list``.
    return {str(room) + " " + showing.movie.name
            for room in Room.objects.all()
            for showing in room.history()}
class General(models.Model):
    """Aggregate reporting helpers.

    NOTE(review): these duplicate the module-level cinemaRoomQuantity /
    roomState / roomHistory functions above — consider consolidating.
    """

    def roomQuantityCinema(self):
        # One "<cinema> rooms: <n>" entry per cinema.
        return {str(x) + " rooms: " + str(x.roomQuantity()) for x in Cinema.objects.all()}

    def roomState(self):
        # One "<room>, state: <state>" entry per room.
        return {str(x) + ", state: " + x.state() for x in Room.objects.all()}

    def roomHistory(self):
        # "<room> <movie name>" for every finished showing.
        return {str(room) + " " + showing.movie.name
                for room in Room.objects.all()
                for showing in room.history()}

    def actors(self):
        # All actor names, serialized.
        return Actor.objects.all().values("name")
def read_essid(interface):
    '''Read the name of the network a Wireless device is
    attached to.
    ARGS:
        @interface -- The interface inquired about. Ex wlan0
    RETURNS:
        @essid -- The name of the wireless network the device
        is configured on, or None when the interface is not found.
    '''
    from parse_iwconfig import parse_iwconfig
    interfaces = parse_iwconfig()
    for intrfc in interfaces:
        if intrfc['interface'] == interface:
            # Bug fix: the original indexed the ``interface`` argument (a
            # plain string) instead of the matched record ``intrfc``.
            return intrfc['essid']
    return None
|
class ListNode(object):
    """Singly linked list node: a value plus a ``next`` pointer."""

    def __init__(self, x):
        self.val, self.next = x, None
# 162 ms
class Solution(object):
    """LeetCode 2: add two numbers stored as reversed-digit linked lists."""

    def addTwoNumbers(self, l1, l2):
        """Return the head of a new list holding the digit-wise sum of l1 and l2.

        Digits are least-significant first; ``carry`` propagates between
        positions, and a final carry gets one extra node.
        """
        head = ListNode(0);
        current = head
        carry = 0
        while True:
            # An exhausted list contributes digit 0.
            a = l1.val if l1 is not None else 0
            b = l2.val if l2 is not None else 0
            current.val = a + b + carry
            if current.val > 9:
                current.val -= 10
                carry = 1
            else:
                carry = 0
            l1 = l1.next if l1 is not None else None
            l2 = l2.next if l2 is not None else None
            if l1 is None and l2 is None:
                if carry >0:
                    # Append one extra node for the final carry digit.
                    current.next = ListNode(0)
                    current = current.next
                    current.val = carry
                return head
            # More digits remain: extend the result list and advance.
            current.next = ListNode(0)
            current = current.next
|
import pytest
import responses
import pyyoutube.models as mds
from .base import BaseTestCase
from pyyoutube.error import PyYouTubeException
class TestSubscriptionsResource(BaseTestCase):
    """Tests for the YouTube subscriptions resource (list / insert / delete),
    with HTTP traffic mocked by the ``responses`` library."""

    RESOURCE = "subscriptions"

    def test_list(self, helpers, key_cli, authed_cli):
        # Listing with no filter at all must be rejected client-side.
        with pytest.raises(PyYouTubeException):
            key_cli.subscriptions.list()
        with responses.RequestsMock() as m:
            m.add(
                method="GET",
                url=self.url,
                json=self.load_json(
                    "subscriptions/subscriptions_by_mine_p1.json", helpers
                ),
            )
            # By channel id with an API key.
            res = key_cli.subscriptions.list(
                parts=["id", "snippet"],
                channel_id="UCa-vrCLQHviTOVnEKDOdetQ",
                max_results=10,
            )
            assert res.items[0].id == "zqShTXi-2-Tx7TtwQqhCBzrqBvZj94YvFZOGA9x6NuY"
            # Authorized variants: mine / my_recent_subscribers / my_subscribers.
            res = authed_cli.subscriptions.list(mine=True, max_results=10)
            assert res.items[0].snippet.channelId == "UCNvMBmCASzTNNX8lW3JRMbw"
            res = authed_cli.subscriptions.list(
                my_recent_subscribers=True, max_results=10
            )
            assert res.items[0].snippet.channelId == "UCNvMBmCASzTNNX8lW3JRMbw"
            res = authed_cli.subscriptions.list(my_subscribers=True, max_results=10)
            assert res.items[0].snippet.channelId == "UCNvMBmCASzTNNX8lW3JRMbw"
        with responses.RequestsMock() as m:
            m.add(
                method="GET",
                url=self.url,
                json=self.load_json("subscriptions/subscriptions_by_id.json", helpers),
            )
            # Lookup by explicit subscription ids.
            res = key_cli.subscriptions.list(
                parts=["id", "snippet"],
                subscription_id=[
                    "zqShTXi-2-Tx7TtwQqhCBwViE_j9IEgnmRmPnqJljxo",
                    "zqShTXi-2-Rya5uUxEp3ZsPI3fZrFQnSXNQCwvHBGGo",
                ],
            )
            assert res.items[0].id == "zqShTXi-2-Tx7TtwQqhCBwViE_j9IEgnmRmPnqJljxo"

    def test_inset(self, helpers, authed_cli):
        # NOTE(review): "inset" is likely a typo for "insert"; renaming would
        # change the collected test id, so it is kept.
        with responses.RequestsMock() as m:
            m.add(
                method="POST",
                url=self.url,
                json=self.load_json("subscriptions/insert_response.json", helpers),
            )
            subscription = authed_cli.subscriptions.insert(
                body=mds.Subscription(
                    snippet=mds.SubscriptionSnippet(
                        resourceId=mds.ResourceId(
                            kind="youtube#channel",
                            channelId="UCQ6ptCagG3W0Bf4lexvnBEg",
                        )
                    )
                )
            )
            assert subscription.id == "POsnRIYsMcp1Cghr_Fsh-6uFZRcIHmTKzzByiv9ZAro"

    def test_delete(self, helpers, authed_cli):
        with responses.RequestsMock() as m:
            m.add(
                method="DELETE",
                url=self.url,
                status=204,
            )
            # 204 No Content -> delete() reports success.
            assert authed_cli.subscriptions.delete(
                subscription_id="POsnRIYsMcp1Cghr_Fsh-6uFZRcIHmTKzzByiv9ZAro"
            )
        # 403 with an error body must surface as PyYouTubeException.
        with pytest.raises(PyYouTubeException):
            with responses.RequestsMock() as m:
                m.add(
                    method="DELETE",
                    url=self.url,
                    json=self.load_json("error_permission_resp.json", helpers),
                    status=403,
                )
                authed_cli.subscriptions.delete(
                    subscription_id="POsnRIYsMcp1Cghr_Fsh-6uFZRcIHmTKzzByiv9ZAro"
                )
|
# -*- coding: utf-8 -*-
"""
Created on 2017/3/19
@author: will4906
"""
# Handler that flattens an ItemGroup into a search-expression string
def handle_item_group(item_group):
    """Flatten an ItemGroup into a search-expression string.

    Terms from the ``And`` clause are joined with " AND ", then ``Or``
    terms are appended with " OR ", then ``Not`` terms with " NOT "; the
    leading connector is stripped only when no earlier clause produced
    output.
    """
    AND, OR, NOT = ' AND ', ' OR ', ' NOT '
    exp_str = ""
    key_and = getattr(item_group, 'And')
    key_or = getattr(item_group, 'Or')
    key_not = getattr(item_group, 'Not')
    if key_and is not None:
        for parm in key_and.parm:
            exp_str += AND + parm
        exp_str = exp_str.replace(AND, '', 1)
    if key_or is not None:
        for parm in key_or.parm:
            exp_str += OR + parm
        if key_and is None:
            exp_str = exp_str.replace(OR, '', 1)
    if key_not is not None:
        for parm in key_not.parm:
            exp_str += NOT + parm
        if key_and is None and key_or is None:
            exp_str = exp_str.replace(NOT, '', 1)
    return exp_str
# Handler for the application-number field
def handle_request_number(request_number):
    """Stub handler: only echoes the application number (no expression built yet)."""
    print(request_number)
# Handler for date-typed fields
def handle_date_element(title, date_element):
    """Render ``title`` followed by the DateSelect's prebuilt search expression."""
    if not isinstance(date_element, DateSelect):
        raise Exception('We just support DateSelect for date element!')
    return title + date_element.search_exp
# Handler for the invention-type field
def handle_invention_type(title, invention_type):
    """Build the quoted invention-type expression, e.g. ("I" OR "U").

    ``invention_type`` may be an ``Or`` group or a plain string; the
    Chinese type names are mapped to the codes I/U/D.  ``title`` is
    accepted for handler-signature consistency but is not used.
    NOTE(review): an unrecognized term is kept verbatim in the ``Or``
    branch but yields an empty "()" in the string branch — presumably
    unintended; confirm before unifying.
    """
    exp_str = ""
    if isinstance(invention_type, Or):
        OR = ' OR '
        keyor = invention_type
        if keyor is not None:
            parms = keyor.__getattribute__('parm')
            for parm in parms:
                # Map each term to its quoted single-letter code.
                if parm == 'I' or parm == 'U' or parm == 'D':
                    parm = '\"' + parm + '\"'
                elif parm.find('发明申请') != -1:
                    parm = '\"I\"'
                elif parm.find('实用新型') != -1:
                    parm = '\"U\"'
                elif parm.find('外观设计') != -1:
                    parm = '\"D\"'
                exp_str += OR + parm
            exp_str = exp_str.replace(OR, '', 1)
    elif isinstance(invention_type, str):
        if invention_type == 'I' or invention_type == 'U' or invention_type == 'D':
            exp_str = '\"' + invention_type + '\"'
        elif invention_type.find('发明申请') != -1:
            exp_str = '\"I\"'
        elif invention_type.find('实用新型') != -1:
            exp_str = '\"U\"'
        elif invention_type.find('外观设计') != -1:
            exp_str = '\"D\"'
    else:
        raise Exception('We just support string or Or for date element!')
    return "(" + exp_str + ")"
# Default field handler
def default_handle(title, default):
    """Render ``title=(value)`` from a raw string or a flattened ItemGroup."""
    if isinstance(default, ItemGroup):
        body = handle_item_group(default)
    elif isinstance(default, str):
        body = default
    else:
        raise Exception('We just support string or ItemGroup!')
    return title + '=(' + body + ')'
def find_element_in_item_group(element, item_group):
    """Return the index of ``element`` within the group's And/Or/Not terms.

    Clauses are searched in And, Or, Not order; returns the index inside
    the first clause that contains the element, or None when absent.
    Bug fix: the original bare ``except`` clauses silently swallowed every
    exception — only the expected ``ValueError`` from ``index`` is caught.
    """
    for clause_name in ('And', 'Or', 'Not'):
        clause = getattr(item_group, clause_name)
        if clause is not None:
            try:
                return clause.parm.index(element)
            except ValueError:
                pass  # not in this clause; keep looking
    return None
# Maps each QueryItem keyword to the handler that renders its search
# expression (handlers are defined above).
title_case = {
    'request_number': default_handle,
    'request_date': handle_date_element,
    'publish_number': default_handle,
    'publish_date': handle_date_element,
    'invention_name': default_handle,
    'ipc_class_number': default_handle,
    'proposer_people': default_handle,
    'inventor_people': default_handle,
    'priority_number': default_handle,
    'priority_date': handle_date_element,
    'abstract': default_handle,
    'claim': default_handle,
    'instructions': default_handle,
    'key_word': default_handle,
    'locarno_class_number': default_handle,
    'description_of_the_design': default_handle,
    'agent': default_handle,
    'agency': default_handle,
    'proposer_post_code': default_handle,
    'proposer_address': default_handle,
    'proposer_location': default_handle,
    'FT_class_number': default_handle,
    'UC_class_number': default_handle,
    'ECLA_class_number': default_handle,
    'FI_class_number': default_handle,
    'English_invention_name': default_handle,
    'French_invention_name': default_handle,
    'German_invention_name': default_handle,
    'other_invention_name': default_handle,
    'English_abstract': default_handle,
    'PCT_enters_national_phase_date': handle_date_element,
    'PCT_international_application_number': default_handle,
    'French_abstract': default_handle,
    'German_abstract': default_handle,
    'other_abstract': default_handle,
    'PCT_international_application_date': handle_date_element,
    'PCT_international_publish_number': default_handle,
    'PCT_international_publish_date': handle_date_element,
    'CPC_class_number': default_handle,
    'C-SETS': default_handle,
    'invention_type': handle_invention_type,
    'publish_country': default_handle,
}
# Maps each QueryItem keyword to the Chinese field title used when
# building the search expression.
# NOTE(review): 'priority_date' maps to the same title as
# 'priority_number' ('优先权号') — looks like a copy-paste slip
# (expected something like '优先权日'); confirm against the target system.
title_define = {
    'request_number': '申请号',
    'request_date': '申请日',
    'publish_number': '公开(公告)号',
    'publish_date': '公开(公告)日',
    'invention_name': '发明名称',
    'ipc_class_number': 'IPC分类号',
    'proposer_people': '申请(专利权)人',
    'inventor_people': '发明人',
    'priority_number': '优先权号',
    'priority_date': '优先权号',
    'abstract': '摘要',
    'claim': '权利要求',
    'instructions': '说明书',
    'key_word': '关键词',
    'locarno_class_number': '外观设计洛迦诺分类号',
    'description_of_the_design': '外观设计简要说明',
    'agent': '代理人',
    'agency': '代理机构',
    'proposer_post_code': '申请人邮编',
    'proposer_address': '申请人地址',
    'proposer_location': '申请人所在国(省)',
    'FT_class_number': 'FT分类号',
    'UC_class_number': 'UC分类号',
    'ECLA_class_number': 'ECLA分类号',
    'FI_class_number': 'FI分类号',
    'English_invention_name': '发明名称(英)',
    'French_invention_name': '发明名称(法)',
    'German_invention_name': '发明名称(德)',
    'other_invention_name': '发明名称(其他)',
    'English_abstract': '摘要(英)',
    'PCT_enters_national_phase_date': 'PCT进入国家阶段日期',
    'PCT_international_application_number': 'PCT国际申请号',
    'French_abstract': '摘要(法)',
    'German_abstract': '摘要(德)',
    'other_abstract': '摘要(其他)',
    'PCT_international_application_date': 'PCT国际申请日期',
    'PCT_international_publish_number': 'PCT国际申请公开号',
    'PCT_international_publish_date': 'PCT国际申请公开日期',
    'CPC_class_number': 'CPC分类号',
    'C-SETS': 'C-SETS',
    'invention_type': '发明类型',
    'publish_country': '公开国',
}
# Date selector
class DateSelect:
    """A date criterion: an operator plus date(s), compiled to ``search_exp``."""

    def __init__(self, select='=', date='2001-01-01', enddate=None):
        # Operators '=', '>', '>=', '<', '<=' compare against ``date``;
        # ':' means a range from ``date`` (inclusive) to ``enddate``.
        self.select = select
        # Date in fixed format, e.g. 2001-01-01.
        self.date = date
        # End date; only meaningful when the operator is ':'.
        self.enddate = enddate
        if self.select != ':':
            self.search_exp = self.select + self.date
        else:
            self.search_exp = self.date + self.select + self.enddate

    def __repr__(self):
        # Bug fix: the original string never closed its opening '{'.
        return ('DateSelect{select=' + str(self.select) + ',date=' + str(self.date)
                + ',enddate=' + str(self.enddate) + '}')

    def __str__(self):
        # Deduplicated: __str__ and __repr__ shared an identical body.
        return self.__repr__()
class ItemGroup:
    """Bundles the And / Or / Not clauses for one query field."""

    def __init__(self, And=None, Or=None, Not=None):
        self.And = And
        self.Or = Or
        self.Not = Not

    def add_or(self, *parm):
        """Append terms to the Or clause, creating it on first use."""
        if self.Or is not None:
            self.Or.add_parm(*parm)
        else:
            self.Or = Or(*parm)
class And:
    """AND clause: stores its terms in the ``parm`` tuple."""

    def __init__(self, *parm):
        self.parm = parm

    def add_parm(self, *ps):
        """Extend the stored terms with ``ps``."""
        self.parm += ps
class Or:
    """OR clause: stores its terms in the ``parm`` tuple."""

    def __init__(self, *parm):
        self.parm = parm

    def add_parm(self, *ps):
        """Extend the stored terms with ``ps``."""
        self.parm += ps
class Not:
    """NOT clause: stores its terms in the ``parm`` tuple."""

    def __init__(self, *parm):
        self.parm = parm
class QueryItem:
    """Compiles keyword criteria into a single AND-joined search expression.

    Each recognized keyword (see ``title_define``) is rendered by its
    handler from ``title_case``; the rendered pieces are AND-ed together
    and flattened into ``self.search_exp``.
    """
    def __init__(self, **kwargs):
        self.parm = kwargs
        self.queryAnd = And()
        invention_type = kwargs.get('invention_type')
        if invention_type is not None:
            # NOTE(review): 'CN' is forced into publish_country whenever an
            # invention type is given — presumably the I/U/D type codes are
            # CN-specific; confirm.
            publish_country = kwargs.get('publish_country')
            if publish_country is None:
                kwargs['publish_country'] = 'CN'
            else:
                if isinstance(publish_country, str):
                    if publish_country != 'CN':
                        kwargs['publish_country'] = ItemGroup(Or=Or('CN', publish_country))
                elif isinstance(publish_country, ItemGroup):
                    if find_element_in_item_group('CN', publish_country) is None:
                        publish_country.add_or('CN')
        # Render every supplied criterion via its registered handler.
        for title, value in title_define.items():
            key = kwargs.get(title)
            if key is not None:
                self.queryAnd.add_parm(title_case.get(title)(value, key))
        self.itemGroup = ItemGroup(And=self.queryAnd)
        self.search_exp = handle_item_group(self.itemGroup)
|
# coding: utf-8
# # VQE Screening 2
# In[1]:
# Scaffold source for a Bell-state VQE ansatz plus its measurement circuit.
scaffold_codeBell = """
// Ref[1] https://arxiv.org/pdf/1907.13623.pdf
const double alpha0 = 3.14159265359;
module initialRotations(qbit reg[2]) {
Rx(reg[0], alpha0);
CNOT(reg[0], reg[1]);
H(reg[0]);
}
module entangler(qbit reg[2]) {
H(reg[0]);
CNOT(reg[0], reg[1]);
H(reg[1]);
CNOT(reg[1], reg[0]);
}
module prepareAnsatz(qbit reg[2]) {
initialRotations(reg);
entangler(reg);
}
module measure(qbit reg[2], cbit result[2]) {
CNOT(reg[0], reg[1]); // Fig. 7 of Ref[1]
H(reg[0]); // Fig. 7 of Ref[1]
result[0] = MeasZ(reg[0]);
result[1] = MeasZ(reg[1]);
}
int main() {
qbit reg[2];
cbit result[2];
prepareAnsatz(reg);
measure(reg, result);
return 0;
}
"""
# ***
# # Executing it!
# In[2]:
# Compile the Scaffold to OpenQASM
from scaffcc_interface import ScaffCC
openqasmBell = ScaffCC(scaffold_codeBell).get_openqasm()
print(openqasmBell)
# ### Execute on a Simulator
# In[3]:
from qiskit import Aer,QuantumCircuit, execute
Aer.backends()
# In[4]:
simulator = Aer.get_backend('qasm_simulator')
vqe_circBell = QuantumCircuit.from_qasm_str(openqasmBell)
num_shots = 100000
sim_resultBell = execute(vqe_circBell, simulator, shots=num_shots).result()
countsBell = sim_resultBell.get_counts()
# Pauli expectation values estimated from the measured bitstring counts.
expected_valueBellXX = (+countsBell.get('00', 0) - countsBell.get('01', 0) + countsBell.get('10', 0) - countsBell.get('11', 0)) / num_shots
expected_valueBellYY = (-countsBell.get('00', 0) + countsBell.get('01', 0) + countsBell.get('10', 0) - countsBell.get('11', 0)) / num_shots
expected_valueBellZZ = (+countsBell.get('00', 0) + countsBell.get('01', 0) - countsBell.get('10', 0) - countsBell.get('11', 0)) / num_shots
# Combine the Pauli terms into the Hamiltonian's expectation value.
expected_value = 0.5 - 0.5 * expected_valueBellXX - 0.5 * expected_valueBellYY + 0.5 * expected_valueBellZZ
print('The lowest eigenvalue is the expected value, which is : %s' % expected_value)
#print(countsBell.get('00', 0))
#print(countsBell.get('01', 0))
#print(countsBell.get('10', 0))
#print(countsBell.get('11', 0))
#print(expected_valueBellXX)
#print(expected_valueBellYY)
#print(expected_valueBellZZ)
# ***
# # Circuit Visualization
# In[5]:
from qiskit.tools.visualization import circuit_drawer
circuit_drawer(vqe_circBell, scale=.4)
|
# Using turtle graphics, draw a 200x200 square centered on the origin.
# On a mouse click, draw a blue circle at the click point if it is inside
# the square, or a red circle if it is outside.  (Spec: circle size 5.)
import turtle as t
import math
t.shape('turtle')
# Draw the 200x200 square, starting from the corner (100, 100).
t.penup()
t.goto(100,100)
t.pendown()
for i in range(4):
    t.right(90)
    t.forward(200)
def decision(x,y):
    """Click handler: draw a blue circle inside the square, red outside."""
    t.penup()
    # Move slightly off the click point before drawing — presumably to
    # roughly center the circle on the click; confirm against radius used.
    t.goto(x-10,y-10)
    t.pendown()
    if 100>=x>=-100 and 100>=y>=-100:
        t.pencolor("Blue")
        # NOTE(review): the spec comment above says circle size 5, but the
        # code draws radius 20 — confirm which is intended.
        t.circle(20)
    else:
        t.pencolor("Red")
        t.circle(20)
# Register the click handler and enter the turtle event loop.
t.onscreenclick(decision)
t.done()
|
import unittest
import operator
from copy import copy
from pyconc import Empty
from pyconc import Singleton
from pyconc import Concat
from pyconc import identity
from pyconc import SingleThreadedMultiplexor
class ConcTests(unittest.TestCase):
    """Unit tests for the pyconc Conc-list types and map_reduce.

    Fix: ``assertEquals`` is a deprecated alias (removed in Python 3.12);
    replaced everywhere with ``assertEqual``.
    """

    def setUp(self):
        # A small zoo of conc-lists used across the tests.
        self.empty = Empty()
        self.single = Singleton(42)
        self.left_only = Concat(self.single, self.empty)
        self.right_only = Concat(self.empty, self.single)
        self.double = Concat(self.single, self.single)
        self.two_by_two = Concat(self.double, self.double)
        self.one_234 = Concat(Concat(Singleton(1), Singleton(2)),
                              Concat(Singleton(3), Singleton(4)))
        self.st_multiplexor = SingleThreadedMultiplexor()

    def test_identity(self):
        for thing in [None, 5, "foo", object(), Empty(), [1, 2, 3],
                      (3, 4, 5), False, True]:
            self.assertEqual(thing, identity(thing))

    def test_length_of_empty_list_is_zero(self):
        clist = self.empty
        self.assertEqual(0, len(clist))

    def test_length_of_singleton_list_is_one(self):
        clist = self.single
        self.assertEqual(1, len(clist))

    def test_length_of_concat_is_length_of_left_plus_right(self):
        self.assertEqual(1, len(self.left_only))
        self.assertEqual(1, len(self.right_only))
        self.assertEqual(2, len(self.double))
        self.assertEqual(4, len(self.two_by_two))

    def test_as_list_simple(self):
        self.assertEqual([], self.empty.to_list())
        self.assertEqual([42], self.single.to_list())
        self.assertEqual([42], self.left_only.to_list())
        self.assertEqual([42], self.right_only.to_list())
        self.assertEqual([42, 42], self.double.to_list())
        self.assertEqual([42, 42, 42, 42], self.two_by_two.to_list())
        self.assertEqual([1, 2, 3, 4], self.one_234.to_list())

    def test_singlethreaded_mapreduce_empty_no_initial_value(self):
        # Reducing an empty list with no initial value must fail.
        f = lambda: self.st_multiplexor.map_reduce(self.empty,
                                                   None, operator.__add__)
        self.assertRaises(TypeError, f)

    def test_singlethreaded_mapreduce_empty_with_initial_value(self):
        self.assertEqual(10, self.st_multiplexor.map_reduce(self.empty,
                                                            None,
                                                            operator.__add__,
                                                            initial=10))

    def test_singlethreaded_mapreduce_singleton__no_initial_value(self):
        self.assertEqual(42, self.st_multiplexor.map_reduce(self.single,
                                                            None,
                                                            operator.__add__))

    def test_singlethreaded_mapreduce_singleton_with_initial_value(self):
        self.assertEqual(52, self.st_multiplexor.map_reduce(self.single,
                                                            None,
                                                            operator.__add__,
                                                            initial=10))

    def test_singlethreaded_mapreduce_concat_no_initial_value(self):
        self.assertEqual(10, self.st_multiplexor.map_reduce(self.one_234,
                                                            None,
                                                            operator.__add__))

    def test_singlethreaded_mapreduce_concat_with_initial_value(self):
        self.assertEqual(20, self.st_multiplexor.map_reduce(self.one_234,
                                                            None,
                                                            operator.__add__,
                                                            initial=10))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# NOTE(review): distutils is deprecated and removed in Python 3.12 —
# consider migrating to setuptools.
from distutils.core import setup
setup(packages=['gridmetrics'])
|
# Convert a user-supplied Celsius temperature to Fahrenheit.
celsius = float(input("Enter temperature in celsius : "))
fahrenheit = celsius * 1.8 + 32
print("The temperature in fahrenheit is :", fahrenheit)
|
import math
import tensorflow as tf
import matplotlib.pyplot as plt
def sin():
    """Infinite generator of (x, sin(x)) training batches; x ~ N(pi, 1.5), shape (32, 10)."""
    while True:
        batch = tf.random.normal((32, 10), math.pi, 1.5)
        yield batch, tf.math.sin(batch)
def build_model(layers):
    """Plain MLP: ``layers`` hidden Dense(32, relu) layers and a Dense(10) head,
    compiled with Adam and MSE loss."""
    inputs = tf.keras.Input((10, ))
    hidden = inputs
    for _ in range(layers):
        hidden = tf.keras.layers.Dense(32, activation="relu")(hidden)
    outputs = tf.keras.layers.Dense(10)(hidden)
    model = tf.keras.Model(inputs, outputs)
    model.compile("adam", "mse")
    return model
def resblock(inputs):
    """Residual block: Dense(32, relu), Dense(32), skip-add, then relu."""
    out = tf.keras.layers.Dense(32, activation='relu')(inputs)
    out = tf.keras.layers.Dense(32)(out)
    return tf.keras.layers.Activation('relu')(out + inputs)
def build_resnet_model(layers):
    """ResNet-style MLP: Dense(32) stem, (layers-2)//2 residual blocks, Dense(10) head."""
    inputs = tf.keras.Input((10, ))
    hidden = tf.keras.layers.Dense(32, activation='relu')(inputs)
    # Each resblock includes 2 layers
    for _ in range((layers - 2) // 2):
        hidden = resblock(hidden)
    outputs = tf.keras.layers.Dense(10)(hidden)
    model = tf.keras.Model(inputs, outputs)
    model.compile("adam", "mse")
    return model
# Wrap the generator as a tf.data pipeline with fixed (32, 10) batch shapes.
dts = tf.data.Dataset.from_generator(sin, (tf.float32, tf.float32),
                                     ((32, 10), (32, 10)))
layers = 30
losses = []
print("PlainNet")
# Train progressively deeper plain MLPs and record each final loss.
for t in range(1, layers+1):
    plain = build_model(t)
    hist = plain.fit(dts, steps_per_epoch=20, epochs=20*t, verbose=0)
    losses.append(hist.history["loss"][-1])
    print(losses[-1])
plt.plot(range(1, layers+1), losses)
plt.title("sin function with NN")
plt.xlabel("Layers")
plt.ylabel("Final loss")
plt.show()
res_losses = []
print("ResNet")
# Same sweep for the residual variant (even layer counts only).
for t in range(2, layers+1, 2):
    resnet = build_resnet_model(t)
    hist = resnet.fit(dts, steps_per_epoch=20, epochs=20*t, verbose=0)
    res_losses.append(hist.history["loss"][-1])
    print(res_losses[-1])
plt.plot(range(2, layers+1, 2), res_losses)
plt.title("sin function with ResNet")
plt.xlabel("Layers")
plt.ylabel("Final loss")
plt.show()
# Overlay both curves for comparison.
plt.plot(range(1, layers+1), losses, label="PlainNet")
plt.plot(range(2, layers+1, 2), res_losses, label="ResNet")
plt.title("sin function with NN")
plt.xlabel("Layers")
plt.ylabel("Final loss")
plt.legend()
plt.show()
|
# In-place quicksort: O(n log n) average time, O(log n) auxiliary stack space.
def quick_sort(x):
    """Sort the list ``x`` in place using Hoare-style partitioning."""
    def partition(lo, hi):
        # Partition x[lo..hi] around a middle pivot; return the split point
        # (the scan pointers stop once they cross).
        pivot = x[(lo + hi) // 2]
        while lo <= hi:
            while x[lo] < pivot:
                lo += 1
            while x[hi] > pivot:
                hi -= 1
            if lo <= hi:
                x[lo], x[hi] = x[hi], x[lo]
                lo, hi = lo + 1, hi - 1
        return lo

    def sort(lo, hi):
        # Ranges of size 0 or 1 are already sorted.
        if hi <= lo:
            return
        split = partition(lo, hi)
        sort(lo, split - 1)
        sort(split, hi)

    return sort(0, len(x) - 1)
# Out-of-place quicksort: allocates new sublists at every level.
def quicksort(x):
    """Return a sorted copy of ``x`` using three-way partitioning."""
    if len(x) <= 1:
        return x
    pivot = x[len(x) // 2]
    smaller = [item for item in x if item < pivot]
    larger = [item for item in x if item > pivot]
    # Neither smaller nor larger: matches the original's ``else`` bucket.
    pivot_bucket = [item for item in x if not (item < pivot or item > pivot)]
    return quicksort(smaller) + pivot_bucket + quicksort(larger)
#!/usr/bin/env python
from astropy.units import u
from ...ast_object import ASTObject
import starlink.Ast as Ast
__all__ = ['ASTTimeFrame']
class ASTTimeFrame(ASTObject):
    '''
    self.astObject is of type TimeFrame.
    '''
    def __init__(self, ast_object=None):
        # Placeholder: direct construction of an ASTTimeFrame is not yet supported.
        raise NotImplementedError()
# Generated by Django 2.0.4 on 2018-05-12 20:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Normalize the four Music char fields to max_length=200."""

    dependencies = [
        ('music', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='music',
            name='href',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='music',
            name='src',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='music',
            name='style',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='music',
            name='title',
            field=models.CharField(max_length=200),
        ),
    ]
|
import cv2
import numpy as np
from cv2 import VideoWriter, VideoWriter_fourcc
import os
import glob
import pandas as pd
import json
from PIL import Image, ImageDraw
from saveGIF import*
"""
Author: CS6670 Group
Code structure inspired from carpedm20/DCGAN-tensorflow, GV1028/videogan
"""
def clear_prior_generated_content():
    """Delete every previously generated video and GIF file."""
    for stale in glob.glob('./genvideos/*') + glob.glob('./gengifs/*'):
        os.remove(stale)
def delete_empty_jsons(path):
    """Remove every JSON file matching glob ``path`` whose content is an empty list."""
    for i in (glob.glob(path)):
        should_delete = False
        with open(i,'r') as file_name:
            data = json.load(file_name)
            should_delete = (data == [])
        if should_delete:
            print('Deleting empty json: ' + i )
            os.remove(i)
def group_same_videos(image_folders):
    """Group consecutive frame paths that share the same video prefix.

    The prefix is everything before the first '_'; frames of one video are
    assumed to appear consecutively in the input.  Returns a list of groups.
    Bug fix: the original compared prefixes with ``is ''`` — an identity
    check against a string literal, which is implementation-dependent —
    replaced with ``==``.
    """
    groups = []
    current = []
    current_prefix = ''
    for img in image_folders:
        prefix = img.split('_')[0]
        if current_prefix == '':
            # First item: start the first group.
            current_prefix = prefix
            current.append(img)
        elif current_prefix == prefix:
            current.append(img)
        else:
            # Prefix changed: close the finished group and start a new one.
            if current != []:
                groups.append(current)
            current = [img]
            current_prefix = prefix
    if current != []:
        groups.append(current)
    return groups
def random_start_group_frames(files, nof):
    """For each group, pick a random window of ``nof`` consecutive frames.

    Bug fix: the original drew from ``randint(0, high=len(group)-1-nof)``,
    which both excluded the last valid start index and raised
    ValueError when ``len(group) == nof``; valid starts are
    0 .. len(group)-nof inclusive (``high`` is exclusive in randint).
    """
    out = []
    for group in files:
        start = np.random.randint(0, high=len(group) - nof + 1)
        out.append(group[start:start + nof])
    return out
def read_process_training_points(path, nof=128, delete_empty=False):
    """Collect one window of ``nof`` consecutive frame-JSON paths per video.

    :param path: glob pattern matching the per-frame json files
    :param nof: number of frames per training point
    :param delete_empty: when True, first purge empty json files
    :return: list of groups, each a list of ``nof`` file paths
    """
    if delete_empty:
        delete_empty_jsons(path)
    valid_jsons = glob.glob(path)
    valid_jsons = group_same_videos(valid_jsons)
    # Videos with fewer than ``nof`` frames cannot yield a full window.
    filter_short_training_sets = lambda x: len(x) >= nof
    valid_jsons = list(filter(filter_short_training_sets,valid_jsons))
    print('Loading Valid Videos...Total:', len(valid_jsons))
    return random_start_group_frames(valid_jsons, nof)
def read_process_jsons_video(files,nof=128):
    """Load one group of frame-JSONs into a (1, nof, 34, 34, 1) float32 array.

    Each frame's numeric values are concatenated and placed on the diagonal
    of a 34x34 matrix; string-valued entries are skipped.
    NOTE(review): assumes the collected numbers total exactly 34 per frame —
    confirm against the pose-json schema.
    """
    videos = np.zeros((1,nof,34,34,1))
    for f in range(len(files)):
        with open(files[f],'r') as file_name:
            data = json.load(file_name)[0]
        no_str_list=[]
        for i in data:
            if not isinstance(i[1],str):
                no_str_list+= i[1]
        frame = np.diag(np.array(no_str_list)).reshape((34,34,1))
        videos[0,f,:,:,:,] = frame
    return videos.astype('float32')
def read_and_load_video_all_files(dir,delete_genVideo=True,delete_empty = False):
    """Load every valid training video under ``dir`` as float32 arrays.

    :param dir: directory containing the per-frame .json files
    :param delete_genVideo: clear previously generated videos/GIFs first
    :param delete_empty: purge empty .json files before grouping
    """
    if delete_genVideo:
        clear_prior_generated_content()
    output= read_process_training_points(dir+'/*json',delete_empty=delete_empty)
    return [read_process_jsons_video(i) for i in output]
def process_and_write_video(videos,name):
    """Write ``videos`` (values in [-1, 1], 34x34 frames) to ./genvideos/<name>.avi."""
    videos =np.array(videos)
    width = height = 34
    FPS = 24
    fourcc = VideoWriter_fourcc(*'XVID')
    video = VideoWriter('./genvideos/'+name+'.avi', fourcc, float(FPS), (width, height),0)
    # videos = np.reshape(videos,[-1,videos.shape[0],width,height,1])
    for i in range(videos.shape[0]):
        vid = videos[i,:,:,:,:]
        # Map [-1, 1] values to the [0, 255] pixel range.
        vid = (vid + 1)*127.5
        for j in range(vid.shape[0]):
            frame = vid[j,:,:,:]
            video.write(frame)
    video.release()
def output_pose_video_matrix(matrix, name):
    """Render the diagonal pose vector of every frame in `matrix` to a GIF."""
    values = np.array(matrix)
    frames = [form_image_frame(list(values[0, t, :, :, 0].diagonal()))
              for t in range(values.shape[1])]
    list_pillow_images_to_gif(frames, name)
|
# -*- coding: utf-8 -*-
"""
# @file name : dataset.py
# @author : yts3221@126.com
# @date : 2019-08-21 10:08:00
# @brief : 各数据集的Dataset定义
"""
import numpy as np
import torch
import os
import random
from PIL import Image
from torch.utils.data import Dataset
random.seed(1)  # deterministic shuffling/sampling across runs
rmb_label = {"1": 0, "100": 1}  # banknote folder name -> class index
class RMBDataset(Dataset):
    """Dataset for the RMB (renminbi) banknote-denomination classification task."""

    def __init__(self, data_dir, transform=None):
        """
        :param data_dir: str, root directory of the dataset
        :param transform: torch transform applied to each image
        """
        self.label_name = {"1": 0, "100": 1}
        # data_info caches every (image path, label) pair; the DataLoader
        # reads samples from it by index.
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform

    def __getitem__(self, index):
        path_img, label = self.data_info[index]
        img = Image.open(path_img).convert('RGB')  # pixel values 0~255
        if self.transform is not None:
            img = self.transform(img)  # e.g. conversion to tensor
        return img, label

    def __len__(self):
        return len(self.data_info)

    @staticmethod
    def get_img_info(data_dir):
        """Walk `data_dir` and collect (jpg path, class label) pairs, one
        class per sub-directory (labels come from the module-level rmb_label)."""
        info = list()
        for root, dirs, _ in os.walk(data_dir):
            # one sub-directory per class
            for sub_dir in dirs:
                names = os.listdir(os.path.join(root, sub_dir))
                for img_name in [n for n in names if n.endswith('.jpg')]:
                    path_img = os.path.join(root, sub_dir, img_name)
                    info.append((path_img, int(rmb_label[sub_dir])))
        return info
class AntsDataset(Dataset):
    """Dataset for the ants-vs-bees binary classification task."""

    def __init__(self, data_dir, transform=None):
        self.label_name = {"ants": 0, "bees": 1}
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform

    def __getitem__(self, index):
        path_img, label = self.data_info[index]
        img = Image.open(path_img).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.data_info)

    def get_img_info(self, data_dir):
        """Walk `data_dir` and collect (jpg path, class label) pairs, one
        class per sub-directory. Raises if no image is found."""
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            # one sub-directory per class
            for sub_dir in dirs:
                jpgs = [n for n in os.listdir(os.path.join(root, sub_dir))
                        if n.endswith('.jpg')]
                for img_name in jpgs:
                    path_img = os.path.join(root, sub_dir, img_name)
                    data_info.append((path_img, int(self.label_name[sub_dir])))
        if len(data_info) == 0:
            raise Exception("\ndata_dir:{} is a empty dir! Please checkout your path to images!".format(data_dir))
        return data_info
class PortraitDataset(Dataset):
    """Portrait segmentation dataset: pairs each "<name>.png" image with its
    "<name>_matte.png" mask, both resized to `in_size` x `in_size`."""

    def __init__(self, data_dir, transform=None, in_size=224):
        super(PortraitDataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.label_path_list = list()
        self.in_size = in_size
        # collect all mask paths up front
        self._get_img_path()

    def __getitem__(self, index):
        path_label = self.label_path_list[index]
        # "xxx_matte.png" -> "xxx.png" (strip the 10-char "_matte.png" suffix)
        path_img = path_label[:-10] + ".png"

        img_pil = Image.open(path_img).convert('RGB')
        img_pil = img_pil.resize((self.in_size, self.in_size), Image.BILINEAR)
        img_hwc = np.array(img_pil)
        img_chw = img_hwc.transpose((2, 0, 1))  # HWC -> CHW

        label_pil = Image.open(path_label).convert('L')
        label_pil = label_pil.resize((self.in_size, self.in_size), Image.NEAREST)
        label_hw = np.array(label_pil)
        label_chw = label_hw[np.newaxis, :, :]
        # Binarize in place; label_chw is a view of label_hw, so it sees this too.
        label_hw[label_hw != 0] = 1

        if self.transform is not None:
            # BUG FIX: img_chw/label_chw are already numpy arrays; the original
            # called .numpy() on them, which raises AttributeError.
            img_chw_tensor = torch.from_numpy(self.transform(img_chw)).float()
            label_chw_tensor = torch.from_numpy(self.transform(label_chw)).float()
        else:
            img_chw_tensor = torch.from_numpy(img_chw).float()
            label_chw_tensor = torch.from_numpy(label_chw).float()

        return img_chw_tensor, label_chw_tensor

    def __len__(self):
        return len(self.label_path_list)

    def _get_img_path(self):
        """Populate self.label_path_list with shuffled *_matte.png paths."""
        file_list = os.listdir(self.data_dir)
        file_list = list(filter(lambda x: x.endswith("_matte.png"), file_list))
        path_list = [os.path.join(self.data_dir, name) for name in file_list]
        random.shuffle(path_list)
        if len(path_list) == 0:
            # BUG FIX: the original referenced an undefined name `data_dir` here.
            raise Exception("\ndata_dir:{} is a empty dir! Please checkout your path to images!".format(self.data_dir))
        self.label_path_list = path_list
import os
# Multiplication-table ("gugudan") console loop: repeatedly asks for a table
# number, prints that table, and exits when the user enters 0. The pause/cls
# calls are Windows-specific.
while True:
    num = int(input("몇 단?(0을 입력 시 종료) : "))
    if num == 0:
        print("구구단 프로그램을 종료합니다.")
        break
    for factor in range(1, 10):
        print("%d x %d = %d" % (num, factor, num * factor))
    os.system("pause")
    os.system("cls")
|
#! /usr/bin/env python
#
# Copyright 2016 ARTED developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Autotools (configure) like script with Python.
#
from optparse import OptionParser, OptionGroup
import os
# Directory containing this configure script (used as the CMake source dir).
SOURCE_DIR = os.path.dirname(__file__)
def on_or_off(v):
    """Map a truthy flag to the CMake cache literal 'on'/'off'."""
    return 'on' if v else 'off'
def debug_or_release(v):
    """Map a truthy flag to the CMake build type 'Debug'/'Release'."""
    return 'Debug' if v else 'Release'
def add_option(dic, name, var):
    """Store CMake option `name` as 'on'/'off' in `dic`; None means "not set"."""
    if var is None:
        return
    dic[name] = on_or_off(var)
def add_env(dic, name, var):
    """Store the raw value `var` under `name` in `dic` unless it is None."""
    if var is None:
        return
    dic[name] = var
# --- command-line definition -------------------------------------------------
# NOTE: this is a Python 2 script (print statements are used below).
usage = "usage: %prog [options]"
parser = OptionParser(usage)
# NOTE(review): --dry-run is parsed but never honored before os.system below.
parser.add_option('-n', '--dry-run', action='store_true', default=False, dest='dry_run', help='don\'t actually run.')
parser.add_option('-v', '--verbose', action='store_true', default=False, dest='verbose', help='show verbose messages.')
group = OptionGroup(parser, 'Build target')
group.add_option('-a', '--arch', action='store', default=None, dest='arch', help='cross compile mode. ARCH format should be <COMPILER>-<SYSTEM>')
group.add_option('-t', '--target', action='store', default='sc', dest='target', help='build target. sc (Single-cell) or ms (Multi-scale), default sc')
parser.add_option_group(group)
group = OptionGroup(parser, 'Optimization options')
group.add_option('--old-stencil', action='store_false', dest='stencil_optimized', help='use old implementation of the stencil computation code.')
group.add_option('--explicit-vec', action='store_true', dest='explicit_vec', help='enable explicit vectorization in the stencil computation with C-lang.')
group.add_option('--compiler-vec', action='store_false', dest='explicit_vec', help='defer to compiler vectorization in the stencil computation with Fortran90.')
group.add_option('--simd-set', action='store', dest='simd', help='specifies SIMD instruction set. (e.g. AVX, HPC_ACE2...)')
group.add_option('--enable-swp', action='store_true', dest='swp', help='enable software prefetch in the explicit vec.')
group.add_option('--disable-swp', action='store_false', dest='swp', help='disable software prefetch in the explicit vec.')
group.add_option('--array-padding', action='store_true', dest='padding', help='array padding applied to the stencil computation.')
group.add_option('--domain-pow2', action='store_true', dest='domain_two', help='3-D domain size is power of two.')
group.add_option('--loop-blocking', action='store_true', dest='loop_blocking', help='loop blocking applied to the stencil computation.')
group.add_option('--opt-current', action='store_true', dest='current_optimized', help='enable the current routine optimization in RT.')
group.add_option('--reduce-manycore', action='store_true', dest='reduce_manycore', help='enable reduction code optimization for many-core processor.')
parser.add_option_group(group)
group = OptionGroup(parser, 'Debug options')
group.add_option('-d', '--debug', action='store_true', default=False, dest='debug', help='enable debug build.')
group.add_option('--papi', action='store_true', dest='papi', help='use PAPI profiling (SC only).')
group.add_option('--nvtx', action='store_true', dest='nvtx', help='use NVIDIA Tools Extention Library.')
parser.add_option_group(group)
(options, args) = parser.parse_args()
### check build target
if options.target != 'sc' and options.target != 'ms':
    print 'invalid target : {0}'.format(options.target)
    print ''
    parser.print_help()
    exit(-1)
### check options
# Translate the parsed options into CMake -D cache definitions.
# NOTE(review): `dict` shadows the builtin of the same name.
dict = {}
dict['BUILD_TARGET'] = options.target.lower()
if options.arch is not None:
    dict['CMAKE_TOOLCHAIN_FILE'] = options.arch.lower()
dict['CMAKE_BUILD_TYPE'] = debug_or_release(options.debug)
dict['CMAKE_VERBOSE_MAKEFILE'] = on_or_off(options.verbose)
add_option(dict, 'USE_PAPI', options.papi)
add_option(dict, 'USE_NVTX', options.nvtx)
add_option(dict, 'OPT_STENCIL', options.stencil_optimized)
add_option(dict, 'OPT_CURRENT', options.current_optimized)
add_option(dict, 'DOMAIN_IS_POW2', options.domain_two)
add_option(dict, 'ENABLE_ARRAY_PADDING', options.padding)
add_option(dict, 'ENABLE_EXPLICIT_VEC', options.explicit_vec)
add_option(dict, 'ENABLE_LOOP_BLOCKING', options.loop_blocking)
add_option(dict, 'ENABLE_SWPREFETCH', options.swp)
add_option(dict, 'ENABLE_REDUCE_FOR_MANYCORE', options.reduce_manycore)
if options.simd is not None:
    dict['SIMD_SET'] = options.simd.upper()
# Build the "-D K=V" definition string; extra positional K=V args become
# environment variables prefixed to the cmake invocation.
define = ''
for k,v in dict.items():
    define = '{0} -D {1}={2}'.format(define, k, v)
env = ''
for var in args:
    (k, v) = var.split('=', 1)
    env = '{0} {1}="{2}"'.format(env, k, v)
### configuration
comm = '{2} cmake {0} {1}'.format(define, SOURCE_DIR, env)
print ' $', comm
os.system(comm)
import mcpi.minecraft as minecraft
import mcpi.block as block
import time
import anyio.seg7 as display
import RPi.GPIO as GPIO
# GPIO pin (BCM numbering) wired to the push button.
BUTTON = 4
# Pins driving the 7-segment display segments.
LED_PINS = [10,22,25,8,7,9,11,12]
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON, GPIO.IN)
# Display polarity flag passed to the seg7 driver.
ON = False
display.setup(GPIO, LED_PINS, ON)
# Connection to the running Minecraft Pi instance.
mc = minecraft.Minecraft.create()
def bomb(x, y, z):
    """Place TNT next to (x, y, z), count down 5..0 on the 7-segment display
    (one second per digit), then clear the blast area and announce it in chat.
    """
    mc.setBlock(x + 1, y, z + 1, block.TNT.id)
    for second in range(5, -1, -1):
        display.write(str(second))
        time.sleep(1)
    mc.postToChat("BANG!! La bomba ha estallado")
    mc.postToChat("TodoElectronica 21")
    mc.setBlocks(x - 10, y - 5, z - 10, x + 10, y + 10, z + 10, block.AIR.id)
# Poll the button every 100 ms; a press (active-low, input reads False)
# detonates a bomb at the player's current tile position.
try:
    while True:
        time.sleep(0.1)
        if GPIO.input(BUTTON) == False:
            pos = mc.player.getTilePos()
            bomb (pos.x,pos.y,pos.z)
finally:
    # Always release the GPIO pins, even on Ctrl-C.
    GPIO.cleanup()
|
from flask import Blueprint, request, render_template, redirect, url_for
from application.database.models import Product, db
from application.forms.product import CreateProductForm, UpdateProductForm, DeleteProductForm, SearchProductForm, FilterProductForm
# All routes in this blueprint are mounted under /product.
product_bp = Blueprint('product_bp', __name__, url_prefix="/product")
@product_bp.route("/")
def get_products():
    """List every product."""
    all_products = Product.query.all()
    return render_template('product/Products.html', Products=all_products)
@product_bp.route("/create", methods=['GET', 'POST'])
def create_product():
    """Create a new product, then redirect to its update page."""
    form = CreateProductForm()
    if form.validate_on_submit():
        # register product
        # TODO(review): populate the model's fields from `form` — the original
        # constructs an empty Product; confirm the model's required columns.
        product = Product(
        )
        # BUG FIX: the original called db.session.add() with no argument,
        # so the product was never staged for insertion.
        db.session.add(product)
        db.session.commit()
        id = product.id
        return redirect(url_for('product_bp.update_product', id=id))
    return render_template('product/create-product.html', form=form)
@product_bp.route("/<id>/update", methods=['GET', 'POST'])
def update_product(id):
    """Edit an existing product."""
    product = Product.query.get(id)
    form = UpdateProductForm(obj=product)
    if form.validate_on_submit():
        # BUG FIX: the original committed without copying the submitted form
        # values onto the model, so edits were silently dropped.
        form.populate_obj(product)
        db.session.commit()
        id = product.id
        return redirect(url_for('product_bp.update_product', id=id))
    return render_template('product/update-product.html', form=form)
@product_bp.route("/<id>/delete", methods=['GET', 'POST'])
def delete_product(id):
    """Delete a product after form confirmation."""
    product = Product.query.get(id)
    form = DeleteProductForm(obj=product)
    if form.validate_on_submit():
        # BUG FIX: db.session.delete() was called with no argument.
        db.session.delete(product)
        db.session.commit()
        # BUG FIX: the endpoint is get_products (lowercase); 'get_Products'
        # would raise BuildError at request time.
        return redirect(url_for('product_bp.get_products'))
    return render_template('product/delete-product.html', form=form, product=product)
# secondary routes
@product_bp.route("/<id>/stock")
def get_product_stock(id):
    """Show stock for one product (secondary view).

    BUG FIX: the route declares <id> but the original view took no parameter,
    so Flask raised a TypeError on every request (and `id` resolved to the
    builtin inside the body).
    """
    product = Product.query.get(id)
    stock = product.stock
    return render_template('stock/secondary-stock.html', parent_template='product/product.html', product=product, stock=stock)
@product_bp.route("/<id>/sales")
def get_product_sales(id):
    """Show sales for one product (secondary view).

    BUG FIX: the route declares <id> but the original view took no parameter,
    so Flask raised a TypeError on every request.
    """
    product = Product.query.get(id)
    sales = product.sales
    return render_template('sale/secondary-sales.html', parent_template='product/product.html', product=product, sales=sales)
|
def collatz(number):
    """Print and return the next Collatz term: n // 2 for even n, 3n + 1 for odd."""
    if number % 2 == 0:
        nxt = number // 2
    else:
        nxt = 3 * number + 1
    print(nxt)
    return nxt
# Drive the Collatz sequence from a user-supplied integer down to 1,
# guarding against non-integer input.
try:
    valor = int(input('Informe um número: '))
    while valor != 1:
        valor = collatz(int(valor))
except ValueError:
    print("Insira um número inteiro!")
|
# USAGE
# python search.py --tree vptree.pickle --hashes hashes.pickle --query queries\accordion.jpg
from pyimagesearch.parallel_hashing import *
import argparse
import pickle
import time
import cv2
# construct argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-t', '--tree', required=True, type=str, help='Path to output VP-tree')
ap.add_argument('-a', '--hashes', required=True, type=str, help='Path to output hashes directory')
ap.add_argument('-q', '--query', required=True, type=str, help='Path to input query image')
ap.add_argument('-d', '--distance', type=int, default=10, help='Maximum hamming distance')
args = vars(ap.parse_args())
# load the VP-Tree and hashes dictionary
# SECURITY NOTE(review): pickle.loads executes arbitrary code from the file;
# only load trees/hashes generated by a trusted indexing run.
print('[INFO] loading VP-Tree and hashes...')
tree = pickle.loads(open(args['tree'], 'rb').read())
hashes = pickle.loads(open(args['hashes'], 'rb').read())
# load the input query image and show it
image = cv2.imread(args['query'])
cv2.imshow('Query', image)
# compute the difference hash for the query image, then convert it to an int
query_hash = dhash(image)
query_hash = convert_hash(query_hash)
# perform the search: every stored hash within the given Hamming distance,
# sorted nearest-first
print('[INFO] performing search...')
start = time.time()
results = tree.get_all_in_range(query_hash, args['distance'])
results = sorted(results)
print(f'[INFO] search took {time.time() - start} seconds')
# loop over the results
for d, h in results:
    # grab all image paths in our dataset with the same hash
    result_paths = hashes.get(h, [])
    print(f'[INFO] {len(result_paths)} total image(s) with d: {d}, h: {h}')
    # loop over the result paths
    for result_path in result_paths:
        # load the result image and display it to our screen
        result = cv2.imread(result_path)
        cv2.imshow('Result', result)
        cv2.waitKey(0)
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import threading
import urllib2
import time
class Handler(BaseHTTPRequestHandler):
    """Replies 200 to every GET with the name of the thread serving it.

    NOTE: Python 2 code (print statement below).
    """
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        # Report which worker thread handled this request.
        message = threading.currentThread().getName()
        print message
        self.wfile.write(message)
        self.wfile.write('\n')
        return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in a separate thread."""
if __name__ == '__main__':
    servers = {}
    server = ThreadedHTTPServer(('localhost', 8080), Handler)
    # NOTE(review): server2 is bound to port 8000 but never started or closed.
    server2 = ThreadedHTTPServer(('localhost', 8000), Handler)
    for n in range(5):
        for i in range(5):
            # NOTE(review): each iteration starts ANOTHER daemon thread running
            # serve_forever on the same server object (25 threads total) and
            # then issues one request against it — confirm this is intentional.
            servers[i] = threading.Thread(target=server.serve_forever)
            servers[i].daemon = True
            servers[i].start()
            request = urllib2.Request('http://localhost:8080')
            response = urllib2.urlopen(request)
            time.sleep(1)
    print 'Starting server, use <Ctrl-C> to stop'
    server.shutdown()
    server.server_close()
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
import os
from dataclasses import dataclass
from os import PathLike
from pathlib import Path, PurePath
from typing import Any, Iterable
from pants.core.util_rules.system_binaries import GitBinary, GitBinaryException, MaybeGitBinary
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.rules import collect_rules, rule
from pants.util.contextutil import pushd
logger = logging.getLogger(__name__)
class GitWorktree(EngineAwareReturnType):
    """Implements a safe wrapper for un-sandboxed access to Git in the user's working copy.
    This type (and any wrappers) should be marked `EngineAwareReturnType.cacheable=False`, because
    it internally uses un-sandboxed APIs, and `@rules` which produce it should re-run in each
    session. It additionally implements a default `__eq__` in order to prevent early-cutoff in the
    graph, and force any consumers of the type to re-run.
    """

    # Absolute path of the git working tree.
    worktree: PurePath
    # Absolute path of the repository metadata directory (usually `.git`).
    _gitdir: PurePath
    # Git client used for all (un-sandboxed) invocations.
    _git_binary: GitBinary

    def __init__(
        self,
        binary: GitBinary,
        worktree: PathLike[str] | None = None,
        gitdir: PathLike[str] | None = None,
    ) -> None:
        """Creates a git object that assumes the git repository is in the cwd by default.
        binary: The git binary to use.
        worktree: The path to the git repository working tree directory (typically '.').
        gitdir: The path to the repository's git metadata directory (typically '.git').
        """
        self.worktree = Path(worktree or os.getcwd()).resolve()
        self._gitdir = Path(gitdir).resolve() if gitdir else (self.worktree / ".git")
        self._git_binary = binary

    def cacheable(self) -> bool:
        # Never cache: wraps un-sandboxed state (see the class docstring).
        return False

    @property
    def current_rev_identifier(self):
        """Revision identifier for "the current checkout" (always HEAD)."""
        return "HEAD"

    @property
    def commit_id(self):
        """Commit hash of HEAD, via `git rev-parse HEAD`."""
        return self._git_binary._invoke_unsandboxed(self._create_git_cmdline(["rev-parse", "HEAD"]))

    @property
    def branch_name(self) -> str | None:
        """Current branch name, or None when HEAD is detached."""
        branch = self._git_binary._invoke_unsandboxed(
            self._create_git_cmdline(["rev-parse", "--abbrev-ref", "HEAD"])
        )
        return None if branch == "HEAD" else branch

    def _fix_git_relative_path(self, worktree_path: str, relative_to: PurePath | str) -> str:
        """Re-relativize a worktree-relative path reported by git to `relative_to`."""
        return str((self.worktree / worktree_path).relative_to(relative_to))

    def changed_files(
        self,
        from_commit: str | None = None,
        include_untracked: bool = False,
        relative_to: PurePath | str | None = None,
    ) -> set[str]:
        """Return paths (relative to `relative_to`, default the worktree) that
        are changed vs HEAD, optionally also vs `from_commit`'s merge-base and
        optionally including untracked files."""
        relative_to = PurePath(relative_to) if relative_to is not None else self.worktree
        rel_suffix = ["--", str(relative_to)]
        uncommitted_changes = self._git_binary._invoke_unsandboxed(
            self._create_git_cmdline(
                ["diff", "--name-only", "HEAD"] + rel_suffix,
            )
        )
        files = set(uncommitted_changes.splitlines())
        if from_commit:
            # Grab the diff from the merge-base to HEAD using ... syntax. This ensures we have just
            # the changes that have occurred on the current branch.
            committed_cmd = ["diff", "--name-only", from_commit + "...HEAD"] + rel_suffix
            committed_changes = self._git_binary._invoke_unsandboxed(
                self._create_git_cmdline(committed_cmd)
            )
            files.update(committed_changes.split())
        if include_untracked:
            untracked_cmd = [
                "ls-files",
                "--other",
                "--exclude-standard",
                "--full-name",
            ] + rel_suffix
            untracked = self._git_binary._invoke_unsandboxed(
                self._create_git_cmdline(untracked_cmd)
            )
            files.update(untracked.split())
        # git will report changed files relative to the worktree: re-relativize to relative_to
        return {self._fix_git_relative_path(f, relative_to) for f in files}

    def changes_in(self, diffspec: str, relative_to: PurePath | str | None = None) -> set[str]:
        """Return paths touched by the commits selected by `diffspec`
        (via `git diff-tree`), relative to `relative_to`."""
        relative_to = PurePath(relative_to) if relative_to is not None else self.worktree
        cmd = ["diff-tree", "--no-commit-id", "--name-only", "-r", diffspec]
        files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).split()
        return {self._fix_git_relative_path(f.strip(), relative_to) for f in files}

    def _create_git_cmdline(self, args: Iterable[str]) -> list[str]:
        """Prefix `args` with --git-dir/--work-tree so the cwd doesn't matter."""
        return [f"--git-dir={self._gitdir}", f"--work-tree={self.worktree}", *args]

    def __eq__(self, other: Any) -> bool:
        # NB: See the class doc regarding equality.
        return id(self) == id(other)
@dataclass(frozen=True)
class MaybeGitWorktree(EngineAwareReturnType):
    """Optional `GitWorktree`: `git_worktree` is None when no git binary or
    repository is available."""

    git_worktree: GitWorktree | None = None

    def cacheable(self) -> bool:
        # Wraps un-sandboxed state; must never be cached (see GitWorktree).
        return False
@dataclasses.dataclass(frozen=True)
class GitWorktreeRequest:
    """Parameters for locating a git worktree."""

    # Explicit repository metadata dir; defaults to <worktree>/.git when None.
    gitdir: PathLike[str] | None = None
    # Directory to resolve the repository from; defaults to the cwd when None.
    subdir: PathLike[str] | None = None
@rule
async def get_git_worktree(
    git_worktree_request: GitWorktreeRequest,
    maybe_git_binary: MaybeGitBinary,
) -> MaybeGitWorktree:
    """Locate the enclosing git repository via `git rev-parse --show-toplevel`.

    Returns an empty MaybeGitWorktree when no git binary is available or the
    current (or requested sub-) directory is not inside a repository.
    """
    if not maybe_git_binary.git_binary:
        return MaybeGitWorktree()

    git_binary = maybe_git_binary.git_binary
    cmd = ["rev-parse", "--show-toplevel"]

    try:
        if git_worktree_request.subdir:
            # Resolve relative to the requested subdirectory rather than the cwd.
            with pushd(str(git_worktree_request.subdir)):
                output = git_binary._invoke_unsandboxed(cmd)
        else:
            output = git_binary._invoke_unsandboxed(cmd)
    except GitBinaryException as e:
        logger.info(f"No git repository at {os.getcwd()}: {e!r}")
        return MaybeGitWorktree()

    git_worktree = GitWorktree(
        binary=git_binary,
        gitdir=git_worktree_request.gitdir,
        worktree=PurePath(output),
    )
    logger.debug(
        f"Detected git repository at {git_worktree.worktree} on branch {git_worktree.branch_name}"
    )
    return MaybeGitWorktree(git_worktree=git_worktree)
def rules():
    """Register this module's @rule-decorated functions with the Pants engine."""
    return list(collect_rules())
|
#!/usr/bin/env python2
##############################################################################
# Copyright (c) 2012, GeoData Institute (www.geodata.soton.ac.uk)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##############################################################################
"""
Output mapserver configuration information to `node-gyp`
Configuration options are retrieved from environment variables set using `npm
config set`. This allows for a simple `npm install mapserv` to work.
"""
from optparse import OptionParser
import os
def get_lib_dir():
    """Return the mapserver library directory set via `npm config`, or ''."""
    lib_dir = os.environ.get('npm_config_mapserver_lib_dir')
    return lib_dir if lib_dir is not None else ''
# NOTE: Python 2 script (print statements below). Emits mapserver build
# flags for node-gyp, read from `npm config`-set environment variables.
parser = OptionParser()
parser.add_option("--include",
                  action="store_true", default=False,
                  help="output the mapserver include path")
parser.add_option("--ldflags",
                  action="store_true", default=False,
                  help="output the mapserver library rpath option")
(options, args) = parser.parse_args()
if options.include:
    print os.environ.get('npm_config_mapserver_include_dir', '')
if options.ldflags:
    # write the library path into the resulting binary
    lib_dir = get_lib_dir()
    if lib_dir:
        print "-Wl,-rpath=%s -L%s" % (lib_dir, lib_dir)
    print '-Wl,--no-as-needed,-lmapserver'
# Echo one line of standard input converted to upper case.
text = str(input())
print(text.upper())
# os module demo: inspect and change the current working directory.
import os
# Inspect and switch directories
print(os.getcwd())  # current directory, e.g. D:\pythonPro\function
os.chdir("D:\pythonPro\day01")  # change directory (NOTE(review): prefer a raw string r"..." for Windows paths)
print(os.getcwd())  # check again after the change
print("-"*30)
print(os.curdir)  # '.' — the current-directory marker
print(os.pardir)  # '..' — the parent-directory marker
os.chdir(os.pardir)  # move up to the parent directory
print(os.getcwd())  # print the new current directory
print(os.listdir(os.getcwd()))  # list every entry (files and dirs) in it
|
# HW1 for Computer Vision
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
# Asserts to test that the values are correct
# assert np.abs(np.sum(psi)) < 1e-8
# assert np.abs(np.sum(np.abs(psi) ** 2) - 1) < 1e-8
# Tutorial stuff to get used to cv2's API
# Loads an image
# grayscale image
# img = cv2.imread('small.jpg', cv2.IMREAD_COLOR)
# Displays an image
# cv2.imshow('image', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
# plt.show()
# cv2.imshow("sigma: " + str(sigma) + " - theta: " + str(theta), out_img)
# plt.imshow(np.real(main_kernel), cmap=plt.get_cmap('gray'))
# plt.show()
# Morlet-wavelet filter window: WINDOW_SIZE x WINDOW_SIZE taps centred on 0.
WINDOW_SIZE = 37
HALF_SIZE = WINDOW_SIZE // 2
# Working image size in pixels.
SIZE = 168
INPUT_IMAGE = "img/noisy_circle.jpg"
PENTAGON_LEFT = "img/left.png"
PENTAGON_RIGHT = "img/right.png"
# Edge-detection tuning constants (empirically chosen).
EPSILON = -5000
NORMALISATION = 4000
def calculate_b(u_squared, sigma):
    """Gaussian envelope exp(-u^2 / (2*sigma^2)) of the Morlet wavelet."""
    return np.exp(-u_squared / (2.0 * sigma ** 2))
def calculate_a(u, sigma):
    """Complex carrier exp(i * pi/(2*sigma) * u) of the Morlet wavelet."""
    return np.exp(1j * np.pi * u / (2.0 * sigma))
def q1_make_wavelet(sigma, theta):
    """Build a WINDOW_SIZE x WINDOW_SIZE complex Morlet wavelet (vectorised).

    A complex carrier along direction `theta` is windowed by an isotropic
    Gaussian; c2 removes the DC component and c1 normalises the energy.
    """
    coords = list(range(-HALF_SIZE, HALF_SIZE + 1))
    x, y = np.meshgrid(coords, coords)
    # Projection of (x, y) onto the unit direction vector e_theta.
    u = x * np.cos(theta) + y * np.sin(theta)
    # NOTE(review): named like u^2 in the original but computed as the squared
    # radius x^2 + y^2 — kept exactly as the original for identical output.
    u_squared = np.power(x, 2) + np.power(y, 2)
    a = calculate_a(u, sigma)
    b = calculate_b(u_squared, sigma)
    # DC-removal coefficient.
    c2 = np.sum(a * b) / np.sum(b)
    # Energy-normalisation coefficient.
    c1 = 1 / np.sqrt(np.sum((1 - 2 * c2 * np.cos(u * np.pi / (2 * sigma)) + np.power(c2, 2)) * np.exp(-1 * u_squared / (1 * np.power(sigma, 2)))))
    return np.transpose((c1 / sigma) * (a - c2) * b)
def init_kernel(sigma, theta):
    """Loop-based construction of the complex Morlet kernel (cf. q1_make_wavelet).

    BUG FIX: the kernel dtype was `np.complex`, a deprecated alias removed in
    NumPy 1.24 — the builtin `complex` is the documented replacement.
    Loop-invariant direction/frequency terms are hoisted out of the loops;
    the arithmetic is otherwise unchanged.
    """
    kernel = np.zeros([WINDOW_SIZE, WINDOW_SIZE], complex)
    x_range = range(-HALF_SIZE, HALF_SIZE + 1)
    y_range = range(-HALF_SIZE, HALF_SIZE + 1)
    e_theta = [np.cos(theta), np.sin(theta)]  # hoisted: loop-invariant
    omega = np.pi / (2 * sigma)               # carrier angular frequency
    # First pass: DC-removal coefficient c2 = <carrier * envelope> / <envelope>.
    numerator = 0
    denominator = 0
    for x in x_range:
        for y in y_range:
            b = calculate_b(np.power(x, 2) + np.power(y, 2), sigma)
            u = np.dot([x, y], e_theta)
            numerator += (np.cos(omega * u) + 1j * np.sin(omega * u)) * b
            denominator += b
    c2 = numerator / denominator
    # Second pass: energy-normalisation coefficient c1.
    psi = 0
    for x in x_range:
        for y in y_range:
            u = np.dot([x, y], e_theta)
            psi += (1 - (2 * c2 * np.cos(omega * u)) + (c2 ** 2)) * np.exp(-(((x ** 2) + (y ** 2)) / (sigma ** 2)))
    c1 = 1 / np.sqrt(psi)
    # Third pass: fill the kernel taps.
    for x in x_range:
        for y in y_range:
            u = np.dot([x, y], e_theta)
            kernel[x + HALF_SIZE][y + HALF_SIZE] = (c1 / sigma) * (np.cos(omega * u) + 1j * np.sin(omega * u) - c2) * calculate_b(np.power(x, 2) + np.power(y, 2), sigma)
    return kernel
def print_q2_image(img, sigma, theta, count, hist_real, hist_imag, should_print_image, size):
    """Filter `img` with the (sigma, theta) Morlet kernel, optionally writing
    the filtered images and kernel plots to output/, and update the running
    real/imaginary response histograms.

    Returns (stacked real+imag histograms, hist_real, hist_imag).
    """
    # Output image
    out_img = np.zeros(img.shape[:2], np.float64)  # Matrix of black pixels the same size as the input image
    main_kernel = init_kernel(sigma, theta)
    # NOTE(review): q1_kernel and `normalisation` below are computed but never used.
    q1_kernel = q1_make_wavelet(sigma, theta)
    # We now normalise with 0 and output the real image
    normalisation = cv2.normalize(main_kernel.real, np.zeros((WINDOW_SIZE, WINDOW_SIZE)), alpha=0, beta=255,
                                  norm_type=cv2.NORM_MINMAX)
    out_img = cv2.filter2D(img, -1, np.real(main_kernel), out_img, (-1, -1), cv2.BORDER_DEFAULT)
    if should_print_image:
        out_img = add_text_to_img(out_img, "S: " + str(sigma) + ", T: " + str("%.2f" % theta) + ", real")
        cv2.imwrite("output/out_img_real_" + str(count) + ".jpg", out_img)
        plt.imshow(np.real(main_kernel), cmap=plt.get_cmap('gray'))
        plt.title("S: " + str(sigma) + ", T: " + str("%.2f" % theta) + ", real")
        plt.savefig("output/out_kernel_real_" + str(count) + ".png")
        plt.gcf().clear()
    hist_real = print_q3_histogram(out_img, hist_real, size)
    # We now normalise with 1 and output the imaginary image
    out_img = cv2.filter2D(img, -1, np.imag(main_kernel), out_img, (-1, -1), cv2.BORDER_DEFAULT)
    if should_print_image:
        out_img = add_text_to_img(out_img, "S: " + str(sigma) + ", T: " + str("%.2f" % theta) + ", imag")
        cv2.imwrite("output/out_img_imag_" + str(count) + ".jpg", out_img)
        plt.imshow(np.imag(main_kernel), cmap=plt.get_cmap('gray'))
        plt.title("S: " + str(sigma) + ", T: " + str("%.2f" % theta) + ", imag")
        plt.savefig("output/out_kernel_imag_" + str(count) + ".png")
        plt.gcf().clear()
    hist_imag = print_q3_histogram(out_img, hist_imag, size)
    return np.dstack((hist_real, hist_imag)), hist_real, hist_imag
def gaussian_blur(img):
    """Blur `img` with a (2*HALF_SIZE+1)-square Gaussian kernel (sigma auto)."""
    dst = np.zeros(img.shape[:2], np.float64)
    ksize = (2 * HALF_SIZE) + 1
    return cv2.GaussianBlur(img, (ksize, ksize), 0, dst)
def print_q3_histogram(img, hist, size):
    """Update `hist` in place with the element-wise maximum of `hist` and
    `img` over the top-left `size` x `size` region, and return `hist`.

    Vectorised: the original looped pixel-by-pixel in Python (O(size^2)
    interpreter overhead) for what is a single np.maximum call with the same
    result and the same in-place semantics.
    """
    np.maximum(hist[:size, :size], img[:size, :size], out=hist[:size, :size])
    return hist
def detect_edges(real, imaginary, size):
    """Turn the accumulated real/imaginary filter responses into a scaled
    edge map (higher value = stronger edge).

    NOTE(review): `diff = np.amax(ratio)` is evaluated inside the loop while
    `ratio` is still being filled, so early pixels are normalised by a partial
    maximum — confirm whether it was meant to run after the loops complete.
    """
    epsilon = EPSILON
    ratio = np.zeros((size, size))
    edge_array = np.zeros((size, size))
    for i in range(size):
        for j in range(size):
            # +0.001 keeps the numerator from exactly equalling the denominator.
            ratio[i][j] = (real[i][j] + (0.001 + epsilon)) / (imaginary[i][j] + epsilon)
            diff = np.amax(ratio)
            edge_array[i][j] = 1 - (ratio[i][j] / diff)
    return edge_array * NORMALISATION
def get_values(img, should_print_images):
    """Run all 12 (sigma, theta) wavelet filters over `img`, accumulating the
    real/imaginary response histograms, and return them with the image size."""
    height, width = img.shape
    # 12 parameter combinations: 3 scales x 4 orientations.
    sigmas = [1, 3, 6]
    thetas = [0, math.pi / 4, math.pi / 2, (3 * math.pi) / 4]
    hist_real = np.ones(img.shape[:2], np.float64)   # running accumulator, real responses
    hist_imag = np.zeros(img.shape[:2], np.float64)  # running accumulator, imaginary responses
    hist = np.zeros(img.shape[:2], np.float64)
    params = [(s, t) for s in sigmas for t in thetas]
    for count, (sigma, theta) in enumerate(params):
        hist, hist_real, hist_imag = print_q2_image(img, sigma, theta, count, hist_real, hist_imag, should_print_images, height - 1)
    return hist, hist_real, hist_imag, height, width
def add_text_to_img(img, text):
    """Draw `text` (a string, or a list of lines stacked bottom-up) onto `img`
    near its bottom-left corner and return the image."""
    height, width = img.shape
    if isinstance(text, list):
        # Last list entry ends up nearest the bottom edge.
        for offset, line in enumerate(reversed(text), start=1):
            cv2.putText(img, line, (0, height - (10 * offset)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_4)
    else:
        cv2.putText(img, text, (0, height - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_4)
    return img
def main():
    """Run all homework questions end-to-end, writing results to output/."""
    # Q1: Now we create all the different morlet wavelets
    print("Q1: Generating Morlet Wavelet images now")
    circle_img = cv2.imread(INPUT_IMAGE, cv2.IMREAD_GRAYSCALE)
    circle_hist, circle_hist_real, circle_hist_imag, circle_height, circle_width = get_values(circle_img, True)
    # Q2: Now we output the original image with a gaussian blur
    print("Q2: Generating gaussian-blurred image now")
    gaussian_img = gaussian_blur(circle_img)
    gaussian_img = add_text_to_img(gaussian_img, "Gaussian Blur")
    cv2.imwrite("output/gaussian.jpg", gaussian_img)
    # Q3: Now we print out histograms for real and imaginary values
    print("Q3: Generating real & imaginary value histograms now")
    print("\tPlease wait, outputting real histogram")
    plt.hist(circle_hist[:, :, 0], 10)
    plt.title("Real Values Histogram")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.savefig("output/histogram_real.png")
    plt.gcf().clear()
    print("\tPlease wait, outputting imaginary histogram")
    plt.hist(circle_hist[:, :, 1], 10)
    plt.title("Imaginary Values Histogram")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.savefig("output/histogram_imaginary.png")
    plt.gcf().clear()
    # Q4a: Edge detection for input image
    print("Q4a: Generating edge-detection for the input image")
    edge_detection_circle = detect_edges(circle_hist_real, circle_hist_imag, circle_height - 1)
    edge_detection = add_text_to_img(edge_detection_circle, ["Edge Detection:", "Circle"])
    cv2.imwrite("output/edge.jpg", edge_detection)
    # Q4b: Edge detection for pentagon image
    print("Q4b: Generating edge-detection for the pentagon (takes ~20 seconds)")
    pentagon_img = cv2.imread(PENTAGON_LEFT, cv2.IMREAD_GRAYSCALE)
    pentagon_hist, pentagon_hist_real, pentagon_hist_imag, pentagon_height, pentagon_width = get_values(pentagon_img, False)
    # NOTE(review): the circle call above passes height - 1 to detect_edges,
    # this one passes height — confirm which is intended.
    pentagon_edge_detection = detect_edges(pentagon_hist_real, pentagon_hist_imag, pentagon_height)
    pentagon_edge_detection = add_text_to_img(pentagon_edge_detection, ["Edge Detection:", "Pentagon"])
    cv2.imwrite("output/edge_pentagon.jpg", pentagon_edge_detection)
# Script entry point.
if __name__ == '__main__':
    main()
|
# config/filesystem.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import os
from bloggertool.exceptions import FileNotFoundError, FileOutOfProject
class FileSystem(object):
    """Path helper confined to a project root directory.

    Every path handed out is guaranteed to live under ``root``; anything
    escaping it raises FileOutOfProject.  Actual filesystem calls go through
    the nested ``Impl`` object so tests can substitute a fake implementation.
    """
    class Impl(object):
        # Thin indirection over the real filesystem; replace this object
        # in tests to stub out exists/getmtime/open.
        exists = staticmethod(os.path.exists)
        getmtime = staticmethod(os.path.getmtime)
        open = staticmethod(codecs.open)
    def __init__(self, root):
        # root: project directory path.  Presumably absolute -- the
        # startswith() containment checks below rely on it; TODO confirm.
        self._root = root
        self._impl = self.Impl()
    @property
    def root(self):
        """Project root directory this FileSystem is confined to."""
        return self._root
    def expand_path(self, path, role='File'):
        """
        Return full path.
        normalized path if path is absolute
        user expanded path if path starts with ~ or ~user
        project expanded path if path starts with @
        current dir expanded path otherwise
        """
        # NOTE(review): the ~ / ~user / @ expansions promised above are not
        # implemented below; relative paths resolve against the current
        # working directory, not self.root -- confirm intended behavior.
        # XXX check expanding for Windows
        # should return arg if arg startswith C:\ etc
        # XXX check expand for local user ~/filename
        #ret = os.path.abspath(ret)
        #if ret.startswith(self.root):
        #ret = os.path.join(self.root, path)
        if not os.path.isabs(path):
            #ret = os.path.join(self.root, path)
            ret = os.path.abspath(path)
        else:
            ret = os.path.normpath(path)
        #ret = os.path.abspath(path)
        #if not os.path.exists(ret):
        #    ret = os.path.join(self.root, path)
        # Refuse any path that escapes the project root.
        if not ret.startswith(self.root):
            raise FileOutOfProject(ret, self.root, role)
        return ret
    def exists(self, rel_path):
        """Return True if rel_path (relative to root) exists."""
        full_path = self.abs_path(rel_path, no_existance_check=True)
        return self._impl.exists(full_path)
    def getmtime(self, rel_path):
        """Return mtime of rel_path; raises FileNotFoundError if missing."""
        full_path = self.abs_path(rel_path)
        return self._impl.getmtime(full_path)
    def open(self, rel_path, mode, encoding='utf-8'):
        """Open rel_path via codecs.open.  The existence check is skipped
        when opening for write, since the file may not exist yet."""
        no_existance_check = mode == 'w'
        full_path = self.abs_path(rel_path,
                                  no_existance_check=no_existance_check)
        return self._impl.open(full_path, mode, encoding)
    def replace_ext(self, path, new_ext):
        """Return path with its extension swapped for new_ext
        (new_ext is expected to carry the leading dot)."""
        root, old_ext = os.path.splitext(path)
        return root + new_ext
    def abs_path(self, rel_path, no_existance_check=False, role='File'):
        """Return absolute path for rel_path,
        assuming rel_path is relative on config.root.
        By default performs check for file existance.
        `role` used for exception message.
        NB. Behavior differs from os.path.abspath!!!
        """
        abs_path = os.path.join(self.root, rel_path)
        if not no_existance_check:
            self.check_existance(abs_path, role)
        return abs_path
    def check_existance(self, abs_path, role):
        # Raise the project's FileNotFoundError for missing paths.
        if not self._impl.exists(abs_path):
            raise FileNotFoundError(abs_path, role=role)
    def rel_path(self, abs_path, no_existance_check=False, role='File'):
        """Path, relative to project root"""
        root = self.root
        if not abs_path.startswith(root):
            raise FileOutOfProject(abs_path, root, role)
        if not no_existance_check:
            self.check_existance(abs_path, role)
        rel_path = abs_path[len(root):]
        # Trim any leading separators left over from the slice.
        while rel_path.startswith('/'):
            rel_path = rel_path[1:]
        return rel_path
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param A : list of linked list
    # @return the head node of the merged linked list (None when empty)
    def mergeKLists(self, A):
        """Merge k sorted linked lists.

        Collects every node, sorts by value, and relinks them into one
        chain.  O(N log N) in the total node count N.

        Fix: the original indexed nodes[-1]/nodes[0] unconditionally and
        raised IndexError when A was empty or contained only empty lists.
        """
        nodes = []
        for head in A:
            current = head
            while current:
                nodes.append(current)
                current = current.next
        # Guard: nothing to merge -> empty result list.
        if not nodes:
            return None
        nodes.sort(key=lambda node: node.val)
        for i in range(len(nodes) - 1):
            nodes[i].next = nodes[i + 1]
        nodes[-1].next = None
        return nodes[0]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:18:34 2018
@author: user
Sum and average the numbers entered on one line (original title: "string summation").
"""
# NOTE(review): eval() on raw user input executes arbitrary code -- consider
# int()/float() per token instead.  Flagged, not changed, to preserve the
# current behavior (integer input yields an integer Total).
val=input()
val=val.split(" ")
vl=list(map(eval,val))
print("Total = {:}".format(sum(vl)))
print("Average = {:}".format(sum(vl)/len(vl)))
# Read whitespace-separated integers from stdin and print the largest one.
print(max(int(token) for token in input().split()))
|
# https://stackoverflow.com/questions/70797/user-input-and-command-line-arguments
# https://docs.python.org/2.0/lib/module-binascii.html
# import sys
import binascii
# def hexTo64(input_hex):
# binary = binascii.a2b_hex(input_hex)
# output_64 = binascii.b2a_base64(binary)
# return output_64
# hexTo64(sys.argv[1])
def hexToBinary(input_hex):
    """Return the binary representation of a hex string, zero-padded on
    the left so its length is a whole number of 8-bit bytes."""
    bits = bin(int(input_hex, 16))[2:]
    # Round the bit-length up to the next multiple of 8 (ceiling division).
    padded_length = -(-len(bits) // 8) * 8
    return bits.zfill(padded_length)
def hexTo64(input_hex):
    """Convert a hex string to its base64 encoding (no trailing newline).

    Fixes for Python 3: the original used true division in
    ``range(len(...)/8)`` (TypeError: float) and passed str to
    binascii.b2a_base64 (which requires bytes), then str()-ed the bytes
    result, producing "b'...'" noise.  Odd-length hex input is left-padded
    with a zero nibble, matching the original bit-level padding.
    """
    if len(input_hex) % 2:
        input_hex = "0" + input_hex
    raw_bytes = binascii.a2b_hex(input_hex)
    output_64 = binascii.b2a_base64(raw_bytes)
    # b2a_base64 appends a newline; strip it and return text.
    return output_64.decode("ascii").strip()
def test():
    """Smoke-test hexTo64 against two known hex/base64 vectors."""
    vectors = [
        ("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d",
         "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"),
        ("68656c6c6f2c20776f726c6421",
         "aGVsbG8sIHdvcmxkIQ=="),
    ]
    for hex_input, expected in vectors:
        if hexTo64(hex_input) != expected:
            print("Failed")
            return
    print("Success")
# test()
|
import unittest
from nab.files import File
from nab.show import Show
# fields: ext, group, tags
# episode, season
# title, eptitle
# Parametrized fixtures for File-name parsing: each tuple is
# (raw release/file name, dict of expected parsed attributes).  Keys map
# to File.__dict__ entries, except 'entry', which is
# (show title[, season[, episode]]) and is resolved via a Show lookup.
file_tests = [
    ('[gg]_C_The_Money_of_Soul_and_Possibility_Control_-_01_[7B880013].mkv',
     {'entry': ('C: The Money of Soul and Possibility Control', 1, 1),
      'ext': 'mkv', 'group': 'gg',
      'episode': 1, 'eprange': 1, 'season': None, 'serange': None,
      'title': 'c money of soul and possibility control', 'eptitle': None}),
    ('The Legend of Korra - The Complete Season 1 [720p-HDTV]',
     {'entry': ('The Legend of Korra', 1),
      'ext': None, 'group': None, 'tags': ['720p', 'hdtv'],
      'episode': None, 'eprange': None, 'season': 1, 'serange': 1,
      'title': 'legend of korra', 'eptitle': None}),
    ('[Furi] Avatar - The Last Airbender [720p] (Full 3 Seasons + Extras)',
     {'entry': ('Avatar the Last Airbender', ),
      'ext': None, 'group': 'furi', 'tags': ['720p'],
      'episode': None, 'eprange': None, 'season': None, 'serange': None,
      'title': 'avatar last airbender', 'eptitle': None}),
    ('[UTW]_Angel_Beats!_-_04v2_[BD][h264-1080p_FLAC][0C19DD1C].mkv',
     {'entry': ('Angel Beats', 1, 4),
      'ext': 'mkv', 'group': 'utw', 'tags': ['bd', '1080p', 'flac'],
      'episode': 4, 'eprange': 4, 'season': None, 'serange': None,
      'title': 'angel beats', 'eptitle': None}),
    ('The.Legend.of.Korra.S02E14.Light.in.the.Dark.WEB-DL.x264.AAC.mp4',
     {'entry': ('The Legend of Korra', 2, 14),
      'ext': 'mp4', 'group': None, 'tags': ['x264', 'aac'],
      'episode': 14, 'eprange': 14, 'season': 2, 'serange': 2,
      'title': 'legend of korra', 'eptitle': 'light in the dark'}),
    ('[uguu~] AIR 01-12 Complete Batch (BD-1080p)',
     {'entry': ('Air', ),
      'ext': None, 'group': 'uguu', 'tags': ['bd', '1080p'],
      'episode': None, 'eprange': None, 'season': None, 'serange': None,
      'title': 'air', 'eptitle': None}),
    ('[NoobSubs] Fate Zero S1 01-13 + SP01-03 (720p Blu-ray 8bit AAC MP4)',
     {'entry': ('Fate/Zero', 1),
      'ext': None, 'group': 'noobsubs',
      'tags': ['720p', '8bit', 'aac', 'mp4'],
      'episode': 1, 'eprange': 13, 'season': 1, 'serange': 1,
      'title': 'fate zero', 'eptitle': None}),
    ('[UTW] Fate Zero - 14-25 + Specials [BD][h264-1080p_FLAC]',
     {'entry': ('Fate/Zero', 2),
      'ext': None, 'group': 'utw', 'tags': ['bd', 'h264', '1080p', 'flac'],
      'episode': 14, 'eprange': 25, 'season': None, 'serange': None,
      'title': 'fate zero', 'eptitle': None}),
    ('Psycho-Pass',
     {'entry': ('Psycho-Pass', 1),
      'ext': None, 'group': None, 'tags': [],
      'episode': None, 'eprange': None, 'season': None, 'serange': None,
      'title': 'psycho-pass', 'eptitle': None}),
    ('Game of Thrones S04E06 720p HDTV x264-DIMENSION',
     {'entry': ('Game of Thrones', 4, 6),
      'ext': None, 'group': 'dimension', 'tags': ['720p', 'hdtv', 'x264'],
      'episode': 6, 'eprange': 6, 'season': 4, 'serange': 4,
      'title': 'game of thrones', 'eptitle': None}),
    ('[HorribleSubs] Monogatari Series Second Season - 04 [720p].mkv',
     {'entry': ('Bakemonogatari', 3, 4),
      'ext': 'mkv', 'group': 'horriblesubs', 'tags': ['720p'],
      'episode': 4, 'eprange': 4, 'season': None, 'serange': None,
      'title': 'monogatari series second season', 'eptitle': None}),
    ('Battlestar Galactica Complete Series '
     '2003-2009 720p XvidHD - RePack PsiClone',
     {'entry': ('Battlestar Galactica', ),
      'ext': None, 'tags': ['720p', 'xvidhd'],
      'episode': None, 'eprange': None, 'season': None, 'serange': None,
      'title': 'battlestar galactica', 'eptitle': None}),
    ('Blackadder Seasons 1-4 + Specials',
     {'entry': ('Blackadder', ),
      'ext': None,
      'episode': None, 'eprange': None, 'season': 1, 'serange': 4,
      'title': 'blackadder', 'eptitle': None}),
    ('Breaking Bad Season 5 Complete 720p.BRrip.Sujaidr',
     {'entry': ('Breaking Bad', 5),
      'ext': None, 'tags': ['720p', 'brrip'],
      'episode': None, 'eprange': None, 'season': 5, 'serange': 5,
      'title': 'breaking bad', 'eptitle': None}),
    ('[VIP]Black Lagoon - 1-24[BDrip,912x512,x264,AAC]',
     {'ext': None, 'group': 'vip', 'tags': ['bdrip', 'x264', 'aac'],
      'episode': 1, 'eprange': 24, 'season': None, 'serange': None,
      'title': 'black lagoon', 'eptitle': None})
    ]
# NOTE: Python 2 module (print statements, dict.iteritems).
class TestFile(unittest.TestCase):
    """Exercise File name parsing against the file_tests fixtures above."""
    def test_file(self):
        for filename, data in file_tests:
            f = File(filename)
            print filename
            print f.__dict__
            for name, value in data.iteritems():
                if name == 'tags':
                    # if tags, make sure there are no MISSING tags
                    # extra tags are acceptable and unavoidable
                    for tag in value:
                        print "Asserting %s in tags" % tag
                        self.assertIn(tag, f.__dict__[name])
                elif name == 'entry':
                    # lookup the details for this show
                    # and find out if it's a match
                    title = value[0]
                    show = Show(title)
                    entry = show
                    # iterate to individual seasons/episodes
                    for index in value[1:]:
                        entry = entry[index]
                    # test if this matches
                    print "Asserting matches %s" % entry
                    self.assertTrue(entry.match(f))
                else:
                    print "Asserting %s = %s" % (name, value)
                    self.assertEquals(f.__dict__[name], value)
if __name__ == '__main__':
    unittest.main()
|
#coding:utf-8
def script(s, player=None):
    """Post-quiz hook: fill the result text on *s* from the player's QCM
    score and announce the score on Facebook.

    s      -- objective carrying a ``question`` format template (project type)
    player -- current player; must expose current_quest.point and
              player_name.  Logs an error and returns when None.
    """
    from NaoQuest.objective import Objective
    from NaoCreator.setting import Setting
    import NaoCreator.Tool.facebookor as FC
    if not player:
        Setting.error("Error in execution of pre_script of objective \"result\": player is None")
        return
    # Pick the French feedback message for the score bracket 0-3 / 4-7 / 8+.
    if 0 <= player.current_quest.point <= 3:
        s.question = s.question.format(str(player.current_quest.point) + u" point. "
                                       u"Tu peux faire mieux! "
                                       u"N'hésite pas a recommencer ce qcm")
    elif 4 <= player.current_quest.point <= 7:
        s.question = s.question.format(str(player.current_quest.point) + u" point. "
                                       u"Pas mal !")
    else:
        s.question = s.question.format(str(player.current_quest.point) + u" point. "
                                       u"G,G")
    # Publish the result on Facebook (side effect).
    FC.send_the_post("Le jardinier {} a marquer {} "
                     "point dans la quête du QCM! GG a lui !".format(player.player_name,
                                                                    player.current_quest.point))
def sumSquareDif(num):
    """Return (1 + 2 + ... + num)**2 minus (1**2 + 2**2 + ... + num**2)."""
    numbers = range(1, num + 1)
    square_of_sum = sum(numbers) ** 2
    sum_of_squares = sum(i * i for i in numbers)
    return square_of_sum - sum_of_squares
print(sumSquareDif(100))
#coding: utf-8
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for ucc.base_campanas: campaign-management rows; every
# column is kept as STRING (raw CSV passthrough, no type coercion).
TABLE_SCHEMA = (
    'NOMBRE_CAMPANANA:STRING, '
    'FECHA_GESTION:STRING, '
    'NOMBRES:STRING, '
    'APELLIDOS:STRING, '
    'CODIGO:STRING, '
    'ESTADO:STRING, '
    'MOTIVO:STRING, '
    'OTRA_UNIVERSIDAD:STRING, '
    'JORNADA:STRING, '
    'CONDICION_DE_INGRESO:STRING, '
    'PROGRAMA_DE_INTERES:STRING, '
    'PAGO:STRING, '
    'CEDULA_CLIENTE:STRING, '
    'EMAIL:STRING, '
    'CELULAR:STRING, '
    'DATE_1:STRING, '
    'DATE_2:STRING, '
    'DATE_3:STRING, '
    'DATE_4:STRING, '
    'DATE_5:STRING, '
    'DATE_6:STRING, '
    'DATE_7:STRING, '
    'DATE_8:STRING, '
    'DATE_9:STRING, '
    'DATE_10:STRING, '
    'DATE_11:STRING, '
    'DATE_12:STRING, '
    'DATE_13:STRING, '
    'DATE_14:STRING, '
    'DATE_15:STRING '
)
# ?
class formatearData(beam.DoFn):
    """Beam DoFn turning one semicolon-separated CSV line into a BigQuery
    row dict, adding a generated idkey and the load date."""
    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        # mifecha: load-date string stamped onto every emitted row.
        self.mifecha = mifecha
    def process(self, element):
        # NOTE(review): assumes every line has at least 30 ';'-separated
        # fields; shorter lines raise IndexError -- confirm input contract.
        # print(element)
        arrayCSV = element.split(';')
        tupla= {'idkey' : str(uuid.uuid4()),
                # 'fecha' : datetime.datetime.today().strftime('%Y-%m-%d'),
                'fecha': self.mifecha,
                'NOMBRE_CAMPANANA' : arrayCSV[0],
                'FECHA_GESTION' : arrayCSV[1],
                'NOMBRES' : arrayCSV[2],
                'APELLIDOS' : arrayCSV[3],
                'CODIGO' : arrayCSV[4],
                'ESTADO' : arrayCSV[5],
                'MOTIVO' : arrayCSV[6],
                'OTRA_UNIVERSIDAD' : arrayCSV[7],
                'JORNADA' : arrayCSV[8],
                'CONDICION_DE_INGRESO' : arrayCSV[9],
                'PROGRAMA_DE_INTERES' : arrayCSV[10],
                'PAGO' : arrayCSV[11],
                'CEDULA_CLIENTE' : arrayCSV[12],
                'EMAIL' : arrayCSV[13],
                'CELULAR' : arrayCSV[14],
                'DATE_1' : arrayCSV[15],
                'DATE_2' : arrayCSV[16],
                'DATE_3' : arrayCSV[17],
                'DATE_4' : arrayCSV[18],
                'DATE_5' : arrayCSV[19],
                'DATE_6' : arrayCSV[20],
                'DATE_7' : arrayCSV[21],
                'DATE_8' : arrayCSV[22],
                'DATE_9' : arrayCSV[23],
                'DATE_10' : arrayCSV[24],
                'DATE_11' : arrayCSV[25],
                'DATE_12' : arrayCSV[26],
                'DATE_13' : arrayCSV[27],
                'DATE_14' : arrayCSV[28],
                'DATE_15' : arrayCSV[29]
                }
        # A DoFn returns an iterable of outputs; one row per input line.
        return [tupla]
def run(archivo, mifecha):
    """Build and launch the Beam pipeline: read CSV `archivo`, format each
    line with `mifecha` via formatearData, and append the rows to the
    BigQuery table ucc.base_campanas.  Returns a status string."""
    gcs_path = "gs://ct-ucc" # bucket root
    gcs_project = "contento-bi"
    # Use DataflowRunner only on the production host; DirectRunner elsewhere.
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "5",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
        ])
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
    #lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
    # Read the CSV, skipping its header row.
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
    #transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
    # Append-only load into BigQuery; creates the table on first run.
    transformed | 'Escritura a BigQuery ucc' >> beam.io.WriteToBigQuery(
        gcs_project + ":ucc.base_campanas",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
        )
    # transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
    # 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_ucc.1.txt"])
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    return ("Corrio Full HD")
def interweave(s1, s2):
    """Decode Rick's message by re-interleaving the two half-messages.

    Characters are taken alternately from s1 and s2 (s1 first); injected
    decoy digits are dropped.  Per the spec in this module, the decoded
    message must not carry stray whitespace at its edges, so the result is
    stripped.  Also iterates to the longer of the two strings so no
    trailing character of s2 is silently lost.
    """
    pieces = []
    for x in range(max(len(s1), len(s2))):
        if x < len(s1) and not s1[x].isdigit():
            pieces.append(s1[x])
        if x < len(s2) and not s2[x].isdigit():
            pieces.append(s2[x])
    return ''.join(pieces).strip()
'''
Your friend Rick is trying to send you a message, but he is concerned that it
would get intercepted by his partner. He came up with a solution:
1) Add digits in random places within the message.
2) Split the resulting message in two. He wrote down every second character on
one page, and the remaining ones on another. He then dispatched the two messages separately.
Write a function interweave(s1, s2) that reverses this operation to decode his message!
Example 1: interweave("hlo", "el") -> "hello" Example 2: interweave("h3lo", "el4") -> "hello"
Rick's a bit peculiar about his formats. He would feel ashamed if he found out his message
led to extra white spaces hanging around the edges of his message...
'''
|
import os
import pickle
import argparse
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from sklearn.model_selection import train_test_split
import utils
import logReg
from logReg import logRegL2, kernelLogRegL2
from pca import PCA, AlternativePCA, RobustPCA
def load_dataset(filename):
    """Unpickle and return ../data/<filename>."""
    path = os.path.join('..', 'data', filename)
    with open(path, 'rb') as handle:
        return pickle.load(handle)
if __name__ == '__main__':
    # Assignment driver: each -q/--question value runs one experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('-q','--question', required=True)
    io_args = parser.parse_args()
    question = io_args.question
    if question == "1":
        # Q1: linear logistic regression vs. linear-kernel logistic
        # regression on the non-linear dataset (should behave identically).
        dataset = load_dataset('nonLinearData.pkl')
        X = dataset['X']
        y = dataset['y']
        Xtrain, Xtest, ytrain, ytest = train_test_split(X,y,random_state=0)
        # standard logistic regression
        lr = logRegL2(lammy=1)
        lr.fit(Xtrain, ytrain)
        print("Training error %.3f" % np.mean(lr.predict(Xtrain) != ytrain))
        print("Validation error %.3f" % np.mean(lr.predict(Xtest) != ytest))
        utils.plotClassifier(lr, Xtrain, ytrain)
        utils.savefig("logReg.png")
        # kernel logistic regression with a linear kernel
        lr_kernel = kernelLogRegL2(kernel_fun=logReg.kernel_linear, lammy=1)
        lr_kernel.fit(Xtrain, ytrain)
        print("Training error %.3f" % np.mean(lr_kernel.predict(Xtrain) != ytrain))
        print("Validation error %.3f" % np.mean(lr_kernel.predict(Xtest) != ytest))
        utils.plotClassifier(lr_kernel, Xtrain, ytrain)
        utils.savefig("logRegLinearKernel.png")
    elif question == "1.1":
        # Q1.1: polynomial and RBF kernels on the same split.
        dataset = load_dataset('nonLinearData.pkl')
        X = dataset['X']
        y = dataset['y']
        Xtrain, Xtest, ytrain, ytest = train_test_split(X,y,random_state=0)
        # kernel logistic regression with a poly kernel
        lr_kernel = kernelLogRegL2(kernel_fun=logReg.kernel_poly, lammy=0.01, p=2)
        lr_kernel.fit(Xtrain, ytrain)
        print("Training error %.3f" % np.mean(lr_kernel.predict(Xtrain) != ytrain))
        print("Validation error %.3f" % np.mean(lr_kernel.predict(Xtest) != ytest))
        utils.plotClassifier(lr_kernel, Xtrain, ytrain)
        utils.savefig("logRegPolyKernel.png")
        # kernal logistic regression with a RBF kernel
        lr_kernel = kernelLogRegL2(kernel_fun=logReg.kernel_RBF, lammy=0.01, sigma=0.5)
        lr_kernel.fit(Xtrain, ytrain)
        print("Training error %.3f" % np.mean(lr_kernel.predict(Xtrain) != ytrain))
        print("Validation error %.3f" % np.mean(lr_kernel.predict(Xtest) != ytest))
        utils.plotClassifier(lr_kernel, Xtrain, ytrain)
        utils.savefig("logRegRBFKernel.png")
    elif question == "1.2":
        # Q1.2: 5x5 grid search over sigma = 10^{-2..2} and
        # lambda = 10^{-4..0} for the RBF kernel, tracking the argmin of
        # training and validation error separately, then refit and plot both.
        dataset = load_dataset('nonLinearData.pkl')
        X = dataset['X']
        y = dataset['y']
        Xtrain, Xtest, ytrain, ytest = train_test_split(X,y,random_state=0)
        # hyperparameter choose
        m1 = np.array([-2,-1,0,1,2])
        m2 = np.array([-4,-3,-2,-1,0])
        ite1 = m1.shape
        ite2 = m2.shape
        sigma_record = np.zeros(ite1)
        lammy_record = np.zeros(ite2)
        training_error = np.zeros(25)
        validation_error = np.zeros(25)
        sigma_argmin_train = 0
        lammy_argmin_train = 0
        sigma_argmin_validation = 0
        lammy_argmin_validation = 0
        min_train = 1000000
        min_validation = 100000
        index = 0
        for i in range(5):
            sigma_record[i] = 10**float(m1[i])
            for j in range(5):
                lammy_record[j] = 10**float(m2[j])
                lr_kernel = kernelLogRegL2(kernel_fun=logReg.kernel_RBF, lammy=lammy_record[j], sigma=sigma_record[i])
                lr_kernel.fit(Xtrain, ytrain)
                training_error[index] = np.mean(lr_kernel.predict(Xtrain) != ytrain)
                validation_error[index] = np.mean(lr_kernel.predict(Xtest) != ytest)
                if training_error[index] < min_train:
                    min_train = training_error[index]
                    sigma_argmin_train = sigma_record[i]
                    lammy_argmin_train = lammy_record[j]
                if validation_error[index] < min_validation:
                    min_validation = validation_error[index]
                    sigma_argmin_validation = sigma_record[i]
                    lammy_argmin_validation = lammy_record[j]
                index += 1
        # Refit with the training-error argmin.
        lr_kernel = kernelLogRegL2(kernel_fun=logReg.kernel_RBF, lammy=lammy_argmin_train, sigma=sigma_argmin_train)
        lr_kernel.fit(Xtrain, ytrain)
        print("Training error %.3f" % np.mean(lr_kernel.predict(Xtrain) != ytrain))
        print("Validation error %.3f" % np.mean(lr_kernel.predict(Xtest) != ytest))
        utils.plotClassifier(lr_kernel, Xtrain, ytrain)
        utils.savefig("logRegRBFKernel_min_training.png")
        # Refit with the validation-error argmin.
        lr_kernel = kernelLogRegL2(kernel_fun=logReg.kernel_RBF, lammy=lammy_argmin_validation, sigma=sigma_argmin_validation)
        lr_kernel.fit(Xtrain, ytrain)
        print("Training error %.3f" % np.mean(lr_kernel.predict(Xtrain) != ytrain))
        print("Validation error %.3f" % np.mean(lr_kernel.predict(Xtest) != ytest))
        utils.plotClassifier(lr_kernel, Xtrain, ytrain)
        utils.savefig("logRegRBFKernel_min_validation1.png")
    elif question == '4.1':
        # Q4.1: compare L2 (AlternativePCA) vs L1 (RobustPCA) reconstruction
        # of highway frames and threshold the residual as a foreground mask.
        X = load_dataset('highway.pkl')['X'].astype(float)/255
        n,d = X.shape
        print(n,d)
        h,w = 64,64 # height and width of each image
        k = 5 # number of PCs
        threshold = 0.1 # threshold for being considered "foreground"
        model = AlternativePCA(k=k)
        model.fit(X)
        Z = model.compress(X)
        Xhat_pca = model.expand(Z)
        model = RobustPCA(k=k)
        model.fit(X)
        Z = model.compress(X)
        Xhat_robust = model.expand(Z)
        fig, ax = plt.subplots(2,3)
        for i in range(10):
            ax[0,0].set_title('$X$')
            ax[0,0].imshow(X[i].reshape(h,w).T, cmap='gray')
            ax[0,1].set_title('$\hat{X}$ (L2)')
            ax[0,1].imshow(Xhat_pca[i].reshape(h,w).T, cmap='gray')
            # NOTE(review): the titles say ">threshold" but the masks below
            # use "<threshold" (background, not foreground) -- confirm intent.
            ax[0,2].set_title('$|x_i-\hat{x_i}|$>threshold (L2)')
            ax[0,2].imshow((np.abs(X[i] - Xhat_pca[i])<threshold).reshape(h,w).T, cmap='gray')
            ax[1,0].set_title('$X$')
            ax[1,0].imshow(X[i].reshape(h,w).T, cmap='gray')
            ax[1,1].set_title('$\hat{X}$ (L1)')
            ax[1,1].imshow(Xhat_robust[i].reshape(h,w).T, cmap='gray')
            ax[1,2].set_title('$|x_i-\hat{x_i}|$>threshold (L1)')
            ax[1,2].imshow((np.abs(X[i] - Xhat_robust[i])<threshold).reshape(h,w).T, cmap='gray')
            utils.savefig('highway_{:03d}.jpg'.format(i))
    else:
        print("Unknown question: %s" % question)
__author__ = 'Magnus'
from Spiller import Spiller
from Aksjon import Aksjon
import random
class SpillerHistoriker(Spiller):
    """Rock-paper-scissors "historian" player: looks for earlier occurrences
    of the opponent's last `husk` moves in the full move history, tallies
    what the opponent played right after each occurrence, and plays the
    counter to the most frequent follow-up.  (Identifiers are Norwegian.)"""
    def __init__(self, husk):
        # husk ("remember"): length of the history window to match on.
        assert(isinstance(husk, int))
        self._husk = husk
        # Opponent's moves so far, oldest first.
        self._historie = list()
    def velg_aksjon(self):
        # "Choose action": counter the opponent's most likely next move.
        mestsannsynlig = self.finnMestSaynnsynligNesteTrekk()
        return Aksjon.motsatt(mestsannsynlig)
    def motta_resultat(self, otherAction, pointsForYou):
        # "Receive result": record the opponent's move for future matching.
        assert(isinstance(otherAction, Aksjon))
        assert(isinstance(pointsForYou, float))
        self._historie.append(otherAction)
    def oppgi_navn(self):
        # "Report name".
        return "Historiker(" + str(self._husk) + ")"
    def finnMestSaynnsynligNesteTrekk(self):
        """Predict the opponent's most likely next move; random fallback
        when history is short or the tally has no unique winner."""
        # print("Historie naa: " + str(self._historie))
        if (len(self._historie) < self._husk):
            # Not enough history yet.
            print("Ikke nok historie, gir tilfeldig")
            return self.giTilfeldigNesteTrekk()
        stein = 0   # rock tally
        saks = 0    # scissors tally
        papir = 0   # paper tally
        # The trailing `husk` moves form the pattern searched for.
        historiesok = self._historie[len(self._historie) - self._husk:]
        # print("Historiesok: " + str(historiesok))
        for i1, aksjon1 in enumerate(self._historie):
            # print("Soker pa index " + str(i1) + " i _historie")
            riktigSekvens = True
            for i2, aksjon2 in enumerate(historiesok):
                # print("Soker pa index " + str(i2) + " i historiesok")
                try:
                    # print("Sjekker om " + str(self._historie[i1 + i2]) + " = " + str(historiesok[i2]))
                    if (self._historie[i1 + i2] != historiesok[i2]):
                        # print("Er ikke lik")
                        riktigSekvens = False
                        break
                except Exception:
                    # Ran past the end of the history; abandon this start index.
                    break
            if (riktigSekvens):
                try:
                    # Sequence matched: tally the move that followed it.
                    aksjonTatt = self._historie[i1 +i2 + 1]
                    # print("Forrige gang disse ble spilt, var neste aksjon " + str(aksjonTatt))
                    if (aksjonTatt == Aksjon.stein): stein += 1
                    elif (aksjonTatt == Aksjon.saks): saks += 1
                    else: papir += 1
                except Exception:
                    # Match ended at the very end of history; no "next" move.
                    continue
        if (stein > saks and stein > papir): return Aksjon.stein
        elif (saks > stein and saks > papir): return Aksjon.saks
        elif (papir > stein and papir > saks): return Aksjon.papir
        else:
            # No unique winner in the tally.
            return self.giTilfeldigNesteTrekk()
    def giTilfeldigNesteTrekk(self):
        # "Give random next move": uniform over the three actions.
        rand = random.randint(0, 2)
        if (rand == 0): return Aksjon.stein
        elif (rand == 1): return Aksjon.saks
        else: return Aksjon.papir
|
#!/usr/bin/python
# Yume Tower Defense
# Entry-point script: delegate to the game's main loop and exit with its
# return value as the process exit status.
import yume.core
raise SystemExit(yume.core.main())
|
# coding:utf-8
import sys
import traceback
import dill
import easyquotation
import datetime
ACCOUNT_OBJECT_FILE = 'account.session'
class StrategyTemplate:
    """Base class for trading strategies: restores the broker session from
    disk, wires up the engines and logger, and maintains the list of stocks
    to receive quote pushes for.  Subclasses override init(), strategy()
    and optionally log_handler()."""
    name = 'DefaultStrategyTemplate'
    def __init__(self, log_handler, main_engine,stocks=[],additional_stocks=['000002'],except_stocks=['600556','000001']):
        # NOTE(review): mutable default arguments are shared across all
        # instances; they appear to be only read here, but confirm no
        # caller mutates them.
        with open(ACCOUNT_OBJECT_FILE, 'rb') as f:
            # Restore the pickled (dill) broker account session.
            self.user = dill.load(f)
            f.close()
        self.main_engine = main_engine
        self.clock_engine = main_engine.clock_engine
        # Prefer a custom log handler if the subclass defines one;
        # otherwise fall back to the main engine's logger.
        self.log = self.log_handler() or log_handler
        self.stocks = stocks
        self.additional_stocks = additional_stocks
        self.except_stocks = except_stocks
        self.init()
    def init(self):
        # Strategy-specific initialization hook for subclasses.
        pass
    @property
    def trade_stocks(self):
        # Stocks currently eligible for quote pushes.
        return self.get_push_stocks()
    def get_push_stocks(self):
        """Build the push list: (held stocks | additional) - excepted,
        then drop stocks with no bid1/ask1 volume (treated as suspended)."""
        quotation = easyquotation.use('qq')
        holding_stocks = self.stocks
        if not holding_stocks:
            # Fall back to the positions recorded in the account session.
            holding_stocks = self.user.position['证券代码'].values.tolist()
        print('holding_stocks=',holding_stocks)
        init_push_stocks = list(set( holding_stocks) | set(self.additional_stocks))
        init_push_stocks = list(set(init_push_stocks).difference(set(self.except_stocks)))
        if init_push_stocks:
            this_quotation = quotation.stocks(init_push_stocks)
            if 'sh000001' in holding_stocks:
                # Code 000001 is ambiguous (index vs. stock); remap the
                # quote onto the Shanghai-index key.
                this_quotation['sh000001'] = this_quotation.pop('000001')
        else:
            this_quotation = quotation.all
        stop_stocks = []
        print(list(this_quotation.keys()))
        for stock_code in list(this_quotation.keys()):
            if this_quotation[stock_code]:
                #print(this_quotation[stock_code])
                print(this_quotation[stock_code]['bid1_volume'], this_quotation[stock_code]['ask1_volume'])
                if this_quotation[stock_code]['bid1_volume']>0 or this_quotation[stock_code]['ask1_volume']>0:
                    pass
                else:
                    # No order-book volume on either side: likely suspended.
                    print(stock_code)
                    stop_stocks.append(stock_code)
        push_stocks = list(set(init_push_stocks).difference(set(stop_stocks)))
        return push_stocks
    def strategy(self, event):
        """:param event: event.data holds the quotes for all stocks,
        structured as:
        {'162411':
            {'ask1': '0.493',
             'ask1_volume': '75500',
             'ask2': '0.494',
             'ask2_volume': '7699281',
             'ask3': '0.495',
             'ask3_volume': '2262666',
             'ask4': '0.496',
             'ask4_volume': '1579300',
             'ask5': '0.497',
             'ask5_volume': '901600',
             'bid1': '0.492',
             'bid1_volume': '10765200',
             'bid2': '0.491',
             'bid2_volume': '9031600',
             'bid3': '0.490',
             'bid3_volume': '16784100',
             'bid4': '0.489',
             'bid4_volume': '10049000',
             'bid5': '0.488',
             'bid5_volume': '3572800',
             'buy': '0.492',
             'close': '0.499',
             'high': '0.494',
             'low': '0.489',
             'name': '华宝油气',
             'now': '0.493',
             'open': '0.490',
             'sell': '0.493',
             'turnover': '420004912',
             'volume': '206390073.351'}}
        """
    def heartbeat(self):
        # Every minute divisible by 3: log positions; if they come back
        # empty, assume the session dropped and log in again via yh.json.
        if (datetime.datetime.now().minute)%3==0:
            self.log.info('维持心跳,查询持仓信息:')
            self.log.info(self.user.position)
            if self.user.position.empty:
                #self.user.prepare('yh.json')
                account_dict={
                    "inputaccount": "331600036005",
                    "trdpwd": "F71281A2D62C4b3a8268C6453E9C42212CCC5BA9AB89CAFF4E97CC31AE0E4C48"
                }
                self.log.info('心跳丢失,再次登录查询持仓信息:')
                self.user.prepare('yh.json')
                #self.user.prepare(account_dict)
                self.log.info(self.user.position)
        return
    def run(self, event):
        # Never let a strategy exception kill the engine loop; log it fully.
        try:
            self.heartbeat()
            self.strategy(event)
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.log.error(repr(traceback.format_exception(exc_type,
                                                           exc_value,
                                                           exc_traceback)))
    def clock(self, event):
        # Clock-event hook; default does nothing.
        pass
    def log_handler(self):
        """
        Return a custom log handler to use in preference to the main
        engine's, or None to fall back to the engine logger.
        :return: log_handler or None
        """
        return None
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
def hex_to_rgb(v):
    """Convert a hex color string like '#RRGGBB' (or '#RGB') to a list of
    three channel integers."""
    digits = v.lstrip('#')
    step = len(digits) // 3
    return [int(digits[pos:pos + step], 16)
            for pos in range(0, len(digits), step)]
def count_pixels(img, value):
    """Count white, black, and `value`-colored pixels in an encoded image.

    img   -- raw encoded image bytes (e.g. JPEG/PNG file contents)
    value -- hex color string like '#rrggbb'
    Returns [white_count, black_count, color_count]; returns 0 when
    `value` cannot be parsed (original interface preserved).

    Fixes: np.fromstring is deprecated for binary input (frombuffer is the
    supported equivalent); the original counted per-CHANNEL matches rather
    than whole pixels; and it compared the RGB triple from hex_to_rgb
    against cv2's BGR-ordered image without reversing it.
    """
    np_img = np.frombuffer(img, np.uint8)
    jpg = cv2.imdecode(np_img, cv2.IMREAD_COLOR)
    try:
        hc = hex_to_rgb(value)
    except Exception:
        return 0
    # A pixel counts only when all three channels match.
    c_white_pix = np.sum(np.all(jpg == 255, axis=-1))
    c_black_pix = np.sum(np.all(jpg == 0, axis=-1))
    # cv2.imdecode yields BGR channel order; reverse the RGB triple.
    c_pix = np.sum(np.all(jpg == hc[::-1], axis=-1))
    return [c_white_pix, c_black_pix, c_pix]
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
import os
class CheckableDirModel(QDirModel):
    """QDirModel with checkboxes: a check set on a directory implicitly
    applies to everything below it (check_state walks up the tree to the
    nearest explicitly-set ancestor)."""
    def __init__(self, parent=None):
        QDirModel.__init__(self, None)
        self.checks = {}    # sparse map: QModelIndex -> Qt.CheckState
        self.rootDir = None # first directory walked by export_checked
    def data(self, index, role=QtCore.Qt.DisplayRole):
        # Serve the checkbox state for column 0; defer everything else.
        if role == QtCore.Qt.CheckStateRole and index.column() == 0:
            return self.check_state(index)
        return QDirModel.data(self, index, role)
    def flags(self, index):
        # Make every item user-checkable.
        return QDirModel.flags(self, index) | QtCore.Qt.ItemIsUserCheckable
    def check_state(self, index):
        # An item inherits the state of its closest explicitly-set ancestor.
        while index.isValid():
            if index in self.checks:
                return self.checks[index]
            index = index.parent()
        return QtCore.Qt.Unchecked
    def are_parent_and_child(self, parent, child):
        # True if `parent` lies on `child`'s ancestor chain.
        while child.isValid():
            if child == parent:
                return True
            child = child.parent()
        return False
    def setData(self, index, value, role):
        # Record check-state changes ourselves; everything else goes to Qt.
        checksList = []
        if role == QtCore.Qt.CheckStateRole and index.column() == 0:
            try:
                self.layoutAboutToBeChanged.emit()
                # for i, v in self.checks.items():
                #     if self.are_parent_and_child(index, i):
                #         checksList.append(i)
                # for index in checksList:
                #     self.checks.pop(index)
                self.checks[index] = value
                self.layoutChanged.emit()
            except BaseException as e:
                print(e)
            return True
        return QDirModel.setData(self, index, value, role)
    def export_checked(self, accepted_suffix=['cpp', 'h', 'cc', 'c++', 'java', 'cs']):
        """Return the set of absolute paths of all checked files (walking
        recursively under checked directories) whose suffix is accepted.
        NOTE(review): mutable default argument; it is only read here."""
        selection= set()
        for index in self.checks.keys():
            if self.checks[index] == QtCore.Qt.Checked:
                for path, dirs, files in os.walk(self.filePath(index)):
                    if self.rootDir is None:
                        self.rootDir = path
                    for filename in files:
                        if QtCore.QFileInfo(filename).suffix() in accepted_suffix:
                            if self.check_state(self.index(os.path.join(path, filename))) == QtCore.Qt.Checked:
                                try:
                                    selection.add(os.path.join(path, filename))
                                except:
                                    pass
        return selection
|
import unittest
from katas.kyu_6.binding_within_the_list_monad import bind
class BindTestCase(unittest.TestCase):
    """Tests for the list-monad bind kata: bind(xs, f) concatenates the
    lists produced by applying f to each element of xs."""
    def test_equals(self):
        self.assertEqual(bind([1, 2, 3], lambda a: [a]), [1, 2, 3])
    def test_equals_2(self):
        self.assertEqual(bind([7, 8, 9], lambda a: [[a]]), [[7], [8], [9]])
    def test_equals_3(self):
        self.assertEqual(bind([3, 4, 5],
                              lambda a: [[a, -a]]), [[3, -3], [4, -4], [5, -5]])
    def test_equals_4(self):
        self.assertEqual(bind([5, 6, 7], lambda a: [str(a)]), ['5', '6', '7'])
    def test_equals_5(self):
        self.assertEqual(bind([1, 2, 3], lambda a: [a + 1]), [2, 3, 4])
|
import mx
import mx_sdk
# Register the Mozart-Graal Oz language implementation as a GraalVM
# component: its Truffle jars, support files, and the `oz` launcher.
_suite = mx.suite("mozart-graal")
mx_sdk.register_graalvm_component(mx_sdk.GraalVmLanguage(
    suite=_suite,
    name="Mozart-Graal",
    short_name="moz",
    dir_name="oz",
    license_files=["LICENSE_MOZART_GRAAL.txt"],
    third_party_license_files=[],
    truffle_jars=[
        "mozart-graal:CORO",
        "mozart-graal:MOZART_GRAAL",
    ],
    support_distributions=[
        "mozart-graal:MOZART_GRAAL_GRAALVM_SUPPORT",
    ],
    launcher_configs=[
        mx_sdk.LanguageLauncherConfig(
            destination="bin/<exe:oz>",
            jar_distributions=["mozart-graal:MOZART_GRAAL_LAUNCHER"],
            main_class="org.mozartoz.truffle.OzLauncher",
            build_args=["--language:oz"],
            language='oz',
        )
    ]
))
|
from unittest import TestCase
from phiml.math import tensor, batch, is_finite
from phi.field import *
class TestNoise(TestCase):
    """Sanity checks for the Noise field initializer."""
    def test_multi_k(self):
        # Two batched noise scales on an 8x8 grid; generated values must be finite.
        # NOTE(review): `.all` is not called -- if it is a method rather than
        # a property in phiml, assertTrue sees a truthy bound method and the
        # assertion is vacuous; confirm phiml tensor semantics.
        grid = CenteredGrid(Noise(vector='x,y', scale=tensor([1, 2], batch('batch'))), x=8, y=8)
        self.assertTrue(is_finite(grid.values).all)
|
from flask import Flask, render_template
from iceke.worm import Worm
import json
from iceke.util import Util
app = Flask(__name__)
# Monitored Spark/Flint web UIs: master page plus the per-application
# stages page for each cluster.
flint_url = 'http://11.11.0.64:8099/'
flint_stage_url = 'http://11.11.0.64:4041/stages/'
spark_url = 'http://11.11.0.55:8090/'
spark_stage_url = 'http://11.11.0.55:4040/stages/'
@app.route('/')
def show_gc():
    """Serve the GC-comparison dashboard page."""
    return render_template('show_gc.html', ctx='ab')
@app.route('/get_gc/')
def get_gc():
    """Scrape the Spark and Flint UIs and return the current (or latest
    finished) application stats for each as one JSON document.

    NOTE: Python 2 syntax (`except Exception, e`).
    NOTE(review): returning None from a Flask view is itself an error in
    Flask -- the failure paths here presumably surface as HTTP 500s.
    """
    final_dict = {}
    try:
        spark_worm = Worm(spark_url, spark_stage_url, True, True, "Spark")
        running_spark = spark_worm.get_running_spark()
        if running_spark is None:
            running_spark = spark_worm.get_finish_spark() #get the first finished
        flint_worm = Worm(flint_url, flint_stage_url, True, True, "Flint")
        running_flint = flint_worm.get_running_spark()
        if running_flint is None:
            running_flint = flint_worm.get_finish_spark()
    except Exception, e:
        print e
        return None
    # Emit 'none' placeholders so the page can render one side alone.
    if running_flint is None and running_spark is None:
        return None
    elif running_flint is None and running_spark is not None:
        final_dict = {'spark': format_spark_json(running_spark), 'flint': 'none'}
    elif running_flint is not None and running_spark is None:
        final_dict = {'spark': 'none', 'flint': format_spark_json(running_flint)}
    else:
        final_dict = {'spark': format_spark_json(running_spark), 'flint': format_spark_json(running_flint)}
    final_json = json.dumps(final_dict)
    print(final_json)
    return final_json
def format_spark_json(running_spark):
    """Flatten a scraped Spark application object into a JSON-serializable dict.

    Finished stages come first (newest first), then currently running stages.
    Returns None when no application object is given.
    """
    if running_spark is None:
        return None
    running_stages = running_spark.get_running_stages()
    finished_stages = running_spark.get_finished_stages()
    # NOTE: failed stages were fetched but never used in the original; dropped.
    stages = []
    for finished_stage in finished_stages:
        stages.append({
            'stage_id': finished_stage.get_stage_id(),
            'stage_duration': Util.format_time(finished_stage.get_duration()),
            'submit_time': finished_stage.get_submit_time(),
            'tasks_percent': 100.0,  # finished stages are complete by definition
            'gc_time': round(finished_stage.get_gc_time(), 1),
        })
    stages.reverse()  # most recently finished stage first
    for running_stage in running_stages:
        stages.append({
            'stage_id': running_stage.get_stage_id(),
            'stage_duration': Util.format_time(running_stage.get_duration()),
            'submit_time': running_stage.get_submit_time(),
            'tasks_percent': Util.format_tasks_percent(running_stage.get_tasks_percent()),
            'gc_time': round(running_stage.get_gc_time(), 1),
        })
    return {
        'app_name': running_spark.get_app_name(),
        'total_time': Util.format_time(running_spark.get_total_time()),
        'status': running_spark.get_status(),
        'property': running_spark.get_property(),
        'stages': stages,
    }
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(port=7000)
|
import socket

# Fetch the front page of www.baidu.com with a hand-built HTTP/1.1 request
# over a raw TCP socket and print the full response (headers + body).
ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ss.connect(('www.baidu.com', 80))
data = "GET {} HTTP/1.1\r\n" \
       "Host:{}\r\n" \
       "Connection:close\r\n" \
       "User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\r\n\r\n".format(
    '/', 'www.baidu.com').encode('utf8')
# BUG FIX: sendall() retries until the whole buffer is written; plain send()
# may transmit only a prefix of the request.
ss.sendall(data)
res = b''
while True:
    msg = ss.recv(1024)
    res += msg
    if not msg:  # recv() returns b'' once the server closes the connection
        break
# BUG FIX: close the socket instead of leaking the file descriptor.
ss.close()
print(res.decode('utf8'))
|
import cv2
import numpy as np
def find_rect_of_target_color(image):
    """Return bounding rectangles (x, y, w, h) of target-colored regions in a BGR image."""
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)
    hue = hsv[:, :, 0]
    sat = hsv[:, :, 1]
    # Target hues sit near both ends of the (full-range) hue wheel,
    # combined with low saturation.
    mask = np.zeros(hue.shape, dtype=np.uint8)
    mask[((hue < 20) | (hue > 200)) & (sat < 80)] = 255
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # One bounding rect per contour, computed over its convex hull.
    return [np.array(cv2.boundingRect(cv2.convexHull(c))) for c in contours]
img = cv2.imread("12_2_org.jpg")
if img is None:
    # BUG FIX: imread() returns None (instead of raising) when the file is
    # missing or unreadable; fail loudly rather than crash later in cvtColor.
    raise FileNotFoundError("could not read 12_2_org.jpg")
rects = find_rect_of_target_color(img)
if len(rects) > 0:
    # Draw only the largest detected rectangle (by area).
    rect = max(rects, key=(lambda x: x[2] * x[3]))
    cv2.rectangle(img, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 0, 255), thickness=2)
cv2.imwrite("check.jpg", img)
from collections.abc import Collection
from .card import Card, CardSuit, CardRank
class BaseDeck(Collection):
    """A deck of cards implementing the Collection interface."""
    # FIX: the original's misplaced string literal is now the real class
    # docstring, and the shared class-level mutable `cards = []` is gone —
    # it would have been visible (and mutable) across all instances.

    def __init__(self, cards):
        """Create a deck from *cards*; ``None`` yields an empty deck."""
        self.cards = [] if cards is None else cards

    def __len__(self):
        return len(self.cards)

    def __contains__(self, thing):
        return thing in self.cards

    def __iter__(self):
        return iter(self.cards)

    def get_value(self):
        """Compute the deck's value; base implementation returns None (subclass hook)."""
        pass

    def update_value(self):
        # Cache the (re)computed value on the instance.
        self.value = self.get_value()

    def index(self, card):
        """Return the position of *card*; raises ValueError when absent."""
        return self.cards.index(card)

    def to_list(self):
        """Return the underlying card list (not a copy)."""
        return self.cards
|
import sys
sys.path.append('..')
from run_command import run_command
from batman_socket import BatmanSocket
from BatmanServerSocket import BatmanServerSocket
class BatmanClientServerSocket(BatmanServerSocket):
    '''CLASS: Representing a BATMAN-Advanced node capable of listening, interpreting
       actions, and transmitting data.
       FIELDS:
           @transmission_client -- A socket capable of sending out data to other nodes.
    '''
    def __init__(self, address, client = None):
        super(BatmanClientServerSocket, self).__init__(address)
        # Initialize transmission client (caller may inject one, e.g. for tests)
        if client is None:
            self.transmission_client = BatmanSocket()
        else:
            self.transmission_client = client
    def replace_client(self):
        '''Create new transmission client.'''
        self.transmission_client = BatmanSocket()
    def read_client(self, client, address):
        '''Get the arguments from the server socket and interpret them.
           ARGS:
               @client -- The client socket to read from.
               @address -- The address of the client socket (for returning data)
           RETURNS:
               None
        '''
        # BUG FIX: the original called BatmanServerSocket.read_client(client),
        # passing the client socket as `self`; dispatch through super() instead.
        args = super(BatmanClientServerSocket, self).read_client(client)
        # BUG FIX: interpret_args requires the return address as a third argument.
        self.interpret_args(args, client, address)
    def interpret_args(self, args, client, address):
        '''Interpret the arguments received from another device and do what they ask.
           ARGS:
               @args -- The list of arguments to execute.
               @client -- The client the data came from.
               @address -- The client's return address.
           RETURNS:
               None
        '''
        # NOTE(review): xml_parser is never imported in this module — confirm
        # where it should come from before relying on these branches.
        if args[0] == 'ping':
            # BUG FIX: missing space after "-c 1" fused the count with the
            # target host (e.g. "ping -c 1192.168.0.1").
            (stdout, stderr) = run_command('sudo batctl ping -c 1 ' + args[1])
            xml_ping = xml_parser.parse_ping_to_xml(stdout)
            # BUG FIX: client_send_message is a method; call it on self.
            self.client_send_message(address, 'ping_response ' + open(xml_ping, 'r').read())
        elif args[0] == 'traceroute':
            (stdout, stderr) = run_command('sudo batctl traceroute ' + args[1])
            xml_traceroute = xml_parser.parse_traceroute_to_xml(stdout)
            self.client_send_message(address, 'traceroute_response ' + open(xml_traceroute, 'r').read())
    def client_send_message(self, host, message, port = 56634):
        '''Send a message.
           ARGS:
               @host -- The host to send data to.
               @message -- The data to send
               @port -- The port the application is listening on.
           RETURNS:
               None
        '''
        self.transmission_client.connect(host, port)
        self.transmission_client.write(message)
        # One-shot transmission: discard the used socket and create a fresh one.
        self.replace_client()
|
from django.conf.urls import include, url, patterns
from django.contrib import admin
from tastypie.api import Api
from quotes import views
from quotes.api import QuoteResource
# Tastypie API: resources are versioned under /api/v1/.
v1_api = Api(api_name='v1')
v1_api.register(QuoteResource())
# NOTE(review): patterns() was removed in Django 1.10; this URLconf targets an
# older Django where it is still the canonical wrapper (first arg is the
# view-name prefix, empty here).
urlpatterns = patterns(
    '',
    url(r'^$', views.manager, name='home'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include(v1_api.urls)),
)
from flask import request, g, Blueprint, json, Response
from ..models.RouteModel import RouteModel, RouteSchema
# Blueprint grouping the route endpoints defined below.
route_api = Blueprint('route_api', __name__)
# Single schema instance shared by all handlers (marshmallow-2 style:
# load()/dump() return (data, errors) / .data results).
route_schema = RouteSchema()
@route_api.route('/', methods=['POST'])
def create():
    """Create a route from the JSON request body; 201 on success, 400 on validation error."""
    payload = request.get_json()
    data, error = route_schema.load(payload)
    if error:
        # Echo the schema's validation errors back to the caller.
        return custom_response(error, 400)
    route = RouteModel(data)
    route.save()
    serialized = route_schema.dump(route).data
    return custom_response(serialized, 201)
@route_api.route('/list_origin', methods=['GET'])
def list_origin():
    """Return the route origins as a JSON list."""
    rows = RouteModel.list_origin()
    payload = route_schema.dump(rows, many=True).data
    return custom_response(payload, 200)
@route_api.route('/list_destination', methods=['GET'])
def list_destination():
    """Return the route destinations as a JSON list."""
    rows = RouteModel.list_destination()
    payload = route_schema.dump(rows, many=True).data
    return custom_response(payload, 200)
def custom_response(res, status_code):
    """Serialize *res* to JSON and wrap it in an HTTP response with *status_code*."""
    body = json.dumps(res)
    return Response(
        response=body,
        status=status_code,
        mimetype="application/json"
    )
|
# making a tornado figure in graph
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
# Spiral parameters: ten turns, radius growing as z**2 + 1.
theta = np.linspace(0 * np.pi, 10 * np.pi, 200)
z = np.linspace(0, 10, 200)
r = z ** 2 + 1
x, y = {}, {}
for i in range(0, 10):
    # Each arm is the same spiral rotated by 9*i radians.
    x[i] = r * np.sin(theta + (9*i))
    y[i] = r * np.cos(theta + (9*i))
    # BUG FIX: `l = "Tornado", i+1` built a tuple, so the legend showed
    # "('Tornado', 1)" instead of a readable label.
    l = "Tornado {}".format(i + 1)
    ax.plot(x[i], y[i], z, label=l)
ax.legend()
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.