text stringlengths 8 6.05M |
|---|
import a.b.c
|
"""
ASGI config for set project.
It exposes the ASGI callable as a module-level variable named ``application``.
"""
import os
from django.core.asgi import get_asgi_application
# Make the settings module importable before the ASGI app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'set.settings')
# Module-level ASGI callable picked up by servers (uvicorn/daphne).
application = get_asgi_application()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 11:39:23 2018
@author: yaua
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
class VariationalAutoencoder():
    """Variational autoencoder (TensorFlow 1.x graph mode) for return series.

    The whole graph is built at construction time: encoder ->
    reparameterized sampling -> decoder, plus reconstruction and KL-style
    losses and an Adam optimizer.  `train_inputs` is used only for its
    feature dimension.
    """

    def __init__(self, train_inputs):
        # Hyper-parameters.
        self.lr = 0.005
        self.n_hidden = 8
        self.n_z = 1  # latent dimension
        self.epochs = 80
        self.batchsize = 300
        # Placeholders sized by the number of input features.
        self.inputs = tf.placeholder(shape = [None, train_inputs.shape[1]], dtype = tf.float32)
        self.labels = tf.placeholder(shape = [None, train_inputs.shape[1]], dtype = tf.float32)
        z_mean, z_stddev = self.encoder(self.inputs)
        # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, 1).
        samples = tf.random_normal(shape=tf.shape(z_stddev),mean=0,stddev=1,dtype=tf.float32)
        sampled_z = z_mean + (z_stddev * samples)
        # self.decoded_data = tf.get_variable("output", initializer=tf.zeros(shape=tf.shape(sampled_z.shape)))
        self.decoded_data = self.decoder(sampled_z)
        # Reconstruction loss: MSE against the labels placeholder.
        self.decoded_loss = tf.reduce_mean(tf.square(self.decoded_data - self.labels))
        # KL-style latent loss.  NOTE(review): the closed-form KL term
        # subtracts the scalar 1; subtracting `[1,1]` broadcasts a
        # length-2 vector and looks wrong for n_z == 1 -- confirm.
        self.latent_loss = 0.5 * tf.reduce_sum(tf.square(z_mean) + tf.square(z_stddev) -
                                               tf.log(tf.square(z_stddev)) - [1,1])
        self.cost = tf.reduce_mean(self.decoded_loss + self.latent_loss)
        self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.cost)

    # encoder
    def encoder(self, inputs):
        """Map inputs to the (mean, stddev) of the latent Gaussian."""
        with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE):
            h1 = tf.layers.dense(inputs, self.n_hidden, tf.nn.relu)
            # NOTE(review): a ReLU on the mean clamps it to >= 0 -- confirm
            # this is intended (means are usually unconstrained).
            w_mean = tf.layers.dense(h1, self.n_z, tf.nn.relu)
            w_stddev = tf.layers.dense(h1, self.n_z, tf.nn.softplus)
        return w_mean, w_stddev

    # decoder
    def decoder(self, z):
        """Decode a latent sample into an output drawn from N(mean, stddev)."""
        with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
            h1 = tf.layers.dense(z, self.n_hidden, tf.nn.relu)
            w_mean = tf.layers.dense(h1, self.inputs.shape[1], tf.nn.relu)
            w_stddev = tf.layers.dense(h1, self.inputs.shape[1], tf.nn.softplus)
            # Sample the output rather than returning the mean directly.
            w_samples = tf.random_normal(shape=tf.shape(w_stddev), mean=0, stddev=1, dtype=tf.float32)
            h2 = w_mean + (w_stddev * w_samples)
        return h2

    # train model
    def train(self, train_inputs):
        """Train with mini-batch SGD, printing losses once per epoch."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(self.epochs):
                avg_cost = 0
                # In-place shuffle: mutates the caller's array each epoch.
                np.random.shuffle(train_inputs)
                total_batch = int(train_inputs.shape[0] / self.batchsize)
                x_batches = np.array_split(train_inputs, total_batch)
                for i in range(total_batch):
                    batch_x = x_batches[i]
                    np.random.shuffle(batch_x)
                    # Inputs double as labels: reconstruction objective.
                    _, c, dec_loss, lat_loss = sess.run([self.optimizer, self.cost, self.decoded_loss, self.latent_loss],
                                                        feed_dict={self.inputs: batch_x, self.labels: batch_x})
                    avg_cost += c / total_batch
                print("Epoch:", (epoch + 1), "| Cost =", "{:.9f}".format(avg_cost),
                      "| Generative loss =", "{:.9f}".format(dec_loss),
                      "| Latent loss =", "{:.9f}".format(lat_loss))

    # %returns to prices
    def chain_returns(self, n_out, x_test, x_test_price):
        """Sample n_out decoded return paths for x_test and map to prices.

        NOTE(review): this opens a *new* session and re-runs
        global_variables_initializer, so the weights learned in train()
        are discarded before sampling -- confirm this is intended.
        """
        returns = np.zeros((n_out, x_test.shape[0], x_test.shape[1]))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for k in range(n_out):
                returns[k,:,:] = sess.run(self.decoded_data, feed_dict={self.inputs: x_test})
            chained_returns = np.zeros((n_out, x_test.shape[0], x_test.shape[1]))
            for k in range(n_out):
                for j in range(chained_returns.shape[2]):
                    for i in range(chained_returns.shape[1]):
                        if i == 0:
                            # Seed each path with the first observed price.
                            chained_returns[k,i,j] = x_test_price[0,j]
                        else:
                            # NOTE(review): copies the raw decoded value
                            # instead of compounding returns (see the
                            # commented-out line); confirm which is wanted.
                            chained_returns[k,i,j] = returns[k,i-1,j]
                            # chained_returns[k,i,j] = chained_returns[k,i-1,j] * (1 + returns[k,i-1,j])
            merged = tf.summary.merge_all()
            writer = tf.summary.FileWriter('./graphs', sess.graph)
            print(sess.run(self.decoded_data, feed_dict = {self.inputs: x_test}))
            writer.close()
        return chained_returns
#def chain_returns(n_out, x_test, x_test_price):
# returns = np.zeros((n_out, x_test.shape[0], x_test.shape[1]))
#
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# for k in range(n_out):
# returns[k,:,:] = sess.run(model.decoded_data, feed_dict={x_test})
#
# chained_returns = np.zeros((n_out, x_test.shape[0], x_test.shape[1]))
# for k in range(n_out):
# for j in range(chained_returns.shape[2]):
# for i in range(chained_returns.shape[1]):
# if i == 0:
# chained_returns[k,i,j] = x_test_price[0,j]
# else:
# chained_returns[k,i,j] = chained_returns[k,i-1,j] * (1 + returns[k,i-1,j])
# return chained_returns
# inputs
def data(df):
    """Split a raw price DataFrame into train/test returns and prices.

    Drops every row up to and including the last row containing a NaN,
    drops the first (date/label) column, computes percentage returns and
    splits both prices and returns 70/30 chronologically.

    Returns (x_train, x_test, x_train_price, x_test_price) as float32
    numpy arrays aligned with each other.
    """
    # Bug fix: the original read the module-level `dataframe` instead of
    # the `df` parameter.  Also, Series.nonzero() was removed from recent
    # pandas; np.where is the supported spelling.
    null_rows = np.where(pd.isnull(df).any(axis=1))[0]
    # Start right after the last NaN row; start at 0 when the data is clean
    # (the original crashed on a clean frame via null_rows[-1]).
    start = (null_rows[-1] + 1) if len(null_rows) else 0
    x_dataset = df.iloc[start:df.shape[0], 1:df.shape[1]]
    x_price = x_dataset.values.astype('float32')
    # Percentage returns; pct_change leaves a NaN first row -- drop it.
    x_diff = x_dataset.pct_change().iloc[1:, ]
    x_r = x_diff.values.astype('float32')
    # 70/30 chronological split (no shuffling across the boundary).
    x_train_size = int(len(x_r) * 0.7)
    x_train, x_test = x_r[0:x_train_size, :], x_r[x_train_size:len(x_r), :]
    x_train_price, x_test_price = x_price[0:x_train_size, :], x_price[x_train_size:len(x_r), :]
    return x_train, x_test, x_train_price, x_test_price
# --- Script entry: load data, train the VAE, sample price paths, plot. ---
dataframe = pd.read_csv('AlexData.csv') # bigger data set (23 inputs)
x_train, x_test, x_train_price, x_test_price = data(dataframe)
model = VariationalAutoencoder(x_train)
model.train(x_train)
# Draw n_out sampled paths over the test period.
n_out = 9
# NOTE(review): this pre-allocation is immediately overwritten below.
y = np.zeros((n_out, x_test.shape[0], x_test.shape[1]))
y = model.chain_returns(n_out, x_test, x_test_price)
# Plot all sampled paths (grey) against the actual price of one asset.
asset_index = 0
plt.plot(y[:,:,asset_index].T, color = 'lightgrey')
plt.plot(x_test_price[:,asset_index])
#
#plt.plot(y[:,:,7].T, color = 'lightgrey')
#plt.plot(y[:,:,9].T, color = 'orange')
"""
The purpose of this script is to handle the upload of the app to the server.
This includes the setup of the server and the installation of git, nginx,
gunicorn, supervisor, and all the libraries required for the proper
functioning of the site.
For this purpose we plan on using two simple APIs: Fabric for task
automation and Poseidon for automating certain tasks within DigitalOcean.
"""
from fabric.api import abort, cd, local, settings, run, env
from fabric.contrib.files import append
# Fabric connection configuration for the DigitalOcean droplet.
env.use_ssh_config = True
env.user = "root"
env.hosts = ["104.236.29.231"]
#env.hosts = ["45.55.175.61"]
env.key_filename = "/Users/leonardojimenez/.ssh/id_rsa"
# SECURITY NOTE(review): a plaintext root password is committed here --
# move it to an environment variable or rely solely on the SSH key above.
env.password = "Jesusvictor1"
#env.ssh_config_path = "/Users/leonardojimenez/.ssh/known_hosts"
#import poseidon.api as po
def deploy():
    """Connect to the remote DigitalOcean server and install the basics (git)."""
    # NOTE(review): each fabric `run` spawns a fresh shell, so this `cd`
    # does not persist to later commands -- use `with cd("/home/"):` if a
    # working directory is actually required.
    run("cd /home/")
    install_git()
    #create repository
    #install nginx
    #install gunicorn
    #install supervisor
    #configure nginx
    #configure gunicorn
    #configure supervisor
    #set digital_remote locally
    #push
def install_git():
    """Install git on an Ubuntu instance in DigitalOcean and configure it.

    Bug fix: the original ran `git config` *before* installing git, which
    fails on a fresh droplet.  `-y` keeps apt-get non-interactive so the
    fabric session does not hang on the confirmation prompt.
    """
    print("installing updates for the system...")
    run("sudo apt-get update")
    print("installing last version of git...")
    run("sudo apt-get install -y git")
    run('git config --global user.name "Leonardo Jimenez"')
    run('git config --global user.email ljimenez@stancedata.com')
    # NOTE(review): amending a commit only makes sense inside an existing
    # repository; kept for parity with the original intent.
    run('git commit --amend --reset-author')
def create_repository():
    """
    Create the bare repo's post-receive hook so every push checks the app
    out into /home/django/app.
    """
    # Shell script written verbatim to hooks/post-receive (runtime
    # content -- do not reformat).
    hook_text = """#!/bin/sh
GIT_WORK_TREE=/home/django/app/ git checkout -f master
GIT_WORK_TREE=/home/django/app git reset --hard
"""
    #run("mkdir /home/django/app")
    #run("mkdir app_repo.git")
    #with cd("app_repo.git"):
    #    run("git init --bare")
    with cd("~/app_repo.git/hooks"):
        run("touch post-receive")
        run("chmod +x post-receive")
        append('post-receive', hook_text)
def set_local_repo():
    """Register the DigitalOcean server as the git remote 'digital' locally."""
    remote_url = "root@{0}:~/app_repo.git".format(env.hosts[0])
    local("git remote add digital " + remote_url)
def install_nginx():
    """
    Verify whether nginx is present; if not, install it.

    Fixes two defects: without warn_only fabric aborts the whole task when
    `nginx -v` exits non-zero (i.e. exactly when nginx is missing), and
    the promised install branch was never written.
    """
    with settings(warn_only=True):
        is_installed = run('nginx -v')
    if is_installed.return_code == 0:
        print("nginx installed already")
    else:
        run("sudo apt-get install -y nginx")
def install_gunicorn():
    """
    Verify whether gunicorn is present; if not, install it.

    Fixes: warn_only so a missing gunicorn does not abort the task, the
    copy-pasted "nginx installed already" message, and the missing
    install branch.
    """
    with settings(warn_only=True):
        is_installed = run('gunicorn -v')
    if is_installed.return_code == 0:
        print("gunicorn installed already")
    else:
        run("sudo apt-get install -y gunicorn")
def config_nginx():
    """
    Configure nginx for our app (media/static aliases), keeping a copy of
    the original config file to avoid spoiling it.
    """
    with cd("/etc/nginx/sites-enabled/"):
        run("cp django django-copy")
        run("sed -i '19s/.*/ alias ~\/home\/django\/app\/media;/' django")
        run("sed -i '24s/.*/ alias ~\/home\/django\/app\/static;/' django")
        # Bug fix: the service name comes before the action --
        # `service restart nginx` is not a valid invocation.
        run("service nginx restart")
def config_gunicorn():
    """
    Configure gunicorn for our app, keeping a copy of the file to avoid
    spoiling it.

    Log paths: /var/log/nginx/error.log (nginx) and
    /var/log/upstart/gunicorn.log (gunicorn).
    """
    with cd("/etc/init/"):
        # Back up the upstart config before editing it in place.
        run("cp gunicorn.conf gunicorn.conf-copy")
        # Rewrite specific lines of gunicorn.conf (line numbers assume the
        # stock DigitalOcean image's file layout -- NOTE(review): brittle).
        run("sed -i '11s/.*/chdir \/home\/django\/app/' gunicorn.conf")
        run("sed -i '14s/.*/ --name=app \\\\ /' gunicorn.conf")
        run("sed -i '15s/.*/ --pythonpath=app \\\\/ /' gunicorn.conf")
        run("sed -i '18s/.*/ app.wsgi:application/' gunicorn.conf")
        run("service gunicorn restart")
def install_supervisor():
    """Install supervisor on the server."""
    # NOTE(review): apt-get without -y will prompt and hang the fabric run.
    run("apt-get install supervisor")
    # NOTE(review): launching vim over a non-interactive fabric session
    # blocks forever; this was probably meant to inspect the log (e.g.
    # `run("tail /var/log/upstart/gunicorn.log")`).
    run("vim /var/log/upstart/gunicorn.log")
    # Next: create the nginx and gunicorn configuration files.
|
from collections import deque
from threading import Thread
import threading
#temporary class of queue b
class Queue:
    """Temporary thread-safe triple FIFO queue, one deque per user channel.

    The add_userN/takeN interface (including the `input` parameter name)
    is kept for existing callers.
    """

    def __init__(self):
        # One shared lock guards all three queues.
        self.lock = threading.Lock()
        self.q_user1 = deque([])
        self.q_user2 = deque([])
        self.q_user3 = deque([])

    def add_user1(self, input):
        """Append `input` to user 1's queue and return that queue."""
        with self.lock:
            self.q_user1.append(input)
        return self.q_user1

    def add_user2(self, input):
        """Append `input` to user 2's queue and return that queue."""
        with self.lock:
            self.q_user2.append(input)
        return self.q_user2

    def add_user3(self, input):
        """Append `input` to user 3's queue and return that queue."""
        with self.lock:
            self.q_user3.append(input)
        return self.q_user3

    def take1(self):
        """Pop and return the oldest item of user 1's queue."""
        # Bug fix: the take methods originally popped without the lock,
        # racing with concurrent add_userN calls.
        with self.lock:
            return self.q_user1.popleft()

    def take2(self):
        """Pop and return the oldest item of user 2's queue."""
        with self.lock:
            return self.q_user2.popleft()

    def take3(self):
        """Pop and return the oldest item of user 3's queue."""
        with self.lock:
            return self.q_user3.popleft()
#queue=Queue()
#queue.add('3,6,c')
#queue.add('5,6,c')
#queue.add('4,1,c')
#queue.add('3,4,c')
#print queue.q
#print queue.take()
#print queue.q
|
import threading
import time
def add():
    """Compute the running product 1*1*2*...*199999 and print it with the
    elapsed time since the module-level start_time."""
    total = 1
    for factor in range(1, 200000):
        total *= factor
    print('和是:%d'%total)
    print('和结束时间:%s'%(time.time()-start_time))
def mul():
    """Compute the running product 1*1*2*...*99999 and print it with the
    elapsed time since the module-level start_time."""
    product = 1
    for factor in range(1, 100000):
        product *= factor
    print('集是:%d' % product)
    print('ji结束时间:%s'% (time.time() - start_time))
# Build (but never start) two worker threads; the guarded block below runs
# the functions sequentially on the main thread instead.
threads = []
t1 = threading.Thread(target=add)
t2 = threading.Thread(target=mul)
threads.append(t1)
threads.append(t2)
# Reference point for the elapsed-time prints inside add()/mul().
start_time = time.time()
if __name__ == '__main__':
    # t1.setDaemon(True)
    #for t in threads:
    #    t.start()
    #for t in threads:
    #    t.join()
    add()
    mul()
|
# Read an integer and print its decimal digits, least significant first.
N = int(input("N="))
q = N
while q >= 1:
    q, r = divmod(q, 10)
    print(r)
from boto import sdb
class Sim:
    """Base class for simulations.

    Subclasses override run/get_results/keep_simulating; the base versions
    are placeholders.
    """

    def __init__(self, params=None):
        self.params = params

    def run(self):
        # Fix: the original used Python-2-only `print` statements; the
        # parenthesized single-argument form behaves identically on 2 and 3.
        print('TODO\n')

    def get_results(self):
        print('TODO\n')

    def keep_simulating(self):
        # Base simulations never continue.
        return False
class ExponentiationSim(Sim):
    """Stores exponents of the given base in a dictionary.

    After n run() calls, vals maps each exponent 0..n to base**exponent.
    """

    def __init__(self, base):
        self.base = base
        self.iters = 0
        self.vals = {0: 1}

    def run(self):
        nxt = self.iters + 1
        self.vals[nxt] = self.base * self.vals[self.iters]
        self.iters = nxt

    def get_results(self):
        return {'vals': self.vals}

    def keep_simulating(self):
        # Arbitrary example cap on the number of iterations.
        return self.iters < 1000
class SimLauncher:
    """Runs a Sim to completion, recording results to AWS SimpleDB."""

    def __init__(self, sim, domain_name, aws_region='us-east-1'):
        # With boto 2.29.1, we won't specify the access keys in the code.
        # Instead, place them in ~/.boto under [Credentials].
        self.conn = sdb.connect_to_region(aws_region)
        self.dom = self.conn.get_domain(domain_name)
        self.sim = sim

    def record(self):
        # Persist the sim's current results as SimpleDB attributes.
        # NOTE(review): boto's Domain.put_attributes expects
        # (item_name, attributes); this passes a single argument -- verify.
        records = self.sim.get_results()
        self.dom.put_attributes(records)
        # Using save() may only be necessary for dom.get_item() style
        # assignment of data. Will test.
        self.dom.save()

    def main(self):
        # Step the simulation, recording after every step.
        while(self.sim.keep_simulating()):
            self.sim.run()
            self.record()
# Quick manual smoke test (Python 2 print statements, left as-is).
if __name__ == '__main__':
    print 'Testing!\n'
    sim = ExponentiationSim(3)
    sim.run()
    print sim.get_results()
    print 'Finished testing.\n'
    # launcher = SimLauncher(sim, 'test1')
    # launcher.main()
import os, sys, subprocess, shutil
# Copy the shared python library next to this script so it can be imported,
# run the build, then remove the copy again.
python_lib_path = '../../lib/python/'
shutil.copytree(python_lib_path, './python_lib/')
from python_lib.make_utils import *

# Clear the output directory and run the Stata program via the copied
# make utilities.
clear_dirs(['../output/'])
run_stata(program = 'example.do')
shutil.rmtree('python_lib')
import requests
def publish_post():
    """Publish the local post.md to Medium as a draft.

    API: https://github.com/Medium/medium-api-docs#33-posts
    NOTE(review): the Medium API expects the user's id hash (not a
    username) and an Authorization bearer token -- confirm `author_id`
    and credentials before use.
    """
    author_id = "schmidt.jake.c"
    with open("post.md", "r") as f:
        content = f.read()
    response = requests.post(
        f"https://api.medium.com/v1/users/{author_id}/posts",
        json={
            "title": "My Title",
            "contentFormat": "markdown",
            "content": content,
            "publishStatus": "draft",
        },
        timeout=30,  # avoid hanging forever on network issues
    )
    # Robustness fix: the original silently discarded the API response;
    # fail loudly on HTTP errors instead.
    response.raise_for_status()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-13 11:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make `orders.mailed` a nullable boolean.

    NOTE(review): NullBooleanField is deprecated in modern Django (use
    BooleanField(null=True)); default=0 relies on 0 being falsy -- confirm
    before upgrading Django.
    """

    dependencies = [
        ('freelance', '0022_auto_20170513_1403'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orders',
            name='mailed',
            field=models.NullBooleanField(default=0),
        ),
    ]
|
# GCD Studied from Coursera 'Algorithmic Toolbox'
# Read two whitespace-separated integers from stdin.
m = input().split()
def GCD(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
# Print the GCD of the two parsed integers.
print(GCD(int(m[0]), int(m[1])))
|
./gasp/slt_calibration_master_flat_1month_180S.py |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 19:36:22 2018
功能:记录用户在某个时间点对系统的输入信息(用户的指令或着用户说的话)
接口:使用Mysql数据库记录数据 安装pymysql等驱动器
实现过程:安装好Mysql数据库 和相关python连接模块
先在数据库中建立数据库“record1” 在record1中建立表user_record1
用pymysql连接数据库 通过execute提交数据
@author: mynumber
"""
import pymysql
import time
class record(object):
    """Insert, query and update rows of a MySQL table via pymysql."""

    def __init__(self, database_name, table_name):
        self.database_name = database_name
        self.table_name = table_name
        # Fully qualified table name: "<database>.<table>".
        self.table = self.database_name + '.' + self.table_name
        self.connect()

    def connect(self):
        """Open the connection and cursor (localhost, utf8)."""
        try:
            print('正在连接数据库...')
            # SECURITY NOTE(review): credentials are hard-coded; move them
            # to configuration / environment variables.
            self.db = pymysql.connect("localhost","root","ZHUYINlong121217",self.database_name,charset='utf8')
            self.cursor = self.db.cursor()
            print('数据库连接成功...')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not swallowed.
            print('连接失败...')

    def insert(self, time, content):
        """Insert one (time, content) row."""
        # Security fix: the values are now bound as driver-escaped
        # parameters instead of being interpolated into the SQL string
        # (the original was injectable).  The table name cannot be a
        # parameter; it comes from trusted constructor arguments.
        sql = "INSERT INTO %s (`time`, `content`) VALUES (%%s, %%s);" % self.table
        print(sql)
        try:
            self.cursor.execute(sql, (time, content))
            self.db.commit()
            print('数据添加成功...')
        except Exception:
            self.db.rollback()
            print('数据未成功添加...')

    def qury(self):
        """Return all rows of the table (method name kept for callers)."""
        sql = "SELECT * FROM %s" % self.table
        try:
            self.cursor.execute(sql)
            results = self.cursor.fetchall()
            self.db.commit()
            print('数据查找成功...')
            return results
        except Exception:
            self.db.rollback()
            print('数据查找失败...')

    def update(self, text):
        """Match records against `text`; not implemented yet."""
        pass
# Example usage: connect, insert the current timestamp + message, query back.
R=record("record1","user_record")
T=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print(T)
text='关儿 我爱你'
R.insert(T,text)
result=R.qury()
# -*- coding: utf-8 -*-
# md5hash(bytes) -> hex digest.  Falls back to the pre-2.5 `md5` module
# when `hashlib` is unavailable (legacy Python support).
try:
    import hashlib
    md5hash = lambda s: hashlib.md5(s).hexdigest()
except ImportError:
    import md5
    md5hash = lambda s: md5.new(s).hexdigest()
try:
    # If you have the fanstatic extra, add support to include widget JS
    import fanstatic
    import js.jquery
    import zeam.jsontemplate

    library = fanstatic.Library('zeam.form.ztk.widgets', 'static')
    collection = fanstatic.Resource(
        library, 'collection.js', depends=[js.jquery.jquery,
                                           zeam.jsontemplate.jsontemplate])
    requireCollectionResources = collection.need
except ImportError:
    # Fanstatic not installed: provide a stub Library and a no-op resource
    # requirement so the widgets still work without JS support.
    class Library(object):
        name = ""

        def init_library_nr(self):
            pass

        @property
        def library_nr(self):
            return 20000

        @property
        def known_assets(self):
            return []

    library = Library()
    requireCollectionResources = lambda: None
from zeam.form.base.datamanager import NoneDataManager
from zeam.form.base.errors import Errors, Error
from zeam.form.base.fields import Fields
from zeam.form.base.markers import Marker
from zeam.form.base.form import cloneFormData
from zeam.form.base.interfaces import IField, IWidget, IWidgetExtractor
from zeam.form.base.markers import NO_VALUE
from zeam.form.base.widgets import WidgetExtractor, FieldWidget, Widgets
from zeam.form.ztk.fields import Field, registerSchemaField
from zeam.form.ztk.interfaces import ICollectionField, IListField
from zeam.form.ztk.widgets.choice import ChoiceField, ChoiceFieldWidget
from zeam.form.ztk.widgets.object import ObjectField
from grokcore import component as grok
from zope import component
from zope.i18nmessageid import MessageFactory
from zope.interface import Interface, implementer
from zope.schema import interfaces as schema_interfaces
_ = MessageFactory("zeam.form.base")
@implementer(ICollectionField)
class CollectionField(Field):
    """A collection field.

    Wraps a per-item value field and validates the collection's length
    against minLength/maxLength.
    """
    collectionType = list
    allowAdding = True
    allowRemove = True
    inlineValidation = False

    def __init__(self, title,
                 valueField=None,
                 minLength=0,
                 maxLength=None,
                 **options):
        super(CollectionField, self).__init__(title, **options)
        # Adapt the per-item field to IField (None when not adaptable).
        self._valueField = IField(valueField, None)
        self.minLength = minLength
        self.maxLength = maxLength

    @property
    def valueField(self):
        # The per-item field; read-only.
        return self._valueField

    def validate(self, value, form):
        """Validate the collection; return an error message or None."""
        error = super(CollectionField, self).validate(value, form)
        if error is not None:
            return error
        if not isinstance(value, Marker):
            assert isinstance(value, self.collectionType)
            if self.minLength and len(value) < self.minLength:
                return _(u"There are too few items.")
            if self.maxLength and len(value) > self.maxLength:
                return _(u"There are too many items.")
        return None

    def isEmpty(self, value):
        # Empty when missing or zero-length.
        return value is NO_VALUE or not len(value)

# BBB
CollectionSchemaField = CollectionField
@implementer(IListField)
class ListField(CollectionField):
    """A list field
    """
    collectionType = list
    allowOrdering = True

# BBB
ListSchemaField = ListField


class SetField(CollectionField):
    """A set field
    """
    collectionType = set

# BBB
SetSchemaField = SetField


class TupleField(CollectionField):
    """A tuple field.
    """
    collectionType = tuple

# BBB
TupleSchemaField = TupleField
def newCollectionWidgetFactory(mode=u"", interface=IWidget):
    """Return a widget factory adapting (field, valueField, form, request)."""
    def collectionWidgetFactory(field, form, request):
        """A widget of a collection is a bit advanced. We have to adapt
        the sub-type of the field as well.
        """
        widget = component.getMultiAdapter(
            (field, field.valueField, form, request), interface, name=mode)
        return widget
    return collectionWidgetFactory

# Register the collection widget factories for the input/input-list/display
# render modes and for request extraction.
grok.global_adapter(
    newCollectionWidgetFactory(mode='input'),
    adapts=(ICollectionField, Interface, Interface),
    provides=IWidget,
    name='input')
grok.global_adapter(
    newCollectionWidgetFactory(mode='input-list'),
    adapts=(ICollectionField, Interface, Interface),
    provides=IWidget,
    name='input-list')
grok.global_adapter(
    newCollectionWidgetFactory(mode='display'),
    adapts=(ICollectionField, Interface, Interface),
    provides=IWidget,
    name='display')
grok.global_adapter(
    newCollectionWidgetFactory(interface=IWidgetExtractor),
    adapts=(ICollectionField, Interface, Interface),
    provides=IWidgetExtractor)
class MultiGenericFieldWidget(FieldWidget):
    """Input widget for a collection field.

    Renders one sub-widget per item and manages add/remove actions plus a
    client-side JSON template for newly added items.
    """
    grok.adapts(ICollectionField, Interface, Interface, Interface)

    allowAdding = True
    allowRemove = True
    inlineValidation = False

    def __init__(self, field, value_field, form, request):
        super(MultiGenericFieldWidget, self).__init__(field, form, request)
        # Copy behaviour flags from the field definition.
        self.allowAdding = field.allowAdding
        self.allowRemove = field.allowRemove
        self.inlineValidation = field.inlineValidation
        self.valueField = value_field
        self.valueWidgets = Widgets()
        self.haveValues = False

    def createValueWidget(self, new_identifier, value):
        """Create (without registering) a widget for one collection item."""
        field = self.valueField.clone(new_identifier=str(new_identifier))
        form = cloneFormData(self.form, prefix=self.identifier)
        if value is not None:
            # Render the given value as the widget content.
            form.ignoreContent = False
            form.setContentData(NoneDataManager(value))
        else:
            # No value: let the request populate the widget.
            form.ignoreRequest = False
            form.ignoreContent = True
        return form.widgetFactory.widget(field)

    def addValueWidget(self, new_identifier, value):
        """Create a widget for one item and append it to valueWidgets."""
        widget = self.createValueWidget(new_identifier, value)
        if widget is not None:
            self.valueWidgets.append(widget)
        return widget

    def prepareContentValue(self, values):
        count = 0
        if values is not NO_VALUE:
            for position, value in enumerate(values):
                # Create new widgets for each value
                self.addValueWidget(position, value)
            count += len(values)
        if self.allowAdding and self.required and not count:
            # Required but empty: show one blank row to fill in.
            self.addValueWidget(count, None)
            count += 1
        if count:
            self.haveValues = True
        # The hidden counter field carries the number of rendered items.
        return {self.identifier: str(count)}

    def prepareRequestValue(self, values, extractor):
        value_count = 0
        errors = None
        # Number of item slots that were rendered in the submitted form.
        identifier_count = int(values.get(self.identifier, '0'))
        remove_something = self.identifier + '.remove' in values
        add_something = self.identifier + '.add' in values
        if self.inlineValidation:
            # If inlineValidation is on, and we removed or added
            # something, we extract this field to get the
            # validation messages right away (if the user clicked
            # on add or remove, he cannot have clicked on an
            # action button)
            if add_something or remove_something:
                ignored, errors = extractor.extract()
                if errors:
                    self.form.errors.append(errors)
        for position in range(0, identifier_count):
            value_marker = (self.identifier, position,)
            value_present = '%s.present.%d' % value_marker in values
            if not value_present:
                # Slot was already removed client-side.
                continue
            value_identifier = '%s.field.%d' % value_marker
            value_selected = '%s.checked.%d' % value_marker in values
            if remove_something and value_selected:
                if errors and value_identifier in errors:
                    # If the field have an error, remove it
                    del errors[value_identifier]
                continue
            # We need to provide the widget error now, but cannot set
            # all of them on the form now, as we might remove them
            # with delete
            self.addValueWidget(position, None)
            value_count += 1
        if (add_something or
            (self.allowAdding and self.required and not value_count)):
            # Append one fresh empty slot and bump the counter field.
            self.addValueWidget(identifier_count, None)
            value_count += 1
            values[self.identifier] = str(identifier_count + 1)
        if value_count:
            self.haveValues = True
        if errors:
            if len(errors) > 1:
                self.form.errors.append(
                    Error(_(u"There were errors."), self.form.prefix))
            else:
                # If no errors are left, remove from the form (top level error)
                del self.form.errors[self.identifier]
        return values

    @property
    def jsonTemplateWidget(self):
        # Widget rendered with a literal '{identifier}' placeholder, used
        # as a client-side JSON template.
        widgets = Widgets()
        widgets.append(self.createValueWidget('{identifier}', None))
        widgets.update()
        return list(widgets)[0]

    def update(self):
        super(MultiGenericFieldWidget, self).update()
        self.valueWidgets.update()
        # Pull in the collection.js resources when fanstatic is available.
        requireCollectionResources()
        self.jsonAddIdentifier = None
        self.jsonAddTemplate = None
        self.includeEmptyMessage = self.allowRemove
        if self.allowAdding:
            # Unique DOM id for the client-side "add item" template.
            self.jsonAddIdentifier = 'id' + md5hash(
                self.identifier.encode('utf-8'))
            widgets = Widgets()
            widgets.append(self.createValueWidget(
                '{' + self.jsonAddIdentifier + '}', None))
            widgets.update()
            self.jsonAddTemplate = list(widgets)[0]
class ListGenericFieldWidget(MultiGenericFieldWidget):
    grok.adapts(ListField, Interface, Interface, Interface)

    def __init__(self, field, value_field, form, request):
        super(ListGenericFieldWidget, self).__init__(
            field, value_field, form, request)
        # Lists additionally support reordering of their items.
        self.allowOrdering = field.allowOrdering


class MultiGenericDisplayFieldWidget(MultiGenericFieldWidget):
    grok.name('display')


# For collection of objects, generate a different widget (with a table)
class MultiObjectFieldWidget(MultiGenericFieldWidget):
    grok.adapts(ICollectionField, ObjectField, Interface, Interface)

    def getFields(self):
        # Fields of the contained object, used as table columns.
        return self.valueField.objectFields


class ListObjectFieldWidget(MultiObjectFieldWidget):
    grok.adapts(ListField, ObjectField, Interface, Interface)

    def __init__(self, field, value_field, form, request):
        super(ListObjectFieldWidget, self).__init__(
            field, value_field, form, request)
        self.allowOrdering = field.allowOrdering


# Still make possible to have the non-table implementation
class RegularMultiObjectFieldWidget(MultiGenericFieldWidget):
    grok.adapts(ICollectionField, ObjectField, Interface, Interface)
    grok.name('input-list')


class RegularListObjectFieldWidget(ListGenericFieldWidget):
    grok.adapts(ListField, ObjectField, Interface, Interface)
    grok.name('input-list')
class MultiGenericWidgetExtractor(WidgetExtractor):
    """Extract a collection value from the request for a collection field."""
    grok.adapts(ICollectionField, Interface, Interface, Interface)

    def __init__(self, field, value_field, form, request):
        super(MultiGenericWidgetExtractor, self).__init__(
            field, form, request)
        self.valueField = value_field

    def extract(self):
        # The hidden counter field tells how many item slots were rendered.
        value = self.request.form.get(self.identifier, NO_VALUE)
        if value is not NO_VALUE:
            try:
                value = int(value)
            except ValueError:
                return (None, u"Invalid internal input")
            collectedValues = []
            collectedErrors = Errors(identifier=self.identifier)
            for position in range(0, value):
                value_present = '%s.present.%d' % (
                    self.identifier, position) in self.request.form
                if not value_present:
                    # This value have been removed
                    continue
                # Extract each item through a cloned per-item field/form.
                field = self.valueField.clone(new_identifier=str(position))
                form = cloneFormData(self.form, prefix=self.identifier)
                data, errors = form.extractData(Fields(field))
                if errors:
                    collectedErrors.extend(errors)
                else:
                    collectedValues.append(data[field.identifier])
            if collectedErrors:
                return (None, collectedErrors)
            # Cast to the field's collection type (list/set/tuple).
            value = self.component.collectionType(collectedValues)
        return (value, None)
# Multi-Choice widget
class MultiChoiceFieldWidget(ChoiceFieldWidget):
    """Checkbox-list widget for a SetField of choices."""
    grok.adapts(SetField, ChoiceField, Interface, Interface)

    defaultHtmlClass = ['field', 'field-multichoice']

    def __init__(self, field, value_field, form, request):
        super(MultiChoiceFieldWidget, self).__init__(field, form, request)
        self.source = value_field

    def prepareContentValue(self, value):
        # Convert stored values into their vocabulary tokens.
        form_value = []
        if value is NO_VALUE:
            return {self.identifier: form_value}
        choices = self.choices()
        for entry in value:
            try:
                term = choices.getTerm(entry)
                form_value.append(term.token)
            except LookupError:
                # Silently skip values no longer present in the vocabulary.
                pass
        return {self.identifier: form_value}

    def renderableChoice(self):
        """Yield one render dict per available choice, flagging selected ones."""
        current = self.inputValue()
        base_id = self.htmlId()
        for i, choice in enumerate(self.choices()):
            yield {'token': choice.token,
                   'title': choice.title,
                   'checked': choice.token in current,
                   'id': base_id + '-' + str(i)}


grok.global_adapter(
    newCollectionWidgetFactory(mode='multiselect'),
    adapts=(ICollectionField, Interface, Interface),
    provides=IWidget,
    name='multiselect')


class MultiSelectFieldWidget(MultiChoiceFieldWidget):
    grok.name('multiselect')


class MultiChoiceDisplayFieldWidget(MultiChoiceFieldWidget):
    grok.name('display')

    def renderableChoice(self):
        # Display mode: only the selected choices are listed.
        current = self.inputValue()
        base_id = self.htmlId()
        for i, choice in enumerate(self.choices()):
            if choice.token in current:
                yield {'title': choice.title,
                       'id': base_id + '-' + str(i)}
class MultiChoiceWidgetExtractor(WidgetExtractor):
    """Extract a set of choice values from the request."""
    grok.adapts(SetField, ChoiceField, Interface, Interface)

    def __init__(self, field, value_field, form, request):
        super(MultiChoiceWidgetExtractor, self).__init__(field, form, request)
        self.source = value_field

    def extract(self):
        value, errors = super(MultiChoiceWidgetExtractor, self).extract()
        if errors is None:
            # The hidden '<id>.present' marker distinguishes "nothing
            # checked" from "widget not submitted at all".
            is_present = self.request.form.get(
                self.identifier + '.present', NO_VALUE)
            if is_present is NO_VALUE:
                # Not in the request
                return (NO_VALUE, None)
            if value is NO_VALUE:
                # Nothing selected
                return (self.component.collectionType(), None)
            choices = self.source.getChoices(self.form)
            try:
                if not isinstance(value, list):
                    value = [value]
                # Map submitted tokens back to vocabulary values.
                value = self.component.collectionType(
                    [choices.getTermByToken(t).value for t in value])
            except LookupError:
                return (None, _(u'The selected value is not available.'))
        return (value, errors)
def makeCollectionSchemaFactory(factory):
    """Return a factory converting a zope.schema collection into `factory`."""
    def CollectionSchemaFactory(schema):
        field = factory(
            schema.title or None,
            identifier=schema.__name__,
            description=schema.description,
            required=schema.required,
            readonly=schema.readonly,
            minLength=schema.min_length,
            maxLength=schema.max_length,
            valueField=schema.value_type,
            interface=schema.interface,
            constrainValue=schema.constraint,
            defaultFactory=schema.defaultFactory,
            # NOTE(review): `or NO_VALUE` also discards falsy defaults
            # (0, empty collection); it may also KeyError when 'default'
            # is absent from schema.__dict__ -- confirm.
            defaultValue=schema.__dict__['default'] or NO_VALUE)
        return field
    return CollectionSchemaFactory
def register():
    """Register schema->field factories for zope.schema collection types."""
    registerSchemaField(
        makeCollectionSchemaFactory(CollectionField),
        schema_interfaces.ICollection)
    registerSchemaField(
        makeCollectionSchemaFactory(ListField),
        schema_interfaces.IList)
    registerSchemaField(
        makeCollectionSchemaFactory(SetField),
        schema_interfaces.ISet)
    registerSchemaField(
        makeCollectionSchemaFactory(TupleField),
        schema_interfaces.ITuple)
|
from django import forms
from .models import Question, Choice, SetsAttempted,Book
from django.forms import formset_factory
class BookForm(forms.Form):
    """Single-field form used (via a formset) to collect names."""
    # NOTE(review): the label says 'Add More' while the placeholder says
    # 'Pre-requisite' -- confirm the intended wording.
    name = forms.CharField(
        label='Add More',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Enter Pre-requisite here'
        })
    )

# Formset producing one extra empty BookForm by default.
BookFormset = formset_factory(BookForm, extra=1)
import os
import sys
import unittest
import pytorch_lightning as pl
import pytorch_lightning.loggers
from pytorch_lightning.utilities.parsing import AttributeDict
from deep_depth_transfer import PoseNetResNet, DepthNetResNet, UnsupervisedCriterion
from deep_depth_transfer.data import KittiDataModuleFactory
from deep_depth_transfer.models import ScaledUnsupervisedDepthModel
from test.data_module_mock import DataModuleMock
# Use no DataLoader worker processes on Windows (spawn-based
# multiprocessing), four elsewhere.
if sys.platform == "win32":
    WORKERS_COUNT = 0
else:
    WORKERS_COUNT = 4
class TestUnsupervisedDepthModel(unittest.TestCase):
    """Smoke test: fit the scaled unsupervised depth model for one epoch
    on a mocked KITTI data module (requires a CUDA GPU).
    """

    def setUp(self) -> None:
        # Dataset is expected at <parent-of-test-folder>/datasets/kitti.
        current_folder = os.path.dirname(os.path.abspath(__file__))
        dataset_folder = os.path.join(os.path.dirname(current_folder), "datasets", "kitti")
        data_module_factory = KittiDataModuleFactory(range(0, 301, 1), directory=dataset_folder)
        self._data_module = data_module_factory.make_dataset_manager(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1)
        )
        # Wrap in a mock to keep the run small and fast.
        self._data_module = DataModuleMock(self._data_module)
        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = UnsupervisedCriterion(self._data_module.get_cameras_calibration(), 1, 1)
        # NOTE(review): beta1 > beta2 is unusual for Adam-style optimizers
        # -- confirm these are not swapped.
        params = AttributeDict(
            lr=1e-3,
            beta1=0.99,
            beta2=0.9,
            scale_lr=1e-3,
            initial_log_scale=0.,
            initial_log_min_depth=0.,
            initial_log_pose_scale=0.,
        )
        self._model = ScaledUnsupervisedDepthModel(params, pose_net, depth_net, criterion).cuda()

    def test_unsupervised_depth_model(self):
        # One-epoch GPU training run; passing means no exceptions raised.
        tb_logger = pl.loggers.TensorBoardLogger('logs/')
        trainer = pl.Trainer(logger=tb_logger, max_epochs=1, gpus=1, progress_bar_refresh_rate=20)
        trainer.fit(self._model, self._data_module)
|
# Adjacency matrix and colour table sized for at most 100 nodes.
adj = [[0 for i in range(100)] for j in range(100)]
color = [0 for i in range(100)]
# DFS colouring states (black is declared but never assigned below).
white = 1
gray = 2
black = 3
# Read node and edge counts from stdin.
node,edge = map(int,input().split())
def dfsvisit(x):
    """Recursively visit every still-white node reachable from x."""
    color[x] = gray
    for nbr in range(node):
        if adj[x][nbr] == 1 and color[nbr] == white:
            dfsvisit(nbr)
def dfs():
    """Reset all colours, then run a depth-first traversal from every
    not-yet-visited node."""
    for idx in range(node):
        color[idx] = white
    for idx in range(node):
        if color[idx] == white:
            dfsvisit(idx)
# Read the edges and fill the undirected adjacency matrix, then traverse.
for i in range(edge):
    n1,n2 = map(int,input().split())
    adj[n1][n2] = 1
    adj[n2][n1] = 1
dfs()
"""
This script will split the given file into the provided number of files.
If the number of files to split into is not given, a default value of 2 is used.
The execution below will split file "a.txt" into 3 files - a_0.txt a_1.txt a_2.txt
python split_file.py a.txt 3
"""
# Import the Libraries
import sys
from os import path as path
# Check if Parameters are passed
if len(sys.argv) < 2:
    print("Please pass the full filename")
    exit(0)

# Get the Parameters
orig_file = sys.argv[1]
num_splits = int(sys.argv[2]) if len(sys.argv) > 2 else 2  # Default split is 2
orig_file_name = path.basename(orig_file)

# Read the file once and slice the cached lines.  Bug fix: the original
# called seek() with *line* counts (seek expects byte offsets) and then
# re-read the whole file per chunk, so every chunk after the first started
# at the wrong position.
orig_fp = open(orig_file, 'r')
all_lines = orig_fp.readlines()
orig_fp.close()
file_length = len(all_lines)

# Lines per output file; // keeps this an integer on Python 2 and 3.
num_lines = file_length // num_splits
print("Splitting File {0} of length {1} into {2} files".format(orig_file_name, file_length, num_splits))

start_pos = 0
for i in range(num_splits):
    # The last chunk absorbs any remainder lines.
    end_pos = file_length if i + 1 == num_splits else start_pos + num_lines
    # New file name: <base>_<i>.<ext>
    new_file = "{0}_{1}.{2}".format('.'.join(orig_file_name.split('.')[:-1]), str(i), orig_file_name.split('.')[-1])
    new_fp = open(new_file, 'w')
    new_fp.writelines(all_lines[start_pos:end_pos])  # Write the Lines
    new_fp.close()  # Close the file after writing
    print("Created file {0}, number of lines {1}".format(new_file, end_pos - start_pos))
    start_pos = end_pos  # Advance to the next chunk
|
from collections import deque
import copy
def map_list_combination(params_list):
    """Expand a mapping {key: [values, ...]} into every key->value combination.

    Returns a deque of dicts where each dict assigns exactly one value per key.
    An empty mapping yields deque([{}]).

    Fixes: the previous version grew the result with deque.insert(-1, ...),
    which inserts *before* the last element rather than appending (producing a
    scrambled order), and used a redundant C-style (dict)(...) cast.
    """
    combinations = deque([{}])
    for key, value_list in params_list.items():
        expanded = deque()
        for partial in combinations:
            for value in value_list:
                # deepcopy keeps value objects independent across combinations,
                # matching the original implementation's copy semantics.
                candidate = copy.deepcopy(partial)
                candidate[key] = value
                expanded.append(candidate)
        combinations = expanded
    return combinations
|
import numpy as np
class GloveLoader:
    """Lazily loads GloVe word embeddings from a text file into a dict.

    Each line of the file is expected to look like: <word> <v1> <v2> ... <vN>.
    The dictionary maps word -> numpy float32 vector. (Previously the vector
    components were left as a string array -- np.asarray with no dtype --
    which made them useless for any numeric work.)
    """

    def __init__(self, file='glove.6B.100d.txt'):
        self.file = file        # path to the GloVe text file
        self.gloveDict = {}     # word -> vector cache; filled on first access

    # private
    def __readGloveTxt(self):
        """Parse the GloVe file and cache the word -> vector mapping."""
        with open(self.file, 'r', encoding="ISO-8859-1") as in_file:
            stripped = (line.strip() for line in in_file)
            dictionary = {}
            print('Loading Glove word embeddings...')
            for line in stripped:
                if line:  # skip blank lines
                    frags = line.split(' ')
                    word = frags[0]
                    # dtype=np.float32: parse vector components as numbers.
                    vector = np.asarray(frags[1:], dtype=np.float32)
                    dictionary[word] = vector
            print('Glove embeddings loaded.')
        # Only install the freshly parsed dict if nothing was cached yet.
        if not self.gloveDict:
            self.gloveDict = dictionary

    # public
    def getGloveDictionary(self):
        """Return the cached embedding dict, loading it on first call."""
        if not self.gloveDict:
            self.__readGloveTxt()
        return self.gloveDict
# gl = GloveLoader()
# # print(gl.gloveDict)
# print(gl.getGloveDictionary())
# print(gl.getGloveDictionary())
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Clock and reset (CAR) controllers for LUNA. """
import logging
from abc import ABCMeta, abstractmethod
from amaranth import Signal, Module, ClockDomain, ClockSignal, Elaboratable, Instance, ResetSignal
from ..utils.cdc import stretch_strobe_signal
from ..test import LunaGatewareTestCase, usb_domain_test_case, sync_test_case
class PHYResetController(Elaboratable):
    """ Gateware that implements a short power-on-reset pulse to reset an attached PHY.

    I/O ports:
        I: trigger   -- A signal that triggers a reset when high.
        O: phy_reset -- The signal to be delivered to the target PHY.
        O: phy_stop  -- Held high from the start of reset until the FSM returns
                        to IDLE (i.e. through RESETTING and DEFERRING_STARTUP).
    """

    def __init__(self, *, clock_frequency=60e6, reset_length=2e-6, stop_length=2e-6, power_on_reset=True):
        """ Params:
            clock_frequency -- Frequency of the 'sync' domain clock, in Hz.
            reset_length    -- The length of a reset pulse, in seconds.
            stop_length     -- The length of time STP should be asserted after reset.
            power_on_reset  -- If True or omitted, the reset will be applied once the firmware
                               is configured.
        """
        from math import ceil

        self.power_on_reset = power_on_reset

        # Compute the reset length in cycles.
        clock_period = 1 / clock_frequency
        self.reset_length_cycles = ceil(reset_length / clock_period)
        self.stop_length_cycles = ceil(stop_length / clock_period)

        #
        # I/O port
        #
        self.trigger = Signal()
        self.phy_reset = Signal()
        self.phy_stop = Signal()

    def elaborate(self, platform):
        m = Module()

        # Counter that stores how many cycles we've spent in reset.
        cycles_in_reset = Signal(range(0, self.reset_length_cycles))

        # Start directly in RESETTING when a power-on reset was requested, so
        # the PHY is reset as soon as the design comes up.
        reset_state = 'RESETTING' if self.power_on_reset else 'IDLE'

        with m.FSM(reset=reset_state, domain='sync') as fsm:

            # Drive the PHY reset whenever we're in the RESETTING cycle.
            m.d.comb += [
                self.phy_reset.eq(fsm.ongoing('RESETTING')),
                self.phy_stop.eq(~fsm.ongoing('IDLE'))
            ]

            with m.State('IDLE'):
                m.d.sync += cycles_in_reset.eq(0)

                # Wait for a reset request.
                with m.If(self.trigger):
                    m.next = 'RESETTING'

            # RESETTING: hold the reset line active for the given amount of time
            with m.State('RESETTING'):
                m.d.sync += cycles_in_reset.eq(cycles_in_reset + 1)

                with m.If(cycles_in_reset + 1 == self.reset_length_cycles):
                    m.d.sync += cycles_in_reset.eq(0)
                    m.next = 'DEFERRING_STARTUP'

            # DEFERRING_STARTUP: Produce a signal that will defer startup for
            # the provided amount of time. This allows line state to stabilize
            # before the PHY will start interacting with us.
            with m.State('DEFERRING_STARTUP'):
                m.d.sync += cycles_in_reset.eq(cycles_in_reset + 1)

                with m.If(cycles_in_reset + 1 == self.stop_length_cycles):
                    m.d.sync += cycles_in_reset.eq(0)
                    m.next = 'IDLE'

        return m
class PHYResetControllerTest(LunaGatewareTestCase):
    """ Simulation test for PHYResetController's power-on reset and STP timing. """
    FRAGMENT_UNDER_TEST = PHYResetController

    def initialize_signals(self):
        # Start with no externally-requested reset.
        yield self.dut.trigger.eq(0)

    @sync_test_case
    def test_power_on_reset(self):
        #
        # After power-on, the PHY should remain in reset for a while.
        #
        yield
        self.assertEqual((yield self.dut.phy_reset), 1)

        yield from self.advance_cycles(30)
        self.assertEqual((yield self.dut.phy_reset), 1)

        yield from self.advance_cycles(60)
        self.assertEqual((yield self.dut.phy_reset), 1)

        #
        # Then, after the relevant reset time, it should resume being unasserted,
        # while phy_stop stays high through the startup-deferral window.
        #
        yield from self.advance_cycles(31)
        self.assertEqual((yield self.dut.phy_reset), 0)
        self.assertEqual((yield self.dut.phy_stop), 1)

        yield from self.advance_cycles(120)
        self.assertEqual((yield self.dut.phy_stop), 0)
class LunaDomainGenerator(Elaboratable, metaclass=ABCMeta):
    """ Helper that generates the clock domains used in a LUNA board.

    Note that this module should create three in-phase clocks; so these domains
    should not require explicit boundary crossings.

    I/O port:
        O: clk_fast    -- The clock signal for our fast clock domain.
        O: clk_sync    -- The clock signal used for our sync clock domain.
        O: clk_usb     -- The clock signal used for our USB domain.
        O: usb_holdoff -- Signal that indicates that the USB domain is immediately post-reset,
                          and thus we should avoid transactions with the external PHY.
    """

    def __init__(self, *, clock_signal_name=None, clock_signal_frequency=None):
        """
        Parameters:
            clock_signal_name      = The clock signal name to use; or None to use the platform's default clock.
            clock_signal_frequency = The frequency of clock_signal_name in Hz.
        """
        self.clock_name = clock_signal_name
        self.clock_frequency = clock_signal_frequency

        #
        # I/O port
        #
        self.clk_fast = Signal()
        self.clk_sync = Signal()
        self.clk_usb = Signal()
        self.usb_holdoff = Signal()

    @abstractmethod
    def generate_fast_clock(self, m, platform):
        """ Method that returns our platform's fast clock; used for e.g. RAM interfacing. """

    @abstractmethod
    def generate_sync_clock(self, m, platform):
        """ Method that returns our platform's primary synchronous clock. """

    @abstractmethod
    def generate_usb_clock(self, m, platform):
        """ Method that generates a 60MHz clock used for ULPI interfacing. """

    def create_submodules(self, m, platform):
        """ Method hook for creating any necessary submodules before generating clock. """
        pass

    def create_usb_reset(self, m, platform):
        """
        Function that should create our USB reset, and connect it to:
            m.domains.usb.rst / self.usb_rst
        """
        # Default implementation: a PHYResetController drives the usb domain's
        # reset, and its STP/stop output becomes our holdoff signal.
        m.submodules.usb_reset = controller = PHYResetController()
        m.d.comb += [
            ResetSignal("usb")  .eq(controller.phy_reset),
            self.usb_holdoff    .eq(controller.phy_stop)
        ]

    def elaborate(self, platform):
        m = Module()

        # Create our clock domains.
        m.domains.fast = self.fast = ClockDomain()
        m.domains.sync = self.sync = ClockDomain()
        m.domains.usb = self.usb = ClockDomain()

        # Call the hook that will create any submodules necessary for all clocks.
        self.create_submodules(m, platform)

        # Generate and connect up our clocks.
        m.d.comb += [
            self.clk_usb .eq(self.generate_usb_clock(m, platform)),
            self.clk_sync .eq(self.generate_sync_clock(m, platform)),
            self.clk_fast .eq(self.generate_fast_clock(m, platform)),
            ClockSignal(domain="fast") .eq(self.clk_fast),
            ClockSignal(domain="sync") .eq(self.clk_sync),
            ClockSignal(domain="usb") .eq(self.clk_usb),
        ]

        # Call the hook that will connect up our reset signals.
        self.create_usb_reset(m, platform)

        return m
class LunaECP5DomainGenerator(LunaDomainGenerator):
    """ ECP5 clock domain generator for LUNA. Assumes a 60MHz input clock. """

    # For debugging, we'll allow the ECP5's onboard clock to generate a 62MHz
    # oscillator signal. This won't work for USB, but it'll at least allow
    # running some basic self-tests. The clock is 310 MHz by default, so
    # dividing by 5 will yield 62MHz.
    OSCG_DIV = 5

    # Quick configuration selection: default frequency (in MHz) per domain.
    DEFAULT_CLOCK_FREQUENCIES_MHZ = {
        "fast": 240,
        "sync": 120,
        "usb": 60
    }

    def __init__(self, *, clock_frequencies=None, clock_signal_name=None, clock_signal_frequency=None):
        """
        Parameters:
            clock_frequencies -- A dictionary mapping 'fast', 'sync', and 'usb' to the clock
                                 frequencies for those domains, in MHz. Valid choices for each
                                 domain are 60, 120, and 240. If not provided, fast will be
                                 assumed to be 240, sync will assumed to be 120, and usb will
                                 be assumed to be a standard 60.
        """
        super().__init__(clock_signal_name=clock_signal_name, clock_signal_frequency=clock_signal_frequency)
        self.clock_frequencies = clock_frequencies

    def create_submodules(self, m, platform):
        self._pll_lock = Signal()

        # Figure out our platform's clock frequencies -- grab the platform's
        # defaults, and then override any with our local, caller-provided copies.
        new_clock_frequencies = platform.DEFAULT_CLOCK_FREQUENCIES_MHZ.copy()
        if self.clock_frequencies:
            new_clock_frequencies.update(self.clock_frequencies)
        self.clock_frequencies = new_clock_frequencies

        # Use the provided clock name and frequency for our input; or the default clock
        # if no name was provided.
        clock_name = self.clock_name if self.clock_name else platform.default_clk
        clock_frequency = self.clock_frequency if self.clock_name else platform.default_clk_frequency

        # Create absolute-frequency copies of our PLL outputs.
        # We'll use the generate_ methods below to select which domains
        # apply to which components.
        self._clk_240MHz = Signal()
        self._clk_120MHz = Signal()
        self._clk_60MHz = Signal()
        self._clock_options = {
            60: self._clk_60MHz,
            120: self._clk_120MHz,
            240: self._clk_240MHz
        }

        pll_params = {}

        # Grab our input clock
        # For debugging: if our clock name is "OSCG", allow using the internal
        # oscillator. This is mostly useful for debugging.
        if clock_name == "OSCG":
            logging.warning("Using FPGA-internal oscillator for an approximately 62MHz.")
            logging.warning("USB communication won't work for f_OSC != 60MHz.")

            input_clock = Signal()
            m.submodules += Instance("OSCG", p_DIV=self.OSCG_DIV, o_OSC=input_clock)
            pll_params["CLKFB_DIV"] = 4
        else:
            input_clock = platform.request(clock_name)
            # The PLL feedback divider must bring the input clock up to 240MHz
            # exactly; reject input frequencies that can't do so.
            divisor = 240e6 / clock_frequency
            if not divisor.is_integer():
                raise ValueError("Unsupported clock frequency {} MHz, must be an integer divisor of 240 MHz"
                                 .format(clock_frequency/1e6))
            pll_params["CLKFB_DIV"] = int(divisor)

        # Instantiate the ECP5 PLL.
        # These constants generated by Clarity Designer; which will
        # ideally be replaced by an open-source component.
        # (see https://github.com/SymbiFlow/prjtrellis/issues/34.)
        m.submodules.pll = Instance("EHXPLLL",

                # Clock in.
                i_CLKI=input_clock,

                # Generated clock outputs.
                o_CLKOP=self._clk_240MHz,
                o_CLKOS=self._clk_120MHz,
                o_CLKOS2=self._clk_60MHz,

                # Status.
                o_LOCK=self._pll_lock,

                # PLL parameters...
                p_PLLRST_ENA="DISABLED",
                p_INTFB_WAKE="DISABLED",
                p_STDBY_ENABLE="DISABLED",
                p_DPHASE_SOURCE="DISABLED",
                p_CLKOS3_FPHASE=0,
                p_CLKOS3_CPHASE=0,
                p_CLKOS2_FPHASE=0,
                p_CLKOS2_CPHASE=7,
                p_CLKOS_FPHASE=0,
                p_CLKOS_CPHASE=3,
                p_CLKOP_FPHASE=0,
                p_CLKOP_CPHASE=1,
                p_PLL_LOCK_MODE=0,
                p_CLKOS_TRIM_DELAY="0",
                p_CLKOS_TRIM_POL="FALLING",
                p_CLKOP_TRIM_DELAY="0",
                p_CLKOP_TRIM_POL="FALLING",
                p_OUTDIVIDER_MUXD="DIVD",
                p_CLKOS3_ENABLE="DISABLED",
                p_OUTDIVIDER_MUXC="DIVC",
                p_CLKOS2_ENABLE="ENABLED",
                p_OUTDIVIDER_MUXB="DIVB",
                p_CLKOS_ENABLE="ENABLED",
                p_OUTDIVIDER_MUXA="DIVA",
                p_CLKOP_ENABLE="ENABLED",
                p_CLKOS3_DIV=1,
                p_CLKOS2_DIV=8,
                p_CLKOS_DIV=4,
                p_CLKOP_DIV=2,
                p_CLKFB_DIV=pll_params["CLKFB_DIV"],
                p_CLKI_DIV=1,
                p_FEEDBK_PATH="CLKOP",

                # Internal feedback.
                i_CLKFB=self._clk_240MHz,

                # Control signals.
                i_RST=0,
                i_PHASESEL0=0,
                i_PHASESEL1=0,
                i_PHASEDIR=0,
                i_PHASESTEP=0,
                i_PHASELOADREG=0,
                i_STDBY=0,
                i_PLLWAKESYNC=0,

                # Output Enables.
                i_ENCLKOP=0,
                i_ENCLKOS=0,
                i_ENCLKOS2=0,
                i_ENCLKOS3=0,

                # Synthesis attributes.
                a_FREQUENCY_PIN_CLKI="60.000000",
                a_FREQUENCY_PIN_CLKOS2="60.000000",
                a_FREQUENCY_PIN_CLKOS="120.000000",
                a_FREQUENCY_PIN_CLKOP="240.000000",
                a_ICP_CURRENT="9",
                a_LPF_RESISTOR="8"
        )

        # Set up our global resets so the system is kept fully in reset until
        # our core PLL is fully stable. This prevents us from internally clock
        # glitching ourselves before our PLL is locked. :)
        m.d.comb += [
            ResetSignal("sync").eq(~self._pll_lock),
            ResetSignal("fast").eq(~self._pll_lock),
        ]

    def generate_usb_clock(self, m, platform):
        # Select the PLL output matching the requested usb-domain frequency.
        return self._clock_options[self.clock_frequencies['usb']]

    def generate_sync_clock(self, m, platform):
        return self._clock_options[self.clock_frequencies['sync']]

    def generate_fast_clock(self, m, platform):
        return self._clock_options[self.clock_frequencies['fast']]

    def stretch_sync_strobe_to_usb(self, m, strobe, output=None, allow_delay=False):
        """
        Helper that stretches a strobe from the `sync` domain to communicate with the `usb` domain.
        Works for any chosen frequency in which f(usb) < f(sync).
        """
        # TODO: replace with Amaranth's pulsesynchronizer?
        to_cycles = self.clock_frequencies['sync'] // self.clock_frequencies['usb']
        return stretch_strobe_signal(m, strobe, output=output, to_cycles=to_cycles, allow_delay=allow_delay)
|
import requests
from bs4 import BeautifulSoup
from PyQt5 import QtWidgets
from draw_words import DrawWords
from analyzer import Analyzer
if __name__ == '__main__':
    import sys

    # Ask the user which article to analyze.
    target_url = input('Gimme a URL!\n')

    # Fetch the page and pull the text out of every <p> tag.
    response = requests.get(url=target_url)
    parsed_page = BeautifulSoup(response.text, features='html.parser')
    paragraph_texts = [tag.text for tag in parsed_page.findAll('p')]

    # Run the NLP analysis and pick the three most relevant words.
    word_a, word_b, word_c = Analyzer(paragraph_texts).getRelevantWords()

    # Display them in the Qt window.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = DrawWords(word_a, word_b, word_c)
    main_window.show()
    sys.exit(qt_app.exec_())
|
import socket
import os
import time

# Simple interactive command server: accepts one client at a time, forwards
# typed commands, and prints the client's responses.
port = 52918

conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.bind(('0.0.0.0', port))
conn.listen(1)

while 1:
    try:
        os.system("clear")
        print('[*] Listening for upcoming connections...')
        client_conn, info = conn.accept()
        print('[+] New client found from ip ' + str(info[0]) + ' !')
        try:
            msg = ''
            # Compare strings to strings: the old loop compared the *encoded*
            # bytes against 'end', which could never match.
            while msg != 'end':
                try:
                    # raw_input() does not exist on Python 3 (the rest of this
                    # script is Python 3: print(), BrokenPipeError); use input().
                    msg = input(str(info[0]) + ' > ')
                    if msg == '':
                        # Nothing typed: don't send an empty packet.
                        pass
                    else:
                        client_conn.send(msg.encode())
                        recv = client_conn.recv(1024 * 3)
                        print(recv.decode())
                except BrokenPipeError:
                    print('[-] Client disconnected...')
                    break
            # Session ended normally (user typed 'end'): drop the client.
            client_conn.close()
        except KeyboardInterrupt:
            # Ctrl-C ends just this session: tell the client, then clean up.
            print('\n[-] Session Closed')
            client_conn.send('end'.encode())
            client_conn.close()
            time.sleep(2)
    except KeyboardInterrupt:
        # Ctrl-C while waiting for a connection shuts the server down.
        print('\n[-] Shutting down the server...')
        break
|
from sklearn.preprocessing import OneHotEncoder
import tensorflow.contrib.keras as keras
import numpy as np
class DataManager:
    """Loads CIFAR-10, one-hot encodes labels, and serves mini-batches.

    Three batch modes are supported via next_batch(): the full training set,
    a supervised subset (first 40k examples), and an unsupervised subset
    (images only, no labels).
    """

    # Batch-type selectors accepted by next_batch().
    TOTAL_BATCH = "total_batch"
    SUPERVISED_BATCH = "super_batch"
    UNSUPERVISED_BATCH = "un_super_batch"

    def __init__(self, number_of_unsupervised_data=10000):
        cifar10 = keras.datasets.cifar10

        # for batch count.
        # NOTE(review): this single cursor is shared by all three batch modes,
        # so interleaving next_batch() calls with different batchType values
        # will skew the position -- confirm callers only use one mode at a time.
        self._i = 0

        # set divide size.
        self.train_dataset_size = 50000
        self.test_dataset_size = 10000

        # load data using keras' built-in CIFAR-10 loader (downloads on first use).
        (self.train_X, self.train_y), (self.test_X, self.test_y) = cifar10.load_data()

        # Shuffle the train and test sets independently.
        rnd_index = np.random.permutation(len(self.train_X))
        self.train_X = self.train_X[rnd_index]
        self.train_y = self.train_y[rnd_index]
        rnd_index = np.random.permutation(len(self.test_X))
        self.test_X = self.test_X[rnd_index]
        self.test_y = self.test_y[rnd_index]

        # one hot: class id -> 10-dim indicator vector.
        zeros_train = np.zeros([len(self.train_y), 10])
        zeros_test = np.zeros([len(self.test_y), 10])
        for i, value in enumerate(self.train_y):
            zeros_train[i, value] = 1
        for i, value in enumerate(self.test_y):
            zeros_test[i, value] = 1
        self.train_y = zeros_train
        self.test_y = zeros_test

        # change range 0 to 1.
        self.train_X = self.train_X / 255
        self.test_X = self.test_X / 255

        # Supervised subset: the first 40k labeled examples.
        self.supervised_train_X = self.train_X[:40000]
        self.supervised_train_y = self.train_y[:40000]

        # NOTICE : unsupervised data (images drawn from the remaining 10k).
        self.unsupervised_train_X = self.train_X[40000:40000 + number_of_unsupervised_data]
        pass

    def next_batch(self, batch_size, batchType=TOTAL_BATCH):
        """Return the next (x, y) pair -- or just x for UNSUPERVISED_BATCH.

        The cursor wraps modulo the relevant dataset length, so the final
        batch before a wrap may be shorter than batch_size.
        """
        if batchType == self.TOTAL_BATCH:
            x, y = self.train_X[self._i:self._i + batch_size], self.train_y[self._i: self._i + batch_size]
            self._i = (self._i + batch_size) % len(self.train_X)
            return x, y
        elif batchType == self.SUPERVISED_BATCH:
            x, y = self.supervised_train_X[self._i:self._i + batch_size], self.supervised_train_y[
                self._i: self._i + batch_size]
            self._i = (self._i + batch_size) % len(self.supervised_train_X)
            return x, y
        elif batchType == self.UNSUPERVISED_BATCH:
            x = self.unsupervised_train_X[self._i:self._i + batch_size]
            self._i = (self._i + batch_size) % len(self.unsupervised_train_X)
            return x

    # def shake_data(self):
    #     rnd_index = np.random.permutation(len(self.train_X))
    #     self.train_X = self.train_X[rnd_index]
    #     self.train_y = self.train_y[rnd_index]
|
import pygame
class Jarv:
    """A side-scrolling pygame sprite that can move and 'attack'.

    Position is tracked in world coordinates (xpos); rendering subtracts the
    camera offset (world_x) supplied via update().
    """

    def __init__(self, xpos, ypos, width, height):
        # Remember the spawn state so reset() can restore it.
        self.startvalues = [xpos, ypos, width, height]
        self.xpos = xpos
        self.ypos = ypos
        self.width = width
        self.height = height
        self.speed_x = 0       # horizontal speed; subtracted from xpos each update
        self.world_x = 0       # camera/world scroll offset applied at render time
        self.attacking = False
        self.image = pygame.image.load("img/jarv.png")
        self.image_rect = (self.xpos, self.ypos, self.width, self.height)

    def reset(self):
        """Restore the position/size/state captured at construction time."""
        self.xpos = self.startvalues[0]
        self.ypos = self.startvalues[1]
        self.width = self.startvalues[2]
        self.height = self.startvalues[3]
        self.attacking = False
        self.speed_x = 0

    def render(self, screen):
        """Scale and blit the sprite at its screen position.

        NOTE(review): this rescales self.image in place every frame, so each
        scale starts from the previously scaled surface -- image quality will
        degrade over time; consider scaling from a kept original surface.
        """
        self.image = pygame.transform.scale(self.image, (self.width, self.height))
        self.image_rect = (self.xpos - self.world_x, self.ypos, self.width, self.height)
        # Only draw once the sprite is past x=100 (presumably off the HUD area
        # -- confirm against the game's layout).
        if self.xpos > 100:
            screen.blit(self.image, self.image_rect)

    def update(self, world_x, dt):
        """Advance the sprite: apply scroll offset and attack movement."""
        self.world_x = world_x
        self.xpos -= self.speed_x
        if(self.attacking):
            # Attack speed grows with the frame delta.
            self.speed_x = 1 * (1+dt)

    def begin_attack(self):
        self.attacking = True

    def get_rect(self):
        # Last rect computed by render() (screen coordinates).
        return self.image_rect

    def attack_pos(self):
        # World x-coordinate 400 units to the left of the sprite.
        return self.xpos - 400
|
# def fun(b=None):
# b = []
# b.append(1)
# print(b.__repr__)
# #print(type(args))
# #print(type(**))
#
# fun()
# fun()
# fun()
#
# a = 1
# b = a
# b += 1
# print(b.__repr__)
# print(a.__repr__)
# a = [1, 2, 3]
# print(a)
# a.pop(1)
# print(a)
d = {"Ivan": 29, "Mike": 27, "Elena": 31, "Bob": 31}
list_d = list(d.items())
list_d.sort(key=lambda i: (i[1], i[0]))
for i in list_d:
print(i[0], ':', i[1])
# list_keys = list(d.keys())
# list_keys.sort()
# for i in list_keys:
# print(i, ':', d[i]) |
Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:18:55) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> a="1234567"
>>> len(a)
7
>>> a[7]
Traceback (most recent call last):
File "<pyshell#2>", line 1, in <module>
a[7]
IndexError: string index out of range
>>> a[:7]
'1234567'
>>> for i in range(5)
SyntaxError: invalid syntax
>>> for i in range(5):
print(i)
0
1
2
3
4
>>>
RESTART: C:\Users\admin\AppData\Local\Programs\Python\Python35\Kichu_FYP\Present_Py3.py
>>> S_box("1111")
'0010'
>>> type(S_box("1111"))
<class 'str'>
>>> a="1"*4
>>> a
'1111'
>>> '1111'^1
Traceback (most recent call last):
File "<pyshell#12>", line 1, in <module>
'1111'^1
TypeError: unsupported operand type(s) for ^: 'str' and 'int'
>>> '1111'^'0001'
Traceback (most recent call last):
File "<pyshell#13>", line 1, in <module>
'1111'^'0001'
TypeError: unsupported operand type(s) for ^: 'str' and 'str'
>>> int('1111',2)
15
>>> to_hex(int('1111',2))
Traceback (most recent call last):
File "<pyshell#15>", line 1, in <module>
to_hex(int('1111',2))
File "C:\Users\admin\AppData\Local\Programs\Python\Python35\Kichu_FYP\Present_Py3.py", line 9, in to_hex
return hex(int(a,2))[-1].upper()
TypeError: int() can't convert non-string with explicit base
>>> int('1111',2)
15
>>> "1000"*16
'1000100010001000100010001000100010001000100010001000100010001000'
>>> 4919131752989213764*2
9838263505978427528
>>> int('1000100010001000100010001000100010001000100010001000100010001000',2)^1
9838263505978427529
>>> "{0:64b}".format(9838263505978427529,2)
'1000100010001000100010001000100010001000100010001000100010001001'
>>> '1000100010001000100010001000100010001000100010001000100010001001'=='1000100010001000100010001000100010001000100010001000100010001001'
True
>>> "{0:064b}".format(9838263505978427529,2)
'1000100010001000100010001000100010001000100010001000100010001001'
>>> 307445734561825860*2
614891469123651720
>>> 307445734561825860*2
614891469123651720
>>> 614891469123651720^1
614891469123651721
>>> "{0:064b}".format(614891469123651721,2)
'0000100010001000100010001000100010001000100010001000100010001001'
>>> len('0000100010001000100010001000100010001000100010001000100010001001')
64
>>>
|
# coding: utf-8
from wiki.forms import SignupForm
from django.shortcuts import redirect, render
from django.contrib.auth import login as django_login
from django.contrib.auth.forms import AuthenticationForm
from django.utils.http import is_safe_url
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.generic import FormView
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
def signup(request):
    """Render the signup form; create the account and redirect home on a valid POST.

    An invalid POST falls through and re-renders the page with the bound
    (error-carrying) form.
    """
    if request.method == "POST":
        form = SignupForm(data=request.POST)
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        form = SignupForm()
    return render(request, 'signup.html', {'form': form})
class LoginView(FormView):
    """Login view built on Django's stock AuthenticationForm."""
    success_url = '/'
    form_class = AuthenticationForm
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'login.html'

    @method_decorator(sensitive_post_parameters('password'))
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        # Set a test cookie so form_valid can verify cookies are enabled.
        request.session.set_test_cookie()
        return super(LoginView, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        # Credentials were already validated by AuthenticationForm; log in.
        django_login(self.request, form.get_user())
        if self.request.session.test_cookie_worked():
            self.request.session.delete_test_cookie()
        return super(LoginView, self).form_valid(form)

    def get_success_url(self):
        # Only honor redirect targets that point at this host (open-redirect guard).
        redirect_to = self.request.GET.get(self.redirect_field_name)
        if not is_safe_url(url=redirect_to, host=self.request.get_host()):
            redirect_to = self.success_url
        return redirect_to
|
#!/usr/bin/env python3
import time
import os
import sys
import socket
import logging
import logging.config
import subprocess
import tensorflow as tf
import numpy as np
# import matplotlib
# matplotlib.use('Agg')
from model.resnet import ResNet
from utils.utils_tf import print_variables
from dataset.data_provider import DataHandler
from configs.config import get_logging_config, args, train_dir
# Short aliases for TF-Slim and the streaming mean-IoU metric.
slim = tf.contrib.slim
streaming_mean_iou = tf.contrib.metrics.streaming_mean_iou

# Configure logging for this run before grabbing the root logger.
logging.config.dictConfig(get_logging_config(args.run_name))
log = logging.getLogger()

# Fixing the random seeds
# tf.set_random_seed(1234)
# np.random.seed(123)
def objective(logits, labels, n_classes):
    """Defines classification loss and useful metrics.

    Args:
        logits: unnormalized class scores, shape (batch, n_classes).
        labels: integer class ids, shape (batch,).
        n_classes: number of classes (unused here; kept for interface parity).

    Returns:
        (the_loss, train_acc): total loss including weight decay, and batch accuracy.
    """
    # defining the loss
    cross_entr = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entr)
    tf.summary.scalar('loss', loss)

    # defining the metrics
    # (previously inferred_class was computed twice; once is enough)
    inferred_class = tf.cast(tf.argmax(logits, 1), tf.int32)
    positive_matches = tf.equal(inferred_class, labels)
    # NOTE(review): precision/recall below treat "class id != 0" as the
    # positive class via tf.cast(..., tf.bool) -- only meaningful for binary
    # 0/1 labels; verify before relying on them for multi-class runs.
    precision = tf.reduce_mean(tf.boolean_mask(tf.cast(positive_matches, tf.float32),
                                               tf.cast(inferred_class, tf.bool)))
    recall = tf.reduce_mean(tf.boolean_mask(tf.cast(positive_matches, tf.float32),
                                            tf.cast(labels, tf.bool)))
    train_acc = tf.reduce_mean(tf.cast(positive_matches, tf.float32))
    tf.summary.scalar('metrics/accuracy', train_acc)
    tf.summary.scalar('metrics/precision', precision)
    tf.summary.scalar('metrics/recall', recall)

    # adding up all losses
    the_loss = loss
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    wd_loss = tf.add_n(regularization_losses)
    tf.summary.scalar('loss/weight_decay', wd_loss)
    the_loss += wd_loss
    tf.summary.scalar('loss/full', the_loss)
    return the_loss, train_acc
def extract_batch(datahand):
    """Extracts a batch from the queue and applies processing.

    Returns the (img, label) tensors of the feedable iterator, with the
    static batch dimension restored.
    """

    def set_shape(t):
        # Restore the static batch dimension that the pipeline lost.
        t_shape = [args.batch_size] + t.get_shape().as_list()[1:]
        t.set_shape(t_shape)

    # Keep the input pipeline on the CPU so the GPU stays free for the model.
    with tf.device("/cpu:0"):
        sample = datahand.get_feedable_iterator(args.dataset, args.split)
        for t in sample.values():
            # shape info is lost somehow. Restoring it like this.
            set_shape(t)
        return sample['img'], sample['label']
def train(sess, datahand, net):
    """Initialization and training routines.

    Args:
        sess: an open tf.Session.
        datahand: data provider exposing feed dicts and the input iterator.
        net: model object with build_net / imagenet_init / restore_ckpt.
    """
    image_ph, labels_ph = extract_batch(datahand)
    tf.summary.image('augmented_images', image_ph, max_outputs=2)
    logits = net.build_net(image_ph, datahand.num_classes)
    loss, train_acc = objective(logits, labels_ph, datahand.num_classes)

    # setting up the learning rate: warmup, then piecewise decay by 10x
    global_step = tf.train.get_or_create_global_step()
    learning_rate = args.learning_rate

    learning_rates = [args.warmup_lr, learning_rate]
    steps = [args.warmup_step]
    if len(args.lr_decay) > 0:
        for i, step in enumerate(args.lr_decay):
            steps.append(step)
            learning_rates.append(learning_rate * 10**(-i - 1))
    learning_rate = tf.train.piecewise_constant(tf.to_int32(global_step),
                                                steps, learning_rates)
    tf.summary.scalar('learning_rate', learning_rate)

    # Defining an optimizer
    if args.optimizer == 'adam':
        opt = tf.train.AdamOptimizer(learning_rate)
    elif args.optimizer == 'nesterov':
        opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
    else:
        # Message added so the failure is self-explanatory.
        raise ValueError("Unsupported optimizer: %s" % args.optimizer)

    train_vars = tf.trainable_variables()
    print_variables('train', train_vars)
    train_op = slim.learning.create_train_op(
        loss, opt,
        global_step=global_step,
        variables_to_train=train_vars,
        summarize_gradients=args.summarize_gradients)

    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000,
                           keep_checkpoint_every_n_hours=1)

    # One writer for train summaries, one for validation summaries.
    # (Previously a second train FileWriter was created after restore,
    # orphaning this one and duplicating event files.)
    summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(os.path.join(train_dir, 'val'),
                                               sess.graph)

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    if args.random_trunk_init:
        print("Training from scratch")
    else:
        net.imagenet_init(opt, sess)
    net.restore_ckpt(sess, saver)
    starting_step = sess.run(global_step)
    # Freeze the graph so accidental op creation in the loop raises.
    tf.get_default_graph().finalize()

    log.info("Launching prefetch threads")
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    log.info("Starting training...")
    for step in range(starting_step, args.max_iterations + 1):
        start_time = time.time()
        try:
            train_loss, acc, lr = sess.run([train_op, train_acc, learning_rate],
                                           datahand.train_fd)
        except (tf.errors.OutOfRangeError, tf.errors.CancelledError):
            break
        duration = time.time() - start_time

        num_examples_per_step = args.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('step %d, loss = %.2f, acc = %.2f, lr=%.3f'
                      '(%.1f examples/sec; %.3f sec/batch)')
        log.info(format_str % (step, train_loss, acc, -np.log10(lr),
                               examples_per_sec, sec_per_batch))

        # Every 100 steps: log train and validation summaries.
        if step % 100 == 0:
            summary_str = sess.run(summary_op, datahand.train_fd)
            val_summary_str = sess.run(summary_op, datahand.val_fd)
            summary_writer.add_summary(summary_str, step)
            val_summary_writer.add_summary(val_summary_str, step)
        # Every 1000 steps: flush summaries and save a checkpoint.
        if step % 1000 == 0 and step > 0:
            summary_writer.flush()
            val_summary_writer.flush()
            log.debug("Saving checkpoint...")
            checkpoint_path = os.path.join(train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)

    summary_writer.close()
    val_summary_writer.close()  # previously never closed
    coord.request_stop()
    coord.join(threads)
def main(argv=None):  # pylint: disable=unused-argument
    """Build the network and run the training loop inside a fresh session."""
    model = ResNet(depth=50, training=True, weight_decay=args.weight_decay)
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    with tf.Session(config=session_config) as sess:
        train(sess, DataHandler(sess), model)
if __name__ == '__main__':
    # Record exactly how the script was invoked.
    command_line = ' '.join(sys.argv)
    log.debug("Executing a command: %s", command_line)

    # Log the git state (commit, branch, uncommitted diff) for reproducibility.
    commit_hash = subprocess.check_output("git log -n 1 --pretty=format:\"%H\"".split())
    branch_name = subprocess.check_output("git rev-parse --abbrev-ref HEAD".split())
    working_diff = subprocess.check_output('git diff --no-color'.split()).decode('ascii')
    log.debug("on branch %s with the following diff from HEAD (%s):" % (branch_name, commit_hash))
    log.debug(working_diff)

    # On GPU hosts, also record which GPU was assigned to this job.
    hostname = socket.gethostname()
    if 'gpuhost' in hostname:
        gpu_id = os.environ["CUDA_VISIBLE_DEVICES"]
        nvidiasmi = subprocess.check_output('nvidia-smi').decode('ascii')
        log.debug("Currently we are on %s and use gpu%s:" % (hostname, gpu_id))
        log.debug(nvidiasmi)
    tf.app.run()
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Customer
from advertisement.models import Advertisement
class AdvertisementInline(admin.StackedInline):
    """Stacked inline editor for a customer's advertisements."""
    model = Advertisement
@admin.register(Customer)
class CustomerAdmin(UserAdmin):
    """Admin for Customer, with advertisements editable inline."""
    list_display = ('username', 'email', 'first_name', 'last_name')
    inlines = [AdvertisementInline]
|
from flask import Flask,render_template, request
import mysql.connector
app = Flask(__name__)
@app.route('/')
def hello():
    """Serve the login page."""
    return render_template('login.html')
@app.route('/re_s')
def reg_s():
    """Serve the student registration form."""
    return render_template('register.html')
@app.route('/re_t')
def reg_t():
    """Serve the teacher registration form."""
    return render_template('register_teacher.html')
@app.route('/msg')
def mssg():
    """Serve the admin message page."""
    return render_template('admin_msg.html')
@app.route('/re_s', methods=['POST'])
def regi_s():
    """Register a new student (status 'false' = pending approval)."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    try:
        fn = str(request.form['r1'])
        ln = str(request.form['r2'])
        em = str(request.form['r3'])
        yr = str(request.form['r4'])
        br = str(request.form['r5'])
        pw = str(request.form['r6'])
        cur = conn.cursor()
        # Parameterized query: the old string concatenation was SQL-injectable.
        cur.execute(
            "insert into students values (%s, %s, %s, %s, %s, %s, %s, 'false')",
            ('', fn, ln, em, yr, br, pw))
        conn.commit()
    finally:
        conn.close()  # previously the connection leaked on every request
    return render_template('student.html')
@app.route('/re_t', methods=['POST'])
def regi_t():
    """Register a new teacher (status 'false' = pending approval)."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password="")
    try:
        fn = str(request.form['a1'])
        ln = str(request.form['a2'])
        em = str(request.form['a3'])
        br = str(request.form['a4'])
        sb = str(request.form['a5'])
        pw = str(request.form['a6'])
        cur = conn.cursor()
        # Parameterized query: the old string concatenation was SQL-injectable.
        cur.execute(
            "insert into teacher values (%s, %s, %s, %s, %s, %s, %s, 'false')",
            ('', fn, ln, em, br, sb, pw))
        conn.commit()
    finally:
        conn.close()
    return render_template('teacher.html')
@app.route('/log_in', methods=['POST'])
def logcode():
    """Authenticate a user and route to the role-specific landing page."""
    em = str(request.form['p1'])
    pw = str(request.form['p2'])
    conn = mysql.connector.connect(host="localhost", db="db5", user="root", password="")
    try:
        cur = conn.cursor()
        # Parameterized: the concatenated version allowed classic
        # "' OR '1'='1" login-bypass SQL injection.
        cur.execute("select * From log_in where email=%s and password=%s", (em, pw))
        ar = cur.fetchone()
    finally:
        conn.close()
    if ar is None:
        # Unknown credentials: return to login instead of crashing on ar[2].
        return render_template("login.html")
    if (ar[2] == "admin"):
        return render_template("admin.html", user=ar)
    elif (ar[2] == "student"):
        return render_template("after_login_student.html", user=ar)
    elif (ar[2] == "teacher"):
        return render_template("after_login_teacher.html", user=ar)
@app.route('/trlist')
def teacher():
    """List all teacher records for the admin view."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    try:
        cur = conn.cursor()
        cur.execute("select * from teacher")
        ar = cur.fetchall()
    finally:
        conn.close()  # previously the connection leaked on every request
    return render_template('teacher_list.html', data=ar)
@app.route('/stulist')
def student():
    """List all student records for the admin view."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    try:
        cur = conn.cursor()
        cur.execute("select * from students")
        ar = cur.fetchall()
    finally:
        conn.close()  # previously the connection leaked on every request
    return render_template('student_list.html', data=ar)
@app.route('/msglist')
def msglist():
    """List all submitted messages for the admin view."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    try:
        cur = conn.cursor()
        cur.execute("select * from msg")
        ar = cur.fetchall()
    finally:
        conn.close()  # previously the connection leaked on every request
    return render_template('admin_msg.html', data=ar)
@app.route('/upd')
def update():
    """Approve a teacher: mark them active and create a login row."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    try:
        cur = conn.cursor()
        teacher_id = request.args.get('id')  # renamed: `id` shadowed the builtin
        # Parameterized: the id came straight from the query string (injectable).
        cur.execute("update teacher set status='true' where id=%s", (teacher_id,))
        conn.commit()
        cur.execute("select * from teacher where id=%s", (teacher_id,))
        arl = cur.fetchone()
        em = str(arl[3])
        pwd = str(arl[6])
        cur.execute("insert into log_in values(%s, %s, 'teacher')", (em, pwd))
        conn.commit()
    finally:
        conn.close()
    return render_template('teacher_list.html')
@app.route('/upddt')
def del_t():
    """Revoke a teacher's approval (set status back to 'false')."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    try:
        cur = conn.cursor()
        teacher_id = request.args.get('id')
        # Parameterized: the id came straight from the query string (injectable).
        cur.execute("update teacher set status='false' where id=%s", (teacher_id,))
        conn.commit()
    finally:
        conn.close()
    return render_template('teacher_list.html')
@app.route('/upds')
def update_st():
    """Approve a student: mark the row active and create login credentials.

    Mirrors :func:`update` for the ``students`` table, inserting the
    student's email/password into ``log_in`` with the 'student' role.
    """
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    cur = conn.cursor()
    student_id = request.args.get('id')
    # Parameterized queries: the original concatenated the raw ``id``
    # query parameter straight into the SQL text (SQL injection).
    cur.execute("update students set status='true' where id=%s", (student_id,))
    conn.commit()
    cur.execute("select * from students where id=%s", (student_id,))
    arl = cur.fetchone()
    em = str(arl[3])
    pwd = str(arl[6])
    # NOTE(review): passwords are copied in plaintext — should be hashed.
    cur.execute("insert into log_in values(%s,%s,'student')", (em, pwd))
    conn.commit()
    cur.execute("select * from students")
    ar = cur.fetchall()
    # Pass the fetched rows to the template; the original rendered the
    # list page without any data.
    return render_template('student_list.html', data=ar)
@app.route('/updds')
def del_st():
    """Deactivate (soft-delete) a student identified by the ``id`` query param."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password='')
    cur = conn.cursor()
    student_id = request.args.get('id')
    # Parameterized query — the original concatenated the raw ``id``
    # query parameter into the SQL text (SQL injection).
    cur.execute("update students set status='false' where id=%s", (student_id,))
    conn.commit()
    return render_template('student_list.html')
@app.route('/msg', methods=['POST'])
def msg():
    """Store a contact/notice message submitted from the public form."""
    conn = mysql.connector.connect(host='localhost', db='db5', user='root', password="")
    em = str(request.form['s1'])
    br = str(request.form['s2'])
    yr = str(request.form['s3'])
    message = str(request.form['s4'])
    cur = conn.cursor()
    # Parameterized insert: the original built the SQL by string
    # concatenation (SQL injection).  The first column keeps the empty
    # string the original supplied (auto-id placeholder — TODO confirm).
    cur.execute("insert into msg values (%s,%s,%s,%s,%s)", ('', em, br, yr, message))
    conn.commit()
    return render_template('login.html')
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
#!/usr/local/bin/python
# _____ _
# | __ \ | |
# _ __ _ _| |__) |__ ___| | __
# | '_ \| | | | ___/ _ \/ _ \ |/ /
# | |_) | |_| | | | __/ __/ <
# | .__/ \__, |_| \___|\___|_|\_\ v0.0.1
# | | __/ |
# |_| |___/
import os, sys, httplib, urlparse
def unshorten_url(url):
    """Follow HTTP redirects for *url* and return the final destination.

    Issues a single HEAD request; on a 3xx response carrying a Location
    header the function recurses, so whole chains of shorteners are
    resolved.  Any network or parse failure returns the input URL
    unchanged (deliberate best-effort behaviour).  Python 2 only
    (httplib/urlparse).
    """
    try:
        parsed = urlparse.urlparse(url)
        if parsed.scheme == "https":
            h = httplib.HTTPSConnection(parsed.netloc)
        else:
            h = httplib.HTTPConnection(parsed.netloc)
        resource = parsed.path
        if parsed.query != "":
            resource += "?" + parsed.query
        h.request('HEAD', resource)
        response = h.getresponse()
        if response.status / 100 == 3 and response.getheader('Location'):
            # Recurse to process chains of short urls.
            return unshorten_url(response.getheader('Location'))
        else:
            return url
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit and KeyboardInterrupt.
        return url
def color(text, color):
    """Wrap *text* in ANSI bold plus the named *color* escape sequence."""
    palette = {
        'yellow': '\033[93m',
        'green': '\033[92m',
        'bold': '\033[1m',
        'end': '\033[0m',
    }
    return "%s%s%s%s" % (palette['bold'], palette[color], text, palette['end'])
# Command-line entry point (Python 2): unshorten a single URL given as
# argv[1], or — when argv[1] ends in ".txt" — batch-process a file of
# URLs (one per line), writing resolved links to argv[2]/output.txt.
if __name__ == "__main__":
    if len(sys.argv)>1:
        target = sys.argv[1]
        # Batch mode is selected purely by the ".txt" extension.
        file_op = False or target.lower().endswith(".txt")
        if file_op:
            if not os.path.exists(target):
                print "ERROR: %s does not exist. Check the path and try again."%(target)
            else:
                print "Beginning batch processing of shortened URLs:"
                print "-"*100
                f = open(target, "r")
                lines = f.readlines()
                # Output path: argv[2] if supplied, otherwise output.txt.
                if len(sys.argv)>2:
                    output_file = sys.argv[2]
                    f2 = open(output_file, "w")
                else:
                    output_file = "output.txt"
                    f2 = open(output_file, "w")
                # NOTE(review): each URL is resolved twice here — once for
                # the file and once for the console line.
                for i in lines:
                    f2.write(unshorten_url(i.replace("\n", ""))+"\n")
                    print "%s <-- forwards to --> %s"%(color(i.replace("\n", ""), 'yellow'), color(unshorten_url(i.replace("\n", "")), 'green'))
                f.close()
                f2.close()
                print "-"*100
                print "Unshortened links can be found here: %s"%(color(output_file, 'green'))
        else:
            #they are trying to plug in a single link to unshorten
            print "-"*100
            print "%s <-- forwards to --> %s"%(color(target, 'yellow'), color(unshorten_url(target), 'green'))
            print "-"*100
    else:
        print "ERROR: You must supply a URL (or path to a file full of URLs) to unshorten."
|
from pyspark.sql import SparkSession
from pyspark.ml.feature import *
from pyspark.ml.regression import LinearRegression
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml import Pipeline
# Local SparkSession for the model-tuning quiz.
spark = SparkSession \
    .builder \
    .appName("quiz_modeltuning") \
    .getOrCreate()
# read data; dataset is from https://stackoverflow.blog/2009/06/04/stack-overflow-creative-commons-data-dump/
sto_data = spark.read.json("data/train_onetag_small.json") # sto = stack overflow
# create train test split
# (randomSplit normalizes the weights, so [90.0, 10.0] is a 90/10 split)
train, test = sto_data.randomSplit([90.0, 10.0], seed=42)
regexTokenizer = RegexTokenizer(inputCol="Body", outputCol="words", pattern="\\W")
sto_data = regexTokenizer.transform(sto_data) # new column, where split words are saved as list
# Bag-of-words term frequencies over a capped vocabulary.
cv = CountVectorizer(inputCol="words", outputCol="TF", vocabSize=1000)
cvmodel = cv.fit(sto_data)
sto_data = cvmodel.transform(sto_data)
# TF-IDF features from the raw term frequencies.
idf = IDF(inputCol="TF", outputCol="features")
idfmodel = idf.fit(sto_data)
sto_data = idfmodel.transform(sto_data)
# Encode the question's single tag as the numeric label.
indexer = StringIndexer(inputCol="oneTag", outputCol="label")
indexermodel = indexer.fit(sto_data)
sto_data = indexermodel.transform(sto_data)
lr = LogisticRegression(maxIter=10, regParam=0.0, elasticNetParam=0)
# The pipeline re-applies the same stages itself, so cross-validation
# fits them on the raw ``train`` split (the sto_data transforms above
# are demonstration only and are not used by the CV below).
pipeline = Pipeline(stages=[regexTokenizer, cv, idf, indexer, lr])
# cross validation
paramGrid = ParamGridBuilder() \
    .addGrid(cv.vocabSize, [1000, 5000]) \
    .addGrid(lr.regParam, [0.0, 0.1]) \
    .addGrid(lr.maxIter, [10.0]) \
    .build()
crossval = CrossValidator(estimator=pipeline,
                          estimatorParamMaps=paramGrid,
                          evaluator=MulticlassClassificationEvaluator(),
                          numFolds=3)
# train = train.drop(train.words)
cvModel = crossval.fit(train)
cvModel.avgMetrics  # average evaluator metric per param-grid combination
results = cvModel.transform(test)
results.filter(results.label == results.prediction).count() # gives number of accurately predicted labels on test set
results.count()
spark.stop()
|
# Smoke-test prints (Korean onomatopoeia); no functional purpose.
print("얍얍얍얍얍얍얍")
print("욥욥욥욥욥욥")
import numpy as np
import pickle
class PriorBoxCreator(object):
    """Generates SSD-style prior (anchor) boxes for a 300x300 input image.

    Per-layer configurations (grid size, prior count, aspect ratios, and
    min/max box sizes) are loaded from a pickled file.  Boxes are emitted
    as (xmin, ymin, xmax, ymax) rows, normalized to [0, 1] and clipped.
    """
    def __init__(self):
        # Fixed network input resolution (width, height) — SSD300 layout.
        self.image_shape = (300, 300)
        self.model_configurations = self._get_model_configurations()
    def _get_model_configurations(self):
        # NOTE(review): the file handle is never closed; relies on GC.
        return pickle.load(open('utils/model_configurations.p', 'rb'))
    def create_boxes(self):
        """Return an (N, 4) float array of clipped, normalized prior boxes."""
        image_width, image_height = self.image_shape
        boxes_parameters = []
        for layer_config in self.model_configurations:
            layer_width = layer_config["layer_width"]
            layer_height = layer_config["layer_height"]
            num_priors = layer_config["num_prior"]
            aspect_ratios = layer_config["aspect_ratios"]
            min_size = layer_config["min_size"]
            max_size = layer_config["max_size"]
            # Half-cell offsets so box centers sit mid-cell on the feature grid.
            step_x = 0.5 * (float(image_width) / float(layer_width))
            step_y = 0.5 * (float(image_height) / float(layer_height))
            linspace_x = np.linspace(step_x, image_width - step_x, layer_width)
            linspace_y = np.linspace(step_y, image_height - step_y, layer_height)
            centers_x, centers_y = np.meshgrid(linspace_x, linspace_y)
            centers_x = centers_x.reshape(-1, 1)
            centers_y = centers_y.reshape(-1, 1)
            assert(num_priors == len(aspect_ratios))
            # One (cx, cy) pair per prior; tiled so each prior gets a
            # (cx, cy, cx, cy) slot to turn into (xmin, ymin, xmax, ymax).
            prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
            prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors))
            box_widths = []
            box_heights = []
            for aspect_ratio in aspect_ratios:
                if aspect_ratio == 1 and len(box_widths) == 0:
                    # First ratio-1 prior: the small square box.
                    box_widths.append(min_size)
                    box_heights.append(min_size)
                elif aspect_ratio == 1 and len(box_widths) > 0:
                    # Second ratio-1 prior: geometric mean of min/max sizes.
                    box_widths.append(np.sqrt(min_size * max_size))
                    box_heights.append(np.sqrt(min_size * max_size))
                elif aspect_ratio != 1:
                    box_widths.append(min_size * np.sqrt(aspect_ratio))
                    box_heights.append(min_size / np.sqrt(aspect_ratio))
            # Halved because they are applied on both sides of the center.
            box_widths = 0.5 * np.array(box_widths)
            box_heights = 0.5 * np.array(box_heights)
            prior_boxes[:, ::4] -= box_widths
            prior_boxes[:, 1::4] -= box_heights
            prior_boxes[:, 2::4] += box_widths
            prior_boxes[:, 3::4] += box_heights
            # Normalize to [0, 1] relative to the input image, then clip.
            prior_boxes[:, ::2] /= image_width
            prior_boxes[:, 1::2] /= image_height
            prior_boxes = prior_boxes.reshape(-1, 4)
            layer_prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)
            boxes_parameters.append(layer_prior_boxes)
        return np.concatenate(boxes_parameters, axis=0)
# Smoke test: build the boxes and report how many were generated.
if __name__ == '__main__':
    prior_box_creator = PriorBoxCreator()
    prior_boxes = prior_box_creator.create_boxes()
    print('Number of prior boxes created:', len(prior_boxes))
|
import nltk
# One-time corpus downloads required by the nltk tokenizer.
nltk.download('wordnet')
nltk.download('punkt')
# Indonesian ambiguous target words for the Lesk demo.
word_1 = 'pintar'
word_2 = 'makan'
word_3 = 'cinta'
# Keys used in each sense entry; gloss text is a placeholder ('arti kata').
gloss = 'gloss'
example = 'example'
arti_kata = 'arti kata'
# Two example sentences per word, one per sense.
sentences_1_first = "rupanya pencuri itu lebih pintar daripada polisi"
sentences_1_second = "mereka sudah pintar membuat baju sendiri"
sentences_2_first = "pembangunan jembatan ini makan waktu lama"
sentences_2_second = "upacara adat itu makan ongkos besar"
sentences_3_first = "orang tuaku cinta kepada kami semua"
sentences_3_second = "cinta kepada sesama makhluk"
# Sense inventory: word -> list of {gloss, example} dicts.
word_ambigu = {
    word_1: [
        {gloss: arti_kata, example: sentences_1_first},
        {gloss: arti_kata, example: sentences_1_second}],
    word_2: [
        {gloss: arti_kata, example: sentences_2_first},
        {gloss: arti_kata, example: sentences_2_second}],
    word_3: [
        {gloss: arti_kata, example: sentences_3_first},
        {gloss: arti_kata, example: sentences_3_second}]}
# Number of senses recorded per ambiguous word.
length_dict = {key: len(value) for key, value in word_ambigu.items()}
def lesk(kata, sentence):
    """Pick the sense of *kata* whose entry overlaps *sentence* the most.

    Each token of *sentence* is compared against every string value of a
    sense entry in ``word_ambigu`` (substring containment counts as a
    hit).  Returns the sense dict with the highest hit count; ties keep
    the earlier sense, defaulting to the first one.
    """
    senses = word_ambigu[kata]
    best_sense = senses[0]
    max_overlap = 0
    tokens = nltk.word_tokenize(sentence)
    for sense in senses:
        overlap = 0
        for token in tokens:
            for text in sense.values():
                if token in text:
                    overlap += 1
        if overlap > max_overlap:
            max_overlap = overlap
            best_sense = sense
    return best_sense
# Demo: disambiguate each word against both of its example sentences.
print(lesk(word_1, sentences_1_first))
print(lesk(word_1, sentences_1_second))
print(lesk(word_2, sentences_2_first))
print(lesk(word_2, sentences_2_second))
print(lesk(word_3, sentences_3_first))
print(lesk(word_3, sentences_3_second))
|
#!/usr/bin/python3
def square_matrix_simple(matrix=[]):
    """Return a new matrix with every element squared.

    Args:
        matrix: a list of lists of numbers; never mutated.

    Returns:
        A new list of lists where each value is the square of the input.
    """
    # Nested comprehension replaces the map/lambda + append loop.  The
    # mutable default ``matrix=[]`` is kept for interface compatibility
    # and is safe here only because the argument is never mutated.
    return [[value * value for value in row] for row in matrix]
|
# Best-effort self-bootstrap: if urllib3/requests are missing, install
# them via pip's in-process entry point and retry the import.
# NOTE(review): ``pip._internal.main`` is a private, unsupported API —
# invoking pip as a subprocess is the recommended approach.
try:
    import urllib3
except ImportError:
    try:
        from pip._internal import main
    except ImportError:
        # Very old pip versions exposed ``main`` at the top level.
        from pip import main
    main(['install', 'urllib3'])
    import urllib3
try:
    import requests
except ImportError:
    try:
        from pip._internal import main
    except ImportError:
        from pip import main
    main(['install', 'requests'])
    import requests
|
from .AddPartForm import AddPart |
import csv
"""
Intro to Python Lab 1, Task 4
Complete each task in the file for that task. Submit the whole folder
as a zip file or GitHub repo.
Full submission instructions are available on the Lab Preparation page.
"""
"""
Read file into texts and calls.
It's ok if you don't understand how to read files
You will learn more about reading files in future lesson
"""
with open('calls.csv', 'r') as f:
    calls = list(csv.reader(f))
# Outgoing callers and numbers that received a call (set comprehensions
# replace the original list-then-set() round trip).
callers = {record[0] for record in calls}
numbers_received_call = {record[1] for record in calls}
with open('texts.csv', 'r') as f:
    texts = list(csv.reader(f))
numbers_sent_msg = {record[0] for record in texts}
numbers_received_msg = {record[1] for record in texts}
# Telemarketers: numbers that make calls but never send/receive texts
# and never receive calls.
result = callers - numbers_received_msg - numbers_sent_msg - numbers_received_call
print("These numbers could be telemarketers: ")
# Plain loop instead of the original list comprehension that was used
# purely for its print side effect.
for number in sorted(result):
    print(number)
"""
TASK 4:
The telephone company want to identify numbers that might be doing
telephone marketing. Create a set of possible telemarketers:
these are numbers that make outgoing calls but never send texts,
receive texts or receive incoming calls.
Print a message:
"These numbers could be telemarketers: "
<list of numbers>
The list of numbers should be print out one per line in lexicographic order with no duplicates.
"""
|
from enum import Enum
class Symbol(Enum):
    """Tape/card symbols: digits 0-9, letters a-g (CARDA..CARDG), blank CARD_."""
    CARD0 = 0
    CARD1 = 1
    CARD2 = 2
    CARD3 = 3
    CARD4 = 4
    CARD5 = 5
    CARD6 = 6
    CARD7 = 7
    CARD8 = 8
    CARD9 = 9
    CARDA = 10
    CARDB = 11
    CARDC = 12
    CARDD = 13
    CARDE = 14
    CARDF = 15
    CARDG = 16
    CARD_ = 17
    @classmethod
    def by_rep(cls, rep):
        """Map a one-character representation to its Symbol member."""
        rep = rep.lower()
        try:
            return cls(int(rep))
        except ValueError:
            pass
        if rep == "_":
            return cls["CARD_"]
        # Letters map through their ASCII code: 'a' -> 10, 'b' -> 11, ...
        return cls(ord(rep) - 87)
    @classmethod
    def max_len(cls, title):
        """Widest printable member name (sans the 'CARD' prefix) vs *title*."""
        widths = [len(member.name) - len("CARD") for member in cls]
        widths.append(len(title))
        return max(widths)
class State(Enum):
    """Machine states, parsed from reps shaped '<2-char prefix><name>'."""
    EAT = 0
    INCR = 1
    END = 2
    @classmethod
    def by_rep(cls, rep):
        """Drop the two-character prefix and look the member up by name."""
        return cls[rep[2:].upper()]
    @classmethod
    def max_len(cls, title):
        """Length of the longest member name, but at least len(title)."""
        return max(len(title), *(len(member.name) for member in cls))
class Direction(Enum):
    """Head-movement direction parsed from '>', '<' or '-'."""
    RIGHT = 0
    LEFT = 1
    NONE = 2
    @classmethod
    def by_rep(cls, rep):
        """Map '>' to RIGHT, '-' to NONE, anything else to LEFT."""
        special = {">": cls.RIGHT, "-": cls.NONE}
        return special.get(rep, cls.LEFT)
    @classmethod
    def max_len(cls, title):
        """Length of the longest member name, but at least len(title)."""
        lengths = [len(member.name) for member in cls]
        lengths.append(len(title))
        return max(lengths)
|
import torch
from torch import nn
class ONet(nn.Module):
    """MTCNN O-Net-style CNN repurposed as a single-head classifier.

    Four conv/PReLU stages with pooling, then two dense layers ending in
    a ``class_num``-way linear head.  The 1152-unit flatten implies a
    fixed input spatial size (48x48 for the classic O-Net — TODO confirm
    against the training pipeline).
    """
    def __init__(self, class_num):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
        self.prelu1 = nn.PReLU(32)
        self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.prelu2 = nn.PReLU(64)
        self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
        self.prelu3 = nn.PReLU(64)
        self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
        self.prelu4 = nn.PReLU(128)
        self.dense5 = nn.Linear(1152, 256)
        self.prelu5 = nn.PReLU(256)
        self.dense6_1 = nn.Linear(256, class_num) # age
        # NOTE(review): shadows nn.Module's own ``training`` flag; the
        # default is already True, so this assignment is redundant.
        self.training = True
    def forward(self, x):
        """Run the conv stack, flatten, and return (batch, class_num) scores."""
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.prelu4(x)
        # Flatten per sample before the dense layers.
        x = self.dense5(x.view(x.shape[0], -1))
        x = self.prelu5(x)
        a = self.dense6_1(x)
        return a
def raspnet(**kwargs):
    """Model factory: build the network named by ``kwargs['name']``.

    Only 'onet_a' is supported, which yields an ``ONet`` with
    ``kwargs['class_num']`` output classes.
    """
    if kwargs['name'] != 'onet_a':
        raise NotImplementedError
    return ONet(kwargs['class_num'])
# Module is import-only; nothing to run when executed directly.
if __name__ == '__main__':
    pass
|
from Node import Node
from Edge import Edge
from Partition import Partition
class Graph:
    """Directed weighted graph that can be split into (optionally
    overlapping) partitions bounded by a path-weight criterion.

    Nodes are string identifiers; edges carry float weights.  An
    adjacency dictionary of ``Node`` objects mirrors the edge list and
    holds per-node bookkeeping (partition id, accumulated max weight,
    sensor count) used by the recursive partitioning below.

    NOTE(review): ``self.partitions`` is a dict of id -> node-set during
    partitioning but is converted to a plain list of node-sets at the end
    of :meth:`partition` — callers must be aware of both shapes.
    """
    def __init__(self, identifier):
        if not (isinstance(identifier, int)):
            raise TypeError("Please only call this function with an int as the argument")
        self.identifier = identifier
        self.edges = list()
        self.nodes = set()
        self.partitions = dict()
        self.partition_start_nodes = list()
        self.partition_center_nodes = list()
        self.partition_end_nodes = list()
        self.start_nodes = list()
        self.adjacency_dict = dict()
        self.total_weight = 0.0
        # Partition variables
        self.weight_criteria = -1.0
        self.weight_overlap = -1.0
        self.tmp_partition = set()
        self.tmp_key = 0
        self.reset_partition = set()
        self.visited_nodes = set()
        # Overlapping partition variables
        self.partition_graph = dict()
        # Verification variables
        self.too_long_paths = list()
        # Variables for visualization
        self.partitioning_record = list()
    def __str__(self):
        return "Id: " + str(self.identifier) + \
            ", total weight: " + str(self.total_weight) + \
            ", number of nodes: " + str(len(self.nodes))
    def add_edge(self, edge):
        """Method adds an edge and its nodes to the graph while constructing an adjacency dictionary"""
        if not (isinstance(edge, Edge)):
            raise TypeError("Please only call this function with an Edge object as the argument")
        self.edges.append(edge)
        self.nodes.add(edge.src)
        self.nodes.add(edge.dest)
        self.total_weight += edge.weight
        if edge.src not in self.adjacency_dict:
            self.adjacency_dict[edge.src] = Node(edge.src)
        if edge.dest not in self.adjacency_dict:
            self.adjacency_dict[edge.dest] = Node(edge.dest)
        # Record the edge in both directions for forward/backward walks.
        self.adjacency_dict[edge.src].add_forward(edge.dest, edge.weight)
        self.adjacency_dict[edge.dest].add_backward(edge.src, edge.weight)
    def add_node_properties(self, node, num_sensors):
        """Method adds properties to node in the graph"""
        if not (isinstance(node, str) and isinstance(num_sensors, int)):
            raise TypeError("Please only call this function with a str and a int as the arguments")
        self.adjacency_dict[node].num_sensors = num_sensors
    def reset_partition_id(self, nodes, new_id = -1):
        """Method resets the partition_id in the adjacency dictionary for a set of nodes"""
        if not (isinstance(nodes, set) and isinstance(new_id, int)):
            raise TypeError("Please only call this function with a set and a int as the arguments")
        for node in nodes:
            self.adjacency_dict[node].partition_id = new_id
    def find_start_nodes(self, backwards=False):
        """Method finds nodes on the edge of the graph based"""
        # Sources = nodes that are never a destination; sinks = never a source.
        end_nodes = self.nodes.copy()
        start_nodes = self.nodes.copy()
        for edge in self.edges:
            if edge.dest in start_nodes:
                start_nodes.remove(edge.dest)
            if edge.src in end_nodes:
                end_nodes.remove(edge.src)
        if backwards:
            self.start_nodes = list(end_nodes)
        else:
            self.start_nodes = list(start_nodes)
    def partition(self, weight_criteria, merge_criteria=0.0, backwards=True, record_steps=False):
        """
        Method partitions graph into partitions based on weight criteria given in minutes.
        Partitions with lower or equal size to merge_criteria will be merged into larger partition.
        """
        if not (isinstance(weight_criteria, float) and isinstance(merge_criteria, float) and isinstance(backwards, bool)):
            raise TypeError("Please only call this function with a str, a float and a boolean as the arguments")
        self.weight_criteria = weight_criteria
        if self.total_weight <= self.weight_criteria:
            # Whole graph fits in one partition.
            self.partitions = [self.nodes.copy()]
            return
        self.find_start_nodes(backwards)
        self.partitions = dict()
        self.tmp_key = 0
        # While there are nodes to partition from
        while len(self.start_nodes) > 0:
            node = self.start_nodes.pop(0)
            self.tmp_partition = set()
            # start partitioning from start node.
            self.partition_helper(node, 0.0, backwards)
            if len(self.tmp_partition) == 0:
                continue
            # Update partition_id
            for n in self.tmp_partition:
                self.adjacency_dict[n].partition_id = self.tmp_key
            # Add to partitions
            self.partitions[self.tmp_key] = self.tmp_partition.copy()
            self.tmp_key += 1
            # Record of partitioning steps for visualization
            if record_steps:
                self.partitioning_record.append([x for x in self.partitions.values() if x])
        # CLEAN UP PHASE
        if merge_criteria > 0:
            partition_ids = [part_id for part_id in self.partitions]
            for part_id in partition_ids:
                if part_id not in self.partitions:
                    # partition has been merged
                    continue
                max_weight = self.partition_max_node_weight(self.partitions[part_id])
                if max_weight < merge_criteria:
                    neighbour_id = self.find_smallest_neighbour_partition(self.partitions[part_id])
                    if neighbour_id == -1:
                        continue
                    self.merge_partitions(part_id, neighbour_id)
                    # TODO: update node weights
        # remove empty partitions
        # (also converts self.partitions from dict to list — see class note)
        self.partitions = [x for x in self.partitions.values() if x]
    def partition_helper(self, node, current_weight, backwards=False):
        """Method runs through graph assigning nodes to partition"""
        if not (isinstance(node, str) and isinstance(current_weight, float)):
            raise TypeError("Please only call this function with a str and a float as the arguments")
        if self.adjacency_dict.get(node).partition_id != -1:
            # Stop if node has been assigned to a partition
            return
        if node in self.tmp_partition:
            # Prevent partition cycles
            return
        if current_weight > self.weight_criteria:
            # Weight criteria met
            if node not in self.start_nodes:
                self.start_nodes.append(node)
            return
        self.adjacency_dict[node].max_weight = current_weight
        self.tmp_partition.add(node)
        node_properties = self.adjacency_dict.get(node)
        next_nodes = node_properties.backward_nodes if backwards else node_properties.forward_nodes
        next_weights = node_properties.backward_weights if backwards else node_properties.forward_weights
        for i in range(0, len(next_nodes)):
            n_node = next_nodes[i]
            part_id = self.adjacency_dict[n_node].partition_id
            weight = next_weights[i]
            if self.adjacency_dict[n_node].partition_id == -1:
                # Node has not been assigned to a partition
                self.partition_helper(
                    node=n_node,
                    current_weight=current_weight + weight,
                    backwards=backwards
                )
            else:
                # Merge if intersecting with another partition
                if self.loop_checker(node):
                    # Skip if partition loops
                    return
                elif current_weight + weight <= self.adjacency_dict[n_node].max_weight:
                    # Partitions can be merged if weight does not effect previous partition
                    self.reset_partition_id(self.partitions[part_id])
                    self.tmp_partition = self.partitions[part_id].union(self.tmp_partition)
                    del self.partitions[part_id]
                elif current_weight + weight <= self.weight_criteria:
                    # check if new partition is greater than
                    # Special case where we will need to modify previous partition with new weights
                    self.reset_partition_id(self.partitions[part_id])
                    self.reset_partition = self.partitions[part_id].copy()
                    del self.partitions[part_id]
                    self.partition_reset(n_node, backwards)
                    self.tmp_partition = self.reset_partition.union(self.tmp_partition)
                    self.partition_helper(n_node, current_weight + weight, backwards)
                    self.remove_disconnected_nodes(backwards)
    def loop_checker(self, start_node):
        # Returns True when a forward cycle through start_node exceeds the
        # weight criteria (so the partitions must NOT be merged).
        if not (isinstance(start_node, str)):
            raise TypeError("Please only call this function with a str as the argument")
        self.visited_nodes = {start_node}
        node_properties = self.adjacency_dict.get(start_node)
        for f in range(0, len(node_properties.forward_nodes)):
            f_node = node_properties.forward_nodes[f]
            f_weight = node_properties.forward_weights[f]
            if self.loop_checker_helper(f_node, start_node, f_weight):
                return True
        return False
    def loop_checker_helper(self, node, start_node, weight):
        # DFS companion to loop_checker, accumulating path weight.
        if not (isinstance(node, str) and isinstance(start_node, str) and isinstance(weight, float)):
            raise TypeError("Please only call this function with two str and a float as the arguments")
        if node == start_node:
            # Path loops
            if weight <= self.weight_criteria:
                # Ok to merge
                return False
            return True
        if node in self.visited_nodes:
            # Path loops
            return False
        self.visited_nodes.add(node)
        node_properties = self.adjacency_dict.get(node)
        for f in range(0, len(node_properties.forward_nodes)):
            f_node = node_properties.forward_nodes[f]
            f_weight = node_properties.forward_weights[f]
            if self.loop_checker_helper(f_node, start_node, f_weight + weight):
                return True
        return False
    def partition_reset(self, node, backwards=False):
        # Clears the accumulated max_weight of nodes downstream of ``node``
        # that belong to the partition being re-absorbed.
        if node in self.reset_partition:
            self.adjacency_dict[node].max_weight = 0
            self.reset_partition.remove(node)
            node_properties = self.adjacency_dict.get(node)
            next_nodes = node_properties.backward_nodes if backwards else node_properties.forward_nodes
            for n_node in next_nodes:
                self.partition_reset(n_node)
    def remove_disconnected_nodes(self, backwards=False):
        # Drops nodes that lost their connection to the partition after a
        # reset-merge and queues them as fresh start nodes.
        start_nodes = self.find_partition_edge_nodes(self.reset_partition, backwards)
        for node in start_nodes:
            connected = self.is_node_connected(node, backwards)
            if not connected:
                self.tmp_partition.remove(node)
                self.start_nodes.append(node)
    def is_node_connected(self, node, backwards=False):
        # True when some path from ``node`` reaches a node that is in the
        # current tmp_partition but not in the reset set.
        if node in self.tmp_partition and node not in self.reset_partition:
            return True
        node_props = self.adjacency_dict[node]
        next_nodes = node_props.backward_nodes if backwards else node_props.forward_nodes
        for n_node in next_nodes:
            if self.is_node_connected(n_node):
                return True
        return False
    # Clean up phase functions
    def partition_max_node_weight(self, partition):
        # Largest accumulated max_weight among the partition's nodes.
        if not (isinstance(partition, set)):
            raise TypeError("Please only call this function with set as the argument")
        max_weight = 0
        for node in partition:
            node_weight = self.adjacency_dict[node].max_weight
            if node_weight > max_weight:
                max_weight = node_weight
        return max_weight
    def find_smallest_neighbour_partition(self, partition):
        # Returns the id of the adjacent partition with the smallest max
        # node weight, or -1 if there is none.
        if not (isinstance(partition, set)):
            raise TypeError("Please only call this function with set as the argument")
        start_nodes = self.find_partition_edge_nodes(partition, end=False)
        end_nodes = self.find_partition_edge_nodes(partition, end=True)
        partition_id = -1
        partition_min_weight = -1
        for e_node in end_nodes:
            for f_node in self.adjacency_dict[e_node].forward_nodes:
                part_id = self.adjacency_dict[f_node].partition_id
                part_weight = self.partition_max_node_weight(self.partitions[part_id])
                if partition_min_weight == -1 or partition_min_weight > part_weight:
                    partition_id = part_id
                    partition_min_weight = part_weight
        for s_node in start_nodes:
            for b_node in self.adjacency_dict[s_node].backward_nodes:
                part_id = self.adjacency_dict[b_node].partition_id
                part_weight = self.partition_max_node_weight(self.partitions[part_id])
                if partition_min_weight == -1 or partition_min_weight > part_weight:
                    partition_id = part_id
                    partition_min_weight = part_weight
        return partition_id
    def merge_partitions(self, a_id, b_id):
        # Absorbs partition b into a and re-labels b's nodes.
        if not (isinstance(a_id, int) and isinstance(b_id, int)):
            raise TypeError("Please only call this function with ints as the arguments")
        if a_id == b_id:
            # don't merge a partition with itself
            return
        self.partitions[a_id] = self.partitions[a_id].union(self.partitions[b_id])
        self.reset_partition_id(self.partitions[b_id], a_id)
        del self.partitions[b_id]
    def partition_with_overlap(self, base_partition_weight, forward_overlap, backward_overlap):
        """
        Method partitions graph into overlapping partitions based on weight criteria and overlap both given in minutes.
        Method starts by partitioning graph with partition method and then adds overlap.
        """
        if not (isinstance(base_partition_weight, float) and isinstance(forward_overlap, int) and isinstance(backward_overlap, int)):
            raise TypeError("Please only call this function with float, int and int as the arguments")
        self.partition(base_partition_weight)
        # Create partition dictionary
        # (partition() returned a list; re-index it as id -> node-set)
        self.partitions = dict(zip(range(len(self.partitions)), self.partitions))
        for i in self.partitions:
            self.reset_partition_id(self.partitions[i], i)
        # Generate partition graph
        self.partition_graph = dict()
        for index in self.partitions:
            part = Partition(index)
            part.add_nodes(self.partitions[index])
            end_nodes = self.find_partition_edge_nodes(self.partitions[index], end=True)
            start_nodes = self.find_partition_edge_nodes(self.partitions[index], end=False)
            for node in end_nodes:
                for f_node in self.adjacency_dict[node].forward_nodes:
                    part.add_forward_partition(self.adjacency_dict[f_node].partition_id)
            for node in start_nodes:
                for b_node in self.adjacency_dict[node].backward_nodes:
                    part.add_backward_partition(self.adjacency_dict[b_node].partition_id)
            self.partition_graph[index] = part
        # Generate overlapping partitions
        for index in self.partition_graph:
            for next_part in self.partition_graph[index].forward_partitions:
                self.forward_overlap_helper(index, next_part, 0, forward_overlap)
            for next_part in self.partition_graph[index].backward_partitions:
                self.backward_overlap_helper(index, next_part, 0, backward_overlap)
        # Clean up overlap between forward nodes and backward nodes
        for index in self.partition_graph:
            c_nodes = self.partition_graph[index].nodes
            f_nodes = self.partition_graph[index].forward_nodes
            b_nodes = self.partition_graph[index].backward_nodes
            self.partition_graph[index].forward_nodes = f_nodes - b_nodes - c_nodes
            self.partition_graph[index].backward_nodes = b_nodes - c_nodes
    def forward_overlap_helper(self, partition_id, next_id, current_overlap, forward_overlap):
        # Recursively absorbs the nodes of up to ``forward_overlap``
        # successor partitions into partition_id's forward-overlap set.
        if current_overlap >= forward_overlap:
            return
        self.partition_graph[partition_id].add_forward_nodes(self.partition_graph[next_id].nodes)
        for next_part in self.partition_graph[next_id].forward_partitions:
            self.forward_overlap_helper(partition_id, next_part, current_overlap + 1, forward_overlap)
    def backward_overlap_helper(self, partition_id, next_id, current_overlap, backward_overlap):
        # Mirror of forward_overlap_helper for predecessor partitions.
        if current_overlap >= backward_overlap:
            return
        self.partition_graph[partition_id].add_backward_nodes(self.partition_graph[next_id].nodes)
        for next_part in self.partition_graph[next_id].backward_partitions:
            self.backward_overlap_helper(partition_id, next_part, current_overlap + 1, backward_overlap)
    def find_partition_edge_nodes(self, partition, end=False):
        """Method finds start nodes for a given partition"""
        if not (isinstance(partition, set)):
            raise TypeError("Please only call this function with a set as the argument")
        start_nodes = set()
        end_nodes = set()
        for node in partition:
            node_properties = self.adjacency_dict.get(node)
            # A node is a start (resp. end) node when none of its
            # predecessors (resp. successors) belong to the partition.
            if not set(node_properties.backward_nodes).intersection(partition):
                start_nodes.add(node)
            if not set(node_properties.forward_nodes).intersection(partition):
                end_nodes.add(node)
        if end:
            return end_nodes
        return start_nodes
    def assign_nodes_to_group(self):
        # Classifies every node as a partition start, center or end node
        # (each node lands in exactly one group).
        self.partition_start_nodes = list()
        self.partition_center_nodes = list()
        self.partition_end_nodes = list()
        for partition in self.partitions:
            self.partition_start_nodes += list(self.find_partition_edge_nodes(partition, end=False))
            self.partition_end_nodes += list(self.find_partition_edge_nodes(partition, end=True))
        # Ensure node is only assigned to one group
        self.partition_end_nodes = list(set(self.partition_end_nodes) - set(self.partition_start_nodes))
        self.partition_center_nodes = list(self.nodes - set(self.partition_start_nodes + self.partition_end_nodes))
    # ---------- Verification Methods ---------- #
    def graph_statistics(self, print_nodes=False, overlap=False):
        """Method prints out statistics about the graph"""
        if overlap:
            self.partitions = self.partitions.values()
        print("Number of partitions in graph: " + str(len(self.partitions)))
        print("Number of nodes in graph: " + str(len(self.nodes)))
        count = sum([len(part) for part in self.partitions])
        print("Number of nodes in partitions: " + str(count))
        print("Number of unique nodes in all partitions: " + str(len(set.union(*self.partitions))))
        self.assign_nodes_to_group()
        print("Number of partition start nodes: " + str(len(self.partition_start_nodes)))
        print("Number of partition center nodes: " + str(len(self.partition_center_nodes)))
        print("Number of partition end nodes: " + str(len(self.partition_end_nodes)))
        print(
            "Number of nodes in start, center and end: " +
            str(len(self.partition_start_nodes) + len(self.partition_center_nodes) + len(self.partition_end_nodes))
        )
        print("Partition sizes: " + str([len(part) for part in self.partitions]))
        if print_nodes:
            print("Partitions:" + str(self.partitions))
        if count > len(self.nodes):
            # Partitions overlap; report exactly where.
            self.print_partition_intersection()
        self.print_total_weight()
        for i in range(0, len(self.partitions)):
            print("Partition: " + str(i))
            print("Max node weight:" + str(self.find_max_node_weight_in_partitions(i)))
            print("Total weight of the longest path: " + str(self.find_longest_path_in_partitions(i)))
        self.partition_load_balance_factor()
    def find_max_node_weight_in_partitions(self, partition_id):
        """Method finds largest weight that a node has in a partition"""
        max_weight = 0
        for node in self.partitions[partition_id]:
            node_weight = self.adjacency_dict[node].max_weight
            if node_weight > max_weight:
                max_weight = node_weight
        return max_weight
    def find_longest_path_in_partitions(self, partition_id):
        """Method finds largest weight that a node has in a partition"""
        max_weight = 0
        start_nodes = self.find_partition_edge_nodes(self.partitions[partition_id], end=False)
        self.visited_nodes = set()
        for s_node in start_nodes:
            weight = self.longest_path_helper(s_node, partition_id)
            if weight > max_weight:
                max_weight = weight
        return max_weight
    def longest_path_helper(self, node, part_id):
        # DFS returning the heaviest path weight from ``node`` that stays
        # inside partition ``part_id``; -1 marks an invalid continuation.
        if node not in self.partitions[part_id]:
            return -1
        if node in self.visited_nodes:
            return -1
        max_weight = 0
        self.visited_nodes.add(node)
        for i in range(0, len(self.adjacency_dict[node].forward_nodes)):
            f_node = self.adjacency_dict[node].forward_nodes[i]
            weight = self.adjacency_dict[node].forward_weights[i]
            path_weight = self.longest_path_helper(f_node, part_id)
            if path_weight == -1:
                continue
            if max_weight < weight + path_weight:
                max_weight = weight + path_weight
        self.visited_nodes.remove(node)
        return max_weight
    def print_total_weight(self):
        """Method prints out the total weight in the graph"""
        print("Sum of edge weights: " + str(self.total_weight))
    def print_partition_intersection(self):
        """Method prints intersection of partitions"""
        partition_intersection_indices = []
        partition_intersection_nodes = []
        for i in range(0, len(self.partitions)):
            for j in range(i + 1, len(self.partitions)):
                intersect = self.partitions[i].intersection(self.partitions[j])
                if len(intersect) > 0:
                    partition_intersection_indices.append("Partition indices: " + str(i) + str(j))
                    partition_intersection_nodes.append(intersect)
        if partition_intersection_indices:
            print("PARTITIONS INTERSECT!")
            for i in range(0, len(partition_intersection_indices)):
                print(partition_intersection_indices[i] + " nodes: " + str(partition_intersection_nodes[i]))
    def partition_load_balance_factor(self):
        """Method calculates the load balance factor for graph"""
        num_sensors_in_partition = [0.0 for _ in self.partitions]
        for i in range(0, len(self.partitions)):
            for node in self.partitions[i]:
                num_sensors_in_partition[i] += self.adjacency_dict[node].num_sensors
        max_num_sensors_in_partition = max(num_sensors_in_partition)
        total_num_sensors = sum(num_sensors_in_partition)
        # Ratio of the busiest partition to the mean partition load.
        print("Partition load balance factor: " + str(
            max_num_sensors_in_partition / (total_num_sensors / len(num_sensors_in_partition))))
|
class Family(object):
    """Simple container for a list of family members.

    Each member is expected to expose a ``show()`` method.
    """

    def __init__(self, list_persons):
        # Member list (copy of the input sequence).
        self.members = []
        for person in list_persons:
            self.members.append(person)
        # Total number of members.
        self.count = len(self.members)

    def show(self):
        """Print the member count, then each member's own info.

        Uses the print() function (the original used the Python 2
        print statement, a SyntaxError on Python 3).
        """
        print("So luong thanh vien: " + str(self.count))
        for m in self.members:
            m.show()
|
# Code adapted from https://github.com/pytorch/examples/blob/master/reinforcement_learning/reinforce.py
from environment import environment
import numpy as np
from itertools import count
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
#parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
#parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
# help='discount factor (default: 0.99)')
#parser.add_argument('--seed', type=int, default=543, metavar='N',
# help='random seed (default: 543)')
#parser.add_argument('--render', action='store_true',
# help='render the environment')
#parser.add_argument('--log-interval', type=int, default=10, metavar='N',
# help='interval between training status logs (default: 10)')
#args = parser.parse_args()
def init_weights(m):
    """Initialize a Linear layer: Xavier-uniform weights, constant 0.01 bias.

    Intended for use with ``module.apply(init_weights)``; other module
    types are left untouched.
    """
    if isinstance(m, nn.Linear):
        # xavier_uniform (no trailing underscore) is deprecated; the
        # in-place variant is the supported API.
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
class Policy(nn.Module):
    """Two-headed policy network for REINFORCE.

    A shared dense layer feeds two softmax heads: action direction
    (2 classes) and action magnitude (4 classes).
    """

    def __init__(self, input_dim):
        super(Policy, self).__init__()
        self.dense1 = nn.Linear(input_dim, 32)
        self.direction = nn.Linear(32, 2)
        self.magnitude = nn.Linear(32, 4)
        # Per-episode buffers filled by the training loop.
        self.saved_log_probs_direction = []
        self.saved_log_probs_magnitude = []
        self.rewards = []

    def forward(self, x):
        """Return (direction_probs, magnitude_probs) for a batch ``x``.

        Debug print statements from the original were removed.
        """
        h = F.relu(self.dense1(x))
        scores_direction = F.softmax(self.direction(h), dim=1)
        scores_magnitude = F.softmax(self.magnitude(h), dim=1)
        return scores_direction, scores_magnitude
class reinforce(object):
    """REINFORCE policy-gradient driver around a simulation ``environment``.

    Adapted from the PyTorch reinforcement_learning example. Fixes over
    the original: Python 2 octal literals (0755) replaced with 0o755,
    integer division for the network input size, debug prints removed.
    """

    def __init__(self, sim_steps=20000, traj_out_freq=100, episodes=20, output_dir=None):
        # For reproducibility of the starting weights.
        torch.manual_seed(459)
        self.sim_steps = sim_steps
        self.traj_out_freq = traj_out_freq
        self.episodes = episodes
        # nn.Linear needs an int feature count; '/' would give a float on py3.
        self.policy = Policy(input_dim=sim_steps // traj_out_freq)
        self.policy.apply(init_weights)  # may not be necessary
        self.optimizer = optim.SGD(self.policy.parameters(), lr=1e-8)
        # Small epsilon so reward normalization never divides by zero.
        self.eps = np.finfo(np.float32).eps.item()
        if not os.path.exists(output_dir):
            raise Exception("Path " + str(output_dir) + " does not exist!")
        self.output_dir = output_dir
        # Build initial directory layout for results.
        if not os.path.exists(self.output_dir + "/results"):
            os.mkdir(self.output_dir + "/results", 0o755)
        if not os.path.exists(self.output_dir + "/results/final_output"):
            os.mkdir(self.output_dir + "/results/final_output")
        if not os.path.exists(self.output_dir + "/results/final_output/intermediate_data"):
            os.mkdir(self.output_dir + "/results/final_output/intermediate_data")
        self.env = environment(cvae_weights_path="../model_150.dms",
                               sim_steps=self.sim_steps,
                               traj_out_freq=self.traj_out_freq,
                               output_dir=self.output_dir)

    def select_action(self, state):
        """Sample a signed RMSD-threshold delta from the policy for ``state``.

        Appends the log-probabilities of both sampled heads to the policy's
        per-episode buffers for the later gradient step.
        """
        state = torch.from_numpy(state).float().unsqueeze(0)
        probs_direction, probs_magnitude = self.policy.forward(state)
        m_direction = Categorical(probs_direction)
        m_magnitude = Categorical(probs_magnitude)
        action_direction = m_direction.sample()
        action_magnitude = m_magnitude.sample()
        self.policy.saved_log_probs_direction.append(m_direction.log_prob(action_direction))
        self.policy.saved_log_probs_magnitude.append(m_magnitude.log_prob(action_magnitude))
        # Map the sampled class indices onto a signed magnitude.
        dirs = [-1, 1]
        direction = dirs[action_direction.item()]
        mags = [0.1, 0.2, 0.5, .9]
        magnitude = mags[action_magnitude.item()]
        return direction * magnitude

    def finish_episode(self):
        """Compute discounted, normalized returns and take one SGD step."""
        R = 0
        policy_loss = []
        rewards = []
        gamma = 0.5
        # Discounted returns, accumulated from the end of the episode.
        for r in self.policy.rewards[::-1]:
            R = r + gamma * R
            rewards.insert(0, R)
        rewards = torch.tensor(rewards)
        # Normalize returns for gradient stability.
        rewards = (rewards - rewards.mean()) / (rewards.std() + self.eps)
        for log_prob_dir, log_prob_mag, reward in zip(
                self.policy.saved_log_probs_direction,
                self.policy.saved_log_probs_magnitude, rewards):
            policy_loss.append(-(log_prob_dir + log_prob_mag) * reward)
        self.optimizer.zero_grad()
        policy_loss = torch.cat(policy_loss).sum()
        policy_loss.backward()
        self.optimizer.step()
        # Clear the per-episode buffers in place.
        del self.policy.rewards[:]
        del self.policy.saved_log_probs_direction[:]
        del self.policy.saved_log_probs_magnitude[:]

    def main(self):
        """Run episodes: build output dirs, step the environment, train."""
        # TODO: Clean up file IO and directory creation
        path = self.output_dir + "/results/iteration_rl_"
        if not os.path.exists(path + "%i" % 0):
            os.mkdir(path + "%i" % 0, 0o755)
        path_1 = path + "%i/sim_%i_%i/" % (0, 0, 0)
        if not os.path.exists(path_1):
            os.mkdir(path_1, 0o755)
            os.mkdir(path_1 + "/cluster", 0o755)
            os.mkdir(path_1 + "/pdb_data", 0o755)
        state = self.env.initial_state(path_1)
        for i_episode in count(1):
            # Create directories for this episode.
            if not os.path.exists(path + "%i" % i_episode):
                os.mkdir(path + "%i" % i_episode, 0o755)
            # TODO: Set 3 as a user defined variable
            for j_cycle in range(1):
                path_1 = path + "%i/sim_%i_%i/" % (i_episode, i_episode, j_cycle)
                if not os.path.exists(path_1):
                    os.mkdir(path_1, 0o755)
                    os.mkdir(path_1 + "/cluster", 0o755)
                    os.mkdir(path_1 + "/pdb_data", 0o755)
                action = self.select_action(state)
                state, reward, done = self.env.step(action, path_1, i_episode, j_cycle)
                self.policy.rewards.append(reward)
                if done:
                    break
            # TODO: Refactor
            if (j_cycle < 2) or i_episode == self.episodes:
                break
            self.finish_episode()
        # For plotting purposes
        for i in range(1, i_episode + 1):
            # TODO: update 3 to user defined variable
            for j in range(1):
                self.env.plot_intermediate_episode(
                    self.env.output_dir + "/results/final_output/intermediate_data/",
                    self.env.output_dir + "/results/final_output/", i, j, 'Intermediate')
#if __name__ == '__main__':
# ren = reinforce#()
# ren.main()
|
# 7-1: rental prompt.
print('7.1\n')
prompt = input('What kind of car would you like to rent? ')
print("I'll see if we have a " + prompt + " for you.")

# 7-2: seating.
print('7.2\n')
party = input('How many people are in your party? ')
if int(party) > 8:
    print("I'm sorry, but you'll have to wait for a table.")
else:
    print("Your table is now ready.")

# 7-3: multiple of ten.
print('7.3\n')
num = input("Enter a number and I'll see if it is a multiple of 10: ")
if int(num) % 10 == 0:
    print('Yup!')
else:
    print('Nope!')

# 7-4: pizza toppings until 'q'.
print('7-4\n')
while True:
    topping = input("Enter a pizza topping you'd like to add. Enter 'q' when you are finished: ")
    if topping == 'q':
        break
    print("We'll add " + topping + ' to your pizza.')

# 7-5: movie tickets. Tiers: under 3 free, 3-12 is $10, over 12 is $15.
# (The original used "int(age) >= 3 or age <= 12", which compared a str
# to an int and made the middle tier match every age.)
print('7.5\n')
while True:
    age = input("Please enter your age. Enter 'q' to exit: ")
    if age == 'q':
        break
    if int(age) < 3:
        print("Your ticket is free!")
    elif int(age) <= 12:
        print("Your ticket is $10.")
    else:
        print("Your ticket is $15.")

# 7-6: up to three toppings; check the quit sentinel BEFORE confirming
# the topping (the original printed "We'll add q to your pizza.").
print('7.6\n')
print("Enter a pizza topping (up to 3) you'd like to add. Enter 'q' when you are finished")
n = 1
while n < 4:
    topping = input("Topping " + str(n) + ": ")
    if topping == 'q':
        break
    print("We'll add " + topping + ' to your pizza.')
    n += 1
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
from Game import *
class IDS(object):
    """Iterative Deepening Search over the Game state space.

    Runs a depth-limited DFS with an increasing depth cap until the goal
    state is found or 1000 nodes have been expanded in total.
    """

    # MARK: Constructor for a state object.
    def __init__(self, initial, goal, forbidden, is_heuristic):
        # An integer: the start state.
        self.initial = initial
        # An integer: the goal state.
        self.goal = goal
        # A set of integers: states that must never be visited.
        self.forbidden = forbidden
        # A boolean: whether the heuristic variant of Game is used.
        self.is_heuristic = is_heuristic
        # Total expansions across all depth iterations (hard cap of 1000).
        self.expanded_counter = 0

    def search(self):
        """Run IDS and return a result string: path + expansion trace,
        or a no-solution message with the trace."""
        # Make reference outside the loop.
        target_node = None
        game = None
        depth = 0
        expanded_string = ""
        # Iterative deepening: one depth-limited search per iteration.
        while True:
            # Initialising a fresh Game configuration for this depth.
            game = Game(self.initial, self.goal, self.forbidden, self.is_heuristic)
            # Searching for the target node up to the current depth cap.
            target_node = self.search_recursive(game, game.initial, 0, depth)
            # Append this iteration's expansion order to the trace.
            expanded_string = expanded_string + game.expanded_in_string() + ","
            # Increment the depth cap for the next iteration.
            depth = depth + 1
            if target_node is not None:
                break
            if self.expanded_counter >= 1000:
                break
        # Format the final result.
        if target_node is None:
            return "No solution found.\n{0}".format(expanded_string.rstrip(","))
        else:
            return "{0}\n{1}".format(target_node.path, expanded_string.rstrip(","))

    # Return a node if found, or return None if not found.
    def search_recursive(self, game, node, current_depth, target_depth):
        """Depth-limited DFS from ``node``; returns the goal node or None."""
        # Stopping condition: depth cap reached.
        if current_depth > target_depth:
            return None
        # Expand the node first so its children (and the trace) exist.
        game.expand_state(node)
        # Only count expansions of states the game did not discard.
        if not game.is_previous_state_discarded:
            self.expanded_counter += 1
        if node.state == self.goal:
            return node
        if self.expanded_counter >= 1000:
            return None
        # Recurse into each child until one path reaches the goal.
        for child in node.child:
            target_node = self.search_recursive(game, child, current_depth + 1, target_depth)
            if target_node is not None:
                return target_node
            if self.expanded_counter >= 1000:
                return None
        return None
|
from celery import Celery

# Settings module stuff.
# NOTE(review): broker credentials are hard-coded here; move them into a
# settings module / environment variable before deployment.
BROKER_HOST = "amqp://dss-radio:Ku9hwTn0XT5Xo@localhost:5672//"

# The broker URL must be passed as the ``broker`` keyword; the second
# positional parameter of Celery() is ``loader``, so the original call
# never configured the broker at all.
app = Celery('server', broker=BROKER_HOST)


@app.task
def add(x, y):
    """Celery task: return the sum of ``x`` and ``y``."""
    return x + y
|
from collections import defaultdict
from re import compile, match
REGEX = compile(r'(?P<num>\d+) (?P<adr>.+) (?P<st_zip>[A-Z]{2} \d{5})')


def travel(addresses, zipcode):
    """Filter comma-separated addresses by state+zip and format the
    matches as 'ZIP:street1,street2/num1,num2' (input order preserved)."""
    streets = []
    numbers = []
    for address in addresses.split(','):
        parts = match(REGEX, address).groupdict()
        if parts['st_zip'] == zipcode:
            streets.append(parts['adr'])
            numbers.append(parts['num'])
    return '{}:{}/{}'.format(zipcode, ','.join(streets), ','.join(numbers))
|
class Locators():
    """Centralized Selenium element locators for the page objects.

    Attribute name suffixes indicate the locator strategy to use
    (``_id``, ``_xpath``, ``_class_name``, ``_link_text``).
    """

    # LoginPage objects
    username_textbox_id = "inputEmail"
    password_textbox_id = "inputPassword"
    login_button_xpath = "//*[@id=\"new_user\"]/div/input"

    # HomePage objects
    profile_link_class_name = "profile"
    sign_out_button_link_text = "Sign out"
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
import uvicorn
from database.db import database
from resume.api import resume_router
from user.routers import user_router
app = FastAPI(title="Resume API", description="Simple api for load resume", version="0.1.0")
app.state.database = database  # attach the shared database connection to app state
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.on_event("startup")
async def startup() -> None:
    """Open the database connection when the application starts."""
    database_ = app.state.database
    if not database_.is_connected:
        await database_.connect()


@app.on_event("shutdown")
async def shutdown() -> None:
    """Close the database connection when the application stops."""
    database_ = app.state.database
    if database_.is_connected:
        await database_.disconnect()


app.include_router(user_router)
app.include_router(resume_router)

if __name__ == '__main__':
    # Dev entry point: uvicorn with auto-reload on all interfaces.
    uvicorn.run('main:app', port=8000, host='0.0.0.0', reload=True)
|
# Bounty04 - Clickable Raindrops
import turtle

scr = turtle.Screen()
trt01 = turtle.Turtle()
trt01.shape("circle")
trt01.stamp()
# Register a callback: the original called clearstamps() immediately and
# passed its return value (None) to onscreenclick, so clicks did nothing.
scr.onscreenclick(lambda x, y: trt01.clearstamps())
for i in range(1, 7):
    trt01.shapesize(i)
    trt01.color("blue")
scr.mainloop()
|
from app.models.questions import Questions, QuestionChoices
async def create_question(question: QuestionChoices):
    """Create and return a Questions row for the given choice."""
    return await Questions.objects.create(question=question.value)


async def get_question(question: QuestionChoices):
    """Return the Questions row matching the given choice.

    (A leftover ``print(type(question))`` debug statement was removed.)
    """
    return await Questions.objects.get(question=question.value)


async def del_question(id: int):
    """Delete the question with ``id`` and return a confirmation message."""
    await Questions.objects.delete(id)
    # NOTE(review): this returns a set literal; a dict such as
    # {"detail": "..."} is the usual FastAPI response shape — confirm callers.
    return {"Question has been deleted !"}
|
from datetime import datetime, timedelta

import bs4
import requests
from bs4 import BeautifulSoup
def go():
    """Return (yesterday, same-day-last-year) as 'M/D/YYYY' strings.

    Uses real date arithmetic so month and year boundaries are handled
    correctly (the original subtracted 1 from the day number, which
    produced day 0 on the first day of any month).
    """
    yest = datetime.now() - timedelta(days=1)
    yesterday = '{}/{}/{}'.format(yest.month, yest.day, yest.year)
    last_year = '{}/{}/{}'.format(yest.month, yest.day, yest.year - 1)
    return yesterday, last_year
|
# Generated by Django 2.2.6 on 2019-11-25 08:11
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``changeid`` field from the ``headline`` model."""

    dependencies = [
        ('work', '0039_auto_20191125_0810'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='headline',
            name='changeid',
        ),
    ]
|
"""Internal library for admin."""
import csv
import io
import ipaddress
import logging
import random
import string
from functools import wraps
from itertools import chain
import dns.resolver
from dns.name import IDNA_2008_UTS_46
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import Q
from django.utils.encoding import smart_str
from django.utils.translation import gettext as _
from django.contrib.auth import password_validation
from django.contrib.contenttypes.models import ContentType
from reversion import revisions as reversion
from modoboa.core import signals as core_signals
from modoboa.core.models import User
from modoboa.lib.exceptions import (
Conflict, ModoboaException, PermDeniedException
)
from modoboa.parameters import tools as param_tools
from . import signals
from .models import Alias, Domain, DomainAlias
def needs_mailbox():
    """Decorator factory: allow the view only for users owning a mailbox.

    Some applications (the webmail for example) need a mailbox to work.
    Raises PermDeniedException otherwise.
    """
    def decorator(f):
        @wraps(f)
        def wrapped_f(request, *args, **kwargs):
            # Guard clause: reject users without a mailbox up front.
            if not hasattr(request.user, "mailbox"):
                raise PermDeniedException(_("A mailbox is required"))
            return f(request, *args, **kwargs)
        return wrapped_f
    return decorator
def get_identities(user, searchquery=None, idtfilter=None, grpfilter=None):
    """Return all the identities owned by a user.

    :param user: the desired user
    :param str searchquery: search pattern
    :param list idtfilter: identity type filters
    :param list grpfilter: group names filters
    :return: a chained iterable of accounts and aliases
    """
    accounts = []
    # Accounts branch: included when no type filter is set or it is "account".
    if idtfilter is None or not idtfilter or idtfilter == "account":
        # Only accounts the user has an object-access entry for.
        ids = user.objectaccess_set \
            .filter(content_type=ContentType.objects.get_for_model(user)) \
            .values_list("object_id", flat=True)
        q = Q(pk__in=ids)
        if searchquery is not None:
            q &= Q(username__icontains=searchquery) \
                | Q(email__icontains=searchquery)
        if grpfilter is not None and grpfilter:
            # "SuperAdmins" is a virtual group backed by the is_superuser flag.
            if grpfilter == "SuperAdmins":
                q &= Q(is_superuser=True)
            else:
                q &= Q(groups__name=grpfilter)
        accounts = User.objects.filter(q).prefetch_related("groups")
    aliases = []
    # Aliases branch: included for no filter or any alias-like type.
    if idtfilter is None or not idtfilter \
            or (idtfilter in ["alias", "forward", "dlist"]):
        alct = ContentType.objects.get_for_model(Alias)
        ids = user.objectaccess_set.filter(content_type=alct) \
            .values_list("object_id", flat=True)
        # Internal aliases are implementation details and never listed.
        q = Q(pk__in=ids, internal=False)
        if searchquery is not None:
            q &= (
                Q(address__icontains=searchquery) |
                Q(domain__name__icontains=searchquery)
            )
        aliases = Alias.objects.select_related("domain").filter(q)
        # The alias sub-type (alias/forward/dlist) is a computed property,
        # so it must be filtered in Python rather than in the queryset.
        if idtfilter is not None and idtfilter:
            aliases = [al for al in aliases if al.type == idtfilter]
    return chain(accounts, aliases)
def get_domains(user, domfilter=None, searchquery=None, **extrafilters):
    """Return all the domains the user can access.

    :param ``User`` user: user object
    :param str domfilter: domain type filter
    :param str searchquery: name filter applied to domains and aliases
    :return: a queryset of domains and/or relay domains
    """
    qset = Domain.objects.get_for_admin(user).prefetch_related(
        "domainalias_set")
    if domfilter:
        qset = qset.filter(type=domfilter)
    if searchquery is not None:
        criteria = (Q(name__contains=searchquery) |
                    Q(domainalias__name__contains=searchquery))
        qset = qset.filter(criteria).distinct()
    # Let plugins contribute extra queryset filters through a signal.
    results = signals.extra_domain_qset_filters.send(
        sender="get_domains", domfilter=domfilter, extrafilters=extrafilters)
    if results:
        extra_criteria = {}
        for _receiver, qfilters in results:
            extra_criteria.update(qfilters)
        qset = qset.filter(**extra_criteria)
    return qset
def check_if_domain_exists(name, dtypes):
    """Check if a domain already exists.

    We not only look for domains, we also look for every object that
    could conflict with a domain (domain alias, etc.). Returns the label
    of the first conflicting type, or None when ``name`` is free.
    """
    return next(
        (label for dtype, label in dtypes
         if dtype.objects.filter(name=name).exists()),
        None,
    )
def import_domain(user, row, formopts):
    """Specific code for domains import.

    Requires the ``admin.add_domain`` permission; row parsing is
    delegated to ``Domain.from_csv``.
    """
    if not user.has_perm("admin.add_domain"):
        raise PermDeniedException(_("You are not allowed to import domains"))
    # Let listeners veto the creation (quota checks, etc.).
    core_signals.can_create_object.send(
        sender="import", context=user, klass=Domain)
    dom = Domain()
    dom.from_csv(user, row)
def import_domainalias(user, row, formopts):
    """Specific code for domain aliases import.

    Requires the ``admin.add_domainalias`` permission.
    """
    if not user.has_perm("admin.add_domainalias"):
        raise PermDeniedException(
            _("You are not allowed to import domain aliases."))
    core_signals.can_create_object.send(
        sender="import", context=user, klass=DomainAlias)
    domalias = DomainAlias()
    domalias.from_csv(user, row)
def import_account(user, row, formopts):
    """Specific code for accounts import."""
    account = User()
    account.from_csv(user, row, formopts["crypt_password"])
def _import_alias(user, row, **kwargs):
    """Shared implementation for alias/forward/dlist imports."""
    alias = Alias()
    alias.from_csv(user, row, **kwargs)
def import_alias(user, row, formopts):
    """Import a regular alias row (exactly 4 elements expected)."""
    _import_alias(user, row, expected_elements=4, formopts=formopts)
def import_forward(user, row, formopts):
    """Import a forward row (exactly 4 elements expected)."""
    _import_alias(user, row, expected_elements=4, formopts=formopts)
def import_dlist(user, row, formopts):
    """Import a distribution list row (variable element count)."""
    _import_alias(user, row, formopts=formopts)
def get_dns_resolver():
    """Return a DNS resolver object.

    When a custom DNS server is configured, a dedicated Resolver
    pointed at it is returned; otherwise the module-level default
    resolver is used.
    """
    custom_server = param_tools.get_global_parameter("custom_dns_server")
    if not custom_server:
        return dns.resolver
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [custom_server]
    return resolver
def get_dns_records(name, typ, resolver=None):
    """Retrieve DNS records for given name and type.

    Returns the answer object on success, or None after logging any
    resolution failure.
    """
    logger = logging.getLogger("modoboa.admin")
    if not resolver:
        resolver = get_dns_resolver()
    try:
        dns_answers = resolver.resolve(name, typ, search=True)
    except dns.resolver.NXDOMAIN as e:
        logger.error(_("No DNS record found for %s") % name, exc_info=e)
    except dns.resolver.NoAnswer as e:
        logger.error(
            _("No %(type)s record for %(name)s") % {"type": typ, "name": name},
            exc_info=e
        )
    except dns.resolver.NoNameservers as e:
        logger.error(_("No working name servers found"), exc_info=e)
    except dns.resolver.Timeout as e:
        logger.warning(
            _("DNS resolution timeout, unable to query %s at the moment") %
            name, exc_info=e)
    except dns.name.NameTooLong as e:
        # Interpolate AFTER translation; the original formatted the string
        # before passing it to gettext, so the catalog lookup always missed.
        logger.error(_("DNS name is too long: %s") % name, exc_info=e)
    else:
        return dns_answers
    return None
def get_domain_mx_list(domain):
    """Return a list of (mx_hostname, ip_address) tuples for ``domain``.

    Resolves the domain's MX records, then each exchange's A and AAAA
    records; unparsable addresses are logged and skipped.
    """
    result = []
    logger = logging.getLogger("modoboa.admin")
    resolver = get_dns_resolver()
    dns_answers = get_dns_records(domain, "MX", resolver)
    if dns_answers is None:
        return result
    for dns_answer in dns_answers:
        # Decode the exchange name with IDNA 2008/UTS-46 for i18n domains.
        mx_domain = dns_answer.exchange.to_unicode(
            omit_final_dot=True, idna_codec=IDNA_2008_UTS_46)
        for rtype in ["A", "AAAA"]:
            ip_answers = get_dns_records(mx_domain, rtype, resolver)
            if not ip_answers:
                continue
            for ip_answer in ip_answers:
                try:
                    address_smart = smart_str(ip_answer.address)
                    mx_ip = ipaddress.ip_address(address_smart)
                except ValueError as e:
                    # Log and skip malformed addresses instead of failing.
                    logger.warning(
                        _("Invalid IP address format for "
                          "{domain}; {addr}").format(
                              domain=mx_domain,
                              addr=smart_str(ip_answer.address)
                        ), exc_info=e)
                else:
                    result.append((mx_domain, mx_ip))
    return result
def domain_has_authorized_mx(name):
    """Check if the domain has at least one MX inside an authorized network."""
    raw_networks = param_tools.get_global_parameter("valid_mxs")
    allowed = [
        ipaddress.ip_network(smart_str(entry.strip()))
        for entry in raw_networks.split() if entry.strip()
    ]
    return any(
        mx_ip in network
        for _mx_name, mx_ip in get_domain_mx_list(name)
        for network in allowed
    )
def make_password():
    """Create a random password.

    Draws alphanumeric characters from a cryptographically secure RNG
    and retries until Django's password validators accept the result.
    """
    length = int(
        param_tools.get_global_parameter("random_password_length", app="core"))
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    while True:
        candidate = "".join(rng.choice(alphabet) for _ in range(length))
        try:
            password_validation.validate_password(candidate)
        except ValidationError:
            continue
        return candidate
@reversion.create_revision()
def import_data(user, file_object, options: dict):
    """Generic import function.

    As the process of importing data from a CSV file is the same
    whatever the type, we do a maximum of the work here. Returns a
    ``(success, message)`` tuple.
    """
    try:
        infile = io.TextIOWrapper(file_object.file, encoding="utf8")
        reader = csv.reader(infile, delimiter=options["sepchar"])
    except csv.Error as inst:
        error = str(inst)
    else:
        try:
            cpt = 0
            for row in reader:
                if not row:
                    continue
                # Dispatch on the first column: signal receivers return the
                # import function matching the object type, or None.
                fct = signals.import_object.send(
                    sender="importdata", objtype=row[0].strip())
                fct = [func for x_, func in fct if func is not None]
                if not fct:
                    continue
                fct = fct[0]
                # Each row runs in its own transaction so one failure does
                # not roll back previously imported rows.
                with transaction.atomic():
                    try:
                        fct(user, row, options)
                    except Conflict:
                        # Optionally tolerate duplicates instead of aborting.
                        if options["continue_if_exists"]:
                            continue
                        raise Conflict(
                            _("Object already exists: %s")
                            % options["sepchar"].join(row[:2])
                        )
                cpt += 1
            msg = _("%d objects imported successfully") % cpt
            return True, msg
        except (ModoboaException) as e:
            error = str(e)
    return False, error
|
def frst(word, n):
    """Print ``word`` on ``n`` consecutive lines."""
    for _ in range(n):
        print(word)


frst("abhisehk", 12)


def dot(a):
    """Print an ``a`` x ``a`` square of asterisks."""
    for _ in range(a):
        print("*" * a)


# The original defined dot() twice identically; one definition suffices.
# Both calls are kept so the printed output is unchanged.
dot(5)
dot(5)
|
# Generated by Django 3.0.1 on 2019-12-19 09:29
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Initial migration: create the ``cat_photos`` table (CatPhoto model)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CatPhoto',
            fields=[
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='更新日')),
                ('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, verbose_name='UUID')),
                ('title', models.TextField(max_length=30, verbose_name='タイトル')),
                ('url', models.URLField(verbose_name='画像URL')),
                ('cat_type', models.CharField(choices=[('bk', 'Black'), ('we', 'White'), ('bt', 'brown tabby'), ('st', 'silver tabby'), ('rt', 'red tabby'), ('ct', 'calico_cat')], max_length=2)),
                ('latitude', models.DecimalField(decimal_places=6, max_digits=9)),
                ('longitude', models.DecimalField(decimal_places=6, max_digits=9)),
            ],
            options={
                'db_table': 'cat_photos',
            },
        ),
    ]
|
import requests
from spellchecker import SpellChecker
def escape_meme_text(text):
    """
    Replaces special characters in text for use with the
    memegen.link API
    """
    replacements = {
        " ": "_",
        "?": "~q",
        "%": "~p",
        "#": "~h",
        "/": "~s",
        "''": "\"",
    }
    # Iterate key/value pairs directly instead of re-indexing the dict.
    for old, new in replacements.items():
        text = text.replace(old, new)
    return text
def generate_meme(top_text, bottom_text, meme_type):
    """Fetch a rendered meme from the memegen.link API.

    Returns the raw JPEG bytes of the generated image; both text
    arguments are escaped for safe inclusion in the URL path.
    """
    top_text = escape_meme_text(top_text)
    bottom_text = escape_meme_text(bottom_text)
    url = f"https://memegen.link/{meme_type}/{top_text}/{bottom_text}.jpg"
    res = requests.get(url)
    return res.content
def correct_spelling(text):
    """Return ``text`` with each space-separated word spell-corrected
    via pyspellchecker's most-likely candidate."""
    spell_checker = SpellChecker()
    return " ".join([spell_checker.correction(w) for w in text.split(" ")])
|
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework import serializers
from rest_framework.response import Response
from .models import ( CarouselDisplay, Kudos, SurveyTopics,
RespondentProfile, YaridAccount, QuestionPosts)
# ...creating a user
class UserSerializer(serializers.ModelSerializer):
    """Registration serializer: creates a YaridAccount plus a mirror
    django auth User (with token) in one step."""

    password2 = serializers.CharField(style={'input_type': 'password'}, write_only=True)
    password1 = serializers.CharField(style={'input_type': 'password'}, write_only=True)

    class Meta:
        model = YaridAccount
        fields = ('name', 'lastname', 'Uemail', 'residence', 'country_of_origin', 'password1', 'password2')
        depth = 1

    def create(self, validated_data):
        """Create and return a YaridAccount; also creates the auth User.

        Raises serializers.ValidationError when the two passwords differ
        (the original returned a DRF Response object here, which is not
        a model instance and breaks the serializer contract).
        """
        Uemail = validated_data['Uemail']
        lastname = validated_data['lastname']
        name = validated_data['name']
        residence = validated_data['residence']
        country_of_origin = validated_data['country_of_origin']
        password1 = validated_data['password1']
        password2 = validated_data['password2']
        if password1 != password2:
            raise serializers.ValidationError(
                {"Message": "Passwords Dont match Correct this"})
        # NOTE(review): the raw password is stored on YaridAccount; it
        # should be hashed before saving — confirm downstream usage.
        yariduser = YaridAccount(lastname=lastname, name=name, password=password1,
                                 Uemail=Uemail,
                                 country_of_origin=country_of_origin,
                                 residence=residence)
        yariduser.save()
        uname = str(lastname) + ' ' + str(name)
        user = User(email=Uemail, username=uname)
        user.set_password(password2)
        user.save()
        Token.objects.create(user=user)
        return yariduser
class CreatingSysUsers(serializers.ModelSerializer):
    """Serializer creating a django auth User with a hashed password
    and an API token."""

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """Create the User, hash the password, and issue a Token."""
        user = User(email=validated_data['email'], username=validated_data['username'])
        user.set_password(validated_data['password'])
        user.save()
        Token.objects.create(user=user)
        return user
class RespondentProfileSerializers(serializers.ModelSerializer):
    """Read serializer for respondent profiles."""
    class Meta:
        model = RespondentProfile
        fields = ('RespondentName', 'RespondentRole', 'Respondentprofiling_date')
        depth = 1
class SurveyTopicsSerializers(serializers.ModelSerializer):
    """Read serializer for survey topics."""
    class Meta:
        model = SurveyTopics
        fields = ('survey_name', 'added_date')
        depth = 1
class KudosSerializers(serializers.ModelSerializer):
    """Serializer for kudos entries; depth=3 expands nested relations."""
    class Meta:
        model = Kudos
        fields = ('respondent_marked', 'survey_marked', 'survey_marks', 'recorded_by', 'posting_date')
        depth = 3
class NewsPostsSerializers(serializers.ModelSerializer):
    """Flat serializer for question/news posts (no relation expansion)."""
    class Meta:
        model = QuestionPosts
        fields = ('poster_name', 'post_title', 'post_body', 'posting_date', 'post_image')
        depth = 0
class CarouselDisplaySerializers(serializers.ModelSerializer):
    """Serializer for carousel entries; image rendered as an absolute URL."""
    image = serializers.ImageField(use_url=True)
    class Meta:
        model = CarouselDisplay
        fields = ['title', 'image', 'body', 'creationDate']
        depth = 1
# this is to help in posting base64 images
# class ProfileSerializer(serializers.ModelSerializer):
# class Meta:
# model = Profile
# fields = ['name', 'bio', 'pic']
# read_only_fields = ['pic']
#
#
# class ProfilePicSerializer(serializers.ModelSerializer):
# class Meta:
# model = Profile
# fields = ['pic']
|
from enum import Enum
class BitSet(set):
    """A set of int or Enum flags that can be OR-combined into one mask."""

    def combine(self):
        """Return the bitwise OR of all entries (Enum entries use .value)."""
        mask = 0
        for member in self:
            mask |= member.value if isinstance(member, Enum) else member
        return mask
|
import sys

# Make the project sources importable when running from this folder.
sys.path.append('../src')
from creador_grafos import crearDigrafoCompleto

# Generate a complete digraph with 4 vertices into the output file.
crearDigrafoCompleto(4, "../out/grafoPrueba.txt")
|
from typing import Dict, Any
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from shop.forms import SearchForm
from shop.models import Setting, ContactMessage,ContactForm
from Product.models import Product,Images,Category,Comment
from Orderapp.models import ShopCart
def home(request):
    """Render the shop home page: cart totals plus several product shelves.

    The original computed an unused per-cart quantity total in a second
    loop over the cart; that dead code was removed.
    """
    current_user = request.user
    cart_product = ShopCart.objects.filter(user_id=current_user.id)
    # Cart total = sum of price * quantity over the user's cart rows.
    total_amount = 0
    for p in cart_product:
        total_amount += p.product.new_price * p.quantity
    category = Category.objects.all()
    setting = Setting.objects.get(id=3)
    # Product shelves sliced from the id-ordered catalogue.
    sliding_images = Product.objects.all().order_by('id')[:3]
    lastest_products = Product.objects.all().order_by('id')[33:]
    products = Product.objects.all().order_by('id')[4:]
    jt_y = Product.objects.all().order_by('id')[8:16]
    flash = Product.objects.all().order_by('id')[18:27]
    tr = Product.objects.all().order_by('id')[27:37]
    mr = Product.objects.all().order_by('id')[36:43]
    context = {'category': category, 'setting': setting, 'sliding_images': sliding_images,
               'jt_y': jt_y, 'flash': flash, 'lastest_products': lastest_products,
               'products': products, 'tr': tr, 'mr': mr,
               'cart_product': cart_product, 'total_amount': total_amount}
    return render(request, 'home.html', context)
def about(request):
    """Render the static about page."""
    context = {
        'category': Category.objects.all(),
        'setting': Setting.objects.get(id=3),
    }
    return render(request, 'about.html', context)
def product_single(request, id):
    """Render the detail page for product ``id`` with its images,
    approved comments, and a related-products shelf."""
    context = {
        'category': Category.objects.all(),
        'setting': Setting.objects.get(id=3),
        'single_product': Product.objects.get(id=id),
        'images': Images.objects.filter(product_id=id),
        'products': Product.objects.all().order_by('id')[10:],
        'comment_show': Comment.objects.filter(product_id=id, status='True'),
    }
    return render(request, 'product-single.html', context)
def category_product(request, id, slug):
    """Render all products belonging to category ``id``."""
    context = {
        'setting': Setting.objects.get(id=3),
        'category': Category.objects.all(),
        'product_cat': Product.objects.filter(category_id=id),
        'sliding_images': Product.objects.all().order_by('id')[:3],
    }
    return render(request, 'category_product.html', context)
def contact(request):
    """Handle the contact form: persist a ContactMessage on valid POST,
    otherwise render an empty form."""
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            data = ContactMessage()
            data.name = form.cleaned_data['name']
            data.email = form.cleaned_data['email']
            data.subject = form.cleaned_data['subject']
            data.message = form.cleaned_data['message']
            data.ip = request.META.get('REMOTE_ADDR')
            data.save()
            return HttpResponseRedirect(reverse('contact_dat'))
    setting = Setting.objects.get(id=3)
    category = Category.objects.all()
    # Instantiate the form (the original passed the class itself).
    form = ContactForm()
    context = {
        # Fixed: the original used ' setting' (leading space) as the key,
        # so the template could never access {{ setting }}.
        'setting': setting,
        'category': category,
        'form': form,
    }
    return render(request, 'contact_form.html', context)
def SearchView(request):
    """Search products by title, optionally restricted to a category.

    On a valid POST, renders the category product template with the
    matches; any other request falls through to a redirect.
    """
    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            query = form.cleaned_data['query']
            cat_id = form.cleaned_data['cat_id']
            # cat_id == 0 means "all categories".
            if cat_id == 0:
                products = Product.objects.filter(title__icontains=query)
            else:
                products = Product.objects.filter(
                    title__icontains=query, category_id=cat_id)
            category = Category.objects.all()
            sliding_images = Product.objects.all().order_by('id')[:3]
            setting = Setting.objects.get(id=3)
            context = {
                'category': category,
                'query': query,
                'product_cat': products,
                'sliding_images': sliding_images,
                'setting': setting,
            }
            return render(request, 'category_product.html', context)
    # Fallback for GET or invalid form.
    # NOTE(review): this redirects to the literal path 'category_product';
    # reverse('category_product') is probably intended — confirm URLconf.
    return HttpResponseRedirect('category_product')
#!/usr/bin/env python
# coding: utf-8
# In[247]:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
# In[381]:
# Jupyter-notebook export: answers a fixed movie-dataset questionnaire.
# The file has a .xls name but is really comma-separated koi8-r text.
data = pd.read_csv('movie_bd_v5.xls',sep = '\t', encoding='koi8-r', delimiter = ',')
data.sample(5)
# In[382]:
data.describe()
# # Preprocessing
# In[383]:
answers = {} # dictionary collecting the answer to every question
# other column preprocessing goes here, for example:
#add a column to the dataset with information about the total profit or loss of the movie
data ['profit'] = data['revenue'] - data['budget']
#the time given in the dataset is in string format.
#So we need to change this in datetime format by adding a new column
data['release_date_1'] = pd.to_datetime(data['release_date'])
# # 1. Which film in the list has the biggest budget?
# Using the multiple-choice options inside the solution code is forbidden.
# Do you expect to get multiple-choice options in real life?)
# In[384]:
# insert the question number and your answer into the dictionary
# Example:
answers['1'] = '2. Spider-Man 3 (tt0413300)'
# record your own answer
answers['1'] = 'Pirates of the Caribbean: On Stranger Tides (tt1298650) +'
# a "+" marker means the answer was verified as correct
# In[252]:
# solution code for this question:
data.loc[data['budget'] == data['budget'].max(),'original_title'].values[0]
# VARIANT 2
# In[201]:
# alternative solutions may be added
data[(data.budget == data.budget.max())].original_title.describe()
# # 2. Which film is the longest (in minutes)?
# In[385]:
# the pattern for the answers dictionary should be clear by now,
# so it is not repeated below
answers['2'] = 'Gods and Generals (tt0279111) +'
# In[254]:
data.loc[(data.runtime == data.runtime.max()),'original_title'].values[0]
# # 3. Which film is the shortest (in minutes)?
#
#
#
#
# In[386]:
answers['3'] = 'Winnie the Pooh (tt1449283) +'
data.loc[(data.runtime == data.runtime.min()),'original_title'].values[0]
# # 4. What is the mean film runtime?
#
# In[387]:
answers['4'] = '110 +'
round (data.runtime.mean())
# # 5. What is the median film runtime?
# In[388]:
answers['5'] = '107 +'
round (data.runtime.median())
# # 6. Which film is the most profitable?
# #### Attention! Here and below "profit"/"loss" means revenue minus budget.
# ####(profit = revenue - budget), i.e. in this dataset (profit = revenue - budget)
# In[389]:
answers['6'] = 'Avatar (tt0499549) +'
# better to move the profit-column code into the Preprocessing section at the top
grouped_data = data.groupby(['original_title'])['profit'].sum().sort_values(ascending=False)
print(grouped_data.head(1))
# # 7. Which film lost the most money?
# In[390]:
answers['7'] = 'The Lone Ranger (tt1210819) +'
grouped_data = data.groupby(['original_title'])['profit'].sum().sort_values(ascending=True)
print(grouped_data.head(1))
# # 8. For how many films did revenue exceed the budget?
# In[391]:
answers['8'] = '1478 +'
len (data[data.profit > 0])
# # 9. Which film grossed the most in 2008?
# In[392]:
answers['9'] = 'The Dark Knight (tt0468569) +'
# NOTE(review): isin(['2008']) compares against *strings*; if release_year
# is an int column this matches nothing — verify the dtype.
df1 = data.loc[data['release_year'].isin(['2008'])].pivot_table(values=['revenue'],
                                                 index=['release_year'],
                                                 columns=['original_title'],
                                                 aggfunc='max')
df1.max().sort_values(ascending=False)
# # 10. The most loss-making film released between 2012 and 2014 (inclusive)?
#
# In[393]:
answers['10'] = 'The Lone Ranger (tt1210819) +'
df = data.loc[data['release_year'].isin(['2012','2013','2014'])].pivot_table(values=['profit'],
                                               index=['release_year'],
                                               columns=['original_title'],
                                               aggfunc='sum')
df.min().sort_values(ascending=True)
# # 11. Which genre has the most films?
# In[394]:
answers['11'] = 'Drama +'
# this task can be solved in several ways; try different variants
# if you add a helper function, move it to the Preprocessing section at the top
sample1 = data.genres.str.split('|', expand = True)
sample2 = sample1.stack()
s = sample2.tolist()
data_genres = pd.Series(s)
data_genres.value_counts()
# VARIANT 2
# In[265]:
display(data['genres'].str.split('|').explode().value_counts())
# # 12. Films of which genre most often turn a profit?
# In[395]:
answers['12'] = 'Drama +'
genres_profit = data[data.profit > 0]
sample1 = genres_profit.genres.str.split('|', expand = True)
sample2 = sample1.stack()
s = sample2.tolist()
data_genres = pd.Series(s)
data_genres.value_counts()
# # 13. Which director has the largest total box-office revenue?
# In[396]:
answers['13'] = 'Peter Jackson +'
data ['directors'] = data.director.apply(lambda s: s.split('|'))
data_1 = data.explode('directors')
data_1.groupby(by = 'directors').revenue.sum().sort_values(ascending = False)
# # 14. Which director shot the most Action films?
# In[397]:
answers['14'] = 'Robert Rodriguez +'
genres1 = data[data.genres.str.contains('Action', na=False)]
director1 = genres1['director'].str.split('|',expand=True).stack().value_counts()
director1.head(1)
# # 15. Films with which actor earned the highest box-office revenue in 2012?
# In[398]:
answers['15'] = 'Chris Hemsworth +'
c1 = data[data.release_year == 2012][['cast', 'revenue']]
c1.cast = c1.cast.apply(lambda s: s.split('|'))
c2 = c1.explode('cast')
c2.groupby(by = 'cast').revenue.sum().sort_values(ascending = False)
# # 16. Which actor appeared in the largest number of high-budget films?
# In[399]:
answers['16'] = 'Matt Damon +'
# "high-budget" here means budget above the dataset mean
b1 = data[(data.budget > data.budget.mean())][['cast', 'budget']]
b1.cast = b1.cast.apply(lambda s: s.split('|'))
b2 = b1.explode('cast')
b2.groupby(by = 'cast').budget.count().sort_values(ascending = False)
# # 17. In films of which genre did Nicolas Cage appear most often?
# In[400]:
answers['17'] = 'Action +'
n1 = data[data.cast.str.contains('Nicolas Cage', na=False)]
n1.genres = n1.genres.apply(lambda s: s.split('|'))
n2 = n1.explode('genres')
n2.groupby(by = 'genres').genres.count().sort_values(ascending = False)
# # 18. The most loss-making Paramount Pictures film
# In[401]:
answers['18'] = 'K-19: The Widowmaker (tt0267626) +'
p1 = data[data.production_companies.str.contains('Paramount Pictures', na=False)]
p1.production_companies = p1.production_companies.apply(lambda s: s.split('|'))
p2 = p1.explode('production_companies')
pivot = p2.pivot_table(columns='production_companies', index = 'original_title', \
                       values = 'profit', aggfunc='sum', fill_value=0)
pivot['Paramount Pictures'].sort_values (ascending=True)
# # 19. Which year was the most successful by total box-office revenue?
# In[402]:
answers['19'] = '2015 +'
grouped_df = data.groupby(['release_year'])['revenue'].sum().sort_values(ascending=False)
display(grouped_df)
# # 20. Which year was the most profitable for Warner Bros?
# In[403]:
answers['20'] = '2014 +'
data.production_companies = data.production_companies.apply(lambda s: s.split('|'))
w1 = data.explode('production_companies')
w2 = w1[w1.production_companies.str.contains('Warner Bros', na=False)]
grouped_df = w2.groupby(['release_year'])['profit'].sum().sort_values(ascending=False)
display(grouped_df)
# # 21. In which month, summed over all years, were the most films released?
# In[404]:
answers['21'] = 'Сентябрь +'
data['release_date_1'] = pd.to_datetime(data['release_date'])
data['month'] = data['release_date_1'].map(lambda x: x.month)
month_df= data.pivot_table(columns = 'month', index = 'original_title', \
                           values = 'revenue', aggfunc = 'count', fill_value=0)
month_df.sum().sort_values(ascending=False)
# # 22. How many films in total were released in summer (June, July, August)?
# In[405]:
answers['22'] = '450 +'
data['release_date_1'] = pd.to_datetime(data['release_date'])
data['month'] = data['release_date_1'].map(lambda x: x.month)
count = 0
for i in data ['month']:
    if 6 <= i <= 8:
        count+=1
print (count)
# # 23. For which director is winter the most productive season?
# In[406]:
answers['23'] = 'Peter Jackson +'
data['release_date_1'] = pd.to_datetime(data['release_date'])
data['month'] = data['release_date_1'].map(lambda x: x.month)
data.director = data.director.apply(lambda s: s.split('|'))
data_1 = data.explode('director')
# NOTE(review): month is an int column but isin(['12', '1', '2']) compares
# against strings — this likely selects nothing; verify and use ints.
pivot = data_1.loc[data_1['month'].isin(['12', '1', '2'])].pivot_table(columns = 'director', \
         index = 'original_title', values = 'revenue', aggfunc = 'count', fill_value=0)
pivot.sum().sort_values(ascending=False)
# # 24. Which studio gives its films the longest titles by character count?
# In[407]:
answers['24'] = 'Four By Two Productions +'
# re-read the raw dataset because earlier cells mutated `data` in place
data = pd.read_csv('movie_bd_v5.xls',sep = '\t', encoding='koi8-r', delimiter = ',')
data['title_lenght'] = data['original_title'].map(lambda x: len(x))
data.production_companies = data.production_companies.apply(lambda s: s.split('|'))
data_2 = data.explode('production_companies')
grouped_df = data_2.groupby(['production_companies'])['title_lenght']\
             .max().sort_values(ascending=False)
display(grouped_df)
# # 25. Which studio's film descriptions are on average the longest (by length)?
# In[408]:
answers['25'] = 'Midnight Picture Show +'
data = pd.read_csv('movie_bd_v5.xls',sep = '\t', encoding='koi8-r', delimiter = ',')
data['overview_lenght'] = data['overview'].map(lambda x: len(x))
data.production_companies = data.production_companies.apply(lambda s: s.split('|'))
data_3 = data.explode('production_companies')
pivot = data_3.pivot_table(columns = 'production_companies',\
                           values = 'overview_lenght', aggfunc = 'mean', fill_value=0)
pivot.max().sort_values(ascending=False)
# # 26. Which films are in the top 1% by rating?
# by vote_average
# In[409]:
answers['26'] = 'Inside Out, The Dark Knight, 12 Years a Slave +'
answers['27'] = 'Daniel Radcliffe & Rupert Grint +'
grouped_df = data.groupby(['original_title'])['vote_average'].max()\
             .sort_values(ascending=False)
print(grouped_df.head(round(len(grouped_df)*0.01)))
# # 27. Which actors appear in the same film together most often?
#
# VARIANT 2
# # Submission
# In[410]:
# at the end you can review your answer to every question
answers
# In[411]:
# and make sure nothing was missed)
len(answers)
# In[ ]:
# In[ ]:
|
"""
Open-source simulation toolkit built for optimization and machine learning applications.
Use one of the following imports:
* `from phi.flow import *` for NumPy mode
* `from phi.tf.flow import *` for TensorFlow mode
* `from phi.torch.flow import *` for PyTorch mode
* `from phi.jax.flow import *` for Jax mode
Project homepage: https://github.com/tum-pbs/PhiFlow
Documentation overview: https://tum-pbs.github.io/PhiFlow
PyPI: https://pypi.org/project/phiflow/
"""
import os as _os

# Read the package version from the VERSION file shipped next to this module.
# FIX: strip the trailing newline/whitespace so `__version__` is a clean
# comparable string (e.g. '2.4.0', not '2.4.0\n').
with open(_os.path.join(_os.path.dirname(__file__), 'VERSION'), 'r') as version_file:
    __version__ = version_file.read().strip()
def verify():
    """Check the installation for potential problems and print a summary.

    To run this check without importing `phi`, use the script
    `tests/verify.py` included in the source distribution.
    """
    import sys
    from ._troubleshoot import assert_minimal_config, troubleshoot
    try:
        assert_minimal_config()
    except AssertionError as problem:
        # Minimal requirements are not met: report and stop.
        print("\n".join(problem.args), file=sys.stderr)
    else:
        print(troubleshoot())
def detect_backends() -> tuple:
    """Register every backend whose requirements are installed and return them.

    Returns:
        `tuple` of `phiml.backend.Backend` (the pure-Python fallback backend
        is excluded).
    """
    from phiml.backend._backend import init_backend
    # Try each optional ML framework; silently skip the ones not installed.
    for backend_name in ('jax', 'torch', 'tensorflow'):
        try:
            init_backend(backend_name)
        except ImportError:
            pass
    from phiml.backend import BACKENDS
    return tuple(b for b in BACKENDS if b.name != 'Python')
def set_logging_level(level='debug'):
    """Set the verbosity of PhiFlow's logger.

    Args:
        level: one of `'critical', 'fatal', 'error', 'warning', 'info', 'debug'`
            (case-insensitive).
    """
    from phiml.backend import ML_LOGGER
    normalized = level.upper()
    ML_LOGGER.setLevel(normalized)
|
import numpy as np
import cv2

# Blend two photos side by side: img1 on the left, img2 pasted starting at
# column k, cross-faded where the two overlap.
img1 = cv2.imread('bus1.jpg')
img2 = cv2.imread('bus2.jpg')

gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
# BUG FIX: gray2 was converted from img1 (copy-paste error); use img2.
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

x, y, z = img1.shape
x2, y2, z2 = img2.shape

# Canvas tall enough for either image, wide enough for both.
out = np.zeros((max([x, x2]), y + y2, 3), dtype='uint8')

# Copy img1 into the left part of the canvas.
# BUG FIX: the original per-pixel loops ran to x-1/y-1 and silently dropped
# the last row and column; slicing copies the full image (and is far faster).
out[:x, :y] = img1

k = 500
# img2 columns that still fall inside img1's width get a 33/67 cross-fade;
# the rest are img2 dimmed to 98%.
overlap = min(max(y - k, 0), y2)
if overlap > 0:
    blended = img2[:x2, :overlap] * 0.33 + img1[:x2, k:k + overlap] * 0.67
    out[:x2, k:k + overlap] = blended.astype('uint8')
if overlap < y2:
    out[:x2, k + overlap:k + y2] = (img2[:x2, overlap:] * 0.98).astype('uint8')

cv2.imshow('img', out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
from . import generate_bc
from . import build |
# Generated by Django 2.1.2 on 2018-12-23 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `systemoptions` app.

    Switches the port/phone columns to DecimalField and makes the two
    notification flags nullable booleans with defaults.
    """

    dependencies = [
        ('systemoptions', '0001_initial'),
    ]

    operations = [
        # SMTP port: up to 5 digits, no decimals.
        migrations.AlterField(
            model_name='emailwebservice',
            name='email_port',
            field=models.DecimalField(decimal_places=0, max_digits=5, verbose_name='EMAIL_PORT'),
        ),
        # Manager phone number stored as a 10-digit decimal.
        migrations.AlterField(
            model_name='phonestaff',
            name='phone_manager',
            field=models.DecimalField(decimal_places=0, max_digits=10, verbose_name='Телефонний номер менеджера:'),
        ),
        # Toggle: send order e-mails to all selected addresses.
        migrations.AlterField(
            model_name='systemoptions',
            name='email_send',
            field=models.BooleanField(blank=True, default=True, help_text='Якщо відміченно, то на всі Email адреси, що виділенні нижче будуть вілісланні електронні листи з інформацією про заказ', null=True, verbose_name='Розсилати електронні листи при отриманні заказу?'),
        ),
        # Toggle: send order SMS notifications (paid service).
        migrations.AlterField(
            model_name='systemoptions',
            name='phone_send',
            field=models.BooleanField(blank=True, default=True, help_text='Якщо відміченно, то на всі телефонні номери, що виділенні нижче будуть вілісланні СМС про новий заказУВАГА! По СМС неможливо відправити повну інформацію про заказ: тому рекомендується використовувавти в парі з Email інформуванням', null=True, verbose_name='Розсилати СМС при отриманні заказу(послуга платна)?'),
        ),
    ]
|
############################################################
#
# poser2egg.py - Egg File Exporter for Poser Pro
#
# Version 0.1 - Current only exports vertex positions
# and normals. joints are exported, but there are still some
# issues. It currently doesnt export UV coords, textures,
# materials, morphs, weights or animation data, but i have
# plans for all of those.
#
# Run this script inside the Poser in the PoserPython
# interpreter. You should also have Panda3d installed
# on the system. I have only tested it on windows vista
# with Panda3D 1.6.2
# Author: satori(http://www.panda3d.org/forums/profile.php?mode=viewprofile&u=3839), v 0.1
# Author: is_blackhole
#
############################################################
import poser
import os
from utils import *
from egg import EggObject
class Poser2Egg():
    """Exports the currently selected Poser figure to a Panda3D .egg file.

    Runs inside Poser's PoserPython (Python 2) interpreter; `poser` is the
    host-provided module.
    """
    # Overwrite existing output without asking.
    SKIP_OVERWRITE = True
    # Post-process the .egg with egg-trans to recompute vertex normals.
    RECOMPUTE_NORMALS = False
    # Placeholder for tangent/binormal computation (currently unused here).
    COMPUTE_TBN = False
    def export(self):
        """Ask for an output path and write mesh + animation egg files."""
        # get selected figure
        figure = poser.Scene().CurrentFigure()
        #figure = poser.Scene().CurrentActor()
        body_part = False
        assert figure, 'No currently selected figure!'
        figureName = fix_name(figure.Name())
        abort = False
        # Native Poser save-file dialog, pre-filled with the figure name.
        getSaveFile = poser.DialogFileChooser(2, 0, "Save Egg File", figureName, '', '*.egg')
        getSaveFile.Show()
        fileName = getSaveFile.Path()
        if os.path.exists(fileName) and not Poser2Egg.SKIP_OVERWRITE:
            if not poser.DialogSimple.YesNo("Overwrite " + fileName + "?"):
                abort = True
        if not abort:
            if body_part:
                # IK chains distort joint transforms; disable during export.
                ikStatusList = self.remove_ik_chains(figure)
            print 'Exporting character:', figureName, 'to', fileName
            try:
                egg_obj = EggObject(figure)
                lines = egg_obj.export()
                output = open(fileName, 'w')
                output.write("".join(lines))
                output.close()
                # write anim
                # NOTE(review): the animation is always written to "a.egg"
                # next to the mesh file — confirm this hard-coded name.
                lines = egg_obj.write_animation()
                #print lines
                output = open(os.path.join(os.path.dirname(fileName), "a.egg"), 'w')
                output.write("".join(lines))
                output.close()
            except IOError, (errno, strerror):
                print 'failed to open file', fileName, 'for writing'
                print "I/O error(%s): %s" % (errno, strerror)
            else:
                print 'finished writing data'
            if body_part:
                # Restore whatever IK state the figure had before export.
                self.restore_ik_chains(figure, ikStatusList)
            if Poser2Egg.RECOMPUTE_NORMALS:
                self.recompute_egg_normals(fileName)
    def recompute_egg_normals(self, fileName):
        """Run Panda3D's egg-trans tool to rebuild vertex normals in place."""
        print "Recompute vertex normals"
        os.chdir(os.path.dirname(fileName))
        cmdln = 'egg-trans "' + fileName + '" -nv 120 -o ' + fileName
        print cmdln
        if os.system(cmdln) == 1:
            print "Error while processing egg file!"
    def remove_ik_chains(self, figure):
        """Turn off all IK chains; return their prior on/off states."""
        ikStatusList = []
        for i in range(0, figure.NumIkChains()):
            ikStatusList.append(figure.IkStatus(i))
            figure.SetIkStatus(i, 0) # Turn off
        return ikStatusList
    def restore_ik_chains(self, figure, ikStatusList):
        """Re-apply the IK chain states captured by remove_ik_chains()."""
        for i in range(0, figure.NumIkChains()):
            figure.SetIkStatus(i, ikStatusList[i])
# This module is run as a Poser script: export the selected figure immediately.
exporter = Poser2Egg()
exporter.export()
|
# def str_append_list_join(s, n):
# l1 = []
# i = 0
# while i < n:
# l1.append(s)
# i += 1
# print(l1)
# return ''.join(l1)
# str_append_list_join('hi', 4)
# #1
# string1= "Hello Danielle"
# list=string1.split()
# if len(list[0])>len(list[1]):
# print(list[0])
# else:
# print(list[1])
#2
# nums=[1,5]
# def addfun(nums):
# nums= list(range(nums[0],nums[1]+1))
# sumation=sum(nums)
# print(sumation)
# addfun(nums)
# #3
# arr1=['dan', 'ben']
# arr2=['dan', 'andy', 'ben', 'stuart']
# arr3=[]
# def comparray(arr1,arr2):
# for person in arr1:
# for guy in arr2:
# if person==guy:
# arr3.append(guy)
# return arr3
# comparray(arr1,arr2)
# print(arr3)
#4
line="__________________"
char="B"
# print(char+line)
def charcross(char, line):
    """Print an animation of a "B" sweeping left-to-right across `line`.

    For each position i two lines are printed: the line with "B" at index i,
    and the same frame prefixed with `char`.

    BUG FIX: the original mutated `line` with str.replace, which replaces the
    *first* occurrence of a character, not the one at position i — after the
    second step the marker got stuck near the start. It also contained a dead
    `charline = char + line` assignment. The frame is now built by index.
    """
    for i in range(len(line)):
        frame = line[:i] + "B" + line[i + 1:]
        print(frame)
        print(f"{char}{frame}")
charcross(char, line)
# def in_one(l1, l2):
# return set(l1) & set(l2) #^, |, &
# print(in_one([1,2,3],[3,4,5]))
# def find_longest_string(l):
# return max(l.split(), key=len)
# print(find_longest_string("this is hy and my nam"))
# import sys
# import time
# def pick_character(y, x, character):
# if y == x:
# return character
# elif any([y == 25 and x < y,
# y == 50 and x < y,
# y == 75 and x < y,
# y == 100 and x < y]):
# return chr(5603)
# elif x < y:
# return chr(8226)
# return " "
# def move(character, size, speed):
# for x in range(size):
# line = ["\b" * size, *[pick_character(y, x, character) for y in range(size)]]
# sys.stdout.write("".join(line))
# sys.stdout.flush()
# time.sleep(speed)
# sys.stdout.write("".join(["\b" * size]))
# sys.stdout.flush()
# move(chr(5607), 101, 0.1) |
# Read an integer pair: x (free variable for the expression) and k (expected value).
x,k = input().split()
x,k = int(x),int(k)
# The polynomial/expression in x to evaluate, e.g. "x**3 + x**2 + x + 1".
p = input()
# SECURITY NOTE: eval() executes arbitrary code; acceptable only for trusted
# judge input (this is a programming-exercise script), never for real users.
if(eval(p)==k):
    print(True)
else:
    print(False)
import random
import sys

# Write 1000 lines of random "r,g,b" triples (floats in [0, 255)) to the file
# named on the command line.
file_name = sys.argv[1]
# FIX: hoist the function alias out of the loop (it was re-bound every
# iteration) and use a context manager so the file is closed even on error.
rand = random.random
with open(file_name, 'w') as f:
    for i in range(1000):
        tmp_str = "%s,%s,%s\n" % (str(rand() * 255), str(rand() * 255), str(rand() * 255))
        f.write(tmp_str)
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import os
import cpplint
import re
USE_PYTHON3 = True
# memcpy does not handle overlapping memory regions. Even though this
# is well documented it seems to be used in error quite often. To avoid
# problems we disallow the direct use of memcpy. The exceptions are in
# third-party code and in platform/globals.h which uses it to implement
# bit_cast and bit_copy.
def CheckMemcpy(filename):
    """Return 1 (and print an error) if `filename` uses memcpy without NOLINT.

    Files under third_party and platform/globals.h (which implements bit_cast
    / bit_copy) are exempt. A `NOLINT` marker on the same line opts out.
    """
    if filename.endswith(os.path.join('platform', 'globals.h')) or \
       filename.find('third_party') != -1:
        return 0
    # FIX: the file handle was leaked; use a context manager.
    with open(filename, 'r') as fh:
        content = fh.read()
    match = re.search('\\bmemcpy\\b', content)
    if match:
        offset = match.start()
        # FIX: content.index() raised ValueError when the match was on the
        # last line of a file without a trailing newline.
        end_of_line = content.find('\n', offset)
        if end_of_line == -1:
            end_of_line = len(content)
        # We allow explicit use of memcpy with an opt-in via NOLINT
        if 'NOLINT' not in content[offset:end_of_line]:
            line_number = content[0:match.start()].count('\n') + 1
            print("%s:%d: use of memcpy is forbidden" % (filename, line_number))
            return 1
    return 0
def RunLint(input_api, output_api):
    """Run cpplint and the memcpy check over the changed C++ files.

    Returns a single PresubmitError if any file fails either check,
    otherwise an empty list.
    """
    cpplint._cpplint_state.ResetErrorCounts()
    memcpy_errors = 0
    for changed_file in input_api.AffectedTextFiles():
        path = changed_file.AbsoluteLocalPath()
        # .pbzero.h headers are emitted by the protozero compiler; cpplint
        # complains about their #ifndef style and we cannot fix them.
        lintable_header = path.endswith('.h') and not path.endswith('.pbzero.h')
        if not (path.endswith('.cc') or lintable_header):
            continue
        cpplint.ProcessFile(path, 1)
        memcpy_errors += CheckMemcpy(path)
    failed = cpplint._cpplint_state.error_count > 0 or memcpy_errors > 0
    return [output_api.PresubmitError('Failed cpplint check.')] if failed else []
def CheckGn(input_api, output_api):
    """Delegate GN formatting validation to the canned presubmit check."""
    canned = input_api.canned_checks
    return canned.CheckGNFormatted(input_api, output_api)
def CheckFormatted(input_api, output_api):
    """Check patch formatting, escalating every warning to a hard error."""

    def escalate(result):
        # Fatal results are already errors; pass them through unchanged.
        if result.fatal:
            return result
        payload = result.json_format()
        return output_api.PresubmitError(
            message=payload['message'],
            items=payload['items'],
            long_text=payload['long_text'])

    raw_results = input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
    return [escalate(r) for r in raw_results]
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point run on `git cl upload`: lint + GN + formatting."""
    results = []
    results += RunLint(input_api, output_api)
    results += CheckGn(input_api, output_api)
    results += CheckFormatted(input_api, output_api)
    return results
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point run on commit: lint + GN + formatting."""
    results = []
    results += RunLint(input_api, output_api)
    results += CheckGn(input_api, output_api)
    results += CheckFormatted(input_api, output_api)
    return results
|
#!venv/bin/python3
# TODO: DOCUMENTATION!! && TESTS
from flask import (
Flask,
render_template,
jsonify
)
from retrieve_tweets import get_tweets
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os
# Create the application instance
app = Flask(__name__, template_folder="templates",
static_folder="../build", static_url_path='/home')
@app.route("/")
def index():
    """Serve the built front-end's landing page from the static folder."""
    return app.send_static_file('home.html')
@app.errorhandler(404)
def not_found(e):
    """404 handler: serve index.html instead of an error page.

    Presumably the front-end does client-side routing and resolves the
    path itself — confirm against the build in ../build.
    """
    return app.send_static_file('index.html')
# Create a URL route in our application for "/"
@app.route('/request/location=<string:location>&keywords=<string:keyword>&languages=<string:languages>', methods=['GET'])
def home(location, keyword, languages):
    """
    This function responds to the browser URL
    localhost:5000/request/<location>/<keyword%20keyword%20keyword>
    where <______> represents a parameter passed to url
    and %20 is a delimeter splitting keywords and hashtags

    Returns the JSON-serialized tweet results with status 200, or an empty
    body with status 500 when nothing was found.
    """
    keywords = keyword.split("%20")
    location = [float(coordinate) for coordinate in location.split(",")]
    languages = languages.split(",")
    # PERF FIX: the Keras model was re-loaded from disk on *every* request;
    # cache it on the function after the first load.
    if not hasattr(home, "_model"):
        home._model = keras.models.load_model("my_model")
    response = get_tweets(keywords, languages, location, home._model)
    # NOTE(review): 500 for "no results" conflates an empty answer with a
    # server error — consider 200 with an empty list or 404.
    if len(response) > 0:
        status_code = 200
    else:
        status_code = 500
    return jsonify(response), status_code
# If we're running in stand alone mode, run the application
if __name__ == '__main__':
    # BUG FIX: environment variables are strings; when PORT was set,
    # app.run() received a str instead of an int. Convert explicitly.
    app.run(host='0.0.0.0', debug=False, port=int(os.environ.get('PORT', 80)))
|
from __future__ import print_function
__author__ = 'VHLAND002'
import llvmlite.binding as llvm
import ir_ula
import sys
# Initialize LLVM itself plus native target and assembly-printer support;
# required before any target machine can be created.
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
# The input source file name comes from the command line.
name = sys.argv[1]
# Lower the parsed program to LLVM IR text via the project parser.
llvm_ir = str(ir_ula.buildIRCode(name, False))
# creates the execution engine
def create_execution_engine():
    """Create an MCJIT execution engine targeting the host machine.

    The engine is created around an empty backing module; real modules are
    attached later via compile_ir().
    """
    host_target = llvm.Target.from_default_triple()
    machine = host_target.create_target_machine()
    empty_module = llvm.parse_assembly("")
    return llvm.create_mcjit_compiler(empty_module, machine)
# where we compile the ir code
def compile_ir(engine, llvm_ir):
# we create a module from the str( parser output)
mod = llvm.parse_assembly(llvm_ir)
mod.verify()
# we add the module
engine.add_module(mod)
engine.finalize_object()
return mod
# Build the engine, JIT-compile the parser's IR, and emit native assembly.
engine = create_execution_engine()
mod = compile_ir(engine, llvm_ir)
target = llvm.Target.from_default_triple()
target_machine = target.create_target_machine()
# Generate the host assembly for the compiled module.
code = target_machine.emit_assembly(mod)
# FIX: use a context manager so the handle is closed even if the write
# fails, and stop shadowing the `file` builtin.
asm_path = name[0:-4] + '.asm'
with open(asm_path, 'w') as asm_file:
    asm_file.write(code)
|
from django.apps import AppConfig
class TreatmentSheetsConfig(AppConfig):
    """Django application configuration for the treatment_sheets app."""
    name = 'treatment_sheets'
|
# -*- coding: utf-8 -*-
"""
Perceptron Script
activation = sum(weight_i * x_i) + bias
bias = weights [0]
prediction = 1.0 if activation > 0.0 else 0.0
This is the treshold activation
"""
# Make a prediction with weights. If the weights are wrong the prediction will be wrong.
def predict(Row, weights):
    """Score dataset[Row] with the given weights and threshold the result.

    weights[0] is the bias; weights[i + 1] pairs with feature column i of the
    module-level `dataset`. Returns 1.0 when the activation is positive,
    otherwise 0.0 (step/threshold activation).
    """
    activation = weights[0] + sum(
        weights[i + 1] * dataset[Row][i] for i in range(numColumns)
    )
    return 1.0 if activation > 0.0 else 0.0
#dataset can be set to anything or read from file
# Each row is one training example with two features in [0, 1].
dataset = [[0.1, 0.9],
           [0.2, 0.8],
           [0.3, 0.75],
           [0.5, 0.75],
           [0.7, 0.65],
           [0.8, 0.6],
           [0.9, 0.7],
           [0.1, 0.05],
           [0.2, 0.1],
           [0.3, 0.15],
           [0.4, 0.2],
           [0.5, 0.3],
           [0.6, 0.55]]
# Expected labels, one per dataset row.
# NOTE(review): `output` has 14 entries but `dataset` has 13 rows — the last
# label is never used; confirm whether a row is missing or a label is extra.
output = [1,1,1,1,1,1,1,0,0,0,0,0,0,0]
# Weights need one more entry than the dataset has columns (index 0 is the bias).
# Since the threshold activation outputs 0 or 1 (mean 0.5), starting the
# weights at 0.5 may work better than starting them at zero.
weights = [.5, .5, 0.5]
# Number of rows in the dataset.
numRows = len(dataset)
# Number of feature columns in the dataset.
numColumns= len(dataset[0])
# Run the (fixed-weight) perceptron over every row and compare with the labels.
for Row in range(numRows):
    prediction = predict(Row, weights)
    print("Expected=%d, Predicted=%d" % (output[Row], prediction))
from django.contrib import admin
from .models import Post
class AuthorAdmin(admin.ModelAdmin):
    """Admin customization for Post: show title, url and author in the list view."""
    list_display = ("title", "url", "author")

# Register Post with the customized admin class.
admin.site.register(Post, AuthorAdmin)
|
#!/usr/bin/python3
"""
cli
===
Helpful functions for dealing with the command line.
"""
import argparse
import cmd
import logging
import os
import string
# Either this will create a new 'garden' logger, or it will inherit the
# pre-existing one from the importing script.
_logger = logging.getLogger('garden')
def read_until_valid(prompt, valid_inputs=None, lmbda=None):
    """Prompt repeatedly until an acceptable value is entered.

    The lambda is applied before validation, so be aware of any type
    transformation it performs. Exceptions outside the lambda call (such as
    KeyboardInterrupt) are the caller's responsibility.

    :arg str prompt: Prompt to display.
    :kwarg ``Iterable`` valid_inputs: Acceptable inputs. If none are
        provided, the first non-exceptional value entered is returned.
    :arg ``func`` lmbda: Function applied to each received input. Any error
        it raises results in a re-prompt.
    """
    while True:
        answer = input(prompt).strip(string.whitespace)
        if lmbda is not None:
            try:
                answer = lmbda(answer)
            except Exception as err:
                # Any lambda failure is treated as bad input: warn and retry.
                _logger.warning(err)
                continue
        if valid_inputs is None or answer in valid_inputs:
            return answer
def assemble_subparsers(groups, module_registry):
"""Aggregates entrypoints under a single ArgumentParser.
Example CLI call:
garden <action> <module> [args...]
The alternative would be to expect each entrypoint module to provide an
ArgumentParser, and to handle sub-commands ourselves.
"""
parser = argparse.ArgumentParser(prog='garden')
for group in groups: # For each sub-command, like 'bump'
# Create subparser for registration: <action>
subparsers = parser.add_subparsers(title=subgroup)
# For each implementing module
for k, v in module_registry.get(group, []):
# Which module will we be deferring to: <module>
module_parser = subparsers.add_parser(k)
# Consume all arguments remaining for passthrough: [args...]
module_parser.add_argument('args', nargs=argparse.REMAINDER)
# Set default to registered module's entrypoint function
module_parser.set_defaults(func=v.load())
return parser
class GardenShell(cmd.Cmd):
    """Interactive shell into Garden."""
    # Greeting shown at startup; picks up the invoking user's login name.
    intro = 'Welcome, {user}'.format(user=os.getenv('USER'))
    # Shell prompt string shown before each command.
    prompt = '[garden]$ '
    def do_about(self, arg):
        """Print the 'about' statement."""
        # Logs this module's docstring as the about text.
        _logger.info(__doc__)
|
'''
Created on 2016-12-28

@author: admin
'''
import fileinput, random
# Read every line from the files named on the command line (or from stdin)
# and print one at random, fortune-cookie style.
fortunes = list(fileinput.input())
print(random.choice(fortunes))
#!/usr/bin/env python
# a bar plot with errorbars
import barPlot_general
if __name__ == "__main__":
    # Mean accuracies for ReLU vs. "nrelu" at three dataset sizes,
    # for hidden widths 16 / 32 / 64.
    relu16 = (39.716,46.034,56.702)
    nrelu16= (45.46,49.12,58.432)
    # Corresponding standard deviations (error bars).
    relu16_std = (1.01954892,0.7891957932,0.3577289477)
    nrelu16_std = (1.396477712,1.138441918,0.8578869389)
    relu32 = (46.97,58.118,63.656)
    nrelu32= (49.998,60.208,63.926)
    relu32_std = (2.004420116,1.009291831, 0.9342537129)
    nrelu32_std = (0.8311257426, 1.397343909, 1.681615295)
    relu64 = (57.672, 63.184, 62.21)
    nrelu64= (59.748, 63.63, 62.354)
    relu64_std = (1.278503031, 1.600025, 1.29703508)
    nrelu64_std = (1.602473089, 0.9637167634, 1.204753087)
    # Concatenate the width groups into the 9-bar series the plot expects.
    res_relu = relu16 + relu32 + relu64
    res_nrelu = nrelu16 + nrelu32 + nrelu64
    err_relu = relu16_std + relu32_std + relu64_std
    err_nrelu = nrelu16_std + nrelu32_std + nrelu64_std
    # Three groups labelled by width; rendering is delegated to the helper.
    labels = barPlot_general.do_labels(3,[16,32,64])
    barPlot_general.do_plot("ISR 67", res_relu, res_nrelu, err_relu, err_nrelu, labels, height_delta=3.5, patch_density="sparse")
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: com/daml/daml_lf_1_15/daml_lf.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import daml_lf_1_pb2 as com_dot_daml_dot_daml__lf__1__15_dot_daml__lf__1__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#com/daml/daml_lf_1_15/daml_lf.proto\x12\x0c\x64\x61ml_lf_1_15\x1a%com/daml/daml_lf_1_15/daml_lf_1.proto\"m\n\x0e\x41rchivePayload\x12\x14\n\x05minor\x18\x03 \x01(\tR\x05minor\x12\x30\n\tdaml_lf_1\x18\x02 \x01(\x0b\x32\x12.daml_lf_1.PackageH\x00R\x07\x64\x61mlLf1B\x05\n\x03SumJ\x06\x08\x8fN\x10\x90NJ\x04\x08\x01\x10\x02\"x\n\x07\x41rchive\x12?\n\rhash_function\x18\x01 \x01(\x0e\x32\x1a.daml_lf_1_15.HashFunctionR\x0chashFunction\x12\x18\n\x07payload\x18\x03 \x01(\x0cR\x07payload\x12\x12\n\x04hash\x18\x04 \x01(\tR\x04hash*\x1a\n\x0cHashFunction\x12\n\n\x06SHA256\x10\x00\x42|\n\x15\x63om.daml.daml_lf_1_15ZDgithub.com/digital-asset/dazl-client/v7/go/api/com/daml/daml_lf_1_15\xaa\x02\x1c\x43om.Daml.Daml_Lf_1_15.DamlLfb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'com.daml.daml_lf_1_15.daml_lf_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\025com.daml.daml_lf_1_15ZDgithub.com/digital-asset/dazl-client/v7/go/api/com/daml/daml_lf_1_15\252\002\034Com.Daml.Daml_Lf_1_15.DamlLf'
_HASHFUNCTION._serialized_start=325
_HASHFUNCTION._serialized_end=351
_ARCHIVEPAYLOAD._serialized_start=92
_ARCHIVEPAYLOAD._serialized_end=201
_ARCHIVE._serialized_start=203
_ARCHIVE._serialized_end=323
# @@protoc_insertion_point(module_scope)
|
from django.db import models
from api.constants import POSTGRES_ADAPTER
class Connection(models.Model):
    """Stored credentials/settings for an external database connection.

    Defaults target a local PostgreSQL instance (adapter + port 5432).
    Field definitions are migration-sensitive; change with care.
    """
    db_adapter = models.CharField(max_length=256, null=False, default=POSTGRES_ADAPTER)
    host = models.CharField(max_length=64, null=False, default='localhost')
    port = models.IntegerField(default=5432)
    username = models.CharField(max_length=256, null=False)
    # NOTE(review): the password is stored in plain text — consider
    # encrypting it or moving it to a secrets store.
    password = models.CharField(max_length=256, null=False)
    db_name = models.CharField(max_length=256, null=False)
|
from bs4 import BeautifulSoup
import requests
import lxml
import time
class Document:
    """Lightweight container pairing a page URL with its heading text."""

    def __init__(self, url, header):
        # The heading is exposed under the `heading` attribute even though
        # the constructor argument is named `header` (public interface kept).
        self.heading = header
        self.url = url
|
import numpy as np
import random
import pickle
import os
# Data Reader class. Pass in data path with all patient files
# Provides methods to get certain data
class DataReader:
    """Load one patient's EEG recordings and slice them into fixed windows.

    Reads ``x_data<pid>.npy`` / ``y_data<pid>.npy`` plus a pickled list of
    hour-interval boundaries from ``<data_path>/patient<pid>/``.  After
    construction ``x_data`` has shape (windows, window_len, channels) and
    ``y_data`` has shape (windows, sub_windows).
    """

    def __init__(self, patient_id, data_path='input_data/'):
        self.window_len = 30 * 256        # 30 s windows at 256 Hz sampling
        self.num_channels = 1             # overwritten once the data is loaded
        self.data_path = data_path
        self.x_data = list()
        self.y_data = list()
        self.patient_id = patient_id
        self.sub_windows = 6              # label resolution inside one window
        self.sub_window_len = 5 * 256     # 5 s sub-windows at 256 Hz
        self._read_data_directory()

    def _read_data_directory(self):
        """Load the patient's arrays, window them, and finalize numpy shapes."""
        pid_num = str(self.patient_id)
        x_file = self.data_path + "/patient" + pid_num + "/x_data" + pid_num + ".npy"
        y_file = self.data_path + "/patient" + pid_num + "/y_data" + pid_num + ".npy"
        interval_file = self.data_path + "/patient" + pid_num + "/intervals.data"
        x = np.load(str(x_file))
        y = np.load(str(y_file))
        num_time_steps = x.shape[0]
        self.num_channels = x.shape[1]
        with open(interval_file, "rb") as f:
            intervals = pickle.load(f)
        # Close the last interval at the end of the recording.
        intervals.append(num_time_steps)
        self._add_windows(intervals, x, y)
        # Stack windows: (time, channels, windows) -> (windows, time, channels).
        self.x_data = np.dstack(self.x_data)
        self.y_data = np.asarray(self.y_data)
        self.x_data = np.rollaxis(self.x_data, -1)
        self.y_data = np.reshape(self.y_data, (self.x_data.shape[0], -1))

    def _add_windows(self, intervals, x, y):
        """Split each interval into windows of ``window_len`` samples.

        Each window contributes one array to ``x_data`` and ``sub_windows``
        labels to ``y_data`` (1 when any positive sample falls inside the
        corresponding sub-window).
        """
        for i in range(len(intervals) - 1):
            start = intervals[i]
            end = intervals[i + 1]
            # Bug fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
            self.num_windows = int(np.floor((end - start) / self.window_len))
            x_subset = x[start:end, :]
            y_subset = y[start:end]
            for j in range(self.num_windows):
                window_x = x_subset[j * self.window_len:(j + 1) * self.window_len, :]
                for h in range(self.sub_windows):
                    lo = j * self.window_len + h * self.sub_window_len
                    hi = j * self.window_len + (h + 1) * self.sub_window_len
                    window_y = int(max(y_subset[lo:hi]))
                    self.y_data.append(window_y)
                self.x_data.append(window_x)
class PatientInfo:
    """Container for one patient's loaded data.

    Bug fix: the constructor previously discarded every argument and set
    ``xdata`` to an empty string; it now stores what it is given.
    """

    def __init__(self, pid, xdata, ydata, seizure_intervals):
        self.pid = pid
        self.xdata = xdata
        self.ydata = ydata
        self.seizure_intervals = seizure_intervals
|
# File: lab10.py
# Author: Joel Okpara
# Date: 4/18/16
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: YOUR DESCRIPTION GOES HERE AND HERE
# YOUR DESCRIPTION CONTINUED SOME MORE
def convertToDict(fileContents):
    """Parse lines of the form ``name,abbreviation`` into a dict."""
    pairs = (entry.strip().split(",") for entry in fileContents)
    return {state: abbreviation for state, abbreviation in pairs}
def main():
    """Interactive state-abbreviation lookup loop (type 'list' or 'exit')."""
    # Bug fix: use a context manager so the file handle is closed promptly
    # (the original left it open for the life of the program).
    with open("states.txt") as states:
        abbrev = convertToDict(states)
    userInput = ""
    while userInput != "list" and userInput != "exit":
        userInput = input("Choose a State to Abbreviate(list to get list and exit to exit): ")
        # Re-prompt until a known state or a command word is entered.
        while userInput not in abbrev and userInput != "list" and userInput != "exit":
            userInput = input("Sorry. That is not a state, pick again:")
        if userInput in abbrev and userInput != "list" and userInput != "exit":
            print("The abbreviation of the state:", userInput, "is", abbrev[userInput])
        if userInput.lower() == "list":
            print(abbrev.keys())
        if userInput.lower() == "exit":
            print("Thanks for using the state abbreviator!")


main()
|
import time
import logging
import requests
import threading
class myStrom(threading.Thread):
    """Polling thread for a single myStrom switch.

    Periodically queries the device through ``myStromSwitch`` and keeps
    running averages of power and temperature; invokes *callback* with the
    device id whenever the relay state changes.
    """

    def __init__(self, ip, id, callback, logger):
        threading.Thread.__init__(self)
        _libName = str(__name__.rsplit('.', 1)[-1])
        self._log = logging.getLogger(logger + '.' + _libName + '.' + self.__class__.__name__)
        self._log.debug('Create myStrom Thread with id: %s', id)
        self._handle = myStromSwitch(ip, logger)
        self._ip = ip
        self._id = id
        self._callback = callback        # invoked as callback(id) on relay change
        self._temperature = 0.0
        self._power = 0.0
        self._energy = 0.0               # accumulated energy (kWh) across periods
        self._state = None               # last known relay state
        self._startup = True             # True -> next update() resets the averages
        self._t0 = 0.0                   # start timestamp of the averaging period
        self._polling = 10               # poll interval in seconds

    def update(self):
        """Fetch fresh values from the device and fold them into the averages."""
        if self._handle.get_status():
            if self._startup:
                # First sample after a reset: begin a new averaging period.
                self._log.debug('new Startup Reset all counters')
                self._t0 = time.time()
                self._power = self._handle.consumption()
                self._temperature = self._handle.temperature()
                self._startup = False
            else:
                # Two-point running average with the previous value.
                self._log.debug('update counters')
                self._power = (self._power + self._handle.consumption()) / 2
                self._temperature = (self._temperature + self._handle.temperature()) / 2
            self._log.debug('Counter update Power: %s, Temperautre: %s ' % (self._power, self._temperature))
            # Notify the owner only when the relay actually toggled.
            if self._state != self._handle.relay():
                self._state = self._handle.relay()
                self._callback(self._id)

    def getState(self):
        """Return the averaged readings and restart the averaging period."""
        _t1 = time.time() - self._t0
        # Energy (kWh) = average power (W) * elapsed seconds / 3600 / 1000.
        _energy = self._power * _t1 / 3600 / 1000
        self._energy = self._energy + _energy
        _data = ({'myStrom_Power': self._power, 'myStrom_Energy': self._energy, 'myStrom_Temperature': self._temperature, 'myStrom_Switch': self._state})
        self._startup = True
        return _data

    def run(self):
        """Thread body: poll the device every ``_polling`` seconds, forever."""
        while True:
            time.sleep(self._polling)
            self.update()
class myStromSwitch(object):
    """Minimal HTTP client for a myStrom switch's ``/report`` endpoint."""

    def __init__(self, host, logger):
        _libName = str(__name__.rsplit('.', 1)[-1])
        self._log = logging.getLogger(logger + '.' + _libName + '.' + self.__class__.__name__)
        self._log.debug('Create myStromSwitch with host: %s', host)
        self._host = host
        self._uri = 'http://' + host
        self._consumption = 0.0
        self._temperature = 0.0
        self._relay = None

    def get_status(self):
        """Query ``/report`` and cache power, relay state and temperature.

        Returns True on HTTP 200, the string 'UNEXPECTED RESPONSE' on any
        other status code, and False on timeout or connection error.
        """
        _state = False
        try:
            response = requests.get(self._uri + '/report', timeout=5)
            self._log.debug('Response %s', response.text)
            if response.status_code == 200:
                _response = response.json()
                _state = True
                self._consumption = float(_response["power"])
                self._relay = str(_response["relay"])
                try:
                    self._temperature = float(_response["temperature"])
                except KeyError:
                    # Older firmware has no temperature field in the report.
                    self._temperature = 0
            else:
                _state = 'UNEXPECTED RESPONSE'
        except requests.Timeout:
            # Bug fix: the original called self._debug.error(), but no _debug
            # attribute exists -> AttributeError masked the real failure.
            self._log.error('Timeout: %s', self._uri)
        except requests.exceptions.ConnectionError:
            self._log.error('Connection Error %s', self._uri)
        return (_state)

    def relay(self):
        """Last cached relay state (string) or None if never fetched."""
        return self._relay

    def consumption(self):
        """Last cached power draw in watts."""
        return self._consumption

    def temperature(self):
        """Last cached device temperature (0 when unsupported)."""
        return self._temperature
|
from itertools import combinations
# Probability that the letter 'a' appears in a k-combination of the letters.
_ = int(input())                      # declared letter count (not needed; the list carries its own length)
letters = input().split()
group_size = int(input())
all_groups = list(combinations(letters, group_size))
hits = sum(1 for group in all_groups if 'a' in group)
print(hits / len(all_groups))
|
from openpyxl import *
#define a function that compute the interpolation between two values
def interpolate(x1, x2, y1, y2, x_inputss):
    """Linear interpolation between (x1, y1) and (x2, y2), evaluated at x_inputss."""
    rise = y1 - y2
    run = x1 - x2
    # Same operation order as the point-slope form: rise*(x - x2)/run + y2.
    return rise * (x_inputss - x2) / run + y2
#define a funcition that gives back distribution losses
def distributionLosses(input_duct, input_insulation, input_leakage, input_conditioning, input_stories, input_load):
    """
    this function takes as input:
    input_duct: Duct location "Attic"/"Basement"/"Crawlspace"/"Conditioned space" string
    input_insulation: Insulation [m^2*K/W]: float
    input_leakage: Supply/return leakage: 5/11 integer
    input_conditioning: Conditioning "C" / "H/F" / "H/HP" string
    input_stories: Number of stories: integer
    input_load: Total building load [W]: float
    and gives as output power losses[W]
    return float

    NOTE(review): Python 2 only (uses `long` and list-returning dict.keys());
    also relies on the legacy openpyxl API (sheet.rows / sheet.columns indexing,
    get_sheet_by_name) — confirm the pinned openpyxl version before porting.
    """
    # We need the Fc coefficient (called coeff), read from ASHRAE ch. 17 table 6,
    # shipped as an Excel file next to this script.
    ExcelFile = load_workbook("table6.xlsx");
    # Choose the sheet holding the duct-loss table.
    WindowData = ExcelFile.get_sheet_by_name("Typical_Duct_Loss");
    # Header row with the "stories" values, starting at cell C2.
    stories_cells = WindowData.rows[1][2:];
    # treshold collects the distinct insulation resistances seen in the table
    # (dict keys used as a uniqueness trick; values are placeholders).
    treshold = {};
    # Nested dict of table values, accessed as
    # duct_losses[stories][leakage][insulation][duct][conditioning] -> Fc.
    duct_losses = {};
    # Per the table, ducts in conditioned space have zero distribution loss.
    if (input_duct == "Conditioned space"):
        return 0.0;
    # Scan the table column by column.
    for cell in stories_cells:
        # Column index ties together the per-column header values.
        column_index = stories_cells.index(cell);
        stories = int(cell.value)
        leakage = int(WindowData.rows[2][2+column_index].value)
        insulation = float(WindowData.rows[3][2+column_index].value)
        # Record this resistance value (key uniqueness gives distinct values).
        treshold[insulation] = "word";
        # Remaining cells of the column hold the duct loss factors.
        col = WindowData.columns[2+column_index][4:];
        # Walk the rows of this column.
        for row in col:
            row_index = col.index(row);
            # Duct location and operating condition labels for this row.
            duct = WindowData.columns[0][4+row_index].value.encode('utf-8')
            conditioning = WindowData.columns[1][4+row_index].value.encode('utf-8')
            # Lazily create each nesting level on first sight of its key.
            if (not stories in duct_losses):
                duct_losses[stories] = {}
            if (not leakage in duct_losses[stories]):
                duct_losses[stories][leakage] = {}
            if (not insulation in duct_losses[stories][leakage]):
                duct_losses[stories][leakage][insulation] = {}
            if (not duct in duct_losses[stories][leakage][insulation]):
                duct_losses[stories][leakage][insulation][duct] = {}
            if (not conditioning in duct_losses[stories][leakage][insulation][duct]):
                duct_losses[stories][leakage][insulation][duct][conditioning] = {}
            value = WindowData.columns[2+column_index][4+row_index].value;
            # Coerce the loss factor to float whether it arrived as long or text.
            if (isinstance(value, long )):
                duct_losses[stories][leakage][insulation][duct][conditioning] = float(value)
            else:
                duct_losses[stories][leakage][insulation][duct][conditioning] = float(value.encode('utf-8'))
    # Interpolate resistance -> loss factor between the listed resistances.
    # (Python 2: keys() returns an indexable list here.)
    treshold = treshold.keys()
    # Clamp the requested resistance into the table's [min, max] range so the
    # scan below always finds a bracketing pair.
    input_insulation = max(treshold[0], min(input_insulation, treshold[len(treshold)-1]))
    for i in range(0,len(treshold)-1):
        if(input_insulation <= treshold[i+1] and input_insulation >= treshold[i]):
            left = duct_losses[input_stories][input_leakage][treshold[i]][input_duct][input_conditioning];
            right = duct_losses[input_stories][input_leakage][treshold[i+1]][input_duct][input_conditioning];
            coeff=interpolate( treshold[i], treshold[i+1], left, right, input_insulation)
    # Distribution loss in watts.
    return coeff * input_load;
#validation with the ahrae values should gives 0.13 and 0.27 for ""Attic", 1.4 , 5, "H/F", 1, 1" and ""Attic", 1.4 , 5, "C", 1, 1"
# Smoke test against ASHRAE reference values: expect ~0.13 for "H/F" and
# ~0.27 for "C" with ("Attic", 1.4, 5, ..., 1, 1). (Python 2 print syntax.)
c = distributionLosses("Attic", 1.2 , 5, "H/F", 1, 1);
print c;
d = distributionLosses("Attic", 1.2 , 5, "C", 1, 1);
print d;
|
# Generated by Django 2.2.4 on 2020-01-17 07:33
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames Product.cart to Product.save."""
    # NOTE(review): a field named 'save' collides with Model.save() at the
    # class level — confirm this rename was intentional.

    dependencies = [
        ('shop', '0007_auto_20200116_2219'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='cart',
            new_name='save',
        ),
    ]
|
#!/usr/bin/python3
import os
import sqlite3
import subprocess
def init(path, randomize, file_challenge_name=None):
    """Initialize the challenge: build the SQLite DB, then write the secret."""
    init_db(path, file_challenge_name=file_challenge_name, randomize=randomize)
    init_secret(path, randomize=randomize)
def init_db(path, file_challenge_name, randomize):
    """Create and seed the IDOR challenge SQLite database.

    When *file_challenge_name* is given, the DB is created next to the
    challenge file and the challenge source is rewritten to point at it;
    otherwise the DB lives at /tmp/idor.db.  *randomize* is embedded in the
    seeded account's description as the flag.
    """
    db = os.path.join(os.path.sep, "tmp", "idor.db")
    if file_challenge_name:
        db = os.path.join(path, "broken_idor.db")
        file_challenge_path = os.path.join(path, file_challenge_name)
        with open(file_challenge_path, "r") as chall:
            file_chall_content = chall.read()
        new_file_chall_content = file_chall_content.replace("/tmp/idor.db", db)
        with open(file_challenge_path, "w") as chall:
            chall.write(new_file_chall_content)
    conn = sqlite3.connect(db)
    cur = conn.cursor()
    cur.execute("DROP TABLE IF EXISTS accounts")
    conn.commit()
    cur.execute("""CREATE TABLE accounts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    username TEXT NOT NULL UNIQUE,
    token TEXT NOT NULL UNIQUE,
    balance INT NOT NULL DEFAULT 100,
    description TEXT NOT NULL DEFAULT '')""")
    conn.commit()
    token = 'JqcY6oUYCiVtvyfyN7r6z461hjhG!r7SzfnndZDYvuzicSmAyaVvr6RFlZZhEorS'
    cur.execute(
        "INSERT INTO accounts(username, token, balance, description) VALUES(?, ?, ?, ?)",
        ('586b652384404', token, 1337, 'The secret is {}'.format(randomize))
    )
    conn.commit()
    conn.close()
    # Security fix: argument-list subprocess calls avoid shell interpretation
    # of the db path (os.system with string concatenation was injectable).
    subprocess.run(['chown', 'idor:idor', db])
    subprocess.run(['chmod', '640', db])
def init_secret(path, randomize):
    """Write the challenge secret *randomize* to ``<path>/secret``."""
    secret_path = os.path.join(path, 'secret')
    with open(secret_path, "w") as secret_file:
        secret_file.write(randomize)
|
import collections
import time
import cv2
import numpy as np
# Select the pose-estimation model; MPI has 15 keypoints, COCO has 18.
MODE = "MPI"

# Bug fix: the original compared strings with "is" (identity), which only
# worked by accident of CPython string interning; "==" is the correct test.
if MODE == "COCO":
    protoFile = "D:\IDA\PROJECT\Video_Image_Pose\coco\pose_deploy_linevec.prototxt"
    weightsFile = "D:\IDA\PROJECT\Video_Image_Pose\coco\pose_iter_440000.caffemodel"
    nPoints = 18
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
                  [11, 12], [12, 13], [0, 14], [0, 15], [14, 16], [15, 17]]
elif MODE == "MPI":
    protoFile = r"D:\IDA\PROJECT\Video_Image_Pose\mpi\pose_deploy_linevec.prototxt"
    weightsFile = r"D:\IDA\PROJECT\Video_Image_Pose\mpi\pose_iter_160000.caffemodel"
    nPoints = 15
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 14], [14, 8], [8, 9], [9, 10],
                  [14, 11], [11, 12], [12, 13]]

# Input video plus a white canvas image used for the skeleton-only rendering.
userImageInput = cv2.VideoCapture(r"D:\IDA\PROJECT\videos\input2.mp4")
frame_width = int(userImageInput.get(3))    # CAP_PROP_FRAME_WIDTH
frame_height = int(userImageInput.get(4))   # CAP_PROP_FRAME_HEIGHT
fps = int(userImageInput.get(5))            # CAP_PROP_FPS
img = r"D:\IDA\PROJECT\Video_Image_Pose\White.jpg"
frameWhite = cv2.imread(img)
frameWidth1 = frameWhite.shape[1]
frameHeight1 = frameWhite.shape[0]
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
out = cv2.VideoWriter(r'D:\IDA\PROJECT\Video_Image_Pose\Video_Pose\outputVideo\3.mp4', fourcc, 30, (frameWidth1, frameHeight1))
out1 = cv2.VideoWriter(r'D:\IDA\PROJECT\Video_Image_Pose\Video_Pose\outputVideo\4.mp4', fourcc, fps, (frame_width, frame_height))
threshold = 0.1                             # minimum keypoint confidence to accept
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
t = time.time()
inWidth = 368                               # network input size
inHeight = 368
# Per-frame loop: run the pose network, draw keypoints/limbs on the video
# frame and a white canvas, annotate limb angles, and write both outputs.
while True:
    ret, frame = userImageInput.read()
    if not ret:
        break
    # Fresh white canvas each frame for the skeleton-only rendering.
    img = r"D:\IDA\PROJECT\Video_Image_Pose\White.jpg"
    frameWhite = cv2.imread(img)
    frameWidth1 = frameWhite.shape[1]
    frameHeight1 = frameWhite.shape[0]
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
    net.setInput(inpBlob)
    output = net.forward()
    print("time taken by network : {:.3f}".format(time.time() - t))
    H = output.shape[2]
    W = output.shape[3]
    points = []     # keypoints scaled to the video frame
    points1 = []    # keypoints scaled to the white canvas (shifted right 70 px)
    for i in range(nPoints):
        # Confidence map of the corresponding body part.
        probMap = output[0, i, :, :]
        # Global maximum of the confidence map.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
        # Scale the point back onto each output image.
        x = (frameWidth * point[0]) / W
        y = (frameHeight * point[1]) / H
        x1 = (frameWidth1 * point[0]) / W
        y1 = (frameHeight1 * point[1]) / H
        if prob > threshold:
            cv2.circle(frame, (int(x), int(y)), 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
            cv2.circle(frameWhite, (int(x1) + 70, int(y1)), 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
            points.append((int(x), int(y)))
            points1.append((int(x1) + 70, int(y1)))
        else:
            # Low-confidence joints are recorded as missing.
            points.append(None)
            points1.append(None)
    # Joint index triplets used to measure each limb's bend angle.
    dictAngle = {'leftHand': [5, 6, 7], 'leftLeg': [11, 12, 13], 'rightHand': [2, 3, 4], 'rightLeg': [8, 9, 10]}
    dictAngle = collections.OrderedDict(sorted(dictAngle.items()))
    usernameDict = []
    userangleDict = []
    heightAngle = [350, 370, 390, 410]    # y-positions of the legend rows
    j = 0
    for i in dictAngle:
        dictPoint1 = np.array(points[dictAngle[i][0]])
        dictPoint2 = np.array(points[dictAngle[i][1]])
        dictPoint3 = np.array(points[dictAngle[i][2]])
        if str(dictPoint1) != 'None' and str(dictPoint2) != 'None' and str(dictPoint3) != 'None':
            # Vectors from the middle joint to its two neighbours.
            ba = dictPoint1 - dictPoint2
            bc = dictPoint3 - dictPoint2
            tup1 = points1[dictAngle[i][1]]
            # Bug fix: string comparison must use ==, not "is" (identity).
            if i == 'leftHand' or i == 'leftLeg':
                pointAngle = (tup1[0] + 15, tup1[1] + 18)
            if i == 'rightHand' or i == 'rightLeg':
                pointAngle = (tup1[0] - 50, tup1[1])
            # Angle at the middle joint via the cosine rule.
            cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
            angle = np.arccos(cosine_angle)
            ang = str(np.degrees(angle))
            angleFloat = float(ang)
            ang1 = round(angleFloat, 2)
            cv2.putText(frameWhite, "Right Side", (15, 40), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1, cv2.LINE_AA)
            cv2.putText(frameWhite, "Left Side", (500, 40), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1, cv2.LINE_AA)
            cv2.putText(frameWhite, "User-Image", (700, 420), cv2.FONT_HERSHEY_DUPLEX, 0.3, (0, 0, 0), 1, cv2.LINE_AA)
            cv2.putText(frameWhite, str(ang1), pointAngle, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(frameWhite, str(i + ": "), (15, heightAngle[j]), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1,
                        cv2.LINE_AA)
            cv2.putText(frameWhite, str(ang), (90, heightAngle[j]), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 1,
                        cv2.LINE_AA)
            usernameDict.append(i)
            userangleDict.append(ang)
        else:
            # A joint was missing: record a zero-degree placeholder.
            usernameDict.append(i)
            userangleDict.append('0')
        j += 1
    # Draw the skeleton edges on both outputs.
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if points[partA] and points[partB]:
            cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
            cv2.line(frameWhite, points1[partA], points1[partB], (0, 255, 255), 2)
    userDict = dict(zip(usernameDict, userangleDict))
    out1.write(frame)
    out.write(frameWhite)
    AngleDict = str(
        userDict['rightHand'] + "," + userDict['leftLeg'] + "," + userDict['leftHand'] + "," + userDict['rightLeg'])
|
def predate(date):
    """Return the day before *date* (a 'YYYYMMDD' string), or None when the
    input is malformed or outside the supported 1000-2019 year range.

    The result (or a Chinese-language error message) is also printed,
    preserving the original CLI behavior.
    """
    bigmonth = (1, 3, 5, 7, 8, 10, 12)     # 31-day months
    smallmonth = (4, 6, 9, 11)             # 30-day months
    monthlist = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    isleapyear = False
    before = None
    if len(date) != 8:
        print('您的日期格式输入不合法')
    else:
        year = date[0:4]
        month = date[4:6]
        day = date[6:]
        if year.isdigit() and month.isdigit() and day.isdigit():
            year = int(year)
            month = int(month)
            day = int(day)
            if 1000 <= year <= 2019:
                if 1 <= month <= 12:
                    # Leap year: divisible by 4 but not by 100, or by 400.
                    if (year % 4 == 0) and (year % 100 != 0) or (year % 400 == 0):
                        isleapyear = True
                        monthlist[2] = 29
                    if 1 <= day <= monthlist[month]:
                        if day > 1:  # not the 1st: just step the day back
                            # Bug fix: the previous day must be zero-padded
                            # too, otherwise e.g. 20190110 yielded '2019019'.
                            before = str(year) + str(month).zfill(2) + str(day - 1).zfill(2)
                            print(before)
                        else:  # the 1st: roll back to the previous month
                            if month > 1:  # not January
                                month = month - 1
                                if month in bigmonth:
                                    before = str(year) + str(month).zfill(2) + '31'
                                    print(before)
                                elif month in smallmonth:
                                    before = str(year) + str(month).zfill(2) + '30'
                                    print(before)
                                else:  # February
                                    if isleapyear:
                                        before = str(year) + '02' + '29'
                                        print(before)
                                    else:
                                        before = str(year) + '02' + '28'
                                        print(before)
                            else:  # January 1st: roll back to Dec 31 of the prior year
                                before = str(year - 1) + '12' + '31'
                                print(before)
                    else:
                        print('请输入合理的天数')
                else:
                    print('请输入在1月到12月之间的月份')
            else:
                print('请输入在1000年到2019年之间的年份')
        else:
            print('请输入正确的数字日期')
    # Returning the computed value (the function previously always returned
    # None) makes the helper usable programmatically; printing is unchanged.
    return before
if __name__ == '__main__':
    # Prompt (in Chinese) for a date between the years 1000 and 2019.
    date = input('请输入输入1000年到2019年之间的某个日期')
    predate(date)
|
from __future__ import print_function
import json
import logging
import logging.config
import os
from os import getpid, remove
from os.path import abspath, basename, dirname, splitext, join, realpath
import re
import shutil
import subprocess
import requests
import tweepy
import keys
from utils import (
post_slack,
random_string)
# Record this process's PID so supervisord can find and manage the bot.
with open('bot_supervisord.pid', 'w+') as pidfile:
    pidfile.write(str(getpid()))

# Absolute directory of this module; used to locate the mp4_to_gif.sh helper.
BASE_DIR = dirname(abspath(__file__))
def setup_logging(default_path='conf/logging_config.json', default_level=logging.INFO, env_key='LOG_CFG'):
    """Configure logging from a JSON config file.

    The config path comes from the *env_key* environment variable when set,
    otherwise *default_path*; when no such file exists, fall back to
    ``logging.basicConfig`` at *default_level*.
    """
    config_path = os.getenv(env_key) or default_path
    if not os.path.exists(config_path):
        logging.basicConfig(level=default_level)
        return
    with open(config_path, 'rt') as handle:
        logging.config.dictConfig(json.load(handle))
class DMListener(tweepy.StreamListener):
    """Stream listener that turns video/GIF tweets shared via DM into hosted
    GIFs and replies to the sender with the link."""

    @staticmethod
    def _get_api():
        """Build a Tweepy API instance from the key set named by $GBKEYS."""
        gb_keys = keys.KEYS[os.getenv('GBKEYS')]
        auth = tweepy.OAuthHandler(
            gb_keys['CONSUMER_KEY'],
            gb_keys['CONSUMER_SECRET'])
        auth.set_access_token(gb_keys['ACCESS_KEY'], gb_keys['ACCESS_SECRET'])
        api = tweepy.API(auth, wait_on_rate_limit=True)
        return api

    @staticmethod
    def save_video(video_url):
        """Download *video_url* (an MP4) to a random local filename.

        Returns:
            Filename (not path) of the saved video as a string.
        """
        logger.debug("Saving video")
        req = requests.get(video_url, stream=True)
        video_name = "{0}.mp4".format(random_string())
        with open(video_name, 'wb') as video_file:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:
                    video_file.write(chunk)
                    video_file.flush()
        return video_name

    @staticmethod
    def frames_to_gif(mp4):
        """Convert *mp4* to a GIF via mp4_to_gif.sh and delete the MP4.

        Returns:
            Full path of the newly-created GIF on the file system.
        """
        gif = splitext(basename(mp4))[0] + '.gif'
        cmd = "{0}/mp4_to_gif.sh {1} {2}".format(BASE_DIR, mp4, gif)
        subprocess.call(cmd, shell=True)
        gif_path = realpath(gif)
        remove(mp4)
        return gif_path

    @staticmethod
    def upload_gif(gif):
        """Move *gif* into the hosting directory and return its public URL."""
        moved_gif = shutil.move(gif, keys.GIF_DIR)
        gif_name, _ = splitext(basename(moved_gif))
        return "https://iseverythingstilltheworst.com/gifs/{0}".format(gif_name)

    @staticmethod
    def parse_entities(extended_entities):
        """Collect downloadable MP4 URLs from a tweet's extended entities.

        Returns a (possibly empty) list of URLs; for videos, the
        highest-bitrate MP4 variant is chosen.
        """
        logger.debug("Parsing extended entities")
        logger.debug(extended_entities)
        gifs = []
        if not extended_entities:
            # Bug fix: this previously logged the undefined name
            # `original_tweet`, raising NameError; log what we actually have.
            logger.info("Couldn't find extended entities:\n\n{}".format(extended_entities))
        else:
            for media in extended_entities['media']:
                if media.get('type') == 'animated_gif':
                    gifs.append(media['video_info']['variants'][0]['url'])
                elif media.get('type') == 'video':
                    variants = media['video_info']['variants']
                    videos = [v for v in variants if v['content_type'] == 'video/mp4']
                    # Bug fix: the old sort keyed every element on
                    # videos[0]['bitrate'] (a constant), so it just took the
                    # last variant; pick the真 highest-bitrate one instead.
                    video = max(videos, key=lambda v: v['bitrate'])
                    gifs.append(video['url'])
        return gifs

    def send_dm(self, sender_id=None, msg=None):
        """Send *msg* as a Direct Message to *sender_id*; always returns True."""
        self.api.send_direct_message(user_id=sender_id, text=msg)
        return True

    def on_connect(self):
        # The stream has its own credentials; build an API client for replies.
        self.api = self._get_api()
        logger.debug(self.api)
        logger.info('Connected to Twitter. YAY!')

    def on_event(self, event):
        """Auto-follow-back anyone who follows one of the bot accounts."""
        # Ignore events that originate from the bot's own account ids.
        if event.source['id_str'] == str(4012966701) or event.source['id_str'] == str(3206731269):
            return True
        try:
            if event.event == 'follow':
                follower = event.source['id_str']
                self.api.create_friendship(user_id=follower)
                logger.info("Followed {0}".format(follower))
            else:
                return True
        except Exception:  # narrowed from a bare except; still best-effort
            return True

    def on_direct_message(self, status):
        """Handle an incoming DM: find the linked tweet and convert its media."""
        try:
            sender = status.direct_message['sender']['id']
        except Exception as e:
            logger.critical(e)
            return True
        logging.info("Sender: {0}".format(sender))
        # Ignore DMs the bot accounts sent themselves.
        if sender == 4012966701 or sender == 3206731269:
            return True
        dm = status._json
        logging.debug(dm)
        # Bug fix: shared_id could be referenced while unbound when the URL
        # regex matched nothing (no exception was raised); initialize it.
        shared_id = None
        # The regex looks for "status/<id>" inside the first shared URL.
        try:
            shared_tweet = dm['direct_message']['entities']['urls'][0]['expanded_url']
            match = re.search('status\/(\d+)', shared_tweet)
            if match:
                shared_id = match.groups()[0]
        except Exception as e:
            logger.warning(e)
            self.send_dm(sender_id=sender, msg=keys.MGS['need_shared'])
            return True
        if shared_id:
            original_tweet = self.api.get_status(shared_id)._json
        else:
            return True
        # Make sure the original tweet actually contains media to convert.
        extended_entities = original_tweet.get('extended_entities', None)
        gifs = self.parse_entities(extended_entities)
        if not gifs:
            self.send_dm(sender_id=sender, msg=keys.MGS['no_gif'])
            return True
        # Yay, we're actually doing this!
        for gif in gifs:
            try:
                video = self.save_video(gif)
                converted_gif = self.frames_to_gif(video)
                url = self.upload_gif(converted_gif)
                msg = "I am good bot!! I made you a GIF: {}".format(url)
                self.send_dm(sender_id=sender, msg=msg)
            except Exception as e:
                logger.error(e)
                post_slack(msg=e)
        return True
def main():
    """Open the Twitter user stream using credentials from the $GBKEYS set."""
    logger.debug("Running main() using {0}".format(os.getenv('GBKEYS')))
    creds = keys.KEYS[os.getenv('GBKEYS')]
    handler = tweepy.OAuthHandler(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])
    handler.set_access_token(creds['ACCESS_KEY'], creds['ACCESS_SECRET'])
    stream = tweepy.Stream(handler, DMListener())
    stream.userstream()
if __name__ == '__main__':
    # Configure logging first so startup messages are captured.
    setup_logging()
    logger = logging.getLogger()
    logger.info("Initialized GifingBot")
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C shutdown.
        print("\n\nLater, alligator\n\n")
|
from sqlalchemy import Column
from sqlalchemy.types import DateTime, Integer, String
from sqlalchemy.sql import func
from bitcoin_acks.database.base import Base
class Logs(Base):
    """ORM model recording one HTTP request per row."""
    __tablename__ = 'logs'

    id = Column(Integer, primary_key=True)
    # Insert-time default via SQLAlchemy's func.now() expression.
    created_at = Column(DateTime, default=func.now())
    path = Column(String)         # route path
    full_path = Column(String)    # path including query string
    method = Column(String)       # HTTP verb
    ip = Column(String)           # client address
    user_agent = Column(String)
    status = Column(Integer)      # response status code
|
#!/usr/bin/env python
import argparse
import errno
import os
import os.path
import re
import sys
from subprocess import Popen, PIPE
# Command used to render plots; swap in "cat" to inspect the generated
# gnuplot script instead of executing it.
gnuplot = "gnuplot"

#
# Functions
#
def parse_args(argv=None):
    """Parse command-line options.

    Args:
        argv: optional argument list (defaults to sys.argv[1:]).  Accepting
            it is backward compatible and makes the parser testable without
            patching sys.argv.

    Returns:
        The parsed argparse.Namespace with `datadir` and `name`.
    """
    parser = argparse.ArgumentParser(
        description='process log file generated by "iostat -x -t -p {PERIOD} {COUNT}" command'
    )
    parser.add_argument(
        'datadir',
        help='where to put generated files',
    )
    parser.add_argument(
        'name',
        help='used in generated file names',
    )
    args = parser.parse_args(argv)
    return args
def mkdir_p(path):
    """Create *path* and its parents, ignoring 'already exists' like mkdir -p."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything other than "directory already exists".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def main():
    """Read `iostat -x -t` output from stdin, pivot it per metric, write one
    .dat file per metric and render each as a PNG through gnuplot.

    NOTE(review): Python 2 only — uses `print >>file` syntax throughout.
    """
    ctx = parse_args()
    # Expected input shape:
    # 07/27/15 11:59:24
    # avg-cpu:  %user   %nice %system %iowait  %steal   %idle
    #            3.95    0.00    1.24    7.85    0.00   86.96
    #
    # Device:   rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
    # sdf       0.00 1174.00 0.60 18.40 24.80 4769.60 504.67 1.53 80.42 1.33 83.00 1.56 2.96
    # ...
    time_r = re.compile('^\s*(\d\d/\d\d/\d\d \d\d:\d\d:\d\d)\s*$')
    cols_r = re.compile('^\s*Device:\s+(.+)$')
    # Only devices sda..sde and partitions sdg0..sdg9 are charted.
    data_r = re.compile('^\s*(sd[a-e]|sdg[0-9])\s+(.+)$')
    time = ''
    cols = []
    data = []
    # res[metric][timestamp][disk] = value (all strings).
    res = {}
    for line in sys.stdin:
        m = time_r.match(line)
        if m:
            time = m.group(1)
            continue
        m = cols_r.match(line)
        if m:
            cols = m.group(1).split()
        m = data_r.match(line)
        if m:
            disk = m.group(1)
            data = m.group(2).split()
            for key in cols:
                i = cols.index(key)
                if not res.get(key):
                    res[key] = {}
                if not res[key].get(time):
                    res[key][time] = {}
                res[key][time][disk] = data[i]
            continue
    mkdir_p(ctx.datadir)
    for key in res.keys():
        disks = None
        # '/' occurs in metric names like rkB/s; make the name filename-safe.
        filename = os.path.join(ctx.datadir, format('%s.%s.dat' % (ctx.name, key)).replace('/', '_'))
        f = open(filename, 'w')
        print >>f, "#", ctx.name, key
        for time in sorted(res[key].keys()):
            if not disks:
                # Emit the column header once, based on the first timestamp.
                disks = sorted(res[key][time].keys())
                print >>f, "#%s\t%s" % ('date time'.ljust(len(time) - 1), "\t".join(disks))
            line = time
            for disk in disks:
                line += "\t" + res[key][time][disk]
            print >>f, line
        f.close()
        # Pipe a script into gnuplot; each disk's curve is offset by 100 so
        # the lines do not sit on top of each other.
        plot = Popen(gnuplot, shell=True, stdin=PIPE).stdin
        output = filename + '.png'
        print >>plot, 'set term png size 1600,1200'
        print >>plot, 'set style data l'
        print >>plot, 'set grid'
        print >>plot, 'set output "%s"' % output
        print >>plot, 'set xdata time'
        print >>plot, 'set timefmt "%m/%d/%y %H:%M:%S"'
        print >>plot, 'set format x "%H:%M"'
        print >>plot, 'set xlabel "time"'
        print >>plot, 'set ylabel "%s"' % key
        print >>plot, 'set title "%s [%s]"' % (key, ctx.name)
        print >>plot, 'plot', ', '.join(['"%s" using 1:($%d + %d) title "%s"' % (filename, 3 + disks.index(disk), disks.index(disk) * 100, disk) for disk in disks])
        plot.close()
#
# Main
#
# Script entry point: processes iostat output supplied on stdin.
main()
|
import requests
# Job definition to submit to the local task API.
payload = {
    "type": "test",
    "title": "Tester",
    "task": "/home/Ron/test.sh"
}
# Identifies the calling project and authenticates the request.
headers = {
    "source": "gitlab/testproject",
    "auth": "7abcddbb2c74e4c0789c2c0aa6abcf5172e82e9f4916bc6409fc3989ed673e08"
}
response = requests.post("http://localhost:8080/api/post", json=payload, headers=headers)
print(response.status_code)
from django.contrib import admin
from .models import *
class student_dataAdmin(admin.ModelAdmin):
    """Admin configuration for the student_data model."""
    # Columns shown on the admin change-list page.
    list_display=["roll_no","name","sem","mobile","email","linkedin_url","github_url","photo","skill"]
    # Fields searched by the admin search box.
    search_fields=["roll_no","name","sem",]


admin.site.register(student_data,student_dataAdmin)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.