blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3791630f062620da26fe1853a589375020abeffb | 15d738a334fcccd98550d017db962d73deac84cf | /__init__.py | 132f67f469eaf1e0f0d57fcb69cfd7e30203be68 | [] | no_license | putaodoudou/blockchain_spider | d3d28cb8f847362f3133283ef3f7b3a12fb67104 | 476e642aee692335f64d35026e7c51fe57acb045 | refs/heads/master | 2020-04-06T18:01:24.016667 | 2018-11-06T09:19:56 | 2018-11-06T09:19:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # -*- coding: utf-8 -*-
"""
package blockchain_spider
@file: __init__.py
@time: 2018/11/4 21:52
Created by Junyi.
"""
from .spider import run_spider
# __all__ must list export *names* as strings; the original put the object
# itself in the list, which breaks `from package import *` resolution.
__all__ = ['run_spider']
__author__ = 'Junyi'
__version__ = 'v0.1'
| [
"365154018@qq.com"
] | 365154018@qq.com |
860b364888f23cea189e4ec3ce934ad91d6b9514 | c4f6a13c52ba5cdb6333f34cff4ffc0729eabeec | /TestStep/ts_send_data.py | 4b6adff8b576ce1cffe3d8e03bb56ad1dfc5e071 | [] | no_license | xhr-git/TestCase-DSL | 6d3ed613015ac9aa24cbed0367b5631a54f1addd | 3b6d54f2182b8d889af06b17589ad20eb4cb2a75 | refs/heads/main | 2023-03-03T09:51:43.079772 | 2021-02-10T08:39:24 | 2021-02-10T08:39:24 | 337,663,765 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | from core.include import *
import socket
default_client = 'host_default_client_random__'
default_port = 8000
class ts_send_data(TestStep):
    """Test step that sends data to the server over a stored client socket.

    The payload sent is the client's own name -- presumably so the server
    can identify which client spoke; TODO confirm intent against the server
    side.
    """
    # Human-readable step description; '{}' is filled with the client name.
    desc = 'send data to server, client={}'
    def __init__(self, prefix, deep, paras):
        # 'client' is optional in paras; fall back to the shared default name.
        self.clt_name = paras.get('client', default_client)
        super().__init__(desc=self.desc.format(self.clt_name),
                         prefix=prefix, deep=deep, paras=paras)
    def action(self):
        """Send the payload and close the socket; returns 0 on success."""
        data = self.clt_name
        # NOTE(review): get_del_global appears to fetch *and* remove the
        # stored socket, so it cannot be reused by a later step -- confirm.
        clt: socket.socket = self.get_del_global(self.clt_name)
        clt.send(data.encode())
        clt.close()
        self.log('send data server successfully (client={})'.format(self.clt_name))
        return 0
if __name__ == '__main__':
pass
| [
"54583823+xhr-git@users.noreply.github.com"
] | 54583823+xhr-git@users.noreply.github.com |
1429d2a5feffca674a57f94cdafe2da55d56a7e5 | 8df6d70dd7b0e9e75bbb10e0225d86a440deb91a | /SaIL/learners/network.py | ae3d9a9822964c10ad2bd3a1082f9c85f8098009 | [
"BSD-3-Clause"
] | permissive | yonetaniryo/SaIL | cae7b9f2d6de372d23c8d810f94678ca65754eb1 | c7404024c7787184c3638e9730bd185373ed0bf6 | refs/heads/master | 2022-06-09T04:26:37.698199 | 2020-05-11T04:27:39 | 2020-05-11T04:27:39 | 262,938,282 | 0 | 0 | BSD-3-Clause | 2020-05-11T04:29:18 | 2020-05-11T04:29:18 | null | UTF-8 | Python | false | false | 7,362 | py | #!/usr/bin/env python
"""Generic network class for supervised regression
Created on: March 25, 2017
Author: Mohak Bhardwaj"""
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
import numpy as np
import random
class SupervisedRegressionNetwork():
def __init__(self, params):
self.initialized=False
self.output_size = params['output_size']
self.input_size = params['input_size']
self.learning_rate = params['learning_rate']
self.batch_size = params['batch_size']
self.training_epochs = params['training_epochs']
self.display_step = params['display_step']
# self.summary_dir_train = os.path.join(os.path.abspath('saved_data/summaries'), params['summary_file']+'_train')
# self.summary_dir_test = os.path.join(os.path.abspath('saved_data/summaries'), params['summary_file']+'_test')
# print self.summary_dir_test
# print self.summary_dir_train
self.seed_val = params['seed_val']
self.input_shape = [self.input_size]
if params['mode'] == "gpu":
self.device = '/gpu:0'
else:
self.device = '/cpu:0'
def initialize(self):
if not self.initialized:
global tf
global tflearn
# import matplotlib.pyplot as plt
import tensorflow as tf
import tflearn
config = tf.ConfigProto()
config.allow_soft_placement=True
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)#, log_device_placement=True))
with tf.device(self.device):
self.graph_ops = self.init_graph()
self.init_op = tf.global_variables_initializer()
self.sess.run(self.init_op)
self.initialized=True
print('network created and initialized')
def create_network(self):
"""Constructs and initializes core network architecture"""
state_input = tf.placeholder(tf.float32, [None] + self.input_shape)
net = tflearn.fully_connected(state_input, 100, activation='relu')
net = tflearn.fully_connected(net, 50, activation ='relu')
# net = tflearn.fully_connected(net, 25, activation='relu')
output = tflearn.fully_connected(net, self.output_size, activation = 'linear')
return state_input, output
def init_graph(self):
"""Overall architecture including target network,
gradient ops etc"""
state_input, output = self.create_network()
network_params = tf.trainable_variables()
target = tf.placeholder(tf.float32, [None] + [self.output_size])
cost = tf.reduce_sum(tf.pow(output - target, 2))/(2*self.batch_size)
optimizer = tf.train.RMSPropOptimizer(learning_rate = self.learning_rate)
train_net = optimizer.minimize(cost, var_list = network_params)
saver = tf.train.Saver()
graph_operations = {"s": state_input,\
"output": output,\
"target": target,\
"cost": cost,\
"train_net": train_net,\
"network_params": network_params,\
"saver": saver}
return graph_operations
def train(self, database):
#Shuffle the database
# random.shuffle(database)
for epoch in xrange(self.training_epochs):
random.shuffle(database)
avg_cost = 0.
total_batch = int(len(database)/self.batch_size)
for i in xrange(total_batch):
batch_x, batch_y = self.get_next_batch(database, i)
#Run optimization op(backprop) and cost op(to get loss value)
_, c = self.sess.run([self.graph_ops['train_net'], self.graph_ops['cost']],\
feed_dict = {self.graph_ops['s']:batch_x,\
self.graph_ops['target']:batch_y})
#Compute Average Loss
avg_cost+= c/total_batch
#Display logs per epoch
if epoch%self.display_step == 0:
print "epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(np.sqrt(avg_cost))
print('optimization finished!')
return np.sqrt(avg_cost)
def get_loss(self, features, label):
features = features.reshape(self.input_shape)
c = self.sess.run(self.graph_ops['cost'],\
feed_dict = {self.graph_ops['s']:features,\
self.graph_ops['target']:batch_y})
return np.sqrt(c)
def get_heuristic(self, features):
features = features.reshape(self.input_shape)
# output = self.sess.run(self.graph_ops['output'], feed_dict={self.graph_ops['s']:features})
output = self.graph_ops['output'].eval(session=self.sess, feed_dict={self.graph_ops['s']:[features]})
return output
def save_params(self, file_name):
#file_path = os.path.join(os.path.abspath('saved_data/saved_models'), file_name +'.ckpt')
save_path = self.graph_ops['saver'].save(self.sess, file_name)
print("Model saved in file: %s" % file_name)
return
def load_params(self, file_name):
#file_path = os.path.join(os.path.abspath('saved_data/saved_models'), file_name +'.ckpt')
self.graph_ops['saver'].restore(self.sess, file_name)
print('Weights loaded from file %s'%file_name)
def get_params(self):
return self.graph_ops['network_params']
def set_params(self, input_params):
[self.graph_ops['network_params'].assign(input_params[i]) for i in range(len(input_params))]
def get_next_batch(self, database, i):
batch = database[i*self.batch_size: (i+1)*self.batch_size]
batch_x = np.array([_[0] for _ in batch])
batch_y = np.array([_[1] for _ in batch])
new_shape_ip = [self.batch_size] + self.input_shape
new_shape_op = [self.batch_size] + [self.output_size]
batch_x = batch_x.reshape(new_shape_ip)
batch_y = batch_y.reshape(new_shape_op)
return batch_x, batch_y
def reset(self):
self.sess.run(self.init_op)
# def save_summaries(self, vars, iter_idx, train=True):
# print('Writing summaries')
# summary_str = self.sess.run(self.summary_ops,
# feed_dict = {self.episode_stats_vars[0]: vars[0],
# self.episode_stats_vars[1]: vars[1],
# self.episode_stats_vars[2]: vars[2],
# self.episode_stats_vars[3]: vars[3],
# self.episode_stats_vars[4]: vars[4]})
# if train:
# self.train_writer.add_summary(summary_str, iter_idx)
# self.train_writer.flush()
# else:
# self.test_writer.add_summary(summary_str, iter_idx)
# self.test_writer.flush()
# def build_summaries(self):
# # variable_summaries(episode_reward)
# episode_reward = tf.Variable(0.)
# episode_expansions = tf.Variable(0.)
# episode_expansions_std = tf.Variable(0.)
# episode_accuracy = tf.Variable(0.)
# num_unsolved = tf.Variable(0)
# episode_stats_vars = [episode_reward, episode_expansions, episode_expansions_std, episode_accuracy, num_unsolved]
# episode_stats_ops = [tf.summary.scalar("Rewards", episode_reward), tf.summary.scalar("Expansions(Task Loss)", episode_expansions),tf.summary.scalar("Std. Expansions", episode_expansions_std), tf.summary.scalar("RMS(Surrogate Loss)", episode_accuracy), tf.summary.scalar("Number of Unsolved Envs", num_unsolved)]
# return episode_stats_ops, episode_stats_vars
| [
"mohak.bhardwaj@gmail.com"
] | mohak.bhardwaj@gmail.com |
0ee0607eb4b262b59a73e5ce6677ce56a4a0858b | ff96afc5eb758f241231c39688a5a191d518a21a | /src/newsgroup_assembly.py | 9851e4f5ef69089f1b0c31c953dc74bf8fb08332 | [] | no_license | tstearns26-zz/we3tasks | f4e21a27a23971b7b8afdd9dbe1df6401a2cec3b | 0490e95bac60946a86276b14b818093201758f2b | refs/heads/master | 2021-09-02T03:27:34.323797 | 2017-12-29T23:16:02 | 2017-12-29T23:16:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #!/usr/bin/python
'''
Manipulates the 20newsgroup dataset for use
in a document classification task.
'''
import util as util
from sklearn.datasets import fetch_20newsgroups
def categorize_20newsgroup(base_dir):
    """Fetch the 20newsgroups training split and lay it out on disk.

    Creates <base_dir>/c0 .. <base_dir>/c19 (one directory per category)
    and writes each post, UTF-8 encoded, to <category dir>/<index>.txt.
    Returns True when done.
    """
    corpus = fetch_20newsgroups(subset='train',
                                remove=('headers', 'footers', 'quotes'))
    util.make_dir(base_dir)
    for cat in range(20):
        util.make_dir('{}/{}'.format(base_dir, 'c' + str(cat)))
    for index, text in enumerate(corpus.data):
        destination = '{}/{}/{}'.format(base_dir,
                                        'c' + str(list(corpus.target)[index]),
                                        str(index) + '.txt')
        util.write_file(destination, text.encode('utf-8'))
    return True
if __name__ == '__main__':
categorize_20newsgroup('20newsgroup')
| [
"TRS@Thomass-MacBook-Pro-2.local"
] | TRS@Thomass-MacBook-Pro-2.local |
bb582568334743c9a2c3068a16a6d0fc7d34d133 | 5db42e4ac92e16da56f33b091ee57827401a8fc0 | /computadora.py | 6e60887ae124a549fb2faea248bb6a6a175e7621 | [] | no_license | matiasjrodriguez/Lab-MundoPC | ad6c21b87ccac00f1d5a5b7d72ba3907ecf6172f | 360d68f5065b07f7beb93f44b6c93269963f670c | refs/heads/main | 2023-06-14T23:35:57.176003 | 2021-07-13T04:49:45 | 2021-07-13T04:49:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | from mouse import Mouse
from teclado import Teclado
from monitor import Monitor
class Computadora(Mouse, Teclado, Monitor):
    """A computer assembled from peripherals (monitor, keyboard, mouse).

    NOTE(review): the class inherits from the three peripheral classes but
    never calls their initializers; the parts are actually held by
    composition (``self._monitor`` etc.), so the inheritance looks
    unnecessary -- confirm before changing.
    """
    # Class-level counter; doubles as the id generator for new instances.
    contadorComputadoras = 0
    def __init__(self, nombre, monitor, teclado, mouse):
        # Each new computer bumps the shared counter and takes it as its id.
        Computadora.contadorComputadoras += 1
        self._idComputadora = Computadora.contadorComputadoras
        self._nombre = nombre
        self._monitor = monitor
        self._teclado = teclado
        self._mouse = mouse
    def __str__(self):
        # Multi-line summary; each component relies on its own __str__.
        return f'''
            Computadora: {self._nombre}
            Id: {self._idComputadora}
            {self._monitor}
            {self._teclado}
            {self._mouse}
        '''
    @property
    def idComputadora(self):
        # Read-only: assigned once in __init__, no setter on purpose.
        return self._idComputadora
    @property
    def nombre(self):
        return self._nombre
    @nombre.setter
    def nombre(self, nombre):
        self._nombre = nombre
    @property
    def monitor(self):
        return self._monitor
    @monitor.setter
    def monitor(self, monitor):
        self._monitor = monitor
    @property
    def teclado(self):
        return self._teclado
    @teclado.setter
    def teclado(self, teclado):
        self._teclado = teclado
    @property
    def mouse(self):
        return self._mouse
    @mouse.setter
    def mouse(self, mouse):
        self._mouse = mouse
if __name__ == "__main__":
    # Smoke test: assemble two computers from peripheral parts and print
    # their formatted summaries (ids 1 and 2 via the class counter).
    monitor1 = Monitor("LG", 19)
    monitor2 = Monitor("Samsung", 27)
    teclado1 = Teclado("USB", "Logitech")
    teclado2 = Teclado("USB", "Redragon")
    mouse1 = Mouse("USB", "Logitech")
    mouse2 = Mouse("USB", "Redragon")
    computadora1 = Computadora("Office", monitor1, teclado1, mouse1)
    computadora2 = Computadora("Gamer", monitor2, teclado2, mouse2)
    print(computadora1)
print(computadora2) | [
"mellarodriguez29@gmail.com"
] | mellarodriguez29@gmail.com |
0dfc17d3dbf789aec8c4651d8e5c9717d3b0d94e | c284f7f6466b85e6832c40ecc6a647cbc47dfa94 | /Formant_detection/formant_spec.py | 8b391f9679995e459449ca4201a252a54ff52f02 | [
"MIT"
] | permissive | akshat1145/Blab-Lab | 381f8265998a23c46038a9c1ff1056d31f883e3e | 0a339f9bc38ed1cd2775ff4a61e9481a9a949947 | refs/heads/master | 2020-05-18T19:33:18.921024 | 2019-05-02T16:44:41 | 2019-05-02T16:44:41 | 184,610,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import csv
import numpy
import matplotlib.pyplot as plt
import pandas
# Load the three formant-frequency columns produced by the prediction step.
colnames = ['F1', 'F2', 'F3']
data = pandas.read_csv('data/ExamplePredictions2.csv', skipinitialspace=True, usecols=colnames)
F1 = data.F1.tolist()
F2 = data.F2.tolist()
F3 = data.F3.tolist()
print(data)
# Line plot of the three formant tracks over time.
plt.plot(data)
plt.ylabel('formants-F1,F2,F3')
plt.show()
# Pseudo-spectrogram view with frequency (0-3000 Hz) on the y axis.
# Fix: matplotlib's imshow only accepts origin='upper'/'lower'; the
# original 'lowest' is invalid (and vmin=00 is now a plain 0).
plt.imshow(numpy.transpose(data), extent=[0, 2, 0, 3000], cmap='Greys',
           vmin=0, vmax=3000, origin='lower', aspect='auto')
plt.colorbar()
plt.show() | [
"noreply@github.com"
] | akshat1145.noreply@github.com |
af40836ae1e12cf5842b47bc19b85622481ed65d | 97118a484b20e188e6469fa8652f2a20f159745d | /awscrawler/awscrawler.py | 141cc2101a4ebd6c2d5851ef2194f5dc549c50af | [] | no_license | Justinyj/ruyiwebcrawl | 7cf7805d2d1b4a6c3fc7341ab47a2aba096a5323 | 6f7205b00f1a105f4505cf4ee571f2c53762dc3e | refs/heads/master | 2020-07-13T07:24:23.531659 | 2016-11-16T02:18:02 | 2016-11-16T02:18:02 | 73,888,981 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Yuande Liu <miraclecome (at) gmail.com>
from __future__ import print_function, division
from gevent import monkey; monkey.patch_all()
import hashlib
import json
import gevent
from rediscluster.redismanager import RedisManager
from settings import RECORD_REDIS, QUEUE_REDIS, CACHE_REDIS
MANAGER = RedisManager(RECORD_REDIS, QUEUE_REDIS, CACHE_REDIS)
def post_job(batch_id, method, gap, js, total_count, urls_func=None, priority=1, queue_timeout=10, failure_times=3, start_delay=0):
    """Create a distributed crawl queue for ``batch_id`` and start its cleaner.

    ThinHash depends on a modulo algorithm, so the modulo (``total_count``)
    must be fixed at creation time.  Do not submit a second job with the
    same batch_id before the first job has finished.

    :param batch_id: unique identifier of this crawl batch.
    :param method: download method, packed into the queue parameter string.
    :param gap: crawl gap, packed into the queue parameter string.
    :param js: truthy when javascript rendering is required (stored as 1/0).
    :param total_count: total url count used to size the distributed queue.
    :param urls_func: optional callable yielding chunks of urls to enqueue.
    :param priority: queue priority.
    :param queue_timeout: should be several times larger than the download
        timeout; scaled by 30 below before being handed to the queue.
    :param failure_times: allowed retries per url.
    :param start_delay: seconds to wait before background cleaning starts.
    :return: the gevent Greenlet running ``background_cleaning``.
    """
    parameter = '{method}:{gap}:{js}:{timeout}:'.format(
        method=method,
        gap=gap,
        js=1 if js else 0,
        timeout=queue_timeout)
    queue_timeout *= 30 # magic number because of the queue.get
    queue_dict = MANAGER.init_distributed_queue(batch_id,
                                                parameter,
                                                total_count,
                                                priority,
                                                timeout=queue_timeout,
                                                failure_times=failure_times)
    if urls_func:
        for urls in urls_func():
            MANAGER.put_urls_enqueue(batch_id, urls)
    return gevent.spawn_later(start_delay, queue_dict['queue'].background_cleaning)
def delete_distributed_queue(greenlet):
    """Drop the queue owned by a finished cleaning greenlet.

    ``greenlet.value`` is the batch_id the cleaning greenlet returned;
    intended to be called after ``gevent.joinall``.
    """
    return MANAGER.delete_queue(greenlet.value)
def main():
    """Placeholder entry point; no standalone behaviour is wired up yet."""
    return None
if __name__ == '__main__':
main()
| [
"miraclecome@gmail.com"
] | miraclecome@gmail.com |
5fb73864ec7e125c60807a4ca31b47002c54600f | 88bab0f7d2e3673d03f08bb6009f6049aeb8bef0 | /open_publishing/catalog/thema.py | 734ccdc9447e8f1db602ab891def246b9efa27cd | [
"BSD-3-Clause"
] | permissive | open-publishing/open-publishing-api | f8b1067f141c0e74b42f1eece790bcdfe46de331 | d604bdc03df2a6d2e6c93dac54a4bd3d5110c519 | refs/heads/master | 2023-05-25T09:44:43.182448 | 2022-06-30T16:30:14 | 2022-06-30T16:30:14 | 119,554,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,751 | py | from datetime import datetime
from open_publishing.core import SequenceItem, SequenceField
from open_publishing.core import DatabaseObject, FieldDescriptor, SimpleField
from open_publishing.core.enums import ValueStatus, FieldKind, ThemaCode
class ThemaSubject(DatabaseObject):
_object_class = 'thema_subject'
def __init__(self,
context,
thema_subject_id):
super(ThemaSubject, self).__init__(context,
thema_subject_id)
self._fields['code'] = SimpleField(database_object=self,
aspect='*',
field_locator='code',
dtype=ThemaCode,
kind=FieldKind.readonly)
self._fields['name'] = SimpleField(database_object=self,
aspect='*',
field_locator='screenname',
dtype=str,
kind=FieldKind.readonly)
code = FieldDescriptor('code')
name = FieldDescriptor('name')
class Thema(SequenceItem):
def __init__(self,
subject):
super(Thema, self).__init__(ValueStatus.soft)
self._subject = subject
@property
def value(self):
return self._subject
@classmethod
def from_gjp(cls, gjp, database_object):
guid = gjp
subject_id = ThemaSubject.id_from_guid(guid)
subject = ThemaSubject(database_object.context,
subject_id)
return cls(subject)
def to_gjp(self):
return self._subject.guid
class ThemaList(SequenceField):
_item_type = Thema
def __init__(self,
document):
super(ThemaList, self).__init__(document,
"thema_subjects",
"thema_subjects")
def add(self,
thema_code):
if thema_code in ThemaCode:
subject_id = self.database_object.context.gjp.resolve_enum(ThemaCode,
enum=thema_code).internal_id
else:
subject_id = self.database_object.context.gjp.resolve_enum(ThemaCode,
code=thema_code).internal_id
subject = ThemaSubject(self.database_object.context,
subject_id)
new_thema = Thema(subject)
self._list.append(new_thema)
self._status = ValueStatus.hard
| [
"info@openpublishing.com"
] | info@openpublishing.com |
3ac055a9a6220c3dfd3f62733cbe42df617c163e | 0bd929784f82ee9af3fba56a56395dc7f17deadd | /Files.py | b5416049a71b6812cd70f67cdce4040714abcf90 | [] | no_license | umesh-gattem/Python_Programs | 3b7854de20da6f816c4c0a94ec07886d8f8e48c9 | 1ff676220476815d6fd601c839487b8d56946994 | refs/heads/master | 2020-09-27T16:24:32.119645 | 2016-08-24T14:46:21 | 2016-08-24T14:46:21 | 66,472,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import os
import time
# Inspect this script's own file metadata via os.stat.
print(os.getcwd())
metadata=os.stat("Files.py")  # os.stat_result: size, mode and *_time fields
print(metadata)
print(time.localtime(metadata.st_mtime))  # last-modification time, local tz
print("") | [
"umesh.gattem@gmail.com"
] | umesh.gattem@gmail.com |
201ff5ba934487f5b70186fc163eb7af010765d7 | 54515bbd3c2ee1510e9c88f02e4f41cb3277cead | /python基础一/day11/03装饰器.py | abee68af2ddd2e264eac017575ff58f72349cd66 | [] | no_license | Li96224/python_notes | 04c0287bfa7b5e296bfdda50cb2741bae7a05ba6 | 469690882736a20c148439a0fc8bea2e33b1197a | refs/heads/master | 2023-04-09T07:47:15.019205 | 2021-04-19T15:21:49 | 2021-04-19T15:21:49 | 359,506,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py |
#装饰器形成的过程: 最简单的装饰器 有返回值的 有一个参数 万能参数
#装饰器的作用
#原则:开放封闭原则
#装饰器的固定模式
# import time
# print(time.time()) #获取当前时间
# time.sleep(10) #代码跑到这个位置的时候暂停10秒
# def func(): #第一步定义函数func
# time.sleep(0.1)
# print('12345')
#
# def timmer(f): #第二步定义装饰器函数 (因为第三步的调用此时的形参f的内存地址等同于函数func)
# def inner(): #第四步执行inner函数
# start=time.time() #第八步
# f() #第九步
# end=time.time() #第十步
# print(end-start) #第十一步
# return inner #返回inner函数的内存地址 #第五步返回inner函数
#
# func=timmer(func) #第三步调用函数timer时将func作为实参传入了 第六步func等于返回值inner函数
# func() #第七步执行func函数
# 语法糖
#
# def timer(f): #装饰器函数
# def inner():
# start=time.time()
# f() #被装饰的函数
# end=time.time()
# print(end-start)
# return inner #返回函数不用带()
#
#
# @timer #语法糖 @装饰器函数名 在被装饰的函数前面加上@装饰函数 起到的作用相当于func=timer(func)且这行代码可以不用写
# def func(): #被装饰器的函数
# time.sleep(0.01)
# print("大家好")
# # func=timer(func) #函数作为实参传入函数不用带()
# func()
#装饰器的作用:在不修改函数的调用方式,但是还想在原来的函数前后添加功能
#timer就是一个装饰器函数,只是对一个函数 有一些装饰作用
# 原则:开放封闭原则
# 开放:对扩展是开放的
# 封闭:对修改是封闭的
#装饰带参数函数的装饰器
# def timer(f): #装饰器函数
# def inner(*args,**kwargs):
# start=time.time()
# ret=f(*args,**kwargs) #被装饰的函数
# end=time.time()
# print(end-start)
# return ret
# return inner #返回函数不用带()
#
#
# @timer #语法糖 @装饰器函数名
# def func(a,b): #被装饰器的函数
# time.sleep(0.01)
# print("大家好",a,b)
# return '新年好'
#
#
# @timer #语法糖 @装饰器函数名
# def func1(a): #被装饰器的函数
# time.sleep(0.01)
# print("大家好",a)
# return '新年好'
# # func=timer(func) #函数作为实参传入函数不用带()
# ret=func(1,b=3)
# print(ret)
#
# def timer(f): #装饰器函数
# def inner(*args,**kwargs):
# """在被装饰函数之前要做的事"""
# ret=f(*args,**kwargs) #被装饰的函数
# """在被装饰函数之后要做的事"""
# return ret
# return inner
#
# @timer #语法糖 @装饰器函数名
# def func(a,b): #被装饰的函数
# time.sleep(0.01)
# print('666',a,b)
# return '111'
# def wrapper(func): #这时候传的的实参就是qqxing
# def inner(*args,**kwargs):
# ret=func(*args,**kwargs) #被装饰的函数
# return ret
# return inner
#
# @wrapper #qqxing等于wrapper(qqxing)
# def qqxing(a,b,c):
# print(123,a,b,c)
# return "112"
#
# # ret = qqxing() #实际是执行inner
# qqxing(11,22,c=99)
def wrapper(func):
    """Transparent decorator: ``inner`` delegates every call straight to ``func``."""
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner
@wrapper  # qqxing = wrapper(qqxing): every call goes through inner
def qqxing(*args,**kwargs):
    """Print the received positional args (Chinese label: "printed value") and return "abc".

    NOTE(review): **kwargs is forwarded to print(), so only print's own
    keywords (sep/end/file/flush) are legal -- confirm intent.
    """
    print("打印的值",*args,**kwargs)
    return "abc"
print(qqxing(1,23,3)) | [
"liyouming_21@163.com"
] | liyouming_21@163.com |
aa191c43266a3b7abde471820f7455bfd3435d0d | 7b9813e6c805edfeca538b69bd79119db439f284 | /siim-rsna-2021/exp/exp043/train.py | fc13cba7c650cb1167a2e07844f3e695a2796073 | [] | no_license | Ino-Ichan/SIIM-RSNA-Covid19-2021 | 61bfd0b2baef58f6b1673e02f45acaa998916a89 | caba038bbb403cb55753ecc68d5fb92ef93b1f8e | refs/heads/main | 2023-07-08T13:19:46.226739 | 2021-08-13T18:05:37 | 2021-08-13T18:05:37 | 373,910,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,606 | py | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import random
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
# from torch.cuda.amp import GradScaler, autocast
from torch.cuda.amp import autocast
import cv2
import albumentations
from albumentations.pytorch import ToTensorV2
from sklearn.metrics import confusion_matrix, roc_auc_score, average_precision_score
from tqdm import tqdm
import argparse
import os, sys, yaml
sys.path.append('/workspace/siim-rsna-2021')
from src.logger import setup_logger, LOGGER
from src.meter import mAPMeter, AUCMeter, APMeter, AverageValueMeter
from src.utils import plot_sample_images
from src.sam import SAM
from src.custom_grad_scaler import CustomGradScaler as GradScaler
# import neptune.new as neptune
import wandb
import pydicom
import time
from contextlib import contextmanager
import timm
import warnings
target_columns = [
"Negative for Pneumonia", "Typical Appearance", "Indeterminate Appearance", "Atypical Appearance"
]
@contextmanager
def timer(name):
    """Context manager printing the elapsed wall-clock seconds for *name*."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print(f'[{name}] done in {elapsed:.0f} s')
def seed_torch(seed=516):
    """Make runs reproducible: seed every RNG and pin cuDNN behaviour.

    Covers python's random, numpy, torch (CPU + all CUDA devices) and the
    hash seed env var; cuDNN is set deterministic with benchmark enabled.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
def ousm_loss(error, k=2):
    """Online sample mining: drop the k samples with the largest error.

    2-D per-element errors are first reduced to one value per sample;
    the batch_size - k smallest errors are kept (ascending order).
    """
    n = error.shape[0]
    if error.dim() == 2:
        error = error.mean(1)
    _, keep = error.topk(n - k, largest=False)
    return error.index_select(0, keep)
# Freeze batchnorm 2d
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm2d') != -1:
m.eval()
# =============================================================================
# Model
# =============================================================================
class Net(nn.Module):
    """Study-level classifier: a timm backbone with one logit per label."""
    def __init__(self, name="resnest101e"):
        super(Net, self).__init__()
        # timm builds the backbone with a fresh classification head sized to
        # the 4 study labels (len(target_columns)), pretrained weights kept.
        self.model = timm.create_model(name, pretrained=True, num_classes=len(target_columns))
    def forward(self, x):
        # squeeze(-1) is a no-op for the usual (batch, num_classes) logits;
        # presumably kept for single-logit heads -- TODO confirm intent.
        x = self.model(x).squeeze(-1)
        return x
# =============================================================================
# Dataset
# =============================================================================
class CustomDataset(Dataset):
    """Dataset yielding a 3-channel chest image plus its 4 study-level labels.

    Images come either from pre-extracted files (``use_npy``) or straight
    from DICOM.  Optional CLAHE (``clahe``) or mixed equalization (``mix``)
    channels replace the raw channels before augmentation.
    """
    def __init__(self,
                 df,
                 image_size,
                 transform=None,
                 mode="train",
                 clahe=False,
                 mix=False,
                 use_npy=False,
                 ):
        self.df = df.reset_index(drop=True)
        self.image_size = image_size
        self.transform = transform
        self.mode = mode
        self.clahe = clahe
        self.mix = mix
        if self.clahe or self.mix:
            # One shared CLAHE operator, built once up front.
            self.clahe_transform = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(16, 16))
        self.cols = target_columns
        self.use_npy = use_npy
    def __len__(self):
        return self.df.shape[0]
    def __getitem__(self, index):
        row = self.df.iloc[index]
        if self.use_npy:
            # Despite the column name, npy_path is read as an image file here.
            # images = np.load(row.npy_path)
            images = cv2.imread(row.npy_path)
        else:
            images = pydicom.read_file(row.dicom_path).pixel_array
        if self.clahe:
            # Replace all three channels with the CLAHE-equalized first one.
            # Assumes images is HxWxC here -- TODO confirm for the DICOM
            # branch, where pixel_array is typically 2-D.
            single_channel = images[:, :, 0].astype(np.uint8)
            single_channel = self.clahe_transform.apply(single_channel)
            images = np.array([
                single_channel,
                single_channel,
                single_channel
            ]).transpose(1, 2, 0)
        elif self.mix:
            # Channel mix: raw / CLAHE / global histogram equalization.
            single_channel = images[:, :, 0].astype(np.uint8)
            clahe_channel = self.clahe_transform.apply(single_channel)
            hist_channel = cv2.equalizeHist(single_channel)
            images = np.array([
                single_channel,
                clahe_channel,
                hist_channel
            ]).transpose(1, 2, 0)
        if self.transform is not None:
            # Albumentations pipelines here end in ToTensorV2, so this yields
            # a CHW tensor scaled to [0, 1].
            images = self.transform(image=images)['image'] / 255
        else:
            # No augmentation: only reorder HWC -> CHW; values stay unscaled.
            images = images.transpose(2, 0, 1)
        label = row[self.cols].values.astype(np.float16)
        return {
            "image": torch.tensor(images, dtype=torch.float),
            # "image": images,
            "target": torch.tensor(label, dtype=torch.float)
        }
# =============================================================================
# one epoch
# =============================================================================
def disable_bn(model):
    """Put every BatchNorm1d/2d submodule into eval mode.

    Used for the second SAM forward pass so running stats are not updated
    twice per step.
    """
    for sub in model.modules():
        if isinstance(sub, (nn.BatchNorm1d, nn.BatchNorm2d)):
            sub.eval()
def enable_bn(model):
    """Restore training mode on the whole model (undoes disable_bn)."""
    model.train()
def train_one_epoch(train_dataloader, model, device, criterion, use_amp, wandb, meters_dict, enable_sam, mode="train"):
    """Run one training epoch; logs loss/mAP to LOGGER and wandb.

    NOTE(review): relies on module-level globals that must exist before the
    call: ``e`` (epoch index), ``debug``, ``freeze_bn``, ``optimizer``,
    ``scaler`` and ``accumulation_steps`` -- consider passing explicitly.
    """
    train_time = time.time()
    LOGGER.info("")
    LOGGER.info("+" * 30)
    LOGGER.info(f"+++++ Epoch {e}")
    LOGGER.info("+" * 30)
    LOGGER.info("")
    progress_bar = tqdm(train_dataloader)
    model.train()
    torch.set_grad_enabled(True)
    # freeze batch norm
    if freeze_bn:
        model = model.apply(set_bn_eval)
    # reset metrics
    for m in meters_dict.values():
        m.reset()
    for step_train, data in enumerate(progress_bar):
        if debug:
            # In debug mode only run two steps so an epoch finishes fast.
            if step_train == 2:
                break
        inputs = data["image"].to(device)
        target = data["target"].to(device)
        bs = inputs.shape[0]
        with autocast(enabled=use_amp):
            output = model(inputs)
            loss = criterion(output, target).mean()
        if enable_sam:
            # SAM: first backward + ascent step, then a second forward/backward
            # at the perturbed weights (batch-norm disabled) and descent step.
            optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.first_step(optimizer, zero_grad=True)
            with autocast(enabled=use_amp):
                disable_bn(model)
                output = model(inputs)
                loss = criterion(output, target).mean()
            scaler.scale(loss).backward()
            enable_bn(model)
            scaler.second_step(optimizer, zero_grad=True)
            scaler.update()
        else:
            if accumulation_steps > 1:
                # Gradient accumulation: scale the loss and only step every
                # accumulation_steps batches.
                loss_bw = loss / accumulation_steps
                scaler.scale(loss_bw).backward()
                if (step_train + 1) % accumulation_steps == 0 or step_train == len(train_dataloader):
                    scaler.step(optimizer)
                    scaler.update()
                    optimizer.zero_grad()
            else:
                optimizer.zero_grad()
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
        meters_dict["loss"].add(loss.item(), n=bs)
        meters_dict["AP"].add(output=output.detach(), target=target)
        progress_bar.set_description(f"loss: {loss.item()} loss(avg): {meters_dict['loss'].value()[0]}")
    LOGGER.info(f"Train loss: {meters_dict['loss'].value()[0]}")
    LOGGER.info(f"Train mAP: {meters_dict['AP'].value().mean()}")
    LOGGER.info(f"Train time: {(time.time() - train_time) / 60:.3f} min")
    wandb.log({
        "epoch": e,
        "Loss/train": meters_dict['loss'].value()[0],
        "mAP/train": meters_dict['AP'].value().mean(),
        # Competition-style metric: study mAP carries 2/3 of the weight.
        "mAP_metrics/train": (2 * meters_dict['AP'].value().mean()) / 3,
    })
def val_one_epoch(val_dataloader, model, device, wandb, meters_dict, mode="val"):
    """Evaluate for one epoch; returns the mean AP over the study labels.

    NOTE(review): uses module-level globals ``e``, ``debug`` and
    ``criterion`` -- consider passing them explicitly.
    """
    val_time = time.time()
    progress_bar = tqdm(val_dataloader)
    model.eval()
    torch.set_grad_enabled(False)
    # reset metrics
    for m in meters_dict.values():
        m.reset()
    for step_val, data in enumerate(progress_bar):
        if debug:
            # In debug mode only run two steps.
            if step_val == 2:
                break
        inputs = data["image"].to(device)
        target = data["target"].to(device)
        bs = inputs.shape[0]
        with torch.no_grad():
            output = model(inputs)
            loss = criterion(output, target).mean()
        meters_dict["loss"].add(loss.item(), n=bs)
        meters_dict["AP"].add(output=output, target=target)
        progress_bar.set_description(f"loss: {loss.item()} loss(avg): {meters_dict['loss'].value()[0]}")
    LOGGER.info(f"Val loss: {meters_dict['loss'].value()[0]}")
    LOGGER.info(f"Val mAP: {meters_dict['AP'].value().mean()}")
    LOGGER.info(f"Val mAP score: {(2 * meters_dict['AP'].value().mean()) / 3}")
    LOGGER.info(f"Val time: {(time.time() - val_time) / 60:.3f} min")
    log_dict = {
        "epoch": e,
        "Loss/val": meters_dict['loss'].value()[0],
        "mAP/val": meters_dict['AP'].value().mean(),
        "mAP_metrics/val": (2 * meters_dict['AP'].value().mean()) / 3
    }
    # Also log the per-label average precisions.
    for n_t, t in enumerate(target_columns):
        log_dict[f"AP_{t}/val"] = meters_dict['AP'].value()[n_t]
    wandb.log(log_dict)
    return meters_dict['AP'].value().mean()
# def get_train_transforms(image_size):
# return albumentations.Compose([
# albumentations.Transpose(p=0.5),
# albumentations.VerticalFlip(p=0.5),
# albumentations.HorizontalFlip(p=0.5),
# albumentations.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2,
# brightness_by_max=False, p=0.5),
# albumentations.Blur(blur_limit=7, p=0.5),
# # albumentations.HueSaturationValue(p=0.5),
# albumentations.CenterCrop(540, 540, p=1),
# albumentations.Resize(image_size, image_size),
# # albumentations.RandomResizedCrop(height=image_size, width=image_size, scale=(0.08, 1)),
# albumentations.CoarseDropout(max_holes=3, max_height=50, max_width=50),
# ToTensorV2()
# ])
#
#
# def get_val_transforms(image_size):
# return albumentations.Compose([
# albumentations.CenterCrop(540, 540, p=1),
# albumentations.Resize(image_size, image_size),
# # albumentations.RandomResizedCrop(height=image_size, width=image_size, scale=(0.08, 1)),
# ToTensorV2()
# ], p=1.0)
def get_train_transforms(image_size):
    """Heavy training augmentation: geometric + photometric + noise + cutout.

    Output is an un-normalized CHW uint8 tensor (ToTensorV2); the dataset
    divides by 255 afterwards.
    """
    return albumentations.Compose([
        albumentations.ShiftScaleRotate(p=0.5),
        albumentations.RandomResizedCrop(image_size, image_size, scale=(0.7, 1), p=1),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5, p=0.5),
        albumentations.RandomBrightnessContrast(brightness_limit=(-0.2,0.2), contrast_limit=(-0.2, 0.2), p=0.5),
        albumentations.CLAHE(clip_limit=(1, 4), p=0.5),
        # albumentations.OneOf([
        #     albumentations.OpticalDistortion(distort_limit=1.0),
        #     albumentations.GridDistortion(num_steps=5, distort_limit=1.),
        #     albumentations.ElasticTransform(alpha=3),
        # ], p=0.2),
        albumentations.OneOf([
            albumentations.GaussNoise(var_limit=[10, 50]),
            albumentations.GaussianBlur(),
            albumentations.MotionBlur(),
            # albumentations.MedianBlur(),
        ], p=0.1),
        albumentations.Resize(image_size, image_size),
        # albumentations.OneOf([
        #     albumentations.augmentations.transforms.JpegCompression(),
        #     albumentations.augmentations.transforms.Downscale(scale_min=0.1, scale_max=0.15),
        # ], p=0.2),
        # albumentations.imgaug.transforms.IAAPiecewiseAffine(p=0.2),
        # albumentations.imgaug.transforms.IAASharpen(p=0.2),
        albumentations.Cutout(max_h_size=int(image_size * 0.1), max_w_size=int(image_size * 0.1), num_holes=5, p=0.5),
        # albumentations.Normalize(
        #     mean=[0.485, 0.456, 0.406],
        #     std=[0.229, 0.224, 0.225],
        # ),
        ToTensorV2(p=1)
    ])
def get_train_transforms2(image_size):
    """Lighter training augmentation variant: milder color jitter and noise,
    only two cutout holes.  Ends in ToTensorV2 like the other pipelines."""
    hole_size = int(image_size * 0.1)
    steps = [
        albumentations.ShiftScaleRotate(rotate_limit=30, p=0.5),
        albumentations.RandomResizedCrop(image_size, image_size, scale=(0.7, 1), p=1),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5),
        albumentations.OneOf(
            [
                albumentations.GaussNoise(),
                albumentations.MotionBlur(blur_limit=3),
            ],
            p=0.1,
        ),
        albumentations.Resize(image_size, image_size),
        albumentations.Cutout(max_h_size=hole_size, max_w_size=hole_size, num_holes=2, p=0.5),
        ToTensorV2(p=1),
    ]
    return albumentations.Compose(steps)
def get_val_transforms(image_size):
    """Validation pipeline: deterministic resize plus tensor conversion only."""
    steps = [
        albumentations.Resize(image_size, image_size),
        ToTensorV2(p=1),
    ]
    return albumentations.Compose(steps)
if __name__ == "__main__":
    print('Start!!!')
    warnings.simplefilter('ignore')

    # ---- CLI: the experiment is fully described by a yaml config ----
    parser = argparse.ArgumentParser(description="training")
    parser.add_argument('-y', '--yaml_path', type=str,
                        help='configを書いたyamlのPath。例)-y ../config/exp0001.yaml')
    args = parser.parse_args()
    yaml_path = args.yaml_path  # (the original assigned this twice; once is enough)
    if os.path.isfile(yaml_path):
        with open(yaml_path) as file:
            cfg = yaml.safe_load(file.read())
    else:
        print('Error: No such yaml file')
        sys.exit()

    # seed everything for reproducibility
    seed_torch()

    # ---- output layout: /workspace/output/<exp_name>/{model,plot,oof,sample_img,log} ----
    exp_name = cfg["exp_name"]  # os.path.splitext(os.path.basename(__file__))[0]
    output_path = os.path.join("/workspace/output", exp_name)
    model_path = output_path + "/model"
    plot_path = output_path + "/plot"
    oof_path = output_path + "/oof"
    sample_img_path = output_path + "/sample_img"
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(model_path, exist_ok=True)
    os.makedirs(output_path + "/log", exist_ok=True)
    os.makedirs(plot_path, exist_ok=True)
    os.makedirs(oof_path, exist_ok=True)
    os.makedirs(sample_img_path, exist_ok=True)

    # ---- logger ----
    log_path = os.path.join(output_path, "log/log.txt")
    setup_logger(out_file=log_path)
    LOGGER.info("config")
    LOGGER.info(cfg)
    LOGGER.info('')
    debug = cfg["debug"]
    if debug:
        LOGGER.info("Debug!!!!!")

    # ---- device selection ----
    device_id = cfg["device_id"]
    try:
        device = "cuda:{}".format(device_id) if torch.cuda.is_available() else "cpu"
    except Exception as e:
        LOGGER.info('GPU is not available, {}'.format(e))
        sys.exit()
    print(device)

    #######################################
    ## params
    #######################################
    model_name = cfg["model_name"]
    img_size = cfg["img_size"]
    batch_size = cfg["batch_size"]
    n_workers = cfg["n_workers"]
    n_epochs = cfg["n_epochs"]
    start_epoch = cfg["start_epoch"]
    transform = cfg["transform"]
    hold_out = cfg["hold_out"]
    accumulation_steps = cfg["accumulation_steps"]
    early_stopping_steps = cfg["early_stopping_steps"]
    freeze_bn = cfg["freeze_bn"]
    use_amp = cfg["use_amp"]
    use_npy = cfg["use_npy"]
    clahe = cfg["clahe"]
    mix = cfg["mix"]

    #######################################
    ## CV
    #######################################
    df = pd.read_csv(cfg["df_train_path"])
    cv_list = hold_out if hold_out else [0, 1, 2, 3, 4]
    # NOTE(review): ``oof`` is allocated but never filled or saved below — the
    # "Save oof" section only logs the mean fold score.  TODO: persist oof preds.
    oof = np.zeros((len(df), len(target_columns)))
    best_eval_score_list = []
    for cv in cv_list:
        LOGGER.info('# ===============================================================================')
        LOGGER.info(f'# Start CV: {cv}')
        LOGGER.info('# ===============================================================================')

        # one wandb run per fold
        wandb.init(config=cfg, tags=[cfg['exp_name'], f"cv{cv}", model_name],
                   project='siim-rsna-covid19-2021', entity='inoichan',
                   name=f"{cfg['exp_name']}_cv{cv}_{model_name}", reinit=True)

        df_train = df[df.cv != cv].reset_index(drop=True)
        df_val = df[df.cv == cv].reset_index(drop=True)
        val_index = df[df.cv == cv].index

        #######################################
        ## Dataset
        #######################################
        train_transform = get_train_transforms(img_size)
        val_transform = get_val_transforms(img_size)

        train_dataset = CustomDataset(df=df_train, image_size=img_size, clahe=clahe, mix=mix,
                                      transform=train_transform, use_npy=use_npy, mode="train")
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                      pin_memory=False, num_workers=n_workers, drop_last=True)
        # plot a few augmented training samples for visual sanity checking
        plot_sample_images(train_dataset, sample_img_path, "train", normalize=None)

        val_dataset = CustomDataset(df=df_val, image_size=img_size, clahe=clahe, mix=mix,
                                    transform=val_transform, use_npy=use_npy, mode="val")
        val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
                                    pin_memory=False, num_workers=n_workers, drop_last=False)
        plot_sample_images(val_dataset, sample_img_path, "val", normalize=None)

        # ==== INIT MODEL
        device = torch.device(device)
        model = Net(model_name).to(device)
        optimizer = optim.Adam(model.parameters(), lr=float(cfg["initial_lr"]), eps=1e-7)
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_epochs, eta_min=float(cfg["final_lr"]))
        criterion = nn.BCEWithLogitsLoss(reduction='none')
        scaler = GradScaler(enabled=use_amp)

        # optionally resume model + optimizer from a checkpoint
        load_checkpoint = cfg["load_checkpoint"][cv]
        LOGGER.info("-" * 10)
        if os.path.exists(load_checkpoint):
            weight = torch.load(load_checkpoint, map_location=device)
            model.load_state_dict(weight["state_dict"])
            LOGGER.info(f"Successfully loaded model, model path: {load_checkpoint}")
            # BUG FIX: the original called optimizer.load_state_dict(["optimizer"])
            # with a list literal; restore the saved optimizer state instead.
            optimizer.load_state_dict(weight["optimizer"])
            # move restored optimizer tensors onto the training device
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device)
        else:
            LOGGER.info(f"Training from scratch..")
        LOGGER.info("-" * 10)

        # wandb misc
        wandb.watch(model)

        # ==== TRAIN LOOP
        best = -1
        best_epoch = 0
        early_stopping_cnt = 0
        meters_dict = {
            "loss": AverageValueMeter(),
            "AP": APMeter(),
        }
        enable_sam = False
        for e in range(start_epoch, start_epoch + n_epochs):
            # after 10 epochs switch from plain Adam to the SAM optimizer
            if e == 10:
                enable_sam = True
                optimizer = SAM(model.parameters(), optim.Adam, lr=float(cfg["initial_lr"]) / 2, eps=1e-7)
                # BUG FIX: the original passed the undefined name ``fl`` as
                # eta_min; use the configured final lr, matching the scheduler above.
                scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_epochs, eta_min=float(cfg["final_lr"]))
            if e > 0:
                wandb.log({
                    "Learning Rate": optimizer.param_groups[0]["lr"],
                    "epoch": e
                })
            train_one_epoch(train_dataloader, model, device, criterion, use_amp, wandb, meters_dict, enable_sam)
            score = val_one_epoch(val_dataloader, model, device, wandb, meters_dict)
            scheduler.step()

            LOGGER.info('Saving last model ...')
            model_save_path = os.path.join(model_path, f"cv{cv}_weight_checkpoint_last.pth")
            torch.save({
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict()
            }, model_save_path)

            if best < score:
                LOGGER.info(f'Best score update: {best:.5f} --> {score:.5f}')
                best = score
                best_epoch = e
                LOGGER.info('Saving best model ...')
                model_save_path = os.path.join(model_path, f"cv{cv}_weight_checkpoint_best.pth")
                torch.save({
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict()
                }, model_save_path)
                early_stopping_cnt = 0
            else:
                # early stopping
                early_stopping_cnt += 1
                if early_stopping_cnt >= early_stopping_steps:
                    LOGGER.info(f"Early stopping at Epoch {e}")
                    break

        LOGGER.info('-' * 20)
        LOGGER.info(f'Best val score: {best}, at epoch {best_epoch} cv{cv}')
        LOGGER.info('-' * 20)
        best_eval_score_list.append(best)
        wandb.log({
            "Best mAP": best,
            "Best mAP metrics": (2 * best) / 3,
        })

    #######################################
    ## Save oof
    #######################################
    mean_score = np.mean(best_eval_score_list)
    LOGGER.info('-' * 20)
    LOGGER.info(f'Oof score: {mean_score}')
    LOGGER.info('-' * 20)
| [
"sys.b11noway@gmail.com"
] | sys.b11noway@gmail.com |
9eb024b7757898fd4593dc5608aba65cc479734d | 9d3310fbf7f4b446e6eed2b6eea0f62abd20795d | /code/plans/rcp/api/daemon.py | 5612cac34343f1b63eff2f288e6da73c46b308d8 | [] | no_license | zywan/Kariz | c6718d99ae4da6a89b8d75c003ceec82d0cca73d | 40cd7889e97f6d331f8e19d6449145fbd35cba45 | refs/heads/master | 2020-09-06T20:09:40.597379 | 2019-11-06T19:33:18 | 2019-11-06T19:33:18 | 220,536,708 | 1 | 0 | null | 2019-11-08T19:45:06 | 2019-11-08T19:45:05 | null | UTF-8 | Python | false | false | 1,387 | py | #!/usr/bin/python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
This is the daemon module and supports all the ReST actions for the
KARIZ cache management project
"""
# System modules
from datetime import datetime
# 3rd party modules
from flask import make_response, abort
import plans.rcp.rcp as rcp
import estimator.collector as col
import utils.objectstore as objs
g_collector = None
g_rcp = None
g_objectstore = None
def start_objectstore():
    """Create the shared ObjectStore singleton, publish it as a module
    global, and return it."""
    global g_objectstore
    g_objectstore = objs.ObjectStore()
    return g_objectstore
def start_estimator():
    """Create the statistics Collector, wire it to the shared object store,
    publish it as a module global, and return it."""
    global g_collector
    g_collector = col.Collector()
    g_collector.objectstore = g_objectstore
    return g_collector
def start_kariz():
    """Create the RCP cache planner, wire it to the shared object store,
    publish it as a module global, and return it."""
    global g_rcp
    g_rcp = rcp.RCP()
    g_rcp.objectstore = g_objectstore
    return g_rcp
def get_timestamp():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
def notify_collector(stats):
    """Forward a UTF-8 encoded statistics payload to the collector."""
    payload = stats.decode("utf-8")
    g_collector.update_statistic_from_string(payload)
def notify_newstage(new_stage):
    """Tell the RCP planner that a new DAG stage has started."""
    payload = new_stage.decode("utf-8")
    g_rcp.notify_new_stage_from_string(payload)
def submit_newdag(new_dag):
    """Register a newly submitted DAG with both the planner and the collector."""
    payload = new_dag.decode("utf-8")
    g_rcp.new_dag_from_string(payload)
    g_collector.new_dag_from_string(payload)
def notify_completed(dagstr):
    """Remove a completed DAG from the planner's bookkeeping."""
    payload = dagstr.decode("utf-8")
    g_rcp.remove_dag(payload)
| [
"mania.abdi287@gmail.com"
] | mania.abdi287@gmail.com |
c6dea658cec4b0e04a793d67a2558b2a13004941 | d5ff541c5e8cb00704746f44a195a69b4e3111fe | /tests/test_validators.py | a6e605e64c1d0f70b58eb49a1409a325b1a283bc | [] | no_license | lmergner/contrivers | 08b316fa9d90c9bced15575a52e5161990709dcd | 17b2c73e7fde848ccac7c227ccc180c9f7a909cb | refs/heads/master | 2021-06-26T23:49:50.941401 | 2020-05-20T01:51:58 | 2020-05-20T01:51:58 | 33,683,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
tests.test_validators
Test that when given a bunch of nearly correct isbn numbers, we get a
consistent return from our model
"""
import pytest
from contrivers.validators import validate_isbn
from contrivers.errors import ValidationError
@pytest.mark.parametrize('success', [
    (10, u'0765378558', u'0765378558'),
    (10, u'076-53,78-558', u'0765378558'),
    (13, u'978-0765378552', u'9780765378552'),
    (13, u'978-076-537,8552', u'9780765378552')
])
def test_validates(success):
    """validate_isbn should clean and accept well-formed ISBN-10/13 strings."""
    length, raw, cleaned = success
    result = validate_isbn(raw, length)
    assert result == cleaned
@pytest.mark.parametrize('fails', [
    (10, 'isbn0765378558'),   # contains letters
    (13, '06978-0765378552'), # too long
    (13, '978378552')         # too short
])
def test_validate_fails(fails):
    """validate_isbn should raise ValidationError on malformed ISBNs."""
    length, bad_isbn = fails
    with pytest.raises(ValidationError):
        validate_isbn(bad_isbn, length)
| [
"lmergner@gmail.com"
] | lmergner@gmail.com |
35282def5bdb7aadb2e05e234dcac7ec82210bdb | 0c210894d7b1b7e522021184953a8fcd7ccd2e10 | /DemoKS/DemoKS/asgi.py | e6af21bea476a5c1eafdb1988724a99b818c17ea | [] | no_license | Sidmach-Analytics/demo-knowledge-sharing | a3c1e02ce27ad8c5d3bb2fe8e4a064241deb53b6 | 4e1d6586c5578eeeeefae8fd91ab9d7e433f8127 | refs/heads/main | 2023-06-15T15:35:08.162843 | 2021-07-15T14:07:00 | 2021-07-15T14:07:00 | 386,292,955 | 0 | 0 | null | 2021-07-15T14:07:01 | 2021-07-15T13:05:12 | Python | UTF-8 | Python | false | false | 389 | py | """
ASGI config for DemoKS project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DemoKS.settings')

# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"opakunlejo@gmail.com"
] | opakunlejo@gmail.com |
c869af1d081ae09597c15bccd827427a7a7871d2 | eb8b5cde971573668800146b3632e43ed6e493d2 | /python/oneflow/test/modules/test_nllloss_grad.py | aa296ca08d11bd5060db05573afda9a749419506 | [
"Apache-2.0"
] | permissive | big-data-ai/oneflow | 16f167f7fb7fca2ce527d6e3383c577a90829e8a | b1c67df42fb9c5ab1335008441b0273272d7128d | refs/heads/master | 2023-07-08T21:21:41.136387 | 2021-08-21T11:31:14 | 2021-08-21T11:31:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,599 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
# ---------------------------------------------------------------------------
# Shared fixtures for the NLLLoss backward tests.
#
# ``np.int`` (used by the original) was deprecated in NumPy 1.20 and removed
# in NumPy 1.24, so targets are built directly as ``np.int64`` — the same
# dtype the tensors are created with below.  The eighteen near-identical test
# bodies are collapsed into one checker plus thin per-case wrappers; every
# public ``_test_*`` name and its ``(test_case, device)`` signature is kept.
# ---------------------------------------------------------------------------
_X_2D = np.array(
    [
        [0.88103855, 0.9908683, 0.6226845],
        [0.53331435, 0.07999352, 0.8549948],
        [0.25879037, 0.39530203, 0.698465],
        [0.73427284, 0.63575995, 0.18827209],
        [0.05689114, 0.0862954, 0.6325046],
    ]
).astype(np.float32)
_Y_2D = np.array([0, 2, 1, 1, 0]).astype(np.int64)
_X_SEG = np.array(
    [[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]
).astype(np.float32)
_Y_SEG = np.array([[[1, 0], [0, 1]]]).astype(np.int64)
_X_BERT = np.array(
    [[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]
).astype(np.float32)
_Y_BERT = np.array([[1, 0, 0, 1]]).astype(np.int64)

# Expected input gradients.  "none"/"sum" reductions share the same grads;
# "mean" divides by the number of (non-ignored) targets.
_GRAD_2D = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, -1.0],
    [0.0, -1.0, 0.0],
    [0.0, -1.0, 0.0],
    [-1.0, 0.0, 0.0],
]
_GRAD_2D_MEAN = [
    [-0.20000000298023224, 0.0, 0.0],
    [0.0, 0.0, -0.20000000298023224],
    [0.0, -0.20000000298023224, 0.0],
    [0.0, -0.20000000298023224, 0.0],
    [-0.20000000298023224, 0.0, 0.0],
]
_GRAD_2D_IGN = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, -1.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [-1.0, 0.0, 0.0],
]
_GRAD_2D_MEAN_IGN = [
    [-0.33333, 0.0, 0.0],
    [0.0, 0.0, -0.33333],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [-0.33333, 0.0, 0.0],
]
_GRAD_SEG = [[[[0.0, -1.0], [-1.0, 0.0]], [[-1.0, 0.0], [0.0, -1.0]]]]
_GRAD_SEG_MEAN = [[[[0.0, -0.25], [-0.25, 0.0]], [[-0.25, 0.0], [0.0, -0.25]]]]
_GRAD_SEG_IGN = [[[[0.0, -1.0], [-1.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]]]
_GRAD_SEG_MEAN_IGN = [[[[0.0, -0.5], [-0.5, 0.0]], [[0.0, 0.0], [0.0, 0.0]]]]
_GRAD_BERT = [[[0.0, -1.0, -1.0, 0.0], [-1.0, 0.0, 0.0, -1.0]]]
_GRAD_BERT_MEAN = [[[0.0, -0.25, -0.25, 0.0], [-0.25, 0.0, 0.0, -0.25]]]
_GRAD_BERT_IGN = [[[0.0, -1.0, -1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
_GRAD_BERT_MEAN_IGN = [[[0.0, -0.5, -0.5, 0.0], [0.0, 0.0, 0.0, 0.0]]]


def _check_nllloss_backward(test_case, device, x, y, reduction, np_grad, ignore_index=None):
    """Run NLLLoss forward+backward on (x, y) and compare input gradients.

    Builds a float32 input requiring grad and an int64 target on ``device``,
    applies ``flow.nn.NLLLoss`` with the given reduction (and optional
    ignore_index), backprops the summed loss, and asserts the input gradient
    matches ``np_grad`` within 1e-5.
    """
    input = flow.Tensor(
        x, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    if ignore_index is None:
        nll_loss = flow.nn.NLLLoss(reduction=reduction)
    else:
        nll_loss = flow.nn.NLLLoss(reduction=reduction, ignore_index=ignore_index)
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    of_out = of_out.sum()
    of_out.backward()
    test_case.assertTrue(
        np.allclose(input.grad.numpy(), np_grad, atol=1e-05, rtol=1e-05)
    )


def _test_nllloss_none_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_2D, _Y_2D, "none", _GRAD_2D)


def _test_nllloss_mean_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_2D, _Y_2D, "mean", _GRAD_2D_MEAN)


def _test_nllloss_sum_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_2D, _Y_2D, "sum", _GRAD_2D)


def _test_nllloss_segmentation_none_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_SEG, _Y_SEG, "none", _GRAD_SEG)


def _test_nllloss_segmentation_mean_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_SEG, _Y_SEG, "mean", _GRAD_SEG_MEAN)


def _test_nllloss_segmentation_sum_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_SEG, _Y_SEG, "sum", _GRAD_SEG)


def _test_nllloss_bert_none_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_BERT, _Y_BERT, "none", _GRAD_BERT)


def _test_nllloss_bert_mean_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_BERT, _Y_BERT, "mean", _GRAD_BERT_MEAN)


def _test_nllloss_bert_sum_backward(test_case, device):
    _check_nllloss_backward(test_case, device, _X_BERT, _Y_BERT, "sum", _GRAD_BERT)


def _test_nllloss_none_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_2D, _Y_2D, "none", _GRAD_2D_IGN, ignore_index=1
    )


def _test_nllloss_mean_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_2D, _Y_2D, "mean", _GRAD_2D_MEAN_IGN, ignore_index=1
    )


def _test_nllloss_sum_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_2D, _Y_2D, "sum", _GRAD_2D_IGN, ignore_index=1
    )


def _test_nllloss_segmentation_none_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_SEG, _Y_SEG, "none", _GRAD_SEG_IGN, ignore_index=1
    )


def _test_nllloss_segmentation_mean_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_SEG, _Y_SEG, "mean", _GRAD_SEG_MEAN_IGN, ignore_index=1
    )


def _test_nllloss_segmentation_sum_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_SEG, _Y_SEG, "sum", _GRAD_SEG_IGN, ignore_index=1
    )


def _test_nllloss_bert_none_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_BERT, _Y_BERT, "none", _GRAD_BERT_IGN, ignore_index=1
    )


def _test_nllloss_bert_mean_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_BERT, _Y_BERT, "mean", _GRAD_BERT_MEAN_IGN, ignore_index=1
    )


def _test_nllloss_bert_sum_backward_with_ignore_index(test_case, device):
    _check_nllloss_backward(
        test_case, device, _X_BERT, _Y_BERT, "sum", _GRAD_BERT_IGN, ignore_index=1
    )
@flow.unittest.skip_unless_1n1d()
class TestNLLLossModule(flow.unittest.TestCase):
    def test_nllloss(test_case):
        """Run every NLLLoss backward case on both cpu and cuda."""
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_nllloss_none_backward,
            _test_nllloss_mean_backward,
            _test_nllloss_sum_backward,
            _test_nllloss_segmentation_none_backward,
            _test_nllloss_segmentation_mean_backward,
            _test_nllloss_segmentation_sum_backward,
            _test_nllloss_bert_none_backward,
            _test_nllloss_bert_mean_backward,
            _test_nllloss_bert_sum_backward,
            _test_nllloss_none_backward_with_ignore_index,
            _test_nllloss_mean_backward_with_ignore_index,
            _test_nllloss_sum_backward_with_ignore_index,
            _test_nllloss_segmentation_none_backward_with_ignore_index,
            _test_nllloss_segmentation_mean_backward_with_ignore_index,
            _test_nllloss_segmentation_sum_backward_with_ignore_index,
            _test_nllloss_bert_none_backward_with_ignore_index,
            _test_nllloss_bert_mean_backward_with_ignore_index,
            _test_nllloss_bert_sum_backward_with_ignore_index,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for test_fun, device in GenArgList(arg_dict):
            test_fun(test_case, device)
# Allow running this test file directly (outside the test runner).
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | big-data-ai.noreply@github.com |
f653e6546975a09d84b6b3de8c352f49f3abf1bb | 54dfcbc5483f4530936b5416fd432a1278182df9 | /NotImplementedError.py | a5736d07c6050fc06abbd5254d0fa9900660112b | [] | no_license | airtondepauli/GeneticAlg | 22ba89449212d2abd5483b4612ebf7eea0a8bbd4 | f27eb5f457a3c9f381f28e572a5f24b2fce3ead0 | refs/heads/master | 2021-05-08T04:36:56.694969 | 2017-11-07T12:29:28 | 2017-11-07T12:29:28 | 108,352,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | class NotImplementedException(Exception):
pass | [
"airtondepauli@icloud.com"
] | airtondepauli@icloud.com |
97fbb34ca3b7555162df80865248f02f54ae4a03 | 47518f3524eec01a8ee045f108d612901214212c | /back_propagation_neural_network/nu1.py | 72998301ad845861a0ac24e658c7641c3a21c62c | [] | no_license | globe45/Face_Detection_and_Face_recognition | c2630f915f7e3877113589ecb4740c3159fe972d | 66ad3a3e928c2fb53171c9d4d138621eb90fe713 | refs/heads/master | 2020-05-07T22:55:20.735212 | 2019-04-12T09:00:37 | 2019-04-12T09:00:37 | 180,966,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,195 | py | import math
import numpy as np
class Connection:
    """Weighted link from a neuron to one of its upstream (input) neurons."""

    def __init__(self, connectedNeuron):
        # Upstream neuron whose output feeds through this connection.
        self.connectedNeuron = connectedNeuron
        # Last applied weight delta, kept for the momentum term.
        self.dWeight = 0.0
        # All connections start with the same small initial weight.
        self.weight = 0.1
class Neuron:
    """Single neuron with sigmoid activation and momentum-based weight updates."""

    eta = 0.9    # learning rate
    alpha = 0.15 # momentum rate

    def __init__(self, layer):
        # Incoming connections; stays empty for input/bias neurons (layer is None).
        self.dendrons = []
        self.error = 0.0
        self.gradient = 0.0
        self.output = 0.0
        if layer is not None:
            for upstream in layer:
                self.dendrons.append(Connection(upstream))

    def addError(self, err):
        # Accumulate error contributions pushed back from downstream neurons.
        self.error += err

    def sigmoid(self, x):
        return 1 / (1 + math.exp(-x * 1.0))

    def dSigmoid(self, x):
        # Sigmoid derivative expressed in terms of the sigmoid's output value.
        return x * (1.0 - x)

    def setError(self, err):
        self.error = err

    def setOutput(self, output):
        self.output = output

    def getOutput(self):
        return self.output

    def feedForword(self):
        # Input/bias neurons have no dendrons and keep their assigned output.
        if not self.dendrons:
            return
        weighted = sum(d.connectedNeuron.getOutput() * d.weight for d in self.dendrons)
        self.output = self.sigmoid(weighted)

    def backPropagate(self):
        # Gradient of the accumulated error w.r.t. this neuron's activation.
        self.gradient = self.error * self.dSigmoid(self.output)
        for dendron in self.dendrons:
            step = Neuron.eta * (dendron.connectedNeuron.output * self.gradient)
            dendron.dWeight = step + self.alpha * dendron.dWeight
            dendron.weight += dendron.dWeight
            # Push this neuron's share of the error to the upstream neuron.
            dendron.connectedNeuron.addError(dendron.weight * self.gradient)
        self.error = 0
class Net:
    """Fully connected feed-forward network assembled from Neuron layers.

    Every layer gets one extra bias neuron whose output is pinned to 1.
    """

    def __init__(self, topology):
        # topology: neuron counts per layer, e.g. [2, 3, 5, 1].
        self.layers = []
        for numNeuron in topology:
            layer = []
            for i in range(numNeuron):
                if (len(self.layers) == 0):
                    layer.append(Neuron(None))          # input layer: no dendrons
                else:
                    layer.append(Neuron(self.layers[-1]))
            # Bias neuron appended to every layer with constant output 1.
            layer.append(Neuron(None))
            layer[-1].setOutput(1)
            self.layers.append(layer)

    def setInput(self, inputs):
        # Load the input vector into the first layer's neurons.
        for i in range(len(inputs)):
            self.layers[0][i].setOutput(inputs[i])

    def feedForword(self):
        # Propagate activations layer by layer (input layer is skipped).
        for layer in self.layers[1:]:
            for neuron in layer:
                neuron.feedForword()

    def backPropagate(self, target):
        # Seed the output-layer errors, then push gradients back to the input.
        for i in range(len(target)):
            self.layers[-1][i].setError(target[i] - self.layers[-1][i].getOutput())
        for layer in self.layers[::-1]:
            for neuron in layer:
                neuron.backPropagate()

    def getError(self, target):
        # Root-mean-square error of the output layer against ``target``.
        err = 0
        for i in range(len(target)):
            e = (target[i] - self.layers[-1][i].getOutput())
            err = err + e ** 2
        err = err / len(target)
        err = math.sqrt(err)
        return err

    def getResults(self):
        # Raw output-layer activations, with the bias neuron dropped.
        output = []
        for neuron in self.layers[-1]:
            output.append(neuron.getOutput())
        output.pop()
        return output

    def getThResults(self):
        # Thresholded outputs: 1 if activation > 0.5 else 0.  BUG FIX: this
        # method existed only as a commented-out string literal (which also
        # contained a ``> >0.5`` typo), yet main() calls it — restored here.
        output = []
        for neuron in self.layers[-1]:
            output.append(1 if neuron.getOutput() > 0.5 else 0)
        output.pop()  # drop the bias neuron
        return output
def main():
    """Train a small network on the four 2-bit patterns, then answer queries.

    Fixes over the original: Python 2 ``print`` statements (SyntaxErrors on
    Python 3) replaced with the print() function, and the interactive
    ``input()`` values converted to float (Py3's input() returns str).
    """
    net = Net([2, 3, 5, 1])
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    outputs = [[0, 0], [1, 0], [1, 0], [0, 1]]
    # Train until the summed per-pattern RMS error drops below 0.01.
    while True:
        err = 0
        for i in range(len(inputs)):
            net.setInput(inputs[i])
            net.feedForword()
            net.backPropagate(outputs[i])
            err = err + net.getError(outputs[i])
        print("error: ", err)
        if err < 0.01:
            break
    # Interactive evaluation loop (runs until interrupted).
    while True:
        a = float(input("type 1st input :"))
        b = float(input("type 2nd input :"))
        net.setInput([a, b])
        net.feedForword()
        print(net.getThResults())
# Script entry point: train the demo network and start the prompt loop.
if __name__ == '__main__':
    main()
| [
"golusingh9714@gmail.com"
] | golusingh9714@gmail.com |
2c77068c7b52da7b6bf28e8b8c235fbd59f0669f | 11277bc99ec0e7a3d47029acd7f5b7aff99bbd18 | /Electric_leakage_detection/CART_decision_tree.py | cf5031d0c98cc53fd699b7a64cdaf1b8e051853d | [] | no_license | woinews/Python_Practice | 4bbe6e5b3e11d1718113ba48b58e5063979bd424 | da6ddb9625e3b5eee5bd3ab2fb2ef09068062bab | refs/heads/master | 2021-09-20T09:47:14.929773 | 2018-08-08T05:48:34 | 2018-08-08T05:48:34 | 118,594,391 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | #-*- coding: utf-8 -*-
#构建并测试CART决策树模型
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
datafile = 'electricity_model.xls'
data = pd.read_excel(datafile) #读取数据,数据的前三列是特征,第四列是标签
#划分训练数据和测试数据,20%作为训练数据,80%作为测试数据
p = 0.8 #设置训练数据比例
train = data.iloc[:int(len(data)*p),:]
test = data.iloc[int(len(data)*p):,:]
x = train.iloc[:,:3].as_matrix()
y = train.iloc[:,3].as_matrix()
x_test = test.iloc[:,:3].as_matrix()
y_test = test.iloc[:,3].as_matrix()
#构建CART决策树模型
from sklearn.tree import DecisionTreeClassifier #导入决策树模型
from sklearn.metrics import accuracy_score #用于输出模型准确率
treefile = 'tree.pkl' #模型输出名字
tree = DecisionTreeClassifier() #建立决策树模型,模型参数https://www.cnblogs.com/pinard/p/6056319.html
tree.fit(x, y) #训练
#保存模型
from sklearn.externals import joblib
joblib.dump(tree, treefile) #保存模型
#定义一个用于绘制混淆矩阵图的函数
def cm_plot(y, yp):
from sklearn.metrics import confusion_matrix #导入混淆矩阵函数
cm = confusion_matrix(y, yp) #混淆矩阵
plt.matshow(cm, cmap=plt.cm.Greens) #画混淆矩阵图,配色风格使用cm.Greens
plt.colorbar() #颜色标签
for x in range(len(cm)): #数据标签
for y in range(len(cm)):
plt.annotate(cm[x,y], xy=(x, y), horizontalalignment='center', verticalalignment='center')
plt.ylabel('True label') #坐标轴标签
plt.xlabel('Predicted label') #坐标轴标签
return plt
predictions = tree.predict(x) #对数据进行预测,将训练模型运用于数据集x
train['预测值'] = [int(np.round(x)) for x in predictions]
cm_plot(y, tree.predict(x)).show() #cm_plot(y,y_predict)显示混淆矩阵可视化结果
#注意到Scikit-Learn使用predict方法直接给出预测结果
#模型的准确率
score = accuracy_score(y,predictions)
print("决策树模型准确率: %.2f%%" % (score*100))
#用模型预测测试样本的结果
predictions_test = tree.predict(x_test)
test['预测值'] = [int(np.round(x)) for x in predictions_test]
cm_plot(y_test, tree.predict(x_test)).show() #cm_plot(y,y_predict)显示混淆矩阵可视化结果
#模型预测测试样本的准确率
score_test = accuracy_score(y_test,predictions_test)
print("决策树模型预测测试样本的准确率: %.2f%%" % (score_test*100))
from sklearn.metrics import roc_curve #导入ROC曲线函数
#ROC详解https://blog.csdn.net/ice110956/article/details/20288239
fpr, tpr, thresholds = roc_curve(y_test, tree.predict_proba(x_test)[:,1], pos_label=1)
plt.plot(fpr, tpr, linewidth=2, label = 'ROC of CART', color = 'green') #作出ROC曲线
plt.xlabel('False Positive Rate') #误检率是相对于虚假目标的总量里有多少被误识为真实目标
plt.ylabel('True Positive Rate') #查准率是指检测到的目标里,真实目标所占的比例
plt.ylim(0,1.05) #边界范围
plt.xlim(0,1.05) #边界范围
plt.legend(loc=4) #图例
plt.show() #显示作图结果
| [
"inews101@163.com"
] | inews101@163.com |
fddcc4349d776fd2da91be99e9b383fe8e218faa | 45b180004c441663bd223219f8edef8c82481be1 | /venv/Lib/site-packages/pyrogram/api/functions/req_dh_params.py | e0048a2c0bc4917b98286ebc4405ffe94c2eea9d | [] | no_license | bradbann/mypy | a679e22fdd04525faf32a73934d813a45af1092f | 8bf6234be438aaf3ce2b69e4c10b2ce84eaccb98 | refs/heads/master | 2023-02-19T14:21:59.362385 | 2020-12-31T09:35:11 | 2020-12-31T09:35:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class ReqDHParams(TLObject):
"""Attributes:
LAYER: ``112``
Attributes:
ID: ``0xd712e4be``
Parameters:
nonce: ``int`` ``128-bit``
server_nonce: ``int`` ``128-bit``
p: ``bytes``
q: ``bytes``
public_key_fingerprint: ``int`` ``64-bit``
encrypted_data: ``bytes``
Returns:
Either :obj:`ServerDHParamsFail <pyrogram.api.types.ServerDHParamsFail>` or :obj:`ServerDHParamsOk <pyrogram.api.types.ServerDHParamsOk>`
"""
__slots__ = ["nonce", "server_nonce", "p", "q", "public_key_fingerprint", "encrypted_data"]
ID = 0xd712e4be
QUALNAME = "functions.ReqDHParams"
def __init__(self, *, nonce: int, server_nonce: int, p: bytes, q: bytes, public_key_fingerprint: int, encrypted_data: bytes):
self.nonce = nonce # int128
self.server_nonce = server_nonce # int128
self.p = p # bytes
self.q = q # bytes
self.public_key_fingerprint = public_key_fingerprint # long
self.encrypted_data = encrypted_data # bytes
@staticmethod
def read(b: BytesIO, *args) -> "ReqDHParams":
# No flags
nonce = Int128.read(b)
server_nonce = Int128.read(b)
p = Bytes.read(b)
q = Bytes.read(b)
public_key_fingerprint = Long.read(b)
encrypted_data = Bytes.read(b)
return ReqDHParams(nonce=nonce, server_nonce=server_nonce, p=p, q=q, public_key_fingerprint=public_key_fingerprint, encrypted_data=encrypted_data)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
b.write(Int128(self.nonce))
b.write(Int128(self.server_nonce))
b.write(Bytes(self.p))
b.write(Bytes(self.q))
b.write(Long(self.public_key_fingerprint))
b.write(Bytes(self.encrypted_data))
return b.getvalue()
| [
"127575708@qq.com"
] | 127575708@qq.com |
26e3b29d4d7494f59bb52fe9c855cbe9b4f7933b | 83d75366add7d1be4b84b6fb4a68dbe973825ad8 | /girlfriend_problem.py | 4fa95c2a186018f7fbcc5a1766de2077a5932e4c | [] | no_license | namankumar818/python-lab | 30cd419c4f4b8d7af21059b3926920cc60347a8b | f835cab46877c30edf3e91fc0a8dd0abc300d65f | refs/heads/main | 2023-04-08T23:12:34.204855 | 2021-04-12T07:09:15 | 2021-04-12T07:09:15 | 347,914,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | a = int(input('enter the number'))
b = a//5
c = a%5
d = c//4
e = c%4
f = e//3
g = e%3
h = g//2
i = g%2
j = i//1
print(b+d+f+h+i)
| [
"noreply@github.com"
] | namankumar818.noreply@github.com |
397e2e95121b8ee4e22f84e8c02fce1ef7225116 | 073b278054195f8e616834457d73ac20cd9e9a14 | /settings.py | aa176135abb638373fde28c374c554092af61742 | [] | no_license | ndn247/socialapp_django-web-backend | cd5c538dcc763b5e313d22359fa934ae35602029 | 3b3f5bcd43b76243ba1206fb46b720173389dbcf | refs/heads/master | 2021-01-22T23:20:29.525532 | 2014-08-17T20:20:12 | 2014-08-17T20:20:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,194 | py | from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
#"drum.links",
"posts",
"rest_framework",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.accounts",
#"mezzanine.blog",
#"mezzanine.forms",
#"mezzanine.pages",
#"mezzanine.galleries",
#"mezzanine.twitter",
#"mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
# "mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
# Drum-specific Mezzanine settings
AUTH_PROFILE_MODULE = "posts.Profile"
SITE_TITLE = "Visa Forums"
RATINGS_RANGE = (-1, 1)
RATINGS_ACCOUNT_REQUIRED = True
COMMENTS_ACCOUNT_REQUIRED = True
ACCOUNTS_PROFILE_VIEWS_ENABLED = True
# Drum settings
ALLOWED_DUPLICATE_LINK_HOURS = 24 * 7 * 3
ITEMS_PER_PAGE = 20
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
from local_settings import *
except ImportError:
pass
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| [
"namnguyen@Nams-MacBook-Air.local"
] | namnguyen@Nams-MacBook-Air.local |
dfcfe4783ffb3d4031454aa52750e1e8ea3df556 | 23d60f7a1f5a67c0dad6f2f56f602a089169197f | /venv/Scripts/pip-script.py | 9c0b85d15ddbe8c9fe2ea52a7a2ff3ed2b98b76e | [] | no_license | MitchelSmith/CSE4303-Homework | 420a1465f4185df527d3cb7fbf2ae9ac3e635b02 | 0653f289a34ac08a7f4d3100b76fc0fec7df6ad1 | refs/heads/master | 2021-09-11T22:01:59.420588 | 2018-04-12T19:31:40 | 2018-04-12T19:31:40 | 127,985,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #!C:\Users\MitchelSmith\PycharmProjects\HMWK_01_mjs9110\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
)
| [
"mitchel.smith@yahoo.com"
] | mitchel.smith@yahoo.com |
a17fa80d06a5789c317f062c7882dbdd43a58392 | 451fbd1c7d0ea0e11292887fd44c1b798dff5237 | /src/OnlineHist.py | 5195f4dcc0366e2b61ed680ceca96fcf43561481 | [] | permissive | standmit/trigger_sync | cc0784296dc310b20651d567c92d498fc86a534b | 66919d43bc4658859e1cd8c9cadde4b8d7939cad | refs/heads/master | 2021-11-08T11:46:21.200412 | 2018-09-07T13:48:33 | 2018-09-07T13:48:33 | 147,806,413 | 0 | 0 | BSD-3-Clause | 2018-09-07T10:03:57 | 2018-09-07T10:03:57 | null | UTF-8 | Python | false | false | 3,006 | py | from PlotWindow import PlotWindow
from time_sync.msg import Event
import rospy
import sys, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy
from std_msgs.msg import Int8
class OnlineHist(PlotWindow):
def __init__(self):
PlotWindow.__init__(self)
self.window_size=200
self.values=numpy.zeros((self.window_size))
self.index=0
rospy.init_node('visualizer', anonymous=True)
self.subscriber = rospy.Subscriber("event", Event, self.plotResults, queue_size = 1 )
def plotResults(self, data):
self.axes.clear()
self.axes.set_autoscaley_on(True)
if self.index==self.window_size-1:
self.index=0
else:
self.index=self.index+1
device_time = data.device_time.secs + data.device_time.nsecs /1.0e9
local_receive_time = data.local_receive_time.secs + data.local_receive_time.nsecs /1.0e9
corrected_local_time = data.corrected_local_time.secs + data.corrected_local_time.nsecs /1.0e9
self.values[self.index]=(1e3 * (local_receive_time - corrected_local_time) )
print self.values
n, bins, patches = self.axes.hist(self.values, 30, (0, 1), normed=True, facecolor='green', alpha=0.75)
output= "Data index "+str(data.local_receive_time.secs)
min_x, max_x=self.axes.get_xlim()
min_y, max_y=self.axes.get_ylim()
self.axes.text(max_x*0.6,max_y*0.7,output,horizontalalignment='left',verticalalignment='center')
self.canvas.draw()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = OnlineHist()
window.show()
app.exec_()
#!/usr/bin/env python
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class PlotWindow(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Sliding histogramm')
self.create_main_frame()
self.on_draw()
def save_plot(self):
pass
def on_about(self):
pass
def on_pick(self, event):
pass
def on_draw(self):
self.axes.clear()
self.axes.grid(True)
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.axes = self.fig.add_subplot(111)
self.canvas.mpl_connect('pick_event', self.on_pick)
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
| [
"a.english@qut.edu.au"
] | a.english@qut.edu.au |
1b0d898135f055671f3b24c6c17b9d59191378e2 | fe8cc4c318a2122aee94ceea7b671d1c47c9844a | /ICPC/2020_Ronda_2/K.py | ce23a5634ba8cd8bd8d027a9b7d09c9776f03f07 | [] | no_license | YoshiBrightside/Ode-to-my-Failures | 4540955e21ba69406d8bbbf26c6e0d47bcd8ac57 | 3fbbb469473443bbfe9459317dbd0fe1ff65fab5 | refs/heads/master | 2021-06-27T08:45:42.037526 | 2020-12-06T04:50:11 | 2020-12-06T04:50:11 | 193,014,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | aux = list(map(int, input().split())) | [
"yoshibrightside@ciencias.unam.mx"
] | yoshibrightside@ciencias.unam.mx |
e95c10e9db030859ff4ea6047892ede842060093 | f10465390bb4fcef77beca526d208eacb482da1a | /StoBatchTinyImageNet/taylor_loss.py | 4c1f683b1eb42146a1554d2399187362f9ce932a | [
"MIT"
] | permissive | haiphanNJIT/StoBatch | 414d7560744fecd33347ab41f99ec9e660e17a39 | 08a6505b1fad609850de6c6bbaaa54c0200f0adb | refs/heads/master | 2022-12-09T16:15:11.249292 | 2020-09-17T15:10:35 | 2020-09-17T15:10:35 | 270,418,676 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,817 | py | ########################################################################
# Author: NhaiHai Phan, Han Hu
# License: Apache 2.0
# source code snippets from: Tensorflow
########################################################################
'''
Taylor expansion of loss function, derived from Tensorflow implementation
'''
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
import math
import tensorflow as tf
def TaylorExp(logits, labels, adv_logits, b_labels, L, alpha, perturbW):
"""You can also add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.float32)
b_labels = tf.cast(b_labels, tf.float32)
# Differentially private sparse cross entropy error based on Taylor Expansion
zeros = array_ops.zeros_like(adv_logits, dtype=adv_logits.dtype)
cond = (adv_logits >= zeros)
relu_logits = array_ops.where(cond, adv_logits, zeros)
neg_abs_logits = array_ops.where(cond, -adv_logits, adv_logits)
Taylor_adv = math_ops.add(relu_logits - adv_logits * b_labels, math.log(2.0) + 0.5*neg_abs_logits + 1.0/8.0*neg_abs_logits**2)
### Taylor for benign x
zeros2 = array_ops.zeros_like(logits, dtype=logits.dtype)
cond2 = (logits >= zeros2)
relu_logits_benign = array_ops.where(cond2, logits, zeros2)
neg_abs_logits_benign = array_ops.where(cond2, -logits, logits)
Taylor_benign = math_ops.add(relu_logits_benign - logits * labels, math.log(2.0) + 0.5*neg_abs_logits_benign + 1.0/8.0*neg_abs_logits_benign**2)
zeros1 = array_ops.zeros_like(perturbW, dtype=perturbW.dtype)
cond1 = (perturbW >= zeros1)
perturbW = array_ops.where(cond1, perturbW, -perturbW)
### Adversarial training loss
#adv_loss = (1/(L + alpha*L))*(Taylor_benign + alpha * Taylor_adv)
# adv_loss = (1/(1 + alpha))*(Taylor_benign + alpha * Taylor_adv)
adv_loss = (Taylor_benign + alpha * Taylor_adv)
cross_entropy_mean = tf.reduce_mean(adv_loss, name='cross_entropy') + tf.reduce_mean(perturbW, name = 'perturbW')
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def TaylorExp_no_noise(logits, labels, adv_logits, b_labels, L, alpha):
"""
You can also add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.float32)
b_labels = tf.cast(b_labels, tf.float32)
# Differentially private sparse cross entropy error based on Taylor Expansion
zeros = array_ops.zeros_like(adv_logits, dtype=adv_logits.dtype)
cond = (adv_logits >= zeros)
relu_logits = array_ops.where(cond, adv_logits, zeros)
neg_abs_logits = array_ops.where(cond, -adv_logits, adv_logits)
Taylor_adv = math_ops.add(relu_logits - adv_logits * b_labels, math.log(2.0) + 0.5*neg_abs_logits + 1.0/8.0*neg_abs_logits**2)
# Taylor_adv = tf.abs(math_ops.add(relu_logits - adv_logits * b_labels, math.log(2.0) + 0.5*neg_abs_logits))
### Taylor for benign x
zeros2 = array_ops.zeros_like(logits, dtype=logits.dtype)
cond2 = (logits >= zeros2)
relu_logits_benign = array_ops.where(cond2, logits, zeros2)
neg_abs_logits_benign = array_ops.where(cond2, -logits, logits)
Taylor_benign = math_ops.add(relu_logits_benign - logits * labels, math.log(2.0) + 0.5*neg_abs_logits_benign + 1.0/8.0*neg_abs_logits_benign**2)
# Taylor_benign = tf.abs(math_ops.add(relu_logits_benign - logits * labels, math.log(2.0) + 0.5*neg_abs_logits_benign))
### Adversarial training loss
# adv_loss = (1/(L + alpha*L))*(Taylor_benign + alpha * Taylor_adv)
# adv_loss = (1/(1 + alpha))*(Taylor_benign + alpha * Taylor_adv)
adv_loss = (Taylor_benign + alpha * Taylor_adv)
cross_entropy_mean = tf.reduce_mean(adv_loss, name='cross_entropy')# + tf.reduce_sum(perturbW, name = 'perturbW');
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
| [
"noreply@github.com"
] | haiphanNJIT.noreply@github.com |
3f130e21c88ebe41f11f97ba448bfbe01fd97cc1 | 84339e288ff97f597c5d2ec247deff1556367049 | /tools/fcn/python/test_image.py | 6169b4a3b5dbec5412816bcfeed164001f191507 | [] | no_license | wozy7/oirds | 35576162ebff5ef13f13e2c5b2366982f5310664 | 684a40725613c1903eb00ae836817f4f7cd2dc80 | refs/heads/master | 2020-12-03T01:46:39.019692 | 2016-10-31T12:48:02 | 2016-10-31T12:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | #!/usr/bin/env python
#import h5py, os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#%matplotlib inline
import lmdb
import gt_tool
import caffe
from PIL import Image
import numpy as np
import net_tool
import json_tools
def main():
from pathlib import Path
import os
import random
import pandas as pd
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
if len(sys.argv) < 2:
print "Usage: ", sys.argv[0], "image config"
sys.exit( 0 )
config = json_tools.loadConfig(sys.argv[2])
gtTool = gt_tool.GTTool(config)
net = net_tool.loadNet(config)
im = loadImgArray(sys.argv[1], gtTool,config);
print im.shape
result = net_tool.runcaffe(net, im, config)
if (json_tools.dumpBlobs(config)):
net_tool.dumpNetFilters(net, sys.argv[1][0:sys.argv[1].find('.')])
outputResult(result[0], result[2], result[1], im, config)
def loadImgArray(name, gtTool,config):
from PIL import Image
print name + '-----------'
initialiSize, imRaw = gtTool.loadImage(name)
return net_tool.convertImage(imRaw,config)
def outputResult(out, transformer, data, rawImage, config):
layrName = json_tools.getNetworkOutputName(config)
classPerPixel = out[layrName][0].argmax(axis=0)
print 'RANGE ' + str(np.min(out[layrName][0])) + " to " + str(np.max(out[layrName][0]))
print 'SHAPE ' + str(out[layrName][0].shape)
print 'HIST ' + str(np.histogram(classPerPixel))
print 'UNIQUE ' + str(np.unique(np.array(classPerPixel).flatten()))
plt.subplot(1, 2, 1)
plt.imshow(rawImage)
plt.subplot(1, 2, 2)
# imArray = toImageArray(classPerPixel);
# plt.imshow(imArray)
plt.imshow(classPerPixel)
plt.colorbar()
plt.savefig('im_output')
plt.close()
return classPerPixel
def toImageArray(classPerPixel):
ima = np.zeros((classPerPixel.shape[0], classPerPixel.shape[1], 3), dtype=np.uint8)
for i in range(0,ima.shape[0]):
for j in range(0,ima.shape[1]):
color = gt_tool.get_label_color(classPerPixel[i,j])
ima[i,j] = color
return ima
if __name__=="__main__":
main()
| [
"robertson_eric@bah.com"
] | robertson_eric@bah.com |
0d9943ed4e11f7fca9b0884c8f356288d17d11cd | ac47f86e4fbd46c641575b2a8ccc401fd70c98e9 | /Solutions/Word Break II.py | 0c0cfc253ff7c176ba7aa1db126349087ad46716 | [] | no_license | jimmy623/LeetCode | 0a19f6e32c29e087e2d808153cb7a6e3794e2b67 | c4c1838bcde53484d3df654714bbbf6589c03c37 | refs/heads/master | 2021-07-12T06:02:14.973878 | 2021-03-14T16:03:26 | 2021-03-14T16:03:26 | 25,859,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | class Solution:
# @param s, a string
# @param dict, a set of string
# @return a list of strings
def wordBreak(self, s, dict):
dict = dict
n = len(s)
result = [[] for i in range(n)]
for i in range(n-1,-1,-1):
if s[i:n] in dict:
result[i].append(s[i:n])
for j in range(i+1,n):
if len(result[j]) > 0:
if s[i:j] in dict:
for w in result[j]:
result[i].append(s[i:j]+" "+ w)
continue
return result[0]
# Ad-hoc smoke test (Python 2 print statement): expects the two
# segmentations "cat sand dog" and "cats and dog".
s = Solution()
t = "catsanddog"
dict = ["cat", "cats", "and", "sand", "dog"]
print s.wordBreak(t,dict)
#Word Break II
#https://oj.leetcode.com/problems/word-break-ii/
"jimmy623@gmail.com"
] | jimmy623@gmail.com |
8f577e608786ef296066682e54e8ce562420a1a7 | ba19b2e5f227d1aad3e2795bc800ca0b2f2ef6d5 | /gumbug/migrations/0001_initial.py | b361f77be238dab61f7800c35875c7cb951feb07 | [] | no_license | rheide/gumbug | 3a8b5f5c6e411cc3c3cbf8e892e71c5dc71af38a | 3d84144ae3cc323b680f9081f7b4406e5ee55427 | refs/heads/master | 2021-01-10T20:24:58.588628 | 2015-05-27T19:15:00 | 2015-05-27T19:15:00 | 29,777,119 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates the Search model with a single
    # source_url URLField (plus the implicit auto primary key).
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Search',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('source_url', models.URLField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"rheide7021@gmail.com"
] | rheide7021@gmail.com |
8ad451accd000a3ae4dec3b79c77716d1339c9bd | e3e3c2e5528cb26d20fe52098d435109753895d0 | /testsite/migrations/0001_initial.py | 40353c99e872f9af59be5e960612e49ef6c0653e | [] | no_license | imasgo/my-first-blog | 02da59b7ff84de004c462dfde7d0927b7fb75371 | 4c03b4ba9bd7cc7dc922129c89096cabdee9c68c | refs/heads/master | 2021-04-30T08:49:24.085545 | 2019-03-09T16:20:50 | 2019-03-09T16:20:50 | 121,384,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-14 13:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration: creates HistoryNote, a biographical record with an
    # author FK to the configured user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='HistoryNote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('name_in_sources', models.CharField(max_length=250)),
                ('titles', models.TextField()),
                ('life_dates', models.CharField(max_length=100)),
                ('biography', models.TextField()),
                ('family_relationship', models.TextField()),
                ('others', models.TextField()),
                ('sources', models.TextField()),
                ('literature', models.TextField()),
                # NOTE(review): default='hello' is an odd default for a user
                # foreign key - confirm it was intended.
                ('author', models.ForeignKey(default='hello', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"sasmashka@gmail.com"
] | sasmashka@gmail.com |
c2cc23d29a6ae8dfcfbb5c3fa0be67c85ca989f9 | 94d8159881678baa2603bffd8844f999fd67f3e1 | /shopping/migrations/0002_add_to_cart.py | a57d237e25fbb7383a3fc10a776f646cb4c76d30 | [] | no_license | Arpitpandey99/FashionHub | d8316932ceda2f739b907a6e77448cd143fc98c2 | 10fea21aed91ea5764abceddb6021c03fa5c354e | refs/heads/master | 2021-05-17T09:05:52.173365 | 2020-10-02T16:13:51 | 2020-10-02T16:13:51 | 250,719,742 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-07-05 13:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates Add_to_cart: a join table linking a user to a shopping.Product.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shopping', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Add_to_cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shopping.Product')),
                ('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
18200e3b222e40e4b626e67f099e15776f35ac5c | 7ce479cac0a14d924159db9c784e3325b8f0bce7 | /schemaorgschemas/Thing/Intangible/Enumeration/ContactPointOption/__init__.py | c377546d23d57ba356c23c62339579600488b793 | [] | no_license | EvelineAndreea/AGRe | 1f0c27237eb047a60bbcfb8d73e3157035406409 | b952125896a82741f6617c259dd4060954583180 | refs/heads/master | 2020-04-08T16:08:11.517166 | 2018-11-28T07:15:56 | 2018-11-28T07:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | # -*- coding: utf-8 -*-
from schemaorgschemas.Thing import potentialActionProp, descriptionProp, sameAsProp, imageProp, urlProp, mainEntityOfPageProp, additionalTypeProp, alternateNameProp, nameProp
from schemaorgschemas.Thing.Intangible.Enumeration import supersededByProp
from schemaorgschemas.djangoschema import SchemaObject, SchemaProperty, SchemaEnumProperty, SCHEMA_ORG
from django.conf import settings
class ContactPointOptionSchema(SchemaObject):
    """Schema Mixin for ContactPointOption
    Usage: place after django model in class definition, schema will return the schema.org url for the object
    Enumerated options related to a ContactPoint.
    """
    def __init__(self):
        # Name of the schema.org type this mixin represents.
        self.schema = 'ContactPointOption'
# Django-style (value, label) choice pairs for the schema.org
# ContactPointOption enumeration.
CONTACTOPTION_CHOICES = (
    ('TOLLFREE', 'TollFree: The associated telephone number is toll free.'),
    ('HEARINGIMPAIREDSUPPORTED',
     'HearingImpairedSupported: Uses devices to support users with hearing impairments.'),
)
class contactOptionProp(SchemaEnumProperty):
    """
    Enumeration for contactOption
    Prepopulated with the Schema.org choices
    """
    _enum = True
    _prop_schema = 'contactOption'
    choices = CONTACTOPTION_CHOICES
    _format_as = "enum"
    # Maps each stored choice key to the exact schema.org enum member name.
    adapter = {
        'TOLLFREE': 'TollFree',
        'HEARINGIMPAIREDSUPPORTED': 'HearingImpairedSupported',
    }
# schema.org version 2.0
| [
"mihai.nechita95@gmail.com"
] | mihai.nechita95@gmail.com |
deb1d25871f48dfa55e52871aa3be1b16dc631b2 | 7aa2d9f5fe4a7f50751a552bd282133dca562a12 | /rest_project/rest_svc/blueprints/github/resources.py | 069e5cdfa135defaba3b86c957e1ccae91304b88 | [] | no_license | anbarfirdaus/REST-API | 7b5897a4aed1c7331e8fb140eb218c1f76b3c805 | ca91a4b7eddd20d2977df5bb94c9b5533615b4e5 | refs/heads/master | 2020-04-24T11:16:19.768447 | 2019-02-22T07:42:06 | 2019-02-22T07:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | import requests
from flask import Blueprint
from flask_restful import Resource, Api, reqparse, marshal
from . import *
from blueprints import db
import logging, json
from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, get_jwt_claims
bp_github = Blueprint('bp_github', __name__)
api = Api(bp_github)
class GithubLogin(Resource):
    """Search GitHub users and, for every match, record the user's login and
    the concatenation of the languages of their public repositories.

    Each result is also persisted as a ``Github_input`` row.
    """

    # Base URL of the public GitHub REST API.
    wio_host = 'https://api.github.com'

    def get(self):
        """Handle GET /search/users.

        Query args (all forwarded verbatim to GitHub): q, type, page, per_page.
        Returns a list of {'username': ..., 'language': ...} dicts.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('q', location='args')
        parser.add_argument('type', location='args')
        parser.add_argument('page', location='args')
        parser.add_argument('per_page', location='args')
        args = parser.parse_args()

        results = []
        search_resp = requests.get(
            self.wio_host + '/search/users',
            params={'q': args['q'], 'type': args['type'],
                    'page': args['page'], 'per_page': args['per_page']})
        # Cleanup over the original: removed the dead `LIST_Language` local
        # and replaced the misspelled `lenght`/range(len(...)) index loops
        # with direct iteration.  Behavior is unchanged.
        for item in search_resp.json()["items"]:
            username = item["login"]
            record = {'username': username}
            # One extra request per user to list that user's repositories.
            repos = requests.get(self.wio_host + '/users/%s/repos' % username).json()
            # NOTE(review): languages are concatenated with no separator,
            # e.g. 'PythonJava' - confirm this is the intended format.
            languages = ''
            for repo in repos:
                language = repo["language"]
                if language is not None:
                    languages += language
            record['language'] = languages
            results.append(record)
            github = Github_input(None, record['username'], record['language'])
            db.session.add(github)
            db.session.commit()
        return results
# Route GET /search/users (mirrors GitHub's own search path) to GithubLogin.
api.add_resource(GithubLogin, '/search/users')
"muhammadridhorosa@gmail.com"
] | muhammadridhorosa@gmail.com |
9c9dec586f5fbb6ed71c859c29eb257a39aa98a8 | 8246e9fbdecdb37651e0d09497fd9428e434f33c | /help_subcategory/migrations/0002_auto_20180405_1018.py | 4af8bed9fafe7dced55ddbeb9e4b02205aa18aec | [] | no_license | rajeev1234/Landing-Page | 479995026ab01fc504a1e9502e7763dc04266009 | 4bfd22a6a1776907ba78b3dc9037064c820b049e | refs/heads/master | 2020-03-08T13:37:20.253252 | 2018-04-05T06:33:26 | 2018-04-05T06:33:26 | 128,162,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # Generated by Django 2.0.3 on 2018-04-05 04:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds creator/author foreign keys to HelpSubCategory and Comment.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('help_subcategory', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='helpsubcategory',
            name='Help_SubCategory_Creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='helpsubcategorys', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='comment',
            name='Comment_Help_SubCategory',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='help_subcategory.HelpSubCategory'),
        ),
        migrations.AddField(
            model_name='comment',
            name='Help_SubCategory_Comment_Author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commenthelpsubcategorys', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"ccrcian.rajeev1@gmail.com"
] | ccrcian.rajeev1@gmail.com |
d38685aaff93073ac0957e743adc014a869d071c | fe2235917a8e6f942f71322e147f62ad2c2077e1 | /api/models.py | 29d0af0fc64b7ec8285aa2f8b2b313b2dfbe8af6 | [] | no_license | SergeyKorobenkov/softlogic-test | 732e1315ae83b27008c1f9dd5e3c3c91931f087a | b1af553ec2ceffab76400c920b3df3e0eb091e15 | refs/heads/main | 2023-01-04T04:52:33.863414 | 2020-10-25T23:13:10 | 2020-10-25T23:13:10 | 306,100,388 | 0 | 1 | null | 2020-10-29T06:37:15 | 2020-10-21T17:32:59 | Python | UTF-8 | Python | false | false | 558 | py | from django.db import models
import uuid
# Create your models here.
class Person(models.Model):
    """A person whose face-recognition feature vector may be stored."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    first_name = models.CharField(max_length=50)  # given name
    last_name = models.CharField(max_length=50)  # family name
    vector = models.TextField()  # serialized feature vector
    is_vector = models.BooleanField()  # whether `vector` actually holds data

    def __str__(self):
        # BUG FIX: the original returned self.post_id, a field this model
        # does not define, so str(person) raised AttributeError.  Return the
        # person's full name instead.
        return '%s %s' % (self.first_name, self.last_name)
"skorobenkov@yandex.ru"
] | skorobenkov@yandex.ru |
b49275f3adf679d5fba6ab7e6dfdb203b288b68e | d89c4d1233cc22a5be2a17d0f7f20e2da0720931 | /templatebouger.py | 449da99eafda605e99c635a9f639548274c1acd1 | [] | no_license | Elmesias1010101/Drone | 80d91e310f1a8c73d1a3062de7d31527526d7fd5 | 23e86dc91062e9af2b3479a3afbf0ac2ab2fa87b | refs/heads/main | 2023-02-22T13:56:19.407755 | 2021-01-27T14:53:38 | 2021-01-27T14:53:38 | 333,486,217 | 0 | 0 | null | 2021-01-27T16:14:07 | 2021-01-27T16:14:07 | null | UTF-8 | Python | false | false | 1,300 | py | #!/usr/bin/env python
from __future__ import print_function
import roslib
#roslib.load_manifest('my_package')
import sys
import rospy
import cv2
from std_msgs.msg import Empty
from geometry_msgs.msg import Twist
import time
class MoveDrone:
    """Small helper that publishes take-off, landing and velocity commands
    to a ROS-controlled drone."""

    def __init__(self):
        self.takeoff_pub = rospy.Publisher("TOPIC_NAME", Empty) # TODO put the takeoff topic name here
        self.landing_pub = rospy.Publisher("TOPIC_NAME", Empty) # TODO put the landing topic name here
        # BUG FIX: the message class is `Twist`; the original's `TWIST` was an
        # undefined name and raised NameError on construction.
        self.move_pub = rospy.Publisher("TOPIC_NAME", Twist) # Publish commands to drone

    def move_drone(self, speed=(0.0, 0.0, 0.0), orient=(0.0, 0.0, 0.0)):
        """Publish one velocity command.

        Args:
            speed: (x, y, z) linear velocity components.
            orient: (x, y, z) angular velocity components.

        Defaults are immutable tuples to avoid the shared-mutable-default
        pitfall (the original used lists).
        """
        vel_msg = Twist()
        # TODO: fill the velocity fields here with the desired values
        # TODO: fill the angular velocities here with the desired values
        self.move_pub.publish(vel_msg)
        return 0

    def takeoff_drone(self):
        """Send the take-off command."""
        # BUG FIX: `Emtpy()` was a typo for `Empty()` (NameError at runtime).
        empty_msg = Empty()
        # TODO: send takeoff command to the drone

    def land_drone(self):
        """Send the landing command."""
        # BUG FIX: `Emtpy()` was a typo for `Empty()` (NameError at runtime).
        empty_msg = Empty()
        # TODO: send landing command to the drone
if __name__ == '__main__':
    # Start the ROS node and create the drone-command helper.
    rospy.init_node('basic_controller', anonymous=True)
    move = MoveDrone()
    # TODO define your timer and your sequential commands here !
| [
"noreply@github.com"
] | Elmesias1010101.noreply@github.com |
071635d98085a117d772f1e9af887314b38c064a | 786de89be635eb21295070a6a3452f3a7fe6712c | /ImgAlgos/tags/V00-03-81/SConscript | 46a394eac2bcf67065024e8823eee3892f255c81 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package ImgAlgos
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
#
# For the standard SIT packages which build libraries, applications,
# and Python modules it is usually sufficient to call
# standardSConscript() function which defines rules for all
# above targets. Many standard packages do not need any special options,
# but those which need can modify standardSConscript() behavior using
# a number of arguments, here is a complete list:
#
# LIBS - list of additional libraries needed by this package
# BINS - dictionary of executables and their corresponding source files
# TESTS - dictionary of test applications and their corresponding source files
# SCRIPTS - list of scripts in app/ directory
# UTESTS - names of the unit tests to run, if not given then all tests are unit tests
# PYEXTMOD - name of the Python extension module, package name used by default
#
#
#standardSConscript(NEED_QT=1, UTESTS=[])
#standardSConscript()
# Link against psalg plus OpenMP/pthread and the png/tiff image libraries;
# build with -fopenmp and run no unit tests.
standardSConscript( LIBS="psalg gomp rt pthread png tiff", UTESTS=[], CCFLAGS="-fopenmp")
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 | |
99587659fc1db6efa0aaba60bf3bcce57ba885a7 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/cbc.py | 078fa65595708cd2badb67018da54e9688344b0c | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Implements the toy language's print statement (Python 2 `print`).
    # `lineRemaining` is a token list; the payload must be delimited by
    # standalone double-quote tokens at both ends.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            # Drop the surrounding quote tokens, join the rest with spaces.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens: print an empty line.
            print
def main(fileName):
    # Interpret a toy-language source file line by line: every line must
    # start with the keyword 'cBC' followed by a quoted payload; any other
    # first token aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'cBC':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python cbc.py <source-file>
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
440f2021394e0c377a41cacae183ecb9f63c14c1 | ac7bc719e33a67b7950eba8b5c809d2d4c52f251 | /src/metageta/formats/fast_l7a.py | 4ba085254fb8c575ecfc1214d7093ec12eaf672b | [] | no_license | GRSEB9S/LandsatProcessingPlugin | 1351a154af3c6540f9256e02022958f717ff4fe0 | 3e04c4a25ae7898d086fd97c6c876796d75bf984 | refs/heads/master | 2021-05-28T00:11:22.597812 | 2014-11-24T14:03:02 | 2014-11-24T14:03:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,862 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2011 Australian Government, Department of Sustainability, Environment, Water, Population and Communities
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Metadata driver for ACRES Landsat FastL7A imagery
B{Format specification}:
- U{http://www.ga.gov.au/image_cache/GA10348.pdf}
B{General info}:
- U{http://www.ga.gov.au/remote-sensing/satellites-sensors/landsat}
'''
format_regex=[ #Landsat 7 FastL7A - Multispectral, Pan & Thermal
r'header\.h(rf|pn|tm)$', # - GA file names
r'l7[0-9]{7,7}\_[0-9]{11,11}\_h(rf|pn|tm).fst$', # - Standard file names
]
'''Regular expression list of file formats'''
#import base dataset module
import __dataset__
# import other modules (use "_" prefix to import privately)
import sys, os, re, glob, time, math, string
from metageta import utilities, geometry, spatialreferences
try:
from osgeo import gdal
from osgeo import gdalconst
from osgeo import osr
from osgeo import ogr
except ImportError:
import gdal
import gdalconst
import osr
import ogr
gdal.AllRegister()
class Dataset(__dataset__.Dataset):
    '''Subclass of base Dataset class'''
    def __init__(self,f):
        '''Collect the FAST header and its matching band files.

        f - path to the header file; falls back to self.fileinfo when empty.
        Sets self._filetype to HRF/HTM/HPN and self.filelist to
        [header, band files...].
        '''
        if not f:f=self.fileinfo['filepath']
        d=os.path.dirname(f)
        if open(f).read(1024).strip()[0]=='<':#HTML file, ignore it.
            raise NotImplementedError
        # Pick the band-file regex from the product type encoded in the
        # header file name (HRF=VNIR/SWIR, HTM=thermal, HPN=pan).
        if 'HRF' in f.upper():
            self._filetype='HRF'
            #rex='BAND[1-57]\.dat|L7[0-9]{7,7}_[0-9]{11,11}_B[1-57]0\.FST' #not picking up the ACRES .ers files
            rex='BAND[1-57].*|L7[0-9]{7,7}_[0-9]{11,11}_B[1-57]0\.FST'
        elif 'HTM' in f.upper():
            self._filetype='HTM'
            #rex='BAND6[LH]\.dat|L7[0-9]{7,7}_[0-9]{11,11}_B6[1-2]\.FST' #not picking up the ACRES .ers files
            rex='BAND6[LH].*|L7[0-9]{7,7}_[0-9]{11,11}_B6[1-2]\.FST'
        elif 'HPN' in f.upper():
            self._filetype='HPN'
            #rex='BAND8\.dat|L7[0-9]{7,7}_[0-9]{11,11}_B80\.FST' #not picking up the ACRES .ers files
            rex='BAND8.*|L7[0-9]{7,7}_[0-9]{11,11}_B80\.FST'
        filelist=[f] #header
        filelist.extend([f for f in utilities.rglob(d,rex,regex=True, regex_flags=re.I, recurse=False)]) #bands
        self.filelist=filelist #"self.filelist" is a property, not a list, we can only 'get' or 'set' it.
    def __getmetadata__(self):
        '''Read Metadata for an ACRES Landsat FastL7A format image as GDAL doesn't get it all.
        Format description: http://www.ga.gov.au/image_cache/GA10348.pdf
        Note:
        hrf = ~30m VNIR/SWIR (bands 1-5 & 7)
        htm = ~60m thermal (band 6)
        hpn = ~15m pan (band 8)
        '''
        f=self.fileinfo['filepath']
        d=os.path.dirname(f)
        hdr=open(f).read()
        # NOTE(review): `err` is assigned but never used.
        err='Unable to open %s' % f
        md=self.metadata
        md['filesize']=sum([os.path.getsize(file) for file in self.filelist])
        md['filetype'] = 'FAST/EOSAT FAST Format'
        # All header fields are read by fixed (record, column) byte offsets.
        rl=1536#recordlength
        ######################################
        ##Record 1 - administrative
        ######################################
        rec=1
        req_id=utilities.readascii(hdr,(rec-1)*rl,9,28)
        loc=utilities.readascii(hdr,(rec-1)*rl,35,51)
        acquisition_date=utilities.readascii(hdr,(rec-1)*rl,71,78)
        # Header date is YYYYMMDD; store as ISO YYYY-MM-DD.
        md['imgdate']='%s-%s-%s'%(acquisition_date[:4],acquisition_date[4:6],acquisition_date[6:])
        md['satellite']=utilities.readascii(hdr,(rec-1)*rl,92,101)
        md['sensor']=utilities.readascii(hdr,(rec-1)*rl,111,120)
        md['mode']=utilities.readascii(hdr,(rec-1)*rl,135,140)
        md['viewangle']=float(utilities.readascii(hdr,(rec-1)*rl,154,159))
        product_type=utilities.readascii(hdr,(rec-1)*rl,655,672)
        product_size=utilities.readascii(hdr,(rec-1)*rl,688,697)
        level=utilities.readascii(hdr,(rec-1)*rl,741,751)
        md['resampling']=utilities.readascii(hdr,(rec-1)*rl,765,766)
        md['cols']=int(utilities.readascii(hdr,(rec-1)*rl,843,847))
        md['rows']=int(utilities.readascii(hdr,(rec-1)*rl,865,869))
        md['cellx']=float(utilities.readascii(hdr,(rec-1)*rl,954,959))
        md['celly']=md['cellx']
        md['nbits']=8 #int(utilities.readascii(hdr,(rec-1)*rl,984,985)) always 8 bit
        md['datatype']='Byte'
        md['nodata']='0'
        bands_present=utilities.readascii(hdr,(rec-1)*rl,1056,1087)
        # Column ranges of the (up to six) band file name fields in record 1.
        bandindices=[[1131,1159],[1170,1198],[1211,1239],[1250,1278],[1291,1319],[1330,1358]]
        bandfiles={}
        for i in bandindices:
            band=utilities.readascii(hdr,(rec-1)*rl,i[0],i[1])
            if band:
                exists,path=utilities.exists(os.path.join(d,band), True)
                if exists:bandfiles[band]=path
                else:#Assume ACRES format (band*.dat) instead Fast format (l7*.fst)...
                    # Band id lives at a fixed offset in the FAST file name;
                    # 61/62 are the low/high gain thermal bands.
                    bandid=band[23:25]
                    if bandid == '61':bandid='6l'
                    elif bandid == '62':bandid='6h'
                    else:bandid=bandid[0]
                    exists,path=utilities.exists(os.path.join(d,'band%s.dat'%bandid), True)
                    if not exists:raise RuntimeError, 'Unable to open band data files.'
                    bandfiles[band]=path
            else:break
        md['nbands']=len(bandfiles)
        md['sceneid']=os.path.basename(bandfiles.keys()[0])[3:21] #Use path/row & aquisition date as sceneid - L7f[ppprrr_rrrYYYYMMDD]_AAA.FST
        if self._filetype=='HRF':
            md['bands']='1 (BLUE),2 (GREEN),3 (RED),4 (NIR),5 (SWIR),7 (SWIR)'
        elif self._filetype=='HTM':
            md['bands']='6L (THERMAL),6H (THERMAL)'
        elif self._filetype=='HPN':
            md['bands']='8 (PAN)'
        ######################################
        ##Record 2 - radiometric
        ######################################
        #Move along, nothing to see here...
        ######################################
        ##Record 3 - geometric
        ######################################
        rec=3
        map_projection=utilities.readascii(hdr,(rec-1)*rl,32,35)
        prjcode=spatialreferences.GCTP_PROJECTIONS.get(map_projection,0)
        ellipsoid=utilities.readascii(hdr,(rec-1)*rl,48,65)
        ellcode=spatialreferences.GCTP_ELLIPSOIDS.get(ellipsoid,0)
        datum=utilities.readascii(hdr,(rec-1)*rl,74,79)
        zone=utilities.readascii(hdr,(rec-1)*rl,521,526)
        #Workaround for UTM zones as GDAL does not pick up southern hemisphere
        #as some FST headers don't include a negative zone number to indicate southern hemisphere
        #as per the FAST format definition
        zone=int(zone) if zone else 0
        # Column ranges of the 15 USGS/GCTP projection parameters.
        usgs_indices = ((110,133),#Semi-major axis
                        (135,158),#Semi-minor axis
                        (161,184),
                        (186,209),
                        (211,234),
                        (241,264),
                        (266,289),
                        (291,314),
                        (321,344),
                        (346,369),
                        (371,394),
                        (401,424),
                        (426,449),
                        (451,474),
                        (481,504))
        usgs_params=[]
        for i in usgs_indices:
            p=utilities.readascii(hdr,(rec-1)*rl,i[0],i[1])
            if p:usgs_params.append(float(p))
            else:usgs_params.append(0.0)
        # Corner coordinates are packed DMS strings with a hemisphere letter.
        ulx=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,566,578), 'DDDMMSSSSSSSH')
        uly=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,580,591), 'DDMMSSSSSSSH')
        urx=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,646,658), 'DDDMMSSSSSSSH')
        ury=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,660,671), 'DDMMSSSSSSSH')
        lrx=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,726,738), 'DDDMMSSSSSSSH')
        lry=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,740,751), 'DDMMSSSSSSSH')
        llx=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,806,818), 'DDDMMSSSSSSSH')
        lly=geometry.DMS2DD(utilities.readascii(hdr,(rec-1)*rl,820,831), 'DDMMSSSSSSSH')
        ext=[[ulx,uly],[urx,ury],[lrx,lry],[llx,lly],[ulx,uly]]
        md['UL']='%s,%s' % tuple(ext[0])
        md['UR']='%s,%s' % tuple(ext[1])
        md['LR']='%s,%s' % tuple(ext[2])
        md['LL']='%s,%s' % tuple(ext[3])
        # Southern hemisphere (negative latitude) -> negative UTM zone.
        if zone > 0 and uly < 0:zone*=-1
        srs=osr.SpatialReference()
        srs.ImportFromUSGS(prjcode,zone,usgs_params,ellcode)
        if datum=='GDA':#Workaround for GDA94 datum as GDAL does not recognise it
                        #as per the FAST format definition
            if map_projection=='UTM':
                epsg=28300+abs(zone)
                srs.ImportFromEPSG(epsg)
                md['srs']=srs.ExportToWkt()
                md['epsg']=epsg
                md['units']='m'
            else:
                srs.SetGeogCS('GDA94','Geocentric_Datum_of_Australia_1994','GRS 1980', usgs_params[0], 298.257)
                md['srs']=srs.ExportToWkt()
                md['epsg'] = spatialreferences.IdentifyAusEPSG(md['srs'])
                md['units'] = spatialreferences.GetLinearUnitsName(md['srs'])
        else:
            md['srs']=srs.ExportToWkt()
            md['epsg'] = spatialreferences.IdentifyAusEPSG(md['srs'])
            md['units'] = spatialreferences.GetLinearUnitsName(md['srs'])
        md['rotation']=float(utilities.readascii(hdr,(rec-1)*rl,995,1000))
        if abs(md['rotation']) < 1:
            md['orientation']='Map oriented'
            md['rotation']=0.0
        else:md['orientation']='Path oriented'
        md['sunelevation']=utilities.readascii(hdr,(rec-1)*rl,1062,1065)
        md['sunazimuth']=utilities.readascii(hdr,(rec-1)*rl,1086,1090)
        try:##Open dataset
            self._gdaldataset = geometry.OpenDataset(f)
            metadata=self._gdaldataset.GetMetadata()
            md['metadata']='\n'.join(['%s: %s' %(m,metadata[m]) for m in metadata])
            #Fix for Issue 17
            for i in range(1,self._gdaldataset.RasterCount+1):
                self._gdaldataset.GetRasterBand(i).SetNoDataValue(float(md['nodata']))
        except:#build a VRT dataset - if we want to use this for anything other than overview generation, should probably fill out the geotransform, srs, metadata etc...
            bands=bandfiles.values()
            bands.sort()
            #vrtxml=geometry.CreateRawRasterVRT(bands,md['cols'],md['rows'], md['datatype']) #Fix for Issue 17
            vrtxml=geometry.CreateRawRasterVRT(bands,md['cols'],md['rows'], md['datatype'],nodata=md['nodata'])
            self._gdaldataset = geometry.OpenDataset(vrtxml)
            md['metadata']=hdr
        # Tag RGB interpretation (bands 4/3/2) for nicer quicklooks.
        if self._filetype=='HRF':
            self._gdaldataset.GetRasterBand(4).SetRasterColorInterpretation(gdal.GCI_BlueBand)
            self._gdaldataset.GetRasterBand(3).SetRasterColorInterpretation(gdal.GCI_GreenBand)
            self._gdaldataset.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_RedBand)
        # Map the header's processing-level word to the short level code.
        if   level == 'SYSTEMATIC' :md['level'] = '1G '
        elif level == 'SYSTERRAIN':md['level'] = '1Gt'
        elif level == 'PRECISION' :md['level'] = '1P'
        elif level == 'TERRAIN'   :md['level'] = '1T'
        md['compressionratio']=0
        md['compressiontype']='None'
        self.extent=ext
        for m in md:self.metadata[m]=md[m]
| [
"m.ludwig@datalyze-solutions.com"
] | m.ludwig@datalyze-solutions.com |
aa7ff60ebffe59c5a3b27a0bad0c068ba95e96f2 | 1a99cfd56a1a4f55322e8ebe3db69ded7b8599da | /polls/urls.py | 1d554f1a24816c2f25208d2210aaa478f1b1504a | [] | no_license | liuyongliuLYL/Django_myfirstsite | 544dd38c2acbb0f926875748637253b7cfbd5163 | 17d48da4ffd1fb447743ae9ce0c0283656ef563f | refs/heads/master | 2023-03-15T06:29:47.420584 | 2020-06-13T10:29:51 | 2020-06-13T10:29:51 | 271,985,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('books/', views.BookListView.as_view(), name='books'),
path('book/<int:pk>', views.BookDetailView.as_view(), name='book-detail'),
path('authors/', views.AuthorListView.as_view(), name='authors'),
path('author/<int:pk>', views.AuthorDetailView.as_view(), name='author-detail'),
]
'''
urlpatterns += [
path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-books'),
]
''' | [
"1365989980@qq.com"
] | 1365989980@qq.com |
f27f98c738809a0072e484cfbeb1bd4cb90e4844 | d05bb5c8bec975eceb1aad9959c3a05ba296b082 | /example/migrations/0005_comment.py | 96b19481e536dba887d7a487e596bb8b471315ca | [] | no_license | simonescob/materialize-django | 157f4d4ac1cb5319a2ada815998bcd4f5fc9a846 | 8ab1a5c1f962557d5f7454e18a6c7d03f02bfb5c | refs/heads/master | 2021-06-13T03:51:02.968840 | 2017-03-12T21:13:45 | 2017-03-12T21:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-27 17:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates Comment: text plus an uploaded image, linked to a Persona.
    dependencies = [
        ('example', '0004_auto_20161120_0012'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('image', models.ImageField(upload_to='comments/')),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='example.Persona')),
            ],
        ),
    ]
| [
"snescobar25@gmail.com"
] | snescobar25@gmail.com |
d9e7f782354383b52a7fce0b2ad532a240368d69 | fe9387cb0bd07beb66d38d099834ebd91be1681c | /escola/models.py | e26024322526b8092a894163de2b71dfbd0d197b | [] | no_license | ibsenriou/alura-djangorest | cf2088e58b81d5b358664d7f10946e14184586b1 | 7cf0bfd72758637b71a08e17356068a4fd0cd946 | refs/heads/main | 2023-07-17T09:06:18.611999 | 2021-08-31T17:13:07 | 2021-08-31T17:13:07 | 401,783,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | from django.db import models
class Aluno(models.Model):
    # A student: name, Brazilian identity documents (RG/CPF) and birth date.
    nome = models.CharField(max_length=30)
    rg = models.CharField(max_length=9)
    cpf = models.CharField(max_length=11)
    data_nascimento = models.DateField()
    def __str__(self):
        # Display students by name in the admin and shell.
        return self.nome
class Curso(models.Model):
    # A course with a one-letter difficulty level chosen from NIVEL
    # (Basic / Intermediate / Advanced); defaults to Basic.
    NIVEL = (
        ('B', 'Básico'),
        ('I', 'Intermediário'),
        ('A', 'Avançado')
    )
    codigo_curso = models.CharField(max_length=10)
    descricao = models.CharField(max_length=100)
    nivel = models.CharField(max_length=1, choices=NIVEL, blank=False, null=False, default='B')
    def __str__(self):
        # Display courses by their description.
        return self.descricao
| [
"ibsenriou@hotmail.com"
] | ibsenriou@hotmail.com |
bbb434dc4c82535d7eb8b5872ee58153af8988e0 | 397f7fb16ff977c756d22c8fba406d9ec8d9c160 | /espnet/nets/pytorch_backend/transformer/attention.py | 1f8a2264b70af058e2a7e960bc81136a2fcf7d7b | [] | no_license | gothiswaysir/espnet_Word-Scale_attention_causal | bfd3f921b467c21e0ca3edc6c129acabae83600c | a9696cceecc5411b968100085c5d57853056b400 | refs/heads/master | 2023-03-12T11:30:47.181972 | 2021-02-15T06:41:29 | 2021-02-15T06:41:29 | 337,008,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,987 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Multi-Head Attention layer definition."""
import math
import numpy
import torch
from torch import nn
import time
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an MultiHeadedAttention object."""
super(MultiHeadedAttention, self).__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.attn = None
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query, key, value):
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2) # (batch, head, time1, d_k)
k = k.transpose(1, 2) # (batch, head, time2, d_k)
v = v.transpose(1, 2) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
min_value = float(
numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
)
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self, query, key, value, mask):
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class MultiHeadedAttention_wordscale(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an MultiHeadedAttention object."""
super(MultiHeadedAttention_wordscale, self).__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.attn = None
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query, key, value):
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2) # (batch, head, time1, d_k)
k = k.transpose(1, 2) # (batch, head, time2, d_k)
v = v.transpose(1, 2) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self, value, scores, aver_mask, mask):
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
min_value = float(
numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
)
scores = scores.masked_fill(mask, min_value)
if aver_mask is not None:
scores_fillzero = scores.masked_fill(mask, 0)
score_aver = []
for i in range(len(aver_mask)): #batchsize
# aver_mask_suminword = aver_mask[i].sum(dim=0)
# aver_mask_suminword_reverse = torch.ones_like(aver_mask_suminword) - aver_mask_suminword
# score_aver_aword = torch.mul(scores_fillzero[i,:,:,:], aver_mask_suminword_reverse.unsqueeze(0).repeat(self.h,1,1))
score_aver_aword = torch.zeros(scores.shape[1], scores.shape[2], scores.shape[3]).unsqueeze(0).to(scores.device)
for j in range(aver_mask[i].shape[0]): #number of word
#first copy current word aver_mask to number of head, then causal mask.
aver_mask_causal = aver_mask[i][j].unsqueeze(0).repeat(self.h,1,1).masked_fill(mask[i], False)
one_word_part = torch.mul(scores_fillzero[i], aver_mask_causal)#a little bit worry about this. can dot product over head? can
if j == aver_mask[i].shape[0] -1: #last one is the background, donot need to average
score_aver_aword = score_aver_aword + one_word_part
else:
aver_value = (one_word_part.sum([1,2]).float()/aver_mask_causal.sum([1,2]).float()).unsqueeze(-1).unsqueeze(-1)#aver_value of each head
score_aver_aword = score_aver_aword + aver_mask_causal*aver_value
score_aver.append(score_aver_aword)
score_aver = torch.cat(score_aver, dim=0)
scores = score_aver.masked_fill(mask, min_value)
pass
else:
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def compress2word(self, scores, word_count):
n_subword_list=[]
scores_copy = scores.detach().clone()
for n in range(scores.size()[0]): #batch
tmp = []
for n_word in range(max(word_count[n]) + 1):
tmp.append(word_count[n].count(n_word))
n_subword_list.append(tmp)
for h in range(scores.size()[1]): #head
row = 0
for j in range(len(tmp)):
column = 0
for k in range(len(tmp)):
if j == k:
aver = scores[n][h][row:row + tmp[j], column:column + tmp[k]].sum()/((tmp[j]*(tmp[j]+1))/2)
else:
aver = scores[n][h][row:row + tmp[j], column:column + tmp[k]].mean()
scores[n][h][row:row + tmp[j], column:column + tmp[k]] = aver
column = column + tmp[k]
row = row + tmp[j]
#(j,k)
return scores
def forward(self, query, key, value, mask, aver_mask):
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
# aver_mask = aver_mask.unsqueeze(1).expand(q.shape[0], q.shape[1], q.shape[2], q.shape[2]).float()
# q_w = torch.matmul(aver_mask, q)
# k_w = torch.matmul(aver_mask, k)
# scores_w = torch.matmul(q_w, k_w.transpose(-2, -1)) / math.sqrt(self.d_k)
word_level = self.forward_attention(v, scores, aver_mask, mask.clone())
subword_level = self.forward_attention(v, scores, None, mask)
return subword_level+word_level
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
# linear transformation for positional ecoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, query, key, value, pos_emb, mask):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
pos_emb (torch.Tensor): Positional embedding tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
# compute matrix b and matrix d
# (batch, head, time1, time2)
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k
) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask)
| [
"xinyuan_zhou@yeah.net"
] | xinyuan_zhou@yeah.net |
3f21e3fa98c9c46babe5fbcfec20c394f406500f | cf06008b5bd00f5fe539a5307decc932760908f5 | /198__House Robber.py | 9a85425e02d529b5444190ff5b878ca70a7d6de5 | [] | no_license | chengcheng8632/lovely-nuts | 47afc039da3c01f9f37058aacf2fc67a2cce58ca | eb5f6488c875c107743f84a44cbbf55ff7ed3296 | refs/heads/master | 2020-07-02T01:27:12.069402 | 2018-07-24T14:46:32 | 2018-07-24T14:46:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
if len(nums) == 1:
return nums[0]
if len(nums) == 2:
return max(nums[0], nums[1])
F = [0, nums[0], max(nums[0], nums[1])]
i = 3
while i <= len(nums):
temp = max(F[i - 1], F[i - 2] + nums[i - 1])
F.append(temp)
i = i + 1
return F[-1]
| [
"noreply@github.com"
] | chengcheng8632.noreply@github.com |
d3e69ba53e4990fc4570a66ade1318dfb5b7c305 | 65d05b70d719779205586a0ec725b7b0c64eae2e | /sockets/tsTservSS.py | 5ed5668afc8f376f405619d9839160a54ab9ea25 | [] | no_license | Hypocisy/demoPythonCodes | e4e932b57f674744ddf7395249da854835c6ae5f | fd016aab748b7bb809527269110f1523bbf03141 | refs/heads/master | 2023-07-12T00:09:36.723300 | 2021-08-14T06:47:02 | 2021-08-14T06:47:02 | 395,691,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from socketserver import StreamRequestHandler as SRH
from socketserver import TCPServer as TCP
from time import ctime
HOST = "0.0.0.0"
PORT = 21567
ADDR = (HOST, PORT)
class MyRequesthandler(SRH):
def handle(self):
print("...connected from:", self.client_address)
self.wfile.write(
bytes(f"[{ctime()}] %s" % self.rfile.readline().decode(), "utf-8")
)
tcpServ = TCP(ADDR, MyRequesthandler)
print("waiting for connection...")
tcpServ.serve_forever()
tcpServ.close_request()
| [
"1137943609@qq.com"
] | 1137943609@qq.com |
b189a5b77a082303fba6354e224a8c68aec729e1 | a0064e0c00e3245b83895a86ade76e2546befc5e | /EspiralColor.py | edee7e792143bf264dc83c6798663188a8e5d2c0 | [] | no_license | KarolDev/python | 7380d32f55b614cd098389113701d9bb18d02314 | dc6adc2bff5868378e9115147412038026e1e74c | refs/heads/master | 2020-12-29T11:17:11.815015 | 2020-03-03T01:44:03 | 2020-03-03T01:44:03 | 238,589,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | import turtle
tortuga = turtle.Pen()
turtle.bgcolor("black")
colores = ["hotpink","aquamarine","mediumorchid","steelblue", "pink","blue","yellow","violet"]
lados = int(turtle.numinput("Número de lados ", "Cuantos lados quieres tener (1-8) ?",4,1,8))
for x in range(360):
tortuga.pencolor(colores[x%lados])
tortuga.forward(x*3/lados+x)
tortuga.left(360/lados+1)
tortuga.width(x*lados/200)
| [
"lav.lorak.olleh@gmail.com"
] | lav.lorak.olleh@gmail.com |
dbe3b8c0312c7b00b15ccb536cd6663abff697fe | a0e79cef4779a9eedd512158e73c166bf6846110 | /NADE/deepnade/buml/TrainingController/EarlyStopping.py | 0ff15e3b222c3060680dc2ee1bd2a05f3a8001e2 | [
"BSD-3-Clause"
] | permissive | rogen-george/Deep-Autoregressive-Model | 9ca5135ed1d8c0c5c442cd95d75467208d6b0acd | 8d567ecb6d59b3003fba9aab2a0693dab67514ef | refs/heads/master | 2021-10-25T22:50:03.508798 | 2019-04-08T00:56:33 | 2019-04-08T00:56:33 | 172,263,626 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import numpy as np
from TrainingController.TrainingController import TrainingController
class EarlyStopping(TrainingController):
def __init__(self, measurement, n_iterations, maximization=False):
self.measurement = measurement
self.n_iterations = n_iterations
self.maximization = maximization
if self.maximization:
self.best_so_far = -np.inf
else:
self.best_so_far = np.inf
self.counter = 0
def after_training_iteration(self, trainer):
'''If training error improves by less than min_improvement, then stop'''
value = self.measurement.take_measurement(trainer)
if (self.maximization and value > self.best_so_far) or (not self.maximization and value < self.best_so_far):
self.best_so_far = value
self.counter = 0
else:
self.counter += 1
return self.counter >= self.n_iterations
| [
"roli.khanna1995@gmail.com"
] | roli.khanna1995@gmail.com |
ab9574a4fcecc7e98e1f184414e1f93a44731ef6 | df3ec0c046bdb722347bf8ddbcfafdefc974f7d6 | /405_422_428_445_Dharahara_Simulation/ep21_texturing_from_framebuffers_shadow.py | f4d9435f8f9d95642c91a14c393ce59ca67db7b8 | [
"MIT"
] | permissive | PawanPokhrel/Computer-Graphics-074-BEX | 87fac970000815f5d09be80c5818572ff0f13e96 | 77d537c020f4963a752cd603c7721c3c435c1526 | refs/heads/master | 2021-02-23T19:49:35.644098 | 2020-03-13T06:08:01 | 2020-03-13T06:08:01 | 245,326,212 | 0 | 0 | MIT | 2020-03-06T09:04:16 | 2020-03-06T04:01:11 | C | UTF-8 | Python | false | false | 8,434 | py | import glfw
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
import pyrr
from TextureLoader import load_texture
from ObjLoader import ObjLoader
import numpy as np
from camera import Camera
cam = Camera()
WIDTH, HEIGHT = 1280, 720
lastX, lastY = WIDTH / 2, HEIGHT / 2
first_mouse = True
left, right, forward, backward = False, False, False, False
# the keyboard input callback
def key_input_clb(window, key, scancode, action, mode):
global left, right, forward, backward
if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
glfw.set_window_should_close(window, True)
if key == glfw.KEY_W and action == glfw.PRESS:
forward = True
elif key == glfw.KEY_W and action == glfw.RELEASE:
forward = False
if key == glfw.KEY_S and action == glfw.PRESS:
backward = True
elif key == glfw.KEY_S and action == glfw.RELEASE:
backward = False
if key == glfw.KEY_A and action == glfw.PRESS:
left = True
elif key == glfw.KEY_A and action == glfw.RELEASE:
left = False
if key == glfw.KEY_D and action == glfw.PRESS:
right = True
elif key == glfw.KEY_D and action == glfw.RELEASE:
right = False
# do the movement, call this function in the main loop
def do_movement():
##move=0.1
move=.1
if left:
cam.process_keyboard("LEFT", move)
if right:
cam.process_keyboard("RIGHT", move)
if forward:
cam.process_keyboard("FORWARD", move)
if backward:
cam.process_keyboard("BACKWARD", move)
# the mouse position callback function
def mouse_look_clb(window, xpos, ypos):
global first_mouse, lastX, lastY
if first_mouse:
lastX = xpos
lastY = ypos
first_mouse = False
xoffset = xpos - lastX
yoffset = lastY - ypos
lastX = xpos
lastY = ypos
cam.process_mouse_movement(xoffset, yoffset)
vertex_src = """
# version 330
layout(location = 0) in vec3 a_position;
layout(location = 1) in vec2 a_texture;
layout(location = 2) in vec3 a_normal;
uniform mat4 model;
uniform mat4 projection;
uniform mat4 view;
out vec2 v_texture;
void main()
{
gl_Position = projection * view * model * vec4(a_position, 1.0);
v_texture = a_texture;
}
"""
fragment_src = """
# version 330
in vec2 v_texture;
out vec4 out_color;
uniform sampler2D s_texture;
void main()
{
out_color = texture(s_texture, v_texture);
}
"""
# glfw callback functions
def window_resize(window, width, height):
glViewport(0, 0, width, height)
projection = pyrr.matrix44.create_perspective_projection_matrix(45, width / height, 0.1, 100)
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)
# initializing glfw library
if not glfw.init():
raise Exception("glfw can not be initialized!")
# creating the window
window = glfw.create_window(1280, 720, "My OpenGL window", None, None)
# check if window was created
if not window:
glfw.terminate()
raise Exception("glfw window can not be created!")
# set window's position
##glfw.set_window_pos(window, 400, 200)
glfw.set_window_pos(window, 10, 20)
# set the callback function for window resize
glfw.set_window_size_callback(window, window_resize)
# set the mouse position callback
glfw.set_cursor_pos_callback(window, mouse_look_clb)
# set the keyboard input callback
glfw.set_key_callback(window, key_input_clb)
# capture the mouse cursor
glfw.set_input_mode(window, glfw.CURSOR, glfw.CURSOR_DISABLED)
# make the context current
glfw.make_context_current(window)
# load here the 3d meshes
##chibi_indices, chibi_buffer = ObjLoader.load_model("meshes/final.obj")
chibi_indices, chibi_buffer = ObjLoader.load_model("meshes/chibi.obj")
plane_buffer = [0.0, 0.0, 0.0, 0.0, 0.0,
10.0, 0.0, 0.0, 1.0, 0.0,
10.0, 10.0, 0.0, 1.0, 1.0,
0.0, 10.0, 0.0, 0.0, 1.0]
plane_buffer = np.array(plane_buffer, dtype=np.float32)
plane_indices = [0, 1, 2, 2, 3, 0]
plane_indices = np.array(plane_indices, dtype=np.uint32)
shader = compileProgram(compileShader(vertex_src, GL_VERTEX_SHADER), compileShader(fragment_src, GL_FRAGMENT_SHADER))
# VAO and VBO
VAO = glGenVertexArrays(2)
VBO = glGenBuffers(2)
EBO = glGenBuffers(1)
# Chibi VAO
glBindVertexArray(VAO[0])
# Chibi Vertex Buffer Object
glBindBuffer(GL_ARRAY_BUFFER, VBO[0])
glBufferData(GL_ARRAY_BUFFER, chibi_buffer.nbytes, chibi_buffer, GL_STATIC_DRAW)
# chibi vertices
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, chibi_buffer.itemsize * 8, ctypes.c_void_p(0))
# chibi textures
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, chibi_buffer.itemsize * 8, ctypes.c_void_p(12))
# chibi normals
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, chibi_buffer.itemsize * 8, ctypes.c_void_p(20))
glEnableVertexAttribArray(2)
# Plane VAO
glBindVertexArray(VAO[1])
# Plane Vertex Buffer Object
glBindBuffer(GL_ARRAY_BUFFER, VBO[1])
glBufferData(GL_ARRAY_BUFFER, plane_buffer.nbytes, plane_buffer, GL_STATIC_DRAW)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, plane_indices.nbytes, plane_indices, GL_STATIC_DRAW)
# plane vertices
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, plane_buffer.itemsize * 5, ctypes.c_void_p(0))
# plane textures
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, plane_buffer.itemsize * 5, ctypes.c_void_p(12))
textures = glGenTextures(2)
load_texture("meshes/chibi.png", textures[0])
##load_texture("meshes/final.png", textures[0])
# create texture for the plane
glBindTexture(GL_TEXTURE_2D, textures[1])
# texture wrapping params
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# texture filtering params
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1280, 720, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
glBindTexture(GL_TEXTURE_2D, 0)
depth_buff = glGenRenderbuffers(1)
glBindRenderbuffer(GL_RENDERBUFFER, depth_buff)
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, 1280, 720)
FBO = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, FBO)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures[1], 0)
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depth_buff)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glUseProgram(shader)
glClearColor(0, 0.1, 0.1, 1)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
projection = pyrr.matrix44.create_perspective_projection_matrix(45, 1280 / 720, 0.1, 100)
chibi_pos_main = pyrr.matrix44.create_from_translation(pyrr.Vector3([0, 0, -5]))
plane_pos = pyrr.matrix44.create_from_translation(pyrr.Vector3([-20, -3, -10]))
model_loc = glGetUniformLocation(shader, "model")
proj_loc = glGetUniformLocation(shader, "projection")
view_loc = glGetUniformLocation(shader, "view")
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)
# the main application loop
while not glfw.window_should_close(window):
glfw.poll_events()
do_movement()
glClearColor(0, 0.1, 0.1, 1)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
view = cam.get_view_matrix()
glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
##rot_y = pyrr.Matrix44.from_y_rotation(0.8 * glfw.get_time())
rot_y = pyrr.Matrix44.from_y_rotation(25)
model = pyrr.matrix44.multiply(rot_y, chibi_pos_main)
# draw the chibi character
glBindVertexArray(VAO[0])
glBindTexture(GL_TEXTURE_2D, textures[0])
glUniformMatrix4fv(model_loc, 1, GL_FALSE, model)
glDrawArrays(GL_TRIANGLES, 0, len(chibi_indices))
# draw the chibi to the custom frame buffer
glBindFramebuffer(GL_FRAMEBUFFER, FBO)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glDrawArrays(GL_TRIANGLES, 0, len(chibi_indices))
glBindVertexArray(0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
# draw the plane
glBindVertexArray(VAO[1])
glBindTexture(GL_TEXTURE_2D, textures[1])
glUniformMatrix4fv(model_loc, 1, GL_FALSE, plane_pos)
glDrawElements(GL_TRIANGLES, len(plane_indices), GL_UNSIGNED_INT, None)
glfw.swap_buffers(window)
# terminate glfw, free up allocated resources
glfw.terminate()
| [
"dipendrarajpanta1@gmail.com"
] | dipendrarajpanta1@gmail.com |
72b2a9746aaf9ac910a89bae175b63ad37ae3ba1 | af6187edd8b92d8edeb5a0127532e16905cc01c6 | /FileServer/appcode/Utils/logger.py | 6fb8f1f911d586ebff89a91129f221d70e306b0e | [] | no_license | ryunseok/BookServer | d7294a3e742b474b43403933502b9f1158a82fa7 | 89285d839ab4a252a12f471d498d43d23db49fe8 | refs/heads/master | 2020-03-11T06:30:21.454357 | 2018-04-22T01:33:47 | 2018-04-22T01:33:47 | 129,831,692 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import os
import logging
from os import F_OK
from stat import ST_MODE,S_ISDIR,S_ISREG
from datetime import date, datetime
class Logger() :
def __init__(self, log_tag, debug_mode, param):
self._logger = logging.getLogger(log_tag)
if debug_mode is True :
self._logger.setLevel(logging.DEBUG)
log_output_folder = param['OUTPUT_LOGFOLDER']
if (os.access(log_output_folder, F_OK)) is False:
os.makedirs(log_output_folder)
expression_time = '%Y-%m-%d-%p-%I-%M'
logFileName = log_output_folder + '/' + datetime.now().strftime(expression_time) + '.log'
logFileHandler = logging.FileHandler( logFileName)
logStreamHandler = logging.StreamHandler()
# add handlers to log instance
self._logger.addHandler(logFileHandler)
self._logger.addHandler(logStreamHandler)
def d(self,message):
return self._logger.debug(message)
def e(self,message):
return self._logger.error(message)
def w(self,message):
return self._logger.warning(message)
def i(self,message):
return self._logger.info(message)
def c(self,message):
return self._logger.critical(message) | [
"ryunseok@gmail.com"
] | ryunseok@gmail.com |
41fa2d9fa59b4b2c9abcdace97a3222472d4d998 | cac18bdef57fd233b2a0305927a0144bb5ac4a23 | /project/hunt/utils.py | 7563506a63895d21813f65d0ad8740a1edf105ae | [] | no_license | kohoto-san/press | a99e8340bea33b49299ee7461257b5d0f02d2755 | 008c26654a5538860f9fc45d108e70a0a9187722 | refs/heads/master | 2021-01-10T08:31:51.579187 | 2016-02-16T12:26:39 | 2016-02-16T12:26:39 | 47,452,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py |
from .models import Profile
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
# import urllib2
import urllib.request
def createProfile(social_account, user):
extra_data = social_account.extra_data
try:
profile = Profile.objects.get(user=user)
is_new_profile = False
except Profile.DoesNotExist:
is_new_profile = True
profile = Profile()
profile.user = user
if social_account.provider == "twitter":
avatar_url = extra_data['profile_image_url']
if social_account.provider == "vk":
avatar_url = extra_data['photo_big']
if social_account.provider == "facebook":
avatar_url = "http://graph.facebook.com/%s/picture?type=large" % extra_data['id']
# profile.avatar = extra_data['photo_big']
if is_new_profile:
try:
id_profile = Profile.objects.all().order_by('-id_profile').first().id_profile
except AttributeError:
id_profile = 0
profile.id_profile = id_profile + 1
img_format = avatar_url[-4:]
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(urllib.request.urlopen(avatar_url).read())
img_temp.flush()
profile.save()
profile.avatar.save(str(profile.id_profile) + str(img_format), File(img_temp))
return profile
| [
"kohotosan@gmail.com"
] | kohotosan@gmail.com |
c2212cccc185ab01f22af3be210f1abc35093520 | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/BsJPsiKst/python/BsJPsiKst/OurSites.py | 1f512c57497b1d85460fafcb4fa6ccb1a2a6621b | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | import os
SITE = "None"
if "cern.ch" in os.environ["HOME"]: SITE = "CERN"
if (0>1) : SITE = "USC" ### Write here appropriate condition
if "/lhcb/users/" in os.environ["HOME"] and not "cern.ch" in os.environ["HOME"] : SITE = "CPPM" ## """
if "/lhcb/users/akar" in os.environ["HOME"] and not "cern.ch" in os.environ["HOME"] : SITE = "SAKAR" ## """
if os.path.exists("/project/bfys"): SITE = "NIKHEF"
if "/home/diegoms" in os.environ["HOME"]: SITE = "DMSLAP"
#if (0>1) : SITE = "Russia"
print"SITE:",SITE
if SITE == "None":
print"ERROR: No NtuplePath specified"
exit(0)
NtuplePaths = {}
NtuplePaths["CERN"] = "/afs/cern.ch/user/d/diegoms/vol5/WG_sign_off/"
NtuplePaths["CPPM"] = "/zfs_lhcb/users/kanso/realData/for_jpsiK/"
NtuplePaths["SAKAR"] = "/zfs_lhcb/users/sakar/realData/JpsiKst/"
NtuplePaths["DMSLAP"] = "~/w7folder/NTuples/"
NTUPLE_PATH = NtuplePaths[SITE]
| [
"liblhcb@cern.ch"
] | liblhcb@cern.ch |
d771f7e102efb4b382699be33662ce108f210447 | 2cd0a84aefb8a7141d1c8da99845a8ada0cc009c | /tensorflow/python/client/timeline.py | f6aad8d04df2c2691db203032d906f92d7242051 | [
"Apache-2.0"
] | permissive | hholst80/tensorflow-old | d466cee96eac717524ab8e4ee85275ce28bb5d68 | 79df325975402e03df89747947ff5b7f18407c52 | refs/heads/master | 2022-12-20T22:07:40.427519 | 2016-05-13T09:57:24 | 2016-05-13T09:57:24 | 58,914,336 | 1 | 1 | Apache-2.0 | 2022-12-09T21:52:14 | 2016-05-16T08:00:04 | C++ | UTF-8 | Python | false | false | 21,382 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import six # pylint: disable=unused-import
# The timeline target is usually imported as part of BUILD target
# "platform_test", which includes also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import logging
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
  def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
                        snapshot):
    """Adds an object snapshot event to the trace.
    Args:
      category: The event category as a string.
      name: The event name as a string.
      timestamp: The timestamp of this event as a long integer.
      pid: Identifier of the process generating this event as an integer.
      tid: Identifier of the thread generating this event as an integer.
      object_id: Identifier of the object as an integer.
      snapshot: A JSON-compatible representation of the object.
    """
    # 'O' is the Chrome Trace phase code for an object snapshot; the viewer
    # expects the payload nested under args['snapshot'].
    event = self._create_event('O', category, name, pid, tid, timestamp)
    event['id'] = object_id
    event['args'] = {'snapshot': snapshot}
    self._events.append(event)
  def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
    """Adds a flow start event to the trace.
    When matched with a flow end event (with the same 'flow_id') this will
    cause the trace viewer to draw an arrow between the start and end events.
    Args:
      name: The event name as a string.
      timestamp: The timestamp of this event as a long integer.
      pid: Identifier of the process generating this event as an integer.
      tid: Identifier of the thread generating this event as an integer.
      flow_id: Identifier of the flow as an integer.
    """
    # 's' is the Chrome Trace phase code for the start of a flow arrow.
    event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
    event['id'] = flow_id
    self._events.append(event)
  def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
    """Adds a flow end event to the trace.
    When matched with a flow start event (with the same 'flow_id') this will
    cause the trace viewer to draw an arrow between the start and end events.
    Args:
      name: The event name as a string.
      timestamp: The timestamp of this event as a long integer.
      pid: Identifier of the process generating this event as an integer.
      tid: Identifier of the thread generating this event as an integer.
      flow_id: Identifier of the flow as an integer.
    """
    # 't' is the Chrome Trace flow "step" phase code; paired with the 's'
    # event above it terminates the arrow at this point.
    event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
    event['id'] = flow_id
    self._events.append(event)
  def emit_counter(self, category, name, pid, timestamp, counter, value):
    """Emits a record for a single counter.
    Args:
      category: The event category as a string.
      name: The event name as a string.
      pid: Identifier of the process generating this event as an integer.
      timestamp: The timestamp of this event as a long integer.
      counter: Name of the counter as a string.
      value: Value of the counter as an integer.
    """
    # 'C' is the Chrome Trace phase code for a counter sample; counters are
    # per-process, so the thread id is fixed to 0.
    event = self._create_event('C', category, name, pid, 0, timestamp)
    event['args'] = {counter: value}
    self._events.append(event)
  def emit_counters(self, category, name, pid, timestamp, counters):
    """Emits a counter record for the dictionary 'counters'.
    Args:
      category: The event category as a string.
      name: The event name as a string.
      pid: Identifier of the process generating this event as an integer.
      timestamp: The timestamp of this event as a long integer.
      counters: Dictionary of counter values.
    """
    event = self._create_event('C', category, name, pid, 0, timestamp)
    # Copy so later mutations of the caller's dict cannot change the event.
    event['args'] = counters.copy()
    self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
class _TensorTracker(object):
  """Records the lifetime of one Tensor: creation, references and releases.

  Used internally by the 'Timeline' class in this file; not thread safe.
  """
  def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
    """Initializes a tracker for a single tensor.

    Args:
      name: The name of the Tensor as a string.
      object_id: Chrome Trace object identifier assigned for this Tensor.
      timestamp: The creation timestamp of this event as a long integer.
      pid: Process identifier of the associated device, as an integer.
      allocator: Name of the allocator used to create the Tensor.
      num_bytes: Number of bytes allocated (long integer).
    """
    self._name = name
    self._pid = pid
    self._object_id = object_id
    self._create_time = timestamp
    self._allocator = allocator
    self._num_bytes = num_bytes
    self._ref_times = []
    self._unref_times = []
  @property
  def name(self):
    """Name of this tensor (string)."""
    return self._name
  @property
  def pid(self):
    """Process id of the device that created this tensor (integer)."""
    return self._pid
  @property
  def object_id(self):
    """Chrome Trace object identifier of this tensor (integer)."""
    return self._object_id
  @property
  def create_time(self):
    """Creation timestamp of this tensor (long integer)."""
    return self._create_time
  @property
  def allocator(self):
    """Name of the allocator that produced this tensor (string)."""
    return self._allocator
  @property
  def num_bytes(self):
    """Requested size of this tensor in bytes (long integer)."""
    return self._num_bytes
  @property
  def last_unref(self):
    """Timestamp of the most recent unreference of this tensor (long integer)."""
    return max(self._unref_times)
  def add_ref(self, timestamp):
    """Records that this tensor was referenced at the given timestamp.

    Args:
      timestamp: Timestamp of object reference as an integer.
    """
    self._ref_times.append(timestamp)
  def add_unref(self, timestamp):
    """Records that this tensor was unreferenced at the given timestamp.

    Args:
      timestamp: Timestamp of object unreference as an integer.
    """
    self._unref_times.append(timestamp)
class Timeline(object):
  """A class for visualizing execution timelines of TensorFlow steps."""
  def __init__(self, step_stats, graph=None):
    """Constructs a new Timeline.
    A 'Timeline' is used for visualizing the execution of a TensorFlow
    computation. It shows the timings and concurrency of execution at
    the granularity of TensorFlow Ops.
    This class is not thread safe.
    Args:
      step_stats: The 'StepStats' proto recording execution times.
      graph: (Optional) The 'Graph' that was executed.
    """
    self._step_stats = step_stats
    self._graph = graph
    self._chrome_trace = _ChromeTraceFormatter()
    # Fake process ids are allocated sequentially; each device gets one pid
    # for compute activity and one for tensor lifetimes.
    self._next_pid = 0
    self._device_pids = {}  # device name -> pid for compute activity.
    self._tensor_pids = {}  # device name -> pid for tensors.
    self._tensors = {}  # tensor_name -> TensorTracker
    self._next_flow_id = 0
    self._flow_starts = {}  # tensor_name -> (timestamp, pid, tid)
    self._alloc_times = {}  # tensor_name -> ( time, allocator, size )
  def _alloc_pid(self):
    """Allocate a process Id."""
    pid = self._next_pid
    self._next_pid += 1
    return pid
  def _alloc_flow_id(self):
    """Allocate a flow Id."""
    flow_id = self._next_flow_id
    self._next_flow_id += 1
    return flow_id
  def _parse_op_label(self, label):
    """Parses the fields in a node timeline label."""
    # Labels look like "name = OpType(input1, input2)"; a node without
    # inputs has an empty parenthesis pair.
    nn, rest = label.split(' = ')
    op, rest = rest.split('(')
    if rest == ')':
      inputs = []
    else:
      inputs = rest[:-1].split(', ')
    return nn, op, inputs
  def _assign_lanes(self):
    """Assigns non-overlapping lanes for the activities on each device."""
    for device_stats in self._step_stats.dev_stats:
      # TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
      lanes = [0]
      # Greedily place each op into the first lane that is already free at
      # the op's start time; open a new lane if none is free.
      for ns in device_stats.node_stats:
        l = -1
        for (i, lts) in enumerate(lanes):
          if ns.all_start_micros > lts:
            l = i
            lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
            break
        if l < 0:
          l = len(lanes)
          lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
        ns.thread_id = l
  def _emit_op(self, nodestats, pid):
    """Generates a Chrome Trace event to show Op execution.
    Args:
      nodestats: The 'NodeExecStats' proto recording op execution.
      pid: The pid assigned for the device where this op ran.
    """
    node_name = nodestats.node_name
    start = nodestats.all_start_micros
    duration = nodestats.all_end_rel_micros
    tid = nodestats.thread_id
    _, op, inputs = self._parse_op_label(nodestats.timeline_label)
    args = {'name': node_name, 'op': op}
    for i, iname in enumerate(inputs):
      args['input%d' % i] = iname
    self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
  def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
    """Generate Chrome Trace snapshot event for a computed Tensor.
    Args:
      tensor: A 'TensorTracker' object.
      timestamp: The timestamp of this snapshot as a long integer.
      pid: The pid assigned for showing the device where this op ran.
      tid: The tid of the thread computing the tensor snapshot.
      value: A JSON-compliant snapshot of the object.
    """
    # Strip double quotes so the description embeds cleanly in JSON.
    desc = str(value.tensor_description).replace('"', '')
    snapshot = {'tensor_description': desc}
    self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
                                         tid, tensor.object_id, snapshot)
  def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
    """Creates and registers a _TensorTracker; object ids follow creation order."""
    object_id = len(self._tensors)
    tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
                            num_bytes)
    self._tensors[name] = tensor
    return tensor
  def _allocate_pids(self):
    """Allocate fake process ids for each device in the StepStats."""
    self._allocators_pid = self._alloc_pid()
    self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
    # Add processes in the Chrome trace to show compute and data activity.
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._alloc_pid()
      self._device_pids[dev_stats.device] = device_pid
      tensors_pid = self._alloc_pid()
      self._tensor_pids[dev_stats.device] = tensors_pid
      self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
      self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
  def _analyze_tensors(self, show_memory):
    """Analyze tensor references to track dataflow."""
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._device_pids[dev_stats.device]
      tensors_pid = self._tensor_pids[dev_stats.device]
      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        node_name = node_stats.node_name
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        for index, output in enumerate(node_stats.output):
          # Secondary outputs are named 'op:<slot>'; slot 0 keeps the plain
          # op name.
          if index:
            output_name = '%s:%d' % (node_name, index)
          else:
            output_name = node_name
          allocation = output.tensor_description.allocation_description
          num_bytes = allocation.requested_bytes
          allocator_name = allocation.allocator_name
          tensor = self._produce_tensor(output_name, start_time, tensors_pid,
                                        allocator_name, num_bytes)
          tensor.add_ref(start_time)
          tensor.add_unref(end_time)
          # Remember where this tensor was produced so consumers can draw a
          # dataflow arrow back to it.
          self._flow_starts[output_name] = (end_time, device_pid, tid)
          if show_memory:
            self._chrome_trace.emit_obj_create('Tensor', output_name,
                                               start_time, tensors_pid, tid,
                                               tensor.object_id)
            self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
                                       output)
  def _show_compute(self, show_dataflow):
    """Visualize the computation activity."""
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._device_pids[dev_stats.device]
      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        _, _, inputs = self._parse_op_label(node_stats.timeline_label)
        self._emit_op(node_stats, device_pid)
        for input_name in inputs:
          if input_name not in self._tensors:
            # This can happen when partitioning has inserted a Send/Recv.
            # We remove the numeric suffix so that the dataflow appears to
            # come from the original node. Ideally, the StepStats would
            # contain logging for the Send and Recv nodes.
            index = input_name.rfind('/_')
            if index > 0:
              input_name = input_name[:index]
          if input_name in self._tensors:
            tensor = self._tensors[input_name]
            tensor.add_ref(start_time)
            tensor.add_unref(end_time - 1)
            if show_dataflow:
              # We use a different flow ID for every graph edge.
              create_time, create_pid, create_tid = self._flow_starts[
                  input_name]
              # Don't add flows when producer and consumer ops are on the same
              # pid/tid since the horizontal arrows clutter the visualization.
              if create_pid != device_pid or create_tid != tid:
                flow_id = self._alloc_flow_id()
                self._chrome_trace.emit_flow_start(input_name, create_time,
                                                   create_pid, create_tid,
                                                   flow_id)
                self._chrome_trace.emit_flow_end(input_name, start_time,
                                                 device_pid, tid, flow_id)
          else:
            logging.warning('Can\'t find tensor %s', input_name)
  def _show_memory_counters(self):
    """Produce a counter series for each memory allocator."""
    # Iterate over all tensor trackers to build a list of allocations and
    # frees for each allocator. Then sort the lists and emit a cumulative
    # counter series for each allocator.
    allocations = {}
    for name in self._tensors:
      tensor = self._tensors[name]
      self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
                                         tensor.pid, 0, tensor.object_id)
      allocator = tensor.allocator
      if allocator not in allocations:
        allocations[allocator] = []
      num_bytes = tensor.num_bytes
      # One +size event at creation and one -size event at the last unref.
      allocations[allocator].append((tensor.create_time, num_bytes))
      allocations[allocator].append((tensor.last_unref, -num_bytes))
    # Generate a counter series showing total allocations for each allocator.
    for allocator in allocations:
      alloc_list = allocations[allocator]
      alloc_list.sort()
      total_bytes = 0
      for time, num_bytes in alloc_list:
        total_bytes += num_bytes
        self._chrome_trace.emit_counter('Memory', allocator,
                                        self._allocators_pid, time, allocator,
                                        total_bytes)
  def generate_chrome_trace_format(self, show_dataflow=True, show_memory=True):
    """Produces a trace in Chrome Trace Format.
    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If true, add object snapshot events to the trace
        showing the sizes and lifetimes of tensors.
    Returns:
      A JSON formatted string in Chrome Trace format.
    """
    self._allocate_pids()
    self._assign_lanes()
    self._analyze_tensors(show_memory)
    self._show_compute(show_dataflow)
    if show_memory:
      self._show_memory_counters()
    return self._chrome_trace.format_to_string(pretty=True)
| [
"henrik.holst@frostbite.com"
] | henrik.holst@frostbite.com |
9d6cd4dc34c92023e5be0b25eac46b3aeccd5dc3 | 9b9fea16b0d763c0d7975ab6fac43a92f7ee60cc | /testCases/conftest.py | a9d54e0a79fa32c6418128e588b66a26b76fc4ed | [] | no_license | sumits000/seleniumframework | 0039b3ee83ef2ef2577352314cac13e082a66fc9 | fa82ded3e99e00158de5c7b307e5ce048c424f28 | refs/heads/master | 2022-12-19T21:09:29.554870 | 2020-09-23T06:57:15 | 2020-09-23T06:57:15 | 297,879,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # from selenium import webdriver
# from selenium.webdriver.common.by import By
# import time
# from webdriver_manager.chrome import ChromeDriverManager
# from webdriver_manager.firefox import GeckoDriverManager
# import pytest
#
#
# @pytest.fixture()
# def setup():
# driver = webdriver.Chrome()
# return driver
#
import pytest
from selenium import webdriver
@pytest.fixture()
def setup(browser):
    """Return a Selenium WebDriver for the browser requested via --browser.

    Raises:
        ValueError: for unsupported browser names. The original fell through
        with 'driver' unbound (UnboundLocalError) for anything other than
        'chrome' or 'firefox'.
    """
    # NOTE(review): the fixture never quits the driver; consider a
    # yield-fixture with driver.quit() teardown.
    if browser=='chrome':
        driver=webdriver.Chrome()
        print("Launching chrome browser.........")
    elif browser=='firefox':
        driver = webdriver.Firefox()
        print("Launching firefox browser.........")
    else:
        raise ValueError("Unsupported browser: %r (use 'chrome' or 'firefox')" % browser)
    return driver
def pytest_addoption(parser): # This will get the value from CLI /hooks
    """Pytest hook: register the --browser command-line option."""
    parser.addoption("--browser")
@pytest.fixture()
def browser(request): # This will return the Browser value to setup method
    """Fixture exposing the value supplied with --browser on the CLI."""
    return request.config.getoption("--browser")
########### pytest HTML Report ################
# It is hook for Adding Environment info to HTML Report
def pytest_configure(config):
    """Pytest hook: add project details to the HTML report metadata."""
    report_info = {
        'Project Name': 'nop Commerce',
        'Module Name': 'Customers',
        'Tester': 'Sumit',
    }
    for key, value in report_info.items():
        config._metadata[key] = value
# It is hook for delete/Modify Environment info to HTML Report
@pytest.mark.optionalhook
def pytest_metadata(metadata):
    """Pytest hook: drop noisy environment entries from the HTML report."""
    for noisy_key in ("JAVA_HOME", "Plugins"):
        metadata.pop(noisy_key, None)
| [
"sumit@appventurez.com"
] | sumit@appventurez.com |
423e0a0cd0644991a0f575f5474c0580e00884c2 | f1b8f1ab3ef97addc05c00a3d5b41c066d9fb80d | /mongodbTest/0insert&query.py | 3f1d951b8255b125faa0d12c923bb4c3ef754c8f | [] | no_license | chunlin99x/learn-note | 65c7869c65943d639fc53c4994760dda9197a58e | bbd58ef51ed93d575b80a5d5a41113f37c5ded01 | refs/heads/master | 2023-07-27T12:39:24.055233 | 2020-09-11T21:11:26 | 2020-09-11T21:11:26 | 175,378,290 | 0 | 0 | null | 2023-07-11T13:16:24 | 2019-03-13T08:30:38 | JavaScript | UTF-8 | Python | false | false | 1,476 | py | from pymongo import MongoClient
conn = MongoClient("127.0.0.1",27017)
db = conn.mydb # connect to the 'mydb' database; it is created automatically if missing
# get (or create) the collection ("table")
col = db.col
## insert a single document
#col.insert_one({"name":"zhangsan","age":18})
#col.save({"name":"chunlin","age":26}) ## deprecated API, not recommended
mydict = { "name": "RUNOOB", "alexa": "10000", "url": "https://www.runoob.com" }
x = col.insert_one(mydict)
print(x) #<pymongo.results.InsertOneResult object at 0x00000201B179B7C8>
print(x.inserted_id) #5cac09d58af1be31389368e4 - the inserted_id attribute is the id of the inserted document.
## insert multiple documents
mylist = [
{ "name": "Taobao", "alexa": "100", "url": "https://www.taobao.com" },
{ "name": "QQ", "alexa": "101", "url": "https://www.qq.com" },
{ "name": "Facebook", "alexa": "10", "url": "https://www.facebook.com" },
{ "name": "知乎", "alexa": "103", "url": "https://www.zhihu.com" },
{ "name": "Github", "alexa": "109", "url": "https://www.github.com" }
]
x = col.insert_many(mylist )
print(x.inserted_ids)
#[ObjectId('5cac0a628af1be3a9000834b'), ObjectId('5cac0a628af1be3a9000834c'), ObjectId('5cac0a628af1be3a9000834d'), ObjectId('5cac0a628af1be3a9000834e'), ObjectId('5cac0a628af1be3a9000834f')]
#### query data
# myquery = {"name":{"$gt":"z"}}
myquery = {"name":{"$regex":"^R"}}
## fetch a single document
doc = col.find_one()
print(doc)
doc = col.find(myquery).limit(2).skip(0)
for x in doc:
    print(x)
| [
"noreply@github.com"
] | chunlin99x.noreply@github.com |
37e59b7351d31e74cd94ef6632d9b2b1ba569f6c | 2c154c87c1609c2772176f69087ff9d2eeffde2b | /gullyboy.py | 39d2744f7b610a8a4d2fe2c240884798612719f2 | [] | no_license | Anvesh8263/python-lab-30 | 8edeba03e2601797ba7e81bcf8eb9327e622644d | 629516bff04b698d956e90d0a2207a1da7810928 | refs/heads/main | 2023-08-29T12:12:01.027985 | 2021-10-16T18:15:07 | 2021-10-16T18:15:07 | 349,360,571 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | st = input()
# st = st.split()
# st = ''.join(st)
# Remove all spaces so the rotation operates on characters only.
st = st.replace(' ','')
# 1-based rotation start position from the user -> 0-based index.
node = int(input())-1
# Number of times the rotated string is repeated.
rp = int(input())
# Left-rotate the string to start at 'node', then repeat it 'rp' times.
result = (st[node:]+st[:node])*rp
for i in result:
    if i !=' ':
        # Print each character separated by a single space.
        print(i,end =' ')
| [
"noreply@github.com"
] | Anvesh8263.noreply@github.com |
e8c5109c73ce40ce91690f71d3063378467a257d | 31795853bfed5eea199f1e330011c9c6045b250b | /PY1_Comments/comments.py | 41630bbad0c5cfcdbfb34cde657bc5270694e6ca | [] | no_license | britsa/Python_3.8.3 | 733a95f55bd30328dbf0b0d227ce08ffba1c0949 | 10cc462bedea5a4a00b8818c6b0f0f678f3323c5 | refs/heads/master | 2022-11-06T10:57:05.523593 | 2020-06-26T13:48:49 | 2020-06-26T13:48:49 | 274,488,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # comments
# This is a comment - print('Hello World')
# The above print() does not run because it is commented out.
print('# But this is not a comment') # The statement is enclosed with apostrophes.
# The above print() will work
'''
We hope that you understood the usage of comments.
You are seeing this text inside of multi-line comment
''' | [
"askregins@gmail.com"
] | askregins@gmail.com |
b48badeb049d7766f7d00cedafce6f64555028bc | b4038c217614fbded419056a249d05e8be9cab0a | /lib/scripts/prepare_data_stage2.py | 5b297585547c60895c03771729c15cf0c99069f4 | [] | no_license | ruslangrimov/nfl_impact_detection | dd93230000503c5e2b761424275a14ffd216e5b2 | 69a5b2a61d5ff35ceaa87737282aa107c9218591 | refs/heads/main | 2023-03-03T06:13:04.467054 | 2021-02-19T22:01:23 | 2021-02-19T22:01:23 | 327,161,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,872 | py | import cv2
import sys
import os
import argparse
from functools import partial
from tqdm.auto import tqdm
import multiprocessing as mp
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from lib.utils import imsave, get_print_fn
from lib.paths import DATA_PATH, WHOLE_IMG_PATH
def process_video(video_name, videos_path, output_path, jpeg=True):
    """Decode one video and dump every frame as an image file.

    Frames are written to '<output_path>/<video stem>/<frame index>.jpg'
    (or '.png' when *jpeg* is False).

    Args:
        video_name: File name of the video inside *videos_path*.
        videos_path: Directory containing the source videos.
        output_path: Root directory for the extracted frames.
        jpeg: If True write JPEG at quality 90, otherwise lossless PNG.
    """
    v_output_path = video_name.split('.')[0]
    v_output_path = os.path.join(output_path, v_output_path)
    os.makedirs(v_output_path, exist_ok=True)
    video_path = os.path.join(videos_path, video_name)
    vc = cv2.VideoCapture(video_path)
    try:
        f = 0
        while True:
            it_worked, img = vc.read()
            if not it_worked:
                # End of stream (or decode failure): stop reading.
                break
            # OpenCV decodes frames as BGR; convert to RGB before saving.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            if jpeg:
                imsave(os.path.join(v_output_path, f'{f}.jpg'), img,
                       p=[int(cv2.IMWRITE_JPEG_QUALITY), 90])
            else:
                imsave(os.path.join(v_output_path, f'{f}.png'), img)
            f += 1
    finally:
        # Release the capture handle even if decoding or saving raises;
        # the original leaked it on any exception.
        vc.release()
def main(output_path, p=4):
    """Extract frames from every training video into *output_path*.

    Args:
        output_path: Directory that receives one sub-directory per video.
        p: Number of worker processes for the multiprocessing pool.
    """
    exp_name = "data_prep_stage2"
    print_l = get_print_fn(exp_name)
    print_l(f"Start {exp_name}")
    videos_path = os.path.join(DATA_PATH, 'train')
    videos = os.listdir(videos_path)
    # Freeze the constant arguments so the pool only passes the video name.
    process_video_ = partial(
        process_video, videos_path=videos_path, output_path=output_path, jpeg=True)
    with mp.Pool(processes=p) as pool:
        # imap_unordered keeps the workers busy; tqdm just tracks progress.
        for _ in tqdm(pool.imap_unordered(process_video_, videos), total=len(videos)):
            pass
    print_l(f"Finish {exp_name}")
if __name__ == "__main__":
    # CLI entry point: -p selects the number of extraction processes.
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", dest="p",
                        help="number of processes", required=True, type=int)
    args = parser.parse_args()
    output_path = WHOLE_IMG_PATH
    os.makedirs(output_path, exist_ok=True)
    main(output_path, args.p)
| [
"grimovr@gmail.com"
] | grimovr@gmail.com |
7b12f5dabb27a8a786b209d848dfd9d67f0a6331 | 8d402df39c18eba7e1c86c762f205c944357c5df | /scripts/make_class_flags.py | 49b003b375158acb7924de795302b62f4e71ed01 | [
"BSD-3-Clause"
] | permissive | brython-dev/brython | 87cc023e25550dec9ce459ba68774189f33712b6 | b33958bff0e8c7a280babc30232dc389a2500a7a | refs/heads/master | 2023-09-04T04:49:29.156209 | 2023-09-01T06:36:08 | 2023-09-01T06:36:08 | 24,046,239 | 6,569 | 625 | BSD-3-Clause | 2023-07-05T06:13:32 | 2014-09-15T06:58:21 | Python | UTF-8 | Python | false | false | 1,259 | py | import os
import builtins
# Generates www/src/py_flags.js: a JS map from __flags__ bitmask values to
# the builtin / types-module class names that carry them.
dest_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src")
with open(os.path.join(dest_dir, 'py_flags.js'), 'w', encoding='utf-8') as out:
    out.write('(function($B){\n' +
        '$B.builtin_class_flags = {\n')
    flags = {}
    def add_flag(flag, attr):
        # Group class names by their shared __flags__ bitmask value.
        if flag in flags:
            flags[flag].add(attr)
        else:
            flags[flag] = set([attr])
    for attr in dir(builtins):
        if attr == '__loader__':
            continue
        obj = getattr(builtins, attr)
        if hasattr(obj, '__flags__'):
            #if not (obj.__flags__ & TPFLAGS['BASETYPE']):
            add_flag(obj.__flags__, attr)
    out.write('    builtins: {\n')
    for flag in flags:
        out.write(f"        {flag}: {list(flags[flag])}," + '\n')
    out.write('    },\n    types: {\n')
    # Second section: classes from the 'types' module that report
    # __module__ == 'builtins'.
    flags = {}
    import types
    for attr in dir(types):
        obj = getattr(types, attr)
        if (hasattr(obj, '__module__')
                and obj.__module__ == 'builtins'
                and hasattr(obj, '__flags__')):
            add_flag(obj.__flags__, obj.__name__)
    for flag in flags:
        out.write(f"        {flag}: {list(flags[flag])}," + '\n')
    out.write('    }\n}\n})(__BRYTHON__)')
| [
"pierre.quentel@gmail.com"
] | pierre.quentel@gmail.com |
4bb1a29c46798bb67389ef43064968bf383fdcc1 | 3fdcb37a6f6c225cbb28da5c671ed94dce96b018 | /task1_calculator.py | adda1dbb8a9ca5ab934b17df93a7446705803bbc | [] | no_license | lueked/audiocc-task | b514824243dae27ff5b71321adc9736c814cbdbc | 248233201354302bf3fc8295715910288273d718 | refs/heads/master | 2023-03-05T21:06:24.385726 | 2021-02-08T13:26:33 | 2021-02-08T13:26:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | class Calculator(object):
def __init__(self, expression: str):
self.expression = expression.replace(' ', '')
self.index = 0
def calculate(self):
if not self.expression or self.expression == '':
return 0
return self._calculate(self.expression)
    def _calculate(self, expression):
        """Recursive-descent evaluator for +, -, *, / and parentheses.

        Scans *expression* from ``self.index`` (shared across recursive calls
        so a '(' recursion resumes the parent exactly after the matching ')').
        'sign' always holds the operator seen BEFORE the number currently
        being read; 'previous_value' holds the pending term so that * and /
        bind tighter than + and -. Division uses '/', so results may be float.
        """
        previous_value = 0
        current_value = 0
        result = 0
        sign = '+'
        # Iterate one position past the end: the virtual '#' sentinel forces
        # the final pending term to be folded into the result.
        while self.index <= len(expression):
            c = expression[self.index] if self.index < len(expression) else '#'
            if c == '(':
                # Evaluate the sub-expression; it advances self.index past ')'.
                self.index += 1
                current_value = self._calculate(expression)
                continue
            if c.isdigit():
                # Accumulate multi-digit numbers.
                current_value = 10 * current_value + int(c)
            if not c.isdigit() or c == '#' or c == ')':
                # An operator, ')' or the sentinel ends the current number:
                # apply the operator that preceded it.
                if sign == '+':
                    result += previous_value
                    previous_value = current_value
                elif sign == '-':
                    result += previous_value
                    previous_value = -current_value
                elif sign == '*':
                    previous_value = previous_value * current_value
                elif sign == '/':
                    previous_value = previous_value / current_value
                sign = c
                current_value = 0
                self.index += 1
                if c == ')' or c == '#':
                    # Close of a sub-expression or end of input: flush the
                    # last pending term and return to the caller.
                    result += previous_value
                    return result
        return result
if __name__ == '__main__':
    # Interactive entry point: read one expression and print its value.
    equation = input('Enter an expression to be calculated: ')
    calculator = Calculator(equation)
    print(f'The answer is: {calculator.calculate()}')
| [
"kshitj.saxena@mecuris.com"
] | kshitj.saxena@mecuris.com |
89a8502417480c52d16a34e7d2d399c78dfc8c1f | 13d0ad57a2f5deb83593e73843be7cbeeaad8d3d | /medium/top_k_frequent_words.py | e4f059cf48137b020c8b29784284c2f730070320 | [] | no_license | mwong33/leet-code-practice | b21f277d73b30df9e681499733baad07979480a1 | 9c0e6294bf3b3614b185f0760906abad60f8d9b6 | refs/heads/main | 2023-03-29T20:35:43.841662 | 2021-03-31T22:05:44 | 2021-03-31T22:05:44 | 317,382,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | import heapq
class Solution:
    # Counting dict + heapq.nsmallest - O(n log k) time, O(n) space
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the k most frequent words, most frequent first.

        Ties are broken alphabetically, which the (-count, word) sort key
        encodes directly. Using heapq.nsmallest keeps only k candidates
        instead of pushing every word and popping k times (O(n log n)).
        """
        freq = {}
        for word in words:
            # dict.get counting replaces the original if/else branches.
            freq[word] = freq.get(word, 0) + 1
        # Smallest (-count, word) pairs == highest count, then lexicographic.
        best = heapq.nsmallest(k, ((-count, word) for word, count in freq.items()))
        return [word for _, word in best]
| [
"noreply@github.com"
] | mwong33.noreply@github.com |
a13eb009f3fdd84d7c4f91ea464792329029e5dc | fe499e3d2fe4c9050ce189c723d551f82b8ede36 | /boltzmann/tests/tests.py | fbde74abff0e17e5d06674da9e93eef0af65d72c | [
"MIT"
] | permissive | ARLM-Attic/boltzmann | 53bea070a4996388dccf36b7f2e830e46df3762c | 95741cacee7684752bb77d37462d2b8045ae54f2 | refs/heads/master | 2021-05-11T14:00:16.881190 | 2017-01-19T16:13:19 | 2017-01-19T16:13:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,870 | py | '''
Created on 20160917
Based on original works done for OXT in January 2015
@author: LaurentMT
'''
import os
import math
import sys
# Adds boltzmann directory into path
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../../")
from boltzmann.linker.txos_linker import TxosLinker
from datetime import datetime
def build_test_vectors():
    """Builds the list of transactions used by the tests.

    Returns:
        A list of (name, inputs, outputs, options) tuples where inputs and
        outputs are lists of (txo_id, value) pairs and options is the list of
        linker options shared by every vector.
    """
    options = ['PRECHECK', 'LINKABILITY']
    def case(name, inputs, outputs):
        # Every vector shares the same options list.
        return (name, inputs, outputs, options)
    def uniform(ids, value):
        # Builds [(id, value), ...] for the symmetric N-in / N-out cases.
        return [(txo_id, value) for txo_id in ids]
    test_vectors = [
        case('TEST A', [('a', 10), ('b', 10)],
             [('A', 8), ('B', 2), ('C', 3), ('D', 7)]),
        # Additional information: A & B controlled by the same entity.
        case('TEST A with additional info', [('a', 10), ('b', 10)],
             [('A', 10), ('C', 3), ('D', 7)]),
        case('TEST B', [('a', 10), ('b', 10)],
             [('A', 8), ('B', 2), ('C', 2), ('D', 8)]),
        case('TEST B with additional info', [('a', 10), ('b', 10)],
             [('A', 10), ('C', 2), ('D', 8)]),
        case('TEST C', [('a', 10), ('b', 10)],
             [('A', 5), ('B', 5), ('C', 5), ('D', 5)]),
        case('TEST C with additional info', [('a', 10), ('b', 10)],
             [('A', 10), ('C', 5), ('D', 5)]),
        case('TEST D', [('a', 10), ('b', 10), ('c', 2)],
             [('A', 8), ('B', 2), ('C', 2), ('D', 8), ('E', 2)]),
        case('TEST P2', uniform('ab', 5), uniform('AB', 5)),
        case('TEST P3', uniform('abc', 5), uniform('ABC', 5)),
        # Outputs sum to less than the inputs: the difference is the fee.
        case('TEST P3 with fees', uniform('abc', 5),
             [('A', 5), ('B', 3), ('C', 2)]),
        case('TEST P3b', [('a', 5), ('b', 5), ('c', 10)],
             [('A', 5), ('B', 5), ('C', 10)]),
        case('TEST P4', uniform('abcd', 5), uniform('ABCD', 5)),
        case('TEST P5', uniform('abcde', 5), uniform('ABCDE', 5)),
        case('TEST P9', uniform('abcdefghi', 5), uniform('ABCDEFGHI', 5)),
        case('TEST P10', uniform('abcdefghij', 5), uniform('ABCDEFGHIJ', 5)),
    ]
    return test_vectors
def process_test(inputs, outputs, options, max_duration):
    """Runs the TxosLinker on one test vector and prints the wall-clock time.

    Args:
        inputs: List of (txo_id, value) input pairs.
        outputs: List of (txo_id, value) output pairs.
        options: Linker options (e.g. ['PRECHECK', 'LINKABILITY']).
        max_duration: Maximum processing time passed to the linker.

    Returns:
        (mat_lnk, nb_cmbn, inputs, outputs, fees) as produced by the linker,
        where fees = sum(input values) - sum(output values).
    """
    t1 = datetime.now()
    sum_inputs = sum([v[1] for v in inputs])
    sum_outputs = sum([v[1] for v in outputs])
    fees = sum_inputs - sum_outputs
    linker = TxosLinker(inputs, outputs, fees, max_duration)
    # The linker may reorder/normalize inputs and outputs; keep its versions.
    (mat_lnk, nb_cmbn, inputs, outputs) = linker.process(options=options)
    print('Duration = %s' % str( (datetime.now() - t1).total_seconds()))
    return mat_lnk, nb_cmbn, inputs, outputs, fees
def display_results(mat_lnk, nb_cmbn, inputs, outputs, fees):
    """Pretty-prints the linker results for one test vector.

    Args:
        mat_lnk: Linkability matrix (outputs x inputs) or None.
        nb_cmbn: Number of valid input/output combinations found.
        inputs: List of (txo_id, value) input pairs.
        outputs: List of (txo_id, value) output pairs.
        fees: Transaction fees (integer).
    """
    print('\nInputs = ' + str(inputs))
    print('\nOutputs = ' + str(outputs))
    print('\nFees = %i' % fees)
    print('\nNb combinations = %i' % nb_cmbn)
    if nb_cmbn > 0:
        # Entropy of the transaction in bits.
        print('Tx entropy = %f' % math.log2(nb_cmbn))
    if mat_lnk is not None:
        if nb_cmbn != 0:
            # Normalize counts into per-link probabilities.
            print('\nLinkability Matrix (probabilities) :')
            print(mat_lnk / nb_cmbn)
        else:
            print('\nLinkability Matrix (#combinations with link) :')
            print(mat_lnk)
        print('\nDeterministic links :')
        # A link appearing in every combination is deterministic.
        for i in range(0, len(outputs)):
            for j in range(0, len(inputs)):
                if (mat_lnk[i,j] == nb_cmbn) and mat_lnk[i,j] != 0 :
                    print('%s & %s are deterministically linked' % (inputs[j], outputs[i]))
if __name__ == '__main__':
    # Maximum linker processing time per vector, in seconds.
    max_duration_test = 600
    # Builds test vectors
    test_vectors = build_test_vectors()
    for test in test_vectors:
        test_name = test[0]
        print('\n\n--- %s -------------------------------------' % test_name)
        # Processes the test
        (mat_lnk, nb_cmbn, inputs, outputs, fees) = process_test(test[1], test[2], test[3], max_duration_test)
        # Displays the results
        display_results(mat_lnk, nb_cmbn, inputs, outputs, fees)
| [
"laurent.salat@bbox.fr"
] | laurent.salat@bbox.fr |
1960b50186b779acd635b4d5c64bbe995e7e4971 | b98c79303a74069d78f2d5479a86c93744518c84 | /clustering/tools.py | e548211955a8b8b5c1d8ee8f9f200eee4de762a5 | [] | no_license | DanielLSM/clustering-trajs | ca9e43b88247d034c1be66e3ae8240e3272bde1b | fa88650c1fd9ccf43dcd72495d7465a92bc5db98 | refs/heads/main | 2023-04-23T05:52:22.609774 | 2021-04-26T17:52:34 | 2021-04-26T17:52:34 | 361,827,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | import numpy as np
import pickle5 as pkl
from datetime import datetime
DATA_DIR = "../data/raw/"
PROCESSED_DIR = "../data/processed"
def save_pickle(obj, directory, name):
    """Pickle *obj* to '<directory><name>.pkl' using the highest protocol."""
    target_path = directory + name + ".pkl"
    with open(target_path, 'wb') as handle:
        pkl.dump(obj, handle, protocol=pkl.HIGHEST_PROTOCOL)
def load_pickle(directory, name):
    """Unpickle and return the object stored at '<directory><name>.pkl'."""
    source_path = directory + name + ".pkl"
    with open(source_path, 'rb') as handle:
        return pkl.load(handle)
def return_date():
    """Return the current local date/time formatted as ``DD-MM-YYYY HH:MM:SS``."""
    return datetime.now().strftime("%d-%m-%Y %H:%M:%S")
if __name__ == '__main__':
    # Smoke test: load one trajectory pickle from the raw-data directory.
    file_name = "trajectories_nocollision"
    data = load_pickle(DATA_DIR, file_name)
"daniellsmarta@gmail.com"
] | daniellsmarta@gmail.com |
34247801de12ae00efba811166f8e0d13c3ee1f6 | 8510a309405fd924e2d4d9a4e2d0dbb650ffe8dd | /gym_gamblers/envs/__init__.py | 1c0a5b75537ce69801ae2f59cf277a67f8e7c789 | [
"MIT"
] | permissive | prasad-madhale/gym-gamblers | b289ac046703097cb4e3f4ae92b7563d4d21ee65 | 99f30ef70609b499eb2877c01d77b5723d7bbcd1 | refs/heads/master | 2020-04-12T16:55:00.267144 | 2019-01-04T20:48:44 | 2019-01-04T20:48:44 | 162,627,998 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from gym_gamblers.envs.gamblers_env import GamblersEnv
| [
"prasadchelsea33@gmail.com"
] | prasadchelsea33@gmail.com |
962fbb14795b9c15bc3ea3931f9575d595e59c8c | 66ad6bd8e1743ff1bce5b0f6860041de0ba5d91e | /dynamic_input_clerigo.py | d8ef12929a4c7510da6aa79237e5f60d73182cfe | [] | no_license | MarkClerigo/fcpc_python_projects | d6ff9d1a8188596f72e31d92273bb0273a7665b6 | 2236484a62adb78eed65b667a426252ef905516a | refs/heads/master | 2020-04-08T09:56:55.917797 | 2019-03-12T01:50:51 | 2019-03-12T01:50:51 | 159,246,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #accept input form user
# Accept string input from the user (input() always returns a str).
name = input("Enter your name:")
print("Your name is: " + name)
#accept numerical input -- int() converts the returned string to an integer
fnum = int(input("Enter First Number: "))
snum = int(input("Enter Second Number: "))
to_int_value = int("10")  # int() also parses numeric string literals
print(fnum + snum)
print(to_int_value ** 2)  # prints 100
| [
"noreply@github.com"
] | MarkClerigo.noreply@github.com |
05a2150b0affb55aa6e432500723bc7299ab5630 | e45bc0309496d9bd25e1121e01c99811cf4db746 | /Training CD/IDA Pro/IDA Plug-ins/IDA Python/1.1.1/python/init.py | c81483d3d0b97a46c4280f9ecf6f4fc6e3fdcedb | [
"MIT",
"BSD-3-Clause"
] | permissive | fengjixuchui/Malware-Analysis-Training | d9165c75591934181586c4fb3e4db1dfa8a48c72 | 08ad2a088bf5e7230476ab823cad9bae2c00de58 | refs/heads/master | 2021-01-08T15:31:14.505959 | 2020-02-28T14:20:35 | 2020-02-28T14:20:35 | 242,067,909 | 0 | 1 | MIT | 2020-02-28T14:20:37 | 2020-02-21T06:12:40 | null | UTF-8 | Python | false | false | 6,383 | py | #!/usr/bin/env python
#------------------------------------------------------------
# IDAPython - Python plugin for Interactive Disassembler Pro
#
# Copyright (c) 2004-2009 Gergely Erdelyi <dyce@d-dome.net>
#
# All rights reserved.
#
# For detailed copyright information see the file COPYING in
# the root of the distribution archive.
#------------------------------------------------------------
# init.py - Essential init routines
#------------------------------------------------------------
import os
import sys
import time
import warnings
import _idaapi
__EA64__ = _idaapi.BADADDR == 0xFFFFFFFFFFFFFFFFL
# FIXME: Should fix the offending constant instead
warnings.filterwarnings('ignore', category=FutureWarning)
def addscriptpath(script):
    """
    Add the path part of the scriptfile to the system path to
    allow modules to be loaded from the same place.

    Each path is added only once.
    """
    pathfound = 0

    scriptpath = os.path.dirname(script)

    # Linear scan: only append the directory if it is not already present.
    for pathitem in sys.path:
        if pathitem == scriptpath:
            pathfound = 1
            break

    if pathfound == 0:
        sys.path.append(scriptpath)

    # Add the script to ScriptBox if it's not there yet
    # (scriptbox is the module-level ScriptBox instance created below).
    if not script in scriptbox.list:
        scriptbox.list.insert(0, script)
def runscript(script):
    """
    Run the specified script after adding its directory path to
    system path.

    This function is used by the low-level plugin code.
    """
    addscriptpath(script)
    # Restart the runaway-script timer before executing user code.
    watchdog.reset()
    # Temporarily make the script itself argv[0], Python-2 style execfile.
    argv = sys.argv
    sys.argv = [ script ]
    execfile(script, globals())
    # NOTE(review): sys.argv is not restored if execfile raises — there is
    # no try/finally around the call.
    sys.argv = argv
def print_banner():
    """Print the IDAPython startup banner (interpreter + plugin versions).

    Python 2 code: uses print statements and module-level __EA64__ /
    IDAPYTHON_VERSION globals.
    """
    version1 = "Python interpreter version %d.%d.%d %s (serial %d)" % sys.version_info
    version2 = "Copyright (c) 1990-2009 Python Software Foundation - http://www.python.org/"
    if __EA64__:
        version3 = "IDAPython 64-bit"
    else:
        version3 = "IDAPython"
    version3 += " version %d.%d.%d %s (serial %d)" % IDAPYTHON_VERSION
    version4 = "Copyright (c) 2004-2009 Gergely Erdelyi - http://d-dome.net/idapython/"
    # Separator width matches the longest banner line.
    linelen = max(len(version1), len(version2), len(version3), len(version4))

    print '-' * linelen
    print version1
    print version2
    print
    print version3
    print version4
    print '-' * linelen
#-----------------------------------------------------------
# Take over the standard text outputs
#-----------------------------------------------------------
class MyStdOut:
    """
    Dummy file-like class that receives stout and stderr
    """
    def write(self, text):
        # Escape '%' so _idaapi.msg does not interpret the text as a
        # printf-style format string.
        _idaapi.msg(text.replace("%", "%%"))

    def flush(self):
        # No buffering — nothing to flush.
        pass

    def isatty(self):
        # Never a terminal: output goes to the IDA message window.
        return False
# Redirect stderr and stdout to the IDA message window
sys.stdout = sys.stderr = MyStdOut()
# Assign a default sys.argv
sys.argv = [ "" ]
# Have to make sure Python finds our modules
if _idaapi.idainfo_is_64bit(_idaapi.cvar.inf):
pythonDir = "python64"
else:
pythonDir = "python"
sys.path.append(_idaapi.idadir(pythonDir))
print_banner()
#-----------------------------------------------------------
# Import all the required modules
#-----------------------------------------------------------
from idaapi import Choose, get_user_idadir, cvar
from idc import *
from idautils import *
#-----------------------------------------------------------
# Build up the ScriptBox tool
#-----------------------------------------------------------
class ScriptBox(Choose):
    """Chooser dialog listing previously executed scripts (run history)."""

    def __init__(self, list=None):
        # NOTE(review): parameter shadows the builtin ``list``; kept for
        # backward compatibility with existing callers.
        if list:
            self.list = list
        else:
            self.list = []
        Choose.__init__(self, self.list, "ScriptBox", 1)
        self.width = 50

    def run(self):
        # Return the selected script path, or None if the history is
        # empty or the user cancelled the chooser.
        if len(self.list) == 0:
            Warning("ScriptBox history is empty.\nRun some script with Alt-9 and try again.")
            return None

        n = self.choose()

        # choose() is 1-based; anything <= 0 means no selection.
        if n > 0:
            return self.list[n-1]
        else:
            return None

    def addscript(self, scriptpath):
        # Append a script path to the history list.
        self.list.append(scriptpath)
scriptbox = ScriptBox()
#-------------------------------------------------------------
# Watchdog to catch runaway scripts after a specified timeout
#
# Usage:
# watchdog.install()
# watchdog.activate(10) # Use 10-second timeout
#
# Note: The watchdog only works for code running inside
# functions, not in global/module namespace.
#-------------------------------------------------------------
class WatchDog():
    """
    Python tracer-based watchdog class

    Installs a sys.settrace() callback that fires on every traced line and,
    once a script has run longer than `timeout` seconds, asks the user
    whether to abort it by raising KeyboardInterrupt.
    """
    def __init__(self, timeout=10):
        # Time of the last reset (value of time.clock()).
        self.timestamp = 0
        # Seconds before the user is prompted to stop the script.
        self.timeout = timeout
        # True once the tracer has been installed via sys.settrace().
        self.installed = False
        # True while the watchdog is actively enforcing the timeout.
        self.active = False

    def install(self):
        """ Install the tracer function, required for the watchdog """
        if not self.installed:
            sys.settrace(self.tracer)
            self.installed = True

    def activate(self, timeout=None):
        """ Activate the watchdog, with optional timeout change """
        assert self.installed, "WatchDog must be installed before activating"
        if timeout:
            self.timeout = timeout
        self.reset()
        self.active = True

    def deactivate(self):
        """ Deactivate the watchdog """
        # BUGFIX: this previously set self.active = True, so the watchdog
        # stayed armed after deactivate() was called.
        self.active = False

    def reset(self):
        """ Reset the timer, useful for long-running scripts """
        self.timestamp = time.clock()

    def tracer(self, frame, event, arg):
        """ Tracer function that receives the tracing events """
        if not self.active:
            return None
        if event == 'line':
            if time.clock() - self.timestamp > self.timeout:
                if AskYN(0, "The script has not finished in %d seconds\nWould you like to stop it now?" % self.timeout) == 1:
                    # User chose to abort the runaway script.
                    raise KeyboardInterrupt
                else:
                    # User chose to continue: restart the timer.
                    self.timestamp = time.clock()
        return self.tracer
watchdog = WatchDog(10)
# Load the users personal init file
userrc = get_user_idadir() + os.sep + "idapythonrc.py"
if os.path.exists(userrc):
runscript(userrc)
# Remove the user script from the history
del scriptbox.list[0]
# All done, ready to rock.
| [
"pedram.amini@gmail.com"
] | pedram.amini@gmail.com |
1c0597fc90c5ca82d46b382016f2caf6ba0ded43 | 6a9721174baba35c3e2536d6b5fd1e5386c96b77 | /venv/bin/easy_install-3.7 | 03b60a646b5153b5f3f27a34f7213c5465024d2f | [] | no_license | SolidRocketBooster/SDA_DevOps_tests | 80e8f2a6c3cf0c251129e3b0b51262c2c8ee9e33 | fd6ed2dc4b857e117b029ee3f57e97993e258eef | refs/heads/master | 2020-04-28T18:33:54.953081 | 2019-03-20T17:01:56 | 2019-03-20T17:01:56 | 175,482,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | 7 | #!/home/srb/PycharmProjects/end_of_story/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"aleksander.wojtal@tieto.pl"
] | aleksander.wojtal@tieto.pl |
55d98fd560361bfd4ff2ee4b424703be01d1342b | 26774b0a8133955c05fe9b8973e011f0730241c6 | /MyAnimations/box/box_animation.py | 7b0679217a3287df28a69fa3b81db187c85533e6 | [] | no_license | codecxx/manim_sandbox | 056d7b1654738b5c0f7104dd73a157ff4f7d6d1c | 2d546b99403018bd1362bbe58b45683762e32716 | refs/heads/master | 2022-11-24T03:29:04.077646 | 2020-07-25T13:43:04 | 2020-07-25T13:43:04 | 282,556,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | #from manimlib.imports import *
from manimlib.imports import *
from MyAnimations.box.box_object import *
class BoxAnimation(Scene):
    """Manim scene: draw a box holding a labelled note, move the box around,
    then open and close it (Box/NoteBox/open_box/close_box come from
    box_object)."""
    def construct(self):
        #Set objects
        box=Box()
        note=NoteBox()
        label=TexMobject("A",color=BLACK)
        #Set properties: note is twice the label height, both centred on the box
        note.set_height(label.get_height()*2)
        note.move_to(box)
        label.move_to(note)
        self.play(DrawBorderThenFill(box))
        self.wait()
        self.play(FadeIn(note))
        # Keep the note glued to the box centre for the rest of the scene.
        note.add_updater(lambda d: d.move_to(box.box_center()))
        self.play(Write(label))
        # Keep the label glued to the note.
        label.add_updater(lambda d: d.move_to(note))
        self.play(box.shift,DOWN*3+LEFT*2,path_arc=PI/4)
        self.wait()
        self.play(open_box(box))
        self.play(box.shift,UP*4.5+RIGHT*4)
        self.wait()
        self.play(close_box(box))
        self.wait()
"919791421@qq.com"
] | 919791421@qq.com |
25e591adef5ca985e8cab7d3e2b459c786644f81 | f999bc5a6e0da4f0904ef2112d7b6191f180ca5b | /180Geeks/Arrays/Leaders in an array.py | 781946d8a3bb6668110d53804e507f78136e1677 | [] | no_license | ritesh-deshmukh/Algorithms-and-Data-Structures | 721485fbe91a5bdb4d7f99042077e3f813d177cf | 2d3a9842824305b1c64b727abd7c354d221b7cda | refs/heads/master | 2022-11-09T00:18:51.203415 | 2018-10-08T22:31:05 | 2018-10-08T22:31:05 | 132,504,988 | 0 | 1 | null | 2022-10-23T00:51:15 | 2018-05-07T19:07:33 | Python | UTF-8 | Python | false | false | 550 | py | # Write a program to print all the LEADERS in the array.
# An element is leader if it is greater than all the elements to its right side.
# The rightmost element is always a leader.
arr = [16,17,4,3,5,2]
size = len(arr)
def leaderfunc(arr, size):
    """Print (and return) all leaders of ``arr``.

    An element is a leader if it is strictly greater than every element to
    its right; the rightmost element is always a leader.

    Parameters
    ----------
    arr : list
        Sequence of comparable values.
    size : int
        Number of elements of ``arr`` to consider.

    Returns
    -------
    list
        Leaders in left-to-right order (empty for an empty array).
    """
    # Robustness fix: the original indexed arr[-1] and crashed on [].
    if size == 0 or not arr:
        leaders = []
        print(f"The leaders in the given array are: {leaders}")
        return leaders
    leader = arr[size - 1]
    collected = [leader]
    # Scan right-to-left. BUGFIX: the original loop was
    # range(size-1, 0, -1), which never examined index 0 and therefore
    # missed a leader at the front (e.g. [20, 17, 4, 3, 5, 2]).
    for i in range(size - 2, -1, -1):
        if arr[i] > leader:
            collected.append(arr[i])
            leader = arr[i]
    leaders = collected[::-1]
    print(f"The leaders in the given array are: {leaders}")
    return leaders
leaderfunc(arr,size) | [
"riteshdeshmukh260@gmail.com"
] | riteshdeshmukh260@gmail.com |
ab1c997d8bee9c087d806f0f271e68e964de7b0f | ffe019bf25ca51dc1e30d04bf51823107ca58089 | /skmultiflow/demos/_test_prequential_mol.py | 3e232030a016f4f31b36e595d596da3efe1e988b | [
"BSD-3-Clause"
] | permissive | yupbank/scikit-multiflow | dc759285bf1b095681cb7094baaa9b0356a068f2 | ef4a37144bb7d377cfd358643cd3f34e9056ea40 | refs/heads/master | 2021-09-11T09:50:11.918721 | 2017-11-30T15:44:38 | 2017-11-30T15:44:38 | 112,630,179 | 1 | 0 | null | 2017-11-30T15:52:30 | 2017-11-30T15:52:30 | null | UTF-8 | Python | false | false | 2,153 | py | __author__ = 'Guilherme Matsumoto'
from sklearn.linear_model.stochastic_gradient import SGDClassifier, SGDRegressor
from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from sklearn.linear_model.perceptron import Perceptron
from skmultiflow.classification.perceptron import PerceptronMask
from skmultiflow.classification.multi_output_learner import MultiOutputLearner
from skmultiflow.core.pipeline import Pipeline
from skmultiflow.data.file_stream import FileStream
from skmultiflow.options.file_option import FileOption
from skmultiflow.data.generators.multilabel_generator import MultilabelGenerator
from skmultiflow.data.generators.waveform_generator import WaveformGenerator
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
def demo(output_file=None, instances=40000):
    """ _test_prequential_mol

    This demo shows the evaluation process of a MOL classifier, initialized
    with sklearn's SGDClassifier.

    Parameters
    ----------
    output_file: string
        The name of the csv output file

    instances: int
        The evaluation's max number of instances

    """
    # Setup the stream: a synthetic multilabel generator.
    stream = MultilabelGenerator(n_samples=instances)
    stream.prepare_for_use()

    # Setup the classifier: one SGD learner per output label.
    classifier = MultiOutputLearner(SGDClassifier(n_iter=100))

    # Setup the pipeline
    pipe = Pipeline([('Classifier', classifier)])

    # Setup the evaluator.  FIX: renamed the local from ``eval`` to
    # ``evaluator`` — the original name shadowed the builtin eval().
    evaluator = EvaluatePrequential(pretrain_size=5000, max_instances=instances-10000, batch_size=1, n_wait=200, max_time=1000,
                                    output_file=output_file, task_type='multi_output', show_plot=True, plot_options=['hamming_score', 'j_index', 'exact_match'])

    # Evaluate
    evaluator.eval(stream=stream, classifier=pipe)
if __name__ == '__main__':
demo('log_mol1.csv', 50000) | [
"guilhermekmatsumoto@gmail.com"
] | guilhermekmatsumoto@gmail.com |
51bccdfb0ed05ff0a075f4464cd01bc92b7d52bd | 06733bdb8fdc692631842253884a7079ff5917cc | /unittests/test_overrides.py | a6ad18a4b4533398d1a8cc5ddef4091855a8eaf6 | [
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0"
] | permissive | pombredanne/pygccxml | ef9303cf6e0d26685219493e69ee32e9526c4c77 | 27aa1a805b0e1d5bdb24eef64f7b39873adb4627 | refs/heads/master | 2023-05-14T00:39:54.972935 | 2021-05-25T19:12:20 | 2021-05-25T19:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | # Copyright 2014-2020 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import unittest
from . import parser_test_case
from pygccxml import parser
from pygccxml import declarations
class Test(parser_test_case.parser_test_case_t):
    # Parsed global namespace, cached on the class so the header is parsed
    # only once for the whole test case.
    global_ns = None

    def __init__(self, *args):
        parser_test_case.parser_test_case_t.__init__(self, *args)
        self.header = "test_overrides.hpp"
        self.global_ns = None
        # NOTE(review): epic version 1 presumably makes castxml emit the
        # override information used below — confirm against castxml docs.
        self.config.castxml_epic_version = 1

    def setUp(self):
        # First invocation parses the header and caches the results at
        # class level; later invocations reuse the cached namespace.
        if not self.global_ns:
            decls = parser.parse([self.header], self.config)
            Test.global_ns = declarations.get_global_namespace(decls)
            Test.xml_generator_from_xml_file = \
                self.config.xml_generator_from_xml_file
        self.xml_generator_from_xml_file = Test.xml_generator_from_xml_file
        self.global_ns = Test.global_ns

    def test(self):
        """
        Check that the override information is populated for the
        simple::goodbye function. It should contain the decl for the
        base::goodbye function. Base::goodbye has no override so it
        will be none
        """

        base = self.global_ns.class_("base").member_function("goodbye")
        override_decl = self.global_ns.class_("simple")\
            .member_function("goodbye")
        self.assertTrue(base.overrides is None)
        self.assertFalse(override_decl.overrides is None)
        self.assertEqual(override_decl.overrides, base)
def create_suite():
    """Return a unittest suite holding every test of the Test case class."""
    tests = unittest.makeSuite(Test)
    bundle = unittest.TestSuite()
    bundle.addTest(tests)
    return bundle
def run_suite():
    """Execute the module's test suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_suite())
run_suite()
| [
"joe.snyder@kitware.com"
] | joe.snyder@kitware.com |
5314bcd247fba7ad6e690fc9f393d04c07ee36bf | e03c6f8453396cf3743e376e775f0f760b94f355 | /backend.py | afe88e2346a2587a0b31902d7e74fd095260569e | [] | no_license | tommynti/Digital-Library | 6d044aa4dc69911d072c263d7aa4144003d0c68b | 6271c950207238f1c9dbb8a2e14176fb0e0b806d | refs/heads/master | 2022-12-24T18:42:53.252521 | 2020-10-01T12:47:24 | 2020-10-01T12:47:24 | 300,272,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | import sqlite3
def connect():
    """Ensure the book table exists in books.db (creates the DB on first use)."""
    db = sqlite3.connect("books.db")
    cursor = db.cursor()
    # SQLite table names are case-insensitive, so 'booK' == 'book'.
    cursor.execute("CREATE TABLE IF NOT EXISTS booK (id INTEGER PRIMARY KEY, title text, author text, year integer, isbn integer)")
    db.commit()
    db.close()
def insert(title, author, year, isbn):
    """Append a new book row; the primary key is assigned by SQLite (NULL id)."""
    db = sqlite3.connect("books.db")
    db.cursor().execute(
        "INSERT INTO book VALUES (NULL,?,?,?,?)",
        (title, author, year, isbn),
    )
    db.commit()
    db.close()
def view():
    """Return every row of the book table as a list of tuples."""
    db = sqlite3.connect("books.db")
    all_rows = db.cursor().execute("SELECT * FROM book").fetchall()
    db.close()
    return all_rows
def search(title="", author="", year="", isbn=""):
    """Return rows whose title, author, year or isbn matches exactly.

    Matching is case-sensitive; unspecified fields default to "" and only
    match rows containing empty strings.
    """
    db = sqlite3.connect("books.db")
    matches = db.cursor().execute(
        "SELECT * FROM book WHERE title=? OR author=? OR year=? OR isbn=?",
        (title, author, year, isbn),
    ).fetchall()
    db.close()
    return matches
def delete(id):
    """Remove the row whose primary key equals ``id`` (no-op when absent).

    NOTE: the parameter shadows the builtin ``id`` but is kept so keyword
    callers (delete(id=...)) keep working.
    """
    db = sqlite3.connect("books.db")
    db.cursor().execute("DELETE FROM book WHERE id=?", (id,))
    db.commit()
    db.close()
def update(id, title, author, year, isbn):
    """Overwrite every field of the row identified by ``id``."""
    db = sqlite3.connect("books.db")
    # Bind values in the same order as the placeholders in the statement.
    db.cursor().execute(
        "UPDATE book SET title=?, author=?, year=?, isbn=? WHERE id=?",
        (title, author, year, isbn, id),
    )
    db.commit()
    db.close()
connect() # call at import time so the table exists without the other script having to call connect() itself
#insert("Post Office","Bukowski",1990,234235412)
#print(view())
#print(search(author="Bukowski")) # matching is case-sensitive: searching "bukowski" returns an empty list
| [
"work.with.ntinas@gmail.com"
] | work.with.ntinas@gmail.com |
0d48d030220a35939e0381ff0776d35c898236d0 | 849d27e666651be44849999d1cca75df5c9e68ee | /testcases/tc3.py | b60fb4a22a6118e9cfb2c0182bef1c3575aaafaf | [] | no_license | smominnt/Cpp-Python | a28279ac63328b9645a2a94c7e60994925a36259 | d73b2e0fc3380eefca00af1648bbbaf71bc1ac17 | refs/heads/master | 2020-06-26T17:55:34.391504 | 2019-07-30T18:27:59 | 2019-07-30T18:27:59 | 199,705,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | def f():
print("recursive function")
return f()
def f1(n):
if n==1:
return 1
else:
return n * f1(n+1)
f()
f1(5)
| [
"noreply@github.com"
] | smominnt.noreply@github.com |
425b5455d764d14857902124565390053183ac18 | fcbe63e53f90be593975d897c84633354dd92684 | /most.py | ab91bb32f0707cd52189fa80f92da63af75b66df | [] | no_license | Kuljat991/Diplomski-rad | d631017c7dc73234abeeccf112f83d85227d8221 | f23fa72a6862b5087b7a46ca8888e2da912da753 | refs/heads/master | 2020-03-22T00:43:51.967626 | 2018-06-30T16:08:38 | 2018-06-30T16:08:38 | 139,262,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,965 | py | # -*- coding: utf-8 -*-
#solver = icoFoam
#foamMonitor -l -r 2 -i 5 postProcessing/residuals/0/residuals.dat
#gdje su dodatne oznake:
#-l oznaka za logaritamski y skalu (koristi se za residuale)
#-r 2 refreshanje grafa svake 2 sekunde
#-i 5 možeš ga zatvorit 5 sekundi nakon što se prestanu zapisivat podaci
# funkcija simulacija prima vektor brojeva koju rasporeduje po 360 stupnjeva u rx i ry
# i broj posto ce se vrtit paralelno
# kopira se cijeli folder most pod novim nazivom most1 ili most2 i td ovisno o fileNumberu tako da folder most ostaje uvijek netaknut
# zatim se izacunaju karakteristicne tocke mreze tj. tocke blokova koje se zatim upisuju u file blockMeshDict
# otvara se controlDict u koji se upisuje l_ref i A_ref za izracunavanje sila
# brisu se folderi postProcessing i dynamicCode ako postoje
# zapocinje simulacija koja racuna lift i drag force
# nakon proracuna se brise folder u kojem se izvodila simulacija
import time
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import subprocess
import shutil
import math
from scipy.optimize import bisect
from scipy import interpolate
from math import pi, cos, sin
def simulacija(X,fileNumber):
newFileName = './proracun/proracun/most_'+str(fileNumber)
shutil.copytree('./proracun/best/best',newFileName)
#print(newFileName)
#dubina uronjene noge
h=10.
#posto je profil simetrican racuna se samo gornja krivulja od 0 do 180 rx ide s lijeva
n = 10 #interpolacijske tocke
r_temp = X
fi_temp = np.linspace(0., pi, len(r_temp))
#print(len(fi_temp), len(r_temp))
interp_r = interpolate.interp1d(fi_temp,r_temp,kind='cubic')
def func_max_x(x,xt):
delta_x=[]
for i in range(len(x)):
delta_x.append(abs(x[i]-xt))
return max(delta_x)
def r_phi(phi):
phi=np.abs(phi)
phi=phi%(2*pi)
if(phi<pi):
return interp_r(phi)
else:
return interp_r(2*pi-phi)
def ogranicenje_superponirano (qx, qy, r, fi, Ix, Iy):
M_x = qy * h**2 /2.
M_y = qx * h**2 /2.
# M_x = 10000
# M_y = 10000
x=[]
y=[]
x_prvi_kvadr=[]
y_prvi_kvadr=[]
x_drugi_kvadr=[]
y_drugi_kvadr=[]
x_treci_kvadr=[]
y_treci_kvadr=[]
x_cetvrti_kvadr=[]
y_cetvrti_kvadr=[]
for i in range (len(fi)):
if (fi[i]<=pi/2):
x_prvi_kvadr.append(r[i]*cos(fi[i]))
y_prvi_kvadr.append(r[i]*sin(fi[i]))
x.append(x_prvi_kvadr[-1])
y.append(y_prvi_kvadr[-1])
elif (fi[i]>=pi/2 and fi[i]<pi):
x_drugi_kvadr.append(r[i]*cos(fi[i]))
y_drugi_kvadr.append(r[i]*sin(fi[i]))
x.append(x_drugi_kvadr[-1])
y.append(y_drugi_kvadr[-1])
elif (fi[i]>=pi and fi[i]<3*pi/2):
x_treci_kvadr.append(r[i]*cos(fi[i]))
y_treci_kvadr.append(r[i]*sin(fi[i]))
x.append(x_treci_kvadr[-1])
y.append(y_treci_kvadr[-1])
else:
x_cetvrti_kvadr.append(r[i]*cos(fi[i]))
y_cetvrti_kvadr.append(r[i]*sin(fi[i]))
x.append(x_cetvrti_kvadr[-1])
y.append(y_cetvrti_kvadr[-1])
sigma_x=[]
sigma_y=[]
for i in range (len(x_prvi_kvadr)):
sigma_x.append(- M_x / Ix * abs(y_prvi_kvadr[i]))
sigma_y.append(- M_y / Iy * abs(x_prvi_kvadr[i]))
for i in range (len(x_drugi_kvadr)):
sigma_x.append(- M_x / Ix * abs(y_drugi_kvadr[i]))
sigma_y.append( M_y / Iy * abs(x_drugi_kvadr[i]))
for i in range (len(x_treci_kvadr)):
sigma_x.append( M_x / Ix * abs(y_treci_kvadr[i]))
sigma_y.append( M_y / Iy * abs(x_treci_kvadr[i]))
for i in range (len(x_cetvrti_kvadr)):
sigma_x.append( M_x / Ix * abs(y_cetvrti_kvadr[i]))
sigma_y.append(- M_y / Iy * abs(x_cetvrti_kvadr[i]))
sigma_ekv_plus=[]
sigma_ekv_minus=[]
for i in range(len(sigma_x)):
sigma_ekv_plus.append(sigma_x[i]+sigma_y[i])
sigma_ekv_minus.append(-sigma_x[i]+sigma_y[i])
sigma_ekv_plus_max=max([abs(max(sigma_ekv_plus)),abs(min(sigma_ekv_plus))])
sigma_ekv_minus_max=max([abs(max(sigma_ekv_minus)),abs(min(sigma_ekv_minus))])
sigma_superponirano =max([sigma_ekv_minus_max,sigma_ekv_plus_max])
# print(sigma_ekv_plus_max)
# print(sigma_ekv_minus_max)
# plt.plot(x,y)
# fi_for_plot=np.rad2deg(fi)
# plt.plot(fi_for_plot, sigma_x, label='sigma_lift')
# plt.plot(fi_for_plot, sigma_y, label='sigma_drag')
# plt.plot(fi_for_plot, sigma_ekv_plus, label='sigma_superpon_plus')
# plt.plot(fi_for_plot, sigma_ekv_minus, label='sigma_superpon_minus')
# plt.xlabel('fi')
# plt.ylabel('Sigma')
# plt.legend()
# plt.savefig('sigme.png')
return sigma_superponirano*20
def ogranicenje (qx, qy, x_max, y_max, Ix, Iy):
Mx = qy * h**2 /2.
My = qx * h**2 /2.
sigma_x = Mx * y_max/Ix
sigma_y = My * x_max/Iy
return sigma_x , sigma_y
#citanje sila
def line2dict(line):
tokens_unprocessed = line.split()
tokens = [x.replace(")","").replace("(","") for x in tokens_unprocessed]
floats = [float(x) for x in tokens]
data_dict = {}
data_dict['time'] = floats[0]
force_dict = {}
force_dict['pressure'] = floats[1:4]
force_dict['viscous'] = floats[4:7]
force_dict['porous'] = floats[7:10]
moment_dict = {}
moment_dict['pressure'] = floats[10:13]
moment_dict['viscous'] = floats[13:16]
moment_dict['porous'] = floats[16:19]
data_dict['force'] = force_dict
data_dict['moment'] = moment_dict
return data_dict
fi_prvi = np.linspace(0.,pi/4, n/2+1)
fi_drugi = np.linspace(pi/4, 3*pi/4, n+2)
fi_treci = np.linspace(3*pi/4, pi, n/2+1)
r_prvi = interp_r(fi_prvi[0:-1])
x_prvi =[]
y_prvi =[]
for i in range (len(r_prvi)):
x_prvi.append(r_prvi[i]*cos(fi_prvi[i]))
y_prvi.append(r_prvi[i]*sin(fi_prvi[i]))
#print(np.rad2deg(fi_prvi[i]))
r_drugi = interp_r(fi_drugi[1:-1])
x_drugi = []
y_drugi = []
for i in range (len(r_drugi)):
x_drugi.append(r_drugi[i]*cos(fi_drugi[i+1]))
y_drugi.append(r_drugi[i]*sin(fi_drugi[i+1]))
#print(np.rad2deg(fi_drugi[i+1]))
r_treci = interp_r(fi_treci[1:])
x_treci =[]
y_treci =[]
for i in range (len(r_treci)):
x_treci.append(r_treci[i]*cos(fi_treci[i+1]))
y_treci.append(r_treci[i]*sin(fi_treci[i+1]))
#print(np.rad2deg(fi_treci[i+1]))
# Moment inercije Ix, Iy
fi_fi=np.linspace(0.,2*pi,101)
R=[]
for i in range (len(fi_fi)):
R.append((r_phi(fi_fi[i]))**2/2)
Area=integrate.simps(R,fi_fi)
print(Area)
def integrand_x(fi_kut):
return 1/4.*(r_phi(fi_kut))**4*(sin(fi_kut))**2
r_x=[]
for i in range(len(fi_fi)):
r_x.append(integrand_x(fi_fi[i]))
Ix = integrate.simps(r_x,fi_fi)
#print(Ix)
############################
# def fr_y(r,phi):
# return r**3*(cos(phi))**2
#Iy,err2=integrate.nquad(fr_y,[bound_r,bound_phi])
#print(Iy,err2)
#provjera
##########################
def integrand_y(fi_kut):
return 1/4.*(r_phi(fi_kut))**4*(cos(fi_kut))**2
r_y=[]
for i in range(len(fi_fi)):
r_y.append(integrand_y(fi_fi[i]))
Iy = integrate.simps(r_y,fi_fi)
#print(Iy)
##########################
# def fr_y(r,phi):
# return r*sin(phi)
#Ty,err3=integrate.nquad(fr_y,[bound_r,bound_phi])
#print(Ty)
#provjera
##########################
def integrand_Ty(fi_kut):
return 1/3.*(r_phi(fi_kut))**3*sin(fi_kut)
r_Ty=[]
for i in range(len(fi_fi)):
r_Ty.append(integrand_Ty(fi_fi[i]))
Ty =1/Area*integrate.simps(r_Ty,fi_fi)
print(Ty)
##########################
# def fr_x(r,phi):
# return r*cos(phi)
#Tx,err4=integrate.nquad(fr_x,[bound_r,bound_phi])
#print(Tx)
#provjera
##########################
def integrand_Tx(fi_kut):
return 1/3.*(r_phi(fi_kut))**3*cos(fi_kut)
r_Tx=[]
for i in range(len(fi_fi)):
r_Tx.append(integrand_Tx(fi_fi[i]))
Tx = 1/Area*integrate.simps(r_Tx,fi_fi)
print(Tx)
#print('Tx='+str(Tx)+' Ty='+str(Ty))
#domena simulacije noge mosta
domena_x = [-45., 115.]
domena_y = [-50., 50.]
poddomena_x = [-15., 15.]
poddomena_y = [-15., 15.]
podjela_poddomena_x=30
podjela_poddomena_y=50
podjela_domena_x_lijevo=20
podjela_domena_x_desno=50
podjela_domena_y=20
gradacija=20
#trazenje karakteristinih tocki za blockMesh
prva=[interp_r(pi/4)*cos(pi/4),interp_r(pi/4)*sin(pi/4)]
druga=[interp_r(3*pi/4)*cos(3*pi/4),interp_r(3*pi/4)*sin(3*pi/4)]
treca=[druga[0],-druga[1]]
cetvrta=[prva[0],-prva[1]]
#interpolacijske tocme izmedu glavnih tocaka
interp_prva_cetvrta=[]
for i in reversed(range(len(r_prvi))):
interp_prva_cetvrta.append([x_prvi[i],y_prvi[i],-0.5])
for i in range(len(r_prvi)-1):
interp_prva_cetvrta.append([x_prvi[i+1],-y_prvi[i+1],-0.5])
interp_prva_druga=[]
for i in range (len(r_drugi)):
interp_prva_druga.append([x_drugi[i],y_drugi[i],-0.5])
interp_druga_treca=[]
for i in range(len(r_treci)):
interp_druga_treca.append([x_treci[i],y_treci[i],-0.5])
for i in reversed (range(len(interp_druga_treca)-1)):
interp_druga_treca.append([interp_druga_treca[i][0],-interp_druga_treca[i][1],-0.5])
interp_treca_cetvrta=[]
for i in reversed(range(len(interp_prva_druga))):
interp_treca_cetvrta.append([interp_prva_druga[i][0],-interp_prva_druga[i][1],-0.5])
####################max_x,max_y
xevi=[]
yloni=[]
for i in range(len(interp_prva_cetvrta)):
xevi.append(interp_prva_cetvrta[i][0])
yloni.append(interp_prva_cetvrta[i][1])
for i in range(len(interp_prva_druga)):
xevi.append(interp_prva_druga[i][0])
yloni.append(interp_prva_druga[i][1])
for i in range(len(interp_druga_treca)):
xevi.append(interp_druga_treca[i][0])
yloni.append(interp_druga_treca[i][1])
for i in range(len(interp_treca_cetvrta)):
xevi.append(interp_treca_cetvrta[i][0])
yloni.append(interp_treca_cetvrta[i][1])
x_max = func_max_x(xevi,Tx)
y_max = max(yloni)
# print ('x_max= '+str(x_max)+' ,y_max='+str(y_max))
############################################################
#Pisanje blockMesh-a
############################################################
points = []
points.append(prva) #0
points.append(druga) #1
points.append(treca) #2
points.append(cetvrta) #3
points.append([poddomena_x[1],poddomena_y[1]]) #4
points.append([poddomena_x[0],poddomena_y[1]]) #5
points.append([poddomena_x[0],poddomena_y[0]]) #6
points.append([poddomena_x[1],poddomena_y[0]]) #7
points.append([domena_x[1],domena_y[1]]) #8
points.append([poddomena_x[1],domena_y[1]]) #9
points.append([poddomena_x[0],domena_y[1]]) #10
points.append([domena_x[0],domena_y[1]]) #11
points.append([domena_x[0],poddomena_y[1]]) #12
points.append([domena_x[0],poddomena_y[0]]) #13
points.append([domena_x[0],domena_y[0]]) #14
points.append([poddomena_x[0],domena_y[0]]) #15
points.append([poddomena_x[1],domena_y[0]]) #16
points.append([domena_x[1],domena_y[0]]) #17
points.append([domena_x[1],poddomena_y[0]]) #18
points.append([domena_x[1],poddomena_y[1]]) #19
spline=[]
spline.append(" spline 0 3 (")
spline.append(" spline 0 1 (")
spline.append(" spline 1 2 (")
spline.append(" spline 2 3 (")
spline.append(" spline 20 23 (")
spline.append(" spline 20 21 (")
spline.append(" spline 21 22 (")
spline.append(" spline 22 23 (")
# print(interp_prva_cetvrta[0])
for i in range(len(interp_prva_cetvrta)):
spline[0]=spline[0]+"("+str(interp_prva_cetvrta[i][0])+" "+str(interp_prva_cetvrta[i][1])+" "+"-0.5) "
spline[4]=spline[4]+"("+str(interp_prva_cetvrta[i][0])+" "+str(interp_prva_cetvrta[i][1])+" "+"0.5) "
spline[0]=spline[0][0:-1]+")"
spline[4]=spline[4][0:-1]+")"
for i in range(len(interp_prva_druga)):
spline[1]=spline[1]+"("+str(interp_prva_druga[i][0])+" "+str(interp_prva_druga[i][1])+" "+"-0.5) "
spline[5]=spline[5]+"("+str(interp_prva_druga[i][0])+" "+str(interp_prva_druga[i][1])+" "+"0.5) "
spline[1]=spline[1][0:-1]+")"
spline[5]=spline[5][0:-1]+")"
for i in range(len(interp_druga_treca)):
spline[2]=spline[2]+"("+str(interp_druga_treca[i][0])+" "+str(interp_druga_treca[i][1])+" "+"-0.5) "
spline[6]=spline[6]+"("+str(interp_druga_treca[i][0])+" "+str(interp_druga_treca[i][1])+" "+"0.5) "
spline[2]=spline[2][0:-1]+")"
spline[6]=spline[6][0:-1]+")"
for i in range(len(interp_treca_cetvrta)):
spline[3]=spline[3]+"("+str(interp_treca_cetvrta[i][0])+" "+str(interp_treca_cetvrta[i][1])+" "+"-0.5) "
spline[7]=spline[7]+"("+str(interp_treca_cetvrta[i][0])+" "+str(interp_treca_cetvrta[i][1])+" "+"0.5) "
spline[3]=spline[3][0:-1]+")"
spline[7]=spline[7][0:-1]+")"
# print(spline[3])
target = open('./'+newFileName+'/system/blockMeshDict', 'r')
contents = target.readlines()
target.close()
for j in range (len(contents)):
# pisanje tocaka
if contents [j] == ' pointField points(20);\n':
for i in range (len (points)):
contents [j+i+1] = " points[" + str(i) + "] = point(" + str(points[i][0]) +", "+ str(points[i][1]) + ", -0.5);\n"
if contents [j] =='edges\n':
for i in range (len(spline)):
contents[j+i+2] =spline[i]+"\n"
if contents [j] =='blocks\n':
contents [j+2] =' hex (3 7 4 0 23 27 24 20) ('+str(podjela_poddomena_y)+' '+str(podjela_poddomena_x)+' 1) edgeGrading ('+str(gradacija)+' 1 1)\n'
contents [j+3] =' hex (0 4 5 1 20 24 25 21) ('+str(podjela_poddomena_y)+' '+str(podjela_poddomena_x)+' 1) edgeGrading ('+str(gradacija)+' 1 1)\n'
contents [j+4] =' hex (2 1 5 6 22 21 25 26) ('+str(podjela_poddomena_x)+' '+str(podjela_poddomena_y)+' 1) edgeGrading (1 '+str(gradacija)+' 1)\n'
contents [j+5] =' hex (2 6 7 3 22 26 27 23 22) ('+str(podjela_poddomena_y)+' '+str(podjela_poddomena_x)+' 1) edgeGrading ('+str(gradacija)+' 1 1)\n'
contents [j+6] =' hex (12 5 10 11 32 25 30 31) ('+str(podjela_domena_x_lijevo)+' '+str(podjela_domena_y)+' 1) simpleGrading (1 1 1)\n'
contents [j+7] =' hex (5 4 9 10 25 24 29 30) ('+str(podjela_poddomena_x)+' '+str(podjela_domena_y)+' 1) simpleGrading (1 1 1)\n'
contents [j+8] =' hex (4 19 8 9 24 39 28 29) ('+str(podjela_domena_x_desno)+' '+str(podjela_domena_y)+' 1) simpleGrading (1 1 1)\n'
contents [j+9] =' hex (13 6 5 12 33 26 25 32) ('+str(podjela_domena_x_lijevo)+' '+str(podjela_poddomena_x)+' 1) simpleGrading (1 1 1)\n'
contents [j+10] =' hex (7 18 19 4 27 38 39 24) ('+str(podjela_domena_x_desno)+' '+str(podjela_poddomena_x)+' 1) simpleGrading (1 1 1)\n'
contents [j+11] =' hex (14 15 6 13 34 35 26 33) ('+str(podjela_domena_x_lijevo)+' '+str(podjela_domena_y)+' 1) simpleGrading (1 1 1)\n'
contents [j+12] =' hex (15 16 7 6 35 36 27 26) ('+str(podjela_poddomena_x)+' '+str(podjela_domena_y)+' 1) simpleGrading (1 1 1)\n'
contents [j+13] =' hex (16 17 18 7 36 37 38 27) ('+str(podjela_domena_x_desno)+' '+str(podjela_domena_y)+' 1) simpleGrading (1 1 1)\n'
#print(contents [j+13])
out = open('./'+newFileName+'/system/blockMeshDict', 'w')
for i in range(len(contents)):
out.writelines(str(contents[i]))
out.close()
l_ref=abs(X[0])+abs(X[-1])
ry=[]
for i in range (len(r_temp)):
ry.append(r_temp[i]*sin(fi_temp[i]))
A_ref=max(ry)*2.
target = open('./'+newFileName+'/system/controlDict', 'r')
contents_controlDict = target.readlines()
target.close()
contents_controlDict[73] = ' lRef '+str(l_ref)+';\n'
contents_controlDict[74] = ' ARef '+str(A_ref)+';\n'
out_controlDict = open('./'+newFileName+'/system/controlDict', 'w')
for i in range(len(contents_controlDict)):
out_controlDict.writelines(str(contents_controlDict [i]))
out_controlDict.close()
case_dir = newFileName
solver = 'icoFoam'
"""
Start OpenFOAM simulation.
"""
print(' - Starting calculation... %s' % case_dir)
#brisanje foldera postProcessing i dynamicCode ako postoji
list_file = os.listdir('./'+newFileName)
for i in range (len(list_file)):
if list_file[i] == 'postProcessing':
subprocess.call(['rm', '-rf', './'+newFileName+'/postProcessing'])
elif list_file[i] == 'dynamicCode':
subprocess.call(['rm', '-rf', './'+newFileName+'/dynamicCode'])
cmd = 'bash start_case.sh %s %s' % (case_dir, solver)
proc = subprocess.Popen(cmd.split(), cwd='./', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
print (out, err)
print(' - Calculation finished! %s' % case_dir)
exit_code = proc.wait()
print(' - Return code:', exit_code)
if exit_code != 0:
#shutil.rmtree(newFileName)
return 1e15, 1e15, 1e10*Area, 1e15, 1e15, 1e10
forces_file = './'+newFileName+'/postProcessing/forcesIncompressible/0/forces.dat'
if not os.path.isfile(forces_file):
print ("Forces file not found at "+forces_file)
print ("Be sure that the case has been run and you have the right directory!")
print ("Exiting.")
#shutil.rmtree(newFileName)
sys.exit()
time = []
drag = []
lift = []
moment = []
with open(forces_file,"r") as datafile:
for line in datafile:
if line[0] == "#":
continue
data_dict = line2dict(line)
time += [data_dict['time']]
drag += [data_dict['force']['pressure'][0] + data_dict['force']['viscous'][0]]
lift += [data_dict['force']['pressure'][1] + data_dict['force']['viscous'][1]]
moment += [data_dict['moment']['pressure'][2] + data_dict['moment']['viscous'][2]]
datafile.close()
#plt.plot(time[5:],lift[5:])
# maksimalne sile po metru visine mosta
#drag_max = (max([max(drag[500:-1]),abs(min(drag[500:-1]))]))+1000
#lift_max = (max([max(lift[500:-1]),abs(min(lift[500:-1]))]))+1000
drag_max = np.max(np.abs(drag[500:])) + 1000.
lift_max = np.max(np.abs(lift[500:])) + 1000.
n_points= 361
fi=np.linspace(0.,2*pi,n_points)
r=[]
for i in range (len(fi)):
r.append(r_phi(fi[i]))
# plt.plot(x_prvi_kvadr,y_prvi_kvadr)
# plt.plot(x_drugi_kvadr,y_drugi_kvadr)
Sigma_max = ogranicenje_superponirano (drag_max, lift_max, r, fi, Ix, Iy)
#Sigma_x, Sigma_y = ogranicenje (drag_max, lift_max, x_max, y_max, Ix, Iy)
#shutil.rmtree(newFileName)
#print(Sigma_x,Sigma_y)
#print (Sigma_x/10**5, Sigma_y/10**5, Area)
return Sigma_max, Area, drag_max, lift_max, Ix, Iy
# Safety factor applied when deriving the allowable stress.
faktor_sigurnosti =1.
# Allowable stress: base value 2e5 reduced by the safety factor.
# NOTE(review): units are implicit — presumably the same unit as the stresses
# returned by the simulation; confirm against ogranicenje_superponirano.
sigma_dop = 2*10**5/faktor_sigurnosti
if __name__ == "__main__":
    def kvadr_pop_presjek(b, h):
        """Radial profile r(phi) of a b-by-h rectangular cross-section.

        Samples 360 angles over [0, pi] and returns the distance from the
        centroid to the rectangle's boundary at each angle.
        """
        fi_diag = math.atan2(h / 2, b / 2)
        fi = np.linspace(0, pi, 360)
        r = []
        for angle in fi:
            if angle <= fi_diag:
                # Right edge: x = b/2.
                r.append((b / 2) / np.cos(angle))
            elif angle <= pi - fi_diag:
                # Top edge: y = h/2.
                r.append((h / 2) / np.sin(angle))
            else:
                # Left edge: x = -b/2, keep the radius positive.
                r.append(abs((b / 2) / np.cos(angle)))
        return r

    # Design vector for a single timed evaluation of the CFD objective.
    X = [3.2207603893825074, 0.90801560199401887, 3.157813375761541,
         2.7724328583924893, 2.0377827745169523, 1.8084522673720427,
         1.9963162071726572, 6.0]
    start = time.time()
    sigma_max, A, drag_max, lift_max, Ix, Iy = simulacija(X, 'gg')
    end = time.time()
    print(sigma_max, A, drag_max, lift_max, Ix, Iy)
    print(end - start)
| [
"kuljat991@gmail.com"
] | kuljat991@gmail.com |
c57a263d7d3c8f34cde58823cfa794bb82d9788f | 71b72a7caa66c1453203c972c86aaa703989a6b1 | /djangowebsite/settings.py | 3f5bb4f3baa09bafa413e91ddc3297a16852b9f2 | [] | no_license | lego666/pythondev | 786d118951281d1ad51731799567833a5529b6f5 | a8a1bb99156078c1f591fc26f717c67cf8585a11 | refs/heads/master | 2021-03-20T15:03:41.105648 | 2020-03-14T07:46:07 | 2020-03-14T07:46:07 | 247,215,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | """
Django settings for djangowebsite project.
Generated by 'django-admin startproject' using Django 2.2.11.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (e.g. os.environ) before any production deploy.
SECRET_KEY = '84_y96h1o4y^0ufkondc+_@hpv^$$_hdq%zin-58m=4$bdv4-e'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True leaks stack traces and settings to visitors.
DEBUG = True
# NOTE(review): must list the served host names once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'mainApp',
    'news',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangowebsite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangowebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru'
LANGUAGES = [
    ('ru', 'Russian'),
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# NOTE(review): absolute path baked in; os.path.join(BASE_DIR, 'media') would
# keep the project relocatable — confirm the deployment layout first.
MEDIA_ROOT = "/home/django/djangowebsite/media"
STATIC_URL = '/static/'
| [
"komitetsx@gmail.com"
] | komitetsx@gmail.com |
55fdd27357fef324e815b7aa812a8aa6cbae5d6b | 7a2d2cfbe99a13920e55e462bd40627e34d18f23 | /tests/openbb_terminal/forecast/test_mstl_view.py | dd57c9c97a035d072474b258f58e1ed64881516b | [
"MIT"
] | permissive | conrad-strughold/GamestonkTerminal | b9ada627929dbc1be379f19c69b34e24764efcff | c9aa674d979a7c7fd7f251410ceaa1c8a4ef2e6e | refs/heads/main | 2023-06-24T02:59:45.096493 | 2023-05-16T15:15:20 | 2023-05-16T15:15:20 | 342,313,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | import pytest
try:
from openbb_terminal.forecast import mstl_view
except ImportError:
pytest.skip(allow_module_level=True)
def test_display_mstl_forecast(tsla_csv):
    """Smoke test: the MSTL view produces a 1-step forecast without raising."""
    settings = dict(
        target_column="close",
        seasonal_periods=3,
        n_predict=1,
        start_window=0.5,
        forecast_horizon=1,
    )
    mstl_view.display_mstl_forecast(tsla_csv, **settings)
| [
"noreply@github.com"
] | conrad-strughold.noreply@github.com |
ed5415ab65f0be90b74c57b2574130610b45775e | 4cbe1a48b8e55a31782bcdb540213daee4e2486a | /dsplot/tree/tree_node.py | 0ddb9d9cbb15339b32ad191f41deed2909cbdbe3 | [
"MIT"
] | permissive | subhamChakraborty23/dsplot | 140694332512fedd2589d67ec7f8fac5f79608e5 | b4d399544aa1556395f729ee2e6163dc6cedbff9 | refs/heads/master | 2023-07-15T06:57:01.490108 | 2021-08-08T11:05:37 | 2021-08-08T11:05:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | class BinaryTreeNode:
    def __init__(self, val=0, left=None, right=None):
        """Create a binary-tree node.

        :param val: payload stored at this node (defaults to 0).
        :param left: left child node, or None for no child.
        :param right: right child node, or None for no child.
        """
        self.val = val        # payload held by this node
        self.left = left      # left subtree root (or None)
        self.right = right    # right subtree root (or None)
| [
"trantriducs@gmail.com"
] | trantriducs@gmail.com |
66151a83ec088503878c239841ec21bda8cc9c6e | 90f211662add017213701c4aaa933451857ca3a3 | /crawler/dblp_conf_spider/dblp_conf_spider/spiders/rotate_useragent.py | 6b2f71542384dbff1b0f3cc34f258061cf2fabee | [] | no_license | yungu-imr/dblp-search | 656bc67988ab0f2931bebd030642d31cce87ea15 | a217c5d1b077f10b520591eb834dfb66b8d23a1f | refs/heads/master | 2023-08-18T14:34:44.834124 | 2016-06-14T17:10:18 | 2016-06-14T17:10:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | # -*-coding:utf-8-*-
from scrapy import log
"""避免被ban策略之一:使用useragent池。
使用注意:需在settings.py中进行相应的设置。
"""
import random
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
class RotateUserAgentMiddleware(UserAgentMiddleware):
    """Downloader middleware that rotates the request User-Agent.

    One of the anti-ban strategies: a random user agent is drawn from a
    pool for every outgoing request. The middleware must also be enabled
    in settings.py for Scrapy to use it.
    """
    def __init__(self, user_agent=''):
        # Fallback user-agent string; unused when the pool below is non-empty.
        self.user_agent = user_agent
    def process_request(self, request, spider):
        """Attach a randomly chosen User-Agent header to the request."""
        ua = random.choice(self.user_agent_list)
        if ua:
            # Show the user agent currently in use.
            print "********Current UserAgent:%s************" %ua
            # Optional logging (kept disabled).
            # log.msg('Current UserAgent: '+ua, level='INFO')
            request.headers.setdefault('User-Agent', ua)
    #the default user_agent_list composes chrome,I E,firefox,Mozilla,opera,netscape
    #for more user agent strings,you can find it in http://www.useragentstring.com/pages/useragentstring.php
    user_agent_list = [\
        # "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        # "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        # "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        # "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        # "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        # "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        # "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        # "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        # "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        # "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        # "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        # "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        # "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        # "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        # "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        # "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        # "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        # "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        # "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        # "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
] | [
"geron762@sjtu.edu.cn"
] | geron762@sjtu.edu.cn |
e2a594fe676c056cabe454d935e579994841c1dd | 4d6e3c55fc839191b97a40751fd4cbc211969fb0 | /08_apples_and_bananas/solution1_iterate_chars.py | d7efb98a2fab3470de1859d7b15949963f87046d | [
"MIT"
] | permissive | wFanhua/tiny_python_projects | 580860c82a840214ba6255eb6a5a544edc5943a4 | f639352c0cf206cd4bee649c6d45d28ae10c877d | refs/heads/master | 2022-12-02T19:13:16.158045 | 2020-08-23T06:14:48 | 2020-08-23T06:14:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | #!/usr/bin/env python3
"""Apples and Bananas"""
import argparse
import os
# --------------------------------------------------
def get_args():
    """Parse command-line arguments.

    If the positional ``text`` argument names an existing file, the
    argument is replaced by the file's contents with the trailing
    newline stripped.
    """
    arg_parser = argparse.ArgumentParser(
        description='Apples and bananas',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('text', metavar='text', help='Input text or file')
    arg_parser.add_argument('-v',
                            '--vowel',
                            metavar='vowel',
                            help='The vowel to substitute',
                            default='a',
                            type=str,
                            choices=list('aeiou'))
    parsed = arg_parser.parse_args()
    if os.path.isfile(parsed.text):
        parsed.text = open(parsed.text).read().rstrip()
    return parsed
# --------------------------------------------------
def main():
    """Make a jazz noise here: replace every vowel in the input text.

    Lowercase vowels become the chosen vowel, uppercase vowels become
    its uppercase form, and every other character passes through
    unchanged.
    """
    args = get_args()
    # One C-level pass via str.translate beats a per-character if/elif loop.
    table = str.maketrans('aeiouAEIOU',
                          args.vowel * 5 + args.vowel.upper() * 5)
    print(args.text.translate(table))
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
"744118724@qq.com"
] | 744118724@qq.com |
0156d344f559f46eae61beca0b02e4facf7eb7c7 | 8a3c7cf72f37236c43a180feeb89cf0355e6c901 | /hw3/text_manager.py | dc6c6c630985c25fe7e8ded1028131b0b21f859c | [] | no_license | itay-berko/DL-course-097200-technion | 71abe42a9027d996e5249cb3f51ca30748339fef | a4fdd4bcfbcedea2b030b3e671ab411147ca2052 | refs/heads/master | 2022-03-29T03:07:48.085033 | 2020-01-30T10:47:52 | 2020-01-30T10:47:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | import json
from collections import Counter
import itertools
import config
import data
import utils
def extract_vocab(iterable, top_k=None, start=0):
    """Turn an iterable of token lists into a token -> index vocabulary.

    Tokens are ranked by frequency (most frequent first), ties broken by
    reverse lexicographic order, and numbered consecutively from ``start``.
    If ``top_k`` is truthy, only the ``top_k`` most common tokens are kept.
    """
    counts = Counter(itertools.chain.from_iterable(iterable))
    if top_k:
        candidates = (token for token, _ in counts.most_common(top_k))
    else:
        candidates = counts.keys()
    # Descending by count, then by token (reverse lexicographic tie-break).
    ranked = sorted(candidates, key=lambda token: (counts[token], token),
                    reverse=True)
    return {token: index for index, token in enumerate(ranked, start=start)}
def preprocess_text():
    """Build question/answer vocabularies from the training set.

    Reads the training question and answer JSON files, tokenizes them via
    the ``data`` helpers, builds one vocabulary per modality (questions
    indexed from 1, answers capped at ``config.max_answers``), and writes
    both to ``config.vocabulary_path`` as JSON.
    """
    question_path = utils.path_for(train=True, question=True)
    answer_path = utils.path_for(train=True, answer=True)
    with open(question_path, 'r') as fd:
        raw_questions = json.load(fd)
    with open(answer_path, 'r') as fd:
        raw_answers = json.load(fd)
    question_vocab = extract_vocab(data.prepare_questions(raw_questions),
                                   start=1)
    answer_vocab = extract_vocab(data.prepare_answers(raw_answers),
                                 top_k=config.max_answers)
    with open(config.vocabulary_path, 'w') as fd:
        json.dump({'question': question_vocab, 'answer': answer_vocab}, fd)
if __name__ == '__main__':
preprocess_text()
| [
"noreply@github.com"
] | itay-berko.noreply@github.com |
102c4de4c693165f76ea729d639e60df2acffa74 | b5d6219ac738ed05485439540f38d63d21694c51 | /DAT/ED6_DT01/R2402.阿伊纳街道.py | 8e819262eb8ffe2163c28982576d61cad7038f0e | [] | no_license | otoboku/ED6-FC-Steam-CN | f87ffb2ff19f9272b986fa32a91bec360c21dffa | c40d9bc5aaea9446dda27e7b94470d91cb5558c5 | refs/heads/master | 2021-01-21T02:37:30.443986 | 2015-11-27T07:41:41 | 2015-11-27T07:41:41 | 46,975,651 | 1 | 0 | null | 2015-11-27T10:58:43 | 2015-11-27T10:58:42 | null | UTF-8 | Python | false | false | 44,997 | py | from ED6ScenarioHelper import *
def main():
# 阿伊纳街道
CreateScenaFile(
FileName = 'R2402 ._SN',
MapName = 'Ruan',
Location = 'R2402.x',
MapIndex = 103,
MapDefaultBGM = "ed60020",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'黑衣男子', # 9
'黑衣男子', # 10
'阿加特', # 11
'蒙面队长', # 12
'目标用摄像机', # 13
'卢安方向', # 14
'艾尔·雷登关所方向', # 15
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 103,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH00341 ._CH', # 00
'ED6_DT07/CH00151 ._CH', # 01
'ED6_DT07/CH00152 ._CH', # 02
'ED6_DT07/CH00260 ._CH', # 03
'ED6_DT07/CH00340 ._CH', # 04
'ED6_DT07/CH00341 ._CH', # 05
'ED6_DT07/CH00342 ._CH', # 06
'ED6_DT07/CH00344 ._CH', # 07
'ED6_DT07/CH00260 ._CH', # 08
'ED6_DT07/CH00261 ._CH', # 09
'ED6_DT07/CH00262 ._CH', # 0A
'ED6_DT07/CH00264 ._CH', # 0B
'ED6_DT07/CH00265 ._CH', # 0C
'ED6_DT09/CH10520 ._CH', # 0D
'ED6_DT09/CH10521 ._CH', # 0E
'ED6_DT09/CH10340 ._CH', # 0F
'ED6_DT09/CH10341 ._CH', # 10
'ED6_DT09/CH11040 ._CH', # 11
'ED6_DT09/CH11041 ._CH', # 12
'ED6_DT09/CH11070 ._CH', # 13
'ED6_DT09/CH11071 ._CH', # 14
'ED6_DT09/CH11080 ._CH', # 15
'ED6_DT09/CH11081 ._CH', # 16
)
AddCharChipPat(
'ED6_DT07/CH00341P._CP', # 00
'ED6_DT07/CH00151P._CP', # 01
'ED6_DT07/CH00152P._CP', # 02
'ED6_DT07/CH00260P._CP', # 03
'ED6_DT07/CH00340P._CP', # 04
'ED6_DT07/CH00341P._CP', # 05
'ED6_DT07/CH00342P._CP', # 06
'ED6_DT07/CH00344P._CP', # 07
'ED6_DT07/CH00260P._CP', # 08
'ED6_DT07/CH00261P._CP', # 09
'ED6_DT07/CH00262P._CP', # 0A
'ED6_DT07/CH00264P._CP', # 0B
'ED6_DT07/CH00265P._CP', # 0C
'ED6_DT09/CH10520P._CP', # 0D
'ED6_DT09/CH10521P._CP', # 0E
'ED6_DT09/CH10340P._CP', # 0F
'ED6_DT09/CH10341P._CP', # 10
'ED6_DT09/CH11040P._CP', # 11
'ED6_DT09/CH11041P._CP', # 12
'ED6_DT09/CH11070P._CP', # 13
'ED6_DT09/CH11071P._CP', # 14
'ED6_DT09/CH11080P._CP', # 15
'ED6_DT09/CH11081P._CP', # 16
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x80,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -4970,
Z = 0,
Y = 153310,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -930,
Z = 0,
Y = -3800,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclMonster(
X = 4300,
Z = -30,
Y = 113330,
Unknown_0C = 180,
Unknown_0E = 15,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x1AB,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -19510,
Z = 210,
Y = 102750,
Unknown_0C = 180,
Unknown_0E = 13,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x1AE,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -2490,
Z = 110,
Y = 49730,
Unknown_0C = 180,
Unknown_0E = 17,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x1AD,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 1490,
Z = 740,
Y = 62250,
Unknown_0C = 180,
Unknown_0E = 21,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x1B3,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclActor(
TriggerX = -2060,
TriggerZ = 0,
TriggerY = 120820,
TriggerRange = 1500,
ActorX = -2060,
ActorZ = 1500,
ActorY = 120820,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 7,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -30,
TriggerZ = 240,
TriggerY = 75790,
TriggerRange = 1000,
ActorX = 360,
ActorZ = 240,
ActorY = 76370,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 8,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_2FA", # 00, 0
"Function_1_320", # 01, 1
"Function_2_368", # 02, 2
"Function_3_1BEF", # 03, 3
"Function_4_1CD5", # 04, 4
"Function_5_1D76", # 05, 5
"Function_6_1F19", # 06, 6
"Function_7_1F6D", # 07, 7
"Function_8_1FCA", # 08, 8
)
def Function_0_2FA(): pass
label("Function_0_2FA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 2)), scpexpr(EXPR_END)), "loc_31F")
OP_A3(0x3FA)
Event(0, 2)
OP_4F(0x1B, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4F(0x1A, (scpexpr(EXPR_PUSH_LONG, 0x1B58), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetMapFlags(0x4)
label("loc_31F")
Return()
# Function_0_2FA end
def Function_1_320(): pass
label("Function_1_320")
OP_16(0x2, 0xFA0, 0xFFFDECC0, 0xFFFF34E0, 0x30025)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x98, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_344")
OP_6F(0x0, 0)
Jump("loc_34B")
label("loc_344")
OP_6F(0x0, 60)
label("loc_34B")
SoundDistance(0x1C5, 0xFFFFF092, 0x3E8, 0x20FC6, 0x2710, 0x9C40, 0x64, 0x0)
Return()
# Function_1_320 end
def Function_2_368(): pass
label("Function_2_368")
FadeToBright(2000, 0)
OP_77(0x41, 0x64, 0x82, 0x0, 0x0)
ClearMapFlags(0x1)
EventBegin(0x0)
OP_6D(-4600, 3000, 117500, 0)
OP_6B(3400, 0)
OP_67(0, 3300, -10000, 0)
OP_6C(52000, 0)
SetChrPos(0x8, -4900, 0, 125700, 0)
SetChrPos(0x9, -3900, 0, 125000, 0)
SetChrPos(0xA, 5300, 0, 139100, 0)
def lambda_3EE():
OP_6C(45000, 5000)
ExitThread()
QueueWorkItem(0x8, 1, lambda_3EE)
OP_6D(-4600, 0, 117500, 5000)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0xA, 0x80)
def lambda_41E():
OP_8E(0xFE, 0xFFFFE9BC, 0x0, 0x1C4BC, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_41E)
def lambda_439():
OP_8E(0xFE, 0xFFFFEE08, 0x0, 0x1CAFC, 0x1F40, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_439)
WaitChrThread(0x9, 0x1)
def lambda_459():
OP_8C(0x9, 0, 800)
ExitThread()
QueueWorkItem(0x9, 1, lambda_459)
WaitChrThread(0x8, 0x1)
def lambda_46C():
OP_8C(0x8, 0, 800)
ExitThread()
QueueWorkItem(0x8, 1, lambda_46C)
ChrTalk(
0x8,
"呼呼~~~~\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"这家伙怎么这么缠人呀!\x02",
)
CloseMessageWindow()
def lambda_4AA():
OP_6C(21000, 3000)
ExitThread()
QueueWorkItem(0x9, 2, lambda_4AA)
OP_43(0xA, 0x1, 0x0, 0x5)
ChrTalk(
0xA,
"#10A别逃!!(※假定)\x05\x02",
)
Sleep(200)
OP_43(0x8, 0x1, 0x0, 0x3)
Sleep(200)
OP_43(0x9, 0x1, 0x0, 0x4)
OP_6A(0x9)
Sleep(600)
ChrTalk(
0x9,
(
"#20A带着这么大把剑\x01",
"怎么还能追的上来呀!?\x05\x02",
)
)
Sleep(2400)
ChrTalk(
0xA,
"#5P#10A呵,我和你们锻炼的方法可不一样啊。\x05\x02",
)
Sleep(1400)
ChrTalk(
0xA,
"#5P呀啊~~~~~!\x02",
)
CloseMessageWindow()
WaitChrThread(0xA, 0x1)
def lambda_593():
OP_6C(348000, 4000)
ExitThread()
QueueWorkItem(0xA, 1, lambda_593)
ChrTalk(
0x8,
(
"唔……\x01",
"怎么都甩不掉吗?\x02",
)
)
CloseMessageWindow()
def lambda_5CC():
OP_97(0x8, 0xFFFFE37C, 0xDC50, 0x9C40, 0x5DC, 0x2)
ExitThread()
QueueWorkItem(0x8, 1, lambda_5CC)
ChrTalk(
0x9,
"没办法了,迎击吧!\x02",
)
CloseMessageWindow()
def lambda_604():
OP_97(0x9, 0xFFFFE37C, 0xDC50, 0xFFFF63C0, 0x5DC, 0x2)
ExitThread()
QueueWorkItem(0x9, 1, lambda_604)
OP_96(0xA, 0xFFFFE0C0, 0x0, 0xE358, 0x1F4, 0x3A98)
OP_99(0xA, 0x7, 0x0, 0x7D0)
ChrTalk(
0xA,
(
"#050F你们好像勉勉强强\x01",
"才撑到现在嘛。\x02\x03",
"和你们捉迷藏追到你们厌烦,\x01",
"是件很开心的事哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"被你穷追不舍的怨恨\x01",
"一定要你的死来补偿!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"愚蠢的家伙!\x01",
"2对1你还想赢吗!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F哈哈,\x01",
"当然是想赢的啰。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"什么……!?\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F打架要全神贯注。\x01",
"在气魄上输了就完了。\x02\x03",
"在你们夹着尾巴逃跑的时候\x01",
"就注定你们是丧家之犬。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"胡说!\x01",
"你这个游击会的狗!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"看我们两个\x01",
"怎么折磨死你!(※假定发生自动战斗)\x02",
)
)
CloseMessageWindow()
TurnDirection(0x8, 0xA, 0)
TurnDirection(0x9, 0xA, 0)
def lambda_838():
OP_94(0x1, 0xFE, 0x0, 0x7D0, 0x2710, 0x0)
ExitThread()
QueueWorkItem(0x8, 2, lambda_838)
def lambda_84E():
OP_94(0x1, 0xFE, 0x0, 0x7D0, 0x2710, 0x0)
ExitThread()
QueueWorkItem(0x9, 2, lambda_84E)
Sleep(500)
Fade(1000)
OP_44(0xA, 0xFF)
OP_6B(3000, 0)
OP_67(0, 8200, -10000, 0)
OP_6C(45000, 0)
SetChrPos(0x8, -8400, 0, 54200, 0)
SetChrPos(0x9, -9800, 0, 54800, 0)
SetChrPos(0xB, -6700, 0, 58700, 180)
SetChrPos(0xA, -7300, 0, 56400, 0)
TurnDirection(0xA, 0x8, 0)
TurnDirection(0x8, 0xA, 0)
TurnDirection(0x9, 0xA, 0)
OP_51(0x8, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x9, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x8, 7)
SetChrChipByIndex(0x9, 7)
ChrTalk(
0x8,
"唔~~~~~!\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"唔……\x01",
"怎么能在这里被抓住………\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F哼,快点投降\x01",
"给我老实坦白吧!\x02\x03",
"你们是什么人\x01",
"有什么企图……\x02",
)
)
CloseMessageWindow()
OP_9F(0xB, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
ClearChrFlags(0xB, 0x80)
OP_4F(0x1A, (scpexpr(EXPR_PUSH_LONG, 0x4E20), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Fade(500)
OP_9F(0xB, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ChrTalk(
0xB,
"青年的声音──那样的话会让我十分困扰呀。\x02",
)
CloseMessageWindow()
def lambda_9FD():
OP_8C(0xA, 0, 400)
ExitThread()
QueueWorkItem(0xA, 2, lambda_9FD)
ChrTalk(
0xA,
"#050F#10A啊!?\x05\x02",
)
Sleep(300)
SetChrChipByIndex(0xA, 1)
OP_96(0xA, 0xFFFFE890, 0x0, 0xD6D8, 0x3E8, 0x2710)
def lambda_A43():
label("loc_A43")
TurnDirection(0xA, 0xB, 0)
OP_48()
Jump("loc_A43")
QueueWorkItem2(0xA, 1, lambda_A43)
ChrTalk(
0xA,
"#050F你什么时候………\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"啊,队长!\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
"您来救我们了吗!\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
"真拿你们没办法。\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"我正想怎么联络比约定的时候晚,\x01",
"原来你们在这里玩耍呀。\x02",
)
)
CloseMessageWindow()
def lambda_AEC():
OP_99(0x8, 0x3, 0x0, 0x1F4)
ExitThread()
QueueWorkItem(0x8, 1, lambda_AEC)
ChrTalk(
0x8,
"实,实在是抱歉!\x02",
)
CloseMessageWindow()
def lambda_B1A():
OP_99(0x9, 0x3, 0x0, 0x1F4)
ExitThread()
QueueWorkItem(0x9, 1, lambda_B1A)
ChrTalk(
0x9,
"有很多人在妨碍我们………\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F原来如此……\x01",
"你就是首领啊!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"呵呵,\x01",
"我只不过是现场责任人……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"对于部下的失礼我道歉。\x01",
"能不能在这放过我们一把呢?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F啊?\x02\x03",
"你刚才说什么?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
"我说能不能放过我们一把。\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"我们这里也不打算\x01",
"和游击士协会扯上什么关系。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F你是傻瓜啊!\x01",
"我怎么可能会放过啊!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"哎呀哎呀~~~\x01",
"我本来不打算说难听的话。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"……你们两个。\x01",
"这里有我来挡住他。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
"你们快点去汇合地点。\x02",
)
CloseMessageWindow()
OP_44(0xA, 0xFF)
ChrTalk(
0x8,
"是!\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0x8, 5)
def lambda_D3D():
OP_8E(0xFE, 0xFFFFDD3C, 0x0, 0xAB7C, 0x1770, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_D3D)
ChrTalk(
0x9,
"太感谢了,队长!\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0x9, 5)
def lambda_D75():
OP_8E(0xFE, 0xFFFFDD3C, 0x0, 0xAB7C, 0x1770, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_D75)
OP_8C(0xA, 225, 800)
ChrTalk(
0xA,
"#050F想逃吗?喂!\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0xB, 9)
OP_8E(0xB, 0xFFFFE318, 0x0, 0xDEA8, 0x1F40, 0x0)
def lambda_DCF():
OP_6D(-8700, 0, 52500, 1000)
ExitThread()
QueueWorkItem(0xB, 2, lambda_DCF)
def lambda_DE7():
OP_97(0xFE, 0xFFFFEC14, 0xD034, 0xFFFEDB08, 0x36B0, 0x1)
ExitThread()
QueueWorkItem(0xB, 1, lambda_DE7)
SetChrChipByIndex(0xA, 1)
OP_8E(0xA, 0xFFFFDF94, 0x0, 0xCF08, 0x1F40, 0x0)
TurnDirection(0xA, 0xB, 0)
def lambda_E23():
label("loc_E23")
TurnDirection(0xFE, 0xA, 0)
OP_48()
Jump("loc_E23")
QueueWorkItem2(0xB, 0, lambda_E23)
SetChrChipByIndex(0xA, 2)
OP_51(0xA, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 8)
OP_94(0x1, 0xA, 0xB4, 0x3E8, 0xBB8, 0x0)
TurnDirection(0xA, 0xB, 0)
TurnDirection(0xB, 0xA, 400)
ChrTalk(
0xA,
(
"#050F你……\x02\x03",
"哼,算了。\x01",
"那样我最多换个猎物抓。\x02\x03",
"你身上应该带着\x01",
"很重要的情报吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"呵呵……\x01",
"那么简单就想抓住我吗?\x02",
)
)
CloseMessageWindow()
OP_96(0xB, 0xFFFFD760, 0x0, 0xC738, 0x3E8, 0x1F40)
def lambda_F2A():
label("loc_F2A")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_F2A")
QueueWorkItem2(0xA, 0, lambda_F2A)
ChrTalk(
0xA,
"#050F上等货色!\x02",
)
CloseMessageWindow()
SetChrFlags(0xB, 0x20)
SetChrChipByIndex(0xA, 1)
SetChrFlags(0xB, 0x4)
SetChrFlags(0x8, 0x80)
SetChrFlags(0x9, 0x80)
def lambda_F67():
OP_6C(315000, 1000)
ExitThread()
QueueWorkItem(0xB, 3, lambda_F67)
OP_94(0x1, 0xA, 0xB4, 0x12C, 0x3E8, 0x0)
OP_51(0xC, 0x1, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x1), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x1), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xC, 0x2, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x2), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x2), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xC, 0x3, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x3), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x3), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_69(0xC, 0x3E8)
OP_43(0x8, 0x1, 0x0, 0x6)
OP_93(0xA, 0xB, 0x44C, 0x3A98, 0x0)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 12)
SetChrChipByIndex(0xA, 2)
def lambda_FF6():
OP_94(0x1, 0xFE, 0x0, 0xC8, 0x1F4, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_FF6)
def lambda_100C():
OP_94(0x1, 0xFE, 0xB4, 0xC8, 0x1F4, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_100C)
OP_9E(0xB, 0x1E, 0x0, 0x12C, 0x1388)
WaitChrThread(0xB, 0x1)
def lambda_103A():
OP_94(0x1, 0xFE, 0x0, 0x5DC, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_103A)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 8)
OP_96(0xB, 0xFFFFCFF4, 0x514, 0xC47C, 0x514, 0x3A98)
Sleep(100)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 10)
def lambda_108C():
OP_96(0xFE, 0xFFFFD828, 0x514, 0xC670, 0x3E8, 0x2710)
ExitThread()
QueueWorkItem(0xB, 1, lambda_108C)
def lambda_10AA():
OP_99(0xFE, 0x0, 0x7, 0x7D0)
ExitThread()
QueueWorkItem(0xB, 2, lambda_10AA)
Sleep(200)
def lambda_10BF():
OP_96(0xFE, 0xFFFFDC10, 0x0, 0xCF08, 0x3E8, 0x2710)
ExitThread()
QueueWorkItem(0xA, 1, lambda_10BF)
SetChrChipByIndex(0xA, 1)
WaitChrThread(0xB, 0x1)
OP_96(0xB, 0xFFFFDAE4, 0x0, 0xBAB8, 0x3E8, 0x2710)
def lambda_10FE():
OP_99(0xB, 0x7, 0xB, 0x5DC)
ExitThread()
QueueWorkItem(0xB, 2, lambda_10FE)
OP_6B(2900, 1000)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 9)
OP_96(0xB, 0xFFFFDF94, 0x0, 0xC030, 0x1F4, 0xBB8)
def lambda_113E():
OP_6B(2500, 1000)
ExitThread()
QueueWorkItem(0xA, 1, lambda_113E)
OP_96(0xB, 0xFFFFD954, 0x0, 0xC3B4, 0x1F4, 0x1B58)
def lambda_1165():
OP_8C(0xA, 30, 500)
ExitThread()
QueueWorkItem(0xA, 2, lambda_1165)
OP_96(0xB, 0xFFFFE188, 0x0, 0xCA58, 0x1F4, 0x2710)
def lambda_118A():
OP_96(0xB, 0xFFFFDCD8, 0x0, 0xCB20, 0x1F4, 0x2710)
ExitThread()
QueueWorkItem(0xB, 1, lambda_118A)
OP_44(0xA, 0xFF)
OP_8C(0xA, 315, 1300)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 8)
def lambda_11C3():
OP_96(0xFE, 0xFFFFDAE4, 0x1F4, 0xCABC, 0x3E8, 0x1B58)
ExitThread()
QueueWorkItem(0xB, 1, lambda_11C3)
OP_8C(0xA, 135, 1600)
Sleep(350)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 10)
def lambda_11FD():
OP_99(0xFE, 0x0, 0x2, 0xBB8)
ExitThread()
QueueWorkItem(0xB, 2, lambda_11FD)
TurnDirection(0xA, 0xB, 0)
SetChrChipByIndex(0xA, 2)
OP_99(0xA, 0x6, 0x7, 0x5DC)
def lambda_1222():
OP_99(0xA, 0x5, 0x0, 0xBB8)
ExitThread()
QueueWorkItem(0xA, 0, lambda_1222)
def lambda_1232():
OP_99(0xFE, 0x2, 0x0, 0xBB8)
ExitThread()
QueueWorkItem(0xB, 2, lambda_1232)
def lambda_1242():
OP_96(0xB, 0xFFFFDD3C, 0x0, 0xDDE0, 0xFA0, 0x1B58)
ExitThread()
QueueWorkItem(0xB, 1, lambda_1242)
Sleep(200)
def lambda_1265():
OP_8C(0xA, 0, 500)
ExitThread()
QueueWorkItem(0xA, 3, lambda_1265)
Sleep(300)
def lambda_1278():
OP_99(0xFE, 0x0, 0x7, 0xBB8)
ExitThread()
QueueWorkItem(0xB, 2, lambda_1278)
def lambda_1288():
OP_96(0xA, 0xFFFFDD3C, 0x0, 0xDA5C, 0x7D0, 0x2710)
ExitThread()
QueueWorkItem(0xA, 1, lambda_1288)
def lambda_12A6():
OP_99(0xA, 0x0, 0x7, 0x7D0)
ExitThread()
QueueWorkItem(0xA, 2, lambda_12A6)
Sleep(450)
def lambda_12BB():
OP_99(0xFE, 0x7, 0xB, 0x5DC)
ExitThread()
QueueWorkItem(0xB, 2, lambda_12BB)
OP_8F(0xB, 0xFFFFE34A, 0x0, 0xDDE0, 0x3A98, 0x0)
WaitChrThread(0xA, 0x1)
PlayEffect(0x12, 0xFF, 0xFF, -8900, -1000, 56800, 0, 0, 0, 3000, 3000, 3000, 0xFF, 0, 0, 0, 0)
OP_7C(0x0, 0x1F4, 0xBB8, 0x64)
OP_96(0xB, 0xFFFFEC78, 0x0, 0xDD7C, 0x514, 0x1388)
Sleep(1000)
def lambda_1346():
OP_99(0xA, 0x7, 0x0, 0x7D0)
ExitThread()
QueueWorkItem(0xA, 2, lambda_1346)
def lambda_1356():
TurnDirection(0xA, 0xB, 400)
ExitThread()
QueueWorkItem(0xA, 3, lambda_1356)
Sleep(500)
ChrTalk(
0xA,
"#050F呼,很不错嘛!\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"用厚重的铁块\x01",
"把激情全都释放出来吗……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"你……\x01",
"和我有点相似啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F………………………\x02\x03",
"……你说什么……?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"你曾经因为\x01",
"自己的无用而被打败过……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
"你有那样的眼神。\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
"#050F………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#050F呵呵呵,不错嘛。\x02\x03",
"虽然我和你不相识,\x01",
"但却相当中意你……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"我也和你一样,\x01",
"对不中用的男人感到讨厌。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"我们就在这里\x01",
"互相和解怎么样?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
"#050F不要开玩笑!!\x02",
)
CloseMessageWindow()
LoadEffect(0x0, "battle\\\\mgaria0.eff")
def lambda_157C():
OP_9E(0xFE, 0x1E, 0x0, 0x3E8, 0x1388)
ExitThread()
QueueWorkItem(0xA, 1, lambda_157C)
PlayEffect(0x0, 0x0, 0xA, 0, 200, 0, 0, 0, 0, 900, 900, 900, 0xFF, 0, 0, 0, 0)
ChrTalk(
0xA,
(
"#050F我没发话听你说,\x01",
"你就给我见风使舵的瞎扯。\x02\x03",
"你真是个完完全全混淆视听的家伙!\x02",
)
)
CloseMessageWindow()
OP_44(0x8, 0xFF)
ChrTalk(
0xB,
"唔……\x02",
)
CloseMessageWindow()
def lambda_1637():
OP_9E(0xFE, 0x1E, 0x0, 0x3E8, 0x1388)
ExitThread()
QueueWorkItem(0xA, 1, lambda_1637)
ChrTalk(
0xA,
"#050F噢喔喔喔喔喔!\x02",
)
CloseMessageWindow()
PlayEffect(0x0, 0x1, 0xB, 0, 200, 0, 0, 0, 0, 900, 900, 900, 0xFF, 0, 0, 0, 0)
def lambda_16A7():
OP_9E(0xFE, 0x1E, 0x0, 0x3E8, 0x1388)
ExitThread()
QueueWorkItem(0xB, 1, lambda_16A7)
ChrTalk(
0xB,
"呀啊啊啊啊啊!\x02",
)
CloseMessageWindow()
OP_44(0xB, 0xFF)
SetChrFlags(0xB, 0x40)
SetChrFlags(0xA, 0x40)
OP_99(0xA, 0x0, 0x3, 0x7D0)
def lambda_16F4():
OP_99(0xA, 0x3, 0x7, 0x7D0)
ExitThread()
QueueWorkItem(0xA, 2, lambda_16F4)
def lambda_1704():
TurnDirection(0xA, 0xB, 400)
ExitThread()
QueueWorkItem(0xA, 3, lambda_1704)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 10)
def lambda_1722():
OP_99(0xFE, 0x0, 0x3, 0xBB8)
ExitThread()
QueueWorkItem(0xB, 3, lambda_1722)
def lambda_1732():
OP_8E(0xFE, 0xFFFFDD3C, 0x0, 0xDA5C, 0x4E20, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_1732)
def lambda_174D():
OP_8E(0xFE, 0xFFFFEC78, 0x0, 0xDD7C, 0x4E20, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_174D)
Sleep(200)
FadeToDark(1, 16777215, -1)
OP_0D()
OP_82(0x0, 0x0)
OP_82(0x1, 0x0)
FadeToBright(200, 16777215)
def lambda_1787():
OP_6C(0, 3000)
ExitThread()
QueueWorkItem(0xB, 2, lambda_1787)
Sleep(3000)
OP_99(0xB, 0x3, 0x7, 0x5DC)
OP_51(0xB, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xB, 11)
OP_99(0xB, 0x0, 0x3, 0x514)
ChrTalk(
0xB,
"哼……\x02",
)
CloseMessageWindow()
def lambda_17CC():
OP_99(0xA, 0x7, 0x0, 0x7D0)
ExitThread()
QueueWorkItem(0xA, 2, lambda_17CC)
def lambda_17DC():
TurnDirection(0xA, 0xB, 400)
ExitThread()
QueueWorkItem(0xA, 3, lambda_17DC)
ChrTalk(
0xA,
(
"#050F呼~~\x01",
"你不止是个嘴上说说的家伙嘛。\x02\x03",
"你们注定要\x01",
"被游击会完全盯上的……\x02",
)
)
CloseMessageWindow()
OP_9F(0xB, 0xFF, 0xFF, 0xFF, 0x64, 0x3E8)
ChrTalk(
0xA,
"#050F啊,什么!\x02",
)
CloseMessageWindow()
OP_9F(0xB, 0xFF, 0xFF, 0xFF, 0x0, 0x3E8)
ChrTalk(
0xA,
(
"#050F这,这是……\x01",
"分身的战技!?\x02",
)
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"在昏暗树丛的空隙中\x01",
"有隐隐约约的人影漂浮着。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("男人的声音")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"《呵呵呵……》\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetChrName("男人的声音")
AnonymousTalk(
(
"《那一击不错呀,\x01",
"不过现在好像很迷茫的样子啊。》\x02",
)
)
CloseMessageWindow()
SetChrName("男人的声音")
AnonymousTalk(
"《那种迷茫会把刀法弄乱的。》\x02",
)
CloseMessageWindow()
OP_56(0x0)
ChrTalk(
0xA,
"#050F什么!?\x02",
)
CloseMessageWindow()
SetChrName("男人的声音")
AnonymousTalk(
(
"《要成为修罗的话\x01",
"就必须要有舍弃一切的觉悟。》\x02",
)
)
CloseMessageWindow()
AnonymousTalk(
(
"《对于想活下去的人来说……\x01",
"愤怒和悲伤的事还是忘了比较好。》\x02",
)
)
CloseMessageWindow()
AnonymousTalk(
"《就这样吧,再见啰……》\x02",
)
CloseMessageWindow()
AnonymousTalk(
"《………………………》\x02",
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"树丛间漂浮的人影\x01",
"隐入黑暗消失了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
def lambda_1AC4():
OP_6D(-5100, 1400, 56900, 1100)
ExitThread()
QueueWorkItem(0xA, 2, lambda_1AC4)
Sleep(1100)
ChrTalk(
0xA,
(
"#050F…………………………\x02\x03",
"……竟然叫我忘了它……\x02\x03",
"这种事……\x01",
"……怎么可能办得到嘛……\x02",
)
)
CloseMessageWindow()
def lambda_1B41():
OP_9E(0xFE, 0x1E, 0x0, 0x7D0, 0x1388)
ExitThread()
QueueWorkItem(0xA, 0, lambda_1B41)
def lambda_1B5B():
OP_6B(2400, 2000)
ExitThread()
QueueWorkItem(0xB, 3, lambda_1B5B)
def lambda_1B6B():
OP_67(0, 6000, -10000, 2300)
ExitThread()
QueueWorkItem(0xB, 1, lambda_1B6B)
def lambda_1B83():
OP_6C(54000, 2000)
ExitThread()
QueueWorkItem(0xB, 0, lambda_1B83)
OP_99(0xA, 0x0, 0x3, 0x7D0)
Fade(1000)
def lambda_1BA1():
OP_99(0xA, 0x3, 0x7, 0x7D0)
ExitThread()
QueueWorkItem(0xA, 2, lambda_1BA1)
ChrTalk(
0xA,
"#6P#20A呜喔喔喔喔喔!\x05\x02",
)
Sleep(1500)
FadeToDark(2000, 0, -1)
OP_0D()
OP_A2(0x3FF)
NewScene("ED6_DT01/T2100 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_2_368 end
# ---------------------------------------------------------------------------
# Auto-generated ED6 scenario script (decompiled bytecode) -- not hand-written
# Python style.  Function_3: scripted walk route for the character bound to
# 0xFE, with camera lambdas queued on work slots of chrs 0xFE/0xA/0x9.
# Opcode roles (OP_8E = move-to-coordinates, OP_6C/OP_67/OP_6D = camera) are
# inferred from usage -- TODO confirm against the decompiler's opcode table.
# ---------------------------------------------------------------------------
def Function_3_1BEF(): pass

label("Function_3_1BEF")
    SetChrFlags(0xFE, 0x40)
    OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0xBB8, 0x0)
    OP_8E(0xFE, 0xFFFFDF30, 0x0, 0x186A0, 0x2710, 0x0)
    OP_8E(0xFE, 0xFFFFD634, 0x0, 0x15180, 0x2710, 0x0)
    def lambda_1C31():
        OP_6C(324000, 3000)
        ExitThread()
    QueueWorkItem(0xFE, 2, lambda_1C31)
    def lambda_1C41():
        OP_67(0, 5200, -10000, 3000)
        ExitThread()
    QueueWorkItem(0xA, 3, lambda_1C41)
    def lambda_1C59():
        OP_6D(-8000, 1300, 66400, 2700)
        ExitThread()
    QueueWorkItem(0x9, 3, lambda_1C59)
    ClearMapFlags(0x1)
    OP_8E(0xFE, 0xFFFFE0C0, 0x0, 0x11A08, 0x2EE0, 0x0)
    OP_8E(0xFE, 0xFFFFE7C8, 0x0, 0x10CC0, 0x2EE0, 0x0)
    OP_8E(0xFE, 0xFFFFEC14, 0x0, 0xFBF4, 0x32C8, 0x0)
    OP_8E(0xFE, 0xFFFFE7C8, 0x0, 0xE54C, 0x32C8, 0x0)
    OP_8E(0xFE, 0xFFFFDA80, 0x0, 0xBD74, 0x32C8, 0x0)
    Return()
    # Function_3_1BEF end
def Function_4_1CD5(): pass
label("Function_4_1CD5")
SetChrFlags(0xFE, 0x40)
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0xBB8, 0x0)
OP_8E(0xFE, 0xFFFFDF30, 0x0, 0x186A0, 0x2710, 0x0)
OP_8E(0xFE, 0xFFFFD634, 0x0, 0x15180, 0x2AF8, 0x0)
OP_8E(0xFE, 0xFFFFE0C0, 0x0, 0x11A08, 0x2EE0, 0x0)
OP_8E(0xFE, 0xFFFFE7C8, 0x0, 0x10CC0, 0x2EE0, 0x0)
OP_8E(0xFE, 0xFFFFEC14, 0x0, 0xFBF4, 0x2EE0, 0x0)
OP_8E(0xFE, 0xFFFFE7C8, 0x0, 0xE54C, 0x2EE0, 0x0)
OP_8E(0xFE, 0xFFFFDF30, 0x0, 0xC670, 0x2EE0, 0x0)
Return()
# Function_4_1CD5 end
# Function_5: longer staged sequence -- chr 0xFE walks in, an effect is
# played (PlayEffect + OP_7C, presumably an impact/shake), and chrs 0x8/0x9
# turn toward and approach chr 0xA.  Auto-generated decompiled bytecode;
# opcode roles are inferred from usage, not confirmed.
def Function_5_1D76(): pass

label("Function_5_1D76")
    OP_8E(0xFE, 0x0, 0x0, 0x20850, 0x2EE0, 0x0)
    OP_8E(0xFE, 0xFFFFDF30, 0x0, 0x186A0, 0x32C8, 0x0)
    OP_8E(0xFE, 0xFFFFD634, 0x0, 0x15180, 0x36B0, 0x0)
    OP_8E(0xFE, 0xFFFFE0C0, 0x0, 0x11A08, 0x3A98, 0x0)
    OP_44(0x8, 0x2)
    OP_44(0x8, 0x3)
    SetChrFlags(0xFE, 0x4)
    OP_96(0xFE, 0xFFFFE0C0, 0x5DC, 0x10360, 0x7D0, 0x1F40)
    def lambda_1DF0():
        OP_6D(-7300, 0, 56400, 600)
        ExitThread()
    QueueWorkItem(0xA, 2, lambda_1DF0)
    SetChrFlags(0xA, 0x20)
    SetChrChipByIndex(0xA, 2)
    def lambda_1E12():
        OP_99(0xA, 0x0, 0x7, 0x7D0)
        ExitThread()
    QueueWorkItem(0xA, 0, lambda_1E12)
    def lambda_1E22():
        OP_6C(24000, 800)
        ExitThread()
    QueueWorkItem(0xA, 3, lambda_1E22)
    OP_96(0xFE, 0xFFFFE37C, 0x0, 0xDC50, 0x7D0, 0x1F40)
    # Visual/audio effect at fixed map coordinates.
    PlayEffect(0x12, 0xFF, 0xFF, -6800, -1000, 55400, 0, 0, 0, 3000, 3000, 3000, 0xFF, 0, 0, 0, 0)
    OP_7C(0x0, 0x1F4, 0xBB8, 0x64)
    OP_44(0x8, 0xFF)
    OP_44(0x9, 0xFF)
    TurnDirection(0x8, 0xA, 0)
    TurnDirection(0x9, 0xA, 0)
    def lambda_1EA5():
        OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0x2710, 0x0)
        ExitThread()
    QueueWorkItem(0x8, 1, lambda_1EA5)
    def lambda_1EBB():
        OP_96(0xFE, 0xFFFFEA84, 0x0, 0xD5AC, 0x1F4, 0x1770)
        ExitThread()
    QueueWorkItem(0x9, 1, lambda_1EBB)
    WaitChrThread(0x9, 0x1)
    TurnDirection(0x9, 0xA, 400)
    def lambda_1EE5():
        OP_94(0x1, 0xFE, 0xB4, 0x12C, 0x1F4, 0x0)
        ExitThread()
    QueueWorkItem(0x9, 1, lambda_1EE5)
    WaitChrThread(0x8, 0x1)
    def lambda_1F00():
        OP_94(0x1, 0xFE, 0xB4, 0x1F4, 0x3E8, 0x0)
        ExitThread()
    QueueWorkItem(0x8, 1, lambda_1F00)
    OP_6A(0x0)
    ClearMapFlags(0x1)
    Return()
    # Function_5_1D76 end
def Function_6_1F19(): pass
label("Function_6_1F19")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_1F6C")
OP_51(0xC, 0x1, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x1), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x1), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xC, 0x2, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x2), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x2), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xC, 0x3, (scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x3), scpexpr(EXPR_GET_CHR_WORK, 0xA, 0x3), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_69(0xC, 0x0)
OP_48()
Jump("Function_6_1F19")
label("loc_1F6C")
Return()
# Function_6_1F19 end
def Function_7_1F6D(): pass
label("Function_7_1F6D")
FadeToDark(300, 0, 100)
SetChrName("")
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"北 卢安市\x01",
"南 艾尔·雷登 175塞尔矩\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
TalkEnd(0xFF)
Return()
# Function_7_1F6D end
def Function_8_1FCA(): pass
label("Function_8_1FCA")
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x98, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_20BF")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x0, 0x3C)
Sleep(500)
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x287, 1)"), scpexpr(EXPR_END)), "loc_2042")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"得到了\x07\x02",
"死之刃2\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x4C3)
Jump("loc_20BC")
label("loc_2042")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk(
(
"宝箱里装有\x07\x02",
"死之刃2\x07\x00",
"。\x01",
"不过现有的数量太多,\x07\x02",
"死之刃2\x07\x00",
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x0, 60)
OP_70(0x0, 0x0)
label("loc_20BC")
Jump("loc_2114")
label("loc_20BF")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么东西都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x8C)
label("loc_2114")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_8_1FCA end
SaveToFile()
Try(main)
| [
"Hiromi.Kaede@gmail.com"
] | Hiromi.Kaede@gmail.com |
513df6f938a5ae43120df15ddbb973bad05c57ab | d0181f0cea5d45a24b2ca57286bc9059d5be55a6 | /play.py | 7ada1317e180dce5edaf0eb9153709a788475ed6 | [] | no_license | Miatosz/Bomberman-game | af68ed18883a6130d983896748141aebd0682a7c | 461b566e9b9f0bb428c874ff7306ed25b8cd3109 | refs/heads/master | 2022-11-16T20:19:13.127911 | 2020-07-11T16:08:13 | 2020-07-11T16:08:13 | 278,895,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | import board, character, pygame
class Play:
    # Entry-point wrapper: initialises pygame, builds the player and the
    # board, and draws the board once.
    def __init__(self, level):
        pygame.init()
        player = character.Character()
        b = board.Board(player)
        # NOTE(review): the `level` parameter is accepted but never used --
        # the hard-coded 1 below looks like it should be `level`; confirm
        # against board.Board.draw before changing.
        b.draw(1, player)
| [
"drelewski@gmail.com"
] | drelewski@gmail.com |
6423e20d4242199e03d2bad7d1c5abe7f05adf75 | 5db4a741d248a96e88634a6bbd5444d35e511d8e | /gr-CyberRadio/python/__init__.py | f41fe6c79807900397e732b66d637f85e4bd3581 | [] | no_license | luwangg/gr-cyberradio | f223c4aff47c3325cf3930b472af6aca802fb28a | e4ea67f70523aca3a3d221c512ccdfa0984347ef | refs/heads/master | 2020-04-12T05:30:13.658415 | 2018-01-25T04:48:01 | 2018-01-25T04:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,597 | py | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio CYBERRADIO module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the CyberRadio namespace
try:
# this might fail if the module is python-only
from CyberRadio_swig import *
except ImportError:
pass
# import any pure python here
from NDR304_source import NDR304_source
from NDR304_coherent_control import NDR304_coherent_control
from NDR308_source import NDR308_source
from NDR470_source import NDR470_source
from NDR472_source import NDR472_source
from NDR551_source import NDR551_source
from NDR651_source import NDR651_source
from ndr_control import ndr_control
from NDR_demo_control import NDR_demo_control
from generic_tuner_control_block import generic_tuner_control_block
from generic_ddc_control_block import generic_ddc_control_block
from generic_group_control_block import generic_group_control_block
from generic_radio_interface_block import generic_radio_interface_block
from ndr804ptt_wideband_spectral_source import ndr804ptt_wideband_spectral_source
from ndr804ptt_snapshot_fft_source import ndr804ptt_snapshot_fft_source
from ndr804ptt_narrowband_source import ndr804ptt_narrowband_source
from file_like_object_source import file_like_object_source
from py_msg_strobe import py_msg_strobe
from qt_freq_time_sink_iq import qt_freq_time_sink_iq
from qt_freq_time_sink_real import qt_freq_time_sink_real
from safe_audio_sink import safe_audio_sink
from sinad_calc_block import sinad_calc_block
from generic_ndr_command_block import generic_ndr_command_block
from generic_radio_control_block import generic_radio_control_block
from freq_msg_converter import freq_msg_converter
from freq_msg_strobe import freq_msg_strobe
from log_mag_fft import log_mag_fft
#
| [
"crs-nh@users.noreply.github.com"
] | crs-nh@users.noreply.github.com |
6bca8503d36403378d588fd22ef3051a3e505a38 | a33be4d406a44aba3c3b5b5390e7e875d951eee4 | /simulator/send.py | 093bd703ccf786456fd745ab609ca21708614bf4 | [] | no_license | CyrilBellotti/Projet_SpeDev | 5c9280ba3f241aba399174e3a3e5ec26a76f9e04 | dfd58a67c807a9046a1a938f1359b4fc46286076 | refs/heads/master | 2023-01-09T02:11:02.444882 | 2019-10-09T11:11:05 | 2019-10-09T11:11:05 | 213,896,520 | 0 | 0 | null | 2023-01-01T12:10:55 | 2019-10-09T11:10:27 | TypeScript | UTF-8 | Python | false | false | 1,064 | py | #!/usr/bin/env python
import pika
import json
import random
import time
# Simulated sensor publisher: emits one reading every few seconds to the
# "atlantis-exchange" fanout exchange, so every bound queue receives it.
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='172.20.10.11', credentials=pika.PlainCredentials("cyril", "azerty")))
channel = connection.channel()
channel.exchange_declare(exchange='atlantis-exchange', durable='true', exchange_type="fanout")

# Room/device identifiers; one is picked at random for each message.
listUid = ["Kitchen", "Garage", "Device1", "Device2", "Device3"]

MESSAGE_COUNT = 10000   # total messages to publish before closing
SEND_PERIOD_S = 5       # seconds to wait between messages

for _ in range(MESSAGE_COUNT):
    # NOTE(review): the sensor uids are always "TempKitchen"/"LumKitchen"
    # even when the device uid is another room -- confirm this is intended.
    message2 = {
        "uid": random.choice(listUid),
        "sensors": [
            {
                "type": "Temperature",
                "value": random.randint(-30, 40),
                "uid": "TempKitchen"
            },
            {
                "type": "Luminosity",
                "value": random.randint(40, 150),
                "uid": "LumKitchen"
            }
        ]
    }
    channel.basic_publish(exchange='atlantis-exchange', routing_key='', properties=pika.BasicProperties(content_type="application/json"), body=json.dumps(message2))
    print(" [x] Sent sensor with value")
    time.sleep(SEND_PERIOD_S)

connection.close()
| [
"bellotticyril@gmail.com"
] | bellotticyril@gmail.com |
661bbdf1650ffa78f3e6c3e1f60eb557c81f47bd | 4476597f6af6b9cd4614bf558553a7eb57c9f993 | /kaggle/数据结构.py | f12cc0c6bfdb5348c92a61533aff811eacaf725e | [] | no_license | zhengziqiang/mypython | 07dff974f475d1b9941b33518af67ece9703691a | 7a2b419ff59a31dc937666e515490295f6be8a08 | refs/heads/master | 2021-07-14T20:01:34.231842 | 2017-04-19T01:18:25 | 2017-04-19T01:18:25 | 56,583,430 | 3 | 1 | null | 2020-07-23T11:46:35 | 2016-04-19T09:26:39 | Python | UTF-8 | Python | false | false | 131 | py | #coding=utf-8
import pandas as pd
# Open the Kaggle HDF5 store read-only and report how many rows the
# "train" dataset contains.  (Python 2 syntax: bare `print` statement.)
with pd.HDFStore("/home/zzq/kaggle/train.h5","r") as train:
    df=train.get("train")
    print len(df)
| [
"1174986943@qq.com"
] | 1174986943@qq.com |
cf362caab39d92d55368251d63e897f4faef0053 | e664ce599c5e31e787f004f68970ace2202e32b9 | /blog/models.py | 1e63f16cb15c8fe5fa759f81cb9fcaa7443d04de | [] | no_license | AdamC66/Django-Blog-P4 | bd12d0ad7fb7f52511422f7310b8845d96c0f678 | d11c33434ade968fe789d027981a578ebd248f44 | refs/heads/master | 2020-06-27T06:13:42.997716 | 2019-07-31T14:18:51 | 2019-07-31T14:18:51 | 199,866,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from django.db import models
class Article(models.Model):
    # A blog post.  `draft` flags unpublished posts; `author` is stored as
    # a free-text name (no foreign key to a user model).
    title = models.CharField(max_length = 255, null=False)
    body = models.TextField(blank = True, null=False)
    draft = models.BooleanField()
    published_date = models.DateField()
    author = models.CharField(max_length = 63, null = False)
    def __str__(self):
        # Human-readable label (e.g. in the Django admin).
        return (f'{self.title}, By: {self.author}')
class Comment(models.Model):
    """A reader comment attached to an Article (reverse accessor: ``article.comments``)."""
    name = models.CharField(max_length=255)               # commenter's display name
    created_at = models.DateTimeField(auto_now_add=True)  # set once, on insert
    message = models.TextField()
    article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='comments')

    def __str__(self):
        # Consistency fix: Article defines __str__ but Comment did not, so
        # comments rendered as the default "Comment object (n)" in the admin.
        return (f'{self.name}: {self.message[:50]}')
| [
"adam.cote66@gmail.com"
] | adam.cote66@gmail.com |
ad447df8131a08aa324a0422bb83f8862c30bfac | 5b68845f5e24085d6f85fbc805251ffb690eae6e | /Pmw/Pmw_2_0_1/lib/PmwLoader.py | 0b6f2871ca74be0e88296a6a9e9fac7877c30d2c | [] | no_license | rdb/panda3d-thirdparty | 15bf5646635b812c7c68f6dfc7a9a43013c2b7c9 | a7db4147cfdf22c639e79fc20843496966038e6c | refs/heads/main | 2023-08-31T18:27:12.388222 | 2023-08-24T10:22:02 | 2023-08-24T10:22:02 | 56,599,172 | 33 | 29 | null | 2023-02-19T15:47:23 | 2016-04-19T13:29:06 | M4 | UTF-8 | Python | false | false | 6,220 | py | # This module is used by the Pmw package system.
# The PmwLoader class can be used to simulate a python module,
# but also supports importing of submodules on demand. This technique
# reduces startup time because Pmw submodules which are not used are
# not loaded.
#
# The PmwLoader class also supports runtime selection of the Pmw
# version(s) to use.
import sys
import os
import types
_PMW_DEF = 'Pmw.def' # Pmw definition file
_BASEMODULE = 'Base' # Name of Base module
class PmwLoader:
    """Stand-in for the ``Pmw`` module object.

    An instance of this class replaces the real package in ``sys.modules``
    so that Pmw sub-modules are imported lazily (on first attribute access,
    via :meth:`__getattr__`) and so that the Pmw version(s) to use can be
    selected at runtime before anything is loaded.
    """

    def __init__(self, dirpath, instdirs, dirs):
        # dirpath  -- filesystem path of the Pmw package directory.
        # instdirs -- installed 'Pmw_<ver>' directory names; the first entry
        #             determines the default version.
        # dirs     -- all version directories, including 'Alpha_<ver>' ones.
        self._dirpath = dirpath
        self._instdirs = instdirs
        self._dirs = dirs
        self._initialised = 0
        # 'Pmw_2_0_1'[4:] -> '2_0_1' -> '2.0.1'
        self._version = str.replace(instdirs[0][4:], '_', '.')
        self._alpha_versions = ()

    #======================================================================
    # Public methods. These methods will be seen as "module methods".

    def setversion(self, version):
        """Select the Pmw version to load; only valid before initialisation."""
        if self._version == version:
            return
        if self._initialised:
            raise ValueError('Cannot change Pmw version after initialisation')
        self._version = version

    def setalphaversions(self, *alpha_versions):
        """Select alpha versions searched ahead of the release version."""
        if self._alpha_versions == alpha_versions:
            return
        if self._initialised:
            raise ValueError('Cannot change Pmw alpha versions after initialisation')
        self._alpha_versions = alpha_versions

    def version(self, alpha = 0):
        """Return the selected version string, or the alpha-version tuple."""
        if alpha:
            return self._alpha_versions
        else:
            return self._version

    def installedversions(self, alpha = 0):
        """Return installed version strings (alphas newest-first)."""
        rtn = []
        if alpha:
            dirs = [x for x in self._dirs if x[:5] == 'Alpha']
            dirs.sort()
            dirs.reverse()
            for dir in dirs:
                # 'Alpha_<ver>'[6:] -> '<ver>' with '_' -> '.'
                rtn.append(str.replace(dir[6:], '_', '.'))
        else:
            for dir in self._instdirs:
                rtn.append(str.replace(dir[4:], '_', '.'))
        return rtn

    #======================================================================
    # Private methods

    def _getmodule(self,modpath):
        """Import *modpath* and return the module object."""
        __import__(modpath)
        mod = sys.modules[modpath]
        return mod

    def _initialise(self):
        """Load PmwBase and the widget/function tables from each Pmw.def."""
        searchpath = []
        for version in self._alpha_versions:
            alphadir = '_Pmw.Alpha_%s.lib' % str.replace(version, '.', '_')
            searchpath.append(alphadir)
        libdir = '_Pmw.Pmw_%s.lib' % str.replace(self._version, '.', '_')
        searchpath.append(libdir)
        # Create attributes for the PmwBase classes and functions.
        for path in searchpath:
            try:
                basemodule = self._getmodule(path + '.Pmw' + _BASEMODULE)
                break
            except ImportError as msg:
                if path == searchpath[-1]:
                    # No PmwBase module found.
                    raise ImportError(msg)
        for k,v in list(basemodule.__dict__.items()):
            if k[0] != '_' and type(v) != types.ModuleType:
                self.__dict__[k] = v
        # Set the Pmw definitions from the Pmw.def file.
        #former variable name 'dict' clashed when checking isinstance(v, dict)
        dictionary = {
            '_widgets' : {},
            '_extraWidgets' : {},
            '_functions' : {},
            '_modules' : {},
        }
        for name in list(dictionary.keys()):
            self.__dict__[name] = {}
        searchpath.reverse()
        for path in searchpath:
            pathbit = os.path.join(*tuple(str.split(path[5:], '.')))
            lpath = os.path.join(self._dirpath, pathbit)
            d = {}
            # BUGFIX: open() was called inline and its file handle never
            # closed; read the definition file inside a context manager.
            deffile = os.path.join(lpath, _PMW_DEF)
            with open(deffile) as f:
                source = f.read()
            exec(compile(source, deffile, 'exec'), d)
            for k,v in list(d.items()):
                if k in dictionary:
                    if type(v) == tuple:
                        for item in v:
                            modpath = path + '.Pmw' + item
                            dictionary[k][item] = modpath
                    elif isinstance(v, dict):
                        for k1, v1 in list(v.items()):
                            modpath = path + '.Pmw' + v1
                            dictionary[k][k1] = modpath
        self.__dict__.update(dictionary)
        # Cache key lists used by __getattr__ for membership tests.
        self._widgets_keys = list(self._widgets.keys())
        self._extraWidgets_keys = list(self._extraWidgets.keys())
        self._functions_keys = list(self._functions.keys())
        self._modules_keys = list(self._modules.keys())
        self._initialised = 1

    def __getattr__(self, name):
        """Resolve *name* lazily: widget, function, module or extra widget."""
        if not self._initialised:
            self._initialise()
            # Beware: _initialise may have defined 'name'
            if name in list(self.__dict__.keys()):
                return self.__dict__[name]
        # The requested attribute is not yet set. Look it up in the
        # tables set by Pmw.def, import the appropriate module and
        # set the attribute so that it will be found next time.
        if name in self._widgets_keys:
            # The attribute is a widget name.
            mod = self._getmodule(self._widgets[name])
            cls = getattr(mod,name)
            self.__dict__[name] = cls
            return cls
        if name in self._functions_keys:
            # The attribute is a function from one of the modules.
            modname = self._functions[name]
            mod = self._getmodule(modname)
            func = getattr(mod, name)
            self.__dict__[name] = func
            return func
        if name in self._modules_keys:
            # The attribute is a module
            mod = self._getmodule(self._modules[name])
            self.__dict__[name] = mod
            return mod
        if name in self._extraWidgets_keys:
            # XXX I should import them all, once I've started.
            # The attribute is a widget name in a module of another name
            modname = self._extraWidgets[name]
            mod = self._getmodule(modname)
            cls = getattr(mod, name)
            self.__dict__[name] = cls
            return cls
        # The attribute is not known by Pmw, report an error.
        raise AttributeError(name)
| [
"git@rdb.name"
] | git@rdb.name |
037b7b147c7f3197184c2e5c46416288ed605101 | 0332c80c7829075759e504d484eae4436336fa3c | /__main__.py | b3595d6191166ffb192cd0d68cec7d39df07923d | [] | no_license | AshishYUO/PySnake | bc804ce94729ca0b31fbb9b9da85ef1be2681acb | 1e01be05b56b7ef2c4344be16caaa925a91331be | refs/heads/master | 2020-05-16T16:51:49.588778 | 2019-09-23T14:56:34 | 2019-09-23T14:56:34 | 183,176,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from tkinter import Tk
import __player__ as pl
def init():
    """Create the main window and hand control to the Snake game loop."""
    root = Tk()
    root.title("Snake")
    snake_player = pl.Player(root)
    snake_player.Update(root)
    root.mainloop()


if __name__ == "__main__":
    init()
"noreply@github.com"
] | AshishYUO.noreply@github.com |
430c663277731eab65f3d86a584b42573a3df135 | 78db5bc74181173f2d00bea409997a64b4682adf | /venv/lib/python3.9/site-packages/pip/_vendor/pep517/compat.py | 5ae94f60b698b9a6b387c60dc0ab6bd3764e319e | [
"MIT"
] | permissive | CiscoDevNet/meraki-code | dfe680f077ebd053a3b663f1434f648f5a91b541 | d031aab82e3fa5ce7cf57b257fef8c9a4c63d71e | refs/heads/master | 2023-05-28T18:43:28.848983 | 2022-04-11T19:45:19 | 2022-04-11T19:45:19 | 188,288,487 | 67 | 60 | MIT | 2023-05-23T00:51:58 | 2019-05-23T18:43:15 | Python | UTF-8 | Python | false | false | 814 | py | """Python 2/3 compatibility"""
import json
import sys
# Handle reading and writing JSON in UTF-8, on Python 3 and 2.
if sys.version_info[0] >= 3:
    # Python 3: json works on text streams, so open files in text mode
    # with an explicit UTF-8 encoding.
    def write_json(obj, path, **kwargs):
        with open(path, 'w', encoding='utf-8') as fp:
            json.dump(obj, fp, **kwargs)

    def read_json(path):
        with open(path, encoding='utf-8') as fp:
            return json.load(fp)

else:
    # Python 2: json works on byte streams; hand the encoding to json itself.
    def write_json(obj, path, **kwargs):
        with open(path, 'wb') as fp:
            json.dump(obj, fp, encoding='utf-8', **kwargs)

    def read_json(path):
        with open(path, 'rb') as fp:
            return json.load(fp)

# FileNotFoundError does not exist on Python 2; alias it to IOError there.
try:
    FileNotFoundError = FileNotFoundError
except NameError:
    FileNotFoundError = IOError
| [
"agentle@cisco.com"
] | agentle@cisco.com |
f4d754f2961e43900540a4af7b2ecdaf0117b821 | 9cd86202b6160f423220e20203038d4576a3a40c | /case1/app.py | ebf9537a80a3b75ec38ed9d47bb3837d868f1a9d | [] | no_license | rush8170/docker | e40e5329f1afd647e90a0b1e072e117e75ad4514 | ec96af2a3b0a008c18b7bef777b4873c5aeb9a1b | refs/heads/master | 2020-06-01T17:49:19.276420 | 2019-06-08T10:24:43 | 2019-06-08T10:24:43 | 190,871,331 | 0 | 0 | null | 2019-10-31T17:45:26 | 2019-06-08T09:49:40 | Python | UTF-8 | Python | false | false | 600 | py | from flask import Flask, render_template, request
app = Flask(__name__)


@app.route('/')
def hello_world():
    """Serve the product-entry form (removed the dead commented-out return)."""
    return render_template('prod.html')
#import sys
@app.route('/action', methods=["POST"])
def product():
    """Handle the form POST: echo the product name and append it to backup/data.txt."""
    product = request.form['product_name']
    print(product)
    try:
        # BUGFIX/robustness: use a context manager so the file handle is
        # closed even if the write raises (the old code leaked `fh` then).
        with open("backup/data.txt", "a+") as fh:
            fh.write(product + '\n')
    except IOError:
        # Best-effort persistence: report the failure but still acknowledge
        # the request, matching the original behaviour.
        print("Error")
    # TODO: write code to store in volume storage
    return "Product received:" + product
if __name__=='__main__' :
    # NOTE(review): debug=True together with host='0.0.0.0' exposes the
    # Werkzeug debugger to the whole network -- acceptable only for a
    # throwaway demo/container; confirm before deploying anywhere real.
    app.run(debug=True,host='0.0.0.0')
| [
"shahrushil@yahoo.com"
] | shahrushil@yahoo.com |
a1be2fca03092fcb54fe9d3430814646fdc8b536 | 989880f013fb35bbb67ea9106d072c157015dba8 | /price_aggregator/migrations/0001_initial.py | 903afb46dd0f5026a7365b4f9108107f138333b8 | [] | no_license | inuitwallet/price_aggregator | b88bf1c2e862d28693f0ecf507fbba89a8658911 | 838e600c3ab1270e7af4f916d2b3a11f4f502d03 | refs/heads/master | 2021-04-29T20:46:00.160398 | 2021-01-04T09:48:04 | 2021-01-04T09:48:04 | 121,603,435 | 0 | 1 | null | 2021-04-16T20:48:58 | 2018-02-15T07:50:08 | Python | UTF-8 | Python | false | false | 1,612 | py | # Generated by Django 2.0.2 on 2018-02-14 20:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 2.0.2): creates the
    # AggregatedPrice, Currency and Provider tables, then adds the
    # Currency<->Provider many-to-many and the AggregatedPrice->Currency
    # foreign key.  Change the schema via new migrations, not by editing
    # this file.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AggregatedPrice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('aggregated_price', models.DecimalField(decimal_places=10, max_digits=25)),
            ],
        ),
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='Provider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.AddField(
            model_name='currency',
            name='supported_providers',
            field=models.ManyToManyField(to='price_aggregator.Provider'),
        ),
        migrations.AddField(
            model_name='aggregatedprice',
            name='currency',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='price_aggregator.Currency'),
        ),
    ]
| [
"contact@inuit-wallet.co.uk"
] | contact@inuit-wallet.co.uk |
1499ffc21bf07911e411682fdfe4260bab3863f1 | d1a0293ed4cf978a37b4b186896df209d9d32fc7 | /pyboleto/bank/febraban.py | 1ba3a8c499e6a3d3555887b0f532914524a610d5 | [
"BSD-3-Clause"
] | permissive | thiagosm/pyboleto | 408a2a90348303481426c1e47cc92184a026294e | 95151eb9738cb6903d2030b686e4c21afd8f0c39 | refs/heads/master | 2023-07-06T17:59:01.095935 | 2023-06-30T13:36:48 | 2023-06-30T13:36:48 | 6,001,976 | 20 | 14 | NOASSERTION | 2021-05-29T22:42:09 | 2012-09-28T19:23:02 | Python | UTF-8 | Python | false | false | 3,568 | py | # -*- coding: utf-8 -*-
from ..data import BoletoData, CustomProperty
from decimal import Decimal
import re
class BoletoArrecadacao(BoletoData):
    """Febraban "arrecadacao" (utility/tax collection) boleto.

    Builds the 44-digit collection barcode and its human-typeable line.
    The check-digit helpers ``modulo10``/``modulo11`` are inherited from
    ``BoletoData``.
    """

    # Collection segment (see the barcode docstring for codes); the stored
    # default is 1, and the getters below coerce unsupported values to '8'.
    carteira = CustomProperty('carteira', 1)

    def __init__(self):
        BoletoData.__init__(self)
        self.moeda_formato = '8'  # currency indicator

    def dac11(self, num):
        """Febraban modulo-11 check digit (DAC): remainders 0 and 1 map to
        digit 0; otherwise the digit is ``11 - remainder``."""
        r = self.modulo11(num, 9, 1)
        if r in [0, 1]:
            return 0
        else:
            return 11 - r

    @property
    def identificacao(self):
        # Company identification: the agreement code reduced to digits only.
        return re.sub('[^0-9]', '', self.convenio)

    @property
    def linha_digitavel(self):
        """Build the typeable line from the barcode: four 11-digit groups,
        each followed by its own check digit ("ddddddddddd-d" x 4).

        This is what the customer types when the barcode is unreadable.
        """
        if self.carteira not in ['6', '8']:
            self.carteira = '8'
        c1 = self.barcode[0:11]
        c2 = self.barcode[11:22]
        c3 = self.barcode[22:33]
        c4 = self.barcode[33:44]
        # Segment 6 uses modulo-10 check digits; everything else modulo-11.
        if self.carteira == '6':
            digit = self.modulo10
        else:
            digit = self.dac11
        d1 = str(digit(c1))
        d2 = str(digit(c2))
        d3 = str(digit(c3))
        # BUGFIX: d4 was never assigned (d3 was computed twice) and the
        # format string was *called* instead of %-interpolated, so this
        # property always raised.
        d4 = str(digit(c4))
        linha = '%s-%s %s-%s %s-%s %s-%s' % (c1, d1, c2, d2, c3, d3, c4, d4)
        return str(linha)

    @property
    def campo_livre(self):
        """Free field (25 digits): [company id, part 2][due date YYYYMMDD]
        [document number zero-filled to the remaining width]."""
        doc_zfill = 17
        identp2 = ''
        if self.carteira in ['6', '9']:
            # Segments 6 and 9 embed 4 extra identification digits here.
            identp2 = self.identificacao[4:8]
            doc_zfill -= len(identp2)
        # BUGFIX: the original format string had two placeholders for three
        # values, which raised TypeError; the document number needs its own
        # '%s'.  Widths still total 25 digits (0/4 + 8 + 17/13).
        content = "%s%8s%s" % (identp2,
                               self.data_vencimento.strftime('%Y%m%d'),
                               str(self.numero_documento).zfill(doc_zfill)
                               )
        return str(content)

    @property
    def barcode(self):
        """Build the 44-digit collection barcode (Febraban layout).

        Segment ("carteira") codes:
            1. City halls                  5. Government bodies
            2. Sanitation                  6. Carnets and similar, or other
            3. Electricity and gas            companies identified by CNPJ
            4. Telecommunications          7. Traffic fines
                                           9. Bank internal use

        Layout (1-based positions):
            01-01  product (always '8' = collection)
            02-02  segment (carteira)
            03-03  currency indicator
            04-04  general check digit
            05-15  amount (11 digits, in cents)
            -- telecommunications --
            16-19  company identification
            20-44  free field
            -- CNPJ-identified companies --
            16-23  company identification and/or bank-assigned code
            24-44  free field
        """
        if self.carteira not in ['6', '8']:
            self.carteira = '8'
        barcode = '%1s%1s%1s%011d%4s%25s' % ('8',
                                             self.carteira,
                                             self.moeda_formato,
                                             Decimal(self.valor_documento) * 100,
                                             self.identificacao[0:4],
                                             self.campo_livre)
        # Segment 6 uses modulo-10 for the general check digit, the rest
        # modulo-11.  NOTE(review): the slice below inserts the digit after
        # four characters (position 5) while the layout table above says
        # position 4 -- preserved as-is; verify against the Febraban spec.
        dv = self.dac11(barcode)
        if self.carteira == '6':
            dv = self.modulo10(barcode)
        return barcode[0:4] + str(dv) + barcode[4:]
| [
"opencode@gmail.com"
] | opencode@gmail.com |
de55432aa62203c7893bb2e5f0e16ea10e91e600 | 7f9c6238b2fb44117adb3bc40eda3fa9f9b743f3 | /CARDAINATION-UPDATED/cardination/__init__.py | 87755830e64a9b5316b80c46c33ecfb4a5365e4d | [] | no_license | Sarthak251099/Login-System | faea23e2b3cf71eec20c1036a17cd2bc3348071d | 935dd6851ba735286518ad56e2af0b31f0a86223 | refs/heads/main | 2023-01-05T12:10:08.290035 | 2020-11-01T04:56:52 | 2020-11-01T04:56:52 | 309,026,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | from flask import Flask
import os
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_cors import CORS
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
CORS(app)
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
from cardination.patient.routes import patient
from cardination.doctor.routes import doctor
from cardination.main.routes import main
app.register_blueprint(patient)
app.register_blueprint(doctor)
app.register_blueprint(main) | [
"noreply@github.com"
] | Sarthak251099.noreply@github.com |
559d97cdce99a1160f9152e0378028e6b5c18267 | 9e02e411290fa4fc5d6fac8cb51e437377e05bbb | /tests/transformers/test_image.py | 83672f4fa7241eb3255f19e3cd465e229ccd74ca | [
"MIT"
] | permissive | nagyistoce/mila-udem-fuel | f62551a9937fc86ea0b0c40a736318a89522e4eb | 3b82041b10700e001cd294df0a0de5cb3e738613 | refs/heads/master | 2021-01-22T18:32:49.567897 | 2015-07-27T13:25:42 | 2015-07-27T13:25:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,846 | py | from collections import OrderedDict
from io import BytesIO
import numpy
from numpy.testing import assert_raises
from PIL import Image
from picklable_itertools.extras import partition_all
from six.moves import zip
from fuel import config
from fuel.datasets.base import IndexableDataset
from fuel.schemes import ShuffledScheme, SequentialExampleScheme
from fuel.streams import DataStream
from fuel.transformers.image import (ImagesFromBytes,
MinimumImageDimensions,
RandomFixedSizeCrop)
def reorder_axes(shp):
if len(shp) == 3:
shp = (shp[-1],) + shp[:-1]
elif len(shp) == 2:
shp = (1,) + shp
return shp
class ImageTestingMixin(object):
def common_setup(self):
ex_scheme = SequentialExampleScheme(self.dataset.num_examples)
self.example_stream = DataStream(self.dataset,
iteration_scheme=ex_scheme)
self.batch_size = 2
scheme = ShuffledScheme(self.dataset.num_examples,
batch_size=self.batch_size)
self.batch_stream = DataStream(self.dataset, iteration_scheme=scheme)
class TestImagesFromBytes(ImageTestingMixin):
def setUp(self):
rng = numpy.random.RandomState(config.default_seed)
self.shapes = [
(10, 12, 3),
(9, 8, 4),
(12, 14, 3),
(4, 7),
(9, 8, 4),
(7, 9, 3)
]
pil1 = Image.fromarray(rng.random_integers(0, 255,
size=self.shapes[0])
.astype('uint8'), mode='RGB')
pil2 = Image.fromarray(rng.random_integers(0, 255,
size=self.shapes[1])
.astype('uint8'), mode='CMYK')
pil3 = Image.fromarray(rng.random_integers(0, 255,
size=self.shapes[2])
.astype('uint8'), mode='RGB')
pil4 = Image.fromarray(rng.random_integers(0, 255,
size=self.shapes[3])
.astype('uint8'), mode='L')
pil5 = Image.fromarray(rng.random_integers(0, 255,
size=self.shapes[4])
.astype('uint8'), mode='RGBA')
pil6 = Image.fromarray(rng.random_integers(0, 255,
size=self.shapes[5])
.astype('uint8'), mode='YCbCr')
source1 = [pil1, pil2, pil3]
source2 = [pil4, pil5, pil6]
bytesio1 = [BytesIO() for _ in range(3)]
bytesio2 = [BytesIO() for _ in range(3)]
formats1 = ['PNG', 'JPEG', 'BMP']
formats2 = ['GIF', 'PNG', 'JPEG']
for s, b, f in zip(source1, bytesio1, formats1):
s.save(b, format=f)
for s, b, f in zip(source2, bytesio2, formats2):
s.save(b, format=f)
self.dataset = IndexableDataset(
OrderedDict([('source1', [b.getvalue() for b in bytesio1]),
('source2', [b.getvalue() for b in bytesio2])]),
axis_labels={'source1': ('batch', 'bytes'),
'source2': ('batch', 'bytes')})
self.common_setup()
def test_images_from_bytes_example_stream(self):
stream = ImagesFromBytes(self.example_stream,
which_sources=('source1', 'source2'),
color_mode=None)
s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
s1_shape = set(s.shape for s in s1)
s2_shape = set(s.shape for s in s2)
actual_s1 = set(reorder_axes(s) for s in self.shapes[:3])
actual_s2 = set(reorder_axes(s) for s in self.shapes[3:])
assert actual_s1 == s1_shape
assert actual_s2 == s2_shape
def test_images_from_bytes_batch_stream(self):
stream = ImagesFromBytes(self.batch_stream,
which_sources=('source1', 'source2'),
color_mode=None)
s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
s1 = sum(s1, [])
s2 = sum(s2, [])
s1_shape = set(s.shape for s in s1)
s2_shape = set(s.shape for s in s2)
actual_s1 = set(reorder_axes(s) for s in self.shapes[:3])
actual_s2 = set(reorder_axes(s) for s in self.shapes[3:])
assert actual_s1 == s1_shape
assert actual_s2 == s2_shape
def test_images_from_bytes_example_stream_convert_rgb(self):
stream = ImagesFromBytes(self.example_stream,
which_sources=('source1'),
color_mode='RGB')
s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
actual_s1_gen = (reorder_axes(s) for s in self.shapes[:3])
actual_s1 = set((3,) + s[1:] for s in actual_s1_gen)
s1_shape = set(s.shape for s in s1)
assert actual_s1 == s1_shape
def test_images_from_bytes_example_stream_convert_l(self):
stream = ImagesFromBytes(self.example_stream,
which_sources=('source2'),
color_mode='L')
s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
actual_s2_gen = (reorder_axes(s) for s in self.shapes[3:])
actual_s2 = set((1,) + s[1:] for s in actual_s2_gen)
s2_shape = set(s.shape for s in s2)
assert actual_s2 == s2_shape
def test_axis_labels(self):
stream = ImagesFromBytes(self.example_stream,
which_sources=('source2',))
assert stream.axis_labels['source1'] == ('bytes',)
assert stream.axis_labels['source2'] == ('channel', 'height',
'width')
bstream = ImagesFromBytes(self.batch_stream,
which_sources=('source1',))
assert bstream.axis_labels['source1'] == ('batch', 'channel', 'height',
'width')
assert bstream.axis_labels['source2'] == ('batch', 'bytes')
def test_bytes_type_exception(self):
stream = ImagesFromBytes(self.example_stream,
which_sources=('source2',))
assert_raises(TypeError, stream.transform_source_example, 54321,
'source2')
class TestMinimumDimensions(ImageTestingMixin):
def setUp(self):
rng = numpy.random.RandomState(config.default_seed)
source1 = []
source2 = []
source3 = []
self.shapes = [(5, 9), (4, 6), (3, 6), (6, 4), (2, 5), (4, 8), (8, 3)]
for i, shape in enumerate(self.shapes):
source1.append(rng.normal(size=shape))
source2.append(rng.normal(size=shape[::-1]))
source3.append(i)
self.dataset = IndexableDataset(OrderedDict([('source1', source1),
('source2', source2),
('source3', source3)]),
axis_labels={'source1':
('batch', 'channel',
'height', 'width')})
self.common_setup()
def test_minimum_dimensions_example_stream(self):
stream = MinimumImageDimensions(self.example_stream, (4, 5),
which_sources=('source1',))
it = stream.get_epoch_iterator()
for example, shp in zip(it, self.shapes):
assert example[0].shape[0] >= 4 and example[0].shape[1] >= 5
assert (example[1].shape[1] == shp[0] and
example[1].shape[0] == shp[1])
def test_minimum_dimensions_batch_stream(self):
stream = MinimumImageDimensions(self.batch_stream, (4, 5),
which_sources=('source1',))
it = stream.get_epoch_iterator()
for batch, shapes in zip(it, partition_all(self.batch_size,
self.shapes)):
assert (example.shape[0] >= 4 and example.shape[1] >= 5
for example in batch[0])
assert (example.shape[1] == shp[0] and
example.shape[0] == shp[1]
for example, shp in zip(batch[1], shapes))
def test_axes_exception(self):
stream = MinimumImageDimensions(self.example_stream, (4, 5),
which_sources=('source1',))
assert_raises(NotImplementedError,
stream.transform_source_example,
numpy.empty((2, 3, 4, 2)),
'source1')
def test_resample_exception(self):
assert_raises(ValueError,
MinimumImageDimensions, self.example_stream, (4, 5),
resample='notarealresamplingmode')
class TestFixedSizeRandomCrop(ImageTestingMixin):
def setUp(self):
source1 = numpy.zeros((9, 3, 7, 5), dtype='uint8')
source1[:] = numpy.arange(3 * 7 * 5, dtype='uint8').reshape(3, 7, 5)
shapes = [(5, 9), (6, 8), (5, 6), (5, 5), (6, 4), (7, 4),
(9, 4), (5, 6), (6, 5)]
source2 = []
biggest = 0
num_channels = 2
for shp in shapes:
biggest = max(biggest, shp[0] * shp[1] * 2)
ex = numpy.arange(shp[0] * shp[1] * num_channels).reshape(
(num_channels,) + shp).astype('uint8')
source2.append(ex)
self.source2_biggest = biggest
axis_labels = {'source1': ('batch', 'channel', 'height', 'width'),
'source2': ('batch', 'channel', 'height', 'width')}
self.dataset = IndexableDataset(OrderedDict([('source1', source1),
('source2', source2)]),
axis_labels=axis_labels)
self.common_setup()
def test_ndarray_batch_source(self):
# Make sure that with enough epochs we sample everything.
stream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
which_sources=('source1',))
seen_indices = numpy.array([], dtype='uint8')
for i in range(30):
for batch in stream.get_epoch_iterator():
assert batch[0].shape[1:] == (3, 5, 4)
assert batch[0].shape[0] in (1, 2)
seen_indices = numpy.union1d(seen_indices, batch[0].flatten())
if 3 * 7 * 5 == len(seen_indices):
break
else:
assert False
def test_list_batch_source(self):
# Make sure that with enough epochs we sample everything.
stream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
which_sources=('source2',))
seen_indices = numpy.array([], dtype='uint8')
for i in range(30):
for batch in stream.get_epoch_iterator():
for example in batch[1]:
assert example.shape == (2, 5, 4)
seen_indices = numpy.union1d(seen_indices,
example.flatten())
assert len(batch[1]) in (1, 2)
if self.source2_biggest == len(seen_indices):
break
else:
assert False
def test_format_exceptions(self):
estream = RandomFixedSizeCrop(self.example_stream, (5, 4),
which_sources=('source2',))
bstream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
which_sources=('source2',))
assert_raises(ValueError, estream.transform_source_example,
numpy.empty((5, 6)), 'source2')
assert_raises(ValueError, bstream.transform_source_batch,
[numpy.empty((7, 6))], 'source2')
assert_raises(ValueError, bstream.transform_source_batch,
[numpy.empty((8, 6))], 'source2')
def test_window_too_big_exceptions(self):
stream = RandomFixedSizeCrop(self.example_stream, (5, 4),
which_sources=('source2',))
assert_raises(ValueError, stream.transform_source_example,
numpy.empty((3, 4, 2)), 'source2')
bstream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
which_sources=('source1',))
assert_raises(ValueError, bstream.transform_source_batch,
numpy.empty((5, 3, 4, 2)), 'source1')
| [
"wardefar@iro.umontreal.ca"
] | wardefar@iro.umontreal.ca |
f12f01d52af9e9c739bd9b77c749631275a251d3 | 01b0dac640bbf26b8957c7bf4c519e3d36108664 | /offset-inventory-GWO.py | 401d8a6c2a0dcc624544b57516a6f79b60d3cd05 | [
"MIT"
] | permissive | curlyz/comparision_offsetting_inventory_between-GWO_and_PSO | 61aac065cf2697ece3bfa9fe37c35af380149724 | 98b23c9069fdddd87c5986aa05b71f19017b7106 | refs/heads/master | 2020-09-20T18:22:09.073135 | 2019-11-28T02:57:03 | 2019-11-28T02:57:03 | 224,557,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,321 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 14:15:12 2019
@author: USER
"""
import copy
import xlrd
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
wb = xlrd.open_workbook('data.xlsx', on_demand=True)
sheets=(wb.sheet_names())
df=pd.read_excel('data.xlsx',sheet_name=sheets)
sh = '200'
def getdata(Quantity,Point,Cycle,Period):
array = []
result = []
for i in range(Period):
mod = i%Cycle
if mod == 0:
temp = Quantity
if mod != 0:
temp = round(-(Quantity/Cycle)*mod + Quantity,2)
array.append(temp)
for j in range(Period):
result.append(array[j-Point])
return result
def initialization(num_searchagent, Ub, Lb):
Positions=np.zeros((num_searchagent, len(Ub)),dtype = int)
dim=len(Lb);
for i in range(num_searchagent):
for j in range(dim):
Positions[i][j]=(np.random.randint(low=Lb[j],high=Ub[j]))
return Positions
def Cost(arr_Quantity,arr_Point,arr_Cycle,int_Period):
data = []
sum_temp = 0
arr_temp = []
for i in range(len(arr_Quantity)):
data.append(getdata(arr_Quantity[i],arr_Point[i],arr_Cycle[i],int_Period))
for j in range(int_Period):
for k in range(len(data)):
sum_temp+=data[k][j]
arr_temp.append(sum_temp)
sum_temp = 0
return max(arr_temp)
def GWO(SearchAgents_no,Max_iter,ub,lb,dim):
Alpha_pos=np.zeros(dim)
Alpha_score=np.inf
Beta_pos=np.zeros(dim)
Beta_score=np.inf
Delta_pos=np.zeros(dim)
Delta_score=np.inf
Positions=initialization(SearchAgents_no,ub,lb)
Convergence_curve=np.zeros(Max_iter+1)
l=0
while l<Max_iter:
fitness = []
for i in range(0,SearchAgents_no):
Flag4ub=Positions[i]>ub
Flag4lb=Positions[i]<lb
Positions[i]=(Positions[i]*(~(Flag4ub+Flag4lb)))+ub*Flag4ub+lb*Flag4lb
# print(Positions[i])
fitness.append(Cost(Quan,Positions[i],Cycle,Period))
for i in range(0,SearchAgents_no):
if fitness[i]<Alpha_score:
Alpha_score=fitness[i]
Alpha_pos=Positions[i].copy()
if ((fitness[i]>Alpha_score) and (fitness[i]<Beta_score)):
Beta_score=fitness[i]
Beta_pos=Positions[i].copy()
if (fitness[i]>Alpha_score) and (fitness[i]>Beta_score) and (fitness[i]<Delta_score):
Delta_score=fitness[i]
Delta_pos=Positions[i].copy()
#a=10-l*l*((10)/(Max_iter*Max_iter))
a=2-l*((2)/(Max_iter))
for i in range(0,SearchAgents_no):
for j in range(len(Positions[0])):
r1=random.random()
r2=random.random()
A1=2*a*r1-a
C1=2*r2
D_alpha=abs(C1*Alpha_pos[j]-Positions[i][j])
X1=Alpha_pos[j]-A1*D_alpha
# rand=np.random.rand()
# if rand<0.5:
# D_alpha=np.random.rand()*np.sin(np.random.rand())*abs(C1*Alpha_pos[j]-Positions[i][j])
# else:
# D_alpha=np.random.rand()*np.cos(np.random.rand())*abs(C1*Alpha_pos[j]-Positions[i][j])
# X1=Alpha_pos[j]-A1*D_alpha
r1=random.random()
r2=random.random()
A2=2*a*r1-a
C2=2*r2
D_beta=abs(C2*Beta_pos[j]-Positions[i][j])
X2=Beta_pos[j]-A2*D_beta
r1=random.random()
r2=random.random()
A3=2*a*r1-a
C3=2*r2
D_delta=abs(C3*Delta_pos[j]-Positions[i][j])
X3=Delta_pos[j]-A3*D_delta
# wr1 = random.random()
# wr2 = random.random()
# wr3 = random.random()
#
# w1 = round((wr1/(wr1 + wr2 + wr3)),2)*10
# w2 = round((wr2/(wr1 + wr2 + wr3)),2)*10
# w3 = round((wr3/(wr1 + wr2 + wr3)),2)*10
#
#
# Positions[i][j]=round((w1*X1+w2*X2+w3*X3)/3,0)
Positions[i][j]=round((X1+X2+X3)/3,0)
Convergence_curve[l]=Alpha_score
print('Iteration:',l,'-Obj - ',Alpha_score,'- w -', a)
l+=1
Convergence_curve[l] = Convergence_curve[l-1]
return Alpha_score, Alpha_pos, Convergence_curve
Quan = df[sh]['Quan']
Cycle = df[sh]['Cycle']
SearchAgents_no=30
Max_iter=100
Ub = np.array([max(Cycle)]*len(Cycle))
Lb = np.array([0]*len(Cycle))
dim=len(Lb)
Period = 300
Best_score, Best_pos, CC=GWO(SearchAgents_no,Max_iter,Ub,Lb,dim)
print(type(CC))
plt.plot(CC)
plt.xlabel('Iteration')
plt.ylabel('Obj Value')
plt.title('Convergence rate ' +str(Best_score))
fig = plt.gcf()
fig.set_size_inches(15, 10)
plt.show()
| [
"noreply@github.com"
] | curlyz.noreply@github.com |
b722747a673abeb16c78385aa295b6da3e1a5809 | d82595a7161e3125caa9f3db51ad4bdaaec3d018 | /Python/SLAPMi/__init__.py | 725874455460242c214e96f37a243437a84f2783 | [] | no_license | KasparP/PSI_simulations | ad34c33e9ed6695778b6ec595da8abcfb28ff3b6 | 5d740d8fe1d0f729847da61e2d285c1f3de697f1 | refs/heads/master | 2016-08-12T19:27:11.214856 | 2016-03-10T21:46:06 | 2016-03-10T21:46:47 | 36,687,037 | 5 | 2 | null | 2015-11-03T22:22:01 | 2015-06-01T20:31:38 | Matlab | UTF-8 | Python | false | false | 22 | py | from slapmi import *
| [
"k.podgorski@gmail.com"
] | k.podgorski@gmail.com |
5ba82c6bf3b10cd1ddea1cfdd7d5c3b6cf9cea6b | 5609adbab83e073020974baedc63d2e3cc7ba5bc | /ive.py | 9a5da4c5ad9c2c052edeb3f1533d2d831432d49c | [] | no_license | KenNN99/code_2020ICASSP_five | e5eb5e5cd184b5d5fbeb73605723567f5f9ba9a7 | 66ddc55d50bbbc41cb73e375c7a93ac34d45dc43 | refs/heads/master | 2023-05-01T20:05:59.190573 | 2020-05-07T13:56:22 | 2020-05-07T13:56:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,396 | py | # Copyright (c) 2019 Robin Scheibler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Blind Source Extraction using Independent Vector Extraction via the OGIVE algorithm [1].
[1] Z. Koldovský and P. Tichavský, “Gradient Algorithms for Complex
Non-Gaussian Independent Component/Vector Extraction, Question of Convergence,”
IEEE Trans. Signal Process., pp. 1050–1064, Dec. 2018.
"""
import os
import numpy as np
from pyroomacoustics.bss import projection_back
def ogive(
X,
n_iter=4000,
step_size=0.1,
tol=1e-3,
update="demix",
proj_back=True,
W0=None,
model="laplace",
init_eig=False,
return_filters=False,
callback=None,
callback_checkpoints=[],
):
"""
Implementation of Orthogonally constrained Independent Vector Extraction
(OGIVE) described in
Z. Koldovský and P. Tichavský, “Gradient Algorithms for Complex
Non-Gaussian Independent Component/Vector Extraction, Question of Convergence,”
IEEE Trans. Signal Process., pp. 1050–1064, Dec. 2018.
Parameters
----------
X: ndarray (nframes, nfrequencies, nchannels)
STFT representation of the signal
n_src: int, optional
The number of sources or independent components
n_iter: int, optional
The number of iterations (default 20)
step_size: float
The step size of the gradient ascent
tol: float
Stop when the gradient is smaller than this number
update: str
Selects update of the mixing or demixing matrix, or a switching scheme,
possible values: "mix", "demix", "switching"
proj_back: bool, optional
Scaling on first mic by back projection (default True)
W0: ndarray (nfrequencies, nsrc, nchannels), optional
Initial value for demixing matrix
model: str
The model of source distribution 'gauss' or 'laplace' (default)
init_eig: bool, optional (default ``False``)
If ``True``, and if ``W0 is None``, then the weights are initialized
using the principal eigenvectors of the covariance matrix of the input
data.
return_filters: bool
If true, the function will return the demixing matrix too
callback: func
A callback function called every 10 iterations, allows to monitor
convergence
callback_checkpoints: list of int
A list of epoch number when the callback should be called
Returns
-------
Returns an (nframes, nfrequencies, nsources) array. Also returns
the demixing matrix (nfrequencies, nchannels, nsources)
if ``return_values`` keyword is True.
"""
n_frames, n_freq, n_chan = X.shape
n_src = 1
# covariance matrix of input signal (n_freq, n_chan, n_chan)
Cx = np.mean(X[:, :, :, None] * np.conj(X[:, :, None, :]), axis=0)
Cx_inv = np.linalg.inv(Cx)
Cx_norm = np.linalg.norm(Cx, axis=(1, 2))
w = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)
a = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)
delta = np.zeros((n_freq, n_chan, 1), dtype=X.dtype)
lambda_a = np.zeros((n_freq, 1, 1), dtype=np.float64)
def tensor_H(T):
return np.conj(T).swapaxes(1, 2)
# eigenvectors of the input covariance
eigval, eigvec = np.linalg.eig(Cx)
lead_eigval = np.max(eigval, axis=1)
lead_eigvec = np.zeros((n_freq, n_chan), dtype=Cx.dtype)
for f in range(n_freq):
ind = np.argmax(eigval[f])
lead_eigvec[f, :] = eigvec[f, :, ind]
# initialize A and W
if W0 is None:
if init_eig:
# Initialize the demixing matrices with the principal
# eigenvector
w[:, :, 0] = lead_eigvec
else:
# Or with identity
w[:, 0] = 1.0
else:
w[:, :] = W0
def update_a_from_w(I):
v_new = Cx[I] @ w[I]
lambda_w = 1.0 / np.real(tensor_H(w[I]) @ v_new)
a[I, :, :] = lambda_w * v_new
def update_w_from_a(I):
v_new = Cx_inv @ a
lambda_a[:] = 1.0 / np.real(tensor_H(a) @ v_new)
w[I, :, :] = lambda_a[I] * v_new[I]
def switching_criterion():
a_n = a / a[:, :1, :1]
b_n = Cx @ a_n
lmb = b_n[:, :1, :1].copy() # copy is important here!
b_n /= lmb
p1 = np.linalg.norm(a_n - b_n, axis=(1, 2)) / Cx_norm
Cbb = (
lmb
* (b_n @ tensor_H(b_n))
/ np.linalg.norm(b_n, axis=(1, 2), keepdims=True) ** 2
)
p2 = np.linalg.norm(Cx - Cbb, axis=(1, 2))
kappa = p1 * p2 / np.sqrt(n_chan)
thresh = 0.1
I_do_a[:] = kappa >= thresh
I_do_w[:] = kappa < thresh
# Compute the demixed output
def demix(Y, X, W):
Y[:, :, :] = X @ np.conj(W)
# The very first update of a
update_a_from_w(np.ones(n_freq, dtype=np.bool))
if update == "mix":
I_do_w = np.zeros(n_freq, dtype=np.bool)
I_do_a = np.ones(n_freq, dtype=np.bool)
else: # default is "demix"
I_do_w = np.ones(n_freq, dtype=np.bool)
I_do_a = np.zeros(n_freq, dtype=np.bool)
r_inv = np.zeros((n_frames, n_src))
r = np.zeros((n_frames, n_src))
# Things are more efficient when the frequencies are over the first axis
Y = np.zeros((n_freq, n_frames, n_src), dtype=X.dtype)
X_ref = X # keep a reference to input signal
X = X.swapaxes(0, 1).copy() # more efficient order for processing
for epoch in range(n_iter):
# compute the switching criterion
if update == "switching" and epoch % 10 == 0:
switching_criterion()
# Extract the target signal
demix(Y, X, w)
# Now run any necessary callback
if callback is not None and epoch in callback_checkpoints:
Y_tmp = Y.swapaxes(0, 1).copy()
if proj_back:
z = projection_back(Y_tmp, X_ref[:, :, 0])
callback(Y_tmp * np.conj(z[None, :, :]))
else:
callback(Y_tmp)
# simple loop as a start
# shape: (n_frames, n_src)
if model == "laplace":
r[:, :] = np.linalg.norm(Y, axis=0) / np.sqrt(n_freq)
elif model == "gauss":
r[:, :] = (np.linalg.norm(Y, axis=0) ** 2) / n_freq
eps = 1e-15
r[r < eps] = eps
r_inv[:, :] = 1.0 / r
# Compute the score function
psi = r_inv[None, :, :] * np.conj(Y)
# "Nu" in Algo 3 in [1]
# shape (n_freq, 1, 1)
zeta = Y.swapaxes(1, 2) @ psi
x_psi = (X.swapaxes(1, 2) @ psi) / zeta
# The w-step
# shape (n_freq, n_chan, 1)
delta[I_do_w] = a[I_do_w] - x_psi[I_do_w]
w[I_do_w] += step_size * delta[I_do_w]
# The a-step
# shape (n_freq, n_chan, 1)
delta[I_do_a] = w[I_do_a] - (Cx_inv[I_do_a] @ x_psi[I_do_a]) * lambda_a[I_do_a]
a[I_do_a] += step_size * delta[I_do_a]
# Apply the orthogonal constraints
update_a_from_w(I_do_w)
update_w_from_a(I_do_a)
max_delta = np.max(np.linalg.norm(delta, axis=(1, 2)))
if max_delta < tol:
break
# Extract target
demix(Y, X, w)
Y = Y.swapaxes(0, 1).copy()
X = X.swapaxes(0, 1)
if proj_back:
z = projection_back(Y, X_ref[:, :, 0])
Y *= np.conj(z[None, :, :])
if return_filters:
return Y, w
else:
return Y
| [
"fakufaku@gmail.com"
] | fakufaku@gmail.com |
2e574eac9c395ecd6a108c5528442f4546c52b38 | 1df9e499033265aff0a118827654af95ac30791f | /13.py | 2d5a1b76f7477ddea2573aa852f493358506d7f2 | [] | no_license | RamyaSekaran/guvi | 99d3b0ea3f3301fc98b97e8339cbade5ee29648c | e44c3bf846be531f5a443592b1263ac9109a2fb8 | refs/heads/master | 2020-03-22T05:53:36.928817 | 2018-08-09T16:08:39 | 2018-08-09T16:08:39 | 139,597,655 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | a=input()
count=0
if int(a)==1:
print("No")
else :
a=int(a)
for i in range(2,a):
if a%i==0:
count=1
if count==1:
print("No")
else:
print("Yes")
| [
"noreply@github.com"
] | RamyaSekaran.noreply@github.com |
0bc5e43c87e3091c5c4d74c36eb378586fad1b1d | fd3df5db5fec7daf5711018024773272e9a78043 | /pajbot/modules/songrequest.py | 86ea9f40a7be27d3d0a2eee40b6cb623ac08f093 | [
"MIT"
] | permissive | metrize/troybot | af7de305345b3d165f2316c4cc9472c83f948467 | c42f46569b3a7af14af7300e3aa033269cf3a487 | refs/heads/master | 2022-06-09T00:04:03.043950 | 2020-05-06T23:55:42 | 2020-05-06T23:55:42 | 261,893,673 | 0 | 0 | null | 2020-05-06T22:35:39 | 2020-05-06T22:35:38 | null | UTF-8 | Python | false | false | 18,605 | py | import logging
import urllib
import urllib.request
import urllib.parse
import re
import json
import random
from pajbot.managers.db import DBManager
from pajbot.managers.handler import HandlerManager
from pajbot.models.command import Command
from pajbot.models.songrequest import SongrequestQueue
from pajbot.models.songrequest import SongRequestSongInfo
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
def find_youtube_id_in_string(string):
    """Extract a YouTube video ID from *string*.

    Accepts a bare 11-character video ID, a youtu.be short link, or a
    youtube.com watch URL (with or without an explicit scheme).
    Returns the 11-character ID on success, or ``False`` when no ID can
    be extracted.
    """
    length = len(string)
    if length < 11:
        # Shorter than an ID - nothing to extract.
        return False
    if length == 11:
        # Exactly ID-sized: treat the whole string as the ID itself.
        return string

    # urlparse only populates netloc when a scheme is present, so add one.
    if not string.lower().startswith(("http://", "https://")):
        string = "http://" + string

    parsed = urllib.parse.urlparse(string)

    if parsed.netloc == "youtu.be":
        # Short link: the ID is the path component (minus the leading "/").
        return parsed.path[1:]
    if parsed.netloc.endswith("youtube.com"):
        # Watch URL: the ID lives in the "v" query parameter.
        query = urllib.parse.parse_qs(parsed.query)
        if "v" not in query:
            return False
        return query["v"][0]
    return False
def find_youtube_video_by_search(search):
    """Search YouTube and return the video ID of the first result.

    Scrapes the public results page rather than using the Data API, so it
    is best-effort only: any failure (network error, markup change, no
    matches) yields ``None``.
    """
    try:
        query_string = urllib.parse.urlencode({"search_query": search})
        # Close the HTTP response deterministically instead of leaking it.
        with urllib.request.urlopen("http://www.youtube.com/results?" + query_string) as response:
            html_content = response.read().decode()
        # First 11-character ID embedded in a watch link on the page.
        return re.findall(r"href=\"\/watch\?v=(.{11})", html_content)[0]
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # still propagate; anything else means "no result found".
        return None
class SongrequestModule(BaseModule):
    """Module that lets viewers queue YouTube songs to be played on stream."""

    # Module registry metadata, read by the BaseModule framework.
    ID = __name__.split(".")[-1]
    NAME = "Songrequest"
    DESCRIPTION = "Request Songs"
    CATEGORY = "Feature"
    # Streamer-configurable options, surfaced in the admin web UI.
    SETTINGS = [
        ModuleSetting(key="youtube_key", label="Youtube developer key", type="text", required=True, default=""),
        ModuleSetting(
            key="max_song_length",
            label="Max song length (in seconds)",
            type="number",
            required=True,
            placeholder="Max song length (in seconds)",
            default=360,
            constraints={"min_value": 1, "max_value": 3600},
        ),
        ModuleSetting(
            key="point_cost",
            label="Point costs for requesting a song",
            type="number",
            required=True,
            default=500,
            constraints={"min_value": 0, "max_value": 250000},
        ),
        ModuleSetting(
            key="backup_playlist_id",
            label="Songs to play when no song is being requested backup playlist id",
            type="text",
            required=True,
            default="",
        ),
        # Playback volume settings.
        ModuleSetting(
            key="volume",
            label="Default volume for song requests",
            type="number",
            required=True,
            default=100,
            constraints={"min_value": 0, "max_value": 100},
        ),
        # NOTE(review): default here is the string "100" while "volume"
        # uses the int 100 - confirm whether that is intentional.
        ModuleSetting(
            key="volume_multiplier",
            label="Volume multiplier",
            type="number",
            required=True,
            default="100",
            constraints={"min_value": 0, "max_value": 100},
        ),
        ModuleSetting(
            key="use_spotify",
            label="Checks Spotify for current song if no song is playing",
            type="boolean",
            required=True,
            default=True,
        ),
        # Chat-announcement templates; {placeholders} are filled via str.format.
        ModuleSetting(
            key="send_message_in_chat",
            label="Send a message in chat upon a song request",
            type="boolean",
            required=True,
            default=True,
        ),
        ModuleSetting(
            key="message_in_chat",
            label="Message sent in chat after someone requests a song {username} is the requestor, {title} is the song title, {current_pos} is the current queue position, {playing_in} is how long until the song is played",
            type="text",
            required=True,
            default='{username} just requested the song "{title}" to be played KKona',
        ),
        ModuleSetting(
            key="message_in_chat_no_songs_playing",
            label="Message sent when no songs are playing",
            type="text",
            required=True,
            default="No songs are currently playing",
        ),
        ModuleSetting(
            key="message_in_chat_when_song_is_playing",
            label="Message sent when a song is playing, {title} is the title of the song, {requestor} is the person who requested, {time_left} is the time left for playing",
            type="text",
            required=True,
            default="The current song is {title} requested by {requestor}",
        ),
        ModuleSetting(
            key="message_in_chat_when_song_is_playing_spotify",
            label="Message sent when a song is playing, {title} is the title of the song, {artists} is the list of artists",
            type="text",
            required=True,
            default="The current song is {title} by {artists}",
        ),
        ModuleSetting(
            key="message_in_chat_when_next_song",
            label="Message sent when a next song is requested, {title} is the title of the song, {requestor} is the person who requested, {playing_in} is when the song will play",
            type="text",
            required=True,
            default="The next song is {title} requested by {requestor}",
        ),
        ModuleSetting(
            key="message_in_chat_when_next_song_none",
            label="Message sent when a next song is requested but there isn't one",
            type="text",
            required=True,
            default="There are no songs currently queued",
        ),
        # Announcements for opening/closing the song-request system.
        ModuleSetting(
            key="send_message_on_open",
            label="Send message when song request is opened",
            type="boolean",
            required=True,
            default=True,
        ),
        ModuleSetting(
            key="message_sent_on_open",
            label="Message sent when song request is opened",
            type="text",
            required=True,
            default="Song Request has been opened!",
        ),
        ModuleSetting(
            key="send_message_on_close",
            label="Send message when song request is closed",
            type="boolean",
            required=True,
            default=True,
        ),
        ModuleSetting(
            key="message_sent_on_close",
            label="Message sent when song request is closed",
            type="text",
            required=True,
            default="Song Request has been closed!",
        ),
    ]
def getBackUpListSongs(self, next_page=None):
songs = []
urlin = (
f"https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={self.settings['backup_playlist_id']}&key={self.settings['youtube_key']}"
+ (f"&pageToken={next_page}" if next_page else "")
)
with urllib.request.urlopen(urlin) as url:
data = json.loads(url.read().decode())
for song in data["items"]:
songs.append(song["snippet"]["resourceId"]["videoId"])
try:
next_page = data["nextPageToken"]
return songs + self.getBackUpListSongs(next_page)
except:
return songs
def create_song_request_queue(self, video_id, bot, source):
    """Validate *video_id* and enqueue it as a song request from *source*.

    Returns True when the song was queued, False when the song info could
    not be fetched or the song is banned.  May announce the request in
    chat depending on module settings.
    """
    with DBManager.create_session_scope() as db_session:
        song_info = SongRequestSongInfo._create_or_get(db_session, video_id, self.youtube)
        if not song_info:
            log.error("There was an error!")
            return False
        if song_info.banned:
            bot.whisper(source, "That song is banned! FeelsWeirdMan")
            return False
        # Cap playback at the configured maximum song length (None = play fully).
        skip_after = (
            self.settings["max_song_length"] if song_info.duration > self.settings["max_song_length"] else None
        )
        songrequest_queue = SongrequestQueue._create(db_session, video_id, skip_after, source.id)
        db_session.commit()
        # Convert the wait time (seconds, presumably — TODO confirm unit) to MM:SS.
        m, s = divmod(int(songrequest_queue.playing_in(db_session)), 60)
        m = int(m)
        s = int(s)
        playing_in = f"{m:02d}:{s:02d}"
        current_song = SongrequestQueue._from_id(db_session, self.bot.songrequest_manager.current_song_id)
        # If nothing is playing, or only a backup-playlist song (no requester),
        # start playing the newly queued request right away.
        if not current_song or not current_song.requested_by:
            self.bot.songrequest_manager.load_song()
        if self.settings["send_message_in_chat"]:
            bot.say(
                self.settings["message_in_chat"].format(
                    username=source.username_raw,
                    title=song_info.title,
                    current_pos=songrequest_queue.queue
                    + (1 if SongrequestQueue._get_current_song(db_session) else 0),
                    playing_in=playing_in,
                )
            )
        self.bot.songrequest_manager._playlist()
        return True
def add_song(self, bot, source, message, **rest):
    """Chat entry point for !sr — resolve *message* to a YouTube ID and queue it.

    Returns False when no valid YouTube ID could be derived from the
    message; otherwise delegates to create_song_request_queue.
    """
    if not message:
        self.bot.whisper(source, "Could not find a valid youtube ID in your argument.")
        return False
    # 1. Find youtube ID in message
    msg_split = message.split(" ")
    youtube_id = find_youtube_id_in_string(msg_split[0])
    if youtube_id is False:
        # The first word was not an ID/URL — fall back to searching
        # YouTube with the full message text.
        youtube_id = find_youtube_video_by_search(message)
        if youtube_id is None:
            self.bot.whisper(source, "Could not find a valid youtube ID in your argument.")
            return False
    # 2. Make sure the stream is live
    # NOTE(review): the online-check below is deliberately disabled; requests
    # are accepted even while the stream is offline.
    # stream_id = StreamHelper.get_current_stream_id()
    # if stream_id is None or stream_id is False:
    #     self.bot.whisper(source, "You cannot request songs while the stream is offline.")
    #     return False
    return self.create_song_request_queue(youtube_id, bot, source)
def get_current_song(self, bot, source, message, **rest):
    """Chat entry point for !song — announce what is currently playing.

    Order of preference: the song-request queue, then (when enabled) the
    currently playing Spotify track, then a "nothing playing" message.
    Always returns True.
    """
    with DBManager.create_session_scope() as db_session:
        current_song = SongrequestQueue._get_current_song(db_session)
        if current_song:
            # Remaining playtime formatted as MM:SS.
            m, s = divmod(current_song.playing_in(db_session), 60)
            m = int(m)
            s = int(s)
            time_left = f"{m:02d}:{s:02d}"
            if current_song.requested_by:
                bot.say(
                    self.settings["message_in_chat_when_song_is_playing"].format(
                        title=current_song.song_info.title,
                        requestor=current_song.requested_by.username_raw,
                        time_left=time_left,
                    )
                )
                return True
            # No requester recorded: the song came from the backup playlist.
            bot.say(
                self.settings["message_in_chat_when_song_is_playing"].format(
                    title=current_song.song_info.title, requestor="Backup Playlist", time_left=time_left
                )
            )
            return True
    if self.settings["use_spotify"]:
        is_playing, title, artistsArr = bot.spotify_api.state(bot.spotify_token_manager)
        if is_playing:
            bot.say(
                self.settings["message_in_chat_when_song_is_playing_spotify"].format(
                    title=title, artists=", ".join([str(artist) for artist in artistsArr])
                )
            )
            return True
    bot.say(self.settings["message_in_chat_no_songs_playing"])
    return True
def get_next_song(self, bot, source, message, **rest):
    """Chat entry point for !next — announce the next queued song.

    Says an "empty queue" message when nothing is queued.  Always
    returns True.
    """
    with DBManager.create_session_scope() as db_session:
        next_song = SongrequestQueue._get_next_song(db_session)
        if next_song:
            # Time until the song starts, formatted as MM:SS.
            m, s = divmod(next_song.playing_in(db_session), 60)
            m = int(m)
            s = int(s)
            playing_in = f"{m:02d}:{s:02d}"
            if next_song.requested_by:
                bot.say(
                    self.settings["message_in_chat_when_next_song"].format(
                        title=next_song.song_info.title,
                        requestor=next_song.requested_by.username_raw,
                        playing_in=playing_in,
                    )
                )
                return True
            # No requester recorded: the song came from the backup playlist.
            bot.say(
                self.settings["message_in_chat_when_next_song"].format(
                    title=next_song.song_info.title, requestor="Backup Playlist", playing_in=playing_in
                )
            )
            return True
    bot.say(self.settings["message_in_chat_when_next_song_none"])
    return True
def open_module(self, bot, source, message, **rest):
    """Open song request; when configured, confirm both by whisper and in chat."""
    if self.bot.songrequest_manager.open_module_function():
        if self.settings["send_message_on_open"]:
            bot.whisper(source, self.settings["message_sent_on_open"])
            bot.say(self.settings["message_sent_on_open"])
        return
    bot.whisper(source, "Song request is already open!")
def close_module(self, bot, source, message, **rest):
    """Close song request; when configured, confirm both by whisper and in chat.

    Bug fix: the announcement was previously gated on the
    "send_message_on_open" setting, leaving "send_message_on_close"
    unused; it now checks the close-specific setting.
    """
    if self.bot.songrequest_manager.close_module_function():
        if self.settings["send_message_on_close"]:
            bot.whisper(source, self.settings["message_sent_on_close"])
            bot.say(self.settings["message_sent_on_close"])
        return
    bot.whisper(source, "Song request is already closed!")
def skip(self, bot, source, message, **rest):
    """Moderator command: skip the song that is currently playing."""
    if not self.bot.songrequest_manager.skip_function(source.login):
        bot.whisper(source, "No song is playing!")
        return
    bot.whisper(source, "Song has been skipped!")
def pause(self, bot, source, message, **rest):
    """Moderator command: pause playback."""
    paused = self.bot.songrequest_manager.pause_function()
    bot.whisper(source, "Song has been paused" if paused else "Song is already paused!")
def resume(self, bot, source, message, **rest):
    """Moderator command: resume paused playback."""
    resumed = self.bot.songrequest_manager.resume_function()
    bot.whisper(source, "Song has been resumed" if resumed else "Song is already playing!")
def volume(self, bot, source, message, **rest):
    """Moderator command: report the current volume, or set it to *message* (0-100).

    Returns True on success (report or change), False on invalid input.

    Fixes: the bare ``except:`` that caught the int() conversion failure
    (and anything else) is narrowed to the exceptions int() actually
    raises, and the confirmation message uses an f-string instead of
    ``+`` concatenation.
    """
    if not message:
        bot.say(f"The current volume is {self.bot.songrequest_manager.volume_val}%")
        return True
    try:
        val = int(message)
    except (TypeError, ValueError):
        bot.whisper(source, "Invalid volume setting enter a volume between 0-100")
        return False
    if val < 0 or val > 100:
        bot.whisper(source, "Invalid volume setting enter a volume between 0-100")
        return False
    self.bot.songrequest_manager.volume_function(val)
    bot.whisper(source, f"Volume has been changed to {message}%")
    return True
def show_video(self, bot, source, message, **rest):
    """Moderator command: make the video overlay visible. Always returns True."""
    shown = self.bot.songrequest_manager.show_function()
    bot.whisper(source, "The video has been shown!" if shown else "The video is already showing!")
    return True
def hide_video(self, bot, source, message, **rest):
    """Moderator command: hide the video overlay. Always returns True."""
    hidden = self.bot.songrequest_manager.hide_function()
    bot.whisper(source, "The video has been hidden!" if hidden else "The video is already hidden!")
    return True
def load_commands(self, **options):
    """Register the chat commands exposed by the song-request module."""
    # !sr and !songrequest share a single Command instance.
    request_command = Command.raw_command(
        self.add_song, delay_all=0, delay_user=3, notify_on_error=True, cost=self.settings["point_cost"]
    )
    self.commands["sr"] = request_command
    self.commands["songrequest"] = request_command
    self.commands["song"] = Command.raw_command(
        self.get_current_song, delay_all=0, delay_user=3, notify_on_error=True
    )
    self.commands["next"] = Command.raw_command(self.get_next_song, delay_all=0, delay_user=3, notify_on_error=True)
    # All moderator-level (level 500) control commands share the same options.
    moderator_handlers = {
        "opensr": self.open_module,
        "closesr": self.close_module,
        "skip": self.skip,
        "pause": self.pause,
        "resume": self.resume,
        "volume": self.volume,
        "showvideo": self.show_video,
        "hidevideo": self.hide_video,
    }
    for name, handler in moderator_handlers.items():
        self.commands[name] = Command.raw_command(
            handler, delay_all=0, delay_user=3, level=500, notify_on_error=True
        )
def enable(self, bot):
    """Enable the module: build the YouTube API client, reload the backup
    playlist into the queue, and start the song-request manager."""
    if not self.bot:
        return
    import apiclient
    from apiclient.discovery import build
    def build_request(_, *args, **kwargs):
        # Give each API request its own Http object instead of the shared
        # default — presumably to avoid sharing one httplib2.Http across
        # threads; confirm before removing.
        import httplib2
        new_http = httplib2.Http()
        return apiclient.http.HttpRequest(new_http, *args, **kwargs)
    self.youtube = build("youtube", "v3", developerKey=self.settings["youtube_key"], requestBuilder=build_request)
    with DBManager.create_session_scope() as db_session:
        # Rebuild the backup-song pool from scratch, shuffled, on every enable.
        SongrequestQueue._clear_backup_songs(db_session)
        if self.settings["backup_playlist_id"] and self.settings["backup_playlist_id"] != "":
            backup_songs = self.getBackUpListSongs()
            random.shuffle(backup_songs)
            SongrequestQueue._load_backup_songs(db_session, backup_songs, self.youtube, self.settings)
        db_session.commit()
    self.bot.songrequest_manager.enable(self.settings, self.youtube)
    # Automatically close song request when the stream goes offline.
    HandlerManager.add_handler("on_stream_stop", self.bot.songrequest_manager.close_module_function)
def disable(self, bot):
    """Shut down the song-request manager and detach the stream-stop hook."""
    if not self.bot:
        return
    manager = self.bot.songrequest_manager
    manager.disable()
    HandlerManager.remove_handler("on_stream_stop", manager.close_module_function)
| [
"troybensonsa@gmail.com"
] | troybensonsa@gmail.com |
3937c0bb55b8d468311b4c62e567d8e7c277c070 | 6eab4ba163cbee11ae1e31263e2ed0df69ad304b | /scripts/on_the_fly_assessment/add_feature_to_master.py | 68cfbdd764241d283c42c11b93411a0fc7cb7e4e | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | liuze-cloud/on_the_fly_assessment | c72bbdc2649afa6d7b3c0b9bf72a000d1957d370 | 102a7985d1765b11e6a7fdc1a11ac973cbc5fe3d | refs/heads/master | 2020-11-28T23:03:34.244558 | 2017-08-02T21:00:20 | 2017-08-02T21:00:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 21 14:48:10 2016
@author: fangren
"""
import numpy as np
import os.path
import csv
import os
def add_feature_to_master(features, base_filename, folder_path, save_path, master_index, index):
    """Merge feature columns into the master meta-data CSV.

    *features* is a 2-D array of strings whose first row holds the feature
    names and whose remaining rows hold one feature vector per processed
    scan.  When a master CSV exists in *folder_path*, its rows for the
    scans just processed (rows index-N .. index, where N is the number of
    feature rows) are extended with the feature columns and written to
    *save_path*; otherwise the features alone are written as a new file.

    Fixes over the previous revision:
    * ``csv_input.close`` / ``csv_output.close`` lacked call parentheses,
      so the files were never actually closed — replaced with ``with``.
    * the CSV found via ``os.listdir`` was used as a bare basename, making
      the subsequent ``os.path.exists`` check look in the current working
      directory instead of *folder_path* — now joined with the folder.
    """
    out_path = os.path.join(save_path, base_filename + master_index + 'master_csv.csv')
    master_filename = os.path.join(folder_path, base_filename + 'scan1.csv')
    # Prefer any .csv already present in the folder (last one wins).
    for file in os.listdir(folder_path):
        if file.endswith(".csv"):
            master_filename = os.path.join(folder_path, file)
    if os.path.exists(master_filename):
        master_data = []
        with open(master_filename, 'r') as csv_input:
            reader = csv.reader(csv_input, delimiter=',')
            for i, row in enumerate(reader):
                if i == 0:
                    line_for_specPlot = row
                elif i == 1:
                    header = row
                else:
                    master_data.append(row)
        master_data = np.array(master_data)
        # Some cells hold truncated scientific-notation strings like
        # ' 4.38247e-'; zero them out in columns 1 and 2 before merging.
        for col in (1, 2):
            for i in range(len(master_data[:, col])):
                if 'e' in master_data[i][col]:
                    master_data[i][col] = 0
        header = header[:master_data.shape[1]] + list(features[0, :])
        num_of_scan_processed = features.shape[0] - 1
        # Pair the rows of the just-processed scans with their feature vectors.
        master_data = np.concatenate(
            (master_data[(index - num_of_scan_processed):index, :],
             features[1:num_of_scan_processed + 1, :]),
            axis=1)
        with open(out_path, 'w') as csv_output:
            writer = csv.writer(csv_output, delimiter=',')
            writer.writerow(line_for_specPlot)
            writer.writerow(header)
            for row in master_data:
                writer.writerow(row)
    else:
        # No master CSV yet: write the feature table as the initial master file.
        with open(out_path, 'w') as csv_output:
            writer = csv.writer(csv_output, delimiter=',')
            for row in features:
                writer.writerow(row)
"ren.fang.ren@gmail.com"
] | ren.fang.ren@gmail.com |
231ac3f59f691f80ea3a67f977c3d947c79a9390 | 34bcf239c02aabe9a70d70b1a33b2286200e3ad9 | /banking_system/bank/views.py | ea28220babddd1c82803ce73c94c22ad23592dfb | [] | no_license | mruga7/Sparks-Internship | ce564308a3d29919667c78a11fd8f73e0ffae18c | 91751bbbb1484f475d24dc1f3d7f71922d3fe4ed | refs/heads/master | 2023-06-09T00:31:25.840835 | 2021-06-21T08:17:36 | 2021-06-21T08:17:36 | 378,855,478 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | from .models import Transfer,History
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
    """Render the bank landing page."""
    template_name = "bank/Home.html"
    return render(request, template_name)
# def customer(request):
# return render(request,"bank/Customer-List.html")
def info(request, id):
    """Render the transfer-info page for the Transfer row with primary key *id*.

    Fix: ``.all().filter(...)`` was redundant — ``.filter(...)`` builds the
    identical queryset on its own.
    """
    return render(request, "bank/info.html", {
        "infolist": Transfer.objects.filter(id=id),
    })
def info2(request):
    """On POST, persist a History record from the submitted form fields,
    then render the transaction-history page with all records."""
    if request.method == "POST":
        sender = request.POST.get('Name', False)
        receiver = request.POST.get('email', False)
        amount = request.POST.get('text', False)
        record = History(Sender=sender, Receiver=receiver, Amount=amount)
        record.save()
    return render(request, "bank/Transaction-History.html", {
        "hi": History.objects.all()
    })
def transaction(request):
    """Render the transaction-history page (no context)."""
    template_name = "bank/Transaction-History.html"
    return render(request, template_name)
def transfer(request):
    """Render the transfer-info page (no context)."""
    template_name = "bank/info.html"
    return render(request, template_name)
def userlist(request):
    """Render the customer list built from every Transfer record."""
    context = {"userlist": Transfer.objects.all()}
    return render(request, "bank/Customer-List.html", context)
| [
"mrugakshi9@gmail.com"
] | mrugakshi9@gmail.com |
28ca4de31ddec554ace8a4ed19c5f70d7811a4ae | 8520b42a00fc7d1cfd80f963fed959cfea079306 | /api/model.py | 310fb1c305b32c0ffd2927069153b1c2f339c429 | [] | no_license | sfioritto/dustbin | 5ae9054da3a26cb888bd34d368ea54c781b37b90 | cae8debcbcb689b949b03bc15c50cee6e13c4b91 | refs/heads/master | 2020-11-26T19:32:19.960993 | 2012-12-21T15:45:55 | 2012-12-23T18:11:03 | 6,970,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,285 | py | import dustbin.config as config
import json
import re
import cgi
import dateutil.parser
import hashlib
import urllib
import os.path as path
import types
from bleach import clean
from markdown2 import markdown
from datetime import datetime as dt
from time import strftime
from urls import urlpatterns
# Valid subdomains are one or more ASCII letters/digits (no hyphens or underscores).
subdomainre = re.compile('^[a-zA-Z0-9]+$')
class Base(object):
    """Common base for JSON-backed models (Python 2 codebase).

    Model state lives in ``self.meta`` (a plain dict serialized to/from
    JSON); ``__getattr__``/``__setattr__`` route any attribute that is not
    a class-level property (or ``meta``/``db``) into that dict.  ``db`` is
    a key/value store with get/set/remove/get_bulk — assumed interface,
    confirm against the concrete store.
    """
    def init(self, kwargs):
        """Helper for subclasses: forward a ``locals()`` dict to __init__."""
        #TODO: can you get rid of this?
        if kwargs.has_key('self'):
            del kwargs['self']
        Base.__init__(self, **kwargs)
    def update(self):
        """Re-read this model's state from the store."""
        self.load(self.url)
    def __init__(self, db=None, **kwargs):
        # Each keyword goes through __setattr__, so plain values land in meta
        # while property-backed names invoke their setters.
        self.meta = {}
        self.db = db
        for key, value in kwargs.items():
            self.__setattr__(key, value)
    def __getattr__(self, name):
        # Only called when normal lookup fails: fall back to the meta dict.
        if self.meta.has_key(name):
            return self.meta[name]
        else:
            raise AttributeError
    def __setattr__(self, name, value):
        #meta and db never go into the meta property
        if hasattr(type(self), name) or name in ['meta', 'db']:
            object.__setattr__(self, name, value)
        else:
            self.meta[name] = value
    def __eq__(self, other):
        # Equality = every key of our meta matches the other's meta.
        for key, value in self.meta.items():
            if other.meta[key] != value:
                return False
        return True
    def delete(self, db=None):
        """Remove this model from the store (optionally via a new db handle)."""
        if db:
            self.db = db
        self.db.remove(self.url)
    def load(self, key, db=None):
        """Populate meta from the JSON stored under *key*; returns self."""
        if db:
            self.db = db
        assert self.db, 'No db instance. Provide a db instance when creating the model or as a keyword to this method'
        self.meta = json.loads(self.db.get(key))
        return self
    @property
    def json(self):
        # JSON serialization of the full meta dict.
        return json.dumps(self.meta)
    @property
    def author(self):
        # Authors are stored by reference (subdomain) and loaded on demand.
        url = Author.get_url(self.meta['author']['subdomain'])
        return Author(db=self.db).load(url)
    @author.setter
    def author(self, author):
        # Accepts an Author instance, an already-shaped dict, or None.
        if author and type(author) == Author:
            self.meta['author'] = {'subdomain' : author.subdomain,
                                   'email' : author.email}
        # this is a dictionary format.
        elif author:
            self.meta['author'] = author
        else:
            self.meta['author'] = None
class Lense(Base):
    """A named, public-or-private view owned by an Author, backed by its own Feed."""
    def __init__(self,
                 name='',
                 subdomain = '',
                 feed=None,
                 public=True,
                 db=None,
                 author=None):
        Base.init(self, locals())
    @property
    def feed(self):
        # The feed is stored by URL and materialized on access.
        return Feed.get(self.meta['feed'], self.author, self.db)
    @feed.setter
    def feed(self, feed):
        # Accepts either a feed URL string or a Feed instance.
        if type(feed) == types.UnicodeType:
            self.meta['feed'] = feed
        elif type(feed) == Feed:
            self.meta['feed'] = feed.url
    @property
    def url(self):
        # e.g. /subdomain/public/name or /subdomain/private/name
        pubpriv = "public"
        if not self.public:
            pubpriv = "private"
        return "/%s/%s/%s" % (self.subdomain, pubpriv, self.name)
    def save(self, db=None):
        """Persist the lense, create its posts feed, and register it with its author."""
        if db:
            self.db = db
        assert self.db, 'You must provide a db instance to the model constructor to save.'
        assert self.name != 'posts', "A lense can't be named 'posts'."
        # Names must be unique per author.
        assert self.name not in [lense.name for lense in self.author.lenses]
        self.feed = Feed.get(self.url + '/posts', self.author, self.db).save()
        self.db.set(self.url, self.json)
        self.author.add_lense(self)
        return self
    def delete(self, db=None):
        """Delete the lense, its feed, and its registration with the author."""
        if db:
            self.db = db
        self.feed.delete()
        self.author.remove_lense(self)
        self.db.remove(self.url)
class Post(Base):
    """A markdown post, stored as both JSON metadata and a sanitized HTML fragment.

    The post URL is derived from its prefix, date (month/day/year) and a
    filename generated from the title (or a content hash when untitled).
    """
    def __init__(self,
                 content='',
                 prefix='',
                 title = '',
                 date = None,
                 filename = '',
                 db=None,
                 author = None,
                 lense = None):
        if not date:
            date = dt.now()
        if not filename:
            filename = self.generate_filename(title, content, date.isoformat())
        Base.init(self, locals())
    @property
    def lense(self):
        # Lense is stored by URL; None when the post has no lense.
        if self.meta.has_key('lense'):
            return Lense(db=self.db).load(self.meta['lense'])
        else:
            return None
    @lense.setter
    def lense(self, lense):
        # Accepts either a lense URL string or a Lense instance.
        if type(lense) == types.UnicodeType:
            self.meta['lense'] = lense
        elif type(lense) == Lense:
            self.meta['lense'] = lense.url
    @property
    def date(self):
        # Dates are stored as ISO-format strings and parsed on access.
        return dateutil.parser.parse(self.meta['date'])
    @date.setter
    def date(self, value):
        if type(value) == types.UnicodeType:
            self.meta['date'] = value
        else:
            self.meta['date'] = value.isoformat()
        # Changing the date can change the generated filename (hash includes date).
        if hasattr(self, 'filename'):
            self.filename = self.generate_filename(self.title, self.content, self.meta['date'])
    @property
    def url(self):
        # prefix/month/day/year/filename
        return path.join(*([self.prefix] + [str(x) for x in
                                            self.date.month,
                                            self.date.day,
                                            self.date.year,
                                            self.filename]))
    @property
    def fragment(self):
        # Markdown -> HTML, sanitized against the configured tag/attr whitelists.
        return clean(markdown(self.content),
                     tags=config.TAG_WHITELIST,
                     attributes=config.ATTR_WHITELIST,
                     strip=True)
    def save(self, db=None):
        """Persist JSON + HTML and add the post to its prefix feed and author feed."""
        if db:
            self.db = db
        assert self.db, 'You must provide a db instance to the model constructor to save.'
        self.db.set(self.url + '.json', self.json)
        self.db.set(self.url + '.html', self.fragment)
        feed = Feed.get(self.prefix, self.author, self.db)
        feed.add_post(self)
        self.author.feed.add_post(self)
        return self
    def delete(self, db=None):
        """Remove the post from both feeds and delete its stored representations."""
        if db:
            self.db = db
        assert self.db, 'You must provide a db instance to the model constructor to save.'
        feed = Feed.get(self.prefix, self.author, self.db)
        feed.remove_post(self)
        self.author.feed.remove_post(self)
        self.db.remove(self.url + '.json')
        self.db.remove(self.url + '.html')
    def generate_filename(self, title, content, date):
        """URL-safe filename: slugged title, or a SHA-256 of content+date when untitled."""
        if title:
            title = title.replace(' ', '-')
            return urllib.pathname2url(title)
        else:
            hash = hashlib.sha256(content + date).digest()
            return urllib.pathname2url(hash)
class Author(Base):
    """An account identified by a subdomain; owns a feed and a list of lenses."""
    def __init__(self,
                 db=None,
                 email=None,
                 subdomain=None,
                 feed=None,
                 lenses=None):
        if not lenses:
            lenses = []
        Base.init(self, locals())
    def save(self, db=None):
        """Validate and persist the author, creating/saving its feed."""
        if db:
            self.db = db
        assert self.db, 'You must provide a db instance to the model constructor to save.'
        assert self.email, 'email is required'
        assert self.subdomain, 'subdomain is required'
        assert Author.valid_subdomain(self.subdomain), 'Subdomain is invalid'
        #TODO: always save to the url without .json extension as a default, only add json extension
        # if there are multiple representation possibilities.
        feed = Feed.get("/%s/feed" % self.subdomain, self, self.db)
        self.feed = feed
        self.db.set(self.url, self.json)
        #this must come after saving the author model,
        #otherwise it fails when trying to access the author model.
        feed.save()
        return self
    def delete(self, db=None):
        """Delete the author together with all of its lenses and its feed."""
        if db:
            self.db = db
        for lense in self.lenses:
            lense.delete(db=self.db)
        self.feed.delete(db=self.db)
        self.db.remove(self.url)
    def add_lense(self, lense):
        """Prepend a lense URL to this author's lense list and persist."""
        assert lense.url not in [lense.url for lense in self.lenses]
        self.lenses = [lense.url] + self.meta['lenses']
        self.save()
    def remove_lense(self, lense):
        """Remove a lense (instance or URL) from the list; raises when absent."""
        if type(lense) == Lense:
            id = lense.url
        else:
            id = lense
        if id in [lense.url for lense in self.lenses]:
            self.lenses = [lense.url for lense in self.lenses if lense.url != id]
            self.save()
        else:
            raise Exception('Lense not found')
    #TODO: this pattern is so common, factor it out.
    @property
    def feed(self):
        return Feed.get(self.meta['feed'], self, self.db)
    @feed.setter
    def feed(self, feed):
        # Accepts either a feed URL string or a Feed instance.
        if type(feed) == types.UnicodeType:
            self.meta['feed'] = feed
        elif type(feed) == Feed:
            self.meta['feed'] = feed.url
    @property
    def lenses(self):
        # Bulk-load every stored lense document and materialize Lense objects.
        return [Lense(**json.loads(jsons)) for jsons in self.db.get_bulk(self.meta['lenses'])]
    @lenses.setter
    def lenses(self, lenses):
        self.meta['lenses'] = lenses
    @property
    def url(self):
        return Author.get_url(self.subdomain)
    @staticmethod
    def get_url(subdomain):
        return '/' + subdomain
    @staticmethod
    def valid_subdomain(subdomain):
        # DNS limits: one label max 63 chars, full hostname max 255 chars;
        # characters restricted to ASCII letters/digits by subdomainre.
        if len(subdomain) > 63:
            return False
        elif len(config.domain) + len(subdomain) > 255:
            return False
        elif not subdomainre.match(subdomain):
            return False
        else:
            return True
class Feed(Base):
    """An Atom-like feed of post entries, newest first, owned by an Author."""
    def __init__(self,
                 title=None,
                 url='',
                 db=None,
                 links=None,
                 updated=None,
                 author=None,
                 entries=None):
        if not entries:
            entries = []
        if not links:
            # Default link set: self link plus the site root.
            links = [{'href' : 'http://www.%s%s' % (config.domain, url),
                      'rel' : 'self'},
                     {'href' : 'http://www.%s' % config.domain}]
        if not updated:
            updated = dt.now()
        if not title:
            title = 'feed'
        Base.init(self, locals())
    def add_post(self, post):
        """Prepend an entry built from *post* and persist the feed."""
        entry = {}
        entry['content'] = cgi.escape(post.fragment)
        entry['id'] = post.url
        entry['title'] = post.title
        entry['link'] = post.url
        entry['updated'] = strftime('%Y-%m-%d %H:%M:%S', post.date.utctimetuple())
        self.entries = [entry] + self.entries
        self.save()
    def remove_post(self, post):
        """Remove the entry for *post* (instance or URL); raises when absent."""
        if type(post) == Post:
            id = post.url
        else:
            id = post
        if id in [entry['id'] for entry in self.entries]:
            self.entries = [entry for entry in self.entries if entry['id'] != id]
            self.save()
        else:
            raise Exception('Post not found')
    @property
    def updated(self):
        # Stored as a formatted string; parsed back into a datetime on access.
        return dateutil.parser.parse(self.meta['updated'])
    @updated.setter
    def updated(self, value):
        if type(value) == types.UnicodeType:
            self.meta['updated'] = value
        else:
            self.meta['updated'] = strftime('%Y-%m-%d %H:%M:%S', value.utctimetuple())
    def save(self, db=None):
        """Validate the feed URL shape and persist the feed."""
        if db:
            self.db = db
        assert self.url.startswith('/' + self.author.subdomain), "url %s doesn't start with subdomain of author: %s" % (self.url, self.author.subdomain)
        assert self.db, 'You must provide a db instance to the model constructor to save.'
        assert self.title, 'Feeds require a title.'
        assert self.url.endswith('/posts') or self.url.endswith('/feed')
        self.db.set(self.url, self.json)
        return self
    @staticmethod
    def get(url, author, db):
        """Load the feed stored under *url*, or build a fresh unsaved one.

        NOTE(review): the bare except treats any load failure (missing key,
        bad JSON) as "feed does not exist yet".
        """
        try:
            feed = Feed(db=db).load(url)
        except:
            # Derive the title from the subdomain captured by the URL pattern.
            pattern = re.compile(urlpatterns['FeedHandler'])
            groups = pattern.match(url)
            title = groups.groupdict()['subdomain']
            feed = Feed(db=db,
                        url=url,
                        author=author,
                        title=title)
        return feed
| [
"sean.fioritto@gmail.com"
] | sean.fioritto@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.